Merge branch 'semaphore' of git://git.kernel.org/pub/scm/linux/kernel/git/willy/misc

diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d6ea970..75efd24 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -602,7 +602,7 @@ static u32 tcp_rto_min(struct sock *sk)
        u32 rto_min = TCP_RTO_MIN;
 
        if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
-               rto_min = dst_metric(dst, RTAX_RTO_MIN);
+               rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
        return rto_min;
 }
 
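The switch from dst_metric() to dst_metric_rtt() above is the core of the first
half of this patch: RTT and RTTVAR metrics cached on the dst entry are kept in a
fixed unit (milliseconds) and converted to jiffies on access, so cached values
stay meaningful across different HZ settings. The helpers live outside this
file; a minimal sketch of their assumed shape:

static inline unsigned long dst_metric_rtt(const struct dst_entry *dst,
                                           int metric)
{
        /* metric is stored in ms; callers want jiffies */
        return msecs_to_jiffies(dst_metric(dst, metric));
}

static inline void set_dst_metric_rtt(struct dst_entry *dst, int metric,
                                      unsigned long rtt)
{
        /* rtt arrives in jiffies; store it in ms */
        dst->metrics[metric - 1] = jiffies_to_msecs(rtt);
}

With reads and writes funneled through these two helpers, the open-coded
dst->metrics[RTAX_RTT - 1] accesses in the hunks below can go away.
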
@@ -729,6 +729,7 @@ void tcp_update_metrics(struct sock *sk)
        if (dst && (dst->flags & DST_HOST)) {
                const struct inet_connection_sock *icsk = inet_csk(sk);
                int m;
+               unsigned long rtt;
 
                if (icsk->icsk_backoff || !tp->srtt) {
                        /* This session failed to estimate rtt. Why?
@@ -740,7 +741,8 @@ void tcp_update_metrics(struct sock *sk)
                        return;
                }
 
-               m = dst_metric(dst, RTAX_RTT) - tp->srtt;
+               rtt = dst_metric_rtt(dst, RTAX_RTT);
+               m = rtt - tp->srtt;
 
                /* If newly calculated rtt larger than stored one,
                 * store new one. Otherwise, use EWMA. Remember,
@@ -748,12 +750,13 @@ void tcp_update_metrics(struct sock *sk)
                 */
                if (!(dst_metric_locked(dst, RTAX_RTT))) {
                        if (m <= 0)
-                               dst->metrics[RTAX_RTT - 1] = tp->srtt;
+                               set_dst_metric_rtt(dst, RTAX_RTT, tp->srtt);
                        else
-                               dst->metrics[RTAX_RTT - 1] -= (m >> 3);
+                               set_dst_metric_rtt(dst, RTAX_RTT, rtt - (m >> 3));
                }
 
                if (!(dst_metric_locked(dst, RTAX_RTTVAR))) {
+                       unsigned long var;
                        if (m < 0)
                                m = -m;
 
@@ -762,11 +765,13 @@ void tcp_update_metrics(struct sock *sk)
                        if (m < tp->mdev)
                                m = tp->mdev;
 
-                       if (m >= dst_metric(dst, RTAX_RTTVAR))
-                               dst->metrics[RTAX_RTTVAR - 1] = m;
+                       var = dst_metric_rtt(dst, RTAX_RTTVAR);
+                       if (m >= var)
+                               var = m;
                        else
-                               dst->metrics[RTAX_RTTVAR-1] -=
-                                       (dst_metric(dst, RTAX_RTTVAR) - m)>>2;
+                               var -= (var - m) >> 2;
+
+                       set_dst_metric_rtt(dst, RTAX_RTTVAR, var);
                }
 
                if (tp->snd_ssthresh >= 0xFFFF) {
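The two rewrites above preserve the original EWMA arithmetic; they just stage it
in a local variable so the metric is read once and written back once through
set_dst_metric_rtt(). The cached RTT decays toward a lower srtt with gain 1/8
(the m >> 3); the variance jumps up immediately but decays with gain 1/4. A
worked example of the RTTVAR branch:

        unsigned long var = 40; /* cached RTAX_RTTVAR, in jiffies */
        unsigned long m = 8;    /* current deviation sample */

        if (m >= var)
                var = m;                        /* ramp up at once */
        else
                var -= (var - m) >> 2;          /* 40 - (32 >> 2) = 32 */

        /* 32 == 3/4 * 40 + 1/4 * 8: a one-quarter-gain EWMA step */
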
@@ -897,7 +902,7 @@ static void tcp_init_metrics(struct sock *sk)
        if (dst_metric(dst, RTAX_RTT) == 0)
                goto reset;
 
-       if (!tp->srtt && dst_metric(dst, RTAX_RTT) < (TCP_TIMEOUT_INIT << 3))
+       if (!tp->srtt && dst_metric_rtt(dst, RTAX_RTT) < (TCP_TIMEOUT_INIT << 3))
                goto reset;
 
        /* Initial rtt is determined from SYN,SYN-ACK.
@@ -914,12 +919,12 @@ static void tcp_init_metrics(struct sock *sk)
         * to low value, and then abruptly stops to do it and starts to delay
         * ACKs, wait for troubles.
         */
-       if (dst_metric(dst, RTAX_RTT) > tp->srtt) {
-               tp->srtt = dst_metric(dst, RTAX_RTT);
+       if (dst_metric_rtt(dst, RTAX_RTT) > tp->srtt) {
+               tp->srtt = dst_metric_rtt(dst, RTAX_RTT);
                tp->rtt_seq = tp->snd_nxt;
        }
-       if (dst_metric(dst, RTAX_RTTVAR) > tp->mdev) {
-               tp->mdev = dst_metric(dst, RTAX_RTTVAR);
+       if (dst_metric_rtt(dst, RTAX_RTTVAR) > tp->mdev) {
+               tp->mdev = dst_metric_rtt(dst, RTAX_RTTVAR);
                tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
        }
        tcp_set_rto(sk);
@@ -961,7 +966,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
                else
                        mib_idx = LINUX_MIB_TCPSACKREORDER;
 
-               NET_INC_STATS_BH(mib_idx);
+               NET_INC_STATS_BH(sock_net(sk), mib_idx);
 #if FASTRETRANS_DEBUG > 1
                printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
                       tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
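This is the first of the namespace conversions that make up the rest of the
patch: every NET_INC_STATS_BH() / TCP_INC_STATS_BH() call gains a struct net *
argument, obtained via sock_net(sk), so the SNMP counter is bumped in the
per-namespace MIB rather than a global one. The macro change itself happens
elsewhere; its assumed shape, with details differing in the real headers:

/* before: implicit global mib */
#define NET_INC_STATS_BH(field) \
        SNMP_INC_STATS_BH(net_statistics, field)

/* after: mib looked up in the given namespace */
#define NET_INC_STATS_BH(net, field) \
        SNMP_INC_STATS_BH((net)->mib.net_statistics, field)

Every hunk below that only adds sock_net(sk) as a first argument follows this
one pattern.
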
@@ -1157,7 +1162,7 @@ static void tcp_mark_lost_retrans(struct sock *sk)
                                tp->lost_out += tcp_skb_pcount(skb);
                                TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
                        }
-                       NET_INC_STATS_BH(LINUX_MIB_TCPLOSTRETRANSMIT);
+                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
                } else {
                        if (before(ack_seq, new_low_seq))
                                new_low_seq = ack_seq;
@@ -1169,10 +1174,11 @@ static void tcp_mark_lost_retrans(struct sock *sk)
                tp->lost_retrans_low = new_low_seq;
 }
 
-static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb,
+static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb,
                           struct tcp_sack_block_wire *sp, int num_sacks,
                           u32 prior_snd_una)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq);
        u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);
        int dup_sack = 0;
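tcp_check_dsack() used to take the tcp_sock directly, but struct net hangs off
struct sock, so helpers that now bump namespaced counters must be handed sk and
recover tp themselves, as the added tcp_sk(sk) line does. The accessors
involved, sketched (CONFIG_NET_NS variants elided):

static inline struct tcp_sock *tcp_sk(const struct sock *sk)
{
        /* valid because struct sock sits at the start of tcp_sock */
        return (struct tcp_sock *)sk;
}

static inline struct net *sock_net(const struct sock *sk)
{
        return sk->sk_net;
}

The same sk-instead-of-tp conversion is applied to tcp_dsack_set() and
tcp_dsack_extend() further down.
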
@@ -1180,7 +1186,7 @@ static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb,
        if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
                dup_sack = 1;
                tcp_dsack_seen(tp);
-               NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
        } else if (num_sacks > 1) {
                u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
                u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);
@@ -1189,7 +1195,8 @@ static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb,
                    !before(start_seq_0, start_seq_1)) {
                        dup_sack = 1;
                        tcp_dsack_seen(tp);
-                       NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
+                       NET_INC_STATS_BH(sock_net(sk),
+                                       LINUX_MIB_TCPDSACKOFORECV);
                }
        }
 
@@ -1416,10 +1423,10 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
        unsigned char *ptr = (skb_transport_header(ack_skb) +
                              TCP_SKB_CB(ack_skb)->sacked);
        struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
-       struct tcp_sack_block sp[4];
+       struct tcp_sack_block sp[TCP_NUM_SACKS];
        struct tcp_sack_block *cache;
        struct sk_buff *skb;
-       int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE) >> 3;
+       int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3);
        int used_sacks;
        int reord = tp->packets_out;
        int flag = 0;
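Two hardening tweaks here: the magic 4 becomes TCP_NUM_SACKS, and num_sacks is
clamped to it. ptr[1] is the SACK option's length byte; each block carries two
32-bit sequence numbers, 8 bytes, hence the >> 3. Without the min(), a
malformed length byte could claim more blocks than sp[] holds. A hypothetical
helper spelling out the arithmetic:

static int sack_blocks(unsigned int opt_len)    /* illustrative only */
{
        /* option is kind(1) + len(1) + n * 8 bytes of blocks */
        return min(TCP_NUM_SACKS,
                   (int)((opt_len - TCPOLEN_SACK_BASE) >> 3));
}
/* sack_blocks(26) == 3; a bogus sack_blocks(50) == 4, not 6 */
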
@@ -1434,7 +1441,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
                tcp_highest_sack_reset(sk);
        }
 
-       found_dup_sack = tcp_check_dsack(tp, ack_skb, sp_wire,
+       found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire,
                                         num_sacks, prior_snd_una);
        if (found_dup_sack)
                flag |= FLAG_DSACKING_ACK;
@@ -1475,7 +1482,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
                                mib_idx = LINUX_MIB_TCPSACKDISCARD;
                        }
 
-                       NET_INC_STATS_BH(mib_idx);
+                       NET_INC_STATS_BH(sock_net(sk), mib_idx);
                        if (i == 0)
                                first_sack_index = -1;
                        continue;
@@ -1968,7 +1975,7 @@ static int tcp_check_sack_reneging(struct sock *sk, int flag)
 {
        if (flag & FLAG_SACK_RENEGING) {
                struct inet_connection_sock *icsk = inet_csk(sk);
-               NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
 
                tcp_enter_loss(sk, 1);
                icsk->icsk_retransmits++;
@@ -2400,7 +2407,7 @@ static int tcp_try_undo_recovery(struct sock *sk)
                else
                        mib_idx = LINUX_MIB_TCPFULLUNDO;
 
-               NET_INC_STATS_BH(mib_idx);
+               NET_INC_STATS_BH(sock_net(sk), mib_idx);
                tp->undo_marker = 0;
        }
        if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
@@ -2423,7 +2430,7 @@ static void tcp_try_undo_dsack(struct sock *sk)
                DBGUNDO(sk, "D-SACK");
                tcp_undo_cwr(sk, 1);
                tp->undo_marker = 0;
-               NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
        }
 }
 
@@ -2446,7 +2453,7 @@ static int tcp_try_undo_partial(struct sock *sk, int acked)
 
                DBGUNDO(sk, "Hoe");
                tcp_undo_cwr(sk, 0);
-               NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
 
                /* So... Do not make Hoe's retransmit yet.
                 * If the first packet was delayed, the rest
@@ -2475,7 +2482,7 @@ static int tcp_try_undo_loss(struct sock *sk)
                DBGUNDO(sk, "partial loss");
                tp->lost_out = 0;
                tcp_undo_cwr(sk, 1);
-               NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
                inet_csk(sk)->icsk_retransmits = 0;
                tp->undo_marker = 0;
                if (tcp_is_sack(tp))
@@ -2594,7 +2601,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
            icsk->icsk_ca_state != TCP_CA_Open &&
            tp->fackets_out > tp->reordering) {
                tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering);
-               NET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSS);
        }
 
        /* D. Check consistency of the current state. */
@@ -2699,7 +2706,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
                else
                        mib_idx = LINUX_MIB_TCPSACKRECOVERY;
 
-               NET_INC_STATS_BH(mib_idx);
+               NET_INC_STATS_BH(sock_net(sk), mib_idx);
 
                tp->high_seq = tp->snd_nxt;
                tp->prior_ssthresh = 0;
@@ -3210,7 +3217,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
                }
                tp->frto_counter = 0;
                tp->undo_marker = 0;
-               NET_INC_STATS_BH(LINUX_MIB_TCPSPURIOUSRTOS);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS);
        }
        return 0;
 }
@@ -3263,12 +3270,12 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 
                tcp_ca_event(sk, CA_EVENT_FAST_ACK);
 
-               NET_INC_STATS_BH(LINUX_MIB_TCPHPACKS);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS);
        } else {
                if (ack_seq != TCP_SKB_CB(skb)->end_seq)
                        flag |= FLAG_DATA;
                else
-                       NET_INC_STATS_BH(LINUX_MIB_TCPPUREACKS);
+                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPUREACKS);
 
                flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
 
@@ -3285,6 +3292,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
         * log. Something worked...
         */
        sk->sk_err_soft = 0;
+       icsk->icsk_probes_out = 0;
        tp->rcv_tstamp = tcp_time_stamp;
        prior_packets = tp->packets_out;
        if (!prior_packets)
@@ -3317,8 +3325,6 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
        return 1;
 
 no_queue:
-       icsk->icsk_probes_out = 0;
-
        /* If this ack opens up a zero window, clear backoff.  It was
         * being used to time the probes, and is probably far higher than
         * it needs to be for normal retransmission.
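These two hunks are one change: the icsk_probes_out reset moves from the
no_queue: path into the common success path, so any acceptable ACK clears the
zero-window-probe counter, not only an ACK arriving when nothing is queued.
That matters because the probe timer aborts the connection once the counter
passes its limit; roughly, from tcp_probe_timer() (a sketch, not verbatim):

        if (icsk->icsk_probes_out > max_probes)
                tcp_write_err(sk);      /* give up: ETIMEDOUT */
        else
                tcp_send_probe0(sk);    /* keep probing the zero window */

A peer that keeps ACKing while data is still in flight should not creep toward
that abort limit, which is what the old placement allowed.
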
@@ -3711,8 +3717,10 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
        return 0;
 }
 
-static void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq)
+static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
                int mib_idx;
 
@@ -3721,20 +3729,21 @@ static void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq)
                else
                        mib_idx = LINUX_MIB_TCPDSACKOFOSENT;
 
-               NET_INC_STATS_BH(mib_idx);
+               NET_INC_STATS_BH(sock_net(sk), mib_idx);
 
                tp->rx_opt.dsack = 1;
                tp->duplicate_sack[0].start_seq = seq;
                tp->duplicate_sack[0].end_seq = end_seq;
-               tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + 1,
-                                          4 - tp->rx_opt.tstamp_ok);
+               tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks + 1;
        }
 }
 
-static void tcp_dsack_extend(struct tcp_sock *tp, u32 seq, u32 end_seq)
+static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        if (!tp->rx_opt.dsack)
-               tcp_dsack_set(tp, seq, end_seq);
+               tcp_dsack_set(sk, seq, end_seq);
        else
                tcp_sack_extend(tp->duplicate_sack, seq, end_seq);
 }
@@ -3745,7 +3754,7 @@ static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
 
        if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
            before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
-               NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
                tcp_enter_quickack_mode(sk);
 
                if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
@@ -3753,7 +3762,7 @@ static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
 
                        if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
                                end_seq = tp->rcv_nxt;
-                       tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, end_seq);
+                       tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq);
                }
        }
 
@@ -3780,9 +3789,8 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
                         * Decrease num_sacks.
                         */
                        tp->rx_opt.num_sacks--;
-                       tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks +
-                                                  tp->rx_opt.dsack,
-                                                  4 - tp->rx_opt.tstamp_ok);
+                       tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks +
+                                              tp->rx_opt.dsack;
                        for (i = this_sack; i < tp->rx_opt.num_sacks; i++)
                                sp[i] = sp[i + 1];
                        continue;
@@ -3832,7 +3840,7 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
         *
         * If the sack array is full, forget about the last one.
         */
-       if (this_sack >= 4) {
+       if (this_sack >= TCP_NUM_SACKS) {
                this_sack--;
                tp->rx_opt.num_sacks--;
                sp--;
@@ -3845,8 +3853,7 @@ new_sack:
        sp->start_seq = seq;
        sp->end_seq = end_seq;
        tp->rx_opt.num_sacks++;
-       tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack,
-                                  4 - tp->rx_opt.tstamp_ok);
+       tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
 }
 
 /* RCV.NXT advances, some SACKs should be eaten. */
@@ -3883,9 +3890,8 @@ static void tcp_sack_remove(struct tcp_sock *tp)
        }
        if (num_sacks != tp->rx_opt.num_sacks) {
                tp->rx_opt.num_sacks = num_sacks;
-               tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks +
-                                          tp->rx_opt.dsack,
-                                          4 - tp->rx_opt.tstamp_ok);
+               tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks +
+                                      tp->rx_opt.dsack;
        }
 }
 
@@ -3906,7 +3912,7 @@ static void tcp_ofo_queue(struct sock *sk)
                        __u32 dsack = dsack_high;
                        if (before(TCP_SKB_CB(skb)->end_seq, dsack_high))
                                dsack_high = TCP_SKB_CB(skb)->end_seq;
-                       tcp_dsack_extend(tp, TCP_SKB_CB(skb)->seq, dsack);
+                       tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack);
                }
 
                if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
@@ -3964,8 +3970,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 
        if (tp->rx_opt.dsack) {
                tp->rx_opt.dsack = 0;
-               tp->rx_opt.eff_sacks = min_t(unsigned int, tp->rx_opt.num_sacks,
-                                            4 - tp->rx_opt.tstamp_ok);
+               tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks;
        }
 
        /*  Queue data for delivery to the user.
@@ -4034,8 +4039,8 @@ queue_and_out:
 
        if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
                /* A retransmit, 2nd most common case.  Force an immediate ack. */
-               NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
-               tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
+               tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 
 out_of_window:
                tcp_enter_quickack_mode(sk);
@@ -4057,7 +4062,7 @@ drop:
                           tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
                           TCP_SKB_CB(skb)->end_seq);
 
-               tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, tp->rcv_nxt);
+               tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt);
 
                /* If window is closed, drop tail of packet. But after
                 * remembering D-SACK for its head made in previous line.
@@ -4122,12 +4127,12 @@ drop:
                        if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
                                /* All the bits are present. Drop. */
                                __kfree_skb(skb);
-                               tcp_dsack_set(tp, seq, end_seq);
+                               tcp_dsack_set(sk, seq, end_seq);
                                goto add_sack;
                        }
                        if (after(seq, TCP_SKB_CB(skb1)->seq)) {
                                /* Partial overlap. */
-                               tcp_dsack_set(tp, seq,
+                               tcp_dsack_set(sk, seq,
                                              TCP_SKB_CB(skb1)->end_seq);
                        } else {
                                skb1 = skb1->prev;
@@ -4140,12 +4145,12 @@ drop:
                       (struct sk_buff *)&tp->out_of_order_queue &&
                       after(end_seq, TCP_SKB_CB(skb1)->seq)) {
                        if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
-                               tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq,
+                               tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
                                                 end_seq);
                                break;
                        }
                        __skb_unlink(skb1, &tp->out_of_order_queue);
-                       tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq,
+                       tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
                                         TCP_SKB_CB(skb1)->end_seq);
                        __kfree_skb(skb1);
                }
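The calls above extend a pending D-SACK as the new segment swallows queued
out-of-order skbs: a fully covered skb is unlinked, freed, and its range merged
into the duplicate report; a partially covered one only donates the overlap.
tcp_sack_extend() (its tail is visible as context in an earlier hunk) plausibly
just grows a block to the union of touching ranges:

        /* sketch of the merge step inside tcp_sack_extend() */
        if (!after(seq, sp->end_seq) && !before(end_seq, sp->start_seq)) {
                if (before(seq, sp->start_seq))
                        sp->start_seq = seq;
                if (after(end_seq, sp->end_seq))
                        sp->end_seq = end_seq;
                return 1;       /* extended in place */
        }
        return 0;               /* disjoint: caller starts a new block */

So if the queue holds [100,200) and [200,300) and a retransmit covering
[50,350) arrives, both skbs are dropped and the sender ends up told that
[100,300) was duplicate data.
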
@@ -4176,7 +4181,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
                        struct sk_buff *next = skb->next;
                        __skb_unlink(skb, list);
                        __kfree_skb(skb);
-                       NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
+                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
                        skb = next;
                        continue;
                }
@@ -4244,7 +4249,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
                                struct sk_buff *next = skb->next;
                                __skb_unlink(skb, list);
                                __kfree_skb(skb);
-                               NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
+                               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
                                skb = next;
                                if (skb == tail ||
                                    tcp_hdr(skb)->syn ||
@@ -4307,7 +4312,7 @@ static int tcp_prune_ofo_queue(struct sock *sk)
        int res = 0;
 
        if (!skb_queue_empty(&tp->out_of_order_queue)) {
-               NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED);
                __skb_queue_purge(&tp->out_of_order_queue);
 
                /* Reset SACK state.  A conforming SACK implementation will
@@ -4336,7 +4341,7 @@ static int tcp_prune_queue(struct sock *sk)
 
        SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
 
-       NET_INC_STATS_BH(LINUX_MIB_PRUNECALLED);
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PRUNECALLED);
 
        if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
                tcp_clamp_window(sk);
@@ -4365,7 +4370,7 @@ static int tcp_prune_queue(struct sock *sk)
         * drop receive data on the floor.  It will get retransmitted
         * and hopefully then we'll have sufficient space.
         */
-       NET_INC_STATS_BH(LINUX_MIB_RCVPRUNED);
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_RCVPRUNED);
 
        /* Massive buffer overcommit. */
        tp->pred_flags = 0;
@@ -4795,7 +4800,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                                tcp_data_snd_check(sk);
                                return 0;
                        } else { /* Header too small */
-                               TCP_INC_STATS_BH(TCP_MIB_INERRS);
+                               TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
                                goto discard;
                        }
                } else {
@@ -4832,7 +4837,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
                                        __skb_pull(skb, tcp_header_len);
                                        tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-                                       NET_INC_STATS_BH(LINUX_MIB_TCPHPHITSTOUSER);
+                                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER);
                                }
                                if (copied_early)
                                        tcp_cleanup_rbuf(sk, skb->len);
@@ -4855,7 +4860,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                                if ((int)skb->truesize > sk->sk_forward_alloc)
                                        goto step5;
 
-                               NET_INC_STATS_BH(LINUX_MIB_TCPHPHITS);
+                               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
 
                                /* Bulk data transfer: receiver */
                                __skb_pull(skb, tcp_header_len);
@@ -4899,7 +4904,7 @@ slow_path:
        if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
            tcp_paws_discard(sk, skb)) {
                if (!th->rst) {
-                       NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
+                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
                        tcp_send_dupack(sk, skb);
                        goto discard;
                }
@@ -4934,8 +4939,8 @@ slow_path:
        tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
 
        if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
-               TCP_INC_STATS_BH(TCP_MIB_INERRS);
-               NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN);
+               TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
                tcp_reset(sk);
                return 1;
        }
@@ -4957,7 +4962,7 @@ step5:
        return 0;
 
 csum_error:
-       TCP_INC_STATS_BH(TCP_MIB_INERRS);
+       TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
 
 discard:
        __kfree_skb(skb);
@@ -4991,7 +4996,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
                    !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
                             tcp_time_stamp)) {
-                       NET_INC_STATS_BH(LINUX_MIB_PAWSACTIVEREJECTED);
+                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED);
                        goto reset_and_undo;
                }
 
@@ -5275,7 +5280,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
        if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
            tcp_paws_discard(sk, skb)) {
                if (!th->rst) {
-                       NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
+                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
                        tcp_send_dupack(sk, skb);
                        goto discard;
                }
@@ -5304,7 +5309,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
         *      Check for a SYN in window.
         */
        if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
-               NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
                tcp_reset(sk);
                return 1;
        }
@@ -5386,7 +5391,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                                            (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
                                             after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
                                                tcp_done(sk);
-                                               NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA);
+                                               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
                                                return 1;
                                        }
 
@@ -5446,7 +5451,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                if (sk->sk_shutdown & RCV_SHUTDOWN) {
                        if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
                            after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
-                               NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA);
+                               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
                                tcp_reset(sk);
                                return 1;
                        }