TCP: fix a bug that triggers a large number of TCP RSTs by mistake
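
As far as I can tell from the hunks below, the user-visible change is the
last one: in tcp_data_queue(), a segment whose payload has been copied in
full to the receiving task was previously marked "eaten" only when it did
not carry FIN. A data+FIN segment therefore stayed on sk_receive_queue even
though the application had already consumed its data, and on close() such a
leftover skb is counted as unread data, so the connection is torn down with
a RST instead of a normal FIN exchange. Dropping the !th->fin test lets the
skb be freed once its payload is consumed; tcp_fin() is still invoked on the
FIN flag either way. (This reading of the RST mechanism is my own
interpretation of the patch, not taken from an upstream changelog.)

The other hunks look mechanical: direct writes to dst->metrics[] become
dst_metric_set() calls, and tcp_init_metrics() drops its cwnd:/reset: goto
pair in favour of straight-line code, with no intended change in behaviour.

For orientation, a condensed sketch of the receive path the last hunk
touches; identifiers follow the kernel source, but the checks and locking
around the user-space copy are abbreviated:

        int eaten = -1; /* <0: nothing copied, 0: partial copy, >0: fully consumed */

        if (tp->ucopy.task == current && sock_owned_by_user(sk)) {
                int chunk = min_t(unsigned int, skb->len, tp->ucopy.len);

                if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) {
                        tp->ucopy.len -= chunk;
                        tp->copied_seq += chunk;
                        /* After this patch a fully copied segment counts as
                         * eaten even when it carries FIN.
                         */
                        eaten = (chunk == skb->len);
                }
        }

        if (eaten <= 0)
                __skb_queue_tail(&sk->sk_receive_queue, skb);
        tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
        if (th->fin)
                tcp_fin(skb, sk, th);   /* runs whether or not the skb was eaten */
        if (eaten > 0)
                __kfree_skb(skb);

---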
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 6d8ab1c..eb7f82e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -734,7 +734,7 @@ void tcp_update_metrics(struct sock *sk)
                         * Reset our results.
                         */
                        if (!(dst_metric_locked(dst, RTAX_RTT)))
-                               dst->metrics[RTAX_RTT - 1] = 0;
+                               dst_metric_set(dst, RTAX_RTT, 0);
                        return;
                }
 
@@ -776,34 +776,38 @@ void tcp_update_metrics(struct sock *sk)
                        if (dst_metric(dst, RTAX_SSTHRESH) &&
                            !dst_metric_locked(dst, RTAX_SSTHRESH) &&
                            (tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH))
-                               dst->metrics[RTAX_SSTHRESH-1] = tp->snd_cwnd >> 1;
+                               dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_cwnd >> 1);
                        if (!dst_metric_locked(dst, RTAX_CWND) &&
                            tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
-                               dst->metrics[RTAX_CWND - 1] = tp->snd_cwnd;
+                               dst_metric_set(dst, RTAX_CWND, tp->snd_cwnd);
                } else if (tp->snd_cwnd > tp->snd_ssthresh &&
                           icsk->icsk_ca_state == TCP_CA_Open) {
                        /* Cong. avoidance phase, cwnd is reliable. */
                        if (!dst_metric_locked(dst, RTAX_SSTHRESH))
-                               dst->metrics[RTAX_SSTHRESH-1] =
-                                       max(tp->snd_cwnd >> 1, tp->snd_ssthresh);
+                               dst_metric_set(dst, RTAX_SSTHRESH,
+                                              max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
                        if (!dst_metric_locked(dst, RTAX_CWND))
-                               dst->metrics[RTAX_CWND-1] = (dst_metric(dst, RTAX_CWND) + tp->snd_cwnd) >> 1;
+                               dst_metric_set(dst, RTAX_CWND,
+                                              (dst_metric(dst, RTAX_CWND) +
+                                               tp->snd_cwnd) >> 1);
                } else {
                        /* Else slow start did not finish, cwnd is non-sense,
                           ssthresh may be also invalid.
                         */
                        if (!dst_metric_locked(dst, RTAX_CWND))
-                               dst->metrics[RTAX_CWND-1] = (dst_metric(dst, RTAX_CWND) + tp->snd_ssthresh) >> 1;
+                               dst_metric_set(dst, RTAX_CWND,
+                                              (dst_metric(dst, RTAX_CWND) +
+                                               tp->snd_ssthresh) >> 1);
                        if (dst_metric(dst, RTAX_SSTHRESH) &&
                            !dst_metric_locked(dst, RTAX_SSTHRESH) &&
                            tp->snd_ssthresh > dst_metric(dst, RTAX_SSTHRESH))
-                               dst->metrics[RTAX_SSTHRESH-1] = tp->snd_ssthresh;
+                               dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_ssthresh);
                }
 
                if (!dst_metric_locked(dst, RTAX_REORDERING)) {
                        if (dst_metric(dst, RTAX_REORDERING) < tp->reordering &&
                            tp->reordering != sysctl_tcp_reordering)
-                               dst->metrics[RTAX_REORDERING-1] = tp->reordering;
+                               dst_metric_set(dst, RTAX_REORDERING, tp->reordering);
                }
        }
 }
@@ -912,25 +916,20 @@ static void tcp_init_metrics(struct sock *sk)
                tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
        }
        tcp_set_rto(sk);
-       if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
-               goto reset;
-
-cwnd:
-       tp->snd_cwnd = tcp_init_cwnd(tp, dst);
-       tp->snd_cwnd_stamp = tcp_time_stamp;
-       return;
-
+       if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp) {
 reset:
-       /* Play conservative. If timestamps are not
-        * supported, TCP will fail to recalculate correct
-        * rtt, if initial rto is too small. FORGET ALL AND RESET!
-        */
-       if (!tp->rx_opt.saw_tstamp && tp->srtt) {
-               tp->srtt = 0;
-               tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
-               inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
+               /* Play conservative. If timestamps are not
+                * supported, TCP will fail to recalculate correct
+                * rtt, if initial rto is too small. FORGET ALL AND RESET!
+                */
+               if (!tp->rx_opt.saw_tstamp && tp->srtt) {
+                       tp->srtt = 0;
+                       tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
+                       inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
+               }
        }
-       goto cwnd;
+       tp->snd_cwnd = tcp_init_cwnd(tp, dst);
+       tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
 static void tcp_update_reordering(struct sock *sk, const int metric,
@@ -4400,7 +4399,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
                        if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) {
                                tp->ucopy.len -= chunk;
                                tp->copied_seq += chunk;
-                               eaten = (chunk == skb->len && !th->fin);
+                               eaten = (chunk == skb->len);
                                tcp_rcv_space_adjust(sk);
                        }
                        local_bh_disable();