tcp: limit payload size of sacked skbs
[pandora-kernel.git] / net / ipv4 / tcp_output.c
index e614810..cb1130c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -220,7 +220,8 @@ void tcp_select_initial_window(int __space, __u32 mss,
                /* Set window scaling on max possible window
                 * See RFC1323 for an explanation of the limit to 14
                 */
-               space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
+               space = max_t(u32, space, sysctl_tcp_rmem[2]);
+               space = max_t(u32, space, sysctl_rmem_max);
                space = min_t(u32, space, *window_clamp);
                while (space > 65535 && (*rcv_wscale) < 14) {
                        space >>= 1;
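For context on the hunk above: `space` now honors the caller-supplied buffer size as well as both sysctls, and the loop then picks the smallest rcv_wscale (capped at 14 per RFC 1323, i.e. a maximum advertisable window of 65535 << 14, about 1 GiB) that lets the chosen space fit in the 16-bit window field. A minimal sketch of that selection, with illustrative names and the *window_clamp clamping left out for brevity:

	/* Illustrative only: mirrors the wscale selection above, not kernel API. */
	static int pick_rcv_wscale(unsigned int space, unsigned int tcp_rmem2,
				   unsigned int rmem_max)
	{
		int wscale = 0;

		if (space < tcp_rmem2)
			space = tcp_rmem2;
		if (space < rmem_max)
			space = rmem_max;
		while (space > 65535 && wscale < 14) {	/* RFC 1323 limit */
			space >>= 1;
			wscale++;
		}
		return wscale;
	}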
@@ -1171,8 +1172,8 @@ int tcp_mtu_to_mss(const struct sock *sk, int pmtu)
        mss_now -= icsk->icsk_ext_hdr_len;
 
        /* Then reserve room for full set of TCP options and 8 bytes of data */
-       if (mss_now < 48)
-               mss_now = 48;
+       if (mss_now < TCP_MIN_SND_MSS)
+               mss_now = TCP_MIN_SND_MSS;
 
        /* Now subtract TCP options size, not including SACKs */
        mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);
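TCP_MIN_SND_MSS replaces the open-coded 48 above. The matching definition added by the same fix sits in include/net/tcp.h rather than this file; in the upstream patch it reads roughly:

	/* From include/net/tcp.h in the upstream fix: 48 bytes keeps room for
	 * a full set of TCP options plus 8 bytes of payload.
	 */
	#define TCP_MIN_SND_MSS		48
	#define TCP_MIN_GSO_SIZE	(TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)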
@@ -1675,16 +1676,19 @@ static int tcp_mtu_probe(struct sock *sk)
        nskb->ip_summed = skb->ip_summed;
 
        tcp_insert_write_queue_before(nskb, skb, sk);
+       tcp_highest_sack_replace(sk, skb, nskb);
 
        len = 0;
        tcp_for_write_queue_from_safe(skb, next, sk) {
                copy = min_t(int, skb->len, probe_size - len);
-               if (nskb->ip_summed)
+               if (nskb->ip_summed) {
                        skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
-               else
-                       nskb->csum = skb_copy_and_csum_bits(skb, 0,
-                                                           skb_put(nskb, copy),
-                                                           copy, nskb->csum);
+               } else {
+                       __wsum csum = skb_copy_and_csum_bits(skb, 0,
+                                                            skb_put(nskb, copy),
+                                                            copy, 0);
+                       nskb->csum = csum_block_add(nskb->csum, csum, len);
+               }
 
                if (skb->len <= copy) {
                        /* We've eaten all the data from this skb.
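On the checksum hunk above: the old code passed the running nskb->csum as the starting sum to skb_copy_and_csum_bits(), which is only correct when each copied chunk begins at an even byte offset in nskb. The fix checksums every chunk from zero and folds it in at its offset (len) with csum_block_add(), which accounts for odd offsets. A hedged sketch of that folding pattern, with buf1/buf2/len1/len2 purely illustrative:

	/* Fold per-chunk checksums at their byte offsets, as the fixed
	 * tcp_mtu_probe() now does; csum_partial() and csum_block_add()
	 * are the stock helpers from <net/checksum.h>.
	 */
	__wsum total = 0;
	int off = 0;

	total = csum_block_add(total, csum_partial(buf1, len1, 0), off);
	off += len1;
	total = csum_block_add(total, csum_partial(buf2, len2, 0), off);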
@@ -1920,9 +1924,11 @@ u32 __tcp_select_window(struct sock *sk)
        int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
        int window;
 
-       if (mss > full_space)
+       if (unlikely(mss > full_space)) {
                mss = full_space;
-
+               if (mss <= 0)
+                       return 0;
+       }
        if (free_space < (full_space >> 1)) {
                icsk->icsk_ack.quick = 0;
 
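The early return added above protects the window rounding later in __tcp_select_window(), which divides by mss: if the receive buffer has been shrunk so far that full_space, and with it mss, drops to zero or below, that division would trap, so a zero window is advertised instead. The later code (unchanged here, shape recalled from upstream and shown only for context) is along the lines of:

	/* Round the advertised window down to a multiple of mss -- the
	 * reason mss must be > 0 by the time execution gets here.
	 */
	window = (free_space / mss) * mss;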
@@ -1982,7 +1988,7 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
 
        BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
 
-       tcp_highest_sack_combine(sk, next_skb, skb);
+       tcp_highest_sack_replace(sk, next_skb, skb);
 
        tcp_unlink_write_queue(next_skb, sk);
 
@@ -2096,7 +2102,8 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
         * copying overhead: fragmentation, tunneling, mangling etc.
         */
        if (atomic_read(&sk->sk_wmem_alloc) >
-           min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
+           min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
+                 sk->sk_sndbuf))
                return -EAGAIN;
 
        if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
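The min() to min_t(u32, ...) change above matters once the write queue grows very large: sk_wmem_queued is a plain int, so the computed limit can wrap negative and every retransmit then bails out with -EAGAIN, stalling the flow. A worked example, with an assumed queue size of 0x70000000 bytes (about 1.9 GB):

	int queued = 0x70000000;              /* ~1.9 GB queued for transmit */
	int limit  = queued + (queued >> 2);  /* 0x8c000000 wraps negative   */
	/* min(limit, sk->sk_sndbuf) picks the negative value, so the
	 * sk_wmem_alloc check above always fails; comparing as u32 keeps
	 * the limit at ~2.2 GB and lets the retransmit go ahead.
	 */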