tcp: fix tcp_md5_hash_skb_data()
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 11ba922..ec8b4b7 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -481,14 +481,12 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
                         !tp->urg_data ||
                         before(tp->urg_seq, tp->copied_seq) ||
                         !before(tp->urg_seq, tp->rcv_nxt)) {
-                       struct sk_buff *skb;
 
                        answ = tp->rcv_nxt - tp->copied_seq;
 
-                       /* Subtract 1, if FIN is in queue. */
-                       skb = skb_peek_tail(&sk->sk_receive_queue);
-                       if (answ && skb)
-                               answ -= tcp_hdr(skb)->fin;
+                       /* Subtract 1, if FIN was received */
+                       if (answ && sock_flag(sk, SOCK_DONE))
+                               answ--;
                } else
                        answ = tp->urg_seq - tp->copied_seq;
                release_sock(sk);
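
The old code peeked at the tail skb to subtract the FIN from the SIOCINQ byte count, but if the application has already drained the receive queue, skb_peek_tail() returns NULL while rcv_nxt still accounts for the FIN's sequence number, so SIOCINQ reported 1 instead of 0. Testing SOCK_DONE covers that case. A minimal userspace sketch of the accounting after the fix; rcv_nxt and copied_seq mirror the kernel fields, the rest is hypothetical:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the fixed SIOCINQ computation: unread bytes are
 * rcv_nxt - copied_seq, minus one if a FIN (which consumes a
 * sequence number but carries no data) was already received.
 */
static uint32_t inq_bytes(uint32_t rcv_nxt, uint32_t copied_seq,
                          bool fin_received)
{
        uint32_t answ = rcv_nxt - copied_seq;

        if (answ && fin_received)
                answ--;
        return answ;
}

int main(void)
{
        /* 10 data bytes plus a FIN accounted in rcv_nxt: report 10. */
        printf("%u\n", (unsigned)inq_bytes(111, 100, true));
        /* Queue drained, only the FIN left in rcv_nxt: report 0. */
        printf("%u\n", (unsigned)inq_bytes(101, 100, true));
        return 0;
}
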
@@ -706,7 +704,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
                         * Make sure that we have exactly size bytes
                         * available to the caller, no more, no less.
                         */
-                       skb->avail_size = size;
+                       skb->reserved_tailroom = skb->end - skb->tail - size;
                        return skb;
                }
                __kfree_skb(skb);
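
skb->avail_size was a TCP-private field; reserved_tailroom expresses the same guarantee ("exactly size bytes available to the caller") in terms the generic skb tailroom accounting understands: everything beyond size is marked reserved. A sketch of the arithmetic, modeling end/tail as plain integers rather than real skb pointers:

#include <assert.h>

/* Model: a buffer with 'end - tail' bytes of raw tailroom.
 * Reserving 'end - tail - size' of it (skb->reserved_tailroom)
 * leaves exactly 'size' bytes of usable tailroom.
 */
static unsigned int usable_tailroom(unsigned int end, unsigned int tail,
                                    unsigned int size)
{
        unsigned int reserved = end - tail - size;

        return (end - tail) - reserved; /* == size, by construction */
}

int main(void)
{
        assert(usable_tailroom(4096, 1024, 1460) == 1460);
        return 0;
}
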
@@ -740,7 +738,9 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
                           old_size_goal + mss_now > xmit_size_goal)) {
                        xmit_size_goal = old_size_goal;
                } else {
-                       tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
+                       tp->xmit_size_goal_segs =
+                               min_t(u16, xmit_size_goal / mss_now,
+                                     sk->sk_gso_max_segs);
                        xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
                }
        }
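
Without the cap, the cached size goal could call for more segments than the device advertises via sk_gso_max_segs, so autosized sends could exceed the NIC's GSO limit. A hedged standalone sketch of the clamp, with min_t() replaced by an explicit comparison:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical standalone version of the capped size-goal computation. */
static uint32_t xmit_size_goal(uint32_t goal, uint32_t mss_now,
                               uint16_t gso_max_segs)
{
        uint16_t segs = goal / mss_now;

        if (segs > gso_max_segs)        /* the min_t(u16, ...) in the patch */
                segs = gso_max_segs;
        return (uint32_t)segs * mss_now; /* goal rounded to whole segments */
}

int main(void)
{
        /* 64 KB goal, 1460-byte MSS, device limited to 16 segments. */
        printf("%u\n", xmit_size_goal(65536, 1460, 16)); /* 23360, not 64240 */
        return 0;
}
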
@@ -1587,8 +1587,14 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                }
 
 #ifdef CONFIG_NET_DMA
-               if (tp->ucopy.dma_chan)
-                       dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+               if (tp->ucopy.dma_chan) {
+                       if (tp->rcv_wnd == 0 &&
+                           !skb_queue_empty(&sk->sk_async_wait_queue)) {
+                               tcp_service_net_dma(sk, true);
+                               tcp_cleanup_rbuf(sk, copied);
+                       } else
+                               dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+               }
 #endif
                if (copied >= target) {
                        /* Do not sleep, just process backlog. */
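
When the receive window has closed and completions are still parked on sk_async_wait_queue, merely re-issuing pending DMA can stall the connection: the copied data is never acknowledged, so the peer never sees the window reopen. Draining synchronously and then calling tcp_cleanup_rbuf() lets the window update go out. A small model of the decision, with the kernel helpers stubbed out as hypothetical stand-ins:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel helpers touched by the patch. */
static void service_net_dma(void) { puts("drain DMA completions now"); }
static void cleanup_rbuf(void)    { puts("ack copied data, reopen window"); }
static void issue_pending(void)   { puts("let DMA complete asynchronously"); }

/* Shape of the fixed branch: with a zero receive window and queued
 * completions, drain synchronously so the window-opening ACK can be
 * sent; otherwise keep the asynchronous fast path.
 */
static void recvmsg_dma_step(bool rcv_wnd_zero, bool wait_queue_empty)
{
        if (rcv_wnd_zero && !wait_queue_empty) {
                service_net_dma();
                cleanup_rbuf();
        } else {
                issue_pending();
        }
}

int main(void)
{
        recvmsg_dma_step(true, false);  /* the stall case the patch handles */
        recvmsg_dma_step(false, true);  /* common case: async issue */
        return 0;
}
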
@@ -2391,7 +2397,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                /* Cap the max timeout in ms TCP will retry/retrans
                 * before giving up and aborting (ETIMEDOUT) a connection.
                 */
-               icsk->icsk_user_timeout = msecs_to_jiffies(val);
+               if (val < 0)
+                       err = -EINVAL;
+               else
+                       icsk->icsk_user_timeout = msecs_to_jiffies(val);
                break;
        default:
                err = -ENOPROTOOPT;
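
val arrives from userspace as a signed int, while msecs_to_jiffies() takes an unsigned argument, so a negative value would silently wrap to an enormous timeout instead of failing. Rejecting it up front follows the usual setsockopt validation pattern, sketched here as a userspace model with an assumed tick rate:

#include <errno.h>
#include <stdio.h>

/* Hypothetical model: negative milliseconds must fail with EINVAL
 * rather than wrap to a huge unsigned jiffies value.
 */
static int set_user_timeout(int val_ms, unsigned int *timeout_jiffies)
{
        const unsigned int HZ_MODEL = 100; /* assumed tick rate, sketch only */

        if (val_ms < 0)
                return -EINVAL;
        *timeout_jiffies = (unsigned int)val_ms / (1000 / HZ_MODEL);
        return 0;
}

int main(void)
{
        unsigned int j;

        printf("%d\n", set_user_timeout(-1, &j));             /* -22 (EINVAL) */
        printf("%d (j=%u)\n", set_user_timeout(5000, &j), j); /* 0 (j=500) */
        return 0;
}
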
@@ -3028,8 +3037,11 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
 
        for (i = 0; i < shi->nr_frags; ++i) {
                const struct skb_frag_struct *f = &shi->frags[i];
-               struct page *page = skb_frag_page(f);
-               sg_set_page(&sg, page, skb_frag_size(f), f->page_offset);
+               unsigned int offset = f->page_offset;
+               struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
+
+               sg_set_page(&sg, page, skb_frag_size(f),
+                           offset_in_page(offset));
                if (crypto_hash_update(desc, &sg, skb_frag_size(f)))
                        return 1;
        }
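
Frags can point into compound (order-N) pages, so f->page_offset may be larger than PAGE_SIZE; sg_set_page() expects an offset within the page it is given. Splitting the offset into a whole-page advance (offset >> PAGE_SHIFT) and an in-page remainder (offset_in_page()) keeps each scatterlist entry well-formed. A userspace sketch of the same split, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12                   /* assumed 4 KiB pages */
#define PAGE_SIZE  (1U << PAGE_SHIFT)

/* Split a byte offset into a whole-page advance and an in-page offset,
 * mirroring the patch: page += offset >> PAGE_SHIFT, then
 * offset_in_page(offset) goes into the sg entry.
 */
static void split_frag_offset(unsigned int offset,
                              unsigned int *page_advance,
                              unsigned int *in_page)
{
        *page_advance = offset >> PAGE_SHIFT;
        *in_page = offset & (PAGE_SIZE - 1); /* offset_in_page() */
}

int main(void)
{
        unsigned int adv, off;

        split_frag_offset(9000, &adv, &off); /* offset past the first page */
        printf("advance %u pages, offset %u\n", adv, off); /* 2 pages, 808 */
        return 0;
}
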