[IPV4]: Just increment OutDatagrams once per datagram.
[pandora-kernel.git] net/ipv4/tcp.c
index 7663145..7e74011 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -658,9 +658,10 @@ static inline int select_size(struct sock *sk)
        return tmp;
 }
 
-int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
                size_t size)
 {
+       struct sock *sk = sock->sk;
        struct iovec *iov;
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
@@ -1064,7 +1065,11 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
                                        break;
                        }
                        used = recv_actor(desc, skb, offset, len);
-                       if (used <= len) {
+                       if (used < 0) {
+                               if (!copied)
+                                       copied = used;
+                               break;
+                       } else if (used <= len) {
                                seq += used;
                                copied += used;
                                offset += used;
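
With this hunk a recv_actor() callback may return a negative errno to abort the walk: tcp_read_sock() hands the error back through its return value if nothing has been consumed yet, and otherwise returns the byte count already accumulated. A hypothetical actor, just to show the contract; the name and the -EAGAIN policy are illustrative:

#include <linux/errno.h>
#include <linux/fs.h>      /* read_descriptor_t */
#include <linux/skbuff.h>

static int example_recv_actor(read_descriptor_t *desc, struct sk_buff *skb,
                              unsigned int offset, size_t len)
{
        /* Out of room: returning a negative errno now aborts the
         * walk; tcp_read_sock() passes it up if nothing has been
         * consumed yet, else returns the bytes copied so far. */
        if (!desc->count)
                return -EAGAIN;

        /* ... copy up to min(len, desc->count) bytes from skb ... */

        return len;        /* bytes actually consumed */
}
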
@@ -1086,7 +1091,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
        tcp_rcv_space_adjust(sk);
 
        /* Clean up data we have read: This will do ACK frames. */
-       if (copied)
+       if (copied > 0)
                tcp_cleanup_rbuf(sk, copied);
        return copied;
 }
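
The stricter guard pairs with the error path added above: copied can now leave the loop holding a negative errno, and tcp_cleanup_rbuf() (which may emit ACKs) should only run when bytes were actually consumed.
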
@@ -1112,6 +1117,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        long timeo;
        struct task_struct *user_recv = NULL;
        int copied_early = 0;
+       struct sk_buff *skb;
 
        lock_sock(sk);
 
@@ -1138,16 +1144,26 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 #ifdef CONFIG_NET_DMA
        tp->ucopy.dma_chan = NULL;
        preempt_disable();
-       if ((len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
-           !sysctl_tcp_low_latency && __get_cpu_var(softnet_data).net_dma) {
-               preempt_enable_no_resched();
-               tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len);
-       } else
-               preempt_enable_no_resched();
+       skb = skb_peek_tail(&sk->sk_receive_queue);
+       {
+               int available = 0;
+
+               if (skb)
+                       available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
+               if ((available < target) &&
+                   (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
+                   !sysctl_tcp_low_latency &&
+                   __get_cpu_var(softnet_data).net_dma) {
+                       preempt_enable_no_resched();
+                       tp->ucopy.pinned_list =
+                                       dma_pin_iovec_pages(msg->msg_iov, len);
+               } else {
+                       preempt_enable_no_resched();
+               }
+       }
 #endif
 
        do {
-               struct sk_buff *skb;
                u32 offset;
 
                /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
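
The new block peeks at the tail of sk->sk_receive_queue to estimate how much data is already queued before committing to pin user pages for a DMA copy: the tail skb covers [seq, seq + len), so everything between the reader's *seq and that end can be read without waiting, and dma_pin_iovec_pages() is skipped whenever that already satisfies target. This is also why skb was hoisted to function scope above, letting the declarations inside the do-loop and in the later CONFIG_NET_DMA block go away. The arithmetic, restated as a hypothetical standalone helper with a worked example:

#include <stdint.h>

/* Bytes already queued between the reader's sequence number and the
 * end of the tail skb. Unsigned 32-bit arithmetic keeps the result
 * correct across TCP sequence-number wraparound. */
static uint32_t tcp_bytes_available(uint32_t tail_seq, uint32_t tail_len,
                                    uint32_t read_seq)
{
        return tail_seq + tail_len - read_seq;
}

/* Example: the tail skb starts at seq 1000 with 500 bytes of payload
 * and the reader is at seq 800: 1000 + 500 - 800 = 700 bytes are
 * already available, so any read with target <= 700 skips the
 * page-pinning path entirely. */
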
@@ -1435,7 +1451,6 @@ skip_copy:
 
 #ifdef CONFIG_NET_DMA
        if (tp->ucopy.dma_chan) {
-               struct sk_buff *skb;
                dma_cookie_t done, used;
 
                dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
@@ -2416,7 +2431,7 @@ void __init tcp_init(void)
        tcp_hashinfo.bind_bucket_cachep =
                kmem_cache_create("tcp_bind_bucket",
                                  sizeof(struct inet_bind_bucket), 0,
-                                 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+                                 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
 
        /* Size and allocate the main established and bind bucket
         * hash tables.
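
This hunk only tracks the slab API change that removed the destructor parameter from kmem_cache_create(); the surviving NULL is the constructor, which tcp_bind_bucket does not need. Schematically (argument names are mine):

/* before: kmem_cache_create(name, size, align, flags, ctor, dtor);
 * after:  kmem_cache_create(name, size, align, flags, ctor);
 */
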
@@ -2464,13 +2479,10 @@ void __init tcp_init(void)
                        order++)
                ;
        if (order >= 4) {
-               sysctl_local_port_range[0] = 32768;
-               sysctl_local_port_range[1] = 61000;
                tcp_death_row.sysctl_max_tw_buckets = 180000;
                sysctl_tcp_max_orphans = 4096 << (order - 4);
                sysctl_max_syn_backlog = 1024;
        } else if (order < 3) {
-               sysctl_local_port_range[0] = 1024 * (3 - order);
                tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
                sysctl_tcp_max_orphans >>= (3 - order);
                sysctl_max_syn_backlog = 128;
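
The deleted assignments were tcp_init()'s memory-based auto-tuning of the local port range; after this patch the range is left to the sysctl's own default rather than being recomputed here. For reference, the removed policy restated as a hypothetical standalone helper (name and signature are mine):

/* `order` reflects how much memory the TCP hash tables received;
 * range[0]/range[1] are the low/high ends of the local port range. */
static void legacy_port_range_tuning(int order, int range[2])
{
        if (order >= 4) {               /* big machines */
                range[0] = 32768;
                range[1] = 61000;
        } else if (order < 3) {         /* lift the low end as memory shrinks */
                range[0] = 1024 * (3 - order);
        }
}

For example, order 1 would have produced a low end of 1024 * (3 - 1) = 2048.
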