Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c [pandora-kernel.git]
index 525dcf5..1ab341e 100644
 
 int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
 
-DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;
-
 atomic_t tcp_orphan_count = ATOMIC_INIT(0);
 
 EXPORT_SYMBOL_GPL(tcp_orphan_count);
@@ -316,10 +314,10 @@ int tcp_memory_pressure __read_mostly;
 
 EXPORT_SYMBOL(tcp_memory_pressure);
 
-void tcp_enter_memory_pressure(void)
+void tcp_enter_memory_pressure(struct sock *sk)
 {
        if (!tcp_memory_pressure) {
-               NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
                tcp_memory_pressure = 1;
        }
 }
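
The extra argument exists because the TCP SNMP counters are being moved
into struct net, and the statistics macros now need a namespace to index
into; the callee recovers it from the socket. A simplified sketch of the
plumbing this relies on (the exact definitions in this tree may differ,
and the CONFIG_NET_NS=n fallback returning &init_net is omitted):

    /* sketch: recover the network namespace from the socket */
    static inline struct net *sock_net(const struct sock *sk)
    {
            return sk->sk_net;      /* per-socket namespace pointer */
    }
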
@@ -649,7 +647,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
                }
                __kfree_skb(skb);
        } else {
-               sk->sk_prot->enter_memory_pressure();
+               sk->sk_prot->enter_memory_pressure(sk);
                sk_stream_moderate_sndbuf(sk);
        }
        return NULL;
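
On the caller side, enter_memory_pressure is a struct proto method, so
the prototype widens for every protocol that installs the hook (tcp_prot
here, and presumably the IPv6 side in the same series). Assumed shape of
the ops slot, other members elided:

    /* sketch: the protocol-ops slot whose signature this patch widens */
    struct proto {
            /* ... */
            void    (*enter_memory_pressure)(struct sock *sk);
            /* ... */
    };
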
@@ -1098,7 +1096,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
 #if TCP_DEBUG
        struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
 
-       BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
+       WARN_ON(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
 #endif
 
        if (inet_csk_ack_scheduled(sk)) {
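
Every BUG_TRAP conversion in this patch follows one mechanical rule:
BUG_TRAP(cond) asserted that cond held, whereas WARN_ON(cond) fires when
cond holds, so each predicate is negated on the way through (via
De Morgan's laws for the compound cases further down). Roughly:

    /* approximate equivalence; BUG_TRAP was being removed tree-wide */
    #define BUG_TRAP(x)     WARN_ON(!(x))

    /* single term:   BUG_TRAP(x)       becomes  WARN_ON(!x)       */
    /* disjunction:   BUG_TRAP(a || b)  becomes  WARN_ON(!a && !b) */
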
@@ -1153,7 +1151,7 @@ static void tcp_prequeue_process(struct sock *sk)
        struct sk_buff *skb;
        struct tcp_sock *tp = tcp_sk(sk);
 
-       NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);
+       NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
 
        /* RX process wants to run with disabled BHs, though it is not
         * necessary */
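
The statistics macros changed here and below all gain the same struct
net first argument; the _USER/_BH suffixes only select the per-cpu
counter context (process context vs. bottom half). Assumed
post-conversion shape, not verbatim from this tree:

    #define NET_INC_STATS_USER(net, field) \
            SNMP_INC_STATS_USER((net)->mib.net_statistics, field)
    #define NET_INC_STATS_BH(net, field) \
            SNMP_INC_STATS_BH((net)->mib.net_statistics, field)
    #define NET_ADD_STATS_USER(net, field, cnt) \
            SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, cnt)
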
@@ -1360,7 +1358,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                                goto found_ok_skb;
                        if (tcp_hdr(skb)->fin)
                                goto found_fin_ok;
-                       BUG_TRAP(flags & MSG_PEEK);
+                       WARN_ON(!(flags & MSG_PEEK));
                        skb = skb->next;
                } while (skb != (struct sk_buff *)&sk->sk_receive_queue);
 
@@ -1423,8 +1421,8 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
                        tp->ucopy.len = len;
 
-                       BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
-                                (flags & (MSG_PEEK | MSG_TRUNC)));
+                       WARN_ON(tp->copied_seq != tp->rcv_nxt &&
+                               !(flags & (MSG_PEEK | MSG_TRUNC)));
 
                        /* Ugly... If prequeue is not empty, we have to
                         * process it before releasing socket, otherwise
@@ -1475,7 +1473,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                        /* __ Restore normal policy in scheduler __ */
 
                        if ((chunk = len - tp->ucopy.len) != 0) {
-                               NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
+                               NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
                                len -= chunk;
                                copied += chunk;
                        }
@@ -1486,7 +1484,7 @@ do_prequeue:
                                tcp_prequeue_process(sk);
 
                                if ((chunk = len - tp->ucopy.len) != 0) {
-                                       NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
+                                       NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
                                        len -= chunk;
                                        copied += chunk;
                                }
@@ -1601,7 +1599,7 @@ skip_copy:
                        tcp_prequeue_process(sk);
 
                        if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
-                               NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
+                               NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
                                len -= chunk;
                                copied += chunk;
                        }
@@ -1793,13 +1791,13 @@ void tcp_close(struct sock *sk, long timeout)
         */
        if (data_was_unread) {
                /* Unread data was tossed, zap the connection. */
-               NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
+               NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
                tcp_set_state(sk, TCP_CLOSE);
                tcp_send_active_reset(sk, GFP_KERNEL);
        } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
                /* Check zero linger _after_ checking for unread data. */
                sk->sk_prot->disconnect(sk, 0);
-               NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
+               NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
        } else if (tcp_close_state(sk)) {
                /* We FIN if the application ate all the data before
                 * zapping the connection.
@@ -1846,7 +1844,7 @@ adjudge_to_death:
         */
        local_bh_disable();
        bh_lock_sock(sk);
-       BUG_TRAP(!sock_owned_by_user(sk));
+       WARN_ON(sock_owned_by_user(sk));
 
        /* Have we already been destroyed by a softirq or backlog? */
        if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
@@ -1871,7 +1869,8 @@ adjudge_to_death:
                if (tp->linger2 < 0) {
                        tcp_set_state(sk, TCP_CLOSE);
                        tcp_send_active_reset(sk, GFP_ATOMIC);
-                       NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
+                       NET_INC_STATS_BH(sock_net(sk),
+                                       LINUX_MIB_TCPABORTONLINGER);
                } else {
                        const int tmo = tcp_fin_time(sk);
 
@@ -1893,7 +1892,8 @@ adjudge_to_death:
                                       "sockets\n");
                        tcp_set_state(sk, TCP_CLOSE);
                        tcp_send_active_reset(sk, GFP_ATOMIC);
-                       NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
+                       NET_INC_STATS_BH(sock_net(sk),
+                                       LINUX_MIB_TCPABORTONMEMORY);
                }
        }
 
@@ -1973,7 +1973,7 @@ int tcp_disconnect(struct sock *sk, int flags)
        memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
        __sk_dst_reset(sk);
 
-       BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
+       WARN_ON(inet->num && !icsk->icsk_bind_hash);
 
        sk->sk_error_report(sk);
        return err;
@@ -2465,76 +2465,6 @@ static unsigned long tcp_md5sig_users;
 static struct tcp_md5sig_pool **tcp_md5sig_pool;
 static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
 
-int tcp_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
-                     int bplen,
-                     struct tcphdr *th, unsigned int tcplen,
-                     struct tcp_md5sig_pool *hp)
-{
-       struct scatterlist sg[4];
-       __u16 data_len;
-       int block = 0;
-       __sum16 cksum;
-       struct hash_desc *desc = &hp->md5_desc;
-       int err;
-       unsigned int nbytes = 0;
-
-       sg_init_table(sg, 4);
-
-       /* 1. The TCP pseudo-header */
-       sg_set_buf(&sg[block++], &hp->md5_blk, bplen);
-       nbytes += bplen;
-
-       /* 2. The TCP header, excluding options, and assuming a
-        * checksum of zero
-        */
-       cksum = th->check;
-       th->check = 0;
-       sg_set_buf(&sg[block++], th, sizeof(*th));
-       nbytes += sizeof(*th);
-
-       /* 3. The TCP segment data (if any) */
-       data_len = tcplen - (th->doff << 2);
-       if (data_len > 0) {
-               u8 *data = (u8 *)th + (th->doff << 2);
-               sg_set_buf(&sg[block++], data, data_len);
-               nbytes += data_len;
-       }
-
-       /* 4. an independently-specified key or password, known to both
-        * TCPs and presumably connection-specific
-        */
-       sg_set_buf(&sg[block++], key->key, key->keylen);
-       nbytes += key->keylen;
-
-       sg_mark_end(&sg[block - 1]);
-
-       /* Now store the hash into the packet */
-       err = crypto_hash_init(desc);
-       if (err) {
-               if (net_ratelimit())
-                       printk(KERN_WARNING "%s(): hash_init failed\n", __func__);
-               return -1;
-       }
-       err = crypto_hash_update(desc, sg, nbytes);
-       if (err) {
-               if (net_ratelimit())
-                       printk(KERN_WARNING "%s(): hash_update failed\n", __func__);
-               return -1;
-       }
-       err = crypto_hash_final(desc, md5_hash);
-       if (err) {
-               if (net_ratelimit())
-                       printk(KERN_WARNING "%s(): hash_final failed\n", __func__);
-               return -1;
-       }
-
-       /* Reset header */
-       th->check = cksum;
-
-       return 0;
-}
-EXPORT_SYMBOL(tcp_calc_md5_hash);
-
 static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
 {
        int cpu;
@@ -2658,6 +2588,63 @@ void __tcp_put_md5sig_pool(void)
 }
 
 EXPORT_SYMBOL(__tcp_put_md5sig_pool);
+
+int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
+                       struct tcphdr *th)
+{
+       struct scatterlist sg;
+       int err;
+
+       __sum16 old_checksum = th->check;
+       th->check = 0;
+       /* options aren't included in the hash */
+       sg_init_one(&sg, th, sizeof(struct tcphdr));
+       err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(struct tcphdr));
+       th->check = old_checksum;
+       return err;
+}
+
+EXPORT_SYMBOL(tcp_md5_hash_header);
+
+int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
+                         struct sk_buff *skb, unsigned header_len)
+{
+       struct scatterlist sg;
+       const struct tcphdr *tp = tcp_hdr(skb);
+       struct hash_desc *desc = &hp->md5_desc;
+       unsigned i;
+       const unsigned head_data_len = skb_headlen(skb) > header_len ?
+                                      skb_headlen(skb) - header_len : 0;
+       const struct skb_shared_info *shi = skb_shinfo(skb);
+
+       sg_init_table(&sg, 1);
+
+       sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
+       if (crypto_hash_update(desc, &sg, head_data_len))
+               return 1;
+
+       for (i = 0; i < shi->nr_frags; ++i) {
+               const struct skb_frag_struct *f = &shi->frags[i];
+               sg_set_page(&sg, f->page, f->size, f->page_offset);
+               if (crypto_hash_update(desc, &sg, f->size))
+                       return 1;
+       }
+
+       return 0;
+}
+
+EXPORT_SYMBOL(tcp_md5_hash_skb_data);
+
+int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, struct tcp_md5sig_key *key)
+{
+       struct scatterlist sg;
+
+       sg_init_one(&sg, key->key, key->keylen);
+       return crypto_hash_update(&hp->md5_desc, &sg, key->keylen);
+}
+
+EXPORT_SYMBOL(tcp_md5_hash_key);
+
 #endif
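
For orientation, here is how a caller is expected to compose these
helpers. This is an illustrative sketch modelled on the IPv4 transmit
path, not code from this hunk; the address-family-specific pseudo-header
step is assumed to be hashed by a separate helper, and the pool
accessors are the existing per-cpu wrappers:

    /* hypothetical caller: chain the helpers over one hash_desc */
    static int tcp_v4_md5_hash_skb_sketch(char *md5_hash,
                                          struct tcp_md5sig_key *key,
                                          struct sk_buff *skb)
    {
            struct tcp_md5sig_pool *hp;
            struct tcphdr *th = tcp_hdr(skb);

            hp = tcp_get_md5sig_pool();     /* per-cpu pool entry */
            if (!hp)
                    goto clear_hash_noput;

            if (crypto_hash_init(&hp->md5_desc))
                    goto clear_hash;
            /* the address-family pseudo-header would be hashed here */
            if (tcp_md5_hash_header(hp, th))
                    goto clear_hash;
            if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
                    goto clear_hash;
            if (tcp_md5_hash_key(hp, key))
                    goto clear_hash;
            if (crypto_hash_final(&hp->md5_desc, md5_hash))
                    goto clear_hash;

            tcp_put_md5sig_pool();
            return 0;

    clear_hash:
            tcp_put_md5sig_pool();
    clear_hash_noput:
            memset(md5_hash, 0, 16);        /* 16 = MD5 digest size */
            return 1;
    }
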
 
 void tcp_done(struct sock *sk)
@@ -2800,4 +2787,3 @@ EXPORT_SYMBOL(tcp_splice_read);
 EXPORT_SYMBOL(tcp_sendpage);
 EXPORT_SYMBOL(tcp_setsockopt);
 EXPORT_SYMBOL(tcp_shutdown);
-EXPORT_SYMBOL(tcp_statistics);
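
The export removed here pairs with the DEFINE_SNMP_STAT deletion at the
top of the diff: the global tcp_statistics mib goes away because the
counters now live per namespace, presumably as a field of the netns mib
block, roughly:

    /* assumed destination of the old global (net/netns/mib.h-style) */
    struct netns_mib {
            DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics);
            /* ... other protocol mibs ... */
    };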