net: add a limit parameter to sk_add_backlog()
author Eric Dumazet <edumazet@google.com>
Sun, 22 Apr 2012 23:34:26 +0000 (23:34 +0000)
committer David S. Miller <davem@davemloft.net>
Tue, 24 Apr 2012 02:28:28 +0000 (22:28 -0400)
sk_add_backlog() and sk_rcvqueues_full() hard-coded sk_rcvbuf as the
memory limit. We need to make this limit a parameter for TCP use.

No functional change is expected in this patch; all callers still pass
the old sk_rcvbuf limit.
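
For illustration only (not part of the patch), a minimal standalone C
sketch of the idea: the queue-full check now compares against a
caller-supplied limit, and passing sk->sk_rcvbuf preserves the old
behaviour. struct toy_sock below is a made-up stand-in holding only the
fields the check reads, not the real struct sock, and the larger limit
in the last call is purely hypothetical.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for struct sock: only the fields the check reads. */
struct toy_sock {
	unsigned int backlog_len;	/* bytes queued on the backlog */
	unsigned int rmem_alloc;	/* bytes charged to the receive queue */
	unsigned int rcvbuf;		/* SO_RCVBUF-style limit */
};

/* Old shape: the limit is always the socket's rcvbuf. */
static bool rcvqueues_full_old(const struct toy_sock *sk)
{
	return sk->backlog_len + sk->rmem_alloc > sk->rcvbuf;
}

/* New shape: the caller supplies the limit; passing sk->rcvbuf keeps
 * the old semantics, which is what every call site in this patch does.
 */
static bool rcvqueues_full(const struct toy_sock *sk, unsigned int limit)
{
	return sk->backlog_len + sk->rmem_alloc > limit;
}

int main(void)
{
	struct toy_sock sk = {
		.backlog_len = 100000,
		.rmem_alloc  =  80000,
		.rcvbuf      = 131072,
	};

	/* Identical result when the old limit is passed through... */
	printf("old=%d new(rcvbuf)=%d\n",
	       rcvqueues_full_old(&sk), rcvqueues_full(&sk, sk.rcvbuf));

	/* ...but a caller (e.g. TCP, in a later change) could pass a larger
	 * limit and accept more backlog before dropping.
	 */
	printf("new(larger limit)=%d\n",
	       rcvqueues_full(&sk, sk.rcvbuf + 65536));
	return 0;
}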

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Neal Cardwell <ncardwell@google.com>
Cc: Tom Herbert <therbert@google.com>
Cc: Maciej Żenczykowski <maze@google.com>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Cc: Rick Jones <rick.jones2@hp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/net/sock.h
net/core/sock.c
net/ipv4/tcp_ipv4.c
net/ipv4/udp.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/llc/llc_conn.c
net/sctp/input.c
net/tipc/socket.c
net/x25/x25_dev.c

diff --git a/include/net/sock.h b/include/net/sock.h
index 4cdb9b3..4e9d01e 100644
@@ -709,17 +709,19 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
  * Do not take into account this skb truesize,
  * to allow even a single big packet to come.
  */
-static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
+static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb,
+                                    unsigned int limit)
 {
        unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
 
-       return qsize > sk->sk_rcvbuf;
+       return qsize > limit;
 }
 
 /* The per-socket spinlock must be held here. */
-static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
+                                             unsigned int limit)
 {
-       if (sk_rcvqueues_full(sk, skb))
+       if (sk_rcvqueues_full(sk, skb, limit))
                return -ENOBUFS;
 
        __sk_add_backlog(sk, skb);
diff --git a/net/core/sock.c b/net/core/sock.c
index 679c5bb..0431aaf 100644
@@ -389,7 +389,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
 
        skb->dev = NULL;
 
-       if (sk_rcvqueues_full(sk, skb)) {
+       if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
                atomic_inc(&sk->sk_drops);
                goto discard_and_relse;
        }
@@ -406,7 +406,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
                rc = sk_backlog_rcv(sk, skb);
 
                mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
-       } else if (sk_add_backlog(sk, skb)) {
+       } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
                bh_unlock_sock(sk);
                atomic_inc(&sk->sk_drops);
                goto discard_and_relse;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 0883921..917607e 100644
@@ -1752,7 +1752,7 @@ process:
                        if (!tcp_prequeue(sk, skb))
                                ret = tcp_v4_do_rcv(sk, skb);
                }
-       } else if (unlikely(sk_add_backlog(sk, skb))) {
+       } else if (unlikely(sk_add_backlog(sk, skb, sk->sk_rcvbuf))) {
                bh_unlock_sock(sk);
                NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
                goto discard_and_relse;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 3430e8f..279fd08 100644
@@ -1479,7 +1479,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
                goto drop;
 
 
-       if (sk_rcvqueues_full(sk, skb))
+       if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
                goto drop;
 
        rc = 0;
@@ -1488,7 +1488,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk))
                rc = __udp_queue_rcv_skb(sk, skb);
-       else if (sk_add_backlog(sk, skb)) {
+       else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
                bh_unlock_sock(sk);
                goto drop;
        }
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 8044f6a..b04e6d8 100644
@@ -1654,7 +1654,7 @@ process:
                        if (!tcp_prequeue(sk, skb))
                                ret = tcp_v6_do_rcv(sk, skb);
                }
-       } else if (unlikely(sk_add_backlog(sk, skb))) {
+       } else if (unlikely(sk_add_backlog(sk, skb, sk->sk_rcvbuf))) {
                bh_unlock_sock(sk);
                NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
                goto discard_and_relse;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 37b0699..d39bbc9 100644
@@ -611,14 +611,14 @@ static void flush_stack(struct sock **stack, unsigned int count,
 
                sk = stack[i];
                if (skb1) {
-                       if (sk_rcvqueues_full(sk, skb1)) {
+                       if (sk_rcvqueues_full(sk, skb1, sk->sk_rcvbuf)) {
                                kfree_skb(skb1);
                                goto drop;
                        }
                        bh_lock_sock(sk);
                        if (!sock_owned_by_user(sk))
                                udpv6_queue_rcv_skb(sk, skb1);
-                       else if (sk_add_backlog(sk, skb1)) {
+                       else if (sk_add_backlog(sk, skb1, sk->sk_rcvbuf)) {
                                kfree_skb(skb1);
                                bh_unlock_sock(sk);
                                goto drop;
@@ -790,14 +790,14 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 
        /* deliver */
 
-       if (sk_rcvqueues_full(sk, skb)) {
+       if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
                sock_put(sk);
                goto discard;
        }
        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk))
                udpv6_queue_rcv_skb(sk, skb);
-       else if (sk_add_backlog(sk, skb)) {
+       else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
                atomic_inc(&sk->sk_drops);
                bh_unlock_sock(sk);
                sock_put(sk);
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index ba137a6..0d0d416 100644
@@ -828,7 +828,7 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
        else {
                dprintk("%s: adding to backlog...\n", __func__);
                llc_set_backlog_type(skb, LLC_PACKET);
-               if (sk_add_backlog(sk, skb))
+               if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
                        goto drop_unlock;
        }
 out:
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 80f71af..80564fe 100644
@@ -342,7 +342,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
                sctp_bh_lock_sock(sk);
 
                if (sock_owned_by_user(sk)) {
-                       if (sk_add_backlog(sk, skb))
+                       if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
                                sctp_chunk_free(chunk);
                        else
                                backloged = 1;
@@ -376,7 +376,7 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
        struct sctp_ep_common *rcvr = chunk->rcvr;
        int ret;
 
-       ret = sk_add_backlog(sk, skb);
+       ret = sk_add_backlog(sk, skb, sk->sk_rcvbuf);
        if (!ret) {
                /* Hold the assoc/ep while hanging on the backlog queue.
                 * This way, we know structures we need will not disappear
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index c19fc4a..6d4991e 100644
@@ -1330,7 +1330,7 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
        if (!sock_owned_by_user(sk)) {
                res = filter_rcv(sk, buf);
        } else {
-               if (sk_add_backlog(sk, buf))
+               if (sk_add_backlog(sk, buf, sk->sk_rcvbuf))
                        res = TIPC_ERR_OVERLOAD;
                else
                        res = TIPC_OK;
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index f0ce862..a8a2363 100644
@@ -58,7 +58,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
                if (!sock_owned_by_user(sk)) {
                        queued = x25_process_rx_frame(sk, skb);
                } else {
-                       queued = !sk_add_backlog(sk, skb);
+                       queued = !sk_add_backlog(sk, skb, sk->sk_rcvbuf);
                }
                bh_unlock_sock(sk);
                sock_put(sk);