net: add kfree_skb_list()
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3c30ee4..847b314 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -45,6 +45,8 @@
 #include <linux/in.h>
 #include <linux/inet.h>
 #include <linux/slab.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
 #include <linux/netdevice.h>
 #ifdef CONFIG_NET_CLS_ACT
 #include <net/pkt_sched.h>
 static struct kmem_cache *skbuff_head_cache __read_mostly;
 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
 
-static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
-                                 struct pipe_buffer *buf)
-{
-       put_page(buf->page);
-}
-
-static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
-                               struct pipe_buffer *buf)
-{
-       get_page(buf->page);
-}
-
-static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
-                              struct pipe_buffer *buf)
-{
-       return 1;
-}
-
-
-/* Pipe buffer operations for a socket. */
-static const struct pipe_buf_operations sock_pipe_buf_ops = {
-       .can_merge = 0,
-       .map = generic_pipe_buf_map,
-       .unmap = generic_pipe_buf_unmap,
-       .confirm = generic_pipe_buf_confirm,
-       .release = sock_pipe_buf_release,
-       .steal = sock_pipe_buf_steal,
-       .get = sock_pipe_buf_get,
-};
-
 /*
  *     Keep out-of-line to prevent kernel bloat.
  *     __builtin_return_address is not used because it is not always
@@ -305,15 +277,8 @@ EXPORT_SYMBOL(dev_alloc_skb);
 
 static void skb_drop_list(struct sk_buff **listp)
 {
-       struct sk_buff *list = *listp;
-
+       kfree_skb_list(*listp);
        *listp = NULL;
-
-       do {
-               struct sk_buff *this = list;
-               list = list->next;
-               kfree_skb(this);
-       } while (list);
 }
 
 static inline void skb_drop_fraglist(struct sk_buff *skb)
@@ -464,6 +429,21 @@ void kfree_skb(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(kfree_skb);
 
+/**
+ *	kfree_skb_list - kfree_skb() every buffer in a ->next-linked list
+ *	@segs: head of the buffer list to free
+ */
+void kfree_skb_list(struct sk_buff *segs)
+{
+       while (segs) {
+               struct sk_buff *next = segs->next;
+
+               kfree_skb(segs);
+               segs = next;
+       }
+}
+EXPORT_SYMBOL(kfree_skb_list);
+
 /**
  *     consume_skb - free an skbuff
  *     @skb: buffer to free
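[Editor's note, not part of the patch: a usage sketch for the new helper. skb_gso_segment() returns exactly this kind of ->next-linked chain, and error paths previously had to open-code the free loop. Everything named example_* below, and the failure condition itself, is hypothetical.]

	static int example_xmit(struct sk_buff *skb, u32 features)
	{
		struct sk_buff *segs = skb_gso_segment(skb, features);

		if (IS_ERR(segs))
			return PTR_ERR(segs);
		if (!example_hw_ready()) {	/* hypothetical failure check */
			kfree_skb_list(segs);	/* frees every skb in the chain */
			return -EBUSY;
		}
		return example_hw_queue(segs);	/* hypothetical transmit */
	}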
@@ -683,7 +659,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
        skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
        return 0;
 }
-
+EXPORT_SYMBOL_GPL(skb_copy_ubufs);
 
 /**
  *     skb_clone       -       duplicate an sk_buff
@@ -771,7 +747,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
 {
        int headerlen = skb_headroom(skb);
-       unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len;
+       unsigned int size = skb_end_offset(skb) + skb->data_len;
        struct sk_buff *n = alloc_skb(size, gfp_mask);
 
        if (!n)
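[Editor's note: the skb_end_offset() conversions here and in later hunks rely on a helper outside this diff. For reference, the assumed mainline <linux/skbuff.h> definition, which hides whether skb->end is stored as an offset or as a pointer:]

	#ifdef NET_SKBUFF_DATA_USES_OFFSET
	static inline unsigned int skb_end_offset(const struct sk_buff *skb)
	{
		return skb->end;		/* ->end kept as offset from ->head */
	}
	#else
	static inline unsigned int skb_end_offset(const struct sk_buff *skb)
	{
		return skb->end - skb->head;	/* ->end kept as a raw pointer */
	}
	#endif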
@@ -871,7 +847,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 {
        int i;
        u8 *data;
-       int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail;
+       int size = nhead + skb_end_offset(skb) + ntail;
        long off;
        bool fastpath;
 
@@ -903,9 +879,11 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
                goto adjust_others;
        }
 
-       data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
+       data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+                      gfp_mask);
        if (!data)
                goto nodata;
+       size = SKB_WITH_OVERHEAD(ksize(data));
 
        /* Copy only real data... and, alas, header. This should be
         * optimized for the cases when header is void.
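[Editor's note: why the hunk above recomputes size. kmalloc() may round the request up to the next slab object size, and ksize() reports what was actually granted, so slab slack becomes usable room instead of being wasted. A sketch, assuming the usual <linux/skbuff.h> macros rather than quoting this tree:]

	/*
	 *	SKB_DATA_ALIGN(X)	round X up to an SMP_CACHE_BYTES multiple
	 *	SKB_WITH_OVERHEAD(X)	(X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
	 */
	size = nhead + skb_end_offset(skb) + ntail;	/* what was asked for */
	data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
		       gfp_mask);
	if (data)
		size = SKB_WITH_OVERHEAD(ksize(data));	/* what was granted */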
@@ -1661,8 +1639,9 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = MAX_SKB_FRAGS,
                .flags = flags,
-               .ops = &sock_pipe_buf_ops,
+               .ops = &nosteal_pipe_buf_ops,
                .spd_release = sock_spd_release,
        };
        struct sk_buff *frag_iter;
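[Editor's note: the replacement ops live outside this diff. Assuming the mainline fs/splice.c definition, nosteal_pipe_buf_ops is behaviorally identical to the sock_pipe_buf_ops block deleted above (generic hooks plus a ->steal that always fails), which is why the local copy could go:]

	static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
					    struct pipe_buffer *buf)
	{
		return 1;	/* non-zero: the page may not be stolen */
	}

	const struct pipe_buf_operations nosteal_pipe_buf_ops = {
		.can_merge	= 0,
		.map		= generic_pipe_buf_map,
		.unmap		= generic_pipe_buf_unmap,
		.confirm	= generic_pipe_buf_confirm,
		.release	= generic_pipe_buf_release,
		.steal		= generic_pipe_buf_nosteal,
		.get		= generic_pipe_buf_get,
	};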
@@ -1707,7 +1686,7 @@ done:
                lock_sock(sk);
        }
 
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return ret;
 }
 
@@ -2604,11 +2583,12 @@ EXPORT_SYMBOL(skb_append_datato_frags);
  */
 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
 {
+       unsigned char *data = skb->data;
+
        BUG_ON(len > skb->len);
-       skb->len -= len;
-       BUG_ON(skb->len < skb->data_len);
-       skb_postpull_rcsum(skb, skb->data, len);
-       return skb->data += len;
+       __skb_pull(skb, len);
+       skb_postpull_rcsum(skb, data, len);
+       return skb->data;
 }
 EXPORT_SYMBOL_GPL(skb_pull_rcsum);
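[Editor's note: a caller sketch, illustrative rather than taken from this tree. skb_pull_rcsum() advances skb->data by len while subtracting the checksum of the pulled bytes from skb->csum, so a CHECKSUM_COMPLETE value stays valid for the shorter packet:]

	/* Strip a 4-byte tag in a receive path. */
	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		return NULL;			/* header not in linear data */
	skb_pull_rcsum(skb, VLAN_HLEN);		/* pull + fix up skb->csum */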
 
@@ -2667,14 +2647,13 @@ struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
                        if (unlikely(!nskb))
                                goto err;
 
-                       hsize = skb_end_pointer(nskb) - nskb->head;
+                       hsize = skb_end_offset(nskb);
                        if (skb_cow_head(nskb, doffset + headroom)) {
                                kfree_skb(nskb);
                                goto err;
                        }
 
-                       nskb->truesize += skb_end_pointer(nskb) - nskb->head -
-                                         hsize;
+                       nskb->truesize += skb_end_offset(nskb) - hsize;
                        skb_release_head_state(nskb);
                        __skb_push(nskb, doffset);
                } else {
@@ -2724,6 +2703,9 @@ struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
                                                 skb_put(nskb, hsize), hsize);
 
                while (pos < offset + len && i < nfrags) {
+                       if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
+                               goto err;
+
                        *frag = skb_shinfo(skb)->frags[i];
                        __skb_frag_ref(frag);
                        size = skb_frag_size(frag);
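[Editor's note: skb_orphan_frags() comes from outside this hunk. Assuming the mainline <linux/skbuff.h> helper of the same era, it gives the skb private copies of userspace zerocopy pages before extra references are taken, since the new segments can outlive the sender's buffers:]

	static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
	{
		if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
			return 0;		/* no userspace pages attached */
		return skb_copy_ubufs(skb, gfp_mask);
	}

Being an inline in a header, module callers of this helper need skb_copy_ubufs() exported, which is what the EXPORT_SYMBOL_GPL() added earlier in this diff provides.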
@@ -3111,6 +3093,8 @@ static void sock_rmem_free(struct sk_buff *skb)
  */
 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
 {
+       int len = skb->len;
+
        if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
            (unsigned)sk->sk_rcvbuf)
                return -ENOMEM;
@@ -3125,7 +3109,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
 
        skb_queue_tail(&sk->sk_error_queue, skb);
        if (!sock_flag(sk, SOCK_DEAD))
-               sk->sk_data_ready(sk, skb->len);
+               sk->sk_data_ready(sk, len);
        return 0;
 }
 EXPORT_SYMBOL(sock_queue_err_skb);
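[Editor's note: the cached len closes a narrow race; the rationale below matches the corresponding mainline fix and is stated as an assumption. Once skb_queue_tail() publishes the skb on sk_error_queue, a reader on another CPU may dequeue and free it, so the old read of skb->len was a potential use-after-free:]

	int len = skb->len;			/* snapshot while we still own the skb */

	skb_queue_tail(&sk->sk_error_queue, skb);
	/* From here on, another CPU may already have freed the skb. */
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, len);	/* must not dereference skb */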
@@ -3206,3 +3190,28 @@ void __skb_warn_lro_forwarding(const struct sk_buff *skb)
                           " while LRO is enabled\n", skb->dev->name);
 }
 EXPORT_SYMBOL(__skb_warn_lro_forwarding);
+
+/**
+ * skb_gso_transport_seglen - Return length of individual segments of a gso packet
+ *
+ * @skb: GSO skb
+ *
+ * skb_gso_transport_seglen is used to determine the real size of the
+ * individual segments, including Layer4 headers (TCP/UDP).
+ *
+ * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
+ */
+unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
+{
+       const struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+       if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+               return tcp_hdrlen(skb) + shinfo->gso_size;
+
+       /* UFO sets gso_size to the size of the fragmentation
+        * payload, i.e. the size of the L4 (UDP) header is already
+        * accounted for.
+        */
+       return shinfo->gso_size;
+}
+EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
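
[Editor's note: a usage sketch. Later mainline helpers build on this export to get per-segment sizes at L3, e.g. for MTU checks when forwarding GSO packets; the name below mirrors mainline's skb_gso_network_seglen() but is not part of this patch.]

	/* Per-segment length including the network header, as a caller
	 * doing an MTU check on a GSO skb would compute it.
	 */
	static unsigned int example_gso_network_seglen(const struct sk_buff *skb)
	{
		unsigned int hdr_len = skb_transport_header(skb) -
				       skb_network_header(skb);

		return hdr_len + skb_gso_transport_seglen(skb);
	}

A forwarding path could then compare example_gso_network_seglen(skb) against the route MTU before deciding whether the segments fit.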