X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?p=pandora-kernel.git;a=blobdiff_plain;f=net%2Fcore%2Fskbuff.c;h=7121d9b59bbddf58d63b07dbf18751d307f681bb;hp=3c30ee4a57105a2746b3e1b2bfab68af8ef0c917;hb=77c01a54cde87eb3bf6685fb44398352f11db3fa;hpb=975e32c287a9b144cf115d3f42ca18664b3331df

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3c30ee4a5710..7121d9b59bbd 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -45,6 +45,8 @@
 #include <linux/in.h>
 #include <linux/inet.h>
 #include <linux/slab.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
 #include <linux/netdevice.h>
 #ifdef CONFIG_NET_CLS_ACT
 #include <net/pkt_sched.h>
@@ -74,36 +76,6 @@
 static struct kmem_cache *skbuff_head_cache __read_mostly;
 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
 
-static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
-				  struct pipe_buffer *buf)
-{
-	put_page(buf->page);
-}
-
-static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
-			      struct pipe_buffer *buf)
-{
-	get_page(buf->page);
-}
-
-static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
-			       struct pipe_buffer *buf)
-{
-	return 1;
-}
-
-
-/* Pipe buffer operations for a socket. */
-static const struct pipe_buf_operations sock_pipe_buf_ops = {
-	.can_merge = 0,
-	.map = generic_pipe_buf_map,
-	.unmap = generic_pipe_buf_unmap,
-	.confirm = generic_pipe_buf_confirm,
-	.release = sock_pipe_buf_release,
-	.steal = sock_pipe_buf_steal,
-	.get = sock_pipe_buf_get,
-};
-
 /*
  *	Keep out-of-line to prevent kernel bloat.
  *	__builtin_return_address is not used because it is not always
@@ -683,7 +655,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
 	return 0;
 }
-
+EXPORT_SYMBOL_GPL(skb_copy_ubufs);
 
 /**
  *	skb_clone	-	duplicate an sk_buff
@@ -771,7 +743,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
 {
 	int headerlen = skb_headroom(skb);
-	unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len;
+	unsigned int size = skb_end_offset(skb) + skb->data_len;
 	struct sk_buff *n = alloc_skb(size, gfp_mask);
 
 	if (!n)
@@ -871,7 +843,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 {
 	int i;
 	u8 *data;
-	int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail;
+	int size = nhead + skb_end_offset(skb) + ntail;
 	long off;
 	bool fastpath;
 
@@ -903,9 +875,11 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 		goto adjust_others;
 	}
 
-	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
+	data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+		       gfp_mask);
 	if (!data)
 		goto nodata;
+	size = SKB_WITH_OVERHEAD(ksize(data));
 
 	/* Copy only real data... and, alas, header. This should be
 	 * optimized for the cases when header is void.
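
For context, and not part of the patch itself: several hunks above replace the
open-coded "skb_end_pointer(skb) - skb->head" arithmetic with skb_end_offset().
A minimal sketch of that helper, assuming the include/linux/skbuff.h definition
introduced upstream alongside this conversion:

    #ifdef NET_SKBUFF_DATA_USES_OFFSET
    /* skb->end is already stored as an offset from skb->head */
    static inline unsigned int skb_end_offset(const struct sk_buff *skb)
    {
            return skb->end;
    }
    #else
    /* skb->end is a pointer, so derive the offset explicitly */
    static inline unsigned int skb_end_offset(const struct sk_buff *skb)
    {
            return skb->end - skb->head;
    }
    #endif

Either variant evaluates to the same value as the removed pointer arithmetic,
so the conversions in skb_copy(), pskb_expand_head() and skb_segment() below
are behaviour-preserving.
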
@@ -1661,8 +1635,9 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
 	struct splice_pipe_desc spd = {
 		.pages = pages,
 		.partial = partial,
+		.nr_pages_max = MAX_SKB_FRAGS,
 		.flags = flags,
-		.ops = &sock_pipe_buf_ops,
+		.ops = &nosteal_pipe_buf_ops,
 		.spd_release = sock_spd_release,
 	};
 	struct sk_buff *frag_iter;
@@ -1707,7 +1682,7 @@ done:
 		lock_sock(sk);
 	}
 
-	splice_shrink_spd(pipe, &spd);
+	splice_shrink_spd(&spd);
 	return ret;
 }
 
@@ -2667,14 +2642,13 @@ struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
 			if (unlikely(!nskb))
 				goto err;
 
-			hsize = skb_end_pointer(nskb) - nskb->head;
+			hsize = skb_end_offset(nskb);
 			if (skb_cow_head(nskb, doffset + headroom)) {
 				kfree_skb(nskb);
 				goto err;
 			}
 
-			nskb->truesize += skb_end_pointer(nskb) - nskb->head -
-					  hsize;
+			nskb->truesize += skb_end_offset(nskb) - hsize;
 			skb_release_head_state(nskb);
 			__skb_push(nskb, doffset);
 		} else {
@@ -2724,6 +2698,9 @@ struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
 						 skb_put(nskb, hsize), hsize);
 
 		while (pos < offset + len && i < nfrags) {
+			if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
+				goto err;
+
 			*frag = skb_shinfo(skb)->frags[i];
 			__skb_frag_ref(frag);
 			size = skb_frag_size(frag);
@@ -3111,6 +3088,8 @@ static void sock_rmem_free(struct sk_buff *skb)
  */
 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
 {
+	int len = skb->len;
+
 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
 	    (unsigned)sk->sk_rcvbuf)
 		return -ENOMEM;
@@ -3125,7 +3104,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
 
 	skb_queue_tail(&sk->sk_error_queue, skb);
 	if (!sock_flag(sk, SOCK_DEAD))
-		sk->sk_data_ready(sk, skb->len);
+		sk->sk_data_ready(sk, len);
 	return 0;
 }
 EXPORT_SYMBOL(sock_queue_err_skb);
@@ -3206,3 +3185,28 @@ void __skb_warn_lro_forwarding(const struct sk_buff *skb)
 	     " while LRO is enabled\n", skb->dev->name);
 }
 EXPORT_SYMBOL(__skb_warn_lro_forwarding);
+
+/**
+ * skb_gso_transport_seglen - Return length of individual segments of a gso packet
+ *
+ * @skb: GSO skb
+ *
+ * skb_gso_transport_seglen is used to determine the real size of the
+ * individual segments, including Layer4 headers (TCP/UDP).
+ *
+ * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
+ */
+unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
+{
+	const struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+	if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+		return tcp_hdrlen(skb) + shinfo->gso_size;
+
+	/* UFO sets gso_size to the size of the fragmentation
+	 * payload, i.e. the size of the L4 (UDP) header is already
+	 * accounted for.
+	 */
+	return shinfo->gso_size;
+}
+EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
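
For context, and not part of the patch itself: a usage sketch of the newly
added skb_gso_transport_seglen(). The helper returns gso_size plus the L4
(TCP) header only, so a caller comparing segment size against a link MTU has
to add the network header length itself. The function name skb_gso_fits_mtu()
below is hypothetical and only illustrates the calling convention:

    /* Hypothetical helper: would the segments cut from a GSO skb fit @mtu? */
    static bool skb_gso_fits_mtu(const struct sk_buff *skb, unsigned int mtu)
    {
            if (!skb_is_gso(skb))
                    return skb->len <= mtu;

            /* network (IPv4/IPv6) header + L4 header + payload per segment */
            return skb_network_header_len(skb) +
                   skb_gso_transport_seglen(skb) <= mtu;
    }

This mirrors the way MTU checks in the forwarding path build a per-segment
length on top of this helper; any real caller needs the skb's network and
transport headers to be initialised first.
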