Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 7de3d67..9a423e2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -257,15 +257,16 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
        kmemcheck_annotate_variable(shinfo->destructor_arg);
 
        if (flags & SKB_ALLOC_FCLONE) {
-               struct sk_buff *child = skb + 1;
-               atomic_t *fclone_ref = (atomic_t *) (child + 1);
+               struct sk_buff_fclones *fclones;
 
-               kmemcheck_annotate_bitfield(child, flags1);
+               fclones = container_of(skb, struct sk_buff_fclones, skb1);
+
+               kmemcheck_annotate_bitfield(&fclones->skb2, flags1);
                skb->fclone = SKB_FCLONE_ORIG;
-               atomic_set(fclone_ref, 1);
+               atomic_set(&fclones->fclone_ref, 1);
 
-               child->fclone = SKB_FCLONE_UNAVAILABLE;
-               child->pfmemalloc = pfmemalloc;
+               fclones->skb2.fclone = SKB_FCLONE_FREE;
+               fclones->skb2.pfmemalloc = pfmemalloc;
        }
 out:
        return skb;
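For reference, the layout the converted code relies on is struct sk_buff_fclones, added to include/linux/skbuff.h alongside this change: the two clone sk_buffs and their shared reference count live in a single slab object.

struct sk_buff_fclones {
	struct sk_buff	skb1;
	struct sk_buff	skb2;
	atomic_t	fclone_ref;
};

container_of(skb, struct sk_buff_fclones, skb1) thus replaces the old skb + 1 / (atomic_t *)(child + 1) pointer arithmetic, letting the compiler account for member offsets and padding instead of hand-written address math.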
@@ -524,8 +525,7 @@ static void skb_release_data(struct sk_buff *skb)
  */
 static void kfree_skbmem(struct sk_buff *skb)
 {
-       struct sk_buff *other;
-       atomic_t *fclone_ref;
+       struct sk_buff_fclones *fclones;
 
        switch (skb->fclone) {
        case SKB_FCLONE_UNAVAILABLE:
@@ -533,22 +533,28 @@ static void kfree_skbmem(struct sk_buff *skb)
                break;
 
        case SKB_FCLONE_ORIG:
-               fclone_ref = (atomic_t *) (skb + 2);
-               if (atomic_dec_and_test(fclone_ref))
-                       kmem_cache_free(skbuff_fclone_cache, skb);
+               fclones = container_of(skb, struct sk_buff_fclones, skb1);
+               if (atomic_dec_and_test(&fclones->fclone_ref))
+                       kmem_cache_free(skbuff_fclone_cache, fclones);
                break;
 
        case SKB_FCLONE_CLONE:
-               fclone_ref = (atomic_t *) (skb + 1);
-               other = skb - 1;
+               fclones = container_of(skb, struct sk_buff_fclones, skb2);
 
-               /* The clone portion is available for
-                * fast-cloning again.
+               /* Warning: We must perform the atomic_dec_and_test() before
+                * setting skb->fclone back to SKB_FCLONE_FREE, otherwise
+                * skb_clone() could set clone_ref to 2 before our decrement.
+                * Anyway, if we are going to free the structure, no need to
+                * rewrite skb->fclone.
                 */
-               skb->fclone = SKB_FCLONE_UNAVAILABLE;
-
-               if (atomic_dec_and_test(fclone_ref))
-                       kmem_cache_free(skbuff_fclone_cache, other);
+               if (atomic_dec_and_test(&fclones->fclone_ref)) {
+                       kmem_cache_free(skbuff_fclone_cache, fclones);
+               } else {
+                       /* The clone portion is available for
+                        * fast-cloning again.
+                        */
+                       skb->fclone = SKB_FCLONE_FREE;
+               }
                break;
        }
 }
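To make the Warning comment concrete, here is a sketch of the interleaving it guards against if the SKB_FCLONE_FREE store were done before the decrement (CPU 0 frees the clone skb2 while CPU 1, which still owns skb1, clones again):

   CPU 0: kfree_skbmem(skb2)              CPU 1: skb_clone(skb1)
   -------------------------              ----------------------
   skb2->fclone = SKB_FCLONE_FREE;
                                          reads skb2->fclone == SKB_FCLONE_FREE
                                          skb2->fclone = SKB_FCLONE_CLONE;
                                          atomic_set(&fclones->fclone_ref, 2);
   atomic_dec_and_test() -> ref == 1

skb1 and the fresh clone are now both live with fclone_ref == 1, so whichever is released first frees the whole sk_buff_fclones block under the other: a use-after-free. Decrementing first means skb_clone() can only observe SKB_FCLONE_FREE once fclone_ref really is back to 1, which is what makes the atomic_set() to 2 in the hunk below safe.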
@@ -859,17 +865,22 @@ EXPORT_SYMBOL_GPL(skb_copy_ubufs);
 
 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 {
-       struct sk_buff *n;
+       struct sk_buff_fclones *fclones = container_of(skb,
+                                                      struct sk_buff_fclones,
+                                                      skb1);
+       struct sk_buff *n = &fclones->skb2;
 
        if (skb_orphan_frags(skb, gfp_mask))
                return NULL;
 
-       n = skb + 1;
        if (skb->fclone == SKB_FCLONE_ORIG &&
-           n->fclone == SKB_FCLONE_UNAVAILABLE) {
-               atomic_t *fclone_ref = (atomic_t *) (n + 1);
+           n->fclone == SKB_FCLONE_FREE) {
                n->fclone = SKB_FCLONE_CLONE;
-               atomic_inc(fclone_ref);
+               /* As our fastclone was free, clone_ref must be 1 at this point.
+                * We could use atomic_inc() here, but it is faster
+                * to set the final value.
+                */
+               atomic_set(&fclones->fclone_ref, 2);
        } else {
                if (skb_pfmemalloc(skb))
                        gfp_mask |= __GFP_MEMALLOC;
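The container_of() pattern above, reduced to a self-contained userspace sketch (skb_stub and fclones_stub are hypothetical stand-ins, not kernel types): recovering the enclosing block from either embedded member is plain offset arithmetic, so skb1 and skb2 both resolve to the same enclosing object.

#include <assert.h>
#include <stddef.h>

struct skb_stub { int fclone; };	/* stand-in for struct sk_buff */

struct fclones_stub {			/* stand-in for struct sk_buff_fclones */
	struct skb_stub skb1;
	struct skb_stub skb2;
	int		fclone_ref;
};

/* container_of() as in the kernel, minus the type-checking magic */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct fclones_stub f;

	/* Either embedded member leads back to the same enclosing object. */
	assert(container_of(&f.skb1, struct fclones_stub, skb1) == &f);
	assert(container_of(&f.skb2, struct fclones_stub, skb2) == &f);
	return 0;
}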
@@ -3155,6 +3166,9 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
                NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
                goto done;
        }
+       /* switch back to head shinfo */
+       pinfo = skb_shinfo(p);
+
        if (pinfo->frag_list)
                goto merge;
        if (skb_gro_len(p) != pinfo->gso_size)
@@ -3230,7 +3244,6 @@ done:
        NAPI_GRO_CB(skb)->same_flow = 1;
        return 0;
 }
-EXPORT_SYMBOL_GPL(skb_gro_receive);
 
 void __init skb_init(void)
 {
@@ -3240,8 +3253,7 @@ void __init skb_init(void)
                                              SLAB_HWCACHE_ALIGN|SLAB_PANIC,
                                              NULL);
        skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
-                                               (2*sizeof(struct sk_buff)) +
-                                               sizeof(atomic_t),
+                                               sizeof(struct sk_buff_fclones),
                                                0,
                                                SLAB_HWCACHE_ALIGN|SLAB_PANIC,
                                                NULL);
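A final note on the cache sizing, illustrated with hypothetical stand-in types: taking sizeof() of the whole struct keeps the slab size in sync with whatever padding the compiler inserts between or after the members, which the old hand-summed 2*sizeof(struct sk_buff) + sizeof(atomic_t) expression could not guarantee once the code is expressed in terms of a real struct.

#include <stdio.h>

struct padded_skb { long a; char b; };	/* stand-in: 16 bytes on LP64 (7 bytes tail padding) */

struct padded_fclones {
	struct padded_skb skb1;
	struct padded_skb skb2;
	int		  ref;		/* followed by 4 bytes tail padding on LP64 */
};

int main(void)
{
	/* hand-summed gives 36 on LP64; sizeof() gives 40 - padding is the difference */
	printf("hand-summed: %zu\n", 2 * sizeof(struct padded_skb) + sizeof(int));
	printf("sizeof()   : %zu\n", sizeof(struct padded_fclones));
	return 0;
}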