slub: explicitly document position of inserting slab to partial list
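
The tail argument of add_partial() used to be a bare 0/1 flag, so every
caller had to remember which value meant head and which meant tail.  With
this change the argument carries one of the DEACTIVATE_TO_HEAD /
DEACTIVATE_TO_TAIL stat items directly, which makes the intent readable at
each call site and lets the value be fed straight into stat() when the slab
is queued.  A minimal sketch of the two stat items this relies on, assuming
they sit next to each other in enum stat_item (include/linux/slub_def.h),
with the surrounding entries elided:

	enum stat_item {
		/* ... */
		DEACTIVATE_TO_HEAD,	/* cpu slab was moved to the head of partials */
		DEACTIVATE_TO_TAIL,	/* cpu slab was moved to the tail of partials */
		/* ... */
		NR_SLUB_STAT_ITEMS
	};
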
diff --git a/mm/slub.c b/mm/slub.c
index eb5a8f9..91a120f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -701,7 +701,7 @@ static u8 *check_bytes(u8 *start, u8 value, unsigned int bytes)
                return check_bytes8(start, value, bytes);
 
        value64 = value | value << 8 | value << 16 | value << 24;
-       value64 = value64 | value64 << 32;
+       value64 = (value64 & 0xffffffff) | value64 << 32;
        prefix = 8 - ((unsigned long)start) % 8;
 
        if (prefix) {
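
The masking added above matters for any poison byte with the top bit set:
value << 24 is evaluated in signed int arithmetic, the intermediate word is
negative and sign-extends when stored into the u64, and OR-ing that with its
own left shift leaves 0xffffffff in the upper half, so check_bytes() compares
memory against a bogus pattern.  Masking the low 32 bits first rebuilds the
intended value.  A hypothetical stand-alone demo of the same expressions,
compiled in userspace rather than taken from the kernel:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint8_t value = 0xaa;	/* poison byte with the top bit set */
		uint64_t value64 = value | value << 8 | value << 16 | value << 24;
		/* the int expression above is negative, so it sign-extended into value64 */

		printf("seed     %016llx\n", (unsigned long long)value64);
		printf("unmasked %016llx\n",
		       (unsigned long long)(value64 | value64 << 32));
		printf("masked   %016llx\n",
		       (unsigned long long)((value64 & 0xffffffff) | value64 << 32));
		return 0;
	}

On a typical build this prints ffffffffaaaaaaaa for the first two lines and
aaaaaaaaaaaaaaaa for the last one, which is the pattern the fixed code
produces.
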
@@ -1534,7 +1534,7 @@ static inline void add_partial(struct kmem_cache_node *n,
                                struct page *page, int tail)
 {
        n->nr_partial++;
-       if (tail)
+       if (tail == DEACTIVATE_TO_TAIL)
                list_add_tail(&page->lru, &n->partial);
        else
                list_add(&page->lru, &n->partial);
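
With the comparison spelled out, the two ways of queueing a slab read
naturally at the call sites.  Purely as an illustration of the calls changed
further down:

	add_partial(n, page, DEACTIVATE_TO_TAIL);	/* behind the existing partial slabs */
	add_partial(n, page, DEACTIVATE_TO_HEAD);	/* first in line for the next allocation */
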
@@ -1781,13 +1781,13 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
        enum slab_modes l = M_NONE, m = M_NONE;
        void *freelist;
        void *nextfree;
-       int tail = 0;
+       int tail = DEACTIVATE_TO_HEAD;
        struct page new;
        struct page old;
 
        if (page->freelist) {
                stat(s, DEACTIVATE_REMOTE_FREES);
-               tail = 1;
+               tail = DEACTIVATE_TO_TAIL;
        }
 
        c->tid = next_tid(c->tid);
@@ -1854,7 +1854,7 @@ redo:
 
        new.frozen = 0;
 
-       if (!new.inuse && n->nr_partial < s->min_partial)
+       if (!new.inuse && n->nr_partial > s->min_partial)
                m = M_FREE;
        else if (new.freelist) {
                m = M_PARTIAL;
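
The flipped comparison here is a behavioural fix rather than documentation:
min_partial is the number of partial slabs a node tries to keep in reserve,
so an empty slab should only be handed back to the page allocator once
nr_partial already exceeds that reserve.  A worked example with values picked
for illustration:

	/* s->min_partial == 5, n->nr_partial == 2, the slab just became empty:
	 *   old test  2 < 5  -> M_FREE, the slab is discarded although the node
	 *                       is still below its reserve of partial slabs
	 *   new test  2 > 5  -> false, the slab stays on the node
	 * With nr_partial well above min_partial the old test went the other
	 * way, letting empty slabs pile up on the partial list instead of
	 * being freed.
	 */
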
@@ -1893,7 +1893,7 @@ redo:
                if (m == M_PARTIAL) {
 
                        add_partial(n, page, tail);
-                       stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
+                       stat(s, tail);
 
                } else if (m == M_FULL) {
 
@@ -2377,7 +2377,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                 */
                if (unlikely(!prior)) {
                        remove_full(s, page);
-                       add_partial(n, page, 0);
+                       add_partial(n, page, DEACTIVATE_TO_TAIL);
                        stat(s, FREE_ADD_PARTIAL);
                }
        }
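
As the hunk shows, this call site changes behaviour as well as readability: a
full slab that just had its first object freed now goes to the tail of the
partial list, where the old bare 0 put it at the head, so allocation only
reaches it after the partial slabs already queued in front of it.
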
@@ -2387,11 +2387,13 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 slab_empty:
        if (prior) {
                /*
-                * Slab still on the partial list.
+                * Slab on the partial list.
                 */
                remove_partial(n, page);
                stat(s, FREE_REMOVE_PARTIAL);
-       }
+       } else
+               /* Slab must be on the full list */
+               remove_full(s, page);
 
        spin_unlock_irqrestore(&n->list_lock, flags);
        stat(s, FREE_SLAB);
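
The new else branch covers the case where the emptied slab was never on the
partial list (prior is NULL): it was sitting on the per-node full list and has
to be unlinked from there before the page is discarded.  remove_full() is
assumed here to be a no-op unless SLAB_STORE_USER debugging is enabled, since
the full list is only maintained in that configuration; on a debug kernel the
missing call would leave a stale list entry behind once the page is freed.
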
@@ -2693,7 +2695,7 @@ static void early_kmem_cache_node_alloc(int node)
        init_kmem_cache_node(n, kmem_cache_node);
        inc_slabs_node(kmem_cache_node, node, page->objects);
 
-       add_partial(n, page, 0);
+       add_partial(n, page, DEACTIVATE_TO_HEAD);
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)