Merge master.kernel.org:/pub/scm/linux/kernel/git/wim/linux-2.6-watchdog
index 6a3760e..f055c14 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -898,6 +898,30 @@ static struct array_cache *alloc_arraycache(int node, int entries,
        return nc;
 }
 
+/*
+ * Transfer objects from one arraycache to another.
+ * Locking must be handled by the caller.
+ *
+ * Return the number of entries transferred.
+ */
+static int transfer_objects(struct array_cache *to,
+               struct array_cache *from, unsigned int max)
+{
+       /* Figure out how many entries to transfer */
+       int nr = min(min(from->avail, max), to->limit - to->avail);
+
+       if (!nr)
+               return 0;
+
+       memcpy(to->entry + to->avail, from->entry + from->avail - nr,
+                       sizeof(void *) * nr);
+
+       from->avail -= nr;
+       to->avail += nr;
+       to->touched = 1;
+       return nr;
+}
+
 #ifdef CONFIG_NUMA
 static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
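
The helper added above centralizes a copy that cache_alloc_refill() used to
open-code (see the @@ -2680 hunk below) and gains a second caller in
__drain_alien_cache(). Below is a minimal user-space model of its semantics;
the struct, min_u() and main() are illustrative stand-ins, not kernel code:

/* Model of transfer_objects(): move up to 'max' objects from the tail
 * of 'from' to the tail of 'to', bounded by the free room in 'to'. */
#include <stdio.h>
#include <string.h>

struct model_cache {
	unsigned int avail;	/* objects currently cached */
	unsigned int limit;	/* capacity of entry[] */
	int touched;
	void *entry[8];
};

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static int model_transfer(struct model_cache *to,
		struct model_cache *from, unsigned int max)
{
	int nr = min_u(min_u(from->avail, max), to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
			sizeof(void *) * nr);

	from->avail -= nr;
	to->avail += nr;
	to->touched = 1;
	return nr;
}

int main(void)
{
	static int objs[5];
	struct model_cache shared = { .avail = 5, .limit = 8 };
	struct model_cache ac = { .avail = 6, .limit = 8 };
	int i;

	for (i = 0; i < 5; i++)
		shared.entry[i] = &objs[i];

	/* Only two slots are free in 'ac', so only two objects move. */
	printf("moved %d\n", model_transfer(&ac, &shared, 4));
	printf("shared.avail=%u ac.avail=%u\n", shared.avail, ac.avail);
	return 0;
}

Copying from the donor's tail keeps the most recently returned, cache-hot
objects at the top of both stacks.
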
@@ -947,6 +971,13 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
 
        if (ac->avail) {
                spin_lock(&rl3->list_lock);
+               /*
+                * Stuff objects into the remote node's shared array first.
+                * That way we can avoid the overhead of putting the objects
+                * into the free lists and getting them back later.
+                */
+               transfer_objects(rl3->shared, ac, ac->limit);
+
                free_block(cachep, ac->entry, ac->avail, node);
                ac->avail = 0;
                spin_unlock(&rl3->list_lock);
@@ -962,8 +993,8 @@ static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
 
        if (l3->alien) {
                struct array_cache *ac = l3->alien[node];
-               if (ac && ac->avail) {
-                       spin_lock_irq(&ac->lock);
+
+               if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
                        __drain_alien_cache(cachep, ac, node);
                        spin_unlock_irq(&ac->lock);
                }
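
reap_alien() is called from the periodic cache reaper, so the work is not
urgent: with the change above, a contended alien lock makes it skip this
pass and retry on the next tick instead of spinning with interrupts
disabled. A rough user-space analogue of that pattern follows; a pthread
mutex stands in for the kernel spinlock and all names are illustrative:

/* Opportunistic periodic drain: skip the pass rather than contend. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t alien_lock = PTHREAD_MUTEX_INITIALIZER;
static int pending = 3;	/* objects waiting to be drained */

static void periodic_reap(void)
{
	/* An unconditional lock would block here; trylock defers the
	 * drain to the next tick when someone else holds the lock. */
	if (pthread_mutex_trylock(&alien_lock) != 0)
		return;

	if (pending) {
		printf("drained %d objects\n", pending);
		pending = 0;
	}
	pthread_mutex_unlock(&alien_lock);
}

int main(void)
{
	periodic_reap();	/* uncontended here, so it drains */
	return 0;
}
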
@@ -1266,8 +1297,7 @@ void __init kmem_cache_init(void)
                if (cache_cache.num)
                        break;
        }
-       if (!cache_cache.num)
-               BUG();
+       BUG_ON(!cache_cache.num);
        cache_cache.gfporder = order;
        cache_cache.colour = left_over / cache_cache.colour_off;
        cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
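
This hunk and the similar ones below are mechanical conversions of
open-coded `if (cond) BUG();` checks to BUG_ON(cond). A self-contained
user-space model follows; the kernel's generic fallback in
include/asm-generic/bug.h of this era was essentially the BUG_ON() line
shown (architectures may override it), and the macro also adds the
unlikely() branch hint that the open-coded form lacked:

#include <stdio.h>
#include <stdlib.h>

#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Model of BUG(): the kernel panics; here we just abort. */
#define BUG() do { \
	fprintf(stderr, "BUG at %s:%d\n", __FILE__, __LINE__); \
	abort(); \
} while (0)

#define BUG_ON(condition) do { if (unlikely((condition) != 0)) BUG(); } while (0)

int main(void)
{
	int num = 1;	/* stands in for cache_cache.num */

	BUG_ON(!num);	/* passes: num is non-zero */
	return 0;
}
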
@@ -1943,8 +1973,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
         * Always checks flags, a caller might be expecting debug support which
         * isn't available.
         */
-       if (flags & ~CREATE_MASK)
-               BUG();
+       BUG_ON(flags & ~CREATE_MASK);
 
        /*
         * Check that size is in terms of words.  This is needed to avoid
@@ -2175,8 +2204,7 @@ static int __node_shrink(struct kmem_cache *cachep, int node)
 
                slabp = list_entry(l3->slabs_free.prev, struct slab, list);
 #if DEBUG
-               if (slabp->inuse)
-                       BUG();
+               BUG_ON(slabp->inuse);
 #endif
                list_del(&slabp->list);
 
@@ -2217,8 +2245,7 @@ static int __cache_shrink(struct kmem_cache *cachep)
  */
 int kmem_cache_shrink(struct kmem_cache *cachep)
 {
-       if (!cachep || in_interrupt())
-               BUG();
+       BUG_ON(!cachep || in_interrupt());
 
        return __cache_shrink(cachep);
 }
@@ -2246,8 +2273,7 @@ int kmem_cache_destroy(struct kmem_cache *cachep)
        int i;
        struct kmem_list3 *l3;
 
-       if (!cachep || in_interrupt())
-               BUG();
+       BUG_ON(!cachep || in_interrupt());
 
        /* Don't let CPUs come and go */
        lock_cpu_hotplug();
@@ -2446,8 +2472,7 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
         * Be lazy and only check for valid flags here, keeping it out of the
         * critical path in kmem_cache_alloc().
         */
-       if (flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW))
-               BUG();
+       BUG_ON(flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW));
        if (flags & SLAB_NO_GROW)
                return 0;
 
@@ -2680,20 +2705,10 @@ retry:
        BUG_ON(ac->avail > 0 || !l3);
        spin_lock(&l3->list_lock);
 
-       if (l3->shared) {
-               struct array_cache *shared_array = l3->shared;
-               if (shared_array->avail) {
-                       if (batchcount > shared_array->avail)
-                               batchcount = shared_array->avail;
-                       shared_array->avail -= batchcount;
-                       ac->avail = batchcount;
-                       memcpy(ac->entry,
-                              &(shared_array->entry[shared_array->avail]),
-                              sizeof(void *) * batchcount);
-                       shared_array->touched = 1;
-                       goto alloc_done;
-               }
-       }
+       /* See if we can refill from the shared array */
+       if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
+               goto alloc_done;
+
        while (batchcount > 0) {
                struct list_head *entry;
                struct slab *slabp;
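
The refill change above is behaviour-preserving: BUG_ON(ac->avail > 0 || !l3)
guarantees ac->avail is zero at this point, so the helper's append-to-the-tail
copy reduces to the old overwrite of ac->entry[], and its non-zero return
value replaces the old "shared_array->avail != 0" test guarding the goto.
One subtlety: the helper sets touched on the destination (the cpu-local ac),
whereas the deleted code set it on the shared source array.
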
@@ -3290,7 +3305,7 @@ void *__alloc_percpu(size_t size)
         * and we would have no way of fixing up the array
         * that we have already allocated.
         */
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                int node = cpu_to_node(i);
 
                if (node_online(node))
@@ -3377,7 +3392,7 @@ void free_percpu(const void *objp)
        /*
         * We allocate for all cpus, so we cannot use for_each_online_cpu() here.
         */
-       for_each_cpu(i)
+       for_each_possible_cpu(i)
            kfree(p->ptrs[i]);
        kfree(p);
 }
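
Both loops above must visit every possible CPU, not only the online ones,
because __alloc_percpu() fills in a slot for each possible CPU; at the time,
for_each_cpu() already walked cpu_possible_map, and the tree-wide rename to
for_each_possible_cpu() makes that scope explicit. A user-space sketch of
why freeing only online slots would leak; the bitmaps and the iteration
macro are illustrative stand-ins:

/* Per-cpu pointers exist for CPUs that are possible but offline. */
#include <stdio.h>
#include <stdlib.h>

#define NR_MODEL_CPUS 4

static const int possible_map[NR_MODEL_CPUS] = { 1, 1, 1, 1 };
static const int online_map[NR_MODEL_CPUS]   = { 1, 1, 0, 0 };

#define for_each_set_cpu(i, map) \
	for ((i) = 0; (i) < NR_MODEL_CPUS; (i)++) \
		if ((map)[i])

int main(void)
{
	void *ptrs[NR_MODEL_CPUS] = { 0 };
	int i;

	for_each_set_cpu(i, possible_map)	/* allocation side */
		ptrs[i] = malloc(16);

	/* Buggy free: walking only the online map... */
	for_each_set_cpu(i, online_map) {
		free(ptrs[i]);
		ptrs[i] = NULL;
	}

	/* ...leaves the possible-but-offline slots behind. */
	for_each_set_cpu(i, possible_map)
		if (ptrs[i])
			printf("leaked slot for cpu %d\n", i);

	/* Correct free: walk the possible map. */
	for_each_set_cpu(i, possible_map) {
		free(ptrs[i]);
		ptrs[i] = NULL;
	}
	return 0;
}
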
@@ -3397,63 +3412,86 @@ const char *kmem_cache_name(struct kmem_cache *cachep)
 EXPORT_SYMBOL_GPL(kmem_cache_name);
 
 /*
- * This initializes kmem_list3 for all nodes.
+ * This initializes kmem_list3 or resizes various caches for all nodes.
  */
 static int alloc_kmemlist(struct kmem_cache *cachep)
 {
        int node;
        struct kmem_list3 *l3;
-       int err = 0;
+       struct array_cache *new_shared;
+       struct array_cache **new_alien;
 
        for_each_online_node(node) {
-               struct array_cache *nc = NULL, *new;
-               struct array_cache **new_alien = NULL;
-#ifdef CONFIG_NUMA
+
                new_alien = alloc_alien_cache(node, cachep->limit);
                if (!new_alien)
                        goto fail;
-#endif
-               new = alloc_arraycache(node, cachep->shared*cachep->batchcount,
+
+               new_shared = alloc_arraycache(node,
+                               cachep->shared * cachep->batchcount,
                                        0xbaadf00d);
-               if (!new)
+               if (!new_shared) {
+                       free_alien_cache(new_alien);
                        goto fail;
+               }
+
                l3 = cachep->nodelists[node];
                if (l3) {
+                       struct array_cache *shared = l3->shared;
+
                        spin_lock_irq(&l3->list_lock);
 
-                       nc = cachep->nodelists[node]->shared;
-                       if (nc)
-                               free_block(cachep, nc->entry, nc->avail, node);
+                       if (shared)
+                               free_block(cachep, shared->entry,
+                                               shared->avail, node);
 
-                       l3->shared = new;
-                       if (!cachep->nodelists[node]->alien) {
+                       l3->shared = new_shared;
+                       if (!l3->alien) {
                                l3->alien = new_alien;
                                new_alien = NULL;
                        }
                        l3->free_limit = (1 + nr_cpus_node(node)) *
                                        cachep->batchcount + cachep->num;
                        spin_unlock_irq(&l3->list_lock);
-                       kfree(nc);
+                       kfree(shared);
                        free_alien_cache(new_alien);
                        continue;
                }
                l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
-               if (!l3)
+               if (!l3) {
+                       free_alien_cache(new_alien);
+                       kfree(new_shared);
                        goto fail;
+               }
 
                kmem_list3_init(l3);
                l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
                                ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
-               l3->shared = new;
+               l3->shared = new_shared;
                l3->alien = new_alien;
                l3->free_limit = (1 + nr_cpus_node(node)) *
                                        cachep->batchcount + cachep->num;
                cachep->nodelists[node] = l3;
        }
-       return err;
+       return 0;
+
 fail:
-       err = -ENOMEM;
-       return err;
+       if (!cachep->next.next) {
+               /* Cache is not active yet. Roll back what we did */
+               node--;
+               while (node >= 0) {
+                       if (cachep->nodelists[node]) {
+                               l3 = cachep->nodelists[node];
+
+                               kfree(l3->shared);
+                               free_alien_cache(l3->alien);
+                               kfree(l3);
+                               cachep->nodelists[node] = NULL;
+                       }
+                       node--;
+               }
+       }
+       return -ENOMEM;
 }
 
 struct ccupdate_struct {
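
The reworked fail path in alloc_kmemlist() frees the current iteration's
new_shared/new_alien at each failure site, and when the cache is not yet
live (!cachep->next.next, i.e. not yet linked into the cache chain) it also
unwinds the nodelists set up by earlier loop iterations. A condensed sketch
of that unwind pattern follows; types and names are simplified stand-ins:

#include <stdlib.h>

#define MAX_NODES 4

struct node_state { void *shared; };

static struct node_state *nodelists[MAX_NODES];

static int init_all_nodes(void)
{
	int node;

	for (node = 0; node < MAX_NODES; node++) {
		struct node_state *st = malloc(sizeof(*st));

		if (!st)
			goto fail;
		st->shared = malloc(64);
		if (!st->shared) {
			free(st);	/* per-iteration cleanup first */
			goto fail;
		}
		nodelists[node] = st;
	}
	return 0;

fail:
	/* Roll back nodes [0, node), mirroring the patch's node-- loop. */
	while (--node >= 0) {
		free(nodelists[node]->shared);
		free(nodelists[node]);
		nodelists[node] = NULL;
	}
	return -1;	/* the kernel returns -ENOMEM instead */
}

int main(void)
{
	return init_all_nodes() ? 1 : 0;
}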