index ed3334d..5710788 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1506,15 +1506,19 @@ static inline void *acquire_slab(struct kmem_cache *s,
                freelist = page->freelist;
                counters = page->counters;
                new.counters = counters;
-               if (mode)
+               if (mode) {
                        new.inuse = page->objects;
+                       new.freelist = NULL;
+               } else {
+                       new.freelist = freelist;
+               }
 
                VM_BUG_ON(new.frozen);
                new.frozen = 1;
 
        } while (!__cmpxchg_double_slab(s, page,
                        freelist, counters,
-                       NULL, new.counters,
+                       new.freelist, new.counters,
                        "lock and freeze"));
 
        remove_partial(n, page);
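
The hunk above changes acquire_slab() so that the freelist value handed to __cmpxchg_double_slab() depends on mode: when the slab is taken as the new cpu slab (mode set), the on-page freelist is zapped to NULL and all objects are marked in use; when it is only being frozen for the per-cpu partial list, the freelist is left in place instead of being unconditionally cleared. A minimal userspace sketch of the same read/compute/compare-and-swap retry loop, using one packed 64-bit word and C11 atomics in place of the kernel's paired cmpxchg_double (struct slab_word, try_freeze and the field packing are invented for illustration):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct slab_word {                  /* stand-in for page->freelist/counters */
	uint32_t freelist;          /* offset of first free object, 0 == NULL */
	uint16_t inuse;             /* objects handed out from this slab */
	uint16_t frozen;            /* owned by a cpu, not on the node list */
};

static _Atomic uint64_t slab_state; /* both halves packed into one CAS word */

static uint64_t pack(struct slab_word w)
{
	return (uint64_t)w.freelist << 32 | (uint64_t)w.inuse << 16 | w.frozen;
}

static struct slab_word unpack(uint64_t v)
{
	struct slab_word w = { v >> 32, (v >> 16) & 0xffff, v & 0xffff };
	return w;
}

/* mode != 0: take every object for the cpu slab (freelist becomes NULL);
 * mode == 0: freeze the slab but leave its freelist where it is. */
static bool try_freeze(int mode, uint16_t objects)
{
	uint64_t old, desired;
	struct slab_word new;

	do {
		old = atomic_load(&slab_state);
		new = unpack(old);
		if (new.frozen)
			return false;       /* someone else froze it first */
		if (mode) {
			new.inuse = objects;
			new.freelist = 0;   /* we own all objects now */
		}                           /* else: keep the stored freelist */
		new.frozen = 1;
		desired = pack(new);
	} while (!atomic_compare_exchange_weak(&slab_state, &old, desired));

	return true;
}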
@@ -1556,7 +1560,6 @@ static void *get_partial_node(struct kmem_cache *s,
                        object = t;
                        available =  page->objects - page->inuse;
                } else {
-                       page->freelist = t;
                        available = put_cpu_partial(s, page, 0);
                }
                if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
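
This one-line removal depends on the acquire_slab() change above: with mode == 0 the cmpxchg now leaves the slab's own freelist intact, so writing t back into page->freelist here is unnecessary and would race with concurrent frees on an already frozen slab. The surrounding loop (mostly outside this hunk) keeps acquiring slabs until roughly half of the per-cpu partial budget worth of objects is available: the first slab becomes the cpu slab, the rest are parked via put_cpu_partial(). A rough, purely illustrative model of that stop heuristic (all names invented):

#include <stddef.h>

struct slab_info {
	int objects;                /* total objects in the slab   */
	int inuse;                  /* objects currently allocated */
};

/*
 * Model of the gathering loop: take the first slab for immediate use,
 * park later ones on a per-cpu partial list, and stop scanning once
 * enough free objects have been collected.
 */
static int gather_partial(const struct slab_info *slabs, int nr,
			  int cpu_partial_budget)
{
	int taken = 0, available = 0;

	while (taken < nr) {
		const struct slab_info *s = &slabs[taken++];

		if (taken == 1)
			available = s->objects - s->inuse;  /* new cpu slab */
		else
			available += s->objects - s->inuse; /* parked slab  */

		if (available > cpu_partial_budget / 2)
			break;                              /* enough for now */
	}
	return taken;                                       /* slabs consumed */
}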
@@ -1579,6 +1582,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
        struct zone *zone;
        enum zone_type high_zoneidx = gfp_zone(flags);
        void *object;
+       unsigned int cpuset_mems_cookie;
 
        /*
         * The defrag ratio allows a configuration of the tradeoffs between
@@ -1602,23 +1606,32 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
                        get_cycles() % 1024 > s->remote_node_defrag_ratio)
                return NULL;
 
-       get_mems_allowed();
-       zonelist = node_zonelist(slab_node(current->mempolicy), flags);
-       for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
-               struct kmem_cache_node *n;
-
-               n = get_node(s, zone_to_nid(zone));
-
-               if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
-                               n->nr_partial > s->min_partial) {
-                       object = get_partial_node(s, n, c);
-                       if (object) {
-                               put_mems_allowed();
-                               return object;
+       do {
+               cpuset_mems_cookie = get_mems_allowed();
+               zonelist = node_zonelist(slab_node(current->mempolicy), flags);
+               for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+                       struct kmem_cache_node *n;
+
+                       n = get_node(s, zone_to_nid(zone));
+
+                       if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
+                                       n->nr_partial > s->min_partial) {
+                               object = get_partial_node(s, n, c);
+                               if (object) {
+                                       /*
+                                        * Return the object even if
+                                        * put_mems_allowed indicated that
+                                        * the cpuset mems_allowed was
+                                        * updated in parallel. It's a
+                                        * harmless race between the alloc
+                                        * and the cpuset update.
+                                        */
+                                       put_mems_allowed(cpuset_mems_cookie);
+                                       return object;
+                               }
                        }
                }
-       }
-       put_mems_allowed();
+       } while (!put_mems_allowed(cpuset_mems_cookie));
 #endif
        return NULL;
 }
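
The rewritten loop above replaces the bare get_mems_allowed()/put_mems_allowed() pair with a sequence-count style cookie: snapshot the cookie, walk the zonelist, and redo the whole walk if put_mems_allowed() reports that the cpuset's mems_allowed changed underneath, unless an object was already obtained, in which case the stale snapshot is accepted as a harmless race. A hedged userspace model of that snapshot/retry shape using a plain sequence counter (read_seq_begin, read_seq_retry, lookup_on_node and the rest are made-up stand-ins, not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

static _Atomic unsigned int mems_seq;   /* bumped around every "cpuset" update */
static _Atomic int allowed_node;        /* the data the sequence count guards  */

static unsigned int read_seq_begin(void)
{
	unsigned int seq;

	while ((seq = atomic_load(&mems_seq)) & 1)
		;                       /* writer in progress, spin briefly */
	return seq;
}

static bool read_seq_retry(unsigned int seq)
{
	return atomic_load(&mems_seq) != seq;
}

/* Stand-in for get_partial_node(): only the allowed node has memory. */
static void *lookup_on_node(int node)
{
	static int object;

	return node == atomic_load(&allowed_node) ? &object : NULL;
}

static void *get_any_partial_model(int nr_nodes)
{
	unsigned int cookie;
	void *object;
	int node;

	do {
		cookie = read_seq_begin();
		for (node = 0; node < nr_nodes; node++) {
			object = lookup_on_node(node);
			/*
			 * Return even if a writer raced with us: the object
			 * itself is fine, the cpuset update merely landed a
			 * moment later.
			 */
			if (object)
				return object;
		}
	} while (read_seq_retry(cookie));   /* mems changed: rescan the nodes */

	return NULL;
}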
@@ -2166,6 +2179,11 @@ redo:
                goto new_slab;
        }
 
+       /* must check again c->freelist in case of cpu migration or IRQ */
+       object = c->freelist;
+       if (object)
+               goto load_freelist;
+
        stat(s, ALLOC_SLOWPATH);
 
        do {
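
The five added lines close a window in the slow path: between the fast-path miss and this point, an interrupt or a migration to another cpu can have installed a fresh c->freelist, and going on to allocate a new slab without looking again would leak the objects on that refilled list. A tiny self-contained sketch of the same "look again once you are in the slow path" shape (struct cpu_cache and slow_alloc are invented names):

#include <stddef.h>

struct cpu_cache {
	void **freelist;    /* per-cpu chain; each object's first word
			       points at the next free object */
};

/*
 * Hypothetical slow path: the caller already saw freelist == NULL on the
 * fast path, but it may have been refilled since, so check once more
 * before paying for a brand-new slab.
 */
static void *slow_alloc(struct cpu_cache *c,
			void *(*new_slab)(struct cpu_cache *))
{
	void **object = c->freelist;

	if (object) {
		c->freelist = (void **)*object; /* pop the refilled list */
		return object;
	}
	return new_slab(c);                     /* genuinely empty */
}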
@@ -3906,13 +3924,14 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
                if (kmem_cache_open(s, n,
                                size, align, flags, ctor)) {
                        list_add(&s->list, &slab_caches);
+                       up_write(&slub_lock);
                        if (sysfs_slab_add(s)) {
+                               down_write(&slub_lock);
                                list_del(&s->list);
                                kfree(n);
                                kfree(s);
                                goto err;
                        }
-                       up_write(&slub_lock);
                        return s;
                }
                kfree(n);
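
The last hunk reorders the locking in kmem_cache_create(): slub_lock is released before sysfs_slab_add(), which may sleep and takes sysfs-side locks of its own, and it is only re-acquired if the sysfs registration fails and the freshly listed cache has to be unlinked and freed again. A hedged pthreads model of that "drop the list lock around a sleeping registration, re-take it only to roll back" pattern (cache_list_lock, create_cache and the callback are invented; this is not the kernel API):

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct cache {
	struct cache *next;
};

static pthread_mutex_t cache_list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cache *cache_list;        /* model of the global cache list */

struct cache *create_cache(bool (*register_sysfs)(struct cache *))
{
	struct cache *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;

	pthread_mutex_lock(&cache_list_lock);
	c->next = cache_list;               /* publish the cache on the list */
	cache_list = c;
	pthread_mutex_unlock(&cache_list_lock); /* drop before the sleeping call */

	if (!register_sysfs(c)) {
		struct cache **pp;

		/* Registration failed: re-take the lock only to undo the
		 * insertion, then free the half-created cache. */
		pthread_mutex_lock(&cache_list_lock);
		for (pp = &cache_list; *pp; pp = &(*pp)->next) {
			if (*pp == c) {
				*pp = c->next;
				break;
			}
		}
		pthread_mutex_unlock(&cache_list_lock);
		free(c);
		return NULL;
	}
	return c;
}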