diff --git a/mm/slub.c b/mm/slub.c
index af47188..a737852 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -269,6 +269,11 @@ static inline void *get_freepointer(struct kmem_cache *s, void *object)
        return *(void **)(object + s->offset);
 }
 
+static void prefetch_freepointer(const struct kmem_cache *s, void *object)
+{
+       prefetch(object + s->offset);
+}
+
 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
 {
        void *p;
@@ -1555,7 +1560,6 @@ static void *get_partial_node(struct kmem_cache *s,
 
                if (!object) {
                        c->page = page;
-                       c->node = page_to_nid(page);
                        stat(s, ALLOC_FROM_PARTIAL);
                        object = t;
                        available =  page->objects - page->inuse;
@@ -1582,6 +1586,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
        struct zone *zone;
        enum zone_type high_zoneidx = gfp_zone(flags);
        void *object;
+       unsigned int cpuset_mems_cookie;
 
        /*
         * The defrag ratio allows a configuration of the tradeoffs between
@@ -1605,23 +1610,32 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
                        get_cycles() % 1024 > s->remote_node_defrag_ratio)
                return NULL;
 
-       get_mems_allowed();
-       zonelist = node_zonelist(slab_node(current->mempolicy), flags);
-       for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
-               struct kmem_cache_node *n;
-
-               n = get_node(s, zone_to_nid(zone));
-
-               if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
-                               n->nr_partial > s->min_partial) {
-                       object = get_partial_node(s, n, c);
-                       if (object) {
-                               put_mems_allowed();
-                               return object;
+       do {
+               cpuset_mems_cookie = get_mems_allowed();
+               zonelist = node_zonelist(slab_node(), flags);
+               for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+                       struct kmem_cache_node *n;
+
+                       n = get_node(s, zone_to_nid(zone));
+
+                       if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
+                                       n->nr_partial > s->min_partial) {
+                               object = get_partial_node(s, n, c);
+                               if (object) {
+                                       /*
+                                        * Return the object even if
+                                        * put_mems_allowed indicated that
+                                        * the cpuset mems_allowed was
+                                        * updated in parallel. It's a
+                                        * harmless race between the alloc
+                                        * and the cpuset update.
+                                        */
+                                       put_mems_allowed(cpuset_mems_cookie);
+                                       return object;
+                               }
                        }
                }
-       }
-       put_mems_allowed();
+       } while (!put_mems_allowed(cpuset_mems_cookie));
 #endif
        return NULL;
 }
@@ -1863,18 +1877,24 @@ redo:
 /* Unfreeze all the cpu partial slabs */
 static void unfreeze_partials(struct kmem_cache *s)
 {
-       struct kmem_cache_node *n = NULL;
+       struct kmem_cache_node *n = NULL, *n2 = NULL;
        struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
        struct page *page, *discard_page = NULL;
 
        while ((page = c->partial)) {
-               enum slab_modes { M_PARTIAL, M_FREE };
-               enum slab_modes l, m;
                struct page new;
                struct page old;
 
                c->partial = page->next;
-               l = M_FREE;
+
+               n2 = get_node(s, page_to_nid(page));
+               if (n != n2) {
+                       if (n)
+                               spin_unlock(&n->list_lock);
+
+                       n = n2;
+                       spin_lock(&n->list_lock);
+               }
 
                do {
 
@@ -1887,40 +1907,17 @@ static void unfreeze_partials(struct kmem_cache *s)
 
                        new.frozen = 0;
 
-                       if (!new.inuse && (!n || n->nr_partial > s->min_partial))
-                               m = M_FREE;
-                       else {
-                               struct kmem_cache_node *n2 = get_node(s,
-                                                       page_to_nid(page));
-
-                               m = M_PARTIAL;
-                               if (n != n2) {
-                                       if (n)
-                                               spin_unlock(&n->list_lock);
-
-                                       n = n2;
-                                       spin_lock(&n->list_lock);
-                               }
-                       }
-
-                       if (l != m) {
-                               if (l == M_PARTIAL)
-                                       remove_partial(n, page);
-                               else
-                                       add_partial(n, page,
-                                               DEACTIVATE_TO_TAIL);
-
-                               l = m;
-                       }
-
                } while (!cmpxchg_double_slab(s, page,
                                old.freelist, old.counters,
                                new.freelist, new.counters,
                                "unfreezing slab"));
 
-               if (m == M_FREE) {
+               if (unlikely(!new.inuse && n->nr_partial > s->min_partial)) {
                        page->next = discard_page;
                        discard_page = page;
+               } else {
+                       add_partial(n, page, DEACTIVATE_TO_TAIL);
+                       stat(s, FREE_ADD_PARTIAL);
                }
        }
 
@@ -2028,7 +2025,7 @@ static void flush_all(struct kmem_cache *s)
 static inline int node_match(struct kmem_cache_cpu *c, int node)
 {
 #ifdef CONFIG_NUMA
-       if (node != NUMA_NO_NODE && c->node != node)
+       if (node != NUMA_NO_NODE && page_to_nid(c->page) != node)
                return 0;
 #endif
        return 1;
@@ -2117,7 +2114,6 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
                page->freelist = NULL;
 
                stat(s, ALLOC_SLAB);
-               c->node = page_to_nid(page);
                c->page = page;
                *pc = c;
        } else
@@ -2219,7 +2215,6 @@ new_slab:
        if (c->partial) {
                c->page = c->partial;
                c->partial = c->page->next;
-               c->node = page_to_nid(c->page);
                stat(s, CPU_PARTIAL_ALLOC);
                c->freelist = NULL;
                goto redo;
@@ -2250,7 +2245,6 @@ new_slab:
 
        c->freelist = get_freepointer(s, object);
        deactivate_slab(s, c);
-       c->node = NUMA_NO_NODE;
        local_irq_restore(flags);
        return object;
 }
@@ -2300,6 +2294,8 @@ redo:
                object = __slab_alloc(s, gfpflags, node, addr, c);
 
        else {
+               void *next_object = get_freepointer_safe(s, object);
+
                /*
                 * The cmpxchg will only match if there was no additional
                 * operation and if we are on the right processor.
@@ -2315,11 +2311,12 @@ redo:
                if (unlikely(!irqsafe_cpu_cmpxchg_double(
                                s->cpu_slab->freelist, s->cpu_slab->tid,
                                object, tid,
-                               get_freepointer_safe(s, object), next_tid(tid)))) {
+                               next_object, next_tid(tid)))) {
 
                        note_cmpxchg_failure("slab_alloc", s, tid);
                        goto redo;
                }
+               prefetch_freepointer(s, next_object);
                stat(s, ALLOC_FASTPATH);
        }
 
@@ -4453,30 +4450,37 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 
                for_each_possible_cpu(cpu) {
                        struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
-                       int node = ACCESS_ONCE(c->node);
+                       int node;
                        struct page *page;
 
-                       if (node < 0)
-                               continue;
                        page = ACCESS_ONCE(c->page);
+                       if (!page)
+                               continue;
+
+                       node = page_to_nid(page);
+                       if (flags & SO_TOTAL)
+                               x = page->objects;
+                       else if (flags & SO_OBJECTS)
+                               x = page->inuse;
+                       else
+                               x = 1;
+
+                       total += x;
+                       nodes[node] += x;
+
+                       page = ACCESS_ONCE(c->partial);
                        if (page) {
+                               node = page_to_nid(page);
                                if (flags & SO_TOTAL)
-                                       x = page->objects;
+                                       WARN_ON_ONCE(1);
                                else if (flags & SO_OBJECTS)
-                                       x = page->inuse;
+                                       WARN_ON_ONCE(1);
                                else
-                                       x = 1;
-
+                                       x = page->pages;
                                total += x;
                                nodes[node] += x;
                        }
-                       page = c->partial;
 
-                       if (page) {
-                               x = page->pobjects;
-                               total += x;
-                               nodes[node] += x;
-                       }
                        per_cpu[node]++;
                }
        }