slub: Get rid of the node field

author     Christoph Lameter <cl@linux.com>
           Wed, 9 May 2012 15:09:56 +0000 (10:09 -0500)
committer  Grazvydas Ignotas <notasas@gmail.com>
           Sun, 28 Feb 2016 01:55:13 +0000 (03:55 +0200)
The node field is always page_to_nid(c->page), so it is rather easy to
replace. Note that there may be slightly more overhead in various hot paths
due to the need to shift the bits out of page->flags. However, that is mostly
compensated for by the smaller footprint of the kmem_cache_cpu structure (this
patch reduces it to 3 words per cache), which allows better caching.
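
The shifting referred to above happens inside page_to_nid(): the node id is
encoded in the upper bits of page->flags. A minimal sketch of the lookup,
simplified from include/linux/mm.h as it looks in the common memory-model
configuration (the NODES_PGSHIFT and NODES_MASK values depend on the
node/sparsemem config):

static inline int page_to_nid(const struct page *page)
{
	/* The node id occupies a bitfield high up in page->flags. */
	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
}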

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
include/linux/slub_def.h
mm/slub.c

index a32bcfd..f52e850 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -45,7 +45,6 @@ struct kmem_cache_cpu {
        unsigned long tid;      /* Globally unique transaction id */
        struct page *page;      /* The slab from which we are allocating */
        struct page *partial;   /* Partially allocated frozen slabs */
-       int node;               /* The node of the page (or -1 for debug) */
 #ifdef CONFIG_SLUB_STATS
        unsigned stat[NR_SLUB_STAT_ITEMS];
 #endif
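
With the node field gone, kmem_cache_cpu is down to its hot fields. A sketch
of the resulting layout; the freelist member sits just above this hunk's
context in the upstream header and is reproduced here for completeness
(CONFIG_SLUB_STATS still appends the stat[] array):

struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct page *page;	/* The slab from which we are allocating */
	struct page *partial;	/* Partially allocated frozen slabs */
};
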
index dd62331..a737852 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1560,7 +1560,6 @@ static void *get_partial_node(struct kmem_cache *s,
 
                if (!object) {
                        c->page = page;
-                       c->node = page_to_nid(page);
                        stat(s, ALLOC_FROM_PARTIAL);
                        object = t;
                        available =  page->objects - page->inuse;
@@ -2026,7 +2025,7 @@ static void flush_all(struct kmem_cache *s)
 static inline int node_match(struct kmem_cache_cpu *c, int node)
 {
 #ifdef CONFIG_NUMA
-       if (node != NUMA_NO_NODE && c->node != node)
+       if (node != NUMA_NO_NODE && page_to_nid(c->page) != node)
                return 0;
 #endif
        return 1;
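
This hunk is where the extra hot-path work lands: node_match() runs on every
allocation, so on NUMA builds the former c->node load becomes a page->flags
shift-and-mask. An abridged sketch of the fastpath caller in slab_alloc()
from the same file:

	object = c->freelist;
	if (unlikely(!object || !node_match(c, node)))
		/* Miss: refill the per-cpu slab, honoring the requested node. */
		object = __slab_alloc(s, gfpflags, node, addr, c);
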
@@ -2115,7 +2114,6 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
                page->freelist = NULL;
 
                stat(s, ALLOC_SLAB);
-               c->node = page_to_nid(page);
                c->page = page;
                *pc = c;
        } else
@@ -2217,7 +2215,6 @@ new_slab:
        if (c->partial) {
                c->page = c->partial;
                c->partial = c->page->next;
-               c->node = page_to_nid(c->page);
                stat(s, CPU_PARTIAL_ALLOC);
                c->freelist = NULL;
                goto redo;
@@ -2248,7 +2245,6 @@ new_slab:
 
        c->freelist = get_freepointer(s, object);
        deactivate_slab(s, c);
-       c->node = NUMA_NO_NODE;
        local_irq_restore(flags);
        return object;
 }
@@ -4454,25 +4450,25 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 
                for_each_possible_cpu(cpu) {
                        struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
-                       int node = ACCESS_ONCE(c->node);
+                       int node;
                        struct page *page;
 
-                       if (node < 0)
-                               continue;
                        page = ACCESS_ONCE(c->page);
-                       if (page) {
-                               if (flags & SO_TOTAL)
-                                       x = page->objects;
-                               else if (flags & SO_OBJECTS)
-                                       x = page->inuse;
-                               else
-                                       x = 1;
+                       if (!page)
+                               continue;
 
-                               total += x;
-                               nodes[node] += x;
-                       }
-                       page = c->partial;
+                       node = page_to_nid(page);
+                       if (flags & SO_TOTAL)
+                               x = page->objects;
+                       else if (flags & SO_OBJECTS)
+                               x = page->inuse;
+                       else
+                               x = 1;
 
+                       total += x;
+                       nodes[node] += x;
+
+                       page = ACCESS_ONCE(c->partial);
                        if (page) {
                                node = page_to_nid(page);
                                if (flags & SO_TOTAL)
@@ -4484,6 +4480,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
                                total += x;
                                nodes[node] += x;
                        }
+
                        per_cpu[node]++;
                }
        }
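
A note on the lockless reads above: show_slab_objects() inspects other cpus'
c->page and c->partial without taking any lock, so the loads are wrapped in
ACCESS_ONCE() to keep the compiler from caching or re-reading the pointer.
Its definition, from include/linux/compiler.h of this era:

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))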