SLUB: add support for dynamic cacheline size determination
authorChristoph Lameter <clameter@sgi.com>
Wed, 9 May 2007 09:32:35 +0000 (02:32 -0700)
committerLinus Torvalds <torvalds@woody.linux-foundation.org>
Wed, 9 May 2007 19:30:44 +0000 (12:30 -0700)
SLUB currently assumes that the cacheline size is static.  However, some
architectures (e.g. i386) support dynamic cache line size determination.

Use cache_line_size() instead of L1_CACHE_BYTES in the allocator.

That also explains the purpose of SLAB_HWCACHE_ALIGN.  So we will need to keep
that one around to allow dynamic aligning of objects depending on boot
determination of the cache line size.

[akpm@linux-foundation.org: need to define it before we use it]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/slub.c

index 5db3da5..40e92d8 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
 /* Internal SLUB flags */
 #define __OBJECT_POISON 0x80000000     /* Poison object */
 
+/* Not all arches define cache_line_size */
+#ifndef cache_line_size
+#define cache_line_size()      L1_CACHE_BYTES
+#endif
+
 static int kmem_size = sizeof(struct kmem_cache);
 
 #ifdef CONFIG_SMP
@@ -1480,8 +1485,8 @@ static unsigned long calculate_alignment(unsigned long flags,
         * then use it.
         */
        if ((flags & SLAB_HWCACHE_ALIGN) &&
-                       size > L1_CACHE_BYTES / 2)
-               return max_t(unsigned long, align, L1_CACHE_BYTES);
+                       size > cache_line_size() / 2)
+               return max_t(unsigned long, align, cache_line_size());
 
        if (align < ARCH_SLAB_MINALIGN)
                return ARCH_SLAB_MINALIGN;
@@ -1667,8 +1672,8 @@ static int calculate_sizes(struct kmem_cache *s)
                size += sizeof(void *);
        /*
         * Determine the alignment based on various parameters that the
-        * user specified (this is unecessarily complex due to the attempt
-        * to be compatible with SLAB. Should be cleaned up some day).
+        * user specified and the dynamic determination of cache line size
+        * on bootup.
         */
        align = calculate_alignment(flags, align, s->objsize);
 
@@ -2280,7 +2285,7 @@ void __init kmem_cache_init(void)
 
        printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
                " Processors=%d, Nodes=%d\n",
-               KMALLOC_SHIFT_HIGH, L1_CACHE_BYTES,
+               KMALLOC_SHIFT_HIGH, cache_line_size(),
                slub_min_order, slub_max_order, slub_min_objects,
                nr_cpu_ids, nr_node_ids);
 }