slub: add hooks for kmemcheck

Author:     Vegard Nossum <vegard.nossum@gmail.com>
AuthorDate: Thu, 3 Apr 2008 22:54:48 +0000 (00:54 +0200)
Commit:     Vegard Nossum <vegard.nossum@gmail.com>
CommitDate: Mon, 15 Jun 2009 10:40:07 +0000 (12:40 +0200)

Parts of this patch were contributed by Pekka Enberg, but they are
merged into a single patch here to keep the change atomic.
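
For context: every kmemcheck hook called below compiles away when
CONFIG_KMEMCHECK is not set, which is why none of the call sites in
mm/slub.c need #ifdef guards. A minimal sketch of that stub pattern,
modeled on the shape of <linux/kmemcheck.h> (the exact prototypes in
the tree may differ):

	#ifdef CONFIG_KMEMCHECK
	void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags,
				  void *object, size_t size);
	void kmemcheck_slab_free(struct kmem_cache *s, void *object,
				 size_t size);
	bool kmemcheck_page_is_tracked(struct page *p);
	#else
	/* kmemcheck compiled out: the hooks collapse to empty inlines. */
	static inline void kmemcheck_slab_alloc(struct kmem_cache *s,
						gfp_t gfpflags,
						void *object, size_t size)
	{
	}
	static inline void kmemcheck_slab_free(struct kmem_cache *s,
					       void *object, size_t size)
	{
	}
	static inline bool kmemcheck_page_is_tracked(struct page *p)
	{
		return false;
	}
	#endif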

Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Vegard Nossum <vegardno@ifi.uio.no>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum <vegardno@ifi.uio.no>
mm/slub.c

index 3964d3c..1cebaa7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -18,6 +18,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/kmemtrace.h>
+#include <linux/kmemcheck.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/kmemleak.h>
                SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE)
 
 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
-               SLAB_CACHE_DMA)
+               SLAB_CACHE_DMA | SLAB_NOTRACK)
 
 #ifndef ARCH_KMALLOC_MINALIGN
 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
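
SLUB merges compatible caches into a single slab. Adding SLAB_NOTRACK
to SLUB_MERGE_SAME means two caches can merge only if they agree on
the flag, so untracked objects never end up sharing a slab with
tracked ones. A sketch of the relevant test, modeled on the candidate
loop in find_mergeable() in mm/slub.c ("flags" being those requested
for the new cache; hedged, not a verbatim copy):

	/* Caches whose SLUB_MERGE_SAME bits differ must stay separate. */
	if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
		continue;	/* not a merge candidate */
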
@@ -1092,6 +1093,13 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
                stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
        }
+
+       if (kmemcheck_enabled
+               && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS)))
+       {
+               kmemcheck_alloc_shadow(s, flags, node, page, compound_order(page));
+       }
+
        page->objects = oo_objects(oo);
        mod_zone_page_state(page_zone(page),
                (s->flags & SLAB_RECLAIM_ACCOUNT) ?
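
At slab allocation, a shadow region is set up alongside the new pages
unless the cache opted out (SLAB_NOTRACK) or runs SLUB's own debugging
(DEBUG_DEFAULT_FLAGS): poisoning and redzoning write to the object, so
kmemcheck would see every byte as initialized anyway. kmemcheck keeps
one shadow byte of state per data byte. A sketch of the state set,
assuming the names used by the x86 kmemcheck implementation
(arch/x86/mm/kmemcheck/shadow.h):

	/* One shadow byte per data byte (names are an assumption). */
	enum kmemcheck_shadow {
		KMEMCHECK_SHADOW_UNALLOCATED,	/* slack, never handed out */
		KMEMCHECK_SHADOW_UNINITIALIZED,	/* allocated, never written */
		KMEMCHECK_SHADOW_INITIALIZED,	/* written at least once */
		KMEMCHECK_SHADOW_FREED,		/* object has been freed */
	};
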
@@ -1165,6 +1173,9 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
                __ClearPageSlubDebug(page);
        }
 
+       if (kmemcheck_page_is_tracked(page))
+               kmemcheck_free_shadow(s, page, compound_order(page));
+
        mod_zone_page_state(page_zone(page),
                (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
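
On the free side the guard is the page, not the cache flags:
kmemcheck_enabled can change at runtime, so a slab may have been
allocated without a shadow even though its cache is tracked now.
kmemcheck_page_is_tracked() asks the page itself whether a shadow was
attached. A hypothetical sketch only (page_shadow_of() is an invented
helper name, not a real kernel symbol):

	/* Free the shadow iff one was attached at allocation time. */
	static inline bool page_is_tracked(struct page *p)
	{
		return page_shadow_of(p) != NULL;	/* invented helper */
	}
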
@@ -1618,7 +1629,9 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
        if (unlikely((gfpflags & __GFP_ZERO) && object))
                memset(object, 0, objsize);
 
+       kmemcheck_slab_alloc(s, gfpflags, object, c->objsize);
        kmemleak_alloc_recursive(object, objsize, 1, s->flags, gfpflags);
+
        return object;
 }
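
kmemcheck_slab_alloc() sets the initial shadow state of the object
being handed out. Its placement matters: it runs after the __GFP_ZERO
memset(), so zeroed objects can be treated as already initialized. A
sketch of the alloc-side logic, modeled on what later landed in
mm/kmemcheck.c (hedged, not a verbatim copy):

	void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags,
				  void *object, size_t size)
	{
		if (gfpflags & __GFP_ZERO)	/* memset() did the init */
			return;
		if (s->flags & SLAB_NOTRACK)	/* cache opted out */
			return;
		if (!kmemcheck_enabled || gfpflags & __GFP_NOTRACK)
			kmemcheck_mark_initialized(object, size);
		else if (!s->ctor)	/* ctor objects keep their state */
			kmemcheck_mark_uninitialized(object, size);
	}
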
 
@@ -1751,6 +1764,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
        kmemleak_free_recursive(x, s->flags);
        local_irq_save(flags);
        c = get_cpu_slab(s, smp_processor_id());
+       kmemcheck_slab_free(s, object, c->objsize);
        debug_check_no_locks_freed(object, c->objsize);
        if (!(s->flags & SLAB_DEBUG_OBJECTS))
                debug_check_no_obj_freed(object, c->objsize);
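
The free hook flips the object's shadow to "freed" so a later read
through a stale pointer faults and gets reported. Caches with a
constructor or RCU-delayed freeing are skipped, since their objects
may legitimately be touched after kmem_cache_free(). A sketch modeled
on what later landed in mm/kmemcheck.c (hedged):

	void kmemcheck_slab_free(struct kmem_cache *s, void *object,
				 size_t size)
	{
		/* RCU readers, and state preserved by a ctor, may
		 * legally be accessed after free; don't mark those. */
		if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
			kmemcheck_mark_freed(object, size);
	}
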
@@ -2625,7 +2639,8 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 
        if (!s || !text || !kmem_cache_open(s, flags, text,
                        realsize, ARCH_KMALLOC_MINALIGN,
-                       SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) {
+                       SLAB_CACHE_DMA|SLAB_NOTRACK|__SYSFS_ADD_DEFERRED,
+                       NULL)) {
                kfree(s);
                kfree(text);
                goto unlock_out;
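
The DMA kmalloc caches are force-marked SLAB_NOTRACK because devices
write into these buffers without executing CPU instructions, so
kmemcheck's page-fault-based tracking would misreport device-filled
memory as uninitialized. Any other cache can opt out the same way; a
usage sketch (the cache name and size are made up, not from the patch):

	/* Opt a cache out of kmemcheck tracking. */
	struct kmem_cache *c = kmem_cache_create("my_cache", 256, 0,
						 SLAB_NOTRACK, NULL);
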
@@ -4396,6 +4411,8 @@ static char *create_unique_id(struct kmem_cache *s)
                *p++ = 'a';
        if (s->flags & SLAB_DEBUG_FREE)
                *p++ = 'F';
+       if (!(s->flags & SLAB_NOTRACK))
+               *p++ = 't';
        if (p != name + 1)
                *p++ = '-';
        p += sprintf(p, "%07d", s->size);
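
Note the inverted sense in this last hunk: unlike the other letters,
which record debug options that are set, 't' is appended when the
cache *is* tracked, i.e. when SLAB_NOTRACK is clear. Tracked and
untracked caches therefore can never collide on a sysfs alias id.
Worked example, assuming no other flag letters apply to a cache with
s->size == 256:

	:t-0000256	/* tracked, mergeable cache of size 256 */
	:0000256	/* SLAB_NOTRACK: no letters, hence no '-' either */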