slab: add hooks for kmemcheck
author     Pekka Enberg <penberg@cs.helsinki.fi>
           Fri, 9 May 2008 18:35:53 +0000 (20:35 +0200)
committer  Vegard Nossum <vegard.nossum@gmail.com>
           Mon, 15 Jun 2009 10:40:08 +0000 (12:40 +0200)
We now have SLAB support for kmemcheck! This means that it doesn't matter
whether one chooses SLAB or SLUB, or indeed whether Linus chooses to chuck
SLAB or SLUB... ;-)
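
For readers new to the series: the hooks pair up around the slab page and
object life cycle. Below is a minimal sketch of that call order as a
hypothetical userspace mock, not kernel code; the kmemcheck_* functions are
tracing stubs standing in for the real API, the SLAB_NOTRACK value and the
malloc()-backed "slab" are stand-ins, though the hook names and the
kmemcheck_enabled/SLAB_NOTRACK gating match the diff below.

	/*
	 * Hypothetical userspace model of the hook order added by this
	 * patch. The kmemcheck_* functions below only trace the calls.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	#define SLAB_NOTRACK 0x01000000UL	/* illustrative value only */

	static int kmemcheck_enabled = 1;

	struct kmem_cache {
		unsigned long flags;
		size_t obj_size;
	};

	/* kmem_getpages(): shadow pages are set up next to the slab's pages. */
	static void kmemcheck_alloc_shadow(struct kmem_cache *c)
	{
		if (kmemcheck_enabled && !(c->flags & SLAB_NOTRACK))
			puts("shadow alloc: object memory starts as uninitialized");
	}

	/* __cache_alloc()/__cache_alloc_node(): an object is handed out. */
	static void kmemcheck_slab_alloc(struct kmem_cache *c, void *obj)
	{
		printf("object %p handed out: reads of uninitialized bytes trap\n", obj);
	}

	/* __cache_free(): the object goes back to the slab. */
	static void kmemcheck_slab_free(struct kmem_cache *c, void *obj)
	{
		printf("object %p freed: later accesses can be reported\n", obj);
	}

	/* kmem_freepages(): the shadow goes away with the slab pages. */
	static void kmemcheck_free_shadow(struct kmem_cache *c)
	{
		puts("shadow freed with the slab's pages");
	}

	int main(void)
	{
		struct kmem_cache cache = { .flags = 0, .obj_size = 64 };
		void *obj;

		kmemcheck_alloc_shadow(&cache);		/* page-level hook */
		obj = malloc(cache.obj_size);		/* stands in for slab alloc */
		kmemcheck_slab_alloc(&cache, obj);	/* object-level hook */
		kmemcheck_slab_free(&cache, obj);	/* object-level hook */
		free(obj);
		kmemcheck_free_shadow(&cache);		/* page-level hook */
		return 0;
	}

Running the mock prints the four hook events in the order the patch wires
them up, which is also the order to keep in mind when reading the hunks:
page-level shadow setup and teardown bracket the per-object alloc/free hooks.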

Cc: Ingo Molnar <mingo@elte.hu>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
mm/slab.c

index bf0c3af..95b6c5e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
 #include       <linux/rtmutex.h>
 #include       <linux/reciprocal_div.h>
 #include       <linux/debugobjects.h>
+#include       <linux/kmemcheck.h>
 
 #include       <asm/cacheflush.h>
 #include       <asm/tlbflush.h>
                         SLAB_STORE_USER | \
                         SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
                         SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-                        SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
+                        SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
 #else
 # define CREATE_MASK   (SLAB_HWCACHE_ALIGN | \
                         SLAB_CACHE_DMA | \
                         SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
                         SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-                        SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
+                        SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
 #endif
 
 /*
@@ -1624,6 +1625,10 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
                        NR_SLAB_UNRECLAIMABLE, nr_pages);
        for (i = 0; i < nr_pages; i++)
                __SetPageSlab(page + i);
+
+       if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK))
+               kmemcheck_alloc_shadow(cachep, flags, nodeid, page, cachep->gfporder);
+
        return page_address(page);
 }
 
@@ -1636,6 +1641,9 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
        struct page *page = virt_to_page(addr);
        const unsigned long nr_freed = i;
 
+       if (kmemcheck_page_is_tracked(page))
+               kmemcheck_free_shadow(cachep, page, cachep->gfporder);
+
        if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
                sub_zone_page_state(page_zone(page),
                                NR_SLAB_RECLAIMABLE, nr_freed);
@@ -3309,6 +3317,9 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
        kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
                                 flags);
 
+       if (likely(ptr))
+               kmemcheck_slab_alloc(cachep, flags, ptr, obj_size(cachep));
+
        if (unlikely((flags & __GFP_ZERO) && ptr))
                memset(ptr, 0, obj_size(cachep));
 
@@ -3367,6 +3378,9 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
                                 flags);
        prefetchw(objp);
 
+       if (likely(objp))
+               kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep));
+
        if (unlikely((flags & __GFP_ZERO) && objp))
                memset(objp, 0, obj_size(cachep));
 
@@ -3483,6 +3497,8 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
        kmemleak_free_recursive(objp, cachep->flags);
        objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
+       kmemcheck_slab_free(cachep, objp, obj_size(cachep));
+
        /*
         * Skip calling cache_free_alien() when the platform is not numa.
         * This will avoid cache misses that happen while accessing slabp (which
         * is per page memory reference) to get nodeid. Instead use a global
         * variable to skip the call, which is mostly likely to be present in
         * the cache.
         */