diff --git a/mm/slab.c b/mm/slab.c
index 04b308c..06236e4 100644
--- a/mm/slab.c
+++ b/mm/slab.c
 #include       <linux/fault-inject.h>
 #include       <linux/rtmutex.h>
 #include       <linux/reciprocal_div.h>
+#include       <linux/debugobjects.h>
 
 #include       <asm/cacheflush.h>
 #include       <asm/tlbflush.h>
 #define        BYTES_PER_WORD          sizeof(void *)
 #define        REDZONE_ALIGN           max(BYTES_PER_WORD, __alignof__(unsigned long long))
 
-#ifndef cache_line_size
-#define cache_line_size()      L1_CACHE_BYTES
-#endif
-
 #ifndef ARCH_KMALLOC_MINALIGN
 /*
  * Enforce a minimum alignment for the kmalloc caches.
                         SLAB_CACHE_DMA | \
                         SLAB_STORE_USER | \
                         SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
-                        SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
+                        SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
+                        SLAB_DEBUG_OBJECTS)
 #else
 # define CREATE_MASK   (SLAB_HWCACHE_ALIGN | \
                         SLAB_CACHE_DMA | \
                         SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
-                        SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
+                        SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
+                        SLAB_DEBUG_OBJECTS)
 #endif
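
SLAB_DEBUG_OBJECTS joins CREATE_MASK so that kmem_cache_create() accepts it as a valid flag; a cache created with it is exempted from the free-time debugobjects check added later in this patch, which lets the debugobjects core allocate its own tracking entries without the check recursing into itself.  A minimal sketch of a caller passing the flag (cache name and object type are illustrative, assuming the 2.6.26-era five-argument kmem_cache_create()):

        #include <linux/debugobjects.h>
        #include <linux/errno.h>
        #include <linux/init.h>
        #include <linux/slab.h>

        static struct kmem_cache *tracker_cache;        /* hypothetical cache */

        static int __init tracker_init(void)
        {
                /* SLAB_DEBUG_OBJECTS: skip debug_check_no_obj_freed() on
                 * frees from this cache */
                tracker_cache = kmem_cache_create("tracker_cache",
                                                  sizeof(struct debug_obj), 0,
                                                  SLAB_DEBUG_OBJECTS, NULL);
                return tracker_cache ? 0 : -ENOMEM;
        }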
 
 /*
@@ -862,7 +861,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
        *left_over = slab_size - nr_objs*buffer_size - mgmt_size;
 }
 
-#define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)
+#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
 
 static void __slab_error(const char *function, struct kmem_cache *cachep,
                        char *msg)
@@ -1160,14 +1159,13 @@ static void __cpuinit cpuup_canceled(long cpu)
        struct kmem_cache *cachep;
        struct kmem_list3 *l3 = NULL;
        int node = cpu_to_node(cpu);
+       node_to_cpumask_ptr(mask, node);
 
        list_for_each_entry(cachep, &cache_chain, next) {
                struct array_cache *nc;
                struct array_cache *shared;
                struct array_cache **alien;
-               cpumask_t mask;
 
-               mask = node_to_cpumask(node);
                /* cpu is dead; no one can alloc from it. */
                nc = cachep->array[cpu];
                cachep->array[cpu] = NULL;
@@ -1183,7 +1181,7 @@ static void __cpuinit cpuup_canceled(long cpu)
                if (nc)
                        free_block(cachep, nc->entry, nc->avail, node);
 
-               if (!cpus_empty(mask)) {
+               if (!cpus_empty(*mask)) {
                        spin_unlock_irq(&l3->list_lock);
                        goto free_array_cache;
                }
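
With a large NR_CPUS a cpumask_t can run to hundreds of bytes, so copying the node's mask onto the stack once per cache inside the loop is wasteful; node_to_cpumask_ptr() (a 2.6.26-era helper, assumed here) instead declares a reference to the node's mask once, before the loop, and cpus_empty() is then applied to the dereferenced pointer.  A rough sketch of the pattern:

        #include <linux/cpumask.h>
        #include <linux/topology.h>

        /* Hypothetical helper, for illustration only. */
        static int node_has_online_cpus(int node)
        {
                /* declares 'mask' referring to the node's cpumask; on
                 * architectures with a node-to-cpumask map this avoids
                 * placing a full cpumask_t on the stack */
                node_to_cpumask_ptr(mask, node);

                return !cpus_empty(*mask);
        }
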
@@ -2158,7 +2156,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
         */
        if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
            size > KMALLOC_MAX_SIZE) {
-               printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
+               printk(KERN_ERR "%s: Early error in slab %s\n", __func__,
                                name);
                BUG();
        }
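
The __FUNCTION__ to __func__ conversions here and in the slab_error() macro above swap the GCC-specific spelling for the C99-standard predefined identifier; both expand to the name of the enclosing function.  For illustration only:

        #include <linux/kernel.h>

        static void example(void)
        {
                /* prints "example: something went wrong" */
                printk(KERN_ERR "%s: something went wrong\n", __func__);
        }
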
@@ -3243,15 +3241,16 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
 {
        struct zonelist *zonelist;
        gfp_t local_flags;
-       struct zone **z;
+       struct zoneref *z;
+       struct zone *zone;
+       enum zone_type high_zoneidx = gfp_zone(flags);
        void *obj = NULL;
        int nid;
 
        if (flags & __GFP_THISNODE)
                return NULL;
 
-       zonelist = &NODE_DATA(slab_node(current->mempolicy))
-                       ->node_zonelists[gfp_zone(flags)];
+       zonelist = node_zonelist(slab_node(current->mempolicy), flags);
        local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
 
 retry:
@@ -3259,10 +3258,10 @@ retry:
         * Look through allowed nodes for objects available
         * from existing per node queues.
         */
-       for (z = zonelist->zones; *z && !obj; z++) {
-               nid = zone_to_nid(*z);
+       for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+               nid = zone_to_nid(zone);
 
-               if (cpuset_zone_allowed_hardwall(*z, flags) &&
+               if (cpuset_zone_allowed_hardwall(zone, flags) &&
                        cache->nodelists[nid] &&
                        cache->nodelists[nid]->free_objects)
                                obj = ____cache_alloc_node(cache,
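
fallback_alloc() is converted to the reworked zonelist API from the same merge window: a node's zonelist is now picked with node_zonelist(), and the GFP zone bits filter zones at iteration time via gfp_zone() and for_each_zone_zonelist(), instead of indexing one pre-built zonelist per zone type.  The iteration idiom on its own, sketched under those assumptions:

        #include <linux/gfp.h>
        #include <linux/mmzone.h>
        #include <linux/topology.h>

        /* Hypothetical helper: return the first node whose zones are
         * allowed by the GFP zone bits in 'flags', or -1 if none. */
        static int first_allowed_nid(gfp_t flags)
        {
                struct zonelist *zonelist = node_zonelist(numa_node_id(), flags);
                enum zone_type high_zoneidx = gfp_zone(flags);
                struct zoneref *z;
                struct zone *zone;

                for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
                        return zone_to_nid(zone);

                return -1;
        }
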
@@ -3764,6 +3763,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 
        local_irq_save(flags);
        debug_check_no_locks_freed(objp, obj_size(cachep));
+       if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
+               debug_check_no_obj_freed(objp, obj_size(cachep));
        __cache_free(cachep, objp);
        local_irq_restore(flags);
 }
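
debug_check_no_obj_freed() lets the debugobjects infrastructure warn when memory holding a still-active tracked object (a pending timer, for example, one of the first users of the API) is handed back to the allocator.  Caches flagged SLAB_DEBUG_OBJECTS, i.e. the cache the debugobjects core allocates its own tracking entries from, are skipped so the check cannot recurse into itself.  A hedged example of the bug class this catches (names are made up; the report requires CONFIG_DEBUG_OBJECTS_FREE):

        #include <linux/slab.h>
        #include <linux/timer.h>

        struct foo {
                struct timer_list timer;        /* tracked while pending */
        };

        static void foo_destroy(struct foo *f)
        {
                /* BUG: no del_timer_sync(&f->timer) before the free; the
                 * debug_check_no_obj_freed() call in the free path reports
                 * an active object inside the freed range. */
                kfree(f);
        }
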
@@ -3789,6 +3790,7 @@ void kfree(const void *objp)
        kfree_debugcheck(objp);
        c = virt_to_cache(objp);
        debug_check_no_locks_freed(objp, obj_size(c));
+       debug_check_no_obj_freed(objp, obj_size(c));
        __cache_free(c, (void *)objp);
        local_irq_restore(flags);
 }
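
kfree() gets the same hook unconditionally: the generic kmalloc caches are never created with SLAB_DEBUG_OBJECTS, so no recursion guard is needed on this path.  When CONFIG_DEBUG_OBJECTS_FREE is not enabled the call is expected to compile away to an empty stub, along the usual kernel lines (a sketch of the pattern, not a verbatim copy of <linux/debugobjects.h>):

        #ifdef CONFIG_DEBUG_OBJECTS_FREE
        extern void debug_check_no_obj_freed(const void *address, unsigned long size);
        #else
        static inline void
        debug_check_no_obj_freed(const void *address, unsigned long size) { }
        #endif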