int free_touched; /* updated without locking */
};
-/*
- * The slab allocator is initialized with interrupts disabled. Therefore, make
- * sure early boot allocations don't accidentally enable interrupts.
- */
-static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
-
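
Note for context: the slab-private mask deleted above is superseded by a single mask owned by the page allocator, which every allocation path then honours. A minimal sketch of that pattern follows; the GFP_BOOT_MASK name, its exact bits, and its placement in mm/page_alloc.c are assumptions based on the surrounding kernel code, not something this hunk shows:

	/* Sketch (assumed definition): global boot-time GFP mask.
	 * Clearing the blocking/IO/FS bits guarantees that allocations
	 * made before interrupts are enabled cannot sleep.
	 */
	#define GFP_BOOT_MASK \
		(__GFP_BITS_MASK & ~(__GFP_WAIT | __GFP_IO | __GFP_FS))

	gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

The widening to __GFP_BITS_MASK, deleted from slab in the hunk below, then happens exactly once in generic boot code after local interrupts are enabled.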
/*
* Need this for bootstrapping a per node allocator.
*/
{
struct kmem_cache *cachep;
- /*
- * Interrupts are enabled now so all GFP allocations are safe.
- */
- slab_gfp_mask = __GFP_BITS_MASK;
-
/* 6) resize the head arrays to their final sizes */
mutex_lock(&cache_chain_mutex);
list_for_each_entry(cachep, &cache_chain, next)
/* really off slab. No need for manual alignment */
slab_size =
cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
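
The descriptor size in this really-off-slab case is plain arithmetic; a worked instance with assumed sizes (64-bit build, 4-byte kmem_bufctl_t, 48-byte struct slab, all illustrative):

	/* Example: num = 64 objects per slab gives
	 *   slab_size = 64 * sizeof(kmem_bufctl_t) + sizeof(struct slab)
	 *             = 64 * 4 + 48 = 304 bytes.
	 * The descriptor sits in its own allocation, so it needs no
	 * padding out to the parent cache's colour/alignment.
	 */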
+
+#ifdef CONFIG_PAGE_POISONING
+	/*
+	 * If we're going to use the generic kernel_map_pages()
+	 * poisoning, then it's going to smash the contents of
+	 * the redzone and userword anyhow, so switch them off.
+	 */
+	if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
+		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
+#endif
}
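
Note on the new #ifdef above: when an object's size is an exact multiple of PAGE_SIZE, poisoning is applied at page granularity, which overwrites the whole page, redzone and userword included, so leaving SLAB_RED_ZONE or SLAB_STORE_USER set would only produce false positives. A sketch of the free-side path being accounted for; the shape is assumed from mm/slab.c's debug code and is not part of this patch:

	if (cachep->flags & SLAB_POISON) {
	#ifdef CONFIG_PAGE_POISONING
		if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
			/* Page-granular poison: the full page contents,
			 * including any redzone and userword, are smashed. */
			kernel_map_pages(virt_to_page(objp),
					 cachep->buffer_size / PAGE_SIZE, 0);
		} else
			poison_obj(cachep, objp, POISON_FREE);
	#else
		poison_obj(cachep, objp, POISON_FREE);
	#endif
	}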
cachep->colour_off = cache_line_size();
unsigned long save_flags;
void *ptr;
- flags &= slab_gfp_mask;
+ flags &= gfp_allowed_mask;
lockdep_trace_alloc(flags);
unsigned long save_flags;
void *objp;
- flags &= slab_gfp_mask;
+ flags &= gfp_allowed_mask;
lockdep_trace_alloc(flags);
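
Both hunks above switch the slab hot paths to the same global mask. The net effect during early boot, with GFP_KERNEL's bit composition as in gfp.h of this era:

	gfp_t flags = GFP_KERNEL;	/* __GFP_WAIT | __GFP_IO | __GFP_FS */
	flags &= gfp_allowed_mask;	/* boot mask clears WAIT/IO/FS */
	/* The request can no longer sleep, start I/O, or re-enter the
	 * filesystem until the mask is widened once interrupts are on. */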