X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?p=pandora-kernel.git;a=blobdiff_plain;f=mm%2Fslab.c;h=bb39255d8b3f731283fb0328baac63141e8a6f59;hp=708efe886154da626cfe01f4a2b6eaf055ada914;hb=4cecd7e369e1b252db1d64451462221b362eed1c;hpb=1dd6c0770d7d4ca477a1a8452ab0161b1150e4ad

diff --git a/mm/slab.c b/mm/slab.c
index 708efe886154..bb39255d8b3f 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -595,6 +595,7 @@ static enum {
 	PARTIAL_AC,
 	PARTIAL_L3,
 	EARLY,
+	LATE,
 	FULL
 } g_cpucache_up;
 
@@ -671,7 +672,7 @@ static void init_node_lock_keys(int q)
 {
 	struct cache_sizes *s = malloc_sizes;
 
-	if (g_cpucache_up != FULL)
+	if (g_cpucache_up < LATE)
 		return;
 
 	for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
@@ -1666,8 +1667,7 @@ void __init kmem_cache_init_late(void)
 {
 	struct kmem_cache *cachep;
 
-	/* Annotate slab for lockdep -- annotate the malloc caches */
-	init_lock_keys();
+	g_cpucache_up = LATE;
 
 	/* 6) resize the head arrays to their final sizes */
 	mutex_lock(&cache_chain_mutex);
@@ -1676,6 +1676,9 @@ void __init kmem_cache_init_late(void)
 			BUG();
 	mutex_unlock(&cache_chain_mutex);
 
+	/* Annotate slab for lockdep -- annotate the malloc caches */
+	init_lock_keys();
+
 	/* Done! */
 	g_cpucache_up = FULL;
 
@@ -3252,7 +3255,7 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 
 #ifdef CONFIG_NUMA
 /*
- * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
+ * Try allocating on another node if PFA_SPREAD_SLAB|PF_MEMPOLICY.
  *
  * If we are in_interrupt, then process context, including cpusets and
  * mempolicy, may not apply and should not be used for allocation policy.
@@ -3264,12 +3267,10 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
 	if (in_interrupt() || (flags & __GFP_THISNODE))
 		return NULL;
 	nid_alloc = nid_here = numa_mem_id();
-	get_mems_allowed();
 	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
 		nid_alloc = cpuset_slab_spread_node();
 	else if (current->mempolicy)
-		nid_alloc = slab_node(current->mempolicy);
-	put_mems_allowed();
+		nid_alloc = slab_node();
 	if (nid_alloc != nid_here)
 		return ____cache_alloc_node(cachep, flags, nid_alloc);
 	return NULL;
@@ -3292,14 +3293,17 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
 	enum zone_type high_zoneidx = gfp_zone(flags);
 	void *obj = NULL;
 	int nid;
+	unsigned int cpuset_mems_cookie;
 
 	if (flags & __GFP_THISNODE)
 		return NULL;
 
-	get_mems_allowed();
-	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
 	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
 
+retry_cpuset:
+	cpuset_mems_cookie = get_mems_allowed();
+	zonelist = node_zonelist(slab_node(), flags);
+
 retry:
 	/*
 	 * Look through allowed nodes for objects available
@@ -3352,7 +3356,9 @@ retry:
 			}
 		}
 	}
-	put_mems_allowed();
+
+	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !obj))
+		goto retry_cpuset;
 	return obj;
 }
 
@@ -3490,7 +3496,7 @@ __do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
 {
 	void *objp;
 
-	if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
+	if (unlikely((current->flags & PF_MEMPOLICY) || cpuset_do_slab_mem_spread())) {
 		objp = alternate_node_alloc(cache, flags);
 		if (objp)
 			goto out;
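
The fallback_alloc() hunks above carry the substance of this diff: the old get_mems_allowed()/put_mems_allowed() pair, which pinned the task's cpuset mems_allowed for the whole allocation, is replaced by a seqcount-style cookie. The allocator snapshots a cookie, allocates optimistically, and jumps back to retry_cpuset only in the narrow case where the allocation failed and put_mems_allowed() reports that mems_allowed changed underneath it. What follows is a minimal, self-contained userspace C sketch of that snapshot/validate/retry pattern, not the kernel implementation; every name in it (cookie_begin, cookie_still_valid, try_alloc_on, and so on) is illustrative and does not come from mm/slab.c.

/*
 * Userspace sketch of the optimistic read-retry pattern that the
 * fallback_alloc() hunk switches to: take a snapshot cookie, do the
 * work, and retry only if the work failed AND the shared state
 * changed underneath us. All names are illustrative.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic unsigned int seq;  /* even = stable, odd = writer active */
static int allowed_node;          /* the "mems_allowed"-like shared state */

static unsigned int cookie_begin(void)  /* analogous to get_mems_allowed() */
{
	unsigned int s;
	while ((s = atomic_load(&seq)) & 1)
		;  /* a writer is mid-update: wait for a stable snapshot */
	return s;
}

static bool cookie_still_valid(unsigned int cookie)  /* analogous to put_mems_allowed(cookie) */
{
	return atomic_load(&seq) == cookie;
}

static void *try_alloc_on(int node)  /* stand-in for the real allocation */
{
	(void)node;
	return NULL;  /* pretend the node is out of memory */
}

static void *alloc_with_retry(void)
{
	unsigned int cookie;
	void *obj;
	int attempts = 0;

retry_cpuset:
	cookie = cookie_begin();
	obj = try_alloc_on(allowed_node);
	/*
	 * Retry only on the narrow race the kernel hunk targets:
	 * the allocation failed and the allowed set changed while we
	 * were working from a stale snapshot. The attempt cap is a
	 * toy-program safeguard, not part of the kernel pattern.
	 */
	if (!cookie_still_valid(cookie) && !obj && attempts++ < 3)
		goto retry_cpuset;
	return obj;
}

int main(void)
{
	printf("obj = %p\n", alloc_with_retry());
	return 0;
}

The design point mirrored here is that the reader never blocks a concurrent mems_allowed writer and pays nothing in the common case; the extra pass happens only on the rare race where an update invalidates the snapshot mid-allocation, which is why the kernel hunk wraps the retry condition in unlikely().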