X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?p=pandora-kernel.git;a=blobdiff_plain;f=mm%2Fslab.c;h=bb39255d8b3f731283fb0328baac63141e8a6f59;hp=83311c9aaf9de0ad8494f04703c248f155cfc2c1;hb=4cecd7e369e1b252db1d64451462221b362eed1c;hpb=091c0f86bad6bb0b003dff2f6195508e29548648

diff --git a/mm/slab.c b/mm/slab.c
index 83311c9aaf9d..bb39255d8b3f 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1669,9 +1669,6 @@ void __init kmem_cache_init_late(void)
 
 	g_cpucache_up = LATE;
 
-	/* Annotate slab for lockdep -- annotate the malloc caches */
-	init_lock_keys();
-
 	/* 6) resize the head arrays to their final sizes */
 	mutex_lock(&cache_chain_mutex);
 	list_for_each_entry(cachep, &cache_chain, next)
@@ -1679,6 +1676,9 @@ void __init kmem_cache_init_late(void)
 			BUG();
 	mutex_unlock(&cache_chain_mutex);
 
+	/* Annotate slab for lockdep -- annotate the malloc caches */
+	init_lock_keys();
+
 	/* Done! */
 	g_cpucache_up = FULL;
 
@@ -3255,7 +3255,7 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 
 #ifdef CONFIG_NUMA
 /*
- * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
+ * Try allocating on another node if PFA_SPREAD_SLAB|PF_MEMPOLICY.
  *
  * If we are in_interrupt, then process context, including cpusets and
  * mempolicy, may not apply and should not be used for allocation policy.
@@ -3267,12 +3267,10 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
 	if (in_interrupt() || (flags & __GFP_THISNODE))
 		return NULL;
 	nid_alloc = nid_here = numa_mem_id();
-	get_mems_allowed();
 	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
 		nid_alloc = cpuset_slab_spread_node();
 	else if (current->mempolicy)
-		nid_alloc = slab_node(current->mempolicy);
-	put_mems_allowed();
+		nid_alloc = slab_node();
 	if (nid_alloc != nid_here)
 		return ____cache_alloc_node(cachep, flags, nid_alloc);
 	return NULL;
@@ -3295,14 +3293,17 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
 	enum zone_type high_zoneidx = gfp_zone(flags);
 	void *obj = NULL;
 	int nid;
+	unsigned int cpuset_mems_cookie;
 
 	if (flags & __GFP_THISNODE)
 		return NULL;
 
-	get_mems_allowed();
-	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
 	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
 
+retry_cpuset:
+	cpuset_mems_cookie = get_mems_allowed();
+	zonelist = node_zonelist(slab_node(), flags);
+
 retry:
 	/*
 	 * Look through allowed nodes for objects available
@@ -3355,7 +3356,9 @@ retry:
 			}
 		}
 	}
-	put_mems_allowed();
+
+	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !obj))
+		goto retry_cpuset;
 	return obj;
 }
 
@@ -3493,7 +3496,7 @@ __do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
 {
 	void *objp;
 
-	if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
+	if (unlikely((current->flags & PF_MEMPOLICY) || cpuset_do_slab_mem_spread())) {
 		objp = alternate_node_alloc(cache, flags);
 		if (objp)
 			goto out;
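
Note (not part of the patch above): the fallback_alloc() hunk replaces the old blocking get_mems_allowed()/put_mems_allowed() pair with a cookie-based scheme: the reader samples a cookie before consulting the cpuset's allowed nodes, performs the allocation attempt, and retries the whole thing if put_mems_allowed() reports that mems_allowed changed underneath it and no object was obtained. The following is a minimal userspace sketch of that read-side retry pattern, assuming a seqcount-style counter; the names (mems_seq, read_cookie, read_retry) are illustrative only and are not the kernel's implementation.

/*
 * Illustrative sketch only -- NOT kernel code.  It mimics the read-side
 * pattern adopted by the fallback_alloc() hunk: sample a cookie, do the
 * work, and retry if a concurrent update invalidated the snapshot and the
 * work produced no result.  All names here are made up for the example.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned int mems_seq;        /* even = stable, odd = update in progress */
static int mems_allowed = 1;         /* state the reader wants a consistent view of */

static unsigned int read_cookie(void)          /* analogue of get_mems_allowed() */
{
	return mems_seq;
}

static bool read_retry(unsigned int cookie)    /* analogue of !put_mems_allowed(cookie) */
{
	return mems_seq != cookie;
}

static void writer_update(int new_mask)        /* what a cpuset update would do */
{
	mems_seq++;                /* mark update in progress */
	mems_allowed = new_mask;
	mems_seq++;                /* publish the new state */
}

int main(void)
{
	void *obj = NULL;
	unsigned int cookie;
	int attempts = 0;

retry_cpuset:
	cookie = read_cookie();
	attempts++;

	/* Simulated allocation attempt that fails while racing with an update. */
	if (attempts == 1)
		writer_update(2);      /* concurrent change invalidates the snapshot */
	else
		obj = &mems_allowed;   /* second pass "succeeds" */

	if (read_retry(cookie) && !obj)
		goto retry_cpuset;

	printf("got object after %d attempt(s), mems_allowed=%d\n",
	       attempts, mems_allowed);
	return 0;
}

The design point the sketch illustrates: the retry only happens when the snapshot was invalidated *and* the allocation failed, so a successful allocation is never thrown away just because the cpuset changed concurrently, which is the same condition the patch uses in fallback_alloc().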