static inline bool is_slab_pfmemalloc(struct page *page)
{
- struct page *mem_page = virt_to_page(page->s_mem);
-
- return PageSlabPfmemalloc(mem_page);
+ return PageSlabPfmemalloc(page);
}
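For context, PageSlabPfmemalloc() is a thin alias over an existing page flag (PG_active is reused as the pfmemalloc marker on slab pages), so the test on the slab's own head page is a single bit check. A rough sketch of the page-flags.h helper this relies on, not verbatim:

static inline int PageSlabPfmemalloc(struct page *page)
{
	/* PG_active doubles as the pfmemalloc marker on slab pages */
	VM_BUG_ON(!PageSlab(page));
	return PageActive(page);
}

After the freelist rework the mark is kept on the slab's head page itself, so the extra hop through page->s_mem buys nothing.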
/* Clears pfmemalloc_active if no slabs have pfmemalloc set */
n = cachep->node[numa_mem_id()];
if (!list_empty(&n->slabs_free) && force_refill) {
struct page *page = virt_to_head_page(objp);
- ClearPageSlabPfmemalloc(virt_to_head_page(page->s_mem));
+ ClearPageSlabPfmemalloc(page);
clear_obj_pfmemalloc(&objp);
recheck_pfmemalloc_active(cachep, ac);
return objp;
if (unlikely(pfmemalloc_active)) {
/* Some pfmemalloc slabs exist, check if this is one */
struct page *page = virt_to_head_page(objp);
- struct page *mem_page = virt_to_head_page(page->s_mem);
- if (PageSlabPfmemalloc(mem_page))
+ if (PageSlabPfmemalloc(page))
set_obj_pfmemalloc(&objp);
}
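Both pfmemalloc hunks above depend on the mark travelling with the object pointer while it sits in the per-CPU array cache. The helpers (shown roughly as they appear in mm/slab.c of this series; the call sites above fix their signatures) borrow the pointer's low bit, which is always zero for word-aligned slab objects:

#define SLAB_OBJ_PFMEMALLOC	1

static inline bool is_obj_pfmemalloc(void *objp)
{
	return (unsigned long)objp & SLAB_OBJ_PFMEMALLOC;
}

static inline void set_obj_pfmemalloc(void **objp)
{
	*objp = (void *)((unsigned long)*objp | SLAB_OBJ_PFMEMALLOC);
}

static inline void clear_obj_pfmemalloc(void **objp)
{
	*objp = (void *)((unsigned long)*objp & ~SLAB_OBJ_PFMEMALLOC);
}

This is why the first hunk calls clear_obj_pfmemalloc(&objp) before returning: the tag must be stripped before the caller dereferences the pointer.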
*/
static void slab_destroy(struct kmem_cache *cachep, struct page *page)
{
- struct freelist *freelist;
+ void *freelist;
freelist = page->freelist;
slab_destroy_debugcheck(cachep, page);
* kmem_find_general_cachep until the initialization is complete.
* Hence freelist_cache cannot be the same as the original cache.
*/
-static struct freelist *alloc_slabmgmt(struct kmem_cache *cachep,
+static void *alloc_slabmgmt(struct kmem_cache *cachep,
struct page *page, int colour_off,
gfp_t local_flags, int nodeid)
{
- struct freelist *freelist;
+ void *freelist;
void *addr = page_address(page);
if (OFF_SLAB(cachep)) {
/* Slab management obj is off-slab. */
freelist = kmem_cache_alloc_node(cachep->freelist_cache,
local_flags, nodeid);
- /*
- * If the first object in the slab is leaked (it's allocated
- * but no one has a reference to it), we want to make sure
- * kmemleak does not treat the ->s_mem pointer as a reference
- * to the object. Otherwise we will not report the leak.
- */
- kmemleak_scan_area(&page->lru, sizeof(struct list_head),
- local_flags);
if (!freelist)
return NULL;
} else {
return freelist;
}
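The on-slab else branch is elided above. Reconstructed loosely from the surrounding code (a sketch under the assumption that cachep->freelist_size holds the index array's size, not a verbatim quote of the patch), it carves the freelist out of the slab's own pages at the colour offset:

	} else {
		/* index array lives at the start of the slab itself */
		freelist = addr + colour_off;
		colour_off += cachep->freelist_size;
	}
	page->active = 0;
	page->s_mem = addr + colour_off;

Either way the function now hands back an untyped region big enough for cachep->num indices, which is what makes the dedicated struct freelist type pointless.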
-static inline unsigned int *slab_bufctl(struct page *page)
+static inline unsigned int *slab_freelist(struct page *page)
{
return (unsigned int *)(page->freelist);
}
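slab_freelist() exposes page->freelist as a bare array of object indices; pairing an index with page->s_mem recovers the object address. The companion helper, whose signature is fixed by the slab_get_obj() hunk below, is roughly:

static inline void *index_to_obj(struct kmem_cache *cache,
				 struct page *page, unsigned int idx)
{
	return page->s_mem + cache->size * idx;
}

Entries are therefore small integers in [0, cachep->num), not pointers, which keeps the management data compact.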
if (cachep->ctor)
cachep->ctor(objp);
#endif
- slab_bufctl(page)[i] = i;
+ slab_freelist(page)[i] = i;
}
}
{
void *objp;
- objp = index_to_obj(cachep, page, slab_bufctl(page)[page->active]);
+ objp = index_to_obj(cachep, page, slab_freelist(page)[page->active]);
page->active++;
#if DEBUG
WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
/* Verify double free bug */
for (i = page->active; i < cachep->num; i++) {
- if (slab_bufctl(page)[i] == objnr) {
+ if (slab_freelist(page)[i] == objnr) {
printk(KERN_ERR "slab: double free detected in cache "
"'%s', objp %p\n", cachep->name, objp);
BUG();
}
#endif
page->active--;
- slab_bufctl(page)[page->active] = objnr;
+ slab_freelist(page)[page->active] = objnr;
}
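Taken together, slab_get_obj() and slab_put_obj() treat the index array as a stack partitioned by page->active: entries below it belong to allocated objects, entries at or above it are free. A standalone userspace model of the mechanism (illustrative names, not kernel code):

#include <assert.h>

struct slab_model {
	unsigned int num;		/* objects per slab */
	unsigned int active;		/* [0, active) are allocated */
	unsigned int freelist[16];	/* one index per object */
};

static unsigned int model_get_obj(struct slab_model *s)
{
	assert(s->active < s->num);
	return s->freelist[s->active++];	/* pop a free index */
}

static void model_put_obj(struct slab_model *s, unsigned int objnr)
{
	assert(s->active > 0);
	s->freelist[--s->active] = objnr;	/* push it back */
}

The DEBUG scan above is the same invariant read backwards: if objnr already appears in freelist[active..num), the object was freed twice.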
/*
* virtual address for kfree, ksize, and slab debugging.
*/
static void slab_map_pages(struct kmem_cache *cache, struct page *page,
- struct freelist *freelist)
+ void *freelist)
{
page->slab_cache = cache;
page->freelist = freelist;
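Those two stores are what let the free and debug paths walk back from a bare virtual address to the owning cache. Roughly, matching the mm/slab.c helper of this era:

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_head_page(obj);
	return page->slab_cache;
}

kfree() and ksize() begin exactly this way, which is what the comment about virtual addresses refers to.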
static int cache_grow(struct kmem_cache *cachep,
gfp_t flags, int nodeid, struct page *page)
{
- struct freelist *freelist;
+ void *freelist;
size_t offset;
gfp_t local_flags;
struct kmem_cache_node *n;
VM_BUG_ON(!mutex_is_locked(&slab_mutex));
for_each_memcg_cache_index(i) {
- c = cache_from_memcg(cachep, i);
+ c = cache_from_memcg_idx(cachep, i);
if (c)
/* return value determined by the parent cache only */
__do_tune_cpucache(c, limit, batchcount, shared, gfp);
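The rename makes explicit that the lookup key is a memcg array index rather than a memcg pointer. As a sketch of the mm/slab.h helper (simplified; later trees add RCU annotations):

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	if (!s->memcg_params)
		return NULL;
	return s->memcg_params->memcg_caches[idx];
}

Each per-memcg clone is retuned with the same limit/batchcount/shared values, while, as the comment notes, only the parent cache decides the return value.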
for (j = page->active; j < c->num; j++) {
/* Skip freed item */
- if (slab_bufctl(page)[j] == i) {
+ if (slab_freelist(page)[j] == i) {
active = false;
break;
}
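This is the leaks/debug scan rewritten for the index-array freelist: object i is live exactly when its index is absent from the free region. As a standalone model (illustrative, not kernel code):

#include <stdbool.h>

static bool obj_is_live(const unsigned int *freelist,
			unsigned int active, unsigned int num,
			unsigned int i)
{
	unsigned int j;

	for (j = active; j < num; j++)
		if (freelist[j] == i)
			return false;	/* index is free: skip object */
	return true;
}

The scan is quadratic in objects per slab, an acceptable cost on a debug-only path.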