#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/bit_spinlock.h>
#include <linux/page_cgroup.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/cgroup.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>

static void __meminit init_page_cgroup(struct page_cgroup *pc, unsigned long id)
{
	pc->flags = 0;
	set_page_cgroup_array_id(pc, id);
	pc->mem_cgroup = NULL;
	INIT_LIST_HEAD(&pc->lru);
}
static unsigned long total_usage;

#if !defined(CONFIG_SPARSEMEM)

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	pgdat->node_page_cgroup = NULL;
}

struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long offset;
	struct page_cgroup *base;

	base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
	if (unlikely(!base))
		return NULL;

	offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
	return base + offset;
}
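
/*
 * Example with hypothetical numbers: on a node with node_start_pfn == 0x1000,
 * the page_cgroup for pfn 0x1234 is node_page_cgroup[0x234].
 * lookup_cgroup_page() below just inverts this arithmetic.
 */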

struct page *lookup_cgroup_page(struct page_cgroup *pc)
{
	unsigned long pfn;
	struct page *page;
	pg_data_t *pgdat;

	pgdat = NODE_DATA(page_cgroup_array_id(pc));
	pfn = pc - pgdat->node_page_cgroup + pgdat->node_start_pfn;
	page = pfn_to_page(pfn);
	VM_BUG_ON(pc != lookup_page_cgroup(page));
	return page;
}

static int __init alloc_node_page_cgroup(int nid)
{
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	unsigned long start_pfn, nr_pages, index;

	start_pfn = NODE_DATA(nid)->node_start_pfn;
	nr_pages = NODE_DATA(nid)->node_spanned_pages;

	if (!nr_pages)
		return 0;

	table_size = sizeof(struct page_cgroup) * nr_pages;

	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!base)
		return -ENOMEM;
	for (index = 0; index < nr_pages; index++) {
		pc = base + index;
		init_page_cgroup(pc, nid);
	}
	NODE_DATA(nid)->node_page_cgroup = base;
	total_usage += table_size;
	return 0;
}

void __init page_cgroup_init_flatmem(void)
{
	int nid, fail;

	if (mem_cgroup_disabled())
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_cgroup(nid);
		if (fail)
			goto fail;
	}
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you"
	" don't want memory cgroups\n");
	return;
fail:
	printk(KERN_CRIT "allocation of page_cgroup failed.\n");
	printk(KERN_CRIT "please try 'cgroup_disable=memory' boot option\n");
	panic("Out of memory");
}

#else /* CONFIG_FLAT_NODE_MEM_MAP */

struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);

	if (!section->page_cgroup)
		return NULL;
	return section->page_cgroup + pfn;
}

struct page *lookup_cgroup_page(struct page_cgroup *pc)
{
	struct mem_section *section;
	struct page *page;
	unsigned long nr;

	nr = page_cgroup_array_id(pc);
	section = __nr_to_section(nr);
	page = pfn_to_page(pc - section->page_cgroup);
	VM_BUG_ON(pc != lookup_page_cgroup(page));
	return page;
}

static void *__meminit alloc_page_cgroup(size_t size, int nid)
{
	void *addr = NULL;
	gfp_t flags = GFP_KERNEL | __GFP_NOWARN;

	addr = alloc_pages_exact_nid(nid, size, flags);
	if (addr) {
		kmemleak_alloc(addr, size, 1, flags);
		return addr;
	}
	/* Fall back to vmalloc if the exact allocation fails. */
	if (node_state(nid, N_HIGH_MEMORY))
		addr = vmalloc_node(size, nid);
	else
		addr = vmalloc(size);

	return addr;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void free_page_cgroup(void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		struct page *page = virt_to_page(addr);
		size_t table_size =
			sizeof(struct page_cgroup) * PAGES_PER_SECTION;

		BUG_ON(PageReserved(page));
		free_pages_exact(addr, table_size);
	}
}
#endif

static int __meminit init_section_page_cgroup(unsigned long pfn, int nid)
{
	struct page_cgroup *base, *pc;
	struct mem_section *section;
	unsigned long table_size;
	unsigned long nr;
	int index;

	nr = pfn_to_section_nr(pfn);
	section = __nr_to_section(nr);

	if (section->page_cgroup)
		return 0;

	table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
	base = alloc_page_cgroup(table_size, nid);

	/*
	 * The value stored in section->page_cgroup is (base - pfn)
	 * and it does not point to the memory block allocated above,
	 * causing kmemleak false positives.
	 */
	kmemleak_not_leak(base);

	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	for (index = 0; index < PAGES_PER_SECTION; index++) {
		pc = base + index;
		init_page_cgroup(pc, nr);
	}
	/*
	 * The passed "pfn" may not be aligned to SECTION.  For the calculation
	 * we need to apply a mask.
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}
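
/*
 * Example with hypothetical numbers: for the section starting at pfn 0x8000,
 * section->page_cgroup holds base - 0x8000, so lookup_page_cgroup() can
 * index it with a raw pfn: section->page_cgroup + 0x8123 == base + 0x123.
 */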

#ifdef CONFIG_MEMORY_HOTPLUG
void __free_page_cgroup(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_cgroup *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_cgroup)
		return;
	base = ms->page_cgroup + pfn;
	free_page_cgroup(base);
	ms->page_cgroup = NULL;
}

int __meminit online_page_cgroup(unsigned long start_pfn,
				 unsigned long nr_pages,
				 int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	if (nid == -1) {
		/*
		 * In this case, "nid" already exists and contains valid memory.
		 * "start_pfn" passed to us is a pfn which is an arg for
		 * online_pages(), and start_pfn should exist.
		 */
		nid = pfn_to_nid(start_pfn);
		VM_BUG_ON(!node_state(nid, N_ONLINE));
	}

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn, nid);
	}
	if (!fail)
		return 0;

	/* rollback: free every section table allocated so far */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);

	return -ENOMEM;
}

int __meminit offline_page_cgroup(unsigned long start_pfn,
				  unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);
	return 0;
}

static int __meminit page_cgroup_callback(struct notifier_block *self,
					  unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_cgroup(mn->start_pfn,
					 mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
		offline_page_cgroup(mn->start_pfn,
				    mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_CANCEL_ONLINE:
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	return notifier_from_errno(ret);
}

#endif

void __init page_cgroup_init(void)
{
	unsigned long pfn;
	int nid;

	if (mem_cgroup_disabled())
		return;

	for_each_node_state(nid, N_HIGH_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be aligned to SECTION and the
		 * page->flags of out of node pages are not initialized.  So we
		 * scan [start_pfn, the biggest section's pfn < end_pfn) here.
		 */
		for (pfn = start_pfn;
		     pfn < end_pfn;
		     pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			/*
			 * Nodes' pfns can be overlapping.
			 * We know some archs can have a node layout such as
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2 | ...
			 */
			if (pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_cgroup(pfn, nid))
				goto oom;
		}
	}
	hotplug_memory_notifier(page_cgroup_callback, 0);
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you "
			 "don't want memory cgroups\n");
	return;
oom:
	printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
	panic("Out of memory");
}

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	return;
}

#endif

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP

static DEFINE_MUTEX(swap_cgroup_mutex);
struct swap_cgroup_ctrl {
	struct page **map;
	unsigned long length;
	spinlock_t lock;
};

static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];

struct swap_cgroup {
	unsigned short id;
};
#define SC_PER_PAGE	(PAGE_SIZE/sizeof(struct swap_cgroup))
#define SC_POS_MASK	(SC_PER_PAGE - 1)
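
/*
 * Example, assuming 4KB pages and a two-byte struct swap_cgroup:
 * SC_PER_PAGE == 2048, so swap offset 5000 maps to idx = 5000 / 2048 = 2
 * and pos = 5000 & 2047 = 904, i.e. entry 904 of the page at ctrl->map[2].
 */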

/*
 * SwapCgroup implements "lookup" and "exchange" operations.
 * In typical usage, this swap_cgroup is accessed via memcg's charge/uncharge
 * against SwapCache. At swap_free(), this is accessed directly from swap.
 *
 * This means,
 *  - we have no race in "exchange" when we're accessed via SwapCache because
 *    SwapCache (and its swp_entry) is under lock.
 *  - When called via swap_free(), there is no user of this entry and no race.
 * Then, we don't need a lock around "exchange".
 *
 * TODO: we can push these buffers out to HIGHMEM.
 */
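
/*
 * Typical call pattern, as a simplified sketch of the memcg call sites:
 *
 *	swap_cgroup_record(ent, id);	charge: remember the owning cgroup
 *	lookup_swap_cgroup(ent);	probe without modifying the entry
 *	swap_cgroup_record(ent, 0);	uncharge/swap_free: clear and return it
 *
 * swap_cgroup_cmpxchg() is used when the recorded id may change underneath
 * us, e.g. while moving charges between cgroups.
 */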

/*
 * allocate buffer for swap_cgroup.
 */
static int swap_cgroup_prepare(int type)
{
	struct page *page;
	struct swap_cgroup_ctrl *ctrl;
	unsigned long idx, max;

	ctrl = &swap_cgroup_ctrl[type];

	for (idx = 0; idx < ctrl->length; idx++) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			goto not_enough_page;
		ctrl->map[idx] = page;
	}
	return 0;
not_enough_page:
	max = idx;
	for (idx = 0; idx < max; idx++)
		__free_page(ctrl->map[idx]);

	return -ENOMEM;
}

/**
 * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry.
 * @ent: swap entry to be cmpxchged
 * @old: old id
 * @new: new id
 *
 * Returns old id at success, 0 at failure.
 * (There is no mem_cgroup using 0 as its id)
 */
unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
				   unsigned short old, unsigned short new)
{
	int type = swp_type(ent);
	unsigned long offset = swp_offset(ent);
	unsigned long idx = offset / SC_PER_PAGE;
	unsigned long pos = offset & SC_POS_MASK;
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;
	unsigned long flags;
	unsigned short retval;

	ctrl = &swap_cgroup_ctrl[type];

	mappage = ctrl->map[idx];
	sc = page_address(mappage);
	sc += pos;
	spin_lock_irqsave(&ctrl->lock, flags);
	retval = sc->id;
	if (retval == old)
		sc->id = new;
	else
		retval = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	return retval;
}

/**
 * swap_cgroup_record - record mem_cgroup for this swp_entry.
 * @ent: swap entry to be recorded into
 * @id: mem_cgroup's id to be recorded
 *
 * Returns old value at success, 0 at failure.
 * (Of course, old value can be 0.)
 */
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
	int type = swp_type(ent);
	unsigned long offset = swp_offset(ent);
	unsigned long idx = offset / SC_PER_PAGE;
	unsigned long pos = offset & SC_POS_MASK;
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;
	unsigned short old;
	unsigned long flags;

	ctrl = &swap_cgroup_ctrl[type];

	mappage = ctrl->map[idx];
	sc = page_address(mappage);
	sc += pos;
	spin_lock_irqsave(&ctrl->lock, flags);
	old = sc->id;
	sc->id = id;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	return old;
}

/**
 * lookup_swap_cgroup - lookup mem_cgroup tied to swap entry
 * @ent: swap entry to be looked up.
 *
 * Returns CSS ID of mem_cgroup at success. 0 at failure. (0 is invalid ID)
 */
unsigned short lookup_swap_cgroup(swp_entry_t ent)
{
	int type = swp_type(ent);
	unsigned long offset = swp_offset(ent);
	unsigned long idx = offset / SC_PER_PAGE;
	unsigned long pos = offset & SC_POS_MASK;
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;
	unsigned short ret;

	ctrl = &swap_cgroup_ctrl[type];
	mappage = ctrl->map[idx];
	sc = page_address(mappage);
	sc += pos;
	ret = sc->id;
	return ret;
}

int swap_cgroup_swapon(int type, unsigned long max_pages)
{
	void *array;
	unsigned long array_size;
	unsigned long length;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return 0;

	length = DIV_ROUND_UP(max_pages, SC_PER_PAGE);
	array_size = length * sizeof(void *);

	array = vzalloc(array_size);
	if (!array)
		goto nomem;

	ctrl = &swap_cgroup_ctrl[type];
	mutex_lock(&swap_cgroup_mutex);
	ctrl->length = length;
	ctrl->map = array;
	spin_lock_init(&ctrl->lock);
	if (swap_cgroup_prepare(type)) {
		/* memory shortage */
		ctrl->map = NULL;
		ctrl->length = 0;
		mutex_unlock(&swap_cgroup_mutex);
		vfree(array);
		goto nomem;
	}
	mutex_unlock(&swap_cgroup_mutex);

	return 0;
nomem:
	printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n");
	printk(KERN_INFO
		"swap_cgroup can be disabled by swapaccount=0 boot option\n");
	return -ENOMEM;
}

void swap_cgroup_swapoff(int type)
{
	struct page **map;
	unsigned long i, length;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return;

	mutex_lock(&swap_cgroup_mutex);
	ctrl = &swap_cgroup_ctrl[type];
	map = ctrl->map;
	length = ctrl->length;
	ctrl->map = NULL;
	ctrl->length = 0;
	mutex_unlock(&swap_cgroup_mutex);

	if (map) {
		for (i = 0; i < length; i++) {
			struct page *page = map[i];
			if (page)
				__free_page(page);
		}
		vfree(map);
	}
}
#endif