mm/page_cgroup.c
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/bit_spinlock.h>
#include <linux/page_cgroup.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/cgroup.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>

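/*
 * Initialize one page_cgroup descriptor: clear its flags and owning
 * mem_cgroup and link it back to the struct page it shadows.
 */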
static void __meminit
__init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
{
	pc->flags = 0;
	pc->mem_cgroup = NULL;
	pc->page = pfn_to_page(pfn);
	INIT_LIST_HEAD(&pc->lru);
}

static unsigned long total_usage;

#if !defined(CONFIG_SPARSEMEM)

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	pgdat->node_page_cgroup = NULL;
}

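/*
 * Without sparsemem, page_cgroup descriptors sit in one array per node;
 * a page's descriptor is found by its pfn offset from the node's
 * first pfn.
 */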
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long offset;
	struct page_cgroup *base;

	base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
	if (unlikely(!base))
		return NULL;

	offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
	return base + offset;
}

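/*
 * Allocate a node's page_cgroup array from bootmem, covering every pfn
 * the node spans, and initialize each entry.
 */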
static int __init alloc_node_page_cgroup(int nid)
{
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	unsigned long start_pfn, nr_pages, index;

	start_pfn = NODE_DATA(nid)->node_start_pfn;
	nr_pages = NODE_DATA(nid)->node_spanned_pages;

	if (!nr_pages)
		return 0;

	table_size = sizeof(struct page_cgroup) * nr_pages;

	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!base)
		return -ENOMEM;
	for (index = 0; index < nr_pages; index++) {
		pc = base + index;
		__init_page_cgroup(pc, start_pfn + index);
	}
	NODE_DATA(nid)->node_page_cgroup = base;
	total_usage += table_size;
	return 0;
}

void __init page_cgroup_init_flatmem(void)
{
	int nid, fail;

	if (mem_cgroup_disabled())
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_cgroup(nid);
		if (fail)
			goto fail;
	}
	printk(KERN_INFO "allocated %lu bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you"
		" don't want memory cgroups\n");
	return;
fail:
	printk(KERN_CRIT "allocation of page_cgroup failed.\n");
	printk(KERN_CRIT "please try 'cgroup_disable=memory' boot option\n");
	panic("Out of memory");
}

#else /* CONFIG_SPARSEMEM */

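/*
 * With sparsemem, each mem_section carries its own page_cgroup map.
 * section->page_cgroup holds (map base - section start pfn), so the
 * descriptor for any pfn is found by adding the pfn directly.
 */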
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);

	if (!section->page_cgroup)
		return NULL;
	return section->page_cgroup + pfn;
}

/* called once slab is available; see the VM_BUG_ON() below */
static int __init_refok init_section_page_cgroup(unsigned long pfn)
{
	struct mem_section *section = __pfn_to_section(pfn);
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	int nid, index;

	if (!section->page_cgroup) {
		nid = page_to_nid(pfn_to_page(pfn));
		table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
		VM_BUG_ON(!slab_is_available());
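		/*
		 * Prefer a node-local kmalloc table; if a physically
		 * contiguous allocation cannot be had, fall back to vmalloc.
		 */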
		if (node_state(nid, N_HIGH_MEMORY)) {
			base = kmalloc_node(table_size,
				GFP_KERNEL | __GFP_NOWARN, nid);
			if (!base)
				base = vmalloc_node(table_size, nid);
		} else {
			base = kmalloc(table_size, GFP_KERNEL | __GFP_NOWARN);
			if (!base)
				base = vmalloc(table_size);
		}
		/*
		 * The value stored in section->page_cgroup is (base - pfn)
		 * and it does not point to the memory block allocated above,
		 * causing kmemleak false positives.
		 */
		kmemleak_not_leak(base);
	} else {
		/*
		 * The map is already allocated, but the address of the
		 * memmap may have changed, so reinitialize it if so.
		 */
		base = section->page_cgroup + pfn;
		table_size = 0;
		/* check whether the address of the memmap has changed */
		if (base->page == pfn_to_page(pfn))
			return 0;
	}

	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	for (index = 0; index < PAGES_PER_SECTION; index++) {
		pc = base + index;
		__init_page_cgroup(pc, pfn + index);
	}

	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
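/*
 * Free a section's page_cgroup map. It may have come from vmalloc,
 * kmalloc or bootmem; bootmem pages are reserved and must stay in place.
 */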
void __free_page_cgroup(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_cgroup *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_cgroup)
		return;
	base = ms->page_cgroup + pfn;
	if (is_vmalloc_addr(base)) {
		vfree(base);
		ms->page_cgroup = NULL;
	} else {
		struct page *page = virt_to_page(base);
		if (!PageReserved(page)) { /* is it bootmem? */
			kfree(base);
			ms->page_cgroup = NULL;
		}
	}
}

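/*
 * Allocate page_cgroup maps for every section touched by the range being
 * onlined; on failure, roll back whatever was allocated.
 */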
int __meminit online_page_cgroup(unsigned long start_pfn,
			unsigned long nr_pages,
			int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = start_pfn & ~(PAGES_PER_SECTION - 1);
	end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn);
	}
	if (!fail)
		return 0;

	/* rollback */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);

	return -ENOMEM;
}

int __meminit offline_page_cgroup(unsigned long start_pfn,
		unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	start = start_pfn & ~(PAGES_PER_SECTION - 1);
	end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);
	return 0;
}

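/*
 * Memory-hotplug notifier: allocate maps at MEM_GOING_ONLINE, so that a
 * failure here can still cancel the onlining, and free them once the
 * range is offline (or the onlining is cancelled).
 */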
static int __meminit page_cgroup_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_cgroup(mn->start_pfn,
				   mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
		offline_page_cgroup(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_CANCEL_ONLINE:
		/* roll back what MEM_GOING_ONLINE allocated */
		offline_page_cgroup(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	return notifier_from_errno(ret);
}

#endif /* CONFIG_MEMORY_HOTPLUG */

void __init page_cgroup_init(void)
{
	unsigned long pfn;
	int fail = 0;

	if (mem_cgroup_disabled())
		return;

	for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn);
	}
	if (fail) {
		printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
		panic("Out of memory");
	} else {
		hotplug_memory_notifier(page_cgroup_callback, 0);
	}
	printk(KERN_INFO "allocated %lu bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you don't"
		" want memory cgroups\n");
}

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	return;
}

#endif /* !CONFIG_SPARSEMEM */

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP

static DEFINE_MUTEX(swap_cgroup_mutex);
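
/*
 * Per-swap-device control: @map is an array of @length pages, each
 * holding SC_PER_PAGE swap_cgroup entries; @lock serializes id updates.
 */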
struct swap_cgroup_ctrl {
	struct page **map;
	unsigned long length;
	spinlock_t	lock;
};

struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];

struct swap_cgroup {
	unsigned short		id;
};
#define SC_PER_PAGE	(PAGE_SIZE/sizeof(struct swap_cgroup))
#define SC_POS_MASK	(SC_PER_PAGE - 1)

/*
 * SwapCgroup implements "lookup" and "exchange" operations.
 * In typical usage, swap_cgroup is accessed via memcg's charge/uncharge
 * operations against SwapCache. At swap_free(), it is accessed directly
 * from swap code.
 *
 * This means:
 *  - there is no race in "exchange" when accessed via SwapCache, because
 *    the SwapCache (and its swp_entry) is under lock.
 *  - when called via swap_free(), there is no user of the entry and hence
 *    no race.
 * So no lock is needed around "exchange".
 *
 * TODO: these buffers could be pushed out to HIGHMEM.
 */

/*
 * allocate buffer for swap_cgroup.
 */
static int swap_cgroup_prepare(int type)
{
	struct page *page;
	struct swap_cgroup_ctrl *ctrl;
	unsigned long idx, max;

	ctrl = &swap_cgroup_ctrl[type];

	for (idx = 0; idx < ctrl->length; idx++) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			goto not_enough_page;
		ctrl->map[idx] = page;
	}
	return 0;
not_enough_page:
	max = idx;
	for (idx = 0; idx < max; idx++)
		__free_page(ctrl->map[idx]);

	return -ENOMEM;
}

/**
 * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry.
 * @ent: swap entry to be cmpxchged
 * @old: old id
 * @new: new id
 *
 * Returns the old id on success, 0 on failure.
 * (No mem_cgroup uses 0 as its id.)
 */
unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
					unsigned short old, unsigned short new)
{
	int type = swp_type(ent);
	unsigned long offset = swp_offset(ent);
	unsigned long idx = offset / SC_PER_PAGE;
	unsigned long pos = offset & SC_POS_MASK;
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;
	unsigned long flags;
	unsigned short retval;

	ctrl = &swap_cgroup_ctrl[type];

	mappage = ctrl->map[idx];
	sc = page_address(mappage);
	sc += pos;
	spin_lock_irqsave(&ctrl->lock, flags);
	retval = sc->id;
	if (retval == old)
		sc->id = new;
	else
		retval = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	return retval;
}

/**
 * swap_cgroup_record - record mem_cgroup for this swp_entry.
 * @ent: swap entry to be recorded into
 * @id: css ID of the mem_cgroup to be recorded
 *
 * Returns the old value on success, 0 on failure.
 * (Of course, the old value can be 0.)
 */
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
	int type = swp_type(ent);
	unsigned long offset = swp_offset(ent);
	unsigned long idx = offset / SC_PER_PAGE;
	unsigned long pos = offset & SC_POS_MASK;
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;
	unsigned short old;
	unsigned long flags;

	ctrl = &swap_cgroup_ctrl[type];

	mappage = ctrl->map[idx];
	sc = page_address(mappage);
	sc += pos;
	spin_lock_irqsave(&ctrl->lock, flags);
	old = sc->id;
	sc->id = id;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	return old;
}

/**
 * lookup_swap_cgroup - lookup mem_cgroup tied to swap entry
 * @ent: swap entry to be looked up.
 *
 * Returns the CSS ID of the mem_cgroup on success, 0 on failure
 * (0 is an invalid ID).
 */
unsigned short lookup_swap_cgroup(swp_entry_t ent)
{
	int type = swp_type(ent);
	unsigned long offset = swp_offset(ent);
	unsigned long idx = offset / SC_PER_PAGE;
	unsigned long pos = offset & SC_POS_MASK;
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;
	unsigned short ret;

	ctrl = &swap_cgroup_ctrl[type];
	mappage = ctrl->map[idx];
	sc = page_address(mappage);
	sc += pos;
	ret = sc->id;
	return ret;
}

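/*
 * Called at swapon: size a map covering @max_pages swap slots and
 * populate its buffer pages via swap_cgroup_prepare().
 */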
int swap_cgroup_swapon(int type, unsigned long max_pages)
{
	void *array;
	unsigned long array_size;
	unsigned long length;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return 0;

	length = (max_pages / SC_PER_PAGE) + 1;
	array_size = length * sizeof(void *);

	array = vmalloc(array_size);
	if (!array)
		goto nomem;

	memset(array, 0, array_size);
	ctrl = &swap_cgroup_ctrl[type];
	mutex_lock(&swap_cgroup_mutex);
	ctrl->length = length;
	ctrl->map = array;
	spin_lock_init(&ctrl->lock);
	if (swap_cgroup_prepare(type)) {
		/* memory shortage */
		ctrl->map = NULL;
		ctrl->length = 0;
		vfree(array);
		mutex_unlock(&swap_cgroup_mutex);
		goto nomem;
	}
	mutex_unlock(&swap_cgroup_mutex);

	return 0;
nomem:
	printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n");
	printk(KERN_INFO
		"swap_cgroup can be disabled by noswapaccount boot option\n");
	return -ENOMEM;
}

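/*
 * Called at swapoff: free every buffer page and the map itself, under
 * swap_cgroup_mutex.
 */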
void swap_cgroup_swapoff(int type)
{
	unsigned long i;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return;

	mutex_lock(&swap_cgroup_mutex);
	ctrl = &swap_cgroup_ctrl[type];
	if (ctrl->map) {
		for (i = 0; i < ctrl->length; i++) {
			struct page *page = ctrl->map[i];
			if (page)
				__free_page(page);
		}
		vfree(ctrl->map);
		ctrl->map = NULL;
		ctrl->length = 0;
	}
	mutex_unlock(&swap_cgroup_mutex);
}

#endif /* CONFIG_CGROUP_MEM_RES_CTLR_SWAP */