memcg: fix behavior under memory.limit equals to memsw.limit
pandora-kernel.git: mm/memcontrol.c
1 /* memcontrol.c - Memory Controller
2  *
3  * Copyright IBM Corporation, 2007
4  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5  *
6  * Copyright 2007 OpenVZ SWsoft Inc
7  * Author: Pavel Emelianov <xemul@openvz.org>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  */
19
20 #include <linux/res_counter.h>
21 #include <linux/memcontrol.h>
22 #include <linux/cgroup.h>
23 #include <linux/mm.h>
24 #include <linux/pagemap.h>
25 #include <linux/smp.h>
26 #include <linux/page-flags.h>
27 #include <linux/backing-dev.h>
28 #include <linux/bit_spinlock.h>
29 #include <linux/rcupdate.h>
30 #include <linux/limits.h>
31 #include <linux/mutex.h>
32 #include <linux/slab.h>
33 #include <linux/swap.h>
34 #include <linux/spinlock.h>
35 #include <linux/fs.h>
36 #include <linux/seq_file.h>
37 #include <linux/vmalloc.h>
38 #include <linux/mm_inline.h>
39 #include <linux/page_cgroup.h>
40 #include "internal.h"
41
42 #include <asm/uaccess.h>
43
44 struct cgroup_subsys mem_cgroup_subsys __read_mostly;
45 #define MEM_CGROUP_RECLAIM_RETRIES      5
46
47 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
48 /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
49 int do_swap_account __read_mostly;
50 static int really_do_swap_account __initdata = 1; /* to remember the boot option */
51 #else
52 #define do_swap_account         (0)
53 #endif
54
55 static DEFINE_MUTEX(memcg_tasklist);    /* can be held under cgroup_mutex */
56
57 /*
58  * Statistics for memory cgroup.
59  */
60 enum mem_cgroup_stat_index {
61         /*
62          * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
63          */
64         MEM_CGROUP_STAT_CACHE,     /* # of pages charged as cache */
65         MEM_CGROUP_STAT_RSS,       /* # of pages charged as anon rss */
66         MEM_CGROUP_STAT_MAPPED_FILE,  /* # of pages charged as file rss */
67         MEM_CGROUP_STAT_PGPGIN_COUNT,   /* # of pages paged in */
68         MEM_CGROUP_STAT_PGPGOUT_COUNT,  /* # of pages paged out */
69
70         MEM_CGROUP_STAT_NSTATS,
71 };
72
73 struct mem_cgroup_stat_cpu {
74         s64 count[MEM_CGROUP_STAT_NSTATS];
75 } ____cacheline_aligned_in_smp;
76
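/*
 * cpustat[] below is a zero-length array; the allocation (done elsewhere in
 * this file) is presumably sized for one mem_cgroup_stat_cpu per possible CPU,
 * matching the for_each_possible_cpu() walk in mem_cgroup_read_stat() below.
 */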
77 struct mem_cgroup_stat {
78         struct mem_cgroup_stat_cpu cpustat[0];
79 };
80
81 /*
82  * For accounting in irq-disabled context, there is no need to increment the preempt count.
83  */
84 static inline void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat_cpu *stat,
85                 enum mem_cgroup_stat_index idx, int val)
86 {
87         stat->count[idx] += val;
88 }
89
90 static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
91                 enum mem_cgroup_stat_index idx)
92 {
93         int cpu;
94         s64 ret = 0;
95         for_each_possible_cpu(cpu)
96                 ret += stat->cpustat[cpu].count[idx];
97         return ret;
98 }
99
100 static s64 mem_cgroup_local_usage(struct mem_cgroup_stat *stat)
101 {
102         s64 ret;
103
104         ret = mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_CACHE);
105         ret += mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_RSS);
106         return ret;
107 }
108
109 /*
110  * per-zone information in memory controller.
111  */
112 struct mem_cgroup_per_zone {
113         /*
114          * spin_lock to protect the per cgroup LRU
115          */
116         struct list_head        lists[NR_LRU_LISTS];
117         unsigned long           count[NR_LRU_LISTS];
118
119         struct zone_reclaim_stat reclaim_stat;
120 };
121 /* Macro for accessing counter */
122 #define MEM_CGROUP_ZSTAT(mz, idx)       ((mz)->count[(idx)])
123
124 struct mem_cgroup_per_node {
125         struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
126 };
127
128 struct mem_cgroup_lru_info {
129         struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
130 };
131
132 /*
133  * The memory controller data structure. The memory controller controls both
134  * page cache and RSS per cgroup. We would eventually like to provide
135  * statistics based on the statistics developed by Rik Van Riel for clock-pro,
136  * to help the administrator determine what knobs to tune.
137  *
138  * TODO: Add a water mark for the memory controller. Reclaim will begin when
139  * we hit the water mark. Maybe even add a low water mark, such that
140  * no reclaim occurs from a cgroup below its low water mark; this is
141  * a feature that will be implemented much later in the future.
142  */
143 struct mem_cgroup {
144         struct cgroup_subsys_state css;
145         /*
146          * the counter to account for memory usage
147          */
148         struct res_counter res;
149         /*
150          * the counter to account for mem+swap usage.
151          */
152         struct res_counter memsw;
153         /*
154          * Per cgroup active and inactive list, similar to the
155          * per zone LRU lists.
156          */
157         struct mem_cgroup_lru_info info;
158
159         /*
160          * protects reclaim-related members.
161         */
162         spinlock_t reclaim_param_lock;
163
164         int     prev_priority;  /* for recording reclaim priority */
165
166         /*
167          * While reclaiming in a hierarchy, we cache the last child we
168          * reclaimed from.
169          */
170         int last_scanned_child;
171         /*
172          * Should the accounting and control be hierarchical, per subtree?
173          */
174         bool use_hierarchy;
175         unsigned long   last_oom_jiffies;
176         atomic_t        refcnt;
177
178         unsigned int    swappiness;
179
180         /* set when res.limit == memsw.limit; swap-out then cannot lower memsw usage */
181         bool            memsw_is_minimum;
182
183         /*
184          * statistics. This must be placed at the end of memcg.
185          */
186         struct mem_cgroup_stat stat;
187 };
188
189 enum charge_type {
190         MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
191         MEM_CGROUP_CHARGE_TYPE_MAPPED,
192         MEM_CGROUP_CHARGE_TYPE_SHMEM,   /* used by page migration of shmem */
193         MEM_CGROUP_CHARGE_TYPE_FORCE,   /* used by force_empty */
194         MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
195         MEM_CGROUP_CHARGE_TYPE_DROP,    /* a page was unused swap cache */
196         NR_CHARGE_TYPE,
197 };
198
199 /* only used here (for easy reading) */
200 #define PCGF_CACHE      (1UL << PCG_CACHE)
201 #define PCGF_USED       (1UL << PCG_USED)
202 #define PCGF_LOCK       (1UL << PCG_LOCK)
203 static const unsigned long
204 pcg_default_flags[NR_CHARGE_TYPE] = {
205         PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* File Cache */
206         PCGF_USED | PCGF_LOCK, /* Anon */
207         PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
208         0, /* FORCE */
209 };
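/*
 * Entries for SWAPOUT and DROP are implicitly zero here; those charge types
 * only appear on the uncharge paths below and never reach
 * __mem_cgroup_commit_charge(), which is what consumes this table.
 */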
210
211 /* for encoding cft->private value on file */
212 #define _MEM                    (0)
213 #define _MEMSWAP                (1)
214 #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
215 #define MEMFILE_TYPE(val)       (((val) >> 16) & 0xffff)
216 #define MEMFILE_ATTR(val)       ((val) & 0xffff)
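/*
 * For example, a mem+swap limit file would set cft->private to
 * MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT): the counter type lands in bits 16-31
 * and the RES_* attribute in bits 0-15, recovered again with MEMFILE_TYPE()
 * and MEMFILE_ATTR() when the file is read or written.
 */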
217
218 static void mem_cgroup_get(struct mem_cgroup *mem);
219 static void mem_cgroup_put(struct mem_cgroup *mem);
220 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
221
222 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
223                                          struct page_cgroup *pc,
224                                          bool charge)
225 {
226         int val = (charge)? 1 : -1;
227         struct mem_cgroup_stat *stat = &mem->stat;
228         struct mem_cgroup_stat_cpu *cpustat;
229         int cpu = get_cpu();
230
231         cpustat = &stat->cpustat[cpu];
232         if (PageCgroupCache(pc))
233                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
234         else
235                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val);
236
237         if (charge)
238                 __mem_cgroup_stat_add_safe(cpustat,
239                                 MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
240         else
241                 __mem_cgroup_stat_add_safe(cpustat,
242                                 MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
243         put_cpu();
244 }
245
246 static struct mem_cgroup_per_zone *
247 mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
248 {
249         return &mem->info.nodeinfo[nid]->zoneinfo[zid];
250 }
251
252 static struct mem_cgroup_per_zone *
253 page_cgroup_zoneinfo(struct page_cgroup *pc)
254 {
255         struct mem_cgroup *mem = pc->mem_cgroup;
256         int nid = page_cgroup_nid(pc);
257         int zid = page_cgroup_zid(pc);
258
259         if (!mem)
260                 return NULL;
261
262         return mem_cgroup_zoneinfo(mem, nid, zid);
263 }
264
265 static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
266                                         enum lru_list idx)
267 {
268         int nid, zid;
269         struct mem_cgroup_per_zone *mz;
270         u64 total = 0;
271
272         for_each_online_node(nid)
273                 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
274                         mz = mem_cgroup_zoneinfo(mem, nid, zid);
275                         total += MEM_CGROUP_ZSTAT(mz, idx);
276                 }
277         return total;
278 }
279
280 static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
281 {
282         return container_of(cgroup_subsys_state(cont,
283                                 mem_cgroup_subsys_id), struct mem_cgroup,
284                                 css);
285 }
286
287 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
288 {
289         /*
290          * mm_update_next_owner() may clear mm->owner to NULL
291          * if it races with swapoff, page migration, etc.
292          * So this can be called with p == NULL.
293          */
294         if (unlikely(!p))
295                 return NULL;
296
297         return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
298                                 struct mem_cgroup, css);
299 }
300
301 static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
302 {
303         struct mem_cgroup *mem = NULL;
304
305         if (!mm)
306                 return NULL;
307         /*
308          * Because we take no locks, mm->owner may be being moved to another
309          * cgroup. We use css_tryget() here even if this looks
310          * pessimistic (rather than adding locks here).
311          */
312         rcu_read_lock();
313         do {
314                 mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
315                 if (unlikely(!mem))
316                         break;
317         } while (!css_tryget(&mem->css));
318         rcu_read_unlock();
319         return mem;
320 }
321
322 /*
323  * Call the callback function against every cgroup under the hierarchy tree.
324  */
325 static int mem_cgroup_walk_tree(struct mem_cgroup *root, void *data,
326                           int (*func)(struct mem_cgroup *, void *))
327 {
328         int found, ret, nextid;
329         struct cgroup_subsys_state *css;
330         struct mem_cgroup *mem;
331
332         if (!root->use_hierarchy)
333                 return (*func)(root, data);
334
335         nextid = 1;
336         do {
337                 ret = 0;
338                 mem = NULL;
339
340                 rcu_read_lock();
341                 css = css_get_next(&mem_cgroup_subsys, nextid, &root->css,
342                                    &found);
343                 if (css && css_tryget(css))
344                         mem = container_of(css, struct mem_cgroup, css);
345                 rcu_read_unlock();
346
347                 if (mem) {
348                         ret = (*func)(mem, data);
349                         css_put(&mem->css);
350                 }
351                 nextid = found + 1;
352         } while (!ret && css);
353
354         return ret;
355 }
356
357 /*
358  * The following LRU functions are allowed to be used without PCG_LOCK.
359  * They are called from global LRU code, independently of memcg.
360  * What we have to take care of here is the validity of pc->mem_cgroup.
361  *
362  * Changes to pc->mem_cgroup happen on
363  * 1. charge
364  * 2. moving account
365  * In the typical case, "charge" is done before add-to-lru. The exception is
366  * SwapCache, which is added to the LRU before being charged.
367  * If the PCG_USED bit is not set, the page_cgroup is not added to this private LRU.
368  * When moving the account, the page is not on the LRU; it is isolated.
369  */
370
371 void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
372 {
373         struct page_cgroup *pc;
374         struct mem_cgroup *mem;
375         struct mem_cgroup_per_zone *mz;
376
377         if (mem_cgroup_disabled())
378                 return;
379         pc = lookup_page_cgroup(page);
380         /* can happen while we handle swapcache. */
381         if (list_empty(&pc->lru) || !pc->mem_cgroup)
382                 return;
383         /*
384          * We don't check PCG_USED bit. It's cleared when the "page" is finally
385          * removed from global LRU.
386          */
387         mz = page_cgroup_zoneinfo(pc);
388         mem = pc->mem_cgroup;
389         MEM_CGROUP_ZSTAT(mz, lru) -= 1;
390         list_del_init(&pc->lru);
391         return;
392 }
393
394 void mem_cgroup_del_lru(struct page *page)
395 {
396         mem_cgroup_del_lru_list(page, page_lru(page));
397 }
398
399 void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
400 {
401         struct mem_cgroup_per_zone *mz;
402         struct page_cgroup *pc;
403
404         if (mem_cgroup_disabled())
405                 return;
406
407         pc = lookup_page_cgroup(page);
408         /*
409          * The Used bit is set without atomic ops, but only after smp_wmb();
410          * pair it with smp_rmb() here so that pc->mem_cgroup is visible.
411          */
412         smp_rmb();
413         /* unused page is not rotated. */
414         if (!PageCgroupUsed(pc))
415                 return;
416         mz = page_cgroup_zoneinfo(pc);
417         list_move(&pc->lru, &mz->lists[lru]);
418 }
419
420 void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
421 {
422         struct page_cgroup *pc;
423         struct mem_cgroup_per_zone *mz;
424
425         if (mem_cgroup_disabled())
426                 return;
427         pc = lookup_page_cgroup(page);
428         /*
429          * The Used bit is set without atomic ops, but only after smp_wmb();
430          * pair it with smp_rmb() here so that pc->mem_cgroup is visible.
431          */
432         smp_rmb();
433         if (!PageCgroupUsed(pc))
434                 return;
435
436         mz = page_cgroup_zoneinfo(pc);
437         MEM_CGROUP_ZSTAT(mz, lru) += 1;
438         list_add(&pc->lru, &mz->lists[lru]);
439 }
440
441 /*
442  * When handling SwapCache, pc->mem_cgroup may change while it is linked to the
443  * LRU because the page may be reused after it is fully uncharged (because of
444  * SwapCache behavior). To handle that, unlink the page_cgroup from the LRU when
445  * charging it again. This function is only used to charge SwapCache. It is done
446  * under lock_page() and it is expected that zone->lru_lock is never held.
447  */
448 static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
449 {
450         unsigned long flags;
451         struct zone *zone = page_zone(page);
452         struct page_cgroup *pc = lookup_page_cgroup(page);
453
454         spin_lock_irqsave(&zone->lru_lock, flags);
455         /*
456          * Forget old LRU when this page_cgroup is *not* used. This Used bit
457          * is guarded by lock_page() because the page is SwapCache.
458          */
459         if (!PageCgroupUsed(pc))
460                 mem_cgroup_del_lru_list(page, page_lru(page));
461         spin_unlock_irqrestore(&zone->lru_lock, flags);
462 }
463
464 static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
465 {
466         unsigned long flags;
467         struct zone *zone = page_zone(page);
468         struct page_cgroup *pc = lookup_page_cgroup(page);
469
470         spin_lock_irqsave(&zone->lru_lock, flags);
471         /* link when the page is linked to LRU but page_cgroup isn't */
472         if (PageLRU(page) && list_empty(&pc->lru))
473                 mem_cgroup_add_lru_list(page, page_lru(page));
474         spin_unlock_irqrestore(&zone->lru_lock, flags);
475 }
476
477
478 void mem_cgroup_move_lists(struct page *page,
479                            enum lru_list from, enum lru_list to)
480 {
481         if (mem_cgroup_disabled())
482                 return;
483         mem_cgroup_del_lru_list(page, from);
484         mem_cgroup_add_lru_list(page, to);
485 }
486
487 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
488 {
489         int ret;
490         struct mem_cgroup *curr = NULL;
491
492         task_lock(task);
493         rcu_read_lock();
494         curr = try_get_mem_cgroup_from_mm(task->mm);
495         rcu_read_unlock();
496         task_unlock(task);
497         if (!curr)
498                 return 0;
499         if (curr->use_hierarchy)
500                 ret = css_is_ancestor(&curr->css, &mem->css);
501         else
502                 ret = (curr == mem);
503         css_put(&curr->css);
504         return ret;
505 }
506
507 /*
508  * prev_priority control... this is used in the memory reclaim path.
509  */
510 int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
511 {
512         int prev_priority;
513
514         spin_lock(&mem->reclaim_param_lock);
515         prev_priority = mem->prev_priority;
516         spin_unlock(&mem->reclaim_param_lock);
517
518         return prev_priority;
519 }
520
521 void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
522 {
523         spin_lock(&mem->reclaim_param_lock);
524         if (priority < mem->prev_priority)
525                 mem->prev_priority = priority;
526         spin_unlock(&mem->reclaim_param_lock);
527 }
528
529 void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
530 {
531         spin_lock(&mem->reclaim_param_lock);
532         mem->prev_priority = priority;
533         spin_unlock(&mem->reclaim_param_lock);
534 }
535
536 static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
537 {
538         unsigned long active;
539         unsigned long inactive;
540         unsigned long gb;
541         unsigned long inactive_ratio;
542
543         inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_ANON);
544         active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_ANON);
545
546         gb = (inactive + active) >> (30 - PAGE_SHIFT);
547         if (gb)
548                 inactive_ratio = int_sqrt(10 * gb);
549         else
550                 inactive_ratio = 1;
551
552         if (present_pages) {
553                 present_pages[0] = inactive;
554                 present_pages[1] = active;
555         }
556
557         return inactive_ratio;
558 }
559
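/*
 * Example: with 4GB of anon pages on this memcg's LRUs, calc_inactive_ratio()
 * above yields int_sqrt(10 * 4) = 6, so the inactive anon list is treated as
 * low once active > 6 * inactive.
 */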
560 int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
561 {
562         unsigned long active;
563         unsigned long inactive;
564         unsigned long present_pages[2];
565         unsigned long inactive_ratio;
566
567         inactive_ratio = calc_inactive_ratio(memcg, present_pages);
568
569         inactive = present_pages[0];
570         active = present_pages[1];
571
572         if (inactive * inactive_ratio < active)
573                 return 1;
574
575         return 0;
576 }
577
578 int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
579 {
580         unsigned long active;
581         unsigned long inactive;
582
583         inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_FILE);
584         active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_FILE);
585
586         return (active > inactive);
587 }
588
589 unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
590                                        struct zone *zone,
591                                        enum lru_list lru)
592 {
593         int nid = zone->zone_pgdat->node_id;
594         int zid = zone_idx(zone);
595         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
596
597         return MEM_CGROUP_ZSTAT(mz, lru);
598 }
599
600 struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
601                                                       struct zone *zone)
602 {
603         int nid = zone->zone_pgdat->node_id;
604         int zid = zone_idx(zone);
605         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
606
607         return &mz->reclaim_stat;
608 }
609
610 struct zone_reclaim_stat *
611 mem_cgroup_get_reclaim_stat_from_page(struct page *page)
612 {
613         struct page_cgroup *pc;
614         struct mem_cgroup_per_zone *mz;
615
616         if (mem_cgroup_disabled())
617                 return NULL;
618
619         pc = lookup_page_cgroup(page);
620         /*
621          * The Used bit is set without atomic ops, but only after smp_wmb();
622          * pair it with smp_rmb() here so that pc->mem_cgroup is visible.
623          */
624         smp_rmb();
625         if (!PageCgroupUsed(pc))
626                 return NULL;
627
628         mz = page_cgroup_zoneinfo(pc);
629         if (!mz)
630                 return NULL;
631
632         return &mz->reclaim_stat;
633 }
634
635 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
636                                         struct list_head *dst,
637                                         unsigned long *scanned, int order,
638                                         int mode, struct zone *z,
639                                         struct mem_cgroup *mem_cont,
640                                         int active, int file)
641 {
642         unsigned long nr_taken = 0;
643         struct page *page;
644         unsigned long scan;
645         LIST_HEAD(pc_list);
646         struct list_head *src;
647         struct page_cgroup *pc, *tmp;
648         int nid = z->zone_pgdat->node_id;
649         int zid = zone_idx(z);
650         struct mem_cgroup_per_zone *mz;
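        /*
         * Map (file, active) onto an lru_list index: file lists sit LRU_FILE
         * entries above the anon ones and active lists one entry above the
         * inactive ones, so e.g. file=1/active=0 selects LRU_INACTIVE_FILE.
         */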
651         int lru = LRU_FILE * !!file + !!active;
652
653         BUG_ON(!mem_cont);
654         mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
655         src = &mz->lists[lru];
656
657         scan = 0;
658         list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
659                 if (scan >= nr_to_scan)
660                         break;
661
662                 page = pc->page;
663                 if (unlikely(!PageCgroupUsed(pc)))
664                         continue;
665                 if (unlikely(!PageLRU(page)))
666                         continue;
667
668                 scan++;
669                 if (__isolate_lru_page(page, mode, file) == 0) {
670                         list_move(&page->lru, dst);
671                         nr_taken++;
672                 }
673         }
674
675         *scanned = scan;
676         return nr_taken;
677 }
678
679 #define mem_cgroup_from_res_counter(counter, member)    \
680         container_of(counter, struct mem_cgroup, member)
681
682 static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
683 {
684         if (do_swap_account) {
685                 if (res_counter_check_under_limit(&mem->res) &&
686                         res_counter_check_under_limit(&mem->memsw))
687                         return true;
688         } else
689                 if (res_counter_check_under_limit(&mem->res))
690                         return true;
691         return false;
692 }
693
694 static unsigned int get_swappiness(struct mem_cgroup *memcg)
695 {
696         struct cgroup *cgrp = memcg->css.cgroup;
697         unsigned int swappiness;
698
699         /* root ? */
700         if (cgrp->parent == NULL)
701                 return vm_swappiness;
702
703         spin_lock(&memcg->reclaim_param_lock);
704         swappiness = memcg->swappiness;
705         spin_unlock(&memcg->reclaim_param_lock);
706
707         return swappiness;
708 }
709
710 static int mem_cgroup_count_children_cb(struct mem_cgroup *mem, void *data)
711 {
712         int *val = data;
713         (*val)++;
714         return 0;
715 }
716
717 /**
718  * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
719  * @memcg: The memory cgroup that went over limit
720  * @p: Task that is going to be killed
721  *
722  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
723  * enabled
724  */
725 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
726 {
727         struct cgroup *task_cgrp;
728         struct cgroup *mem_cgrp;
729         /*
730          * Need a buffer in BSS, can't rely on allocations. The code relies
731          * on the assumption that OOM is serialized for memory controller.
732          * If this assumption is broken, revisit this code.
733          */
734         static char memcg_name[PATH_MAX];
735         int ret;
736
737         if (!memcg)
738                 return;
739
740
741         rcu_read_lock();
742
743         mem_cgrp = memcg->css.cgroup;
744         task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
745
746         ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
747         if (ret < 0) {
748                 /*
749                  * Unfortunately, we are unable to convert to a useful name,
750                  * but we'll still print out the usage information.
751                  */
752                 rcu_read_unlock();
753                 goto done;
754         }
755         rcu_read_unlock();
756
757         printk(KERN_INFO "Task in %s killed", memcg_name);
758
759         rcu_read_lock();
760         ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
761         if (ret < 0) {
762                 rcu_read_unlock();
763                 goto done;
764         }
765         rcu_read_unlock();
766
767         /*
768  * Continues from above, so we don't need a KERN_ level
769          */
770         printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
771 done:
772
773         printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
774                 res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
775                 res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
776                 res_counter_read_u64(&memcg->res, RES_FAILCNT));
777         printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
778                 "failcnt %llu\n",
779                 res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
780                 res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
781                 res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
782 }
783
784 /*
785  * This function returns the number of memcgs under the hierarchy tree. Returns
786  * 1 (self count) if there are no children.
787  */
788 static int mem_cgroup_count_children(struct mem_cgroup *mem)
789 {
790         int num = 0;
791         mem_cgroup_walk_tree(mem, &num, mem_cgroup_count_children_cb);
792         return num;
793 }
794
795 /*
796  * Visit the first child (need not be the first child as per the ordering
797  * of the cgroup list, since we track last_scanned_child) of @mem and use
798  * that to reclaim free pages from.
799  */
800 static struct mem_cgroup *
801 mem_cgroup_select_victim(struct mem_cgroup *root_mem)
802 {
803         struct mem_cgroup *ret = NULL;
804         struct cgroup_subsys_state *css;
805         int nextid, found;
806
807         if (!root_mem->use_hierarchy) {
808                 css_get(&root_mem->css);
809                 ret = root_mem;
810         }
811
812         while (!ret) {
813                 rcu_read_lock();
814                 nextid = root_mem->last_scanned_child + 1;
815                 css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
816                                    &found);
817                 if (css && css_tryget(css))
818                         ret = container_of(css, struct mem_cgroup, css);
819
820                 rcu_read_unlock();
821                 /* Updates scanning parameter */
822                 spin_lock(&root_mem->reclaim_param_lock);
823                 if (!css) {
824                         /* this means start scan from ID:1 */
825                         root_mem->last_scanned_child = 0;
826                 } else
827                         root_mem->last_scanned_child = found;
828                 spin_unlock(&root_mem->reclaim_param_lock);
829         }
830
831         return ret;
832 }
833
834 /*
835  * Scan the hierarchy if needed to reclaim memory. We remember the last child
836  * we reclaimed from, so that we don't end up penalizing one child extensively
837  * based on its position in the children list.
838  *
839  * root_mem is the original ancestor that we've been reclaiming from.
840  *
841  * We give up and return to the caller when we visit root_mem twice.
842  * (other groups can be removed while we're walking....)
843  *
844  * If shrink==true, this returns immediately to avoid freeing too much.
845  */
846 static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
847                                    gfp_t gfp_mask, bool noswap, bool shrink)
848 {
849         struct mem_cgroup *victim;
850         int ret, total = 0;
851         int loop = 0;
852
853         /* If memsw_is_minimum==1, swap-out cannot lower memsw usage, so it is of no use. */
854         if (root_mem->memsw_is_minimum)
855                 noswap = true;
856
857         while (loop < 2) {
858                 victim = mem_cgroup_select_victim(root_mem);
859                 if (victim == root_mem)
860                         loop++;
861                 if (!mem_cgroup_local_usage(&victim->stat)) {
862                         /* this cgroup's local usage == 0 */
863                         css_put(&victim->css);
864                         continue;
865                 }
866                 /* we use swappiness of local cgroup */
867                 ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, noswap,
868                                                    get_swappiness(victim));
869                 css_put(&victim->css);
870                 /*
871                  * When shrinking usage, we can't tell here whether we should stop
872                  * or reclaim more; that depends on the caller. last_scanned_child
873                  * is enough to keep fairness across the tree.
874                  */
875                 if (shrink)
876                         return ret;
877                 total += ret;
878                 if (mem_cgroup_check_under_limit(root_mem))
879                         return 1 + total;
880         }
881         return total;
882 }
883
884 bool mem_cgroup_oom_called(struct task_struct *task)
885 {
886         bool ret = false;
887         struct mem_cgroup *mem;
888         struct mm_struct *mm;
889
890         rcu_read_lock();
891         mm = task->mm;
892         if (!mm)
893                 mm = &init_mm;
894         mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
895         if (mem && time_before(jiffies, mem->last_oom_jiffies + HZ/10))
896                 ret = true;
897         rcu_read_unlock();
898         return ret;
899 }
900
901 static int record_last_oom_cb(struct mem_cgroup *mem, void *data)
902 {
903         mem->last_oom_jiffies = jiffies;
904         return 0;
905 }
906
907 static void record_last_oom(struct mem_cgroup *mem)
908 {
909         mem_cgroup_walk_tree(mem, NULL, record_last_oom_cb);
910 }
911
912 /*
913  * Currently used to update mapped file statistics, but the routine can be
914  * generalized to update other statistics as well.
915  */
916 void mem_cgroup_update_mapped_file_stat(struct page *page, int val)
917 {
918         struct mem_cgroup *mem;
919         struct mem_cgroup_stat *stat;
920         struct mem_cgroup_stat_cpu *cpustat;
921         int cpu;
922         struct page_cgroup *pc;
923
924         if (!page_is_file_cache(page))
925                 return;
926
927         pc = lookup_page_cgroup(page);
928         if (unlikely(!pc))
929                 return;
930
931         lock_page_cgroup(pc);
932         mem = pc->mem_cgroup;
933         if (!mem)
934                 goto done;
935
936         if (!PageCgroupUsed(pc))
937                 goto done;
938
939         /*
940          * Preemption is already disabled, we don't need get_cpu()
941          */
942         cpu = smp_processor_id();
943         stat = &mem->stat;
944         cpustat = &stat->cpustat[cpu];
945
946         __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE, val);
947 done:
948         unlock_page_cgroup(pc);
949 }
950
951 /*
952  * Unlike the exported interface, an "oom" parameter is added. If oom==true,
953  * the OOM killer can be invoked.
954  */
955 static int __mem_cgroup_try_charge(struct mm_struct *mm,
956                         gfp_t gfp_mask, struct mem_cgroup **memcg,
957                         bool oom)
958 {
959         struct mem_cgroup *mem, *mem_over_limit;
960         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
961         struct res_counter *fail_res;
962
963         if (unlikely(test_thread_flag(TIF_MEMDIE))) {
964                 /* Don't account this! */
965                 *memcg = NULL;
966                 return 0;
967         }
968
969         /*
970          * We always charge the cgroup the mm_struct belongs to.
971          * The mm_struct's mem_cgroup changes on task migration if the
972          * thread group leader migrates. It's possible that mm is not
973          * set; if so, charge the init_mm (this happens for pagecache usage).
974          */
975         mem = *memcg;
976         if (likely(!mem)) {
977                 mem = try_get_mem_cgroup_from_mm(mm);
978                 *memcg = mem;
979         } else {
980                 css_get(&mem->css);
981         }
982         if (unlikely(!mem))
983                 return 0;
984
985         VM_BUG_ON(css_is_removed(&mem->css));
986
987         while (1) {
988                 int ret;
989                 bool noswap = false;
990
991                 ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
992                 if (likely(!ret)) {
993                         if (!do_swap_account)
994                                 break;
995                         ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
996                                                         &fail_res);
997                         if (likely(!ret))
998                                 break;
999                         /* mem+swap counter fails */
1000                         res_counter_uncharge(&mem->res, PAGE_SIZE);
1001                         noswap = true;
1002                         mem_over_limit = mem_cgroup_from_res_counter(fail_res,
1003                                                                         memsw);
1004                 } else
1005                         /* mem counter fails */
1006                         mem_over_limit = mem_cgroup_from_res_counter(fail_res,
1007                                                                         res);
1008
1009                 if (!(gfp_mask & __GFP_WAIT))
1010                         goto nomem;
1011
1012                 ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
1013                                                         noswap, false);
1014                 if (ret)
1015                         continue;
1016
1017                 /*
1018                  * try_to_free_mem_cgroup_pages() might not give us a full
1019                  * picture of reclaim. Some pages are reclaimed and might be
1020                  * moved to swap cache or just unmapped from the cgroup.
1021                  * Check the limit again to see if the reclaim reduced the
1022                  * current usage of the cgroup before giving up
1023                  *
1024                  */
1025                 if (mem_cgroup_check_under_limit(mem_over_limit))
1026                         continue;
1027
1028                 if (!nr_retries--) {
1029                         if (oom) {
1030                                 mutex_lock(&memcg_tasklist);
1031                                 mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
1032                                 mutex_unlock(&memcg_tasklist);
1033                                 record_last_oom(mem_over_limit);
1034                         }
1035                         goto nomem;
1036                 }
1037         }
1038         return 0;
1039 nomem:
1040         css_put(&mem->css);
1041         return -ENOMEM;
1042 }
1043
1044
1045 /*
1046  * A helper function to get a mem_cgroup from an ID. This must be called under
1047  * rcu_read_lock(). The caller must check css_is_removed() or similar if that
1048  * is a concern. (Dropping a refcnt from swap can happen against a removed
1049  * memcg.)
1050  */
1051 static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
1052 {
1053         struct cgroup_subsys_state *css;
1054
1055         /* ID 0 is unused ID */
1056         if (!id)
1057                 return NULL;
1058         css = css_lookup(&mem_cgroup_subsys, id);
1059         if (!css)
1060                 return NULL;
1061         return container_of(css, struct mem_cgroup, css);
1062 }
1063
1064 static struct mem_cgroup *try_get_mem_cgroup_from_swapcache(struct page *page)
1065 {
1066         struct mem_cgroup *mem;
1067         struct page_cgroup *pc;
1068         unsigned short id;
1069         swp_entry_t ent;
1070
1071         VM_BUG_ON(!PageLocked(page));
1072
1073         if (!PageSwapCache(page))
1074                 return NULL;
1075
1076         pc = lookup_page_cgroup(page);
1077         lock_page_cgroup(pc);
1078         if (PageCgroupUsed(pc)) {
1079                 mem = pc->mem_cgroup;
1080                 if (mem && !css_tryget(&mem->css))
1081                         mem = NULL;
1082         } else {
1083                 ent.val = page_private(page);
1084                 id = lookup_swap_cgroup(ent);
1085                 rcu_read_lock();
1086                 mem = mem_cgroup_lookup(id);
1087                 if (mem && !css_tryget(&mem->css))
1088                         mem = NULL;
1089                 rcu_read_unlock();
1090         }
1091         unlock_page_cgroup(pc);
1092         return mem;
1093 }
1094
1095 /*
1096  * Commit a charge obtained by __mem_cgroup_try_charge() and move the page_cgroup
1097  * to the USED state. If it is already USED, uncharge and return.
1098  */
1099
1100 static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
1101                                      struct page_cgroup *pc,
1102                                      enum charge_type ctype)
1103 {
1104         /* try_charge() can return NULL to *memcg; take care of that here. */
1105         if (!mem)
1106                 return;
1107
1108         lock_page_cgroup(pc);
1109         if (unlikely(PageCgroupUsed(pc))) {
1110                 unlock_page_cgroup(pc);
1111                 res_counter_uncharge(&mem->res, PAGE_SIZE);
1112                 if (do_swap_account)
1113                         res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1114                 css_put(&mem->css);
1115                 return;
1116         }
1117         pc->mem_cgroup = mem;
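        /*
         * Publish pc->mem_cgroup before setting the USED flag below; this
         * smp_wmb() pairs with the smp_rmb() in the LRU helpers above.
         */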
1118         smp_wmb();
1119         pc->flags = pcg_default_flags[ctype];
1120
1121         mem_cgroup_charge_statistics(mem, pc, true);
1122
1123         unlock_page_cgroup(pc);
1124 }
1125
1126 /**
1127  * mem_cgroup_move_account - move account of the page
1128  * @pc: page_cgroup of the page.
1129  * @from: mem_cgroup which the page is moved from.
1130  * @to: mem_cgroup which the page is moved to. @from != @to.
1131  *
1132  * The caller must confirm the following:
1133  * - page is not on LRU (isolate_page() is useful.)
1134  *
1135  * Returns 0 on success,
1136  * returns -EBUSY when the lock is busy or "pc" is unstable.
1137  *
1138  * This function does "uncharge" from old cgroup but doesn't do "charge" to
1139  * new cgroup. It should be done by a caller.
1140  */
1141
1142 static int mem_cgroup_move_account(struct page_cgroup *pc,
1143         struct mem_cgroup *from, struct mem_cgroup *to)
1144 {
1145         struct mem_cgroup_per_zone *from_mz, *to_mz;
1146         int nid, zid;
1147         int ret = -EBUSY;
1148         struct page *page;
1149         int cpu;
1150         struct mem_cgroup_stat *stat;
1151         struct mem_cgroup_stat_cpu *cpustat;
1152
1153         VM_BUG_ON(from == to);
1154         VM_BUG_ON(PageLRU(pc->page));
1155
1156         nid = page_cgroup_nid(pc);
1157         zid = page_cgroup_zid(pc);
1158         from_mz =  mem_cgroup_zoneinfo(from, nid, zid);
1159         to_mz =  mem_cgroup_zoneinfo(to, nid, zid);
1160
1161         if (!trylock_page_cgroup(pc))
1162                 return ret;
1163
1164         if (!PageCgroupUsed(pc))
1165                 goto out;
1166
1167         if (pc->mem_cgroup != from)
1168                 goto out;
1169
1170         res_counter_uncharge(&from->res, PAGE_SIZE);
1171         mem_cgroup_charge_statistics(from, pc, false);
1172
1173         page = pc->page;
1174         if (page_is_file_cache(page) && page_mapped(page)) {
1175                 cpu = smp_processor_id();
1176                 /* Update mapped_file data for mem_cgroup "from" */
1177                 stat = &from->stat;
1178                 cpustat = &stat->cpustat[cpu];
1179                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE,
1180                                                 -1);
1181
1182                 /* Update mapped_file data for mem_cgroup "to" */
1183                 stat = &to->stat;
1184                 cpustat = &stat->cpustat[cpu];
1185                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE,
1186                                                 1);
1187         }
1188
1189         if (do_swap_account)
1190                 res_counter_uncharge(&from->memsw, PAGE_SIZE);
1191         css_put(&from->css);
1192
1193         css_get(&to->css);
1194         pc->mem_cgroup = to;
1195         mem_cgroup_charge_statistics(to, pc, true);
1196         ret = 0;
1197 out:
1198         unlock_page_cgroup(pc);
1199         return ret;
1200 }
1201
1202 /*
1203  * move charges to its parent.
1204  */
1205
1206 static int mem_cgroup_move_parent(struct page_cgroup *pc,
1207                                   struct mem_cgroup *child,
1208                                   gfp_t gfp_mask)
1209 {
1210         struct page *page = pc->page;
1211         struct cgroup *cg = child->css.cgroup;
1212         struct cgroup *pcg = cg->parent;
1213         struct mem_cgroup *parent;
1214         int ret;
1215
1216         /* Is ROOT ? */
1217         if (!pcg)
1218                 return -EINVAL;
1219
1220
1221         parent = mem_cgroup_from_cont(pcg);
1222
1223
1224         ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
1225         if (ret || !parent)
1226                 return ret;
1227
1228         if (!get_page_unless_zero(page)) {
1229                 ret = -EBUSY;
1230                 goto uncharge;
1231         }
1232
1233         ret = isolate_lru_page(page);
1234
1235         if (ret)
1236                 goto cancel;
1237
1238         ret = mem_cgroup_move_account(pc, child, parent);
1239
1240         putback_lru_page(page);
1241         if (!ret) {
1242                 put_page(page);
1243                 /* drop extra refcnt by try_charge() */
1244                 css_put(&parent->css);
1245                 return 0;
1246         }
1247
1248 cancel:
1249         put_page(page);
1250 uncharge:
1251         /* drop extra refcnt by try_charge() */
1252         css_put(&parent->css);
1253         /* uncharge if move fails */
1254         res_counter_uncharge(&parent->res, PAGE_SIZE);
1255         if (do_swap_account)
1256                 res_counter_uncharge(&parent->memsw, PAGE_SIZE);
1257         return ret;
1258 }
1259
1260 /*
1261  * Charge the memory controller for page usage.
1262  * Return
1263  * 0 if the charge was successful
1264  * < 0 if the cgroup is over its limit
1265  */
1266 static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
1267                                 gfp_t gfp_mask, enum charge_type ctype,
1268                                 struct mem_cgroup *memcg)
1269 {
1270         struct mem_cgroup *mem;
1271         struct page_cgroup *pc;
1272         int ret;
1273
1274         pc = lookup_page_cgroup(page);
1275         /* can happen at boot */
1276         if (unlikely(!pc))
1277                 return 0;
1278         prefetchw(pc);
1279
1280         mem = memcg;
1281         ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
1282         if (ret || !mem)
1283                 return ret;
1284
1285         __mem_cgroup_commit_charge(mem, pc, ctype);
1286         return 0;
1287 }
1288
1289 int mem_cgroup_newpage_charge(struct page *page,
1290                               struct mm_struct *mm, gfp_t gfp_mask)
1291 {
1292         if (mem_cgroup_disabled())
1293                 return 0;
1294         if (PageCompound(page))
1295                 return 0;
1296         /*
1297          * If already mapped, we don't have to account.
1298          * If page cache, page->mapping has address_space.
1299          * But page->mapping may hold a stale anon_vma pointer;
1300          * detect that with the PageAnon() check. A newly-mapped-anon page's
1301          * page->mapping is NULL.
1302          */
1303         if (page_mapped(page) || (page->mapping && !PageAnon(page)))
1304                 return 0;
1305         if (unlikely(!mm))
1306                 mm = &init_mm;
1307         return mem_cgroup_charge_common(page, mm, gfp_mask,
1308                                 MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
1309 }
1310
1311 static void
1312 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
1313                                         enum charge_type ctype);
1314
1315 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
1316                                 gfp_t gfp_mask)
1317 {
1318         struct mem_cgroup *mem = NULL;
1319         int ret;
1320
1321         if (mem_cgroup_disabled())
1322                 return 0;
1323         if (PageCompound(page))
1324                 return 0;
1325         /*
1326          * Corner case handling. This is usually called from add_to_page_cache().
1327          * But some filesystems (shmem) precharge the page before calling it
1328          * and then call add_to_page_cache() with GFP_NOWAIT.
1329          *
1330          * In the GFP_NOWAIT case, the page may be pre-charged before calling
1331          * add_to_page_cache() (see shmem.c); check it here and avoid charging
1332          * twice. (It works, but at a slightly larger cost.)
1333          * And when the page is SwapCache, it should take swap information
1334          * into account. This is under lock_page() now.
1335          */
1336         if (!(gfp_mask & __GFP_WAIT)) {
1337                 struct page_cgroup *pc;
1338
1339
1340                 pc = lookup_page_cgroup(page);
1341                 if (!pc)
1342                         return 0;
1343                 lock_page_cgroup(pc);
1344                 if (PageCgroupUsed(pc)) {
1345                         unlock_page_cgroup(pc);
1346                         return 0;
1347                 }
1348                 unlock_page_cgroup(pc);
1349         }
1350
1351         if (unlikely(!mm && !mem))
1352                 mm = &init_mm;
1353
1354         if (page_is_file_cache(page))
1355                 return mem_cgroup_charge_common(page, mm, gfp_mask,
1356                                 MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
1357
1358         /* shmem */
1359         if (PageSwapCache(page)) {
1360                 ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
1361                 if (!ret)
1362                         __mem_cgroup_commit_charge_swapin(page, mem,
1363                                         MEM_CGROUP_CHARGE_TYPE_SHMEM);
1364         } else
1365                 ret = mem_cgroup_charge_common(page, mm, gfp_mask,
1366                                         MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
1367
1368         return ret;
1369 }
1370
1371 /*
1372  * During swap-in (try_charge -> commit or cancel), the page is locked.
1373  * And when try_charge() returns successfully, one refcnt to the memcg, without
1374  * a struct page_cgroup, is acquired. This refcnt will be consumed by
1375  * "commit()" or released by "cancel()".
1376  */
1377 int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
1378                                  struct page *page,
1379                                  gfp_t mask, struct mem_cgroup **ptr)
1380 {
1381         struct mem_cgroup *mem;
1382         int ret;
1383
1384         if (mem_cgroup_disabled())
1385                 return 0;
1386
1387         if (!do_swap_account)
1388                 goto charge_cur_mm;
1389         /*
1390          * A racing thread's fault, or swapoff, may have already updated
1391          * the pte, and even removed page from swap cache: return success
1392          * to go on to do_swap_page()'s pte_same() test, which should fail.
1393          */
1394         if (!PageSwapCache(page))
1395                 return 0;
1396         mem = try_get_mem_cgroup_from_swapcache(page);
1397         if (!mem)
1398                 goto charge_cur_mm;
1399         *ptr = mem;
1400         ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
1401         /* drop extra refcnt from tryget */
1402         css_put(&mem->css);
1403         return ret;
1404 charge_cur_mm:
1405         if (unlikely(!mm))
1406                 mm = &init_mm;
1407         return __mem_cgroup_try_charge(mm, mask, ptr, true);
1408 }
1409
1410 static void
1411 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
1412                                         enum charge_type ctype)
1413 {
1414         struct page_cgroup *pc;
1415
1416         if (mem_cgroup_disabled())
1417                 return;
1418         if (!ptr)
1419                 return;
1420         pc = lookup_page_cgroup(page);
1421         mem_cgroup_lru_del_before_commit_swapcache(page);
1422         __mem_cgroup_commit_charge(ptr, pc, ctype);
1423         mem_cgroup_lru_add_after_commit_swapcache(page);
1424         /*
1425          * Now the swap is in memory. This means this page may be
1426          * counted both as mem and swap.... a double count.
1427          * Fix it by uncharging from memsw. Basically, this SwapCache is stable
1428          * under lock_page(). But in do_swap_page() (memory.c), reuse_swap_page()
1429          * may call delete_from_swap_cache() before we reach here.
1430          */
1431         if (do_swap_account && PageSwapCache(page)) {
1432                 swp_entry_t ent = {.val = page_private(page)};
1433                 unsigned short id;
1434                 struct mem_cgroup *memcg;
1435
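                /*
                 * swap_cgroup_record(ent, 0) clears the swap record and hands
                 * back the css id that was stored there at swap-out time.
                 */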
1436                 id = swap_cgroup_record(ent, 0);
1437                 rcu_read_lock();
1438                 memcg = mem_cgroup_lookup(id);
1439                 if (memcg) {
1440                         /*
1441                          * This recorded memcg may be an obsolete one, so avoid
1442                          * calling css_tryget().
1443                          */
1444                         res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1445                         mem_cgroup_put(memcg);
1446                 }
1447                 rcu_read_unlock();
1448         }
1449         /* add this page(page_cgroup) to the LRU we want. */
1450
1451 }
1452
1453 void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
1454 {
1455         __mem_cgroup_commit_charge_swapin(page, ptr,
1456                                         MEM_CGROUP_CHARGE_TYPE_MAPPED);
1457 }
1458
1459 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
1460 {
1461         if (mem_cgroup_disabled())
1462                 return;
1463         if (!mem)
1464                 return;
1465         res_counter_uncharge(&mem->res, PAGE_SIZE);
1466         if (do_swap_account)
1467                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1468         css_put(&mem->css);
1469 }
1470
1471
1472 /*
1473  * uncharge if !page_mapped(page)
1474  */
1475 static struct mem_cgroup *
1476 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
1477 {
1478         struct page_cgroup *pc;
1479         struct mem_cgroup *mem = NULL;
1480         struct mem_cgroup_per_zone *mz;
1481
1482         if (mem_cgroup_disabled())
1483                 return NULL;
1484
1485         if (PageSwapCache(page))
1486                 return NULL;
1487
1488         /*
1489          * Check if our page_cgroup is valid
1490          */
1491         pc = lookup_page_cgroup(page);
1492         if (unlikely(!pc || !PageCgroupUsed(pc)))
1493                 return NULL;
1494
1495         lock_page_cgroup(pc);
1496
1497         mem = pc->mem_cgroup;
1498
1499         if (!PageCgroupUsed(pc))
1500                 goto unlock_out;
1501
1502         switch (ctype) {
1503         case MEM_CGROUP_CHARGE_TYPE_MAPPED:
1504         case MEM_CGROUP_CHARGE_TYPE_DROP:
1505                 if (page_mapped(page))
1506                         goto unlock_out;
1507                 break;
1508         case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
1509                 if (!PageAnon(page)) {  /* Shared memory */
1510                         if (page->mapping && !page_is_file_cache(page))
1511                                 goto unlock_out;
1512                 } else if (page_mapped(page)) /* Anon */
1513                                 goto unlock_out;
1514                 break;
1515         default:
1516                 break;
1517         }
1518
1519         res_counter_uncharge(&mem->res, PAGE_SIZE);
1520         if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
1521                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1522         mem_cgroup_charge_statistics(mem, pc, false);
1523
1524         ClearPageCgroupUsed(pc);
1525         /*
1526          * pc->mem_cgroup is not cleared here. It will be accessed when it's
1527          * freed from LRU. This is safe because uncharged page is expected not
1528          * to be reused (freed soon). Exception is SwapCache, it's handled by
1529          * special functions.
1530          */
1531
1532         mz = page_cgroup_zoneinfo(pc);
1533         unlock_page_cgroup(pc);
1534
1535         /* at swapout, this memcg will be accessed to record to swap */
1536         if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
1537                 css_put(&mem->css);
1538
1539         return mem;
1540
1541 unlock_out:
1542         unlock_page_cgroup(pc);
1543         return NULL;
1544 }
1545
1546 void mem_cgroup_uncharge_page(struct page *page)
1547 {
1548         /* early check. */
1549         if (page_mapped(page))
1550                 return;
1551         if (page->mapping && !PageAnon(page))
1552                 return;
1553         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
1554 }
1555
1556 void mem_cgroup_uncharge_cache_page(struct page *page)
1557 {
1558         VM_BUG_ON(page_mapped(page));
1559         VM_BUG_ON(page->mapping);
1560         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
1561 }
1562
1563 #ifdef CONFIG_SWAP
1564 /*
1565  * Called after __delete_from_swap_cache(); drops the "page" account.
1566  * The memcg information is recorded in the swap_cgroup of "ent".
1567  */
1568 void
1569 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
1570 {
1571         struct mem_cgroup *memcg;
1572         int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
1573
1574         if (!swapout) /* this was a swap cache but the swap is unused ! */
1575                 ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
1576
1577         memcg = __mem_cgroup_uncharge_common(page, ctype);
1578
1579         /* record memcg information */
1580         if (do_swap_account && swapout && memcg) {
1581                 swap_cgroup_record(ent, css_id(&memcg->css));
1582                 mem_cgroup_get(memcg);
1583         }
1584         if (swapout && memcg)
1585                 css_put(&memcg->css);
1586 }
1587 #endif
1588
1589 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
1590 /*
1591  * called from swap_entry_free(); removes the record in swap_cgroup and
1592  * uncharges the "memsw" account.
1593  */
1594 void mem_cgroup_uncharge_swap(swp_entry_t ent)
1595 {
1596         struct mem_cgroup *memcg;
1597         unsigned short id;
1598
1599         if (!do_swap_account)
1600                 return;
1601
1602         id = swap_cgroup_record(ent, 0);
1603         rcu_read_lock();
1604         memcg = mem_cgroup_lookup(id);
1605         if (memcg) {
1606                 /*
1607                  * We uncharge this because swap is freed.
1608                  * This memcg can be an obsolete one. We avoid calling css_tryget().
1609                  */
1610                 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1611                 mem_cgroup_put(memcg);
1612         }
1613         rcu_read_unlock();
1614 }
1615 #endif
1616
1617 /*
1618  * Before starting migration, account PAGE_SIZE to the mem_cgroup that the old
1619  * page belongs to.
1620  */
1621 int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
1622 {
1623         struct page_cgroup *pc;
1624         struct mem_cgroup *mem = NULL;
1625         int ret = 0;
1626
1627         if (mem_cgroup_disabled())
1628                 return 0;
1629
1630         pc = lookup_page_cgroup(page);
1631         lock_page_cgroup(pc);
1632         if (PageCgroupUsed(pc)) {
1633                 mem = pc->mem_cgroup;
1634                 css_get(&mem->css);
1635         }
1636         unlock_page_cgroup(pc);
1637
1638         if (mem) {
1639                 ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
1640                 css_put(&mem->css);
1641         }
1642         *ptr = mem;
1643         return ret;
1644 }
1645
1646 /* remove redundant charge if migration failed */
1647 void mem_cgroup_end_migration(struct mem_cgroup *mem,
1648                 struct page *oldpage, struct page *newpage)
1649 {
1650         struct page *target, *unused;
1651         struct page_cgroup *pc;
1652         enum charge_type ctype;
1653
1654         if (!mem)
1655                 return;
1656
1657         /* at migration success, oldpage->mapping is NULL. */
1658         if (oldpage->mapping) {
1659                 target = oldpage;
1660                 unused = NULL;
1661         } else {
1662                 target = newpage;
1663                 unused = oldpage;
1664         }
1665
1666         if (PageAnon(target))
1667                 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
1668         else if (page_is_file_cache(target))
1669                 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
1670         else
1671                 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
1672
1673         /* unused page is not on radix-tree now. */
1674         if (unused)
1675                 __mem_cgroup_uncharge_common(unused, ctype);
1676
1677         pc = lookup_page_cgroup(target);
1678         /*
1679          * __mem_cgroup_commit_charge() checks the PCG_USED bit of page_cgroup.
1680          * So, double-counting is effectively avoided.
1681          */
1682         __mem_cgroup_commit_charge(mem, pc, ctype);
1683
1684         /*
1685          * Both oldpage and newpage are still under lock_page(), so we don't
1686          * have to care about races in the radix-tree. But we do have to be
1687          * careful about whether this page is still mapped or not.
1688          *
1689          * !page_mapped() can happen: at the start of migration, oldpage was
1690          * mapped, but by now it has been zapped. We know the *target* page is
1691          * not freed/reused under us, and mem_cgroup_uncharge_page() does all
1692          * the necessary checks.
1693          */
1694         if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
1695                 mem_cgroup_uncharge_page(target);
1696 }
1697
1698 /*
1699  * A call to try to shrink memory usage on charge failure at shmem's swapin.
1700  * Calling hierarchical_reclaim is not enough because we should update
1701  * last_oom_jiffies to prevent pagefault_out_of_memory from invoking global OOM.
1702  * Moreover, considering the hierarchy, we should reclaim from the mem_over_limit,
1703  * not from the memcg which this page would be charged to.
1704  * try_charge_swapin does all of this work properly.
1705  */
1706 int mem_cgroup_shmem_charge_fallback(struct page *page,
1707                             struct mm_struct *mm,
1708                             gfp_t gfp_mask)
1709 {
1710         struct mem_cgroup *mem = NULL;
1711         int ret;
1712
1713         if (mem_cgroup_disabled())
1714                 return 0;
1715
1716         ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
1717         if (!ret)
1718                 mem_cgroup_cancel_charge_swapin(mem); /* it does !mem check */
1719
1720         return ret;
1721 }
1722
1723 static DEFINE_MUTEX(set_limit_mutex);
1724
1725 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
1726                                 unsigned long long val)
1727 {
1728         int retry_count;
1729         int progress;
1730         u64 memswlimit;
1731         int ret = 0;
1732         int children = mem_cgroup_count_children(memcg);
1733         u64 curusage, oldusage;
1734
1735         /*
1736          * To keep hierarchical_reclaim simple, how long we should retry
1737          * depends on the caller. We set our retry count to be a function
1738          * of the number of children we should visit in this loop.
1739          */
1740         retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
1741
1742         oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
1743
1744         while (retry_count) {
1745                 if (signal_pending(current)) {
1746                         ret = -EINTR;
1747                         break;
1748                 }
1749                 /*
1750                  * Rather than hiding all this in some function, do it in an
1751                  * open-coded manner so it is clear what really happens.
1752                  * We have to guarantee mem->res.limit <= mem->memsw.limit.
1753                  */
1754                 mutex_lock(&set_limit_mutex);
1755                 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1756                 if (memswlimit < val) {
1757                         ret = -EINVAL;
1758                         mutex_unlock(&set_limit_mutex);
1759                         break;
1760                 }
1761                 ret = res_counter_set_limit(&memcg->res, val);
1762                 if (!ret) {
1763                         if (memswlimit == val)
1764                                 memcg->memsw_is_minimum = true;
1765                         else
1766                                 memcg->memsw_is_minimum = false;
1767                 }
1768                 mutex_unlock(&set_limit_mutex);
1769
1770                 if (!ret)
1771                         break;
1772
1773                 progress = mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL,
1774                                                    false, true);
1775                 curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
1776                 /* Usage is reduced ? */
1777                 if (curusage >= oldusage)
1778                         retry_count--;
1779                 else
1780                         oldusage = curusage;
1781         }
1782
1783         return ret;
1784 }
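/*
 * Informal note on memsw_is_minimum (a sketch of the intended semantics,
 * inferred from the two resize paths here): it becomes true whenever the two
 * limits are made equal, e.g.
 *
 *   # echo 512M > memory.limit_in_bytes
 *   # echo 512M > memory.memsw.limit_in_bytes
 *
 * Once res.limit == memsw.limit, swapping a page out cannot create headroom:
 * swap-out uncharges only "res" while "memsw" stays charged (see
 * __mem_cgroup_uncharge_common() above), and since memsw usage is always >=
 * res usage, memsw is already at its limit whenever res is.  Reclaim for such
 * a group is therefore expected to skip swap-out; the flag is presumably
 * consulted for that by the hierarchical reclaim path defined elsewhere in
 * this file.
 */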
1785
1786 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
1787                                         unsigned long long val)
1788 {
1789         int retry_count;
1790         u64 memlimit, oldusage, curusage;
1791         int children = mem_cgroup_count_children(memcg);
1792         int ret = -EBUSY;
1793
1794         /* see mem_cgroup_resize_limit */
1795         retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
1796         oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
1797         while (retry_count) {
1798                 if (signal_pending(current)) {
1799                         ret = -EINTR;
1800                         break;
1801                 }
1802                 /*
1803                  * Rather than hiding all this in some function, do it in an
1804                  * open-coded manner so it is clear what really happens.
1805                  * We have to guarantee mem->res.limit <= mem->memsw.limit.
1806                  */
1807                 mutex_lock(&set_limit_mutex);
1808                 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1809                 if (memlimit > val) {
1810                         ret = -EINVAL;
1811                         mutex_unlock(&set_limit_mutex);
1812                         break;
1813                 }
1814                 ret = res_counter_set_limit(&memcg->memsw, val);
1815                 if (!ret) {
1816                         if (memlimit == val)
1817                                 memcg->memsw_is_minimum = true;
1818                         else
1819                                 memcg->memsw_is_minimum = false;
1820                 }
1821                 mutex_unlock(&set_limit_mutex);
1822
1823                 if (!ret)
1824                         break;
1825
1826                 mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true, true);
1827                 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
1828                 /* Usage is reduced ? */
1829                 if (curusage >= oldusage)
1830                         retry_count--;
1831                 else
1832                         oldusage = curusage;
1833         }
1834         return ret;
1835 }
1836
1837 /*
1838  * This routine traverses the page_cgroups on the given list and drops them all.
1839  * *And* this routine doesn't reclaim the page itself, it just removes the page_cgroup.
1840  */
1841 static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
1842                                 int node, int zid, enum lru_list lru)
1843 {
1844         struct zone *zone;
1845         struct mem_cgroup_per_zone *mz;
1846         struct page_cgroup *pc, *busy;
1847         unsigned long flags, loop;
1848         struct list_head *list;
1849         int ret = 0;
1850
1851         zone = &NODE_DATA(node)->node_zones[zid];
1852         mz = mem_cgroup_zoneinfo(mem, node, zid);
1853         list = &mz->lists[lru];
1854
1855         loop = MEM_CGROUP_ZSTAT(mz, lru);
1856         /* give some margin against -EBUSY etc... */
1857         loop += 256;
1858         busy = NULL;
1859         while (loop--) {
1860                 ret = 0;
1861                 spin_lock_irqsave(&zone->lru_lock, flags);
1862                 if (list_empty(list)) {
1863                         spin_unlock_irqrestore(&zone->lru_lock, flags);
1864                         break;
1865                 }
1866                 pc = list_entry(list->prev, struct page_cgroup, lru);
1867                 if (busy == pc) {
1868                         list_move(&pc->lru, list);
1869                         busy = NULL;
1870                         spin_unlock_irqrestore(&zone->lru_lock, flags);
1871                         continue;
1872                 }
1873                 spin_unlock_irqrestore(&zone->lru_lock, flags);
1874
1875                 ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
1876                 if (ret == -ENOMEM)
1877                         break;
1878
1879                 if (ret == -EBUSY || ret == -EINVAL) {
1880                         /* found lock contention or "pc" is obsolete. */
1881                         busy = pc;
1882                         cond_resched();
1883                 } else
1884                         busy = NULL;
1885         }
1886
1887         if (!ret && !list_empty(list))
1888                 return -EBUSY;
1889         return ret;
1890 }
1891
1892 /*
1893  * make the mem_cgroup's charge 0 if there is no task.
1894  * This enables deleting this mem_cgroup.
1895  */
1896 static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
1897 {
1898         int ret;
1899         int node, zid, shrink;
1900         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1901         struct cgroup *cgrp = mem->css.cgroup;
1902
1903         css_get(&mem->css);
1904
1905         shrink = 0;
1906         /* should free all ? */
1907         if (free_all)
1908                 goto try_to_free;
1909 move_account:
1910         while (mem->res.usage > 0) {
1911                 ret = -EBUSY;
1912                 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
1913                         goto out;
1914                 ret = -EINTR;
1915                 if (signal_pending(current))
1916                         goto out;
1917                 /* This is for making all *used* pages be on the LRU. */
1918                 lru_add_drain_all();
1919                 ret = 0;
1920                 for_each_node_state(node, N_HIGH_MEMORY) {
1921                         for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
1922                                 enum lru_list l;
1923                                 for_each_lru(l) {
1924                                         ret = mem_cgroup_force_empty_list(mem,
1925                                                         node, zid, l);
1926                                         if (ret)
1927                                                 break;
1928                                 }
1929                         }
1930                         if (ret)
1931                                 break;
1932                 }
1933                 /* it seems the parent cgroup doesn't have enough memory */
1934                 if (ret == -ENOMEM)
1935                         goto try_to_free;
1936                 cond_resched();
1937         }
1938         ret = 0;
1939 out:
1940         css_put(&mem->css);
1941         return ret;
1942
1943 try_to_free:
1944         /* returns -EBUSY if there is a task or if we come here twice. */
1945         if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
1946                 ret = -EBUSY;
1947                 goto out;
1948         }
1949         /* we call try-to-free pages to make this cgroup empty */
1950         lru_add_drain_all();
1951         /* try to free all pages in this cgroup */
1952         shrink = 1;
1953         while (nr_retries && mem->res.usage > 0) {
1954                 int progress;
1955
1956                 if (signal_pending(current)) {
1957                         ret = -EINTR;
1958                         goto out;
1959                 }
1960                 progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
1961                                                 false, get_swappiness(mem));
1962                 if (!progress) {
1963                         nr_retries--;
1964                         /* maybe some writeback is necessary */
1965                         congestion_wait(WRITE, HZ/10);
1966                 }
1967
1968         }
1969         lru_add_drain();
1970         /* try move_account...there may be some *locked* pages. */
1971         if (mem->res.usage)
1972                 goto move_account;
1973         ret = 0;
1974         goto out;
1975 }
1976
1977 int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
1978 {
1979         return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
1980 }
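/*
 * This is the handler behind the "memory.force_empty" trigger file registered
 * in mem_cgroup_files[] below.  Rough usage sketch:
 *
 *   # echo 0 > memory.force_empty
 *
 * tries to free the group's pages and to move any remaining charges to the
 * parent; it fails with -EBUSY while tasks or child cgroups are still
 * attached.
 */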
1981
1982
1983 static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
1984 {
1985         return mem_cgroup_from_cont(cont)->use_hierarchy;
1986 }
1987
1988 static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
1989                                         u64 val)
1990 {
1991         int retval = 0;
1992         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1993         struct cgroup *parent = cont->parent;
1994         struct mem_cgroup *parent_mem = NULL;
1995
1996         if (parent)
1997                 parent_mem = mem_cgroup_from_cont(parent);
1998
1999         cgroup_lock();
2000         /*
2001          * If the parent's use_hierarchy is set, we can't make any modifications
2002          * in the child subtrees. If it is unset, then the change can
2003          * occur, provided the current cgroup has no children.
2004          *
2005          * For the root cgroup, parent_mem is NULL; we allow the value to be
2006          * set if there are no children.
2007          */
2008         if ((!parent_mem || !parent_mem->use_hierarchy) &&
2009                                 (val == 1 || val == 0)) {
2010                 if (list_empty(&cont->children))
2011                         mem->use_hierarchy = val;
2012                 else
2013                         retval = -EBUSY;
2014         } else
2015                 retval = -EINVAL;
2016         cgroup_unlock();
2017
2018         return retval;
2019 }
2020
2021 static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
2022 {
2023         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
2024         u64 val = 0;
2025         int type, name;
2026
2027         type = MEMFILE_TYPE(cft->private);
2028         name = MEMFILE_ATTR(cft->private);
2029         switch (type) {
2030         case _MEM:
2031                 val = res_counter_read_u64(&mem->res, name);
2032                 break;
2033         case _MEMSWAP:
2034                 val = res_counter_read_u64(&mem->memsw, name);
2035                 break;
2036         default:
2037                 BUG();
2038                 break;
2039         }
2040         return val;
2041 }
2042 /*
2043  * The only users of this function are the
2044  * RES_LIMIT files.
2045  */
2046 static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
2047                             const char *buffer)
2048 {
2049         struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
2050         int type, name;
2051         unsigned long long val;
2052         int ret;
2053
2054         type = MEMFILE_TYPE(cft->private);
2055         name = MEMFILE_ATTR(cft->private);
2056         switch (name) {
2057         case RES_LIMIT:
2058                 /* This function does all the necessary parsing, so reuse it */
2059                 ret = res_counter_memparse_write_strategy(buffer, &val);
2060                 if (ret)
2061                         break;
2062                 if (type == _MEM)
2063                         ret = mem_cgroup_resize_limit(memcg, val);
2064                 else
2065                         ret = mem_cgroup_resize_memsw_limit(memcg, val);
2066                 break;
2067         default:
2068                 ret = -EINVAL; /* should be BUG() ? */
2069                 break;
2070         }
2071         return ret;
2072 }
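/*
 * From userspace this is reached through the limit files registered below,
 * e.g. (informal example; the accepted syntax is whatever
 * res_counter_memparse_write_strategy() accepts, typically memparse()-style
 * K/M/G suffixes):
 *
 *   # echo 64M > memory.limit_in_bytes
 *   # echo 128M > memory.memsw.limit_in_bytes
 *
 * MEMFILE_TYPE() of cft->private then routes the parsed value to either
 * mem_cgroup_resize_limit() (_MEM) or mem_cgroup_resize_memsw_limit()
 * (_MEMSWAP).
 */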
2073
2074 static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
2075                 unsigned long long *mem_limit, unsigned long long *memsw_limit)
2076 {
2077         struct cgroup *cgroup;
2078         unsigned long long min_limit, min_memsw_limit, tmp;
2079
2080         min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
2081         min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
2082         cgroup = memcg->css.cgroup;
2083         if (!memcg->use_hierarchy)
2084                 goto out;
2085
2086         while (cgroup->parent) {
2087                 cgroup = cgroup->parent;
2088                 memcg = mem_cgroup_from_cont(cgroup);
2089                 if (!memcg->use_hierarchy)
2090                         break;
2091                 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
2092                 min_limit = min(min_limit, tmp);
2093                 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
2094                 min_memsw_limit = min(min_memsw_limit, tmp);
2095         }
2096 out:
2097         *mem_limit = min_limit;
2098         *memsw_limit = min_memsw_limit;
2099         return;
2100 }
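/*
 * A small worked example of the walk above: with use_hierarchy enabled, a
 * child whose own memory.limit_in_bytes is 1G sitting under a parent limited
 * to 512M reports hierarchical_memory_limit == 512M, i.e. the minimum limit
 * along the path of use_hierarchy ancestors.  The memsw limit is minimised
 * over the same walk.
 */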
2101
2102 static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
2103 {
2104         struct mem_cgroup *mem;
2105         int type, name;
2106
2107         mem = mem_cgroup_from_cont(cont);
2108         type = MEMFILE_TYPE(event);
2109         name = MEMFILE_ATTR(event);
2110         switch (name) {
2111         case RES_MAX_USAGE:
2112                 if (type == _MEM)
2113                         res_counter_reset_max(&mem->res);
2114                 else
2115                         res_counter_reset_max(&mem->memsw);
2116                 break;
2117         case RES_FAILCNT:
2118                 if (type == _MEM)
2119                         res_counter_reset_failcnt(&mem->res);
2120                 else
2121                         res_counter_reset_failcnt(&mem->memsw);
2122                 break;
2123         }
2124         return 0;
2125 }
2126
2127
2128 /* For read statistics */
2129 enum {
2130         MCS_CACHE,
2131         MCS_RSS,
2132         MCS_MAPPED_FILE,
2133         MCS_PGPGIN,
2134         MCS_PGPGOUT,
2135         MCS_INACTIVE_ANON,
2136         MCS_ACTIVE_ANON,
2137         MCS_INACTIVE_FILE,
2138         MCS_ACTIVE_FILE,
2139         MCS_UNEVICTABLE,
2140         NR_MCS_STAT,
2141 };
2142
2143 struct mcs_total_stat {
2144         s64 stat[NR_MCS_STAT];
2145 };
2146
2147 struct {
2148         char *local_name;
2149         char *total_name;
2150 } memcg_stat_strings[NR_MCS_STAT] = {
2151         {"cache", "total_cache"},
2152         {"rss", "total_rss"},
2153         {"mapped_file", "total_mapped_file"},
2154         {"pgpgin", "total_pgpgin"},
2155         {"pgpgout", "total_pgpgout"},
2156         {"inactive_anon", "total_inactive_anon"},
2157         {"active_anon", "total_active_anon"},
2158         {"inactive_file", "total_inactive_file"},
2159         {"active_file", "total_active_file"},
2160         {"unevictable", "total_unevictable"}
2161 };
2162
2163
2164 static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
2165 {
2166         struct mcs_total_stat *s = data;
2167         s64 val;
2168
2169         /* per cpu stat */
2170         val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_CACHE);
2171         s->stat[MCS_CACHE] += val * PAGE_SIZE;
2172         val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
2173         s->stat[MCS_RSS] += val * PAGE_SIZE;
2174         val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_MAPPED_FILE);
2175         s->stat[MCS_MAPPED_FILE] += val * PAGE_SIZE;
2176         val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGIN_COUNT);
2177         s->stat[MCS_PGPGIN] += val;
2178         val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGOUT_COUNT);
2179         s->stat[MCS_PGPGOUT] += val;
2180
2181         /* per zone stat */
2182         val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
2183         s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
2184         val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_ANON);
2185         s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
2186         val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_FILE);
2187         s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
2188         val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_FILE);
2189         s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
2190         val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE);
2191         s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
2192         return 0;
2193 }
2194
2195 static void
2196 mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
2197 {
2198         mem_cgroup_walk_tree(mem, s, mem_cgroup_get_local_stat);
2199 }
2200
2201 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
2202                                  struct cgroup_map_cb *cb)
2203 {
2204         struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
2205         struct mcs_total_stat mystat;
2206         int i;
2207
2208         memset(&mystat, 0, sizeof(mystat));
2209         mem_cgroup_get_local_stat(mem_cont, &mystat);
2210
2211         for (i = 0; i < NR_MCS_STAT; i++)
2212                 cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
2213
2214         /* Hierarchical information */
2215         {
2216                 unsigned long long limit, memsw_limit;
2217                 memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
2218                 cb->fill(cb, "hierarchical_memory_limit", limit);
2219                 if (do_swap_account)
2220                         cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
2221         }
2222
2223         memset(&mystat, 0, sizeof(mystat));
2224         mem_cgroup_get_total_stat(mem_cont, &mystat);
2225         for (i = 0; i < NR_MCS_STAT; i++)
2226                 cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
2227
2228
2229 #ifdef CONFIG_DEBUG_VM
2230         cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
2231
2232         {
2233                 int nid, zid;
2234                 struct mem_cgroup_per_zone *mz;
2235                 unsigned long recent_rotated[2] = {0, 0};
2236                 unsigned long recent_scanned[2] = {0, 0};
2237
2238                 for_each_online_node(nid)
2239                         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
2240                                 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
2241
2242                                 recent_rotated[0] +=
2243                                         mz->reclaim_stat.recent_rotated[0];
2244                                 recent_rotated[1] +=
2245                                         mz->reclaim_stat.recent_rotated[1];
2246                                 recent_scanned[0] +=
2247                                         mz->reclaim_stat.recent_scanned[0];
2248                                 recent_scanned[1] +=
2249                                         mz->reclaim_stat.recent_scanned[1];
2250                         }
2251                 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
2252                 cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
2253                 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
2254                 cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
2255         }
2256 #endif
2257
2258         return 0;
2259 }
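/*
 * Reading "memory.stat" therefore yields, roughly (an informal sketch of the
 * key/value pairs filled in above; values are bytes except the pgpgin/pgpgout
 * event counts):
 *
 *   cache <bytes>
 *   rss <bytes>
 *   mapped_file <bytes>
 *   pgpgin <count>
 *   pgpgout <count>
 *   inactive_anon <bytes> ... unevictable <bytes>
 *   hierarchical_memory_limit <bytes>
 *   hierarchical_memsw_limit <bytes>    (only with swap accounting)
 *   total_cache <bytes> ... total_unevictable <bytes>
 *
 * plus inactive_ratio and the recent_rotated/recent_scanned counters under
 * CONFIG_DEBUG_VM.
 */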
2260
2261 static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
2262 {
2263         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
2264
2265         return get_swappiness(memcg);
2266 }
2267
2268 static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
2269                                        u64 val)
2270 {
2271         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
2272         struct mem_cgroup *parent;
2273
2274         if (val > 100)
2275                 return -EINVAL;
2276
2277         if (cgrp->parent == NULL)
2278                 return -EINVAL;
2279
2280         parent = mem_cgroup_from_cont(cgrp->parent);
2281
2282         cgroup_lock();
2283
2284         /* If under hierarchy, only a childless hierarchy root can set this value */
2285         if ((parent->use_hierarchy) ||
2286             (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
2287                 cgroup_unlock();
2288                 return -EINVAL;
2289         }
2290
2291         spin_lock(&memcg->reclaim_param_lock);
2292         memcg->swappiness = val;
2293         spin_unlock(&memcg->reclaim_param_lock);
2294
2295         cgroup_unlock();
2296
2297         return 0;
2298 }
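/*
 * Usage sketch: per-group swappiness is analogous to the global vm.swappiness
 * sysctl, e.g.
 *
 *   # echo 10 > memory.swappiness
 *
 * Writes are rejected with -EINVAL for values above 100, for the root cgroup
 * (which has no parent), and for groups that sit under a use_hierarchy parent
 * or that have children of their own while use_hierarchy is set.
 */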
2299
2300
2301 static struct cftype mem_cgroup_files[] = {
2302         {
2303                 .name = "usage_in_bytes",
2304                 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
2305                 .read_u64 = mem_cgroup_read,
2306         },
2307         {
2308                 .name = "max_usage_in_bytes",
2309                 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
2310                 .trigger = mem_cgroup_reset,
2311                 .read_u64 = mem_cgroup_read,
2312         },
2313         {
2314                 .name = "limit_in_bytes",
2315                 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
2316                 .write_string = mem_cgroup_write,
2317                 .read_u64 = mem_cgroup_read,
2318         },
2319         {
2320                 .name = "failcnt",
2321                 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
2322                 .trigger = mem_cgroup_reset,
2323                 .read_u64 = mem_cgroup_read,
2324         },
2325         {
2326                 .name = "stat",
2327                 .read_map = mem_control_stat_show,
2328         },
2329         {
2330                 .name = "force_empty",
2331                 .trigger = mem_cgroup_force_empty_write,
2332         },
2333         {
2334                 .name = "use_hierarchy",
2335                 .write_u64 = mem_cgroup_hierarchy_write,
2336                 .read_u64 = mem_cgroup_hierarchy_read,
2337         },
2338         {
2339                 .name = "swappiness",
2340                 .read_u64 = mem_cgroup_swappiness_read,
2341                 .write_u64 = mem_cgroup_swappiness_write,
2342         },
2343 };
2344
2345 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2346 static struct cftype memsw_cgroup_files[] = {
2347         {
2348                 .name = "memsw.usage_in_bytes",
2349                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
2350                 .read_u64 = mem_cgroup_read,
2351         },
2352         {
2353                 .name = "memsw.max_usage_in_bytes",
2354                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
2355                 .trigger = mem_cgroup_reset,
2356                 .read_u64 = mem_cgroup_read,
2357         },
2358         {
2359                 .name = "memsw.limit_in_bytes",
2360                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
2361                 .write_string = mem_cgroup_write,
2362                 .read_u64 = mem_cgroup_read,
2363         },
2364         {
2365                 .name = "memsw.failcnt",
2366                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
2367                 .trigger = mem_cgroup_reset,
2368                 .read_u64 = mem_cgroup_read,
2369         },
2370 };
2371
2372 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
2373 {
2374         if (!do_swap_account)
2375                 return 0;
2376         return cgroup_add_files(cont, ss, memsw_cgroup_files,
2377                                 ARRAY_SIZE(memsw_cgroup_files));
2378 }
2379 #else
2380 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
2381 {
2382         return 0;
2383 }
2384 #endif
2385
2386 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
2387 {
2388         struct mem_cgroup_per_node *pn;
2389         struct mem_cgroup_per_zone *mz;
2390         enum lru_list l;
2391         int zone, tmp = node;
2392         /*
2393          * This routine is called against possible nodes.
2394          * But it's a BUG to call kmalloc() against an offline node.
2395          *
2396          * TODO: this routine can waste a lot of memory for nodes which will
2397          *       never be onlined. It would be better to use a memory hotplug callback
2398          *       function.
2399          */
2400         if (!node_state(node, N_NORMAL_MEMORY))
2401                 tmp = -1;
2402         pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
2403         if (!pn)
2404                 return 1;
2405
2406         mem->info.nodeinfo[node] = pn;
2407         memset(pn, 0, sizeof(*pn));
2408
2409         for (zone = 0; zone < MAX_NR_ZONES; zone++) {
2410                 mz = &pn->zoneinfo[zone];
2411                 for_each_lru(l)
2412                         INIT_LIST_HEAD(&mz->lists[l]);
2413         }
2414         return 0;
2415 }
2416
2417 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
2418 {
2419         kfree(mem->info.nodeinfo[node]);
2420 }
2421
2422 static int mem_cgroup_size(void)
2423 {
2424         int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu);
2425         return sizeof(struct mem_cgroup) + cpustat_size;
2426 }
2427
2428 static struct mem_cgroup *mem_cgroup_alloc(void)
2429 {
2430         struct mem_cgroup *mem;
2431         int size = mem_cgroup_size();
2432
2433         if (size < PAGE_SIZE)
2434                 mem = kmalloc(size, GFP_KERNEL);
2435         else
2436                 mem = vmalloc(size);
2437
2438         if (mem)
2439                 memset(mem, 0, size);
2440         return mem;
2441 }
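/*
 * Note on the kmalloc()/vmalloc() split above: mem_cgroup_size() includes the
 * per-cpu stat array (nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu)), so on
 * machines with many possible CPUs the allocation can exceed PAGE_SIZE; in
 * that case a physically contiguous kmalloc() is avoided and vmalloc() is
 * used instead.
 */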
2442
2443 /*
2444  * When destroying a mem_cgroup, references from swap_cgroup can remain
2445  * (scanning them all at force_empty is too costly...).
2446  *
2447  * Instead of clearing all references at force_empty, we remember
2448  * the number of references from swap_cgroup and free the mem_cgroup when
2449  * it goes down to 0.
2450  *
2451  * Removal of the cgroup itself succeeds regardless of refs from swap.
2452  */
2453
2454 static void __mem_cgroup_free(struct mem_cgroup *mem)
2455 {
2456         int node;
2457
2458         free_css_id(&mem_cgroup_subsys, &mem->css);
2459
2460         for_each_node_state(node, N_POSSIBLE)
2461                 free_mem_cgroup_per_zone_info(mem, node);
2462
2463         if (mem_cgroup_size() < PAGE_SIZE)
2464                 kfree(mem);
2465         else
2466                 vfree(mem);
2467 }
2468
2469 static void mem_cgroup_get(struct mem_cgroup *mem)
2470 {
2471         atomic_inc(&mem->refcnt);
2472 }
2473
2474 static void mem_cgroup_put(struct mem_cgroup *mem)
2475 {
2476         if (atomic_dec_and_test(&mem->refcnt)) {
2477                 struct mem_cgroup *parent = parent_mem_cgroup(mem);
2478                 __mem_cgroup_free(mem);
2479                 if (parent)
2480                         mem_cgroup_put(parent);
2481         }
2482 }
2483
2484 /*
2485  * Returns the parent mem_cgroup in the memcg hierarchy when use_hierarchy is enabled.
2486  */
2487 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
2488 {
2489         if (!mem->res.parent)
2490                 return NULL;
2491         return mem_cgroup_from_res_counter(mem->res.parent, res);
2492 }
2493
2494 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2495 static void __init enable_swap_cgroup(void)
2496 {
2497         if (!mem_cgroup_disabled() && really_do_swap_account)
2498                 do_swap_account = 1;
2499 }
2500 #else
2501 static void __init enable_swap_cgroup(void)
2502 {
2503 }
2504 #endif
2505
2506 static struct cgroup_subsys_state * __ref
2507 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
2508 {
2509         struct mem_cgroup *mem, *parent;
2510         long error = -ENOMEM;
2511         int node;
2512
2513         mem = mem_cgroup_alloc();
2514         if (!mem)
2515                 return ERR_PTR(error);
2516
2517         for_each_node_state(node, N_POSSIBLE)
2518                 if (alloc_mem_cgroup_per_zone_info(mem, node))
2519                         goto free_out;
2520         /* root ? */
2521         if (cont->parent == NULL) {
2522                 enable_swap_cgroup();
2523                 parent = NULL;
2524         } else {
2525                 parent = mem_cgroup_from_cont(cont->parent);
2526                 mem->use_hierarchy = parent->use_hierarchy;
2527         }
2528
2529         if (parent && parent->use_hierarchy) {
2530                 res_counter_init(&mem->res, &parent->res);
2531                 res_counter_init(&mem->memsw, &parent->memsw);
2532                 /*
2533                  * We increment the refcnt of the parent to ensure that we can
2534                  * safely access it on res_counter_charge/uncharge.
2535                  * This refcnt will be decremented when freeing this
2536                  * mem_cgroup (see mem_cgroup_put()).
2537                  */
2538                 mem_cgroup_get(parent);
2539         } else {
2540                 res_counter_init(&mem->res, NULL);
2541                 res_counter_init(&mem->memsw, NULL);
2542         }
2543         mem->last_scanned_child = 0;
2544         spin_lock_init(&mem->reclaim_param_lock);
2545
2546         if (parent)
2547                 mem->swappiness = get_swappiness(parent);
2548         atomic_set(&mem->refcnt, 1);
2549         return &mem->css;
2550 free_out:
2551         __mem_cgroup_free(mem);
2552         return ERR_PTR(error);
2553 }
2554
2555 static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
2556                                         struct cgroup *cont)
2557 {
2558         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
2559
2560         return mem_cgroup_force_empty(mem, false);
2561 }
2562
2563 static void mem_cgroup_destroy(struct cgroup_subsys *ss,
2564                                 struct cgroup *cont)
2565 {
2566         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
2567
2568         mem_cgroup_put(mem);
2569 }
2570
2571 static int mem_cgroup_populate(struct cgroup_subsys *ss,
2572                                 struct cgroup *cont)
2573 {
2574         int ret;
2575
2576         ret = cgroup_add_files(cont, ss, mem_cgroup_files,
2577                                 ARRAY_SIZE(mem_cgroup_files));
2578
2579         if (!ret)
2580                 ret = register_memsw_files(cont, ss);
2581         return ret;
2582 }
2583
2584 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
2585                                 struct cgroup *cont,
2586                                 struct cgroup *old_cont,
2587                                 struct task_struct *p)
2588 {
2589         mutex_lock(&memcg_tasklist);
2590         /*
2591          * FIXME: It would be better to move this process's charges from the
2592          * old memcg to the new one, but that is still on the TODO list.
2593          */
2594         mutex_unlock(&memcg_tasklist);
2595 }
2596
2597 struct cgroup_subsys mem_cgroup_subsys = {
2598         .name = "memory",
2599         .subsys_id = mem_cgroup_subsys_id,
2600         .create = mem_cgroup_create,
2601         .pre_destroy = mem_cgroup_pre_destroy,
2602         .destroy = mem_cgroup_destroy,
2603         .populate = mem_cgroup_populate,
2604         .attach = mem_cgroup_move_task,
2605         .early_init = 0,
2606         .use_id = 1,
2607 };
2608
2609 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2610
2611 static int __init disable_swap_account(char *s)
2612 {
2613         really_do_swap_account = 0;
2614         return 1;
2615 }
2616 __setup("noswapaccount", disable_swap_account);
2617 #endif
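/*
 * Boot-time example: passing "noswapaccount" on the kernel command line runs
 * the __setup() handler above and clears really_do_swap_account, so
 * enable_swap_cgroup() never sets do_swap_account.  With do_swap_account == 0
 * the memsw res_counter is still initialised, but register_memsw_files()
 * returns early and no memory.memsw.* control files appear in the cgroup
 * filesystem.
 */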