memcg: oom wakeup filter
[pandora-kernel.git] / mm / memcontrol.c
1 /* memcontrol.c - Memory Controller
2  *
3  * Copyright IBM Corporation, 2007
4  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5  *
6  * Copyright 2007 OpenVZ SWsoft Inc
7  * Author: Pavel Emelianov <xemul@openvz.org>
8  *
9  * Memory thresholds
10  * Copyright (C) 2009 Nokia Corporation
11  * Author: Kirill A. Shutemov
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  */
23
24 #include <linux/res_counter.h>
25 #include <linux/memcontrol.h>
26 #include <linux/cgroup.h>
27 #include <linux/mm.h>
28 #include <linux/hugetlb.h>
29 #include <linux/pagemap.h>
30 #include <linux/smp.h>
31 #include <linux/page-flags.h>
32 #include <linux/backing-dev.h>
33 #include <linux/bit_spinlock.h>
34 #include <linux/rcupdate.h>
35 #include <linux/limits.h>
36 #include <linux/mutex.h>
37 #include <linux/rbtree.h>
38 #include <linux/slab.h>
39 #include <linux/swap.h>
40 #include <linux/swapops.h>
41 #include <linux/spinlock.h>
42 #include <linux/eventfd.h>
43 #include <linux/sort.h>
44 #include <linux/fs.h>
45 #include <linux/seq_file.h>
46 #include <linux/vmalloc.h>
47 #include <linux/mm_inline.h>
48 #include <linux/page_cgroup.h>
49 #include <linux/cpu.h>
50 #include "internal.h"
51
52 #include <asm/uaccess.h>
53
54 struct cgroup_subsys mem_cgroup_subsys __read_mostly;
55 #define MEM_CGROUP_RECLAIM_RETRIES      5
56 struct mem_cgroup *root_mem_cgroup __read_mostly;
57
58 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
59 /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
60 int do_swap_account __read_mostly;
61 static int really_do_swap_account __initdata = 1; /* to remember the boot option */
62 #else
63 #define do_swap_account         (0)
64 #endif
65
66 /*
67  * Per memcg event counter is incremented at every pagein/pageout. This counter
68  * is used to trigger some periodic events. This is straightforward and better
69  * than using jiffies etc. to handle periodic memcg events.
70  *
71  * These values will be used as !((event) & ((1 <<(thresh)) - 1))
72  */
73 #define THRESHOLDS_EVENTS_THRESH (7) /* once in 128 */
74 #define SOFTLIMIT_EVENTS_THRESH (10) /* once in 1024 */
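/*
 * Worked example: __memcg_event_check() below tests whether the low bits of
 * the per-cpu event counter are all zero, so with THRESHOLDS_EVENTS_THRESH
 * set to 7 the threshold check fires once every 2^7 = 128 pagein/pageout
 * events, and the soft-limit check once every 2^10 = 1024 events.
 */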
75
76 /*
77  * Statistics for memory cgroup.
78  */
79 enum mem_cgroup_stat_index {
80         /*
81          * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
82          */
83         MEM_CGROUP_STAT_CACHE,     /* # of pages charged as cache */
84         MEM_CGROUP_STAT_RSS,       /* # of pages charged as anon rss */
85         MEM_CGROUP_STAT_FILE_MAPPED,  /* # of pages charged as file rss */
86         MEM_CGROUP_STAT_PGPGIN_COUNT,   /* # of pages paged in */
87         MEM_CGROUP_STAT_PGPGOUT_COUNT,  /* # of pages paged out */
88         MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
89         MEM_CGROUP_EVENTS,      /* incremented at every  pagein/pageout */
90
91         MEM_CGROUP_STAT_NSTATS,
92 };
93
94 struct mem_cgroup_stat_cpu {
95         s64 count[MEM_CGROUP_STAT_NSTATS];
96 };
97
98 /*
99  * per-zone information in memory controller.
100  */
101 struct mem_cgroup_per_zone {
102         /*
103          * spin_lock to protect the per cgroup LRU
104          */
105         struct list_head        lists[NR_LRU_LISTS];
106         unsigned long           count[NR_LRU_LISTS];
107
108         struct zone_reclaim_stat reclaim_stat;
109         struct rb_node          tree_node;      /* RB tree node */
110         unsigned long long      usage_in_excess;/* Set to the value by which */
111                                                 /* the soft limit is exceeded*/
112         bool                    on_tree;
113         struct mem_cgroup       *mem;           /* Back pointer, we cannot */
114                                                 /* use container_of        */
115 };
116 /* Macro for accessing counter */
117 #define MEM_CGROUP_ZSTAT(mz, idx)       ((mz)->count[(idx)])
118
119 struct mem_cgroup_per_node {
120         struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
121 };
122
123 struct mem_cgroup_lru_info {
124         struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
125 };
126
127 /*
128  * Cgroups above their limits are maintained in a RB-Tree, independent of
129  * their hierarchy representation
130  */
131
132 struct mem_cgroup_tree_per_zone {
133         struct rb_root rb_root;
134         spinlock_t lock;
135 };
136
137 struct mem_cgroup_tree_per_node {
138         struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
139 };
140
141 struct mem_cgroup_tree {
142         struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
143 };
144
145 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
146
147 struct mem_cgroup_threshold {
148         struct eventfd_ctx *eventfd;
149         u64 threshold;
150 };
151
152 struct mem_cgroup_threshold_ary {
153         /* An array index points to threshold just below usage. */
154         atomic_t current_threshold;
155         /* Size of entries[] */
156         unsigned int size;
157         /* Array of thresholds */
158         struct mem_cgroup_threshold entries[0];
159 };
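/*
 * entries[0] is a flexible array member: each threshold array is allocated
 * with room for "size" entries, and the whole array is published and
 * replaced under RCU (see the RCU-protected pointers in struct mem_cgroup
 * below).
 */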
160
161 static void mem_cgroup_threshold(struct mem_cgroup *mem);
162
163 /*
164  * The memory controller data structure. The memory controller controls both
165  * page cache and RSS per cgroup. We would eventually like to provide
166  * statistics based on the statistics developed by Rik Van Riel for clock-pro,
167  * to help the administrator determine what knobs to tune.
168  *
169  * TODO: Add a water mark for the memory controller. Reclaim will begin when
170  * we hit the water mark. Maybe even add a low water mark, such that
171  * no reclaim occurs from a cgroup at its low water mark; this is
172  * a feature that will be implemented much later in the future.
173  */
174 struct mem_cgroup {
175         struct cgroup_subsys_state css;
176         /*
177          * the counter to account for memory usage
178          */
179         struct res_counter res;
180         /*
181          * the counter to account for mem+swap usage.
182          */
183         struct res_counter memsw;
184         /*
185          * Per cgroup active and inactive list, similar to the
186          * per zone LRU lists.
187          */
188         struct mem_cgroup_lru_info info;
189
190         /*
191          * protects the reclaim-related members below.
192         */
193         spinlock_t reclaim_param_lock;
194
195         int     prev_priority;  /* for recording reclaim priority */
196
197         /*
198          * While reclaiming in a hierarchy, we cache the last child we
199          * reclaimed from.
200          */
201         int last_scanned_child;
202         /*
203          * Should the accounting and control be hierarchical, per subtree?
204          */
205         bool use_hierarchy;
206         atomic_t        oom_lock;
207         atomic_t        refcnt;
208
209         unsigned int    swappiness;
210
211         /* set when res.limit == memsw.limit */
212         bool            memsw_is_minimum;
213
214         /* protect arrays of thresholds */
215         struct mutex thresholds_lock;
216
217         /* thresholds for memory usage. RCU-protected */
218         struct mem_cgroup_threshold_ary *thresholds;
219
220         /* thresholds for mem+swap usage. RCU-protected */
221         struct mem_cgroup_threshold_ary *memsw_thresholds;
222
223         /*
224          * Should we move charges of a task when a task is moved into this
225          * mem_cgroup ? And what type of charges should we move ?
226          */
227         unsigned long   move_charge_at_immigrate;
228
229         /*
230          * percpu counter.
231          */
232         struct mem_cgroup_stat_cpu *stat;
233 };
234
235 /* Stuffs for move charges at task migration. */
236 /*
237  * Types of charges to be moved. "move_charge_at_immigrate" is treated as a
238  * left-shifted bitmap of these types.
239  */
240 enum move_type {
241         MOVE_CHARGE_TYPE_ANON,  /* private anonymous page and swap of it */
242         NR_MOVE_TYPE,
243 };
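/*
 * Example: setting bit (1 << MOVE_CHARGE_TYPE_ANON) in
 * move_charge_at_immigrate means private anonymous pages (and the swap
 * entries backing them) follow the task when it is moved into this cgroup.
 */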
244
245 /* "mc" and its members are protected by cgroup_mutex */
246 static struct move_charge_struct {
247         struct mem_cgroup *from;
248         struct mem_cgroup *to;
249         unsigned long precharge;
250         unsigned long moved_charge;
251         unsigned long moved_swap;
252         struct task_struct *moving_task;        /* a task moving charges */
253         wait_queue_head_t waitq;                /* a waitq for other context */
254 } mc = {
255         .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
256 };
257
258 /*
259  * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
260  * limit reclaim to prevent infinite loops, if they ever occur.
261  */
262 #define MEM_CGROUP_MAX_RECLAIM_LOOPS            (100)
263 #define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS (2)
264
265 enum charge_type {
266         MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
267         MEM_CGROUP_CHARGE_TYPE_MAPPED,
268         MEM_CGROUP_CHARGE_TYPE_SHMEM,   /* used by page migration of shmem */
269         MEM_CGROUP_CHARGE_TYPE_FORCE,   /* used by force_empty */
270         MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
271         MEM_CGROUP_CHARGE_TYPE_DROP,    /* a page was unused swap cache */
272         NR_CHARGE_TYPE,
273 };
274
275 /* only for here (for easy reading.) */
276 #define PCGF_CACHE      (1UL << PCG_CACHE)
277 #define PCGF_USED       (1UL << PCG_USED)
278 #define PCGF_LOCK       (1UL << PCG_LOCK)
279 /* Not used, but added here for completeness */
280 #define PCGF_ACCT       (1UL << PCG_ACCT)
281
282 /* for encoding cft->private value on file */
283 #define _MEM                    (0)
284 #define _MEMSWAP                (1)
285 #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
286 #define MEMFILE_TYPE(val)       (((val) >> 16) & 0xffff)
287 #define MEMFILE_ATTR(val)       ((val) & 0xffff)
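/*
 * Example: MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT) packs the counter type into
 * the upper 16 bits and the res_counter attribute into the lower 16 bits of
 * cft->private; MEMFILE_TYPE() and MEMFILE_ATTR() recover the two halves.
 */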
288
289 /*
290  * Reclaim flags for mem_cgroup_hierarchical_reclaim
291  */
292 #define MEM_CGROUP_RECLAIM_NOSWAP_BIT   0x0
293 #define MEM_CGROUP_RECLAIM_NOSWAP       (1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
294 #define MEM_CGROUP_RECLAIM_SHRINK_BIT   0x1
295 #define MEM_CGROUP_RECLAIM_SHRINK       (1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
296 #define MEM_CGROUP_RECLAIM_SOFT_BIT     0x2
297 #define MEM_CGROUP_RECLAIM_SOFT         (1 << MEM_CGROUP_RECLAIM_SOFT_BIT)
298
299 static void mem_cgroup_get(struct mem_cgroup *mem);
300 static void mem_cgroup_put(struct mem_cgroup *mem);
301 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
302 static void drain_all_stock_async(void);
303
304 static struct mem_cgroup_per_zone *
305 mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
306 {
307         return &mem->info.nodeinfo[nid]->zoneinfo[zid];
308 }
309
310 struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
311 {
312         return &mem->css;
313 }
314
315 static struct mem_cgroup_per_zone *
316 page_cgroup_zoneinfo(struct page_cgroup *pc)
317 {
318         struct mem_cgroup *mem = pc->mem_cgroup;
319         int nid = page_cgroup_nid(pc);
320         int zid = page_cgroup_zid(pc);
321
322         if (!mem)
323                 return NULL;
324
325         return mem_cgroup_zoneinfo(mem, nid, zid);
326 }
327
328 static struct mem_cgroup_tree_per_zone *
329 soft_limit_tree_node_zone(int nid, int zid)
330 {
331         return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
332 }
333
334 static struct mem_cgroup_tree_per_zone *
335 soft_limit_tree_from_page(struct page *page)
336 {
337         int nid = page_to_nid(page);
338         int zid = page_zonenum(page);
339
340         return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
341 }
342
343 static void
344 __mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
345                                 struct mem_cgroup_per_zone *mz,
346                                 struct mem_cgroup_tree_per_zone *mctz,
347                                 unsigned long long new_usage_in_excess)
348 {
349         struct rb_node **p = &mctz->rb_root.rb_node;
350         struct rb_node *parent = NULL;
351         struct mem_cgroup_per_zone *mz_node;
352
353         if (mz->on_tree)
354                 return;
355
356         mz->usage_in_excess = new_usage_in_excess;
357         if (!mz->usage_in_excess)
358                 return;
359         while (*p) {
360                 parent = *p;
361                 mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
362                                         tree_node);
363                 if (mz->usage_in_excess < mz_node->usage_in_excess)
364                         p = &(*p)->rb_left;
365                 /*
366                  * We can't avoid mem cgroups that are over their soft
367                  * limit by the same amount
368                  */
369                 else if (mz->usage_in_excess >= mz_node->usage_in_excess)
370                         p = &(*p)->rb_right;
371         }
372         rb_link_node(&mz->tree_node, parent, p);
373         rb_insert_color(&mz->tree_node, &mctz->rb_root);
374         mz->on_tree = true;
375 }
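/*
 * The tree is keyed by usage_in_excess, so its rightmost node is always the
 * per-zone memcg that exceeds its soft limit by the largest amount; that is
 * what __mem_cgroup_largest_soft_limit_node() below picks for reclaim.
 */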
376
377 static void
378 __mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
379                                 struct mem_cgroup_per_zone *mz,
380                                 struct mem_cgroup_tree_per_zone *mctz)
381 {
382         if (!mz->on_tree)
383                 return;
384         rb_erase(&mz->tree_node, &mctz->rb_root);
385         mz->on_tree = false;
386 }
387
388 static void
389 mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
390                                 struct mem_cgroup_per_zone *mz,
391                                 struct mem_cgroup_tree_per_zone *mctz)
392 {
393         spin_lock(&mctz->lock);
394         __mem_cgroup_remove_exceeded(mem, mz, mctz);
395         spin_unlock(&mctz->lock);
396 }
397
398
399 static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
400 {
401         unsigned long long excess;
402         struct mem_cgroup_per_zone *mz;
403         struct mem_cgroup_tree_per_zone *mctz;
404         int nid = page_to_nid(page);
405         int zid = page_zonenum(page);
406         mctz = soft_limit_tree_from_page(page);
407
408         /*
409          * Necessary to update all ancestors when hierarchy is used,
410          * because their event counters are not touched.
411          */
412         for (; mem; mem = parent_mem_cgroup(mem)) {
413                 mz = mem_cgroup_zoneinfo(mem, nid, zid);
414                 excess = res_counter_soft_limit_excess(&mem->res);
415                 /*
416                  * We have to update the tree if mz is on RB-tree or
417                  * mem is over its softlimit.
418                  */
419                 if (excess || mz->on_tree) {
420                         spin_lock(&mctz->lock);
421                         /* if on-tree, remove it */
422                         if (mz->on_tree)
423                                 __mem_cgroup_remove_exceeded(mem, mz, mctz);
424                         /*
425                          * Insert again. mz->usage_in_excess will be updated.
426                          * If excess is 0, no tree ops.
427                          */
428                         __mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
429                         spin_unlock(&mctz->lock);
430                 }
431         }
432 }
433
434 static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem)
435 {
436         int node, zone;
437         struct mem_cgroup_per_zone *mz;
438         struct mem_cgroup_tree_per_zone *mctz;
439
440         for_each_node_state(node, N_POSSIBLE) {
441                 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
442                         mz = mem_cgroup_zoneinfo(mem, node, zone);
443                         mctz = soft_limit_tree_node_zone(node, zone);
444                         mem_cgroup_remove_exceeded(mem, mz, mctz);
445                 }
446         }
447 }
448
449 static inline unsigned long mem_cgroup_get_excess(struct mem_cgroup *mem)
450 {
451         return res_counter_soft_limit_excess(&mem->res) >> PAGE_SHIFT;
452 }
453
454 static struct mem_cgroup_per_zone *
455 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
456 {
457         struct rb_node *rightmost = NULL;
458         struct mem_cgroup_per_zone *mz;
459
460 retry:
461         mz = NULL;
462         rightmost = rb_last(&mctz->rb_root);
463         if (!rightmost)
464                 goto done;              /* Nothing to reclaim from */
465
466         mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
467         /*
468          * Remove the node now but someone else can add it back,
469          * we will add it back at the end of reclaim to its correct
470          * position in the tree.
471          */
472         __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
473         if (!res_counter_soft_limit_excess(&mz->mem->res) ||
474                 !css_tryget(&mz->mem->css))
475                 goto retry;
476 done:
477         return mz;
478 }
479
480 static struct mem_cgroup_per_zone *
481 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
482 {
483         struct mem_cgroup_per_zone *mz;
484
485         spin_lock(&mctz->lock);
486         mz = __mem_cgroup_largest_soft_limit_node(mctz);
487         spin_unlock(&mctz->lock);
488         return mz;
489 }
490
491 static s64 mem_cgroup_read_stat(struct mem_cgroup *mem,
492                 enum mem_cgroup_stat_index idx)
493 {
494         int cpu;
495         s64 val = 0;
496
497         for_each_possible_cpu(cpu)
498                 val += per_cpu(mem->stat->count[idx], cpu);
499         return val;
500 }
501
502 static s64 mem_cgroup_local_usage(struct mem_cgroup *mem)
503 {
504         s64 ret;
505
506         ret = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
507         ret += mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
508         return ret;
509 }
510
511 static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
512                                          bool charge)
513 {
514         int val = (charge) ? 1 : -1;
515         this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
516 }
517
518 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
519                                          struct page_cgroup *pc,
520                                          bool charge)
521 {
522         int val = (charge) ? 1 : -1;
523
524         preempt_disable();
525
526         if (PageCgroupCache(pc))
527                 __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], val);
528         else
529                 __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], val);
530
531         if (charge)
532                 __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGIN_COUNT]);
533         else
534                 __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGOUT_COUNT]);
535         __this_cpu_inc(mem->stat->count[MEM_CGROUP_EVENTS]);
536
537         preempt_enable();
538 }
539
540 static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
541                                         enum lru_list idx)
542 {
543         int nid, zid;
544         struct mem_cgroup_per_zone *mz;
545         u64 total = 0;
546
547         for_each_online_node(nid)
548                 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
549                         mz = mem_cgroup_zoneinfo(mem, nid, zid);
550                         total += MEM_CGROUP_ZSTAT(mz, idx);
551                 }
552         return total;
553 }
554
555 static bool __memcg_event_check(struct mem_cgroup *mem, int event_mask_shift)
556 {
557         s64 val;
558
559         val = this_cpu_read(mem->stat->count[MEM_CGROUP_EVENTS]);
560
561         return !(val & ((1 << event_mask_shift) - 1));
562 }
563
564 /*
565  * Check events in order.
566  *
567  */
568 static void memcg_check_events(struct mem_cgroup *mem, struct page *page)
569 {
570         /* threshold event is triggered in finer grain than soft limit */
571         if (unlikely(__memcg_event_check(mem, THRESHOLDS_EVENTS_THRESH))) {
572                 mem_cgroup_threshold(mem);
573                 if (unlikely(__memcg_event_check(mem, SOFTLIMIT_EVENTS_THRESH)))
574                         mem_cgroup_update_tree(mem, page);
575         }
576 }
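/*
 * Nesting the soft-limit check inside the threshold check loses nothing:
 * 1024 is a multiple of 128, so any counter value that passes the
 * SOFTLIMIT_EVENTS_THRESH mask also passes the THRESHOLDS_EVENTS_THRESH one.
 */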
577
578 static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
579 {
580         return container_of(cgroup_subsys_state(cont,
581                                 mem_cgroup_subsys_id), struct mem_cgroup,
582                                 css);
583 }
584
585 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
586 {
587         /*
588          * mm_update_next_owner() may clear mm->owner to NULL
589          * if it races with swapoff, page migration, etc.
590          * So this can be called with p == NULL.
591          */
592         if (unlikely(!p))
593                 return NULL;
594
595         return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
596                                 struct mem_cgroup, css);
597 }
598
599 static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
600 {
601         struct mem_cgroup *mem = NULL;
602
603         if (!mm)
604                 return NULL;
605         /*
606          * Because we have no locks, mm->owner may be being moved to another
607          * cgroup. We use css_tryget() here even if this looks
608          * pessimistic (rather than adding locks here).
609          */
610         rcu_read_lock();
611         do {
612                 mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
613                 if (unlikely(!mem))
614                         break;
615         } while (!css_tryget(&mem->css));
616         rcu_read_unlock();
617         return mem;
618 }
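/*
 * The css_tryget() loop above covers the case where the owner's memcg is
 * concurrently going away: a failed tryget simply causes mm->owner to be
 * re-read under RCU until a live css is pinned (or no owner is found).
 */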
619
620 /*
621  * Call the callback function against all cgroups under the hierarchy tree.
622  */
623 static int mem_cgroup_walk_tree(struct mem_cgroup *root, void *data,
624                           int (*func)(struct mem_cgroup *, void *))
625 {
626         int found, ret, nextid;
627         struct cgroup_subsys_state *css;
628         struct mem_cgroup *mem;
629
630         if (!root->use_hierarchy)
631                 return (*func)(root, data);
632
633         nextid = 1;
634         do {
635                 ret = 0;
636                 mem = NULL;
637
638                 rcu_read_lock();
639                 css = css_get_next(&mem_cgroup_subsys, nextid, &root->css,
640                                    &found);
641                 if (css && css_tryget(css))
642                         mem = container_of(css, struct mem_cgroup, css);
643                 rcu_read_unlock();
644
645                 if (mem) {
646                         ret = (*func)(mem, data);
647                         css_put(&mem->css);
648                 }
649                 nextid = found + 1;
650         } while (!ret && css);
651
652         return ret;
653 }
654
655 static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
656 {
657         return (mem == root_mem_cgroup);
658 }
659
660 /*
661  * Following LRU functions are allowed to be used without PCG_LOCK.
662  * Operations are called by routines of the global LRU, independently of memcg.
663  * What we have to take care of here is the validity of pc->mem_cgroup.
664  *
665  * Changes to pc->mem_cgroup happen when
666  * 1. charge
667  * 2. moving account
668  * In the typical case, "charge" is done before add-to-LRU. The exception is
669  * SwapCache, which is added to the LRU before it is charged.
670  * If PCG_USED bit is not set, page_cgroup is not added to this private LRU.
671  * When moving account, the page is not on LRU. It's isolated.
672  */
673
674 void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
675 {
676         struct page_cgroup *pc;
677         struct mem_cgroup_per_zone *mz;
678
679         if (mem_cgroup_disabled())
680                 return;
681         pc = lookup_page_cgroup(page);
682         /* can happen while we handle swapcache. */
683         if (!TestClearPageCgroupAcctLRU(pc))
684                 return;
685         VM_BUG_ON(!pc->mem_cgroup);
686         /*
687          * We don't check PCG_USED bit. It's cleared when the "page" is finally
688          * removed from global LRU.
689          */
690         mz = page_cgroup_zoneinfo(pc);
691         MEM_CGROUP_ZSTAT(mz, lru) -= 1;
692         if (mem_cgroup_is_root(pc->mem_cgroup))
693                 return;
694         VM_BUG_ON(list_empty(&pc->lru));
695         list_del_init(&pc->lru);
696         return;
697 }
698
699 void mem_cgroup_del_lru(struct page *page)
700 {
701         mem_cgroup_del_lru_list(page, page_lru(page));
702 }
703
704 void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
705 {
706         struct mem_cgroup_per_zone *mz;
707         struct page_cgroup *pc;
708
709         if (mem_cgroup_disabled())
710                 return;
711
712         pc = lookup_page_cgroup(page);
713         /*
714          * The Used bit is set without atomic ops but after smp_wmb().
715          * To make pc->mem_cgroup visible, insert smp_rmb() here.
716          */
717         smp_rmb();
718         /* unused or root page is not rotated. */
719         if (!PageCgroupUsed(pc) || mem_cgroup_is_root(pc->mem_cgroup))
720                 return;
721         mz = page_cgroup_zoneinfo(pc);
722         list_move(&pc->lru, &mz->lists[lru]);
723 }
724
725 void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
726 {
727         struct page_cgroup *pc;
728         struct mem_cgroup_per_zone *mz;
729
730         if (mem_cgroup_disabled())
731                 return;
732         pc = lookup_page_cgroup(page);
733         VM_BUG_ON(PageCgroupAcctLRU(pc));
734         /*
735          * The Used bit is set without atomic ops but after smp_wmb().
736          * To make pc->mem_cgroup visible, insert smp_rmb() here.
737          */
738         smp_rmb();
739         if (!PageCgroupUsed(pc))
740                 return;
741
742         mz = page_cgroup_zoneinfo(pc);
743         MEM_CGROUP_ZSTAT(mz, lru) += 1;
744         SetPageCgroupAcctLRU(pc);
745         if (mem_cgroup_is_root(pc->mem_cgroup))
746                 return;
747         list_add(&pc->lru, &mz->lists[lru]);
748 }
749
750 /*
751  * When handling SwapCache, pc->mem_cgroup may be changed while it's linked to
752  * the LRU, because the page may be reused after it's fully uncharged (because
753  * of SwapCache behavior). To handle that, unlink the page_cgroup from the LRU
754  * when charging it again. This function is only used to charge SwapCache. It's
755  * done under lock_page() and it's expected that zone->lru_lock is never held.
756  */
757 static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
758 {
759         unsigned long flags;
760         struct zone *zone = page_zone(page);
761         struct page_cgroup *pc = lookup_page_cgroup(page);
762
763         spin_lock_irqsave(&zone->lru_lock, flags);
764         /*
765          * Forget old LRU when this page_cgroup is *not* used. This Used bit
766          * is guarded by lock_page() because the page is SwapCache.
767          */
768         if (!PageCgroupUsed(pc))
769                 mem_cgroup_del_lru_list(page, page_lru(page));
770         spin_unlock_irqrestore(&zone->lru_lock, flags);
771 }
772
773 static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
774 {
775         unsigned long flags;
776         struct zone *zone = page_zone(page);
777         struct page_cgroup *pc = lookup_page_cgroup(page);
778
779         spin_lock_irqsave(&zone->lru_lock, flags);
780         /* link when the page is linked to LRU but page_cgroup isn't */
781         if (PageLRU(page) && !PageCgroupAcctLRU(pc))
782                 mem_cgroup_add_lru_list(page, page_lru(page));
783         spin_unlock_irqrestore(&zone->lru_lock, flags);
784 }
785
786
787 void mem_cgroup_move_lists(struct page *page,
788                            enum lru_list from, enum lru_list to)
789 {
790         if (mem_cgroup_disabled())
791                 return;
792         mem_cgroup_del_lru_list(page, from);
793         mem_cgroup_add_lru_list(page, to);
794 }
795
796 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
797 {
798         int ret;
799         struct mem_cgroup *curr = NULL;
800
801         task_lock(task);
802         rcu_read_lock();
803         curr = try_get_mem_cgroup_from_mm(task->mm);
804         rcu_read_unlock();
805         task_unlock(task);
806         if (!curr)
807                 return 0;
808         /*
809          * We should check use_hierarchy of "mem", not "curr", because checking
810          * use_hierarchy of "curr" here would make this function return true if
811          * hierarchy is enabled in "curr" and "curr" is a child of "mem" in the
812          * *cgroup* hierarchy (even if use_hierarchy is disabled in "mem").
813          */
814         if (mem->use_hierarchy)
815                 ret = css_is_ancestor(&curr->css, &mem->css);
816         else
817                 ret = (curr == mem);
818         css_put(&curr->css);
819         return ret;
820 }
821
822 /*
823  * prev_priority control... this will be used in the memory reclaim path.
824  */
825 int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
826 {
827         int prev_priority;
828
829         spin_lock(&mem->reclaim_param_lock);
830         prev_priority = mem->prev_priority;
831         spin_unlock(&mem->reclaim_param_lock);
832
833         return prev_priority;
834 }
835
836 void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
837 {
838         spin_lock(&mem->reclaim_param_lock);
839         if (priority < mem->prev_priority)
840                 mem->prev_priority = priority;
841         spin_unlock(&mem->reclaim_param_lock);
842 }
843
844 void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
845 {
846         spin_lock(&mem->reclaim_param_lock);
847         mem->prev_priority = priority;
848         spin_unlock(&mem->reclaim_param_lock);
849 }
850
851 static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
852 {
853         unsigned long active;
854         unsigned long inactive;
855         unsigned long gb;
856         unsigned long inactive_ratio;
857
858         inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_ANON);
859         active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_ANON);
860
861         gb = (inactive + active) >> (30 - PAGE_SHIFT);
862         if (gb)
863                 inactive_ratio = int_sqrt(10 * gb);
864         else
865                 inactive_ratio = 1;
866
867         if (present_pages) {
868                 present_pages[0] = inactive;
869                 present_pages[1] = active;
870         }
871
872         return inactive_ratio;
873 }
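/*
 * Worked example (assuming 4KB pages): with 4GB of anon pages in the cgroup,
 * gb = 4 and inactive_ratio = int_sqrt(40) = 6, so
 * mem_cgroup_inactive_anon_is_low() reports "low" while
 * inactive * 6 < active.
 */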
874
875 int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
876 {
877         unsigned long active;
878         unsigned long inactive;
879         unsigned long present_pages[2];
880         unsigned long inactive_ratio;
881
882         inactive_ratio = calc_inactive_ratio(memcg, present_pages);
883
884         inactive = present_pages[0];
885         active = present_pages[1];
886
887         if (inactive * inactive_ratio < active)
888                 return 1;
889
890         return 0;
891 }
892
893 int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
894 {
895         unsigned long active;
896         unsigned long inactive;
897
898         inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_FILE);
899         active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_FILE);
900
901         return (active > inactive);
902 }
903
904 unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
905                                        struct zone *zone,
906                                        enum lru_list lru)
907 {
908         int nid = zone->zone_pgdat->node_id;
909         int zid = zone_idx(zone);
910         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
911
912         return MEM_CGROUP_ZSTAT(mz, lru);
913 }
914
915 struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
916                                                       struct zone *zone)
917 {
918         int nid = zone->zone_pgdat->node_id;
919         int zid = zone_idx(zone);
920         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
921
922         return &mz->reclaim_stat;
923 }
924
925 struct zone_reclaim_stat *
926 mem_cgroup_get_reclaim_stat_from_page(struct page *page)
927 {
928         struct page_cgroup *pc;
929         struct mem_cgroup_per_zone *mz;
930
931         if (mem_cgroup_disabled())
932                 return NULL;
933
934         pc = lookup_page_cgroup(page);
935         /*
936          * The Used bit is set without atomic ops but after smp_wmb().
937          * To make pc->mem_cgroup visible, insert smp_rmb() here.
938          */
939         smp_rmb();
940         if (!PageCgroupUsed(pc))
941                 return NULL;
942
943         mz = page_cgroup_zoneinfo(pc);
944         if (!mz)
945                 return NULL;
946
947         return &mz->reclaim_stat;
948 }
949
950 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
951                                         struct list_head *dst,
952                                         unsigned long *scanned, int order,
953                                         int mode, struct zone *z,
954                                         struct mem_cgroup *mem_cont,
955                                         int active, int file)
956 {
957         unsigned long nr_taken = 0;
958         struct page *page;
959         unsigned long scan;
960         LIST_HEAD(pc_list);
961         struct list_head *src;
962         struct page_cgroup *pc, *tmp;
963         int nid = z->zone_pgdat->node_id;
964         int zid = zone_idx(z);
965         struct mem_cgroup_per_zone *mz;
966         int lru = LRU_FILE * file + active;
967         int ret;
968
969         BUG_ON(!mem_cont);
970         mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
971         src = &mz->lists[lru];
972
973         scan = 0;
974         list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
975                 if (scan >= nr_to_scan)
976                         break;
977
978                 page = pc->page;
979                 if (unlikely(!PageCgroupUsed(pc)))
980                         continue;
981                 if (unlikely(!PageLRU(page)))
982                         continue;
983
984                 scan++;
985                 ret = __isolate_lru_page(page, mode, file);
986                 switch (ret) {
987                 case 0:
988                         list_move(&page->lru, dst);
989                         mem_cgroup_del_lru(page);
990                         nr_taken++;
991                         break;
992                 case -EBUSY:
993                         /* we don't affect global LRU but rotate in our LRU */
994                         mem_cgroup_rotate_lru_list(page, page_lru(page));
995                         break;
996                 default:
997                         break;
998                 }
999         }
1000
1001         *scanned = scan;
1002         return nr_taken;
1003 }
1004
1005 #define mem_cgroup_from_res_counter(counter, member)    \
1006         container_of(counter, struct mem_cgroup, member)
1007
1008 static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
1009 {
1010         if (do_swap_account) {
1011                 if (res_counter_check_under_limit(&mem->res) &&
1012                         res_counter_check_under_limit(&mem->memsw))
1013                         return true;
1014         } else
1015                 if (res_counter_check_under_limit(&mem->res))
1016                         return true;
1017         return false;
1018 }
1019
1020 static unsigned int get_swappiness(struct mem_cgroup *memcg)
1021 {
1022         struct cgroup *cgrp = memcg->css.cgroup;
1023         unsigned int swappiness;
1024
1025         /* root ? */
1026         if (cgrp->parent == NULL)
1027                 return vm_swappiness;
1028
1029         spin_lock(&memcg->reclaim_param_lock);
1030         swappiness = memcg->swappiness;
1031         spin_unlock(&memcg->reclaim_param_lock);
1032
1033         return swappiness;
1034 }
1035
1036 static int mem_cgroup_count_children_cb(struct mem_cgroup *mem, void *data)
1037 {
1038         int *val = data;
1039         (*val)++;
1040         return 0;
1041 }
1042
1043 /**
1044  * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
1045  * @memcg: The memory cgroup that went over limit
1046  * @p: Task that is going to be killed
1047  *
1048  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1049  * enabled
1050  */
1051 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1052 {
1053         struct cgroup *task_cgrp;
1054         struct cgroup *mem_cgrp;
1055         /*
1056          * Need a buffer in BSS, can't rely on allocations. The code relies
1057          * on the assumption that OOM is serialized for memory controller.
1058          * If this assumption is broken, revisit this code.
1059          */
1060         static char memcg_name[PATH_MAX];
1061         int ret;
1062
1063         if (!memcg || !p)
1064                 return;
1065
1066
1067         rcu_read_lock();
1068
1069         mem_cgrp = memcg->css.cgroup;
1070         task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
1071
1072         ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
1073         if (ret < 0) {
1074                 /*
1075                  * Unfortunately, we are unable to convert to a useful name,
1076                  * but we'll still print out the usage information.
1077                  */
1078                 rcu_read_unlock();
1079                 goto done;
1080         }
1081         rcu_read_unlock();
1082
1083         printk(KERN_INFO "Task in %s killed", memcg_name);
1084
1085         rcu_read_lock();
1086         ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
1087         if (ret < 0) {
1088                 rcu_read_unlock();
1089                 goto done;
1090         }
1091         rcu_read_unlock();
1092
1093         /*
1094          * Continues from above, so we don't need a KERN_ level.
1095          */
1096         printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
1097 done:
1098
1099         printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
1100                 res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
1101                 res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
1102                 res_counter_read_u64(&memcg->res, RES_FAILCNT));
1103         printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
1104                 "failcnt %llu\n",
1105                 res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
1106                 res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
1107                 res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
1108 }
1109
1110 /*
1111  * This function returns the number of memcgs under the hierarchy tree.
1112  * Returns 1 (self count) if there are no children.
1113  */
1114 static int mem_cgroup_count_children(struct mem_cgroup *mem)
1115 {
1116         int num = 0;
1117         mem_cgroup_walk_tree(mem, &num, mem_cgroup_count_children_cb);
1118         return num;
1119 }
1120
1121 /*
1122  * Visit the first child (need not be the first child as per the ordering
1123  * of the cgroup list, since we track last_scanned_child) of @mem and use
1124  * that to reclaim free pages from.
1125  */
1126 static struct mem_cgroup *
1127 mem_cgroup_select_victim(struct mem_cgroup *root_mem)
1128 {
1129         struct mem_cgroup *ret = NULL;
1130         struct cgroup_subsys_state *css;
1131         int nextid, found;
1132
1133         if (!root_mem->use_hierarchy) {
1134                 css_get(&root_mem->css);
1135                 ret = root_mem;
1136         }
1137
1138         while (!ret) {
1139                 rcu_read_lock();
1140                 nextid = root_mem->last_scanned_child + 1;
1141                 css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
1142                                    &found);
1143                 if (css && css_tryget(css))
1144                         ret = container_of(css, struct mem_cgroup, css);
1145
1146                 rcu_read_unlock();
1147                 /* Updates scanning parameter */
1148                 spin_lock(&root_mem->reclaim_param_lock);
1149                 if (!css) {
1150                         /* this means start scan from ID:1 */
1151                         root_mem->last_scanned_child = 0;
1152                 } else
1153                         root_mem->last_scanned_child = found;
1154                 spin_unlock(&root_mem->reclaim_param_lock);
1155         }
1156
1157         return ret;
1158 }
1159
1160 /*
1161  * Scan the hierarchy if needed to reclaim memory. We remember the last child
1162  * we reclaimed from, so that we don't end up penalizing one child extensively
1163  * based on its position in the children list.
1164  *
1165  * root_mem is the original ancestor that we've been reclaiming from.
1166  *
1167  * We give up and return to the caller when we visit root_mem twice.
1168  * (other groups can be removed while we're walking....)
1169  *
1170  * If shrink==true, this returns immediately to avoid freeing too much.
1171  */
1172 static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
1173                                                 struct zone *zone,
1174                                                 gfp_t gfp_mask,
1175                                                 unsigned long reclaim_options)
1176 {
1177         struct mem_cgroup *victim;
1178         int ret, total = 0;
1179         int loop = 0;
1180         bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
1181         bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
1182         bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
1183         unsigned long excess = mem_cgroup_get_excess(root_mem);
1184
1185         /* If memsw_is_minimum==1, swap-out is of no use. */
1186         if (root_mem->memsw_is_minimum)
1187                 noswap = true;
1188
1189         while (1) {
1190                 victim = mem_cgroup_select_victim(root_mem);
1191                 if (victim == root_mem) {
1192                         loop++;
1193                         if (loop >= 1)
1194                                 drain_all_stock_async();
1195                         if (loop >= 2) {
1196                                 /*
1197                                  * If we have not been able to reclaim
1198                                  * anything, it might be because there are
1199                                  * no reclaimable pages under this hierarchy
1200                                  */
1201                                 if (!check_soft || !total) {
1202                                         css_put(&victim->css);
1203                                         break;
1204                                 }
1205                                 /*
1206                                  * We want to do more targeted reclaim.
1207                                  * excess >> 2 is neither so large that we
1208                                  * reclaim too much, nor so small that we keep
1209                                  * coming back to reclaim from this cgroup
1210                                  */
1211                                 if (total >= (excess >> 2) ||
1212                                         (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) {
1213                                         css_put(&victim->css);
1214                                         break;
1215                                 }
1216                         }
1217                 }
1218                 if (!mem_cgroup_local_usage(victim)) {
1219                         /* this cgroup's local usage == 0 */
1220                         css_put(&victim->css);
1221                         continue;
1222                 }
1223                 /* we use swappiness of local cgroup */
1224                 if (check_soft)
1225                         ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
1226                                 noswap, get_swappiness(victim), zone,
1227                                 zone->zone_pgdat->node_id);
1228                 else
1229                         ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
1230                                                 noswap, get_swappiness(victim));
1231                 css_put(&victim->css);
1232                 /*
1233                  * When shrinking usage, we can't check whether we should stop
1234                  * here or reclaim more; that depends on the caller.
1235                  * last_scanned_child is enough to keep fairness under the tree.
1236                  */
1237                 if (shrink)
1238                         return ret;
1239                 total += ret;
1240                 if (check_soft) {
1241                         if (res_counter_check_under_soft_limit(&root_mem->res))
1242                                 return total;
1243                 } else if (mem_cgroup_check_under_limit(root_mem))
1244                         return 1 + total;
1245         }
1246         return total;
1247 }
1248
1249 static int mem_cgroup_oom_lock_cb(struct mem_cgroup *mem, void *data)
1250 {
1251         int *val = (int *)data;
1252         int x;
1253         /*
1254          * Logically, we can stop scanning immediately when we find
1255          * a memcg is already locked. But considering unlock ops and
1256          * creation/removal of memcgs, scanning all of them is simpler.
1257          */
1258         x = atomic_inc_return(&mem->oom_lock);
1259         *val = max(x, *val);
1260         return 0;
1261 }
1262 /*
1263  * Check whether an OOM-Killer is already running under our hierarchy.
1264  * If someone is running, return false.
1265  */
1266 static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
1267 {
1268         int lock_count = 0;
1269
1270         mem_cgroup_walk_tree(mem, &lock_count, mem_cgroup_oom_lock_cb);
1271
1272         if (lock_count == 1)
1273                 return true;
1274         return false;
1275 }
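/*
 * lock_count is the maximum value oom_lock reached anywhere in the subtree
 * after the increments above. A maximum of 1 means every memcg went from 0
 * to 1, i.e. no other OOM handler was active under this hierarchy, so the
 * caller now owns the OOM lock.
 */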
1276
1277 static int mem_cgroup_oom_unlock_cb(struct mem_cgroup *mem, void *data)
1278 {
1279         /*
1280          * When a new child is created while the hierarchy is under oom,
1281          * mem_cgroup_oom_lock() may not be called. We have to use
1282          * atomic_add_unless() here.
1283          */
1284         atomic_add_unless(&mem->oom_lock, -1, 0);
1285         return 0;
1286 }
1287
1288 static void mem_cgroup_oom_unlock(struct mem_cgroup *mem)
1289 {
1290         mem_cgroup_walk_tree(mem, NULL, mem_cgroup_oom_unlock_cb);
1291 }
1292
1293 static DEFINE_MUTEX(memcg_oom_mutex);
1294 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1295
1296 struct oom_wait_info {
1297         struct mem_cgroup *mem;
1298         wait_queue_t    wait;
1299 };
1300
1301 static int memcg_oom_wake_function(wait_queue_t *wait,
1302         unsigned mode, int sync, void *arg)
1303 {
1304         struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg;
1305         struct oom_wait_info *oom_wait_info;
1306
1307         oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1308
1309         if (oom_wait_info->mem == wake_mem)
1310                 goto wakeup;
1311         /* if no hierarchy, no match */
1312         if (!oom_wait_info->mem->use_hierarchy || !wake_mem->use_hierarchy)
1313                 return 0;
1314         /*
1315          * Both oom_wait_info->mem and wake_mem are stable under us,
1316          * so we can use css_is_ancestor() without worrying about RCU.
1317          */
1318         if (!css_is_ancestor(&oom_wait_info->mem->css, &wake_mem->css) &&
1319             !css_is_ancestor(&wake_mem->css, &oom_wait_info->mem->css))
1320                 return 0;
1321
1322 wakeup:
1323         return autoremove_wake_function(wait, mode, sync, arg);
1324 }
1325
1326 static void memcg_wakeup_oom(struct mem_cgroup *mem)
1327 {
1328         /* for filtering, pass "mem" as argument. */
1329         __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, mem);
1330 }
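/*
 * The "mem" passed to __wake_up() above arrives at memcg_oom_wake_function()
 * as the wake-up key, so only waiters whose memcg is the waking memcg itself,
 * or is related to it as ancestor/descendant within a use_hierarchy tree,
 * are actually woken.
 */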
1331
1332 /*
1333  * Try to call the OOM killer. Returns false if we should exit the memory-reclaim loop.
1334  */
1335 bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
1336 {
1337         struct oom_wait_info owait;
1338         bool locked;
1339
1340         owait.mem = mem;
1341         owait.wait.flags = 0;
1342         owait.wait.func = memcg_oom_wake_function;
1343         owait.wait.private = current;
1344         INIT_LIST_HEAD(&owait.wait.task_list);
1345
1346         /* At first, try to OOM lock the hierarchy under mem. */
1347         mutex_lock(&memcg_oom_mutex);
1348         locked = mem_cgroup_oom_lock(mem);
1349         /*
1350          * Even if signal_pending(), we can't quit charge() loop without
1351          * accounting. So, UNINTERRUPTIBLE is appropriate. But a SIGKILL
1352          * under OOM is always welcome, so use TASK_KILLABLE here.
1353          */
1354         if (!locked)
1355                 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1356         mutex_unlock(&memcg_oom_mutex);
1357
1358         if (locked)
1359                 mem_cgroup_out_of_memory(mem, mask);
1360         else {
1361                 schedule();
1362                 finish_wait(&memcg_oom_waitq, &owait.wait);
1363         }
1364         mutex_lock(&memcg_oom_mutex);
1365         mem_cgroup_oom_unlock(mem);
1366         memcg_wakeup_oom(mem);
1367         mutex_unlock(&memcg_oom_mutex);
1368
1369         if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
1370                 return false;
1371         /* Give chance to dying process */
1372         schedule_timeout(1);
1373         return true;
1374 }
1375
1376 /*
1377  * Currently used to update mapped file statistics, but the routine can be
1378  * generalized to update other statistics as well.
1379  */
1380 void mem_cgroup_update_file_mapped(struct page *page, int val)
1381 {
1382         struct mem_cgroup *mem;
1383         struct page_cgroup *pc;
1384
1385         pc = lookup_page_cgroup(page);
1386         if (unlikely(!pc))
1387                 return;
1388
1389         lock_page_cgroup(pc);
1390         mem = pc->mem_cgroup;
1391         if (!mem || !PageCgroupUsed(pc))
1392                 goto done;
1393
1394         /*
1395          * Preemption is already disabled. We can use __this_cpu_xxx
1396          */
1397         if (val > 0) {
1398                 __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
1399                 SetPageCgroupFileMapped(pc);
1400         } else {
1401                 __this_cpu_dec(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
1402                 ClearPageCgroupFileMapped(pc);
1403         }
1404
1405 done:
1406         unlock_page_cgroup(pc);
1407 }
1408
1409 /*
1410  * Size of the first charge trial. "32" comes from vmscan.c's magic value.
1411  * TODO: it may be necessary to use bigger numbers on big iron.
1412  */
1413 #define CHARGE_SIZE     (32 * PAGE_SIZE)
1414 struct memcg_stock_pcp {
1415         struct mem_cgroup *cached; /* this is never the root cgroup */
1416         int charge;
1417         struct work_struct work;
1418 };
1419 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1420 static atomic_t memcg_drain_count;
1421
1422 /*
1423  * Try to consume stocked charge on this cpu. On success, PAGE_SIZE is consumed
1424  * from the local stock and true is returned. If the stock is 0 or holds charges
1425  * from a cgroup which is not the current target, false is returned; the stock
1426  * will be refilled later.
1427  */
1428 static bool consume_stock(struct mem_cgroup *mem)
1429 {
1430         struct memcg_stock_pcp *stock;
1431         bool ret = true;
1432
1433         stock = &get_cpu_var(memcg_stock);
1434         if (mem == stock->cached && stock->charge)
1435                 stock->charge -= PAGE_SIZE;
1436         else /* need to call res_counter_charge */
1437                 ret = false;
1438         put_cpu_var(memcg_stock);
1439         return ret;
1440 }
1441
1442 /*
1443  * Return charges cached in the percpu stock to the res_counter and reset the cached information.
1444  */
1445 static void drain_stock(struct memcg_stock_pcp *stock)
1446 {
1447         struct mem_cgroup *old = stock->cached;
1448
1449         if (stock->charge) {
1450                 res_counter_uncharge(&old->res, stock->charge);
1451                 if (do_swap_account)
1452                         res_counter_uncharge(&old->memsw, stock->charge);
1453         }
1454         stock->cached = NULL;
1455         stock->charge = 0;
1456 }
1457
1458 /*
1459  * This must be called with preemption disabled, or by a thread which is
1460  * pinned to the local cpu.
1461  */
1462 static void drain_local_stock(struct work_struct *dummy)
1463 {
1464         struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
1465         drain_stock(stock);
1466 }
1467
1468 /*
1469  * Cache charges (val) taken from the res_counter into the local per-cpu area.
1470  * They will be consumed by consume_stock() later.
1471  */
1472 static void refill_stock(struct mem_cgroup *mem, int val)
1473 {
1474         struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
1475
1476         if (stock->cached != mem) { /* reset if necessary */
1477                 drain_stock(stock);
1478                 stock->cached = mem;
1479         }
1480         stock->charge += val;
1481         put_cpu_var(memcg_stock);
1482 }
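/*
 * consume_stock() and refill_stock() together implement per-cpu charge
 * batching: a charge of CHARGE_SIZE (32 pages) is taken from the res_counter
 * at once and the unused remainder is cached here, so most subsequent
 * single-page charges on this cpu can be satisfied by consume_stock()
 * without touching the shared counter.
 */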
1483
1484 /*
1485  * Tries to drain stocked charges on other cpus. This function is asynchronous
1486  * and just queues a work item per cpu to drain locally on each cpu. The caller
1487  * can expect some charges to be returned to the res_counter later, but cannot
1488  * wait for that.
1489  */
1490 static void drain_all_stock_async(void)
1491 {
1492         int cpu;
1493         /* This function schedules "drain" in an asynchronous way.
1494          * The result of "drain" is not directly handled by callers. So,
1495          * if someone is already draining, we don't have to schedule more work.
1496          * Anyway, the WORK_STRUCT_PENDING check in queue_work_on() will catch
1497          * any race; we just do a loose check here.
1498          */
1499         if (atomic_read(&memcg_drain_count))
1500                 return;
1501         /* Notify other cpus that system-wide "drain" is running */
1502         atomic_inc(&memcg_drain_count);
1503         get_online_cpus();
1504         for_each_online_cpu(cpu) {
1505                 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
1506                 schedule_work_on(cpu, &stock->work);
1507         }
1508         put_online_cpus();
1509         atomic_dec(&memcg_drain_count);
1510         /* We don't wait for flush_work */
1511 }
1512
1513 /* This is a synchronous drain interface. */
1514 static void drain_all_stock_sync(void)
1515 {
1516         /* called when force_empty is called */
1517         atomic_inc(&memcg_drain_count);
1518         schedule_on_each_cpu(drain_local_stock);
1519         atomic_dec(&memcg_drain_count);
1520 }
1521
1522 static int __cpuinit memcg_stock_cpu_callback(struct notifier_block *nb,
1523                                         unsigned long action,
1524                                         void *hcpu)
1525 {
1526         int cpu = (unsigned long)hcpu;
1527         struct memcg_stock_pcp *stock;
1528
1529         if (action != CPU_DEAD)
1530                 return NOTIFY_OK;
1531         stock = &per_cpu(memcg_stock, cpu);
1532         drain_stock(stock);
1533         return NOTIFY_OK;
1534 }
1535
1536 /*
1537  * Unlike the exported interface, an "oom" parameter is added. If oom==true,
1538  * the oom-killer can be invoked.
1539  */
1540 static int __mem_cgroup_try_charge(struct mm_struct *mm,
1541                         gfp_t gfp_mask, struct mem_cgroup **memcg, bool oom)
1542 {
1543         struct mem_cgroup *mem, *mem_over_limit;
1544         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1545         struct res_counter *fail_res;
1546         int csize = CHARGE_SIZE;
1547
1548         /*
1549          * Unlike the global VM's OOM-kill, we are not in a system-wide
1550          * memory shortage. So, allow dying processes to go ahead (bypass
1551          * the charge) in addition to TIF_MEMDIE processes.
1552          */
1553         if (unlikely(test_thread_flag(TIF_MEMDIE)
1554                      || fatal_signal_pending(current)))
1555                 goto bypass;
1556
1557         /*
1558          * We always charge the cgroup the mm_struct belongs to.
1559          * The mm_struct's mem_cgroup changes on task migration if the
1560          * thread group leader migrates. It's possible that mm is not
1561          * set, if so charge the init_mm (happens for pagecache usage).
1562          */
1563         mem = *memcg;
1564         if (likely(!mem)) {
1565                 mem = try_get_mem_cgroup_from_mm(mm);
1566                 *memcg = mem;
1567         } else {
1568                 css_get(&mem->css);
1569         }
1570         if (unlikely(!mem))
1571                 return 0;
1572
1573         VM_BUG_ON(css_is_removed(&mem->css));
1574         if (mem_cgroup_is_root(mem))
1575                 goto done;
1576
1577         while (1) {
1578                 int ret = 0;
1579                 unsigned long flags = 0;
1580
1581                 if (consume_stock(mem))
1582                         goto done;
1583
1584                 ret = res_counter_charge(&mem->res, csize, &fail_res);
1585                 if (likely(!ret)) {
1586                         if (!do_swap_account)
1587                                 break;
1588                         ret = res_counter_charge(&mem->memsw, csize, &fail_res);
1589                         if (likely(!ret))
1590                                 break;
1591                         /* mem+swap counter fails */
1592                         res_counter_uncharge(&mem->res, csize);
1593                         flags |= MEM_CGROUP_RECLAIM_NOSWAP;
1594                         mem_over_limit = mem_cgroup_from_res_counter(fail_res,
1595                                                                         memsw);
1596                 } else
1597                         /* mem counter fails */
1598                         mem_over_limit = mem_cgroup_from_res_counter(fail_res,
1599                                                                         res);
1600
1601                 /* reduce request size and retry */
1602                 if (csize > PAGE_SIZE) {
1603                         csize = PAGE_SIZE;
1604                         continue;
1605                 }
1606                 if (!(gfp_mask & __GFP_WAIT))
1607                         goto nomem;
1608
1609                 ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
1610                                                 gfp_mask, flags);
1611                 if (ret)
1612                         continue;
1613
1614                 /*
1615                  * try_to_free_mem_cgroup_pages() might not give us a full
1616                  * picture of reclaim. Some pages are reclaimed and might be
1617                  * moved to swap cache or just unmapped from the cgroup.
1618                  * Check the limit again to see if the reclaim reduced the
1619                  * current usage of the cgroup before giving up
1620                  *
1621                  */
1622                 if (mem_cgroup_check_under_limit(mem_over_limit))
1623                         continue;
1624
1625                 /* try to avoid oom while someone is moving charge */
1626                 if (mc.moving_task && current != mc.moving_task) {
1627                         struct mem_cgroup *from, *to;
1628                         bool do_continue = false;
1629                         /*
1630                          * There is a small race that "from" or "to" can be
1631                          * freed by rmdir, so we use css_tryget().
1632                          */
1633                         from = mc.from;
1634                         to = mc.to;
1635                         if (from && css_tryget(&from->css)) {
1636                                 if (mem_over_limit->use_hierarchy)
1637                                         do_continue = css_is_ancestor(
1638                                                         &from->css,
1639                                                         &mem_over_limit->css);
1640                                 else
1641                                         do_continue = (from == mem_over_limit);
1642                                 css_put(&from->css);
1643                         }
1644                         if (!do_continue && to && css_tryget(&to->css)) {
1645                                 if (mem_over_limit->use_hierarchy)
1646                                         do_continue = css_is_ancestor(
1647                                                         &to->css,
1648                                                         &mem_over_limit->css);
1649                                 else
1650                                         do_continue = (to == mem_over_limit);
1651                                 css_put(&to->css);
1652                         }
1653                         if (do_continue) {
1654                                 DEFINE_WAIT(wait);
1655                                 prepare_to_wait(&mc.waitq, &wait,
1656                                                         TASK_INTERRUPTIBLE);
1657                                 /* moving charge context might have finished. */
1658                                 if (mc.moving_task)
1659                                         schedule();
1660                                 finish_wait(&mc.waitq, &wait);
1661                                 continue;
1662                         }
1663                 }
1664
1665                 if (!nr_retries--) {
1666                         if (!oom)
1667                                 goto nomem;
1668                         if (mem_cgroup_handle_oom(mem_over_limit, gfp_mask)) {
1669                                 nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1670                                 continue;
1671                         }
1672                         /* When we reach here, the current task is dying. */
1673                         css_put(&mem->css);
1674                         goto bypass;
1675                 }
1676         }
1677         if (csize > PAGE_SIZE)
1678                 refill_stock(mem, csize - PAGE_SIZE);
1679 done:
1680         return 0;
1681 nomem:
1682         css_put(&mem->css);
1683         return -ENOMEM;
1684 bypass:
1685         *memcg = NULL;
1686         return 0;
1687 }
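/*
 * Illustrative sketch (not part of the original source): the expected
 * calling convention around __mem_cgroup_try_charge(). A caller passes
 * a mem_cgroup pointer (NULL means "charge the memcg of @mm") and then
 * either commits the charge to a page_cgroup or cancels it; "success"
 * below is a placeholder for the caller's own condition:
 *
 *	struct mem_cgroup *mem = NULL;
 *
 *	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
 *	if (ret || !mem)
 *		return ret;		(failed, or root/bypass: nothing to commit)
 *	if (success)
 *		__mem_cgroup_commit_charge(mem, pc, ctype);
 *	else
 *		mem_cgroup_cancel_charge(mem);
 */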
1688
1689 /*
1690  * Sometimes we have to undo a charge we got by try_charge().
1691  * This function is for that: it uncharges and puts the css refcount
1692  * taken by try_charge().
1693  */
1694 static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem,
1695                                                         unsigned long count)
1696 {
1697         if (!mem_cgroup_is_root(mem)) {
1698                 res_counter_uncharge(&mem->res, PAGE_SIZE * count);
1699                 if (do_swap_account)
1700                         res_counter_uncharge(&mem->memsw, PAGE_SIZE * count);
1701                 VM_BUG_ON(test_bit(CSS_ROOT, &mem->css.flags));
1702                 WARN_ON_ONCE(count > INT_MAX);
1703                 __css_put(&mem->css, (int)count);
1704         }
1705         /* we don't need css_put for root */
1706 }
1707
1708 static void mem_cgroup_cancel_charge(struct mem_cgroup *mem)
1709 {
1710         __mem_cgroup_cancel_charge(mem, 1);
1711 }
1712
1713 /*
1714  * A helper function to get a mem_cgroup from its ID. Must be called under
1715  * rcu_read_lock(). The caller must check css_is_removed() or similar if
1716  * that is a concern. (Dropping a refcnt from swap can happen against a
1717  * removed memcg.)
1718  */
1719 static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
1720 {
1721         struct cgroup_subsys_state *css;
1722
1723         /* ID 0 is unused ID */
1724         if (!id)
1725                 return NULL;
1726         css = css_lookup(&mem_cgroup_subsys, id);
1727         if (!css)
1728                 return NULL;
1729         return container_of(css, struct mem_cgroup, css);
1730 }
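/*
 * Illustrative sketch (not part of the original source): callers wrap the
 * lookup in rcu_read_lock() and pin the result with css_tryget() before
 * using it, because the memcg may already be under removal (see
 * try_get_mem_cgroup_from_page() below for the real pattern):
 *
 *	rcu_read_lock();
 *	mem = mem_cgroup_lookup(id);
 *	if (mem && !css_tryget(&mem->css))
 *		mem = NULL;		(memcg is being destroyed)
 *	rcu_read_unlock();
 */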
1731
1732 struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
1733 {
1734         struct mem_cgroup *mem = NULL;
1735         struct page_cgroup *pc;
1736         unsigned short id;
1737         swp_entry_t ent;
1738
1739         VM_BUG_ON(!PageLocked(page));
1740
1741         pc = lookup_page_cgroup(page);
1742         lock_page_cgroup(pc);
1743         if (PageCgroupUsed(pc)) {
1744                 mem = pc->mem_cgroup;
1745                 if (mem && !css_tryget(&mem->css))
1746                         mem = NULL;
1747         } else if (PageSwapCache(page)) {
1748                 ent.val = page_private(page);
1749                 id = lookup_swap_cgroup(ent);
1750                 rcu_read_lock();
1751                 mem = mem_cgroup_lookup(id);
1752                 if (mem && !css_tryget(&mem->css))
1753                         mem = NULL;
1754                 rcu_read_unlock();
1755         }
1756         unlock_page_cgroup(pc);
1757         return mem;
1758 }
1759
1760 /*
1761  * Commit a charge got by __mem_cgroup_try_charge() and make the page_cgroup
1762  * enter the USED state. If already USED, uncharge and return.
1763  */
1764
1765 static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
1766                                      struct page_cgroup *pc,
1767                                      enum charge_type ctype)
1768 {
1769         /* try_charge() can return NULL to *memcg; take care of it here. */
1770         if (!mem)
1771                 return;
1772
1773         lock_page_cgroup(pc);
1774         if (unlikely(PageCgroupUsed(pc))) {
1775                 unlock_page_cgroup(pc);
1776                 mem_cgroup_cancel_charge(mem);
1777                 return;
1778         }
1779
1780         pc->mem_cgroup = mem;
1781         /*
1782          * We access a page_cgroup asynchronously without lock_page_cgroup().
1783          * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
1784          * is accessed after testing USED bit. To make pc->mem_cgroup visible
1785          * before USED bit, we need memory barrier here.
1786          * See mem_cgroup_add_lru_list(), etc.
1787          */
1788         smp_wmb();
1789         switch (ctype) {
1790         case MEM_CGROUP_CHARGE_TYPE_CACHE:
1791         case MEM_CGROUP_CHARGE_TYPE_SHMEM:
1792                 SetPageCgroupCache(pc);
1793                 SetPageCgroupUsed(pc);
1794                 break;
1795         case MEM_CGROUP_CHARGE_TYPE_MAPPED:
1796                 ClearPageCgroupCache(pc);
1797                 SetPageCgroupUsed(pc);
1798                 break;
1799         default:
1800                 break;
1801         }
1802
1803         mem_cgroup_charge_statistics(mem, pc, true);
1804
1805         unlock_page_cgroup(pc);
1806         /*
1807          * "charge_statistics" updated event counter. Then, check it.
1808          * "charge_statistics" updated the event counter, so check it here.
1809          * Insert the ancestor (and the ancestor's ancestors) into the softlimit
1810          * RB-tree if they exceed their softlimit.
1811         memcg_check_events(mem, pc->page);
1812 }
1813
1814 /**
1815  * __mem_cgroup_move_account - move account of the page
1816  * @pc: page_cgroup of the page.
1817  * @from: mem_cgroup which the page is moved from.
1818  * @to: mem_cgroup which the page is moved to. @from != @to.
1819  * @uncharge: whether we should call uncharge and css_put against @from.
1820  *
1821  * The caller must confirm following.
1822  * - page is not on LRU (isolate_page() is useful.)
1823  * - the pc is locked, used, and ->mem_cgroup points to @from.
1824  *
1825  * This function doesn't do "charge" nor css_get to the new cgroup. That should
1826  * be done by the caller (__mem_cgroup_try_charge would be useful). If @uncharge
1827  * is true, this function does "uncharge" from the old cgroup; if @uncharge is
1828  * false, it doesn't, and the caller should do the "uncharge" itself.
1829  */
1830
1831 static void __mem_cgroup_move_account(struct page_cgroup *pc,
1832         struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
1833 {
1834         VM_BUG_ON(from == to);
1835         VM_BUG_ON(PageLRU(pc->page));
1836         VM_BUG_ON(!PageCgroupLocked(pc));
1837         VM_BUG_ON(!PageCgroupUsed(pc));
1838         VM_BUG_ON(pc->mem_cgroup != from);
1839
1840         if (PageCgroupFileMapped(pc)) {
1841                 /* Update mapped_file data for mem_cgroup */
1842                 preempt_disable();
1843                 __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
1844                 __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
1845                 preempt_enable();
1846         }
1847         mem_cgroup_charge_statistics(from, pc, false);
1848         if (uncharge)
1849                 /* This is not "cancel", but cancel_charge does all we need. */
1850                 mem_cgroup_cancel_charge(from);
1851
1852         /* caller should have done css_get */
1853         pc->mem_cgroup = to;
1854         mem_cgroup_charge_statistics(to, pc, true);
1855         /*
1856          * We charge against "to", which may not have any tasks. Then "to"
1857          * can be under rmdir(). But in the current implementation, the callers
1858          * of this function are just force_empty() and move charge, so it is
1859          * guaranteed that "to" is never removed. So, we don't check the rmdir
1860          * status here.
1861          */
1862 }
1863
1864 /*
1865  * check whether the @pc is valid for moving account and call
1866  * __mem_cgroup_move_account()
1867  */
1868 static int mem_cgroup_move_account(struct page_cgroup *pc,
1869                 struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
1870 {
1871         int ret = -EINVAL;
1872         lock_page_cgroup(pc);
1873         if (PageCgroupUsed(pc) && pc->mem_cgroup == from) {
1874                 __mem_cgroup_move_account(pc, from, to, uncharge);
1875                 ret = 0;
1876         }
1877         unlock_page_cgroup(pc);
1878         /*
1879          * check events
1880          */
1881         memcg_check_events(to, pc->page);
1882         memcg_check_events(from, pc->page);
1883         return ret;
1884 }
1885
1886 /*
1887  * move charges to its parent.
1888  */
1889
1890 static int mem_cgroup_move_parent(struct page_cgroup *pc,
1891                                   struct mem_cgroup *child,
1892                                   gfp_t gfp_mask)
1893 {
1894         struct page *page = pc->page;
1895         struct cgroup *cg = child->css.cgroup;
1896         struct cgroup *pcg = cg->parent;
1897         struct mem_cgroup *parent;
1898         int ret;
1899
1900         /* Is ROOT ? */
1901         if (!pcg)
1902                 return -EINVAL;
1903
1904         ret = -EBUSY;
1905         if (!get_page_unless_zero(page))
1906                 goto out;
1907         if (isolate_lru_page(page))
1908                 goto put;
1909
1910         parent = mem_cgroup_from_cont(pcg);
1911         ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
1912         if (ret || !parent)
1913                 goto put_back;
1914
1915         ret = mem_cgroup_move_account(pc, child, parent, true);
1916         if (ret)
1917                 mem_cgroup_cancel_charge(parent);
1918 put_back:
1919         putback_lru_page(page);
1920 put:
1921         put_page(page);
1922 out:
1923         return ret;
1924 }
1925
1926 /*
1927  * Charge the memory controller for page usage.
1928  * Return
1929  * 0 if the charge was successful
1930  * < 0 if the cgroup is over its limit
1931  */
1932 static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
1933                                 gfp_t gfp_mask, enum charge_type ctype,
1934                                 struct mem_cgroup *memcg)
1935 {
1936         struct mem_cgroup *mem;
1937         struct page_cgroup *pc;
1938         int ret;
1939
1940         pc = lookup_page_cgroup(page);
1941         /* can happen at boot */
1942         if (unlikely(!pc))
1943                 return 0;
1944         prefetchw(pc);
1945
1946         mem = memcg;
1947         ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
1948         if (ret || !mem)
1949                 return ret;
1950
1951         __mem_cgroup_commit_charge(mem, pc, ctype);
1952         return 0;
1953 }
1954
1955 int mem_cgroup_newpage_charge(struct page *page,
1956                               struct mm_struct *mm, gfp_t gfp_mask)
1957 {
1958         if (mem_cgroup_disabled())
1959                 return 0;
1960         if (PageCompound(page))
1961                 return 0;
1962         /*
1963          * If already mapped, we don't have to account.
1964          * For page cache, page->mapping has an address_space.
1965          * But page->mapping may hold a stale anon_vma pointer;
1966          * detect that with the PageAnon() check. A newly-mapped anon page's
1967          * page->mapping is NULL.
1968          */
1969         if (page_mapped(page) || (page->mapping && !PageAnon(page)))
1970                 return 0;
1971         if (unlikely(!mm))
1972                 mm = &init_mm;
1973         return mem_cgroup_charge_common(page, mm, gfp_mask,
1974                                 MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
1975 }
1976
1977 static void
1978 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
1979                                         enum charge_type ctype);
1980
1981 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
1982                                 gfp_t gfp_mask)
1983 {
1984         struct mem_cgroup *mem = NULL;
1985         int ret;
1986
1987         if (mem_cgroup_disabled())
1988                 return 0;
1989         if (PageCompound(page))
1990                 return 0;
1991         /*
1992          * Corner case handling. This is usually called from
1993          * add_to_page_cache(). But some filesystems (shmem) precharge this page
1994          * before calling it and then call add_to_page_cache() with GFP_NOWAIT.
1995          *
1996          * In the GFP_NOWAIT case, the page may be pre-charged before calling
1997          * add_to_page_cache(). (See shmem.c.) Check it here and avoid charging
1998          * twice. (It works, but at a somewhat larger cost.)
1999          * And when the page is SwapCache, swap information should be taken
2000          * into account. We are under lock_page() now.
2001          */
2002         if (!(gfp_mask & __GFP_WAIT)) {
2003                 struct page_cgroup *pc;
2004
2005
2006                 pc = lookup_page_cgroup(page);
2007                 if (!pc)
2008                         return 0;
2009                 lock_page_cgroup(pc);
2010                 if (PageCgroupUsed(pc)) {
2011                         unlock_page_cgroup(pc);
2012                         return 0;
2013                 }
2014                 unlock_page_cgroup(pc);
2015         }
2016
2017         if (unlikely(!mm && !mem))
2018                 mm = &init_mm;
2019
2020         if (page_is_file_cache(page))
2021                 return mem_cgroup_charge_common(page, mm, gfp_mask,
2022                                 MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
2023
2024         /* shmem */
2025         if (PageSwapCache(page)) {
2026                 ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
2027                 if (!ret)
2028                         __mem_cgroup_commit_charge_swapin(page, mem,
2029                                         MEM_CGROUP_CHARGE_TYPE_SHMEM);
2030         } else
2031                 ret = mem_cgroup_charge_common(page, mm, gfp_mask,
2032                                         MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
2033
2034         return ret;
2035 }
2036
2037 /*
2038  * During swap-in (try_charge -> commit or cancel), the page is locked.
2039  * When try_charge() returns successfully, one refcnt to the memcg (with no
2040  * struct page_cgroup attached yet) has been acquired. This refcnt will be
2041  * consumed by "commit()" or released by "cancel()".
2042  */
2043 int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
2044                                  struct page *page,
2045                                  gfp_t mask, struct mem_cgroup **ptr)
2046 {
2047         struct mem_cgroup *mem;
2048         int ret;
2049
2050         if (mem_cgroup_disabled())
2051                 return 0;
2052
2053         if (!do_swap_account)
2054                 goto charge_cur_mm;
2055         /*
2056          * A racing thread's fault, or swapoff, may have already updated
2057          * the pte, and even removed page from swap cache: in those cases
2058          * do_swap_page()'s pte_same() test will fail; but there's also a
2059          * KSM case which does need to charge the page.
2060          */
2061         if (!PageSwapCache(page))
2062                 goto charge_cur_mm;
2063         mem = try_get_mem_cgroup_from_page(page);
2064         if (!mem)
2065                 goto charge_cur_mm;
2066         *ptr = mem;
2067         ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
2068         /* drop extra refcnt from tryget */
2069         css_put(&mem->css);
2070         return ret;
2071 charge_cur_mm:
2072         if (unlikely(!mm))
2073                 mm = &init_mm;
2074         return __mem_cgroup_try_charge(mm, mask, ptr, true);
2075 }
2076
2077 static void
2078 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2079                                         enum charge_type ctype)
2080 {
2081         struct page_cgroup *pc;
2082
2083         if (mem_cgroup_disabled())
2084                 return;
2085         if (!ptr)
2086                 return;
2087         cgroup_exclude_rmdir(&ptr->css);
2088         pc = lookup_page_cgroup(page);
2089         mem_cgroup_lru_del_before_commit_swapcache(page);
2090         __mem_cgroup_commit_charge(ptr, pc, ctype);
2091         mem_cgroup_lru_add_after_commit_swapcache(page);
2092         /*
2093          * Now the swap is in memory. This means this page may be
2094          * counted both as mem and swap, i.e. double-counted.
2095          * Fix it by uncharging from memsw. Basically, this SwapCache is stable
2096          * under lock_page(). But in do_swap_page() (memory.c), reuse_swap_page()
2097          * may call delete_from_swap_cache() before we reach here.
2098          */
2099         if (do_swap_account && PageSwapCache(page)) {
2100                 swp_entry_t ent = {.val = page_private(page)};
2101                 unsigned short id;
2102                 struct mem_cgroup *memcg;
2103
2104                 id = swap_cgroup_record(ent, 0);
2105                 rcu_read_lock();
2106                 memcg = mem_cgroup_lookup(id);
2107                 if (memcg) {
2108                         /*
2109                          * This recorded memcg can be an obsolete one, so
2110                          * avoid calling css_tryget().
2111                          */
2112                         if (!mem_cgroup_is_root(memcg))
2113                                 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
2114                         mem_cgroup_swap_statistics(memcg, false);
2115                         mem_cgroup_put(memcg);
2116                 }
2117                 rcu_read_unlock();
2118         }
2119         /*
2120          * At swapin, we may charge against a cgroup which has no tasks.
2121          * So, rmdir()->pre_destroy() can be called while we do this charge.
2122          * In that case, we need to call pre_destroy() again. Check it here.
2123          */
2124         cgroup_release_and_wakeup_rmdir(&ptr->css);
2125 }
2126
2127 void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
2128 {
2129         __mem_cgroup_commit_charge_swapin(page, ptr,
2130                                         MEM_CGROUP_CHARGE_TYPE_MAPPED);
2131 }
2132
2133 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
2134 {
2135         if (mem_cgroup_disabled())
2136                 return;
2137         if (!mem)
2138                 return;
2139         mem_cgroup_cancel_charge(mem);
2140 }
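/*
 * Illustrative sketch (not part of the original source): the swap-in
 * charge protocol described above, roughly as the page fault path uses
 * it; "pte_setup_ok" is a placeholder for the caller's own success test:
 *
 *	struct mem_cgroup *ptr = NULL;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
 *		goto out_fail;		(charge failed)
 *	...				(map the page, set up the pte)
 *	if (pte_setup_ok)
 *		mem_cgroup_commit_charge_swapin(page, ptr);
 *	else
 *		mem_cgroup_cancel_charge_swapin(ptr);
 */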
2141
2142 static void
2143 __do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype)
2144 {
2145         struct memcg_batch_info *batch = NULL;
2146         bool uncharge_memsw = true;
2147         /* If swapout, usage of swap doesn't decrease */
2148         if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2149                 uncharge_memsw = false;
2150         /*
2151          * do_batch > 0 when unmapping pages or inode invalidate/truncate.
2152          * In those cases, all pages freed continuously can be expected to be in
2153          * the same cgroup, and we have a chance to coalesce uncharges.
2154          * But we uncharge one by one if the task was OOM-killed (TIF_MEMDIE),
2155          * because we want to do the uncharge as soon as possible.
2156          */
2157         if (!current->memcg_batch.do_batch || test_thread_flag(TIF_MEMDIE))
2158                 goto direct_uncharge;
2159
2160         batch = &current->memcg_batch;
2161         /*
2162          * Usually we do css_get() when we remember a memcg pointer.
2163          * But in this case, we keep res->usage until the end of a series of
2164          * uncharges, so it's ok to ignore the memcg's refcnt.
2165          */
2166         if (!batch->memcg)
2167                 batch->memcg = mem;
2168         /*
2169          * In the typical case, batch->memcg == mem. This means we can
2170          * merge a series of uncharges into one uncharge of the res_counter.
2171          * If not, we uncharge the res_counter one by one.
2172          */
2173         if (batch->memcg != mem)
2174                 goto direct_uncharge;
2175         /* remember freed charge and uncharge it later */
2176         batch->bytes += PAGE_SIZE;
2177         if (uncharge_memsw)
2178                 batch->memsw_bytes += PAGE_SIZE;
2179         return;
2180 direct_uncharge:
2181         res_counter_uncharge(&mem->res, PAGE_SIZE);
2182         if (uncharge_memsw)
2183                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
2184         return;
2185 }
2186
2187 /*
2188  * uncharge if !page_mapped(page)
2189  */
2190 static struct mem_cgroup *
2191 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
2192 {
2193         struct page_cgroup *pc;
2194         struct mem_cgroup *mem = NULL;
2195         struct mem_cgroup_per_zone *mz;
2196
2197         if (mem_cgroup_disabled())
2198                 return NULL;
2199
2200         if (PageSwapCache(page))
2201                 return NULL;
2202
2203         /*
2204          * Check if our page_cgroup is valid
2205          */
2206         pc = lookup_page_cgroup(page);
2207         if (unlikely(!pc || !PageCgroupUsed(pc)))
2208                 return NULL;
2209
2210         lock_page_cgroup(pc);
2211
2212         mem = pc->mem_cgroup;
2213
2214         if (!PageCgroupUsed(pc))
2215                 goto unlock_out;
2216
2217         switch (ctype) {
2218         case MEM_CGROUP_CHARGE_TYPE_MAPPED:
2219         case MEM_CGROUP_CHARGE_TYPE_DROP:
2220                 if (page_mapped(page))
2221                         goto unlock_out;
2222                 break;
2223         case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
2224                 if (!PageAnon(page)) {  /* Shared memory */
2225                         if (page->mapping && !page_is_file_cache(page))
2226                                 goto unlock_out;
2227                 } else if (page_mapped(page)) /* Anon */
2228                                 goto unlock_out;
2229                 break;
2230         default:
2231                 break;
2232         }
2233
2234         if (!mem_cgroup_is_root(mem))
2235                 __do_uncharge(mem, ctype);
2236         if (ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2237                 mem_cgroup_swap_statistics(mem, true);
2238         mem_cgroup_charge_statistics(mem, pc, false);
2239
2240         ClearPageCgroupUsed(pc);
2241         /*
2242          * pc->mem_cgroup is not cleared here. It will be accessed when it's
2243          * freed from the LRU. This is safe because an uncharged page is expected
2244          * not to be reused (it is freed soon). The exception is SwapCache, which
2245          * is handled by special functions.
2246          */
2247
2248         mz = page_cgroup_zoneinfo(pc);
2249         unlock_page_cgroup(pc);
2250
2251         memcg_check_events(mem, page);
2252         /* at swapout, this memcg will be accessed to record to swap */
2253         if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2254                 css_put(&mem->css);
2255
2256         return mem;
2257
2258 unlock_out:
2259         unlock_page_cgroup(pc);
2260         return NULL;
2261 }
2262
2263 void mem_cgroup_uncharge_page(struct page *page)
2264 {
2265         /* early check. */
2266         if (page_mapped(page))
2267                 return;
2268         if (page->mapping && !PageAnon(page))
2269                 return;
2270         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
2271 }
2272
2273 void mem_cgroup_uncharge_cache_page(struct page *page)
2274 {
2275         VM_BUG_ON(page_mapped(page));
2276         VM_BUG_ON(page->mapping);
2277         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
2278 }
2279
2280 /*
2281  * batch_start/batch_end are called in unmap_page_range/invalidate/truncate.
2282  * In those cases, pages are freed continuously and we can expect them to
2283  * be in the same memcg. All these callers themselves limit the number of
2284  * pages freed at once, so uncharge_start/end() is called properly.
2285  * This may be called more than once (nested) in a context.
2286  */
2287
2288 void mem_cgroup_uncharge_start(void)
2289 {
2290         current->memcg_batch.do_batch++;
2291         /* We can nest. */
2292         if (current->memcg_batch.do_batch == 1) {
2293                 current->memcg_batch.memcg = NULL;
2294                 current->memcg_batch.bytes = 0;
2295                 current->memcg_batch.memsw_bytes = 0;
2296         }
2297 }
2298
2299 void mem_cgroup_uncharge_end(void)
2300 {
2301         struct memcg_batch_info *batch = &current->memcg_batch;
2302
2303         if (!batch->do_batch)
2304                 return;
2305
2306         batch->do_batch--;
2307         if (batch->do_batch) /* If stacked, do nothing. */
2308                 return;
2309
2310         if (!batch->memcg)
2311                 return;
2312         /*
2313          * This "batch->memcg" is valid without any css_get/put etc.,
2314          * because we hide the charges behind us.
2315          */
2316         if (batch->bytes)
2317                 res_counter_uncharge(&batch->memcg->res, batch->bytes);
2318         if (batch->memsw_bytes)
2319                 res_counter_uncharge(&batch->memcg->memsw, batch->memsw_bytes);
2320         /* forget this pointer (for sanity check) */
2321         batch->memcg = NULL;
2322 }
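/*
 * Illustrative sketch (not part of the original source): how truncate and
 * unmap paths are expected to use the batching above. Uncharges issued
 * between start() and end() are accumulated in current->memcg_batch and
 * returned to the res_counter in one shot at end(); the loop below is a
 * placeholder for the caller's real page-freeing loop:
 *
 *	mem_cgroup_uncharge_start();
 *	for each page being freed:
 *		mem_cgroup_uncharge_cache_page(page);
 *	mem_cgroup_uncharge_end();
 */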
2323
2324 #ifdef CONFIG_SWAP
2325 /*
2326  * Called after __delete_from_swap_cache(); drops the "page" account.
2327  * The memcg information is recorded in the swap_cgroup of "ent".
2328  */
2329 void
2330 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
2331 {
2332         struct mem_cgroup *memcg;
2333         int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
2334
2335         if (!swapout) /* this was a swap cache but the swap is unused ! */
2336                 ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
2337
2338         memcg = __mem_cgroup_uncharge_common(page, ctype);
2339
2340         /* record memcg information */
2341         if (do_swap_account && swapout && memcg) {
2342                 swap_cgroup_record(ent, css_id(&memcg->css));
2343                 mem_cgroup_get(memcg);
2344         }
2345         if (swapout && memcg)
2346                 css_put(&memcg->css);
2347 }
2348 #endif
2349
2350 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2351 /*
2352  * Called from swap_entry_free(). Removes the record in swap_cgroup and
2353  * uncharges the "memsw" account.
2354  */
2355 void mem_cgroup_uncharge_swap(swp_entry_t ent)
2356 {
2357         struct mem_cgroup *memcg;
2358         unsigned short id;
2359
2360         if (!do_swap_account)
2361                 return;
2362
2363         id = swap_cgroup_record(ent, 0);
2364         rcu_read_lock();
2365         memcg = mem_cgroup_lookup(id);
2366         if (memcg) {
2367                 /*
2368                  * We uncharge this because the swap is freed.
2369                  * This memcg can be an obsolete one; we avoid calling css_tryget().
2370                  */
2371                 if (!mem_cgroup_is_root(memcg))
2372                         res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
2373                 mem_cgroup_swap_statistics(memcg, false);
2374                 mem_cgroup_put(memcg);
2375         }
2376         rcu_read_unlock();
2377 }
2378
2379 /**
2380  * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2381  * @entry: swap entry to be moved
2382  * @from:  mem_cgroup which the entry is moved from
2383  * @to:  mem_cgroup which the entry is moved to
2384  * @need_fixup: whether we should fixup res_counters and refcounts.
2385  *
2386  * It succeeds only when the swap_cgroup's record for this entry is the same
2387  * as the mem_cgroup's id of @from.
2388  *
2389  * Returns 0 on success, -EINVAL on failure.
2390  *
2391  * The caller must have charged to @to, IOW, called res_counter_charge() about
2392  * both res and memsw, and called css_get().
2393  */
2394 static int mem_cgroup_move_swap_account(swp_entry_t entry,
2395                 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
2396 {
2397         unsigned short old_id, new_id;
2398
2399         old_id = css_id(&from->css);
2400         new_id = css_id(&to->css);
2401
2402         if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
2403                 mem_cgroup_swap_statistics(from, false);
2404                 mem_cgroup_swap_statistics(to, true);
2405                 /*
2406                  * This function is only called from task migration context now.
2407                  * It postpones res_counter and refcount handling till the end
2408                  * of task migration(mem_cgroup_clear_mc()) for performance
2409                  * improvement. But we cannot postpone mem_cgroup_get(to)
2410                  * because if the process that has been moved to @to does
2411                  * swap-in, the refcount of @to might be decreased to 0.
2412                  */
2413                 mem_cgroup_get(to);
2414                 if (need_fixup) {
2415                         if (!mem_cgroup_is_root(from))
2416                                 res_counter_uncharge(&from->memsw, PAGE_SIZE);
2417                         mem_cgroup_put(from);
2418                         /*
2419                          * we charged both to->res and to->memsw, so we should
2420                          * uncharge to->res.
2421                          */
2422                         if (!mem_cgroup_is_root(to))
2423                                 res_counter_uncharge(&to->res, PAGE_SIZE);
2424                         css_put(&to->css);
2425                 }
2426                 return 0;
2427         }
2428         return -EINVAL;
2429 }
2430 #else
2431 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
2432                 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
2433 {
2434         return -EINVAL;
2435 }
2436 #endif
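/*
 * Illustrative sketch (not part of the original source): the caller-side
 * contract documented for mem_cgroup_move_swap_account() above. The real
 * caller is the move-charge (task migration) path; this only shows the
 * ordering of the precharge and the call:
 *
 *	(charge PAGE_SIZE to to->res and to->memsw, then css_get(&to->css))
 *	if (mem_cgroup_move_swap_account(ent, from, to, need_fixup))
 *		(record did not match @from; the caller keeps or undoes
 *		 its own precharge)
 */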
2437
2438 /*
2439  * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
2440  * page belongs to.
2441  */
2442 int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
2443 {
2444         struct page_cgroup *pc;
2445         struct mem_cgroup *mem = NULL;
2446         int ret = 0;
2447
2448         if (mem_cgroup_disabled())
2449                 return 0;
2450
2451         pc = lookup_page_cgroup(page);
2452         lock_page_cgroup(pc);
2453         if (PageCgroupUsed(pc)) {
2454                 mem = pc->mem_cgroup;
2455                 css_get(&mem->css);
2456         }
2457         unlock_page_cgroup(pc);
2458
2459         *ptr = mem;
2460         if (mem) {
2461                 ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false);
2462                 css_put(&mem->css);
2463         }
2464         return ret;
2465 }
2466
2467 /* remove the redundant charge if migration failed */
2468 void mem_cgroup_end_migration(struct mem_cgroup *mem,
2469                 struct page *oldpage, struct page *newpage)
2470 {
2471         struct page *target, *unused;
2472         struct page_cgroup *pc;
2473         enum charge_type ctype;
2474
2475         if (!mem)
2476                 return;
2477         cgroup_exclude_rmdir(&mem->css);
2478         /* at migration success, oldpage->mapping is NULL. */
2479         if (oldpage->mapping) {
2480                 target = oldpage;
2481                 unused = NULL;
2482         } else {
2483                 target = newpage;
2484                 unused = oldpage;
2485         }
2486
2487         if (PageAnon(target))
2488                 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
2489         else if (page_is_file_cache(target))
2490                 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
2491         else
2492                 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
2493
2494         /* unused page is not on radix-tree now. */
2495         if (unused)
2496                 __mem_cgroup_uncharge_common(unused, ctype);
2497
2498         pc = lookup_page_cgroup(target);
2499         /*
2500          * __mem_cgroup_commit_charge() checks the PCG_USED bit of the page_cgroup.
2501          * So, double-counting is effectively avoided.
2502          */
2503         __mem_cgroup_commit_charge(mem, pc, ctype);
2504
2505         /*
2506          * Both oldpage and newpage are still under lock_page(),
2507          * so we don't have to care about races in the radix-tree.
2508          * But we have to be careful whether this page is mapped or not.
2509          *
2510          * There is a case for !page_mapped(): at the start of
2511          * migration, oldpage was mapped, but now it has been zapped.
2512          * Still, we know the *target* page is not freed/reused under us;
2513          * mem_cgroup_uncharge_page() does all necessary checks.
2514          */
2515         if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
2516                 mem_cgroup_uncharge_page(target);
2517         /*
2518          * At migration, we may charge against a cgroup which has no tasks.
2519          * So, rmdir()->pre_destroy() can be called while we do this charge.
2520          * In that case, we need to call pre_destroy() again. Check it here.
2521          */
2522         cgroup_release_and_wakeup_rmdir(&mem->css);
2523 }
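/*
 * Illustrative sketch (not part of the original source): how the two
 * migration hooks above pair up around page migration (the real caller
 * lives in the page migration code; error handling is trimmed):
 *
 *	struct mem_cgroup *mem = NULL;
 *
 *	ret = mem_cgroup_prepare_migration(oldpage, &mem);
 *	if (ret)
 *		return ret;		(charge failed, do not migrate)
 *	...				(copy oldpage to newpage)
 *	mem_cgroup_end_migration(mem, oldpage, newpage);
 */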
2524
2525 /*
2526  * A call to try to shrink memory usage on charge failure at shmem's swapin.
2527  * Calling hierarchical_reclaim is not enough because we should update
2528  * last_oom_jiffies to prevent pagefault_out_of_memory from invoking global OOM.
2529  * Moreover, considering the hierarchy, we should reclaim from the
2530  * mem_over_limit, not from the memcg which this page would be charged to.
2531  * try_charge_swapin does all of this work properly.
2532  */
2533 int mem_cgroup_shmem_charge_fallback(struct page *page,
2534                             struct mm_struct *mm,
2535                             gfp_t gfp_mask)
2536 {
2537         struct mem_cgroup *mem = NULL;
2538         int ret;
2539
2540         if (mem_cgroup_disabled())
2541                 return 0;
2542
2543         ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
2544         if (!ret)
2545                 mem_cgroup_cancel_charge_swapin(mem); /* it does !mem check */
2546
2547         return ret;
2548 }
2549
2550 static DEFINE_MUTEX(set_limit_mutex);
2551
2552 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2553                                 unsigned long long val)
2554 {
2555         int retry_count;
2556         u64 memswlimit;
2557         int ret = 0;
2558         int children = mem_cgroup_count_children(memcg);
2559         u64 curusage, oldusage;
2560
2561         /*
2562          * To keep hierarchical_reclaim simple, how long we should retry
2563          * depends on the caller. We set our retry count to be a function
2564          * of the number of children we should visit in this loop.
2565          */
2566         retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
2567
2568         oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
2569
2570         while (retry_count) {
2571                 if (signal_pending(current)) {
2572                         ret = -EINTR;
2573                         break;
2574                 }
2575                 /*
2576                  * Rather than hiding all of this in some function, do it in
2577                  * an open-coded manner so you can see what it really does.
2578                  * We have to guarantee mem->res.limit <= mem->memsw.limit.
2579                  */
2580                 mutex_lock(&set_limit_mutex);
2581                 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
2582                 if (memswlimit < val) {
2583                         ret = -EINVAL;
2584                         mutex_unlock(&set_limit_mutex);
2585                         break;
2586                 }
2587                 ret = res_counter_set_limit(&memcg->res, val);
2588                 if (!ret) {
2589                         if (memswlimit == val)
2590                                 memcg->memsw_is_minimum = true;
2591                         else
2592                                 memcg->memsw_is_minimum = false;
2593                 }
2594                 mutex_unlock(&set_limit_mutex);
2595
2596                 if (!ret)
2597                         break;
2598
2599                 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
2600                                                 MEM_CGROUP_RECLAIM_SHRINK);
2601                 curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
2602                 /* Usage is reduced ? */
2603                 if (curusage >= oldusage)
2604                         retry_count--;
2605                 else
2606                         oldusage = curusage;
2607         }
2608
2609         return ret;
2610 }
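/*
 * Illustrative note (not part of the original source): the check above
 * means memory.limit_in_bytes can never be raised above
 * memory.memsw.limit_in_bytes (and vice versa below). For example,
 * assuming the memory hierarchy is mounted at /cgroups/memory:
 *
 *	# echo 256M > /cgroups/memory/A/memory.limit_in_bytes
 *	# echo 512M > /cgroups/memory/A/memory.memsw.limit_in_bytes	(ok)
 *	# echo 1G > /cgroups/memory/A/memory.limit_in_bytes	(rejected, -EINVAL)
 */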
2611
2612 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
2613                                         unsigned long long val)
2614 {
2615         int retry_count;
2616         u64 memlimit, oldusage, curusage;
2617         int children = mem_cgroup_count_children(memcg);
2618         int ret = -EBUSY;
2619
2620         /* see mem_cgroup_resize_limit() */
2621         retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
2622         oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
2623         while (retry_count) {
2624                 if (signal_pending(current)) {
2625                         ret = -EINTR;
2626                         break;
2627                 }
2628                 /*
2629                  * Rather than hiding all of this in some function, do it in
2630                  * an open-coded manner so you can see what it really does.
2631                  * We have to guarantee mem->res.limit <= mem->memsw.limit.
2632                  */
2633                 mutex_lock(&set_limit_mutex);
2634                 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
2635                 if (memlimit > val) {
2636                         ret = -EINVAL;
2637                         mutex_unlock(&set_limit_mutex);
2638                         break;
2639                 }
2640                 ret = res_counter_set_limit(&memcg->memsw, val);
2641                 if (!ret) {
2642                         if (memlimit == val)
2643                                 memcg->memsw_is_minimum = true;
2644                         else
2645                                 memcg->memsw_is_minimum = false;
2646                 }
2647                 mutex_unlock(&set_limit_mutex);
2648
2649                 if (!ret)
2650                         break;
2651
2652                 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
2653                                                 MEM_CGROUP_RECLAIM_NOSWAP |
2654                                                 MEM_CGROUP_RECLAIM_SHRINK);
2655                 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
2656                 /* Usage is reduced ? */
2657                 if (curusage >= oldusage)
2658                         retry_count--;
2659                 else
2660                         oldusage = curusage;
2661         }
2662         return ret;
2663 }
2664
2665 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
2666                                                 gfp_t gfp_mask, int nid,
2667                                                 int zid)
2668 {
2669         unsigned long nr_reclaimed = 0;
2670         struct mem_cgroup_per_zone *mz, *next_mz = NULL;
2671         unsigned long reclaimed;
2672         int loop = 0;
2673         struct mem_cgroup_tree_per_zone *mctz;
2674         unsigned long long excess;
2675
2676         if (order > 0)
2677                 return 0;
2678
2679         mctz = soft_limit_tree_node_zone(nid, zid);
2680         /*
2681          * This loop can run for a while, especially if mem_cgroups continuously
2682          * keep exceeding their soft limit and putting the system under
2683          * pressure.
2684          */
2685         do {
2686                 if (next_mz)
2687                         mz = next_mz;
2688                 else
2689                         mz = mem_cgroup_largest_soft_limit_node(mctz);
2690                 if (!mz)
2691                         break;
2692
2693                 reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone,
2694                                                 gfp_mask,
2695                                                 MEM_CGROUP_RECLAIM_SOFT);
2696                 nr_reclaimed += reclaimed;
2697                 spin_lock(&mctz->lock);
2698
2699                 /*
2700                  * If we failed to reclaim anything from this memory cgroup
2701                  * it is time to move on to the next cgroup
2702                  */
2703                 next_mz = NULL;
2704                 if (!reclaimed) {
2705                         do {
2706                                 /*
2707                                  * Loop until we find yet another one.
2708                                  *
2709                                  * By the time we get the soft_limit lock
2710                                  * again, someone might have added the
2711                                  * group back on the RB tree. Iterate to
2712                                  * make sure we get a different mem.
2713                                  * mem_cgroup_largest_soft_limit_node returns
2714                                  * NULL if no other cgroup is present on
2715                                  * the tree
2716                                  */
2717                                 next_mz =
2718                                 __mem_cgroup_largest_soft_limit_node(mctz);
2719                                 if (next_mz == mz) {
2720                                         css_put(&next_mz->mem->css);
2721                                         next_mz = NULL;
2722                                 } else /* next_mz == NULL or other memcg */
2723                                         break;
2724                         } while (1);
2725                 }
2726                 __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
2727                 excess = res_counter_soft_limit_excess(&mz->mem->res);
2728                 /*
2729                  * One school of thought says that we should not add
2730                  * back the node to the tree if reclaim returns 0.
2731                  * But our reclaim could return 0 simply because, due
2732                  * to priority, we are exposing a smaller subset of
2733                  * memory to reclaim from. Consider this as a longer
2734                  * term TODO.
2735                  */
2736                 /* If excess == 0, no tree ops */
2737                 __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
2738                 spin_unlock(&mctz->lock);
2739                 css_put(&mz->mem->css);
2740                 loop++;
2741                 /*
2742                  * Could not reclaim anything and there are no more
2743                  * mem cgroups to try or we seem to be looping without
2744                  * reclaiming anything.
2745                  */
2746                 if (!nr_reclaimed &&
2747                         (next_mz == NULL ||
2748                         loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
2749                         break;
2750         } while (!nr_reclaimed);
2751         if (next_mz)
2752                 css_put(&next_mz->mem->css);
2753         return nr_reclaimed;
2754 }
2755
2756 /*
2757  * This routine traverses the page_cgroups in the given list and drops them all.
2758  * *And* it doesn't reclaim the pages themselves; it just removes the page_cgroups.
2759  */
2760 static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
2761                                 int node, int zid, enum lru_list lru)
2762 {
2763         struct zone *zone;
2764         struct mem_cgroup_per_zone *mz;
2765         struct page_cgroup *pc, *busy;
2766         unsigned long flags, loop;
2767         struct list_head *list;
2768         int ret = 0;
2769
2770         zone = &NODE_DATA(node)->node_zones[zid];
2771         mz = mem_cgroup_zoneinfo(mem, node, zid);
2772         list = &mz->lists[lru];
2773
2774         loop = MEM_CGROUP_ZSTAT(mz, lru);
2775         /* give some margin against EBUSY etc...*/
2776         loop += 256;
2777         busy = NULL;
2778         while (loop--) {
2779                 ret = 0;
2780                 spin_lock_irqsave(&zone->lru_lock, flags);
2781                 if (list_empty(list)) {
2782                         spin_unlock_irqrestore(&zone->lru_lock, flags);
2783                         break;
2784                 }
2785                 pc = list_entry(list->prev, struct page_cgroup, lru);
2786                 if (busy == pc) {
2787                         list_move(&pc->lru, list);
2788                         busy = NULL;
2789                         spin_unlock_irqrestore(&zone->lru_lock, flags);
2790                         continue;
2791                 }
2792                 spin_unlock_irqrestore(&zone->lru_lock, flags);
2793
2794                 ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
2795                 if (ret == -ENOMEM)
2796                         break;
2797
2798                 if (ret == -EBUSY || ret == -EINVAL) {
2799                         /* found lock contention or "pc" is obsolete. */
2800                         busy = pc;
2801                         cond_resched();
2802                 } else
2803                         busy = NULL;
2804         }
2805
2806         if (!ret && !list_empty(list))
2807                 return -EBUSY;
2808         return ret;
2809 }
2810
2811 /*
2812  * Make the mem_cgroup's charge 0 if there is no task.
2813  * This enables deleting this mem_cgroup.
2814  */
2815 static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
2816 {
2817         int ret;
2818         int node, zid, shrink;
2819         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2820         struct cgroup *cgrp = mem->css.cgroup;
2821
2822         css_get(&mem->css);
2823
2824         shrink = 0;
2825         /* should free all ? */
2826         if (free_all)
2827                 goto try_to_free;
2828 move_account:
2829         do {
2830                 ret = -EBUSY;
2831                 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
2832                         goto out;
2833                 ret = -EINTR;
2834                 if (signal_pending(current))
2835                         goto out;
2836                 /* This is for making all *used* pages be on the LRU. */
2837                 lru_add_drain_all();
2838                 drain_all_stock_sync();
2839                 ret = 0;
2840                 for_each_node_state(node, N_HIGH_MEMORY) {
2841                         for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
2842                                 enum lru_list l;
2843                                 for_each_lru(l) {
2844                                         ret = mem_cgroup_force_empty_list(mem,
2845                                                         node, zid, l);
2846                                         if (ret)
2847                                                 break;
2848                                 }
2849                         }
2850                         if (ret)
2851                                 break;
2852                 }
2853                 /* it seems the parent cgroup doesn't have enough memory */
2854                 if (ret == -ENOMEM)
2855                         goto try_to_free;
2856                 cond_resched();
2857         /* "ret" should also be checked to ensure all lists are empty. */
2858         } while (mem->res.usage > 0 || ret);
2859 out:
2860         css_put(&mem->css);
2861         return ret;
2862
2863 try_to_free:
2864         /* returns EBUSY if there is a task or if we come here twice. */
2865         if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
2866                 ret = -EBUSY;
2867                 goto out;
2868         }
2869         /* we call try-to-free pages to make this cgroup empty */
2870         lru_add_drain_all();
2871         /* try to free all pages in this cgroup */
2872         shrink = 1;
2873         while (nr_retries && mem->res.usage > 0) {
2874                 int progress;
2875
2876                 if (signal_pending(current)) {
2877                         ret = -EINTR;
2878                         goto out;
2879                 }
2880                 progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
2881                                                 false, get_swappiness(mem));
2882                 if (!progress) {
2883                         nr_retries--;
2884                         /* maybe some writeback is necessary */
2885                         congestion_wait(BLK_RW_ASYNC, HZ/10);
2886                 }
2887
2888         }
2889         lru_add_drain();
2890         /* try move_account...there may be some *locked* pages. */
2891         goto move_account;
2892 }
2893
2894 int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
2895 {
2896         return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
2897 }
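/*
 * Illustrative note (not part of the original source): force_empty backs
 * the memory.force_empty control file. Writing to it drains the group's
 * charges (moving them to the parent or reclaiming the pages) so that the
 * group can be removed; the group must have no tasks, and the mount point
 * below is an assumption:
 *
 *	# echo 0 > /cgroups/memory/A/memory.force_empty
 *	# rmdir /cgroups/memory/A
 */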
2898
2899
2900 static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
2901 {
2902         return mem_cgroup_from_cont(cont)->use_hierarchy;
2903 }
2904
2905 static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
2906                                         u64 val)
2907 {
2908         int retval = 0;
2909         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
2910         struct cgroup *parent = cont->parent;
2911         struct mem_cgroup *parent_mem = NULL;
2912
2913         if (parent)
2914                 parent_mem = mem_cgroup_from_cont(parent);
2915
2916         cgroup_lock();
2917         /*
2918          * If parent's use_hierarchy is set, we can't make any modifications
2919          * in the child subtrees. If it is unset, then the change can
2920          * occur, provided the current cgroup has no children.
2921          *
2922          * For the root cgroup, parent_mem is NULL; we allow the value to be
2923          * set if there are no children.
2924          */
2925         if ((!parent_mem || !parent_mem->use_hierarchy) &&
2926                                 (val == 1 || val == 0)) {
2927                 if (list_empty(&cont->children))
2928                         mem->use_hierarchy = val;
2929                 else
2930                         retval = -EBUSY;
2931         } else
2932                 retval = -EINVAL;
2933         cgroup_unlock();
2934
2935         return retval;
2936 }
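/*
 * Illustration (paths are an assumption of the example): per the checks in
 * mem_cgroup_hierarchy_write(), use_hierarchy can only be changed while the
 * group has no children and its parent is not itself hierarchical, so it is
 * normally set right after the group is created:
 *
 *   # mkdir /sys/fs/cgroup/memory/parent
 *   # echo 1 > /sys/fs/cgroup/memory/parent/memory.use_hierarchy
 *   # mkdir /sys/fs/cgroup/memory/parent/child   (the child inherits the flag)
 */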
2937
2938 struct mem_cgroup_idx_data {
2939         s64 val;
2940         enum mem_cgroup_stat_index idx;
2941 };
2942
2943 static int
2944 mem_cgroup_get_idx_stat(struct mem_cgroup *mem, void *data)
2945 {
2946         struct mem_cgroup_idx_data *d = data;
2947         d->val += mem_cgroup_read_stat(mem, d->idx);
2948         return 0;
2949 }
2950
2951 static void
2952 mem_cgroup_get_recursive_idx_stat(struct mem_cgroup *mem,
2953                                 enum mem_cgroup_stat_index idx, s64 *val)
2954 {
2955         struct mem_cgroup_idx_data d;
2956         d.idx = idx;
2957         d.val = 0;
2958         mem_cgroup_walk_tree(mem, &d, mem_cgroup_get_idx_stat);
2959         *val = d.val;
2960 }
2961
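/*
 * Current usage in bytes. For the root memcg the value is derived from the
 * hierarchical CACHE + RSS (+ SWAPOUT when swap is requested) statistics
 * rather than from the res_counter; those counters are kept in pages, hence
 * the shift by PAGE_SHIFT at the end.
 */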
2962 static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap)
2963 {
2964         u64 idx_val, val;
2965
2966         if (!mem_cgroup_is_root(mem)) {
2967                 if (!swap)
2968                         return res_counter_read_u64(&mem->res, RES_USAGE);
2969                 else
2970                         return res_counter_read_u64(&mem->memsw, RES_USAGE);
2971         }
2972
2973         mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_CACHE, &idx_val);
2974         val = idx_val;
2975         mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_RSS, &idx_val);
2976         val += idx_val;
2977
2978         if (swap) {
2979                 mem_cgroup_get_recursive_idx_stat(mem,
2980                                 MEM_CGROUP_STAT_SWAPOUT, &idx_val);
2981                 val += idx_val;
2982         }
2983
2984         return val << PAGE_SHIFT;
2985 }
2986
2987 static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
2988 {
2989         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
2990         u64 val;
2991         int type, name;
2992
2993         type = MEMFILE_TYPE(cft->private);
2994         name = MEMFILE_ATTR(cft->private);
2995         switch (type) {
2996         case _MEM:
2997                 if (name == RES_USAGE)
2998                         val = mem_cgroup_usage(mem, false);
2999                 else
3000                         val = res_counter_read_u64(&mem->res, name);
3001                 break;
3002         case _MEMSWAP:
3003                 if (name == RES_USAGE)
3004                         val = mem_cgroup_usage(mem, true);
3005                 else
3006                         val = res_counter_read_u64(&mem->memsw, name);
3007                 break;
3008         default:
3009                 BUG();
3010                 break;
3011         }
3012         return val;
3013 }
3014 /*
3015  * The users of this function are the write handlers for
3016  * RES_LIMIT and RES_SOFT_LIMIT.
3017  */
3018 static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
3019                             const char *buffer)
3020 {
3021         struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
3022         int type, name;
3023         unsigned long long val;
3024         int ret;
3025
3026         type = MEMFILE_TYPE(cft->private);
3027         name = MEMFILE_ATTR(cft->private);
3028         switch (name) {
3029         case RES_LIMIT:
3030                 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3031                         ret = -EINVAL;
3032                         break;
3033                 }
3034                 /* This function does all the necessary parsing...reuse it */
3035                 ret = res_counter_memparse_write_strategy(buffer, &val);
3036                 if (ret)
3037                         break;
3038                 if (type == _MEM)
3039                         ret = mem_cgroup_resize_limit(memcg, val);
3040                 else
3041                         ret = mem_cgroup_resize_memsw_limit(memcg, val);
3042                 break;
3043         case RES_SOFT_LIMIT:
3044                 ret = res_counter_memparse_write_strategy(buffer, &val);
3045                 if (ret)
3046                         break;
3047                 /*
3048                  * For memsw, soft limits are hard to implement in terms
3049                  * of semantics. For now, we support soft limits only for
3050                  * memory control without swap.
3051                  */
3052                 if (type == _MEM)
3053                         ret = res_counter_set_soft_limit(&memcg->res, val);
3054                 else
3055                         ret = -EINVAL;
3056                 break;
3057         default:
3058                 ret = -EINVAL; /* should be BUG() ? */
3059                 break;
3060         }
3061         return ret;
3062 }
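/*
 * Illustration (the cgroupfs path is an assumption of the example):
 * res_counter_memparse_write_strategy() accepts memparse suffixes, so the
 * limits can be written as e.g.
 *
 *   # echo 512M > /sys/fs/cgroup/memory/<group>/memory.limit_in_bytes
 *   # echo 1G   > /sys/fs/cgroup/memory/<group>/memory.soft_limit_in_bytes
 *
 * and, in this tree, "-1" is treated as RESOURCE_MAX (no limit). Setting a
 * limit on the root cgroup is rejected with -EINVAL, and soft limits are
 * supported only for memory, not memsw (see the comment above).
 */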
3063
3064 static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
3065                 unsigned long long *mem_limit, unsigned long long *memsw_limit)
3066 {
3067         struct cgroup *cgroup;
3068         unsigned long long min_limit, min_memsw_limit, tmp;
3069
3070         min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3071         min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3072         cgroup = memcg->css.cgroup;
3073         if (!memcg->use_hierarchy)
3074                 goto out;
3075
3076         while (cgroup->parent) {
3077                 cgroup = cgroup->parent;
3078                 memcg = mem_cgroup_from_cont(cgroup);
3079                 if (!memcg->use_hierarchy)
3080                         break;
3081                 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
3082                 min_limit = min(min_limit, tmp);
3083                 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3084                 min_memsw_limit = min(min_memsw_limit, tmp);
3085         }
3086 out:
3087         *mem_limit = min_limit;
3088         *memsw_limit = min_memsw_limit;
3089         return;
3090 }
3091
3092 static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
3093 {
3094         struct mem_cgroup *mem;
3095         int type, name;
3096
3097         mem = mem_cgroup_from_cont(cont);
3098         type = MEMFILE_TYPE(event);
3099         name = MEMFILE_ATTR(event);
3100         switch (name) {
3101         case RES_MAX_USAGE:
3102                 if (type == _MEM)
3103                         res_counter_reset_max(&mem->res);
3104                 else
3105                         res_counter_reset_max(&mem->memsw);
3106                 break;
3107         case RES_FAILCNT:
3108                 if (type == _MEM)
3109                         res_counter_reset_failcnt(&mem->res);
3110                 else
3111                         res_counter_reset_failcnt(&mem->memsw);
3112                 break;
3113         }
3114
3115         return 0;
3116 }
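/*
 * Illustration (path is an assumption of the example): these are "trigger"
 * files, so writing any value resets the corresponding counter, e.g.
 *
 *   # echo 0 > /sys/fs/cgroup/memory/<group>/memory.max_usage_in_bytes
 *   # echo 0 > /sys/fs/cgroup/memory/<group>/memory.failcnt
 *
 * (and likewise for the memsw.* variants when swap accounting is enabled).
 */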
3117
3118 static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
3119                                         struct cftype *cft)
3120 {
3121         return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
3122 }
3123
3124 #ifdef CONFIG_MMU
3125 static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
3126                                         struct cftype *cft, u64 val)
3127 {
3128         struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3129
3130         if (val >= (1 << NR_MOVE_TYPE))
3131                 return -EINVAL;
3132         /*
3133          * We check this value several times both in can_attach() and
3134          * attach(), so we need the cgroup lock to prevent this value from
3135          * becoming inconsistent.
3136          */
3137         cgroup_lock();
3138         mem->move_charge_at_immigrate = val;
3139         cgroup_unlock();
3140
3141         return 0;
3142 }
3143 #else
3144 static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
3145                                         struct cftype *cft, u64 val)
3146 {
3147         return -ENOSYS;
3148 }
3149 #endif
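/*
 * Illustration (path is an assumption of the example): in this version only
 * bit 0 (MOVE_CHARGE_TYPE_ANON) is defined, so
 *
 *   # echo 1 > /sys/fs/cgroup/memory/<group>/memory.move_charge_at_immigrate
 *
 * makes charges of anonymous pages (and their swap entries, with swap
 * accounting) follow a task that owns its mm when it is moved into the
 * group. Values >= (1 << NR_MOVE_TYPE) are rejected with -EINVAL.
 */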
3150
3151
3152 /* For reading statistics */
3153 enum {
3154         MCS_CACHE,
3155         MCS_RSS,
3156         MCS_FILE_MAPPED,
3157         MCS_PGPGIN,
3158         MCS_PGPGOUT,
3159         MCS_SWAP,
3160         MCS_INACTIVE_ANON,
3161         MCS_ACTIVE_ANON,
3162         MCS_INACTIVE_FILE,
3163         MCS_ACTIVE_FILE,
3164         MCS_UNEVICTABLE,
3165         NR_MCS_STAT,
3166 };
3167
3168 struct mcs_total_stat {
3169         s64 stat[NR_MCS_STAT];
3170 };
3171
3172 struct {
3173         char *local_name;
3174         char *total_name;
3175 } memcg_stat_strings[NR_MCS_STAT] = {
3176         {"cache", "total_cache"},
3177         {"rss", "total_rss"},
3178         {"mapped_file", "total_mapped_file"},
3179         {"pgpgin", "total_pgpgin"},
3180         {"pgpgout", "total_pgpgout"},
3181         {"swap", "total_swap"},
3182         {"inactive_anon", "total_inactive_anon"},
3183         {"active_anon", "total_active_anon"},
3184         {"inactive_file", "total_inactive_file"},
3185         {"active_file", "total_active_file"},
3186         {"unevictable", "total_unevictable"}
3187 };
3188
3189
3190 static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
3191 {
3192         struct mcs_total_stat *s = data;
3193         s64 val;
3194
3195         /* per cpu stat */
3196         val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
3197         s->stat[MCS_CACHE] += val * PAGE_SIZE;
3198         val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
3199         s->stat[MCS_RSS] += val * PAGE_SIZE;
3200         val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED);
3201         s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
3202         val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGIN_COUNT);
3203         s->stat[MCS_PGPGIN] += val;
3204         val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGOUT_COUNT);
3205         s->stat[MCS_PGPGOUT] += val;
3206         if (do_swap_account) {
3207                 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
3208                 s->stat[MCS_SWAP] += val * PAGE_SIZE;
3209         }
3210
3211         /* per zone stat */
3212         val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
3213         s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
3214         val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_ANON);
3215         s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
3216         val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_FILE);
3217         s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
3218         val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_FILE);
3219         s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
3220         val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE);
3221         s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
3222         return 0;
3223 }
3224
3225 static void
3226 mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
3227 {
3228         mem_cgroup_walk_tree(mem, s, mem_cgroup_get_local_stat);
3229 }
3230
3231 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
3232                                  struct cgroup_map_cb *cb)
3233 {
3234         struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
3235         struct mcs_total_stat mystat;
3236         int i;
3237
3238         memset(&mystat, 0, sizeof(mystat));
3239         mem_cgroup_get_local_stat(mem_cont, &mystat);
3240
3241         for (i = 0; i < NR_MCS_STAT; i++) {
3242                 if (i == MCS_SWAP && !do_swap_account)
3243                         continue;
3244                 cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
3245         }
3246
3247         /* Hierarchical information */
3248         {
3249                 unsigned long long limit, memsw_limit;
3250                 memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
3251                 cb->fill(cb, "hierarchical_memory_limit", limit);
3252                 if (do_swap_account)
3253                         cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
3254         }
3255
3256         memset(&mystat, 0, sizeof(mystat));
3257         mem_cgroup_get_total_stat(mem_cont, &mystat);
3258         for (i = 0; i < NR_MCS_STAT; i++) {
3259                 if (i == MCS_SWAP && !do_swap_account)
3260                         continue;
3261                 cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
3262         }
3263
3264 #ifdef CONFIG_DEBUG_VM
3265         cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
3266
3267         {
3268                 int nid, zid;
3269                 struct mem_cgroup_per_zone *mz;
3270                 unsigned long recent_rotated[2] = {0, 0};
3271                 unsigned long recent_scanned[2] = {0, 0};
3272
3273                 for_each_online_node(nid)
3274                         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
3275                                 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
3276
3277                                 recent_rotated[0] +=
3278                                         mz->reclaim_stat.recent_rotated[0];
3279                                 recent_rotated[1] +=
3280                                         mz->reclaim_stat.recent_rotated[1];
3281                                 recent_scanned[0] +=
3282                                         mz->reclaim_stat.recent_scanned[0];
3283                                 recent_scanned[1] +=
3284                                         mz->reclaim_stat.recent_scanned[1];
3285                         }
3286                 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
3287                 cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
3288                 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
3289                 cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
3290         }
3291 #endif
3292
3293         return 0;
3294 }
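/*
 * Illustrative memory.stat output produced by the handler above (the values
 * are made up; swap and total_swap appear only with swap accounting enabled):
 *
 *   cache 1048576
 *   rss 2097152
 *   mapped_file 65536
 *   pgpgin 1024
 *   pgpgout 256
 *   inactive_anon 0
 *   active_anon 2097152
 *   inactive_file 524288
 *   active_file 524288
 *   unevictable 0
 *   hierarchical_memory_limit 268435456
 *   total_cache 1048576
 *   ...
 */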
3295
3296 static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
3297 {
3298         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3299
3300         return get_swappiness(memcg);
3301 }
3302
3303 static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
3304                                        u64 val)
3305 {
3306         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3307         struct mem_cgroup *parent;
3308
3309         if (val > 100)
3310                 return -EINVAL;
3311
3312         if (cgrp->parent == NULL)
3313                 return -EINVAL;
3314
3315         parent = mem_cgroup_from_cont(cgrp->parent);
3316
3317         cgroup_lock();
3318
3319         /* Under hierarchy, only a childless hierarchy root can set this value */
3320         if ((parent->use_hierarchy) ||
3321             (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
3322                 cgroup_unlock();
3323                 return -EINVAL;
3324         }
3325
3326         spin_lock(&memcg->reclaim_param_lock);
3327         memcg->swappiness = val;
3328         spin_unlock(&memcg->reclaim_param_lock);
3329
3330         cgroup_unlock();
3331
3332         return 0;
3333 }
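/*
 * Illustration (path is an assumption of the example):
 *
 *   # echo 30 > /sys/fs/cgroup/memory/<group>/memory.swappiness
 *
 * Values above 100, writes to the root cgroup, and writes while the group is
 * part of a populated hierarchy are rejected with -EINVAL, per the checks
 * above.
 */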
3334
3335 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3336 {
3337         struct mem_cgroup_threshold_ary *t;
3338         u64 usage;
3339         int i;
3340
3341         rcu_read_lock();
3342         if (!swap)
3343                 t = rcu_dereference(memcg->thresholds);
3344         else
3345                 t = rcu_dereference(memcg->memsw_thresholds);
3346
3347         if (!t)
3348                 goto unlock;
3349
3350         usage = mem_cgroup_usage(memcg, swap);
3351
3352         /*
3353          * current_threshold points to the threshold just below usage.
3354          * If that is no longer true, a threshold was crossed after the
3355          * last call of __mem_cgroup_threshold().
3356          */
3357         i = atomic_read(&t->current_threshold);
3358
3359         /*
3360          * Iterate backward over the array of thresholds starting from
3361          * current_threshold and check if a threshold was crossed.
3362          * If none of the thresholds below usage was crossed, we read
3363          * only one element of the array here.
3364          */
3365         for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3366                 eventfd_signal(t->entries[i].eventfd, 1);
3367
3368         /* i = current_threshold + 1 */
3369         i++;
3370
3371         /*
3372          * Iterate forward over the array of thresholds starting from
3373          * current_threshold+1 and check if a threshold was crossed.
3374          * If none of the thresholds above usage was crossed, we read
3375          * only one element of the array here.
3376          */
3377         for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3378                 eventfd_signal(t->entries[i].eventfd, 1);
3379
3380         /* Update current_threshold */
3381         atomic_set(&t->current_threshold, i - 1);
3382 unlock:
3383         rcu_read_unlock();
3384 }
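/*
 * Worked example (illustrative numbers): with thresholds registered at 4M,
 * 8M and 16M, suppose current_threshold is 1 (the 8M entry, just below the
 * old usage of 10M) and usage has now grown to 20M. The backward loop exits
 * immediately (8M is not above 20M), the forward loop signals the 16M
 * eventfd, and current_threshold is updated to 2. Had usage instead dropped
 * to 3M, the backward loop would signal the 8M and 4M eventfds and
 * current_threshold would become -1.
 */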
3385
3386 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3387 {
3388         __mem_cgroup_threshold(memcg, false);
3389         if (do_swap_account)
3390                 __mem_cgroup_threshold(memcg, true);
3391 }
3392
3393 static int compare_thresholds(const void *a, const void *b)
3394 {
3395         const struct mem_cgroup_threshold *_a = a;
3396         const struct mem_cgroup_threshold *_b = b;
3397         /* thresholds are u64; a difference truncated to int can flip sign */
3398         return (_a->threshold > _b->threshold) - (_a->threshold < _b->threshold);
3399 }
3400
3401 static int mem_cgroup_register_event(struct cgroup *cgrp, struct cftype *cft,
3402                 struct eventfd_ctx *eventfd, const char *args)
3403 {
3404         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3405         struct mem_cgroup_threshold_ary *thresholds, *thresholds_new;
3406         int type = MEMFILE_TYPE(cft->private);
3407         u64 threshold, usage;
3408         int size;
3409         int i, ret;
3410
3411         ret = res_counter_memparse_write_strategy(args, &threshold);
3412         if (ret)
3413                 return ret;
3414
3415         mutex_lock(&memcg->thresholds_lock);
3416         if (type == _MEM)
3417                 thresholds = memcg->thresholds;
3418         else if (type == _MEMSWAP)
3419                 thresholds = memcg->memsw_thresholds;
3420         else
3421                 BUG();
3422
3423         usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
3424
3425         /* Check if a threshold was crossed before adding a new one */
3426         if (thresholds)
3427                 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3428
3429         if (thresholds)
3430                 size = thresholds->size + 1;
3431         else
3432                 size = 1;
3433
3434         /* Allocate memory for new array of thresholds */
3435         thresholds_new = kmalloc(sizeof(*thresholds_new) +
3436                         size * sizeof(struct mem_cgroup_threshold),
3437                         GFP_KERNEL);
3438         if (!thresholds_new) {
3439                 ret = -ENOMEM;
3440                 goto unlock;
3441         }
3442         thresholds_new->size = size;
3443
3444         /* Copy thresholds (if any) to new array */
3445         if (thresholds)
3446                 memcpy(thresholds_new->entries, thresholds->entries,
3447                                 thresholds->size *
3448                                 sizeof(struct mem_cgroup_threshold));
3449         /* Add new threshold */
3450         thresholds_new->entries[size - 1].eventfd = eventfd;
3451         thresholds_new->entries[size - 1].threshold = threshold;
3452
3453         /* Sort thresholds. Registering a new threshold isn't time-critical */
3454         sort(thresholds_new->entries, size,
3455                         sizeof(struct mem_cgroup_threshold),
3456                         compare_thresholds, NULL);
3457
3458         /* Find current threshold */
3459         atomic_set(&thresholds_new->current_threshold, -1);
3460         for (i = 0; i < size; i++) {
3461                 if (thresholds_new->entries[i].threshold < usage) {
3462                         /*
3463                          * thresholds_new->current_threshold will not be used
3464                          * until rcu_assign_pointer(), so it's safe to increment
3465                          * it here.
3466                          */
3467                         atomic_inc(&thresholds_new->current_threshold);
3468                 }
3469         }
3470
3471         if (type == _MEM)
3472                 rcu_assign_pointer(memcg->thresholds, thresholds_new);
3473         else
3474                 rcu_assign_pointer(memcg->memsw_thresholds, thresholds_new);
3475
3476         /* Make sure nobody uses the old thresholds array before freeing it */
3477         synchronize_rcu();
3478
3479         kfree(thresholds);
3480 unlock:
3481         mutex_unlock(&memcg->thresholds_lock);
3482
3483         return ret;
3484 }
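/*
 * Userspace sketch of registering a threshold through cgroup.event_control
 * (illustration only; paths, the group name and the 32M threshold are
 * assumptions of the example, and error handling is omitted):
 *
 *   int efd = eventfd(0, 0);
 *   int ufd = open("/sys/fs/cgroup/memory/grp/memory.usage_in_bytes", O_RDONLY);
 *   int cfd = open("/sys/fs/cgroup/memory/grp/cgroup.event_control", O_WRONLY);
 *   char buf[64];
 *   uint64_t ticks;
 *
 *   snprintf(buf, sizeof(buf), "%d %d 32M", efd, ufd);
 *   write(cfd, buf, strlen(buf));           triggers the handler above
 *   read(efd, &ticks, sizeof(ticks));       blocks until a threshold fires
 */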
3485
3486 static int mem_cgroup_unregister_event(struct cgroup *cgrp, struct cftype *cft,
3487                 struct eventfd_ctx *eventfd)
3488 {
3489         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3490         struct mem_cgroup_threshold_ary *thresholds, *thresholds_new;
3491         int type = MEMFILE_TYPE(cft->private);
3492         u64 usage;
3493         int size = 0;
3494         int i, j, ret = 0;
3495
3496         mutex_lock(&memcg->thresholds_lock);
3497         if (type == _MEM)
3498                 thresholds = memcg->thresholds;
3499         else if (type == _MEMSWAP)
3500                 thresholds = memcg->memsw_thresholds;
3501         else
3502                 BUG();
3503
3504         /*
3505          * Something went wrong if we're trying to unregister a threshold
3506          * when we don't have any thresholds.
3507          */
3508         BUG_ON(!thresholds);
3509
3510         usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
3511
3512         /* Check if a threshold was crossed before removing */
3513         __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3514
3515         /* Calculate the new number of thresholds */
3516         for (i = 0; i < thresholds->size; i++) {
3517                 if (thresholds->entries[i].eventfd != eventfd)
3518                         size++;
3519         }
3520
3521         /* Set thresholds array to NULL if we don't have thresholds */
3522         if (!size) {
3523                 thresholds_new = NULL;
3524                 goto assign;
3525         }
3526
3527         /* Allocate memory for new array of thresholds */
3528         thresholds_new = kmalloc(sizeof(*thresholds_new) +
3529                         size * sizeof(struct mem_cgroup_threshold),
3530                         GFP_KERNEL);
3531         if (!thresholds_new) {
3532                 ret = -ENOMEM;
3533                 goto unlock;
3534         }
3535         thresholds_new->size = size;
3536
3537         /* Copy thresholds and find current threshold */
3538         atomic_set(&thresholds_new->current_threshold, -1);
3539         for (i = 0, j = 0; i < thresholds->size; i++) {
3540                 if (thresholds->entries[i].eventfd == eventfd)
3541                         continue;
3542
3543                 thresholds_new->entries[j] = thresholds->entries[i];
3544                 if (thresholds_new->entries[j].threshold < usage) {
3545                         /*
3546                          * thresholds_new->current_threshold will not be used
3547                          * until rcu_assign_pointer(), so it's safe to increment
3548                          * it here.
3549                          */
3550                         atomic_inc(&thresholds_new->current_threshold);
3551                 }
3552                 j++;
3553         }
3554
3555 assign:
3556         if (type == _MEM)
3557                 rcu_assign_pointer(memcg->thresholds, thresholds_new);
3558         else
3559                 rcu_assign_pointer(memcg->memsw_thresholds, thresholds_new);
3560
3561         /* Make sure nobody uses the old thresholds array before freeing it */
3562         synchronize_rcu();
3563
3564         kfree(thresholds);
3565 unlock:
3566         mutex_unlock(&memcg->thresholds_lock);
3567
3568         return ret;
3569 }
3570
3571 static struct cftype mem_cgroup_files[] = {
3572         {
3573                 .name = "usage_in_bytes",
3574                 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
3575                 .read_u64 = mem_cgroup_read,
3576                 .register_event = mem_cgroup_register_event,
3577                 .unregister_event = mem_cgroup_unregister_event,
3578         },
3579         {
3580                 .name = "max_usage_in_bytes",
3581                 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
3582                 .trigger = mem_cgroup_reset,
3583                 .read_u64 = mem_cgroup_read,
3584         },
3585         {
3586                 .name = "limit_in_bytes",
3587                 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
3588                 .write_string = mem_cgroup_write,
3589                 .read_u64 = mem_cgroup_read,
3590         },
3591         {
3592                 .name = "soft_limit_in_bytes",
3593                 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
3594                 .write_string = mem_cgroup_write,
3595                 .read_u64 = mem_cgroup_read,
3596         },
3597         {
3598                 .name = "failcnt",
3599                 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
3600                 .trigger = mem_cgroup_reset,
3601                 .read_u64 = mem_cgroup_read,
3602         },
3603         {
3604                 .name = "stat",
3605                 .read_map = mem_control_stat_show,
3606         },
3607         {
3608                 .name = "force_empty",
3609                 .trigger = mem_cgroup_force_empty_write,
3610         },
3611         {
3612                 .name = "use_hierarchy",
3613                 .write_u64 = mem_cgroup_hierarchy_write,
3614                 .read_u64 = mem_cgroup_hierarchy_read,
3615         },
3616         {
3617                 .name = "swappiness",
3618                 .read_u64 = mem_cgroup_swappiness_read,
3619                 .write_u64 = mem_cgroup_swappiness_write,
3620         },
3621         {
3622                 .name = "move_charge_at_immigrate",
3623                 .read_u64 = mem_cgroup_move_charge_read,
3624                 .write_u64 = mem_cgroup_move_charge_write,
3625         },
3626 };
3627
3628 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
3629 static struct cftype memsw_cgroup_files[] = {
3630         {
3631                 .name = "memsw.usage_in_bytes",
3632                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
3633                 .read_u64 = mem_cgroup_read,
3634                 .register_event = mem_cgroup_register_event,
3635                 .unregister_event = mem_cgroup_unregister_event,
3636         },
3637         {
3638                 .name = "memsw.max_usage_in_bytes",
3639                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
3640                 .trigger = mem_cgroup_reset,
3641                 .read_u64 = mem_cgroup_read,
3642         },
3643         {
3644                 .name = "memsw.limit_in_bytes",
3645                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
3646                 .write_string = mem_cgroup_write,
3647                 .read_u64 = mem_cgroup_read,
3648         },
3649         {
3650                 .name = "memsw.failcnt",
3651                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
3652                 .trigger = mem_cgroup_reset,
3653                 .read_u64 = mem_cgroup_read,
3654         },
3655 };
3656
3657 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
3658 {
3659         if (!do_swap_account)
3660                 return 0;
3661         return cgroup_add_files(cont, ss, memsw_cgroup_files,
3662                                 ARRAY_SIZE(memsw_cgroup_files));
3663 };
3664 #else
3665 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
3666 {
3667         return 0;
3668 }
3669 #endif
3670
3671 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
3672 {
3673         struct mem_cgroup_per_node *pn;
3674         struct mem_cgroup_per_zone *mz;
3675         enum lru_list l;
3676         int zone, tmp = node;
3677         /*
3678          * This routine is called for each possible node.
3679          * But it's a BUG to call kmalloc() against an offline node.
3680          *
3681          * TODO: this routine can waste a lot of memory for nodes which will
3682          *       never be onlined. It would be better to use a memory hotplug
3683          *       callback function.
3684          */
3685         if (!node_state(node, N_NORMAL_MEMORY))
3686                 tmp = -1;
3687         pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
3688         if (!pn)
3689                 return 1;
3690
3691         mem->info.nodeinfo[node] = pn;
3692         memset(pn, 0, sizeof(*pn));
3693
3694         for (zone = 0; zone < MAX_NR_ZONES; zone++) {
3695                 mz = &pn->zoneinfo[zone];
3696                 for_each_lru(l)
3697                         INIT_LIST_HEAD(&mz->lists[l]);
3698                 mz->usage_in_excess = 0;
3699                 mz->on_tree = false;
3700                 mz->mem = mem;
3701         }
3702         return 0;
3703 }
3704
3705 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
3706 {
3707         kfree(mem->info.nodeinfo[node]);
3708 }
3709
3710 static struct mem_cgroup *mem_cgroup_alloc(void)
3711 {
3712         struct mem_cgroup *mem;
3713         int size = sizeof(struct mem_cgroup);
3714
3715         /* Can be very big if MAX_NUMNODES is very big */
3716         if (size < PAGE_SIZE)
3717                 mem = kmalloc(size, GFP_KERNEL);
3718         else
3719                 mem = vmalloc(size);
3720
3721         if (!mem)
3722                 return NULL;
3723
3724         memset(mem, 0, size);
3725         mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
3726         if (!mem->stat) {
3727                 if (size < PAGE_SIZE)
3728                         kfree(mem);
3729                 else
3730                         vfree(mem);
3731                 mem = NULL;
3732         }
3733         return mem;
3734 }
3735
3736 /*
3737  * When a mem_cgroup is destroyed, references from swap_cgroup can remain.
3738  * (scanning everything at force_empty is too costly...)
3739  *
3740  * Instead of clearing all references at force_empty, we remember
3741  * the number of references from swap_cgroup and free the mem_cgroup when
3742  * it goes down to 0.
3743  *
3744  * Removal of the cgroup itself succeeds regardless of refs from swap.
3745  */
3746
3747 static void __mem_cgroup_free(struct mem_cgroup *mem)
3748 {
3749         int node;
3750
3751         mem_cgroup_remove_from_trees(mem);
3752         free_css_id(&mem_cgroup_subsys, &mem->css);
3753
3754         for_each_node_state(node, N_POSSIBLE)
3755                 free_mem_cgroup_per_zone_info(mem, node);
3756
3757         free_percpu(mem->stat);
3758         if (sizeof(struct mem_cgroup) < PAGE_SIZE)
3759                 kfree(mem);
3760         else
3761                 vfree(mem);
3762 }
3763
3764 static void mem_cgroup_get(struct mem_cgroup *mem)
3765 {
3766         atomic_inc(&mem->refcnt);
3767 }
3768
3769 static void __mem_cgroup_put(struct mem_cgroup *mem, int count)
3770 {
3771         if (atomic_sub_and_test(count, &mem->refcnt)) {
3772                 struct mem_cgroup *parent = parent_mem_cgroup(mem);
3773                 __mem_cgroup_free(mem);
3774                 if (parent)
3775                         mem_cgroup_put(parent);
3776         }
3777 }
3778
3779 static void mem_cgroup_put(struct mem_cgroup *mem)
3780 {
3781         __mem_cgroup_put(mem, 1);
3782 }
3783
3784 /*
3785  * Returns the parent mem_cgroup in the memcg hierarchy (with use_hierarchy enabled).
3786  */
3787 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
3788 {
3789         if (!mem->res.parent)
3790                 return NULL;
3791         return mem_cgroup_from_res_counter(mem->res.parent, res);
3792 }
3793
3794 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
3795 static void __init enable_swap_cgroup(void)
3796 {
3797         if (!mem_cgroup_disabled() && really_do_swap_account)
3798                 do_swap_account = 1;
3799 }
3800 #else
3801 static void __init enable_swap_cgroup(void)
3802 {
3803 }
3804 #endif
3805
3806 static int mem_cgroup_soft_limit_tree_init(void)
3807 {
3808         struct mem_cgroup_tree_per_node *rtpn;
3809         struct mem_cgroup_tree_per_zone *rtpz;
3810         int tmp, node, zone;
3811
3812         for_each_node_state(node, N_POSSIBLE) {
3813                 tmp = node;
3814                 if (!node_state(node, N_NORMAL_MEMORY))
3815                         tmp = -1;
3816                 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
3817                 if (!rtpn)
3818                         return 1;
3819
3820                 soft_limit_tree.rb_tree_per_node[node] = rtpn;
3821
3822                 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
3823                         rtpz = &rtpn->rb_tree_per_zone[zone];
3824                         rtpz->rb_root = RB_ROOT;
3825                         spin_lock_init(&rtpz->lock);
3826                 }
3827         }
3828         return 0;
3829 }
3830
3831 static struct cgroup_subsys_state * __ref
3832 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
3833 {
3834         struct mem_cgroup *mem, *parent;
3835         long error = -ENOMEM;
3836         int node;
3837
3838         mem = mem_cgroup_alloc();
3839         if (!mem)
3840                 return ERR_PTR(error);
3841
3842         for_each_node_state(node, N_POSSIBLE)
3843                 if (alloc_mem_cgroup_per_zone_info(mem, node))
3844                         goto free_out;
3845
3846         /* root ? */
3847         if (cont->parent == NULL) {
3848                 int cpu;
3849                 enable_swap_cgroup();
3850                 parent = NULL;
3851                 root_mem_cgroup = mem;
3852                 if (mem_cgroup_soft_limit_tree_init())
3853                         goto free_out;
3854                 for_each_possible_cpu(cpu) {
3855                         struct memcg_stock_pcp *stock =
3856                                                 &per_cpu(memcg_stock, cpu);
3857                         INIT_WORK(&stock->work, drain_local_stock);
3858                 }
3859                 hotcpu_notifier(memcg_stock_cpu_callback, 0);
3860         } else {
3861                 parent = mem_cgroup_from_cont(cont->parent);
3862                 mem->use_hierarchy = parent->use_hierarchy;
3863         }
3864
3865         if (parent && parent->use_hierarchy) {
3866                 res_counter_init(&mem->res, &parent->res);
3867                 res_counter_init(&mem->memsw, &parent->memsw);
3868                 /*
3869                  * We increment the refcnt of the parent to ensure that we
3870                  * can safely access it on res_counter_charge/uncharge.
3871                  * This refcnt will be decremented when freeing this
3872                  * mem_cgroup (see mem_cgroup_put()).
3873                  */
3874                 mem_cgroup_get(parent);
3875         } else {
3876                 res_counter_init(&mem->res, NULL);
3877                 res_counter_init(&mem->memsw, NULL);
3878         }
3879         mem->last_scanned_child = 0;
3880         spin_lock_init(&mem->reclaim_param_lock);
3881
3882         if (parent)
3883                 mem->swappiness = get_swappiness(parent);
3884         atomic_set(&mem->refcnt, 1);
3885         mem->move_charge_at_immigrate = 0;
3886         mutex_init(&mem->thresholds_lock);
3887         return &mem->css;
3888 free_out:
3889         __mem_cgroup_free(mem);
3890         root_mem_cgroup = NULL;
3891         return ERR_PTR(error);
3892 }
3893
3894 static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
3895                                         struct cgroup *cont)
3896 {
3897         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
3898
3899         return mem_cgroup_force_empty(mem, false);
3900 }
3901
3902 static void mem_cgroup_destroy(struct cgroup_subsys *ss,
3903                                 struct cgroup *cont)
3904 {
3905         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
3906
3907         mem_cgroup_put(mem);
3908 }
3909
3910 static int mem_cgroup_populate(struct cgroup_subsys *ss,
3911                                 struct cgroup *cont)
3912 {
3913         int ret;
3914
3915         ret = cgroup_add_files(cont, ss, mem_cgroup_files,
3916                                 ARRAY_SIZE(mem_cgroup_files));
3917
3918         if (!ret)
3919                 ret = register_memsw_files(cont, ss);
3920         return ret;
3921 }
3922
3923 #ifdef CONFIG_MMU
3924 /* Handlers for move charge at task migration. */
3925 #define PRECHARGE_COUNT_AT_ONCE 256
3926 static int mem_cgroup_do_precharge(unsigned long count)
3927 {
3928         int ret = 0;
3929         int batch_count = PRECHARGE_COUNT_AT_ONCE;
3930         struct mem_cgroup *mem = mc.to;
3931
3932         if (mem_cgroup_is_root(mem)) {
3933                 mc.precharge += count;
3934                 /* we don't need css_get for root */
3935                 return ret;
3936         }
3937         /* try to charge at once */
3938         if (count > 1) {
3939                 struct res_counter *dummy;
3940                 /*
3941                  * "mem" cannot be under rmdir() because we've already checked
3942                  * by cgroup_lock_live_cgroup() that it is not removed and we
3943                  * are still under the same cgroup_mutex. So we can postpone
3944                  * css_get().
3945                  */
3946                 if (res_counter_charge(&mem->res, PAGE_SIZE * count, &dummy))
3947                         goto one_by_one;
3948                 if (do_swap_account && res_counter_charge(&mem->memsw,
3949                                                 PAGE_SIZE * count, &dummy)) {
3950                         res_counter_uncharge(&mem->res, PAGE_SIZE * count);
3951                         goto one_by_one;
3952                 }
3953                 mc.precharge += count;
3954                 VM_BUG_ON(test_bit(CSS_ROOT, &mem->css.flags));
3955                 WARN_ON_ONCE(count > INT_MAX);
3956                 __css_get(&mem->css, (int)count);
3957                 return ret;
3958         }
3959 one_by_one:
3960         /* fall back to one by one charge */
3961         while (count--) {
3962                 if (signal_pending(current)) {
3963                         ret = -EINTR;
3964                         break;
3965                 }
3966                 if (!batch_count--) {
3967                         batch_count = PRECHARGE_COUNT_AT_ONCE;
3968                         cond_resched();
3969                 }
3970                 ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
3971                 if (ret || !mem)
3972                         /* mem_cgroup_clear_mc() will do uncharge later */
3973                         return -ENOMEM;
3974                 mc.precharge++;
3975         }
3976         return ret;
3977 }
3978
3979 /**
3980  * is_target_pte_for_mc - check whether a pte is valid for move charge
3981  * @vma: the vma to which the pte to be checked belongs
3982  * @addr: the address corresponding to the pte to be checked
3983  * @ptent: the pte to be checked
3984  * @target: pointer where the target page or swap entry is stored (can be NULL)
3985  *
3986  * Returns
3987  *   0 (MC_TARGET_NONE): if the pte is not a target for move charge.
3988  *   1 (MC_TARGET_PAGE): if the page corresponding to this pte is a target for
3989  *     move charge. If @target is not NULL, the page is stored in target->page
3990  *     with an extra refcount taken (callers should handle it).
3991  *   2 (MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
3992  *     target for charge migration. If @target is not NULL, the entry is stored
3993  *     in target->ent.
3994  *
3995  * Called with the pte lock held.
3996  */
3997 union mc_target {
3998         struct page     *page;
3999         swp_entry_t     ent;
4000 };
4001
4002 enum mc_target_type {
4003         MC_TARGET_NONE, /* not used */
4004         MC_TARGET_PAGE,
4005         MC_TARGET_SWAP,
4006 };
4007
4008 static int is_target_pte_for_mc(struct vm_area_struct *vma,
4009                 unsigned long addr, pte_t ptent, union mc_target *target)
4010 {
4011         struct page *page = NULL;
4012         struct page_cgroup *pc;
4013         int ret = 0;
4014         swp_entry_t ent = { .val = 0 };
4015         int usage_count = 0;
4016         bool move_anon = test_bit(MOVE_CHARGE_TYPE_ANON,
4017                                         &mc.to->move_charge_at_immigrate);
4018
4019         if (!pte_present(ptent)) {
4020                 /* TODO: handle swap of shmem/tmpfs */
4021                 if (pte_none(ptent) || pte_file(ptent))
4022                         return 0;
4023                 else if (is_swap_pte(ptent)) {
4024                         ent = pte_to_swp_entry(ptent);
4025                         if (!move_anon || non_swap_entry(ent))
4026                                 return 0;
4027                         usage_count = mem_cgroup_count_swap_user(ent, &page);
4028                 }
4029         } else {
4030                 page = vm_normal_page(vma, addr, ptent);
4031                 if (!page || !page_mapped(page))
4032                         return 0;
4033                 /*
4034                  * TODO: We don't move charges of file (including shmem/tmpfs)
4035                  * pages for now.
4036                  */
4037                 if (!move_anon || !PageAnon(page))
4038                         return 0;
4039                 if (!get_page_unless_zero(page))
4040                         return 0;
4041                 usage_count = page_mapcount(page);
4042         }
4043         if (usage_count > 1) {
4044                 /*
4045                  * TODO: We don't move charges of shared (used by multiple
4046                  * processes) pages for now.
4047                  */
4048                 if (page)
4049                         put_page(page);
4050                 return 0;
4051         }
4052         if (page) {
4053                 pc = lookup_page_cgroup(page);
4054                 /*
4055                  * Do only a loose check without the page_cgroup lock.
4056                  * mem_cgroup_move_account() checks whether the pc is valid
4057                  * under the lock.
4058                  */
4059                 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
4060                         ret = MC_TARGET_PAGE;
4061                         if (target)
4062                                 target->page = page;
4063                 }
4064                 if (!ret || !target)
4065                         put_page(page);
4066         }
4067         /* fall through to the swap entry check */
4068         if (ent.val && do_swap_account && !ret &&
4069                         css_id(&mc.from->css) == lookup_swap_cgroup(ent)) {
4070                 ret = MC_TARGET_SWAP;
4071                 if (target)
4072                         target->ent = ent;
4073         }
4074         return ret;
4075 }
4076
4077 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
4078                                         unsigned long addr, unsigned long end,
4079                                         struct mm_walk *walk)
4080 {
4081         struct vm_area_struct *vma = walk->private;
4082         pte_t *pte;
4083         spinlock_t *ptl;
4084
4085         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4086         for (; addr != end; pte++, addr += PAGE_SIZE)
4087                 if (is_target_pte_for_mc(vma, addr, *pte, NULL))
4088                         mc.precharge++; /* increment precharge temporarily */
4089         pte_unmap_unlock(pte - 1, ptl);
4090         cond_resched();
4091
4092         return 0;
4093 }
4094
4095 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4096 {
4097         unsigned long precharge;
4098         struct vm_area_struct *vma;
4099
4100         down_read(&mm->mmap_sem);
4101         for (vma = mm->mmap; vma; vma = vma->vm_next) {
4102                 struct mm_walk mem_cgroup_count_precharge_walk = {
4103                         .pmd_entry = mem_cgroup_count_precharge_pte_range,
4104                         .mm = mm,
4105                         .private = vma,
4106                 };
4107                 if (is_vm_hugetlb_page(vma))
4108                         continue;
4109                 /* TODO: We don't move charges of shmem/tmpfs pages for now. */
4110                 if (vma->vm_flags & VM_SHARED)
4111                         continue;
4112                 walk_page_range(vma->vm_start, vma->vm_end,
4113                                         &mem_cgroup_count_precharge_walk);
4114         }
4115         up_read(&mm->mmap_sem);
4116
4117         precharge = mc.precharge;
4118         mc.precharge = 0;
4119
4120         return precharge;
4121 }
4122
4123 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
4124 {
4125         return mem_cgroup_do_precharge(mem_cgroup_count_precharge(mm));
4126 }
4127
4128 static void mem_cgroup_clear_mc(void)
4129 {
4130         /* we must uncharge all the leftover precharges from mc.to */
4131         if (mc.precharge) {
4132                 __mem_cgroup_cancel_charge(mc.to, mc.precharge);
4133                 mc.precharge = 0;
4134         }
4135         /*
4136          * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
4137          * we must uncharge here.
4138          */
4139         if (mc.moved_charge) {
4140                 __mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
4141                 mc.moved_charge = 0;
4142         }
4143         /* we must fixup refcnts and charges */
4144         if (mc.moved_swap) {
4145                 WARN_ON_ONCE(mc.moved_swap > INT_MAX);
4146                 /* uncharge swap account from the old cgroup */
4147                 if (!mem_cgroup_is_root(mc.from))
4148                         res_counter_uncharge(&mc.from->memsw,
4149                                                 PAGE_SIZE * mc.moved_swap);
4150                 __mem_cgroup_put(mc.from, mc.moved_swap);
4151
4152                 if (!mem_cgroup_is_root(mc.to)) {
4153                         /*
4154                          * we charged both to->res and to->memsw, so we should
4155                          * uncharge to->res.
4156                          */
4157                         res_counter_uncharge(&mc.to->res,
4158                                                 PAGE_SIZE * mc.moved_swap);
4159                         VM_BUG_ON(test_bit(CSS_ROOT, &mc.to->css.flags));
4160                         __css_put(&mc.to->css, mc.moved_swap);
4161                 }
4162                 /* we've already done mem_cgroup_get(mc.to) */
4163
4164                 mc.moved_swap = 0;
4165         }
4166         mc.from = NULL;
4167         mc.to = NULL;
4168         mc.moving_task = NULL;
4169         wake_up_all(&mc.waitq);
4170 }
4171
4172 static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
4173                                 struct cgroup *cgroup,
4174                                 struct task_struct *p,
4175                                 bool threadgroup)
4176 {
4177         int ret = 0;
4178         struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup);
4179
4180         if (mem->move_charge_at_immigrate) {
4181                 struct mm_struct *mm;
4182                 struct mem_cgroup *from = mem_cgroup_from_task(p);
4183
4184                 VM_BUG_ON(from == mem);
4185
4186                 mm = get_task_mm(p);
4187                 if (!mm)
4188                         return 0;
4189                 /* We move charges only when we move the owner of the mm */
4190                 if (mm->owner == p) {
4191                         VM_BUG_ON(mc.from);
4192                         VM_BUG_ON(mc.to);
4193                         VM_BUG_ON(mc.precharge);
4194                         VM_BUG_ON(mc.moved_charge);
4195                         VM_BUG_ON(mc.moved_swap);
4196                         VM_BUG_ON(mc.moving_task);
4197                         mc.from = from;
4198                         mc.to = mem;
4199                         mc.precharge = 0;
4200                         mc.moved_charge = 0;
4201                         mc.moved_swap = 0;
4202                         mc.moving_task = current;
4203
4204                         ret = mem_cgroup_precharge_mc(mm);
4205                         if (ret)
4206                                 mem_cgroup_clear_mc();
4207                 }
4208                 mmput(mm);
4209         }
4210         return ret;
4211 }
4212
4213 static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
4214                                 struct cgroup *cgroup,
4215                                 struct task_struct *p,
4216                                 bool threadgroup)
4217 {
4218         mem_cgroup_clear_mc();
4219 }
4220
4221 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
4222                                 unsigned long addr, unsigned long end,
4223                                 struct mm_walk *walk)
4224 {
4225         int ret = 0;
4226         struct vm_area_struct *vma = walk->private;
4227         pte_t *pte;
4228         spinlock_t *ptl;
4229
4230 retry:
4231         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4232         for (; addr != end; addr += PAGE_SIZE) {
4233                 pte_t ptent = *(pte++);
4234                 union mc_target target;
4235                 int type;
4236                 struct page *page;
4237                 struct page_cgroup *pc;
4238                 swp_entry_t ent;
4239
4240                 if (!mc.precharge)
4241                         break;
4242
4243                 type = is_target_pte_for_mc(vma, addr, ptent, &target);
4244                 switch (type) {
4245                 case MC_TARGET_PAGE:
4246                         page = target.page;
4247                         if (isolate_lru_page(page))
4248                                 goto put;
4249                         pc = lookup_page_cgroup(page);
4250                         if (!mem_cgroup_move_account(pc,
4251                                                 mc.from, mc.to, false)) {
4252                                 mc.precharge--;
4253                                 /* we uncharge from mc.from later. */
4254                                 mc.moved_charge++;
4255                         }
4256                         putback_lru_page(page);
4257 put:                    /* is_target_pte_for_mc() took a reference on the page */
4258                         put_page(page);
4259                         break;
4260                 case MC_TARGET_SWAP:
4261                         ent = target.ent;
4262                         if (!mem_cgroup_move_swap_account(ent,
4263                                                 mc.from, mc.to, false)) {
4264                                 mc.precharge--;
4265                                 /* we fixup refcnts and charges later. */
4266                                 mc.moved_swap++;
4267                         }
4268                         break;
4269                 default:
4270                         break;
4271                 }
4272         }
4273         pte_unmap_unlock(pte - 1, ptl);
4274         cond_resched();
4275
4276         if (addr != end) {
4277                 /*
4278                  * We have consumed all the precharges we got in can_attach().
4279                  * We try to charge one by one, but don't do any additional
4280                  * charges to mc.to once a charge has failed during the
4281                  * attach() phase.
4282                  */
4283                 ret = mem_cgroup_do_precharge(1);
4284                 if (!ret)
4285                         goto retry;
4286         }
4287
4288         return ret;
4289 }
4290
4291 static void mem_cgroup_move_charge(struct mm_struct *mm)
4292 {
4293         struct vm_area_struct *vma;
4294
4295         lru_add_drain_all();
4296         down_read(&mm->mmap_sem);
4297         for (vma = mm->mmap; vma; vma = vma->vm_next) {
4298                 int ret;
4299                 struct mm_walk mem_cgroup_move_charge_walk = {
4300                         .pmd_entry = mem_cgroup_move_charge_pte_range,
4301                         .mm = mm,
4302                         .private = vma,
4303                 };
4304                 if (is_vm_hugetlb_page(vma))
4305                         continue;
4306                 /* TODO: We don't move charges of shmem/tmpfs pages for now. */
4307                 if (vma->vm_flags & VM_SHARED)
4308                         continue;
4309                 ret = walk_page_range(vma->vm_start, vma->vm_end,
4310                                                 &mem_cgroup_move_charge_walk);
4311                 if (ret)
4312                         /*
4313                          * This means we have consumed all precharges and
4314                          * failed to do an additional charge. Just give up here.
4315                          */
4316                         break;
4317         }
4318         up_read(&mm->mmap_sem);
4319 }
4320
4321 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
4322                                 struct cgroup *cont,
4323                                 struct cgroup *old_cont,
4324                                 struct task_struct *p,
4325                                 bool threadgroup)
4326 {
4327         struct mm_struct *mm;
4328
4329         if (!mc.to)
4330                 /* no need to move charge */
4331                 return;
4332
4333         mm = get_task_mm(p);
4334         if (mm) {
4335                 mem_cgroup_move_charge(mm);
4336                 mmput(mm);
4337         }
4338         mem_cgroup_clear_mc();
4339 }
4340 #else   /* !CONFIG_MMU */
4341 static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
4342                                 struct cgroup *cgroup,
4343                                 struct task_struct *p,
4344                                 bool threadgroup)
4345 {
4346         return 0;
4347 }
4348 static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
4349                                 struct cgroup *cgroup,
4350                                 struct task_struct *p,
4351                                 bool threadgroup)
4352 {
4353 }
4354 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
4355                                 struct cgroup *cont,
4356                                 struct cgroup *old_cont,
4357                                 struct task_struct *p,
4358                                 bool threadgroup)
4359 {
4360 }
4361 #endif
4362
4363 struct cgroup_subsys mem_cgroup_subsys = {
4364         .name = "memory",
4365         .subsys_id = mem_cgroup_subsys_id,
4366         .create = mem_cgroup_create,
4367         .pre_destroy = mem_cgroup_pre_destroy,
4368         .destroy = mem_cgroup_destroy,
4369         .populate = mem_cgroup_populate,
4370         .can_attach = mem_cgroup_can_attach,
4371         .cancel_attach = mem_cgroup_cancel_attach,
4372         .attach = mem_cgroup_move_task,
4373         .early_init = 0,
4374         .use_id = 1,
4375 };
4376
4377 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4378
4379 static int __init disable_swap_account(char *s)
4380 {
4381         really_do_swap_account = 0;
4382         return 1;
4383 }
4384 __setup("noswapaccount", disable_swap_account);
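/*
 * Booting with "noswapaccount" on the kernel command line clears
 * really_do_swap_account, so do_swap_account stays 0 and the memsw.* control
 * files are never registered (see register_memsw_files() above).
 */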
4385 #endif