memcg: cpu hotplug aware quick account_move detection
1 /* memcontrol.c - Memory Controller
2  *
3  * Copyright IBM Corporation, 2007
4  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5  *
6  * Copyright 2007 OpenVZ SWsoft Inc
7  * Author: Pavel Emelianov <xemul@openvz.org>
8  *
9  * Memory thresholds
10  * Copyright (C) 2009 Nokia Corporation
11  * Author: Kirill A. Shutemov
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  */
23
24 #include <linux/res_counter.h>
25 #include <linux/memcontrol.h>
26 #include <linux/cgroup.h>
27 #include <linux/mm.h>
28 #include <linux/hugetlb.h>
29 #include <linux/pagemap.h>
30 #include <linux/smp.h>
31 #include <linux/page-flags.h>
32 #include <linux/backing-dev.h>
33 #include <linux/bit_spinlock.h>
34 #include <linux/rcupdate.h>
35 #include <linux/limits.h>
36 #include <linux/mutex.h>
37 #include <linux/rbtree.h>
38 #include <linux/slab.h>
39 #include <linux/swap.h>
40 #include <linux/swapops.h>
41 #include <linux/spinlock.h>
42 #include <linux/eventfd.h>
43 #include <linux/sort.h>
44 #include <linux/fs.h>
45 #include <linux/seq_file.h>
46 #include <linux/vmalloc.h>
47 #include <linux/mm_inline.h>
48 #include <linux/page_cgroup.h>
49 #include <linux/cpu.h>
50 #include <linux/oom.h>
51 #include "internal.h"
52
53 #include <asm/uaccess.h>
54
55 #include <trace/events/vmscan.h>
56
57 struct cgroup_subsys mem_cgroup_subsys __read_mostly;
58 #define MEM_CGROUP_RECLAIM_RETRIES      5
59 struct mem_cgroup *root_mem_cgroup __read_mostly;
60
61 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
62 /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
63 int do_swap_account __read_mostly;
64 static int really_do_swap_account __initdata = 1; /* for remembering boot option */
65 #else
66 #define do_swap_account         (0)
67 #endif
68
69 /*
70  * Per memcg event counter is incremented at every pagein/pageout. This counter
71  * is used to trigger some periodic events. This is straightforward and better
72  * than using jiffies etc. to handle periodic memcg events.
73  *
74  * These values will be used as !((event) & ((1 <<(thresh)) - 1))
75  */
76 #define THRESHOLDS_EVENTS_THRESH (7) /* once in 128 */
77 #define SOFTLIMIT_EVENTS_THRESH (10) /* once in 1024 */
78
79 /*
80  * Statistics for memory cgroup.
81  */
82 enum mem_cgroup_stat_index {
83         /*
84          * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
85          */
86         MEM_CGROUP_STAT_CACHE,     /* # of pages charged as cache */
87         MEM_CGROUP_STAT_RSS,       /* # of pages charged as anon rss */
88         MEM_CGROUP_STAT_FILE_MAPPED,  /* # of pages charged as file rss */
89         MEM_CGROUP_STAT_PGPGIN_COUNT,   /* # of pages paged in */
90         MEM_CGROUP_STAT_PGPGOUT_COUNT,  /* # of pages paged out */
91         MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
92         MEM_CGROUP_STAT_DATA, /* end of data that requires synchronization */
93         /* incremented at every  pagein/pageout */
94         MEM_CGROUP_EVENTS = MEM_CGROUP_STAT_DATA,
95         MEM_CGROUP_ON_MOVE,     /* someone is moving account between groups */
96
97         MEM_CGROUP_STAT_NSTATS,
98 };
99
100 struct mem_cgroup_stat_cpu {
101         s64 count[MEM_CGROUP_STAT_NSTATS];
102 };
103
104 /*
105  * per-zone information in memory controller.
106  */
107 struct mem_cgroup_per_zone {
108         /*
109          * spin_lock to protect the per cgroup LRU
110          */
111         struct list_head        lists[NR_LRU_LISTS];
112         unsigned long           count[NR_LRU_LISTS];
113
114         struct zone_reclaim_stat reclaim_stat;
115         struct rb_node          tree_node;      /* RB tree node */
116         unsigned long long      usage_in_excess;/* Set to the value by which */
117                                                 /* the soft limit is exceeded*/
118         bool                    on_tree;
119         struct mem_cgroup       *mem;           /* Back pointer, we cannot */
120                                                 /* use container_of        */
121 };
122 /* Macro for accessing counter */
123 #define MEM_CGROUP_ZSTAT(mz, idx)       ((mz)->count[(idx)])
124
125 struct mem_cgroup_per_node {
126         struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
127 };
128
129 struct mem_cgroup_lru_info {
130         struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
131 };
132
133 /*
134  * Cgroups above their limits are maintained in a RB-Tree, independent of
135  * their hierarchy representation
136  */
137
138 struct mem_cgroup_tree_per_zone {
139         struct rb_root rb_root;
140         spinlock_t lock;
141 };
142
143 struct mem_cgroup_tree_per_node {
144         struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
145 };
146
147 struct mem_cgroup_tree {
148         struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
149 };
150
151 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
152
153 struct mem_cgroup_threshold {
154         struct eventfd_ctx *eventfd;
155         u64 threshold;
156 };
157
158 /* For threshold */
159 struct mem_cgroup_threshold_ary {
160         /* An array index points to threshold just below usage. */
161         int current_threshold;
162         /* Size of entries[] */
163         unsigned int size;
164         /* Array of thresholds */
165         struct mem_cgroup_threshold entries[0];
166 };
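/*
 * Note: entries[0] above is a zero-length (flexible) array member; the
 * structure is expected to be allocated with room for "size" struct
 * mem_cgroup_threshold entries immediately following it.
 */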
167
168 struct mem_cgroup_thresholds {
169         /* Primary thresholds array */
170         struct mem_cgroup_threshold_ary *primary;
171         /*
172          * Spare threshold array.
173          * This is needed to make mem_cgroup_unregister_event() "never fail".
174          * It must be able to store at least primary->size - 1 entries.
175          */
176         struct mem_cgroup_threshold_ary *spare;
177 };
178
179 /* for OOM */
180 struct mem_cgroup_eventfd_list {
181         struct list_head list;
182         struct eventfd_ctx *eventfd;
183 };
184
185 static void mem_cgroup_threshold(struct mem_cgroup *mem);
186 static void mem_cgroup_oom_notify(struct mem_cgroup *mem);
187
188 /*
189  * The memory controller data structure. The memory controller controls both
190  * page cache and RSS per cgroup. We would eventually like to provide
191  * statistics based on the statistics developed by Rik Van Riel for clock-pro,
192  * to help the administrator determine what knobs to tune.
193  *
194  * TODO: Add a water mark for the memory controller. Reclaim will begin when
195  * we hit the water mark. Maybe even add a low water mark, such that
196  * no reclaim occurs from a cgroup at its low water mark; this is
197  * a feature that will be implemented much later in the future.
198  */
199 struct mem_cgroup {
200         struct cgroup_subsys_state css;
201         /*
202          * the counter to account for memory usage
203          */
204         struct res_counter res;
205         /*
206          * the counter to account for mem+swap usage.
207          */
208         struct res_counter memsw;
209         /*
210          * Per cgroup active and inactive list, similar to the
211          * per zone LRU lists.
212          */
213         struct mem_cgroup_lru_info info;
214
215         /*
216          * protects reclaim related members.
217          */
218         spinlock_t reclaim_param_lock;
219
220         /*
221          * While reclaiming in a hierarchy, we cache the last child we
222          * reclaimed from.
223          */
224         int last_scanned_child;
225         /*
226          * Should the accounting and control be hierarchical, per subtree?
227          */
228         bool use_hierarchy;
229         atomic_t        oom_lock;
230         atomic_t        refcnt;
231
232         unsigned int    swappiness;
233         /* OOM-Killer disable */
234         int             oom_kill_disable;
235
236         /* set when res.limit == memsw.limit */
237         bool            memsw_is_minimum;
238
239         /* protect arrays of thresholds */
240         struct mutex thresholds_lock;
241
242         /* thresholds for memory usage. RCU-protected */
243         struct mem_cgroup_thresholds thresholds;
244
245         /* thresholds for mem+swap usage. RCU-protected */
246         struct mem_cgroup_thresholds memsw_thresholds;
247
248         /* For oom notifier event fd */
249         struct list_head oom_notify;
250
251         /*
252          * Should we move charges of a task when a task is moved into this
253          * mem_cgroup ? And what type of charges should we move ?
254          */
255         unsigned long   move_charge_at_immigrate;
256         /*
257          * percpu counter.
258          */
259         struct mem_cgroup_stat_cpu *stat;
260         /*
261          * used when a cpu is offlined and for other synchronization.
262          * See mem_cgroup_read_stat().
263          */
264         struct mem_cgroup_stat_cpu nocpu_base;
265         spinlock_t pcp_counter_lock;
266 };
267
268 /* Stuff for moving charges at task migration. */
269 /*
270  * Types of charges to be moved. "move_charge_at_immigrate" is treated as a
271  * left-shifted bitmap of these types.
272  */
273 enum move_type {
274         MOVE_CHARGE_TYPE_ANON,  /* private anonymous page and swap of it */
275         MOVE_CHARGE_TYPE_FILE,  /* file page(including tmpfs) and swap of it */
276         NR_MOVE_TYPE,
277 };
278
279 /* "mc" and its members are protected by cgroup_mutex */
280 static struct move_charge_struct {
281         spinlock_t        lock; /* for from, to, moving_task */
282         struct mem_cgroup *from;
283         struct mem_cgroup *to;
284         unsigned long precharge;
285         unsigned long moved_charge;
286         unsigned long moved_swap;
287         struct task_struct *moving_task;        /* a task moving charges */
288         wait_queue_head_t waitq;                /* a waitq for other context */
289 } mc = {
290         .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
291         .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
292 };
293
294 static bool move_anon(void)
295 {
296         return test_bit(MOVE_CHARGE_TYPE_ANON,
297                                         &mc.to->move_charge_at_immigrate);
298 }
299
300 static bool move_file(void)
301 {
302         return test_bit(MOVE_CHARGE_TYPE_FILE,
303                                         &mc.to->move_charge_at_immigrate);
304 }
305
306 /*
307  * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
308  * limit reclaim to prevent infinite loops, if they ever occur.
309  */
310 #define MEM_CGROUP_MAX_RECLAIM_LOOPS            (100)
311 #define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS (2)
312
313 enum charge_type {
314         MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
315         MEM_CGROUP_CHARGE_TYPE_MAPPED,
316         MEM_CGROUP_CHARGE_TYPE_SHMEM,   /* used by page migration of shmem */
317         MEM_CGROUP_CHARGE_TYPE_FORCE,   /* used by force_empty */
318         MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
319         MEM_CGROUP_CHARGE_TYPE_DROP,    /* a page was unused swap cache */
320         NR_CHARGE_TYPE,
321 };
322
323 /* used only in this file (for easy reading) */
324 #define PCGF_CACHE      (1UL << PCG_CACHE)
325 #define PCGF_USED       (1UL << PCG_USED)
326 #define PCGF_LOCK       (1UL << PCG_LOCK)
327 /* Not used, but added here for completeness */
328 #define PCGF_ACCT       (1UL << PCG_ACCT)
329
330 /* for encoding cft->private value on file */
331 #define _MEM                    (0)
332 #define _MEMSWAP                (1)
333 #define _OOM_TYPE               (2)
334 #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
335 #define MEMFILE_TYPE(val)       (((val) >> 16) & 0xffff)
336 #define MEMFILE_ATTR(val)       ((val) & 0xffff)
337 /* Used for OOM notifier */
338 #define OOM_CONTROL             (0)
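/*
 * A sketch of how the encoding above is meant to be used (illustrative, not a
 * verbatim caller): a control file for the mem+swap limit would set
 * cft->private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), packing the counter
 * type into the upper 16 bits and the res_counter attribute into the lower
 * 16 bits; MEMFILE_TYPE() and MEMFILE_ATTR() later recover the two halves.
 */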
339
340 /*
341  * Reclaim flags for mem_cgroup_hierarchical_reclaim
342  */
343 #define MEM_CGROUP_RECLAIM_NOSWAP_BIT   0x0
344 #define MEM_CGROUP_RECLAIM_NOSWAP       (1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
345 #define MEM_CGROUP_RECLAIM_SHRINK_BIT   0x1
346 #define MEM_CGROUP_RECLAIM_SHRINK       (1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
347 #define MEM_CGROUP_RECLAIM_SOFT_BIT     0x2
348 #define MEM_CGROUP_RECLAIM_SOFT         (1 << MEM_CGROUP_RECLAIM_SOFT_BIT)
349
350 static void mem_cgroup_get(struct mem_cgroup *mem);
351 static void mem_cgroup_put(struct mem_cgroup *mem);
352 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
353 static void drain_all_stock_async(void);
354
355 static struct mem_cgroup_per_zone *
356 mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
357 {
358         return &mem->info.nodeinfo[nid]->zoneinfo[zid];
359 }
360
361 struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
362 {
363         return &mem->css;
364 }
365
366 static struct mem_cgroup_per_zone *
367 page_cgroup_zoneinfo(struct page_cgroup *pc)
368 {
369         struct mem_cgroup *mem = pc->mem_cgroup;
370         int nid = page_cgroup_nid(pc);
371         int zid = page_cgroup_zid(pc);
372
373         if (!mem)
374                 return NULL;
375
376         return mem_cgroup_zoneinfo(mem, nid, zid);
377 }
378
379 static struct mem_cgroup_tree_per_zone *
380 soft_limit_tree_node_zone(int nid, int zid)
381 {
382         return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
383 }
384
385 static struct mem_cgroup_tree_per_zone *
386 soft_limit_tree_from_page(struct page *page)
387 {
388         int nid = page_to_nid(page);
389         int zid = page_zonenum(page);
390
391         return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
392 }
393
394 static void
395 __mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
396                                 struct mem_cgroup_per_zone *mz,
397                                 struct mem_cgroup_tree_per_zone *mctz,
398                                 unsigned long long new_usage_in_excess)
399 {
400         struct rb_node **p = &mctz->rb_root.rb_node;
401         struct rb_node *parent = NULL;
402         struct mem_cgroup_per_zone *mz_node;
403
404         if (mz->on_tree)
405                 return;
406
407         mz->usage_in_excess = new_usage_in_excess;
408         if (!mz->usage_in_excess)
409                 return;
410         while (*p) {
411                 parent = *p;
412                 mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
413                                         tree_node);
414                 if (mz->usage_in_excess < mz_node->usage_in_excess)
415                         p = &(*p)->rb_left;
416                 /*
417                  * We can't avoid mem cgroups that are over their soft
418                  * limit by the same amount
419                  */
420                 else if (mz->usage_in_excess >= mz_node->usage_in_excess)
421                         p = &(*p)->rb_right;
422         }
423         rb_link_node(&mz->tree_node, parent, p);
424         rb_insert_color(&mz->tree_node, &mctz->rb_root);
425         mz->on_tree = true;
426 }
427
428 static void
429 __mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
430                                 struct mem_cgroup_per_zone *mz,
431                                 struct mem_cgroup_tree_per_zone *mctz)
432 {
433         if (!mz->on_tree)
434                 return;
435         rb_erase(&mz->tree_node, &mctz->rb_root);
436         mz->on_tree = false;
437 }
438
439 static void
440 mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
441                                 struct mem_cgroup_per_zone *mz,
442                                 struct mem_cgroup_tree_per_zone *mctz)
443 {
444         spin_lock(&mctz->lock);
445         __mem_cgroup_remove_exceeded(mem, mz, mctz);
446         spin_unlock(&mctz->lock);
447 }
448
449
450 static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
451 {
452         unsigned long long excess;
453         struct mem_cgroup_per_zone *mz;
454         struct mem_cgroup_tree_per_zone *mctz;
455         int nid = page_to_nid(page);
456         int zid = page_zonenum(page);
457         mctz = soft_limit_tree_from_page(page);
458
459         /*
460          * It is necessary to update all ancestors when hierarchy is used,
461          * because their event counters are not touched.
462          */
463         for (; mem; mem = parent_mem_cgroup(mem)) {
464                 mz = mem_cgroup_zoneinfo(mem, nid, zid);
465                 excess = res_counter_soft_limit_excess(&mem->res);
466                 /*
467                  * We have to update the tree if mz is on RB-tree or
468                  * mem is over its softlimit.
469                  */
470                 if (excess || mz->on_tree) {
471                         spin_lock(&mctz->lock);
472                         /* if on-tree, remove it */
473                         if (mz->on_tree)
474                                 __mem_cgroup_remove_exceeded(mem, mz, mctz);
475                         /*
476                          * Insert again. mz->usage_in_excess will be updated.
477                          * If excess is 0, no tree ops.
478                          */
479                         __mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
480                         spin_unlock(&mctz->lock);
481                 }
482         }
483 }
484
485 static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem)
486 {
487         int node, zone;
488         struct mem_cgroup_per_zone *mz;
489         struct mem_cgroup_tree_per_zone *mctz;
490
491         for_each_node_state(node, N_POSSIBLE) {
492                 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
493                         mz = mem_cgroup_zoneinfo(mem, node, zone);
494                         mctz = soft_limit_tree_node_zone(node, zone);
495                         mem_cgroup_remove_exceeded(mem, mz, mctz);
496                 }
497         }
498 }
499
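/*
 * res_counter values are kept in bytes, so the shift below converts the
 * soft-limit excess into a number of pages.
 */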
500 static inline unsigned long mem_cgroup_get_excess(struct mem_cgroup *mem)
501 {
502         return res_counter_soft_limit_excess(&mem->res) >> PAGE_SHIFT;
503 }
504
505 static struct mem_cgroup_per_zone *
506 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
507 {
508         struct rb_node *rightmost = NULL;
509         struct mem_cgroup_per_zone *mz;
510
511 retry:
512         mz = NULL;
513         rightmost = rb_last(&mctz->rb_root);
514         if (!rightmost)
515                 goto done;              /* Nothing to reclaim from */
516
517         mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
518         /*
519          * Remove the node now but someone else can add it back,
520          * we will add it back at the end of reclaim to its correct
521          * position in the tree.
522          */
523         __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
524         if (!res_counter_soft_limit_excess(&mz->mem->res) ||
525                 !css_tryget(&mz->mem->css))
526                 goto retry;
527 done:
528         return mz;
529 }
530
531 static struct mem_cgroup_per_zone *
532 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
533 {
534         struct mem_cgroup_per_zone *mz;
535
536         spin_lock(&mctz->lock);
537         mz = __mem_cgroup_largest_soft_limit_node(mctz);
538         spin_unlock(&mctz->lock);
539         return mz;
540 }
541
542 /*
543  * Implementation Note: reading percpu statistics for memcg.
544  *
545  * Both vmstat[] and percpu_counter use thresholds and periodic
546  * synchronization to implement "quick" reads. There is a trade-off between
547  * the cost of reading and the precision of the value. We may have a chance
548  * to implement a similar periodic synchronization for memcg's counters.
549  *
550  * But this _read() function is currently used for the user interface. Users
551  * account memory usage per memory cgroup and _always_ require an exact value
552  * because they account memory. Even if we provided a quick-and-fuzzy read,
553  * we would still have to visit all online cpus and compute the sum. So, for
554  * now, the extra synchronization is not implemented (it is only done for
555  * cpu hotplug).
556  *
557  * If some kernel-internal user could make use of a not-exact value, and
558  * reading all cpu values became a performance bottleneck in some common
559  * workload, thresholds and synchronization as in vmstat[] should be added.
560  */
561 static s64 mem_cgroup_read_stat(struct mem_cgroup *mem,
562                 enum mem_cgroup_stat_index idx)
563 {
564         int cpu;
565         s64 val = 0;
566
567         get_online_cpus();
568         for_each_online_cpu(cpu)
569                 val += per_cpu(mem->stat->count[idx], cpu);
570 #ifdef CONFIG_HOTPLUG_CPU
571         spin_lock(&mem->pcp_counter_lock);
572         val += mem->nocpu_base.count[idx];
573         spin_unlock(&mem->pcp_counter_lock);
574 #endif
575         put_online_cpus();
576         return val;
577 }
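/*
 * The nocpu_base sum above exists because, when a cpu is offlined, its
 * per-cpu counters are presumably folded into mem->nocpu_base by the cpu
 * hotplug handling elsewhere in this file, so that already accumulated
 * counts are not lost; pcp_counter_lock protects that fallback copy.
 */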
578
579 static s64 mem_cgroup_local_usage(struct mem_cgroup *mem)
580 {
581         s64 ret;
582
583         ret = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
584         ret += mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
585         return ret;
586 }
587
588 static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
589                                          bool charge)
590 {
591         int val = (charge) ? 1 : -1;
592         this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
593 }
594
595 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
596                                          struct page_cgroup *pc,
597                                          bool charge)
598 {
599         int val = (charge) ? 1 : -1;
600
601         preempt_disable();
602
603         if (PageCgroupCache(pc))
604                 __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], val);
605         else
606                 __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], val);
607
608         if (charge)
609                 __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGIN_COUNT]);
610         else
611                 __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGOUT_COUNT]);
612         __this_cpu_inc(mem->stat->count[MEM_CGROUP_EVENTS]);
613
614         preempt_enable();
615 }
616
617 static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
618                                         enum lru_list idx)
619 {
620         int nid, zid;
621         struct mem_cgroup_per_zone *mz;
622         u64 total = 0;
623
624         for_each_online_node(nid)
625                 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
626                         mz = mem_cgroup_zoneinfo(mem, nid, zid);
627                         total += MEM_CGROUP_ZSTAT(mz, idx);
628                 }
629         return total;
630 }
631
632 static bool __memcg_event_check(struct mem_cgroup *mem, int event_mask_shift)
633 {
634         s64 val;
635
636         val = this_cpu_read(mem->stat->count[MEM_CGROUP_EVENTS]);
637
638         return !(val & ((1 << event_mask_shift) - 1));
639 }
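/*
 * For example, with THRESHOLDS_EVENTS_THRESH == 7 the check above is true
 * whenever the per-cpu event counter is a multiple of 1 << 7, i.e. roughly
 * once every 128 pagein/pageout events on this cpu.
 */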
640
641 /*
642  * Check events in order: thresholds are checked at a finer grain than
643  * the soft limit tree update.
644  */
645 static void memcg_check_events(struct mem_cgroup *mem, struct page *page)
646 {
647         /* threshold event is triggered in finer grain than soft limit */
648         if (unlikely(__memcg_event_check(mem, THRESHOLDS_EVENTS_THRESH))) {
649                 mem_cgroup_threshold(mem);
650                 if (unlikely(__memcg_event_check(mem, SOFTLIMIT_EVENTS_THRESH)))
651                         mem_cgroup_update_tree(mem, page);
652         }
653 }
654
655 static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
656 {
657         return container_of(cgroup_subsys_state(cont,
658                                 mem_cgroup_subsys_id), struct mem_cgroup,
659                                 css);
660 }
661
662 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
663 {
664         /*
665          * mm_update_next_owner() may clear mm->owner to NULL
666          * if it races with swapoff, page migration, etc.
667          * So this can be called with p == NULL.
668          */
669         if (unlikely(!p))
670                 return NULL;
671
672         return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
673                                 struct mem_cgroup, css);
674 }
675
676 static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
677 {
678         struct mem_cgroup *mem = NULL;
679
680         if (!mm)
681                 return NULL;
682         /*
683          * Because we have no locks, mm->owner may be being moved to another
684          * cgroup. We use css_tryget() here even if this looks
685          * pessimistic (rather than adding locks here).
686          */
687         rcu_read_lock();
688         do {
689                 mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
690                 if (unlikely(!mem))
691                         break;
692         } while (!css_tryget(&mem->css));
693         rcu_read_unlock();
694         return mem;
695 }
696
697 /* The caller has to guarantee "mem" exists before calling this */
698 static struct mem_cgroup *mem_cgroup_start_loop(struct mem_cgroup *mem)
699 {
700         struct cgroup_subsys_state *css;
701         int found;
702
703         if (!mem) /* ROOT cgroup has the smallest ID */
704                 return root_mem_cgroup; /*css_put/get against root is ignored*/
705         if (!mem->use_hierarchy) {
706                 if (css_tryget(&mem->css))
707                         return mem;
708                 return NULL;
709         }
710         rcu_read_lock();
711         /*
712          * Search for the memory cgroup which has the smallest ID under the
713          * given ROOT cgroup. (ID >= 1)
714          */
715         css = css_get_next(&mem_cgroup_subsys, 1, &mem->css, &found);
716         if (css && css_tryget(css))
717                 mem = container_of(css, struct mem_cgroup, css);
718         else
719                 mem = NULL;
720         rcu_read_unlock();
721         return mem;
722 }
723
724 static struct mem_cgroup *mem_cgroup_get_next(struct mem_cgroup *iter,
725                                         struct mem_cgroup *root,
726                                         bool cond)
727 {
728         int nextid = css_id(&iter->css) + 1;
729         int found;
730         int hierarchy_used;
731         struct cgroup_subsys_state *css;
732
733         hierarchy_used = iter->use_hierarchy;
734
735         css_put(&iter->css);
736         /* If no ROOT, walk all, ignore hierarchy */
737         if (!cond || (root && !hierarchy_used))
738                 return NULL;
739
740         if (!root)
741                 root = root_mem_cgroup;
742
743         do {
744                 iter = NULL;
745                 rcu_read_lock();
746
747                 css = css_get_next(&mem_cgroup_subsys, nextid,
748                                 &root->css, &found);
749                 if (css && css_tryget(css))
750                         iter = container_of(css, struct mem_cgroup, css);
751                 rcu_read_unlock();
752                 /* If css is NULL, no more cgroups will be found */
753                 nextid = found + 1;
754         } while (css && !iter);
755
756         return iter;
757 }
758 /*
759  * for_each_mem_cgroup_tree() visits all cgroups under the tree. Please be
760  * careful: breaking out of the loop is not allowed because we hold a
761  * reference count. Instead, set "cond" to false and "continue" to exit the loop.
762  */
763 #define for_each_mem_cgroup_tree_cond(iter, root, cond) \
764         for (iter = mem_cgroup_start_loop(root);\
765              iter != NULL;\
766              iter = mem_cgroup_get_next(iter, root, cond))
767
768 #define for_each_mem_cgroup_tree(iter, root) \
769         for_each_mem_cgroup_tree_cond(iter, root, true)
770
771 #define for_each_mem_cgroup_all(iter) \
772         for_each_mem_cgroup_tree_cond(iter, NULL, true)
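/*
 * A minimal usage sketch (hypothetical walker, not taken from this file):
 * since "break" is not allowed, a caller that wants to stop early does
 * something like
 *
 *	bool cond = true;
 *	for_each_mem_cgroup_tree_cond(iter, root, cond) {
 *		if (done_with(iter))	(done_with() is a made-up predicate)
 *			cond = false;	(the iterator then drops its css
 *					 reference and ends the walk)
 *	}
 */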
773
774
775 static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
776 {
777         return (mem == root_mem_cgroup);
778 }
779
780 /*
781  * Following LRU functions are allowed to be used without PCG_LOCK.
782  * Operations are called by the global LRU routines independently of memcg.
783  * What we have to take care of here is the validity of pc->mem_cgroup.
784  *
785  * Changes to pc->mem_cgroup happen when
786  * 1. charge
787  * 2. moving account
788  * In the typical case, "charge" is done before add-to-lru. The exception is
789  * SwapCache, which is added to the LRU before being charged.
790  * If the PCG_USED bit is not set, the page_cgroup is not added to this
791  * private LRU. When moving account, the page is not on the LRU; it's isolated.
792  */
793
794 void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
795 {
796         struct page_cgroup *pc;
797         struct mem_cgroup_per_zone *mz;
798
799         if (mem_cgroup_disabled())
800                 return;
801         pc = lookup_page_cgroup(page);
802         /* can happen while we handle swapcache. */
803         if (!TestClearPageCgroupAcctLRU(pc))
804                 return;
805         VM_BUG_ON(!pc->mem_cgroup);
806         /*
807          * We don't check PCG_USED bit. It's cleared when the "page" is finally
808          * removed from global LRU.
809          */
810         mz = page_cgroup_zoneinfo(pc);
811         MEM_CGROUP_ZSTAT(mz, lru) -= 1;
812         if (mem_cgroup_is_root(pc->mem_cgroup))
813                 return;
814         VM_BUG_ON(list_empty(&pc->lru));
815         list_del_init(&pc->lru);
816         return;
817 }
818
819 void mem_cgroup_del_lru(struct page *page)
820 {
821         mem_cgroup_del_lru_list(page, page_lru(page));
822 }
823
824 void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
825 {
826         struct mem_cgroup_per_zone *mz;
827         struct page_cgroup *pc;
828
829         if (mem_cgroup_disabled())
830                 return;
831
832         pc = lookup_page_cgroup(page);
833         /*
834          * Used bit is set without atomic ops but after smp_wmb().
835          * For making pc->mem_cgroup visible, insert smp_rmb() here.
836          */
837         smp_rmb();
838         /* unused or root page is not rotated. */
839         if (!PageCgroupUsed(pc) || mem_cgroup_is_root(pc->mem_cgroup))
840                 return;
841         mz = page_cgroup_zoneinfo(pc);
842         list_move(&pc->lru, &mz->lists[lru]);
843 }
844
845 void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
846 {
847         struct page_cgroup *pc;
848         struct mem_cgroup_per_zone *mz;
849
850         if (mem_cgroup_disabled())
851                 return;
852         pc = lookup_page_cgroup(page);
853         VM_BUG_ON(PageCgroupAcctLRU(pc));
854         /*
855          * Used bit is set without atomic ops but after smp_wmb().
856          * For making pc->mem_cgroup visible, insert smp_rmb() here.
857          */
858         smp_rmb();
859         if (!PageCgroupUsed(pc))
860                 return;
861
862         mz = page_cgroup_zoneinfo(pc);
863         MEM_CGROUP_ZSTAT(mz, lru) += 1;
864         SetPageCgroupAcctLRU(pc);
865         if (mem_cgroup_is_root(pc->mem_cgroup))
866                 return;
867         list_add(&pc->lru, &mz->lists[lru]);
868 }
869
870 /*
871  * When handling SwapCache, pc->mem_cgroup may be changed while it's linked to
872  * the LRU because the page may be reused after it's fully uncharged (because
873  * of SwapCache behavior). To handle that, unlink the page_cgroup from the LRU
874  * when charging it again. This function is only used to charge SwapCache. It
875  * is done under lock_page() and zone->lru_lock is expected to never be held.
876  */
877 static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
878 {
879         unsigned long flags;
880         struct zone *zone = page_zone(page);
881         struct page_cgroup *pc = lookup_page_cgroup(page);
882
883         spin_lock_irqsave(&zone->lru_lock, flags);
884         /*
885          * Forget old LRU when this page_cgroup is *not* used. This Used bit
886          * is guarded by lock_page() because the page is SwapCache.
887          */
888         if (!PageCgroupUsed(pc))
889                 mem_cgroup_del_lru_list(page, page_lru(page));
890         spin_unlock_irqrestore(&zone->lru_lock, flags);
891 }
892
893 static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
894 {
895         unsigned long flags;
896         struct zone *zone = page_zone(page);
897         struct page_cgroup *pc = lookup_page_cgroup(page);
898
899         spin_lock_irqsave(&zone->lru_lock, flags);
900         /* link when the page is linked to LRU but page_cgroup isn't */
901         if (PageLRU(page) && !PageCgroupAcctLRU(pc))
902                 mem_cgroup_add_lru_list(page, page_lru(page));
903         spin_unlock_irqrestore(&zone->lru_lock, flags);
904 }
905
906
907 void mem_cgroup_move_lists(struct page *page,
908                            enum lru_list from, enum lru_list to)
909 {
910         if (mem_cgroup_disabled())
911                 return;
912         mem_cgroup_del_lru_list(page, from);
913         mem_cgroup_add_lru_list(page, to);
914 }
915
916 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
917 {
918         int ret;
919         struct mem_cgroup *curr = NULL;
920         struct task_struct *p;
921
922         p = find_lock_task_mm(task);
923         if (!p)
924                 return 0;
925         curr = try_get_mem_cgroup_from_mm(p->mm);
926         task_unlock(p);
927         if (!curr)
928                 return 0;
929         /*
930          * We should check use_hierarchy of "mem", not "curr", because checking
931          * use_hierarchy of "curr" here would make this function return true if
932          * hierarchy is enabled in "curr" and "curr" is a child of "mem" in the
933          * *cgroup* hierarchy (even if use_hierarchy is disabled in "mem").
934          */
935         if (mem->use_hierarchy)
936                 ret = css_is_ancestor(&curr->css, &mem->css);
937         else
938                 ret = (curr == mem);
939         css_put(&curr->css);
940         return ret;
941 }
942
943 static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
944 {
945         unsigned long active;
946         unsigned long inactive;
947         unsigned long gb;
948         unsigned long inactive_ratio;
949
950         inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_ANON);
951         active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_ANON);
952
953         gb = (inactive + active) >> (30 - PAGE_SHIFT);
954         if (gb)
955                 inactive_ratio = int_sqrt(10 * gb);
956         else
957                 inactive_ratio = 1;
958
959         if (present_pages) {
960                 present_pages[0] = inactive;
961                 present_pages[1] = active;
962         }
963
964         return inactive_ratio;
965 }
966
967 int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
968 {
969         unsigned long active;
970         unsigned long inactive;
971         unsigned long present_pages[2];
972         unsigned long inactive_ratio;
973
974         inactive_ratio = calc_inactive_ratio(memcg, present_pages);
975
976         inactive = present_pages[0];
977         active = present_pages[1];
978
979         if (inactive * inactive_ratio < active)
980                 return 1;
981
982         return 0;
983 }
984
985 int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
986 {
987         unsigned long active;
988         unsigned long inactive;
989
990         inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_FILE);
991         active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_FILE);
992
993         return (active > inactive);
994 }
995
996 unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
997                                        struct zone *zone,
998                                        enum lru_list lru)
999 {
1000         int nid = zone_to_nid(zone);
1001         int zid = zone_idx(zone);
1002         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
1003
1004         return MEM_CGROUP_ZSTAT(mz, lru);
1005 }
1006
1007 struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
1008                                                       struct zone *zone)
1009 {
1010         int nid = zone_to_nid(zone);
1011         int zid = zone_idx(zone);
1012         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
1013
1014         return &mz->reclaim_stat;
1015 }
1016
1017 struct zone_reclaim_stat *
1018 mem_cgroup_get_reclaim_stat_from_page(struct page *page)
1019 {
1020         struct page_cgroup *pc;
1021         struct mem_cgroup_per_zone *mz;
1022
1023         if (mem_cgroup_disabled())
1024                 return NULL;
1025
1026         pc = lookup_page_cgroup(page);
1027         /*
1028          * Used bit is set without atomic ops but after smp_wmb().
1029          * For making pc->mem_cgroup visible, insert smp_rmb() here.
1030          */
1031         smp_rmb();
1032         if (!PageCgroupUsed(pc))
1033                 return NULL;
1034
1035         mz = page_cgroup_zoneinfo(pc);
1036         if (!mz)
1037                 return NULL;
1038
1039         return &mz->reclaim_stat;
1040 }
1041
1042 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
1043                                         struct list_head *dst,
1044                                         unsigned long *scanned, int order,
1045                                         int mode, struct zone *z,
1046                                         struct mem_cgroup *mem_cont,
1047                                         int active, int file)
1048 {
1049         unsigned long nr_taken = 0;
1050         struct page *page;
1051         unsigned long scan;
1052         LIST_HEAD(pc_list);
1053         struct list_head *src;
1054         struct page_cgroup *pc, *tmp;
1055         int nid = zone_to_nid(z);
1056         int zid = zone_idx(z);
1057         struct mem_cgroup_per_zone *mz;
1058         int lru = LRU_FILE * file + active;
1059         int ret;
1060
1061         BUG_ON(!mem_cont);
1062         mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
1063         src = &mz->lists[lru];
1064
1065         scan = 0;
1066         list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
1067                 if (scan >= nr_to_scan)
1068                         break;
1069
1070                 page = pc->page;
1071                 if (unlikely(!PageCgroupUsed(pc)))
1072                         continue;
1073                 if (unlikely(!PageLRU(page)))
1074                         continue;
1075
1076                 scan++;
1077                 ret = __isolate_lru_page(page, mode, file);
1078                 switch (ret) {
1079                 case 0:
1080                         list_move(&page->lru, dst);
1081                         mem_cgroup_del_lru(page);
1082                         nr_taken++;
1083                         break;
1084                 case -EBUSY:
1085                         /* we don't affect global LRU but rotate in our LRU */
1086                         mem_cgroup_rotate_lru_list(page, page_lru(page));
1087                         break;
1088                 default:
1089                         break;
1090                 }
1091         }
1092
1093         *scanned = scan;
1094
1095         trace_mm_vmscan_memcg_isolate(0, nr_to_scan, scan, nr_taken,
1096                                       0, 0, 0, mode);
1097
1098         return nr_taken;
1099 }
1100
1101 #define mem_cgroup_from_res_counter(counter, member)    \
1102         container_of(counter, struct mem_cgroup, member)
1103
1104 static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
1105 {
1106         if (do_swap_account) {
1107                 if (res_counter_check_under_limit(&mem->res) &&
1108                         res_counter_check_under_limit(&mem->memsw))
1109                         return true;
1110         } else
1111                 if (res_counter_check_under_limit(&mem->res))
1112                         return true;
1113         return false;
1114 }
1115
1116 static unsigned int get_swappiness(struct mem_cgroup *memcg)
1117 {
1118         struct cgroup *cgrp = memcg->css.cgroup;
1119         unsigned int swappiness;
1120
1121         /* root ? */
1122         if (cgrp->parent == NULL)
1123                 return vm_swappiness;
1124
1125         spin_lock(&memcg->reclaim_param_lock);
1126         swappiness = memcg->swappiness;
1127         spin_unlock(&memcg->reclaim_param_lock);
1128
1129         return swappiness;
1130 }
1131
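/*
 * Quick account_move detection (see also the comment above
 * mem_cgroup_update_file_mapped() below): before charges are moved,
 * mem_cgroup_start_move() raises MEM_CGROUP_ON_MOVE on every online cpu and
 * in nocpu_base (the latter presumably so that cpu hotplug handling elsewhere
 * in this file can propagate the raised value to cpus onlined later), then
 * calls synchronize_rcu().  A statistics updater running under
 * rcu_read_lock() therefore only needs to read its own cpu's
 * MEM_CGROUP_ON_MOVE counter (mem_cgroup_stealed()) to know whether
 * pc->mem_cgroup may be overwritten concurrently.
 */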
1132 static void mem_cgroup_start_move(struct mem_cgroup *mem)
1133 {
1134         int cpu;
1135
1136         get_online_cpus();
1137         spin_lock(&mem->pcp_counter_lock);
1138         for_each_online_cpu(cpu)
1139                 per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
1140         mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1;
1141         spin_unlock(&mem->pcp_counter_lock);
1142         put_online_cpus();
1143
1144         synchronize_rcu();
1145 }
1146
1147 static void mem_cgroup_end_move(struct mem_cgroup *mem)
1148 {
1149         int cpu;
1150
1151         if (!mem)
1152                 return;
1153         get_online_cpus();
1154         spin_lock(&mem->pcp_counter_lock);
1155         for_each_online_cpu(cpu)
1156                 per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1;
1157         mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1;
1158         spin_unlock(&mem->pcp_counter_lock);
1159         put_online_cpus();
1160 }
1161 /*
1162  * Two routines for checking whether "mem" is under move_account() or not.
1163  *
1164  * mem_cgroup_stealed() - checks whether a cgroup is mc.from. This is used
1165  *                        for avoiding races in accounting. If true,
1166  *                        pc->mem_cgroup may be overwritten.
1167  *
1168  * mem_cgroup_under_move() - checks whether a cgroup is mc.from, mc.to, or
1169  *                        under the hierarchy of moving cgroups. This is used
1170  *                        for waiting at high memory pressure caused by "move".
1171  */
1172
1173 static bool mem_cgroup_stealed(struct mem_cgroup *mem)
1174 {
1175         VM_BUG_ON(!rcu_read_lock_held());
1176         return this_cpu_read(mem->stat->count[MEM_CGROUP_ON_MOVE]) > 0;
1177 }
1178
1179 static bool mem_cgroup_under_move(struct mem_cgroup *mem)
1180 {
1181         struct mem_cgroup *from;
1182         struct mem_cgroup *to;
1183         bool ret = false;
1184         /*
1185          * Unlike the task_move routines, we access mc.to and mc.from without
1186          * mutual exclusion by cgroup_mutex. Here, we take the spinlock instead.
1187          */
1188         spin_lock(&mc.lock);
1189         from = mc.from;
1190         to = mc.to;
1191         if (!from)
1192                 goto unlock;
1193         if (from == mem || to == mem
1194             || (mem->use_hierarchy && css_is_ancestor(&from->css, &mem->css))
1195             || (mem->use_hierarchy && css_is_ancestor(&to->css, &mem->css)))
1196                 ret = true;
1197 unlock:
1198         spin_unlock(&mc.lock);
1199         return ret;
1200 }
1201
1202 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *mem)
1203 {
1204         if (mc.moving_task && current != mc.moving_task) {
1205                 if (mem_cgroup_under_move(mem)) {
1206                         DEFINE_WAIT(wait);
1207                         prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1208                         /* moving charge context might have finished. */
1209                         if (mc.moving_task)
1210                                 schedule();
1211                         finish_wait(&mc.waitq, &wait);
1212                         return true;
1213                 }
1214         }
1215         return false;
1216 }
1217
1218 /**
1219  * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
1220  * @memcg: The memory cgroup that went over limit
1221  * @p: Task that is going to be killed
1222  *
1223  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1224  * enabled
1225  */
1226 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1227 {
1228         struct cgroup *task_cgrp;
1229         struct cgroup *mem_cgrp;
1230         /*
1231          * Need a buffer in BSS, can't rely on allocations. The code relies
1232          * on the assumption that OOM is serialized for memory controller.
1233          * If this assumption is broken, revisit this code.
1234          */
1235         static char memcg_name[PATH_MAX];
1236         int ret;
1237
1238         if (!memcg || !p)
1239                 return;
1240
1241
1242         rcu_read_lock();
1243
1244         mem_cgrp = memcg->css.cgroup;
1245         task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
1246
1247         ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
1248         if (ret < 0) {
1249                 /*
1250                  * Unfortunately, we are unable to convert to a useful name,
1251                  * but we'll still print out the usage information.
1252                  */
1253                 rcu_read_unlock();
1254                 goto done;
1255         }
1256         rcu_read_unlock();
1257
1258         printk(KERN_INFO "Task in %s killed", memcg_name);
1259
1260         rcu_read_lock();
1261         ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
1262         if (ret < 0) {
1263                 rcu_read_unlock();
1264                 goto done;
1265         }
1266         rcu_read_unlock();
1267
1268         /*
1269          * Continues from above, so we don't need a KERN_ level
1270          */
1271         printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
1272 done:
1273
1274         printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
1275                 res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
1276                 res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
1277                 res_counter_read_u64(&memcg->res, RES_FAILCNT));
1278         printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
1279                 "failcnt %llu\n",
1280                 res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
1281                 res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
1282                 res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
1283 }
1284
1285 /*
1286  * This function returns the number of memcgs under the hierarchy tree.
1287  * Returns 1 (the self count) if there are no children.
1288  */
1289 static int mem_cgroup_count_children(struct mem_cgroup *mem)
1290 {
1291         int num = 0;
1292         struct mem_cgroup *iter;
1293
1294         for_each_mem_cgroup_tree(iter, mem)
1295                 num++;
1296         return num;
1297 }
1298
1299 /*
1300  * Return the memory (and swap, if configured) limit for a memcg.
1301  */
1302 u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
1303 {
1304         u64 limit;
1305         u64 memsw;
1306
1307         limit = res_counter_read_u64(&memcg->res, RES_LIMIT) +
1308                         total_swap_pages;
1309         memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1310         /*
1311          * If memsw is finite and limits the amount of swap space available
1312          * to this memcg, return that limit.
1313          */
1314         return min(limit, memsw);
1315 }
1316
1317 /*
1318  * Visit the first child (need not be the first child as per the ordering
1319  * of the cgroup list, since we track last_scanned_child) of @mem and use
1320  * that to reclaim free pages from.
1321  */
1322 static struct mem_cgroup *
1323 mem_cgroup_select_victim(struct mem_cgroup *root_mem)
1324 {
1325         struct mem_cgroup *ret = NULL;
1326         struct cgroup_subsys_state *css;
1327         int nextid, found;
1328
1329         if (!root_mem->use_hierarchy) {
1330                 css_get(&root_mem->css);
1331                 ret = root_mem;
1332         }
1333
1334         while (!ret) {
1335                 rcu_read_lock();
1336                 nextid = root_mem->last_scanned_child + 1;
1337                 css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
1338                                    &found);
1339                 if (css && css_tryget(css))
1340                         ret = container_of(css, struct mem_cgroup, css);
1341
1342                 rcu_read_unlock();
1343                 /* Updates scanning parameter */
1344                 spin_lock(&root_mem->reclaim_param_lock);
1345                 if (!css) {
1346                         /* this means start scan from ID:1 */
1347                         root_mem->last_scanned_child = 0;
1348                 } else
1349                         root_mem->last_scanned_child = found;
1350                 spin_unlock(&root_mem->reclaim_param_lock);
1351         }
1352
1353         return ret;
1354 }
1355
1356 /*
1357  * Scan the hierarchy if needed to reclaim memory. We remember the last child
1358  * we reclaimed from, so that we don't end up penalizing one child extensively
1359  * based on its position in the children list.
1360  *
1361  * root_mem is the original ancestor that we've been reclaiming from.
1362  *
1363  * We give up and return to the caller when we visit root_mem twice.
1364  * (other groups can be removed while we're walking....)
1365  *
1366  * If shrink==true, this returns immediately, to avoid freeing too much.
1367  */
1368 static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
1369                                                 struct zone *zone,
1370                                                 gfp_t gfp_mask,
1371                                                 unsigned long reclaim_options)
1372 {
1373         struct mem_cgroup *victim;
1374         int ret, total = 0;
1375         int loop = 0;
1376         bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
1377         bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
1378         bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
1379         unsigned long excess = mem_cgroup_get_excess(root_mem);
1380
1381         /* If memsw_is_minimum==1, swap-out is of no use. */
1382         if (root_mem->memsw_is_minimum)
1383                 noswap = true;
1384
1385         while (1) {
1386                 victim = mem_cgroup_select_victim(root_mem);
1387                 if (victim == root_mem) {
1388                         loop++;
1389                         if (loop >= 1)
1390                                 drain_all_stock_async();
1391                         if (loop >= 2) {
1392                                 /*
1393                                  * If we have not been able to reclaim
1394                                  * anything, it might be because there are
1395                                  * no reclaimable pages under this hierarchy.
1396                                  */
1397                                 if (!check_soft || !total) {
1398                                         css_put(&victim->css);
1399                                         break;
1400                                 }
1401                                 /*
1402                                  * We want to do more targeted reclaim.
1403                                  * excess >> 2 is not too large, so we don't
1404                                  * reclaim too much, nor too small, so we don't
1405                                  * keep coming back to reclaim from this cgroup
1406                                  */
1407                                 if (total >= (excess >> 2) ||
1408                                         (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) {
1409                                         css_put(&victim->css);
1410                                         break;
1411                                 }
1412                         }
1413                 }
1414                 if (!mem_cgroup_local_usage(victim)) {
1415                         /* this cgroup's local usage == 0 */
1416                         css_put(&victim->css);
1417                         continue;
1418                 }
1419                 /* we use swappiness of local cgroup */
1420                 if (check_soft)
1421                         ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
1422                                 noswap, get_swappiness(victim), zone);
1423                 else
1424                         ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
1425                                                 noswap, get_swappiness(victim));
1426                 css_put(&victim->css);
1427                 /*
1428                  * When shrinking usage, we can't check whether we should stop
1429                  * here or reclaim more; it depends on the callers.
1430                  * last_scanned_child is enough to keep fairness under the tree.
1431                  */
1432                 if (shrink)
1433                         return ret;
1434                 total += ret;
1435                 if (check_soft) {
1436                         if (res_counter_check_under_soft_limit(&root_mem->res))
1437                                 return total;
1438                 } else if (mem_cgroup_check_under_limit(root_mem))
1439                         return 1 + total;
1440         }
1441         return total;
1442 }
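/*
 * A sketch of how callers are expected to combine the reclaim flags
 * (hypothetical call, not copied from this file):
 *
 *	mem_cgroup_hierarchical_reclaim(mem, NULL, gfp_mask,
 *					MEM_CGROUP_RECLAIM_NOSWAP |
 *					MEM_CGROUP_RECLAIM_SHRINK);
 *
 * NOSWAP forbids swap-out during reclaim, SHRINK makes the function return
 * after a single reclaim pass, and SOFT (together with a zone) switches to
 * soft-limit reclaim via mem_cgroup_shrink_node_zone().
 */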
1443
1444 /*
1445  * Check whether the OOM killer is already running under our hierarchy.
1446  * If someone is already running it, return false.
1447  */
1448 static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
1449 {
1450         int x, lock_count = 0;
1451         struct mem_cgroup *iter;
1452
1453         for_each_mem_cgroup_tree(iter, mem) {
1454                 x = atomic_inc_return(&iter->oom_lock);
1455                 lock_count = max(x, lock_count);
1456         }
1457
1458         if (lock_count == 1)
1459                 return true;
1460         return false;
1461 }
1462
1463 static int mem_cgroup_oom_unlock(struct mem_cgroup *mem)
1464 {
1465         struct mem_cgroup *iter;
1466
1467         /*
1468          * When a new child is created while the hierarchy is under oom,
1469          * mem_cgroup_oom_lock() may not be called. We have to use
1470          * atomic_add_unless() here.
1471          */
1472         for_each_mem_cgroup_tree(iter, mem)
1473                 atomic_add_unless(&iter->oom_lock, -1, 0);
1474         return 0;
1475 }
1476
1477
1478 static DEFINE_MUTEX(memcg_oom_mutex);
1479 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1480
1481 struct oom_wait_info {
1482         struct mem_cgroup *mem;
1483         wait_queue_t    wait;
1484 };
1485
1486 static int memcg_oom_wake_function(wait_queue_t *wait,
1487         unsigned mode, int sync, void *arg)
1488 {
1489         struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg;
1490         struct oom_wait_info *oom_wait_info;
1491
1492         oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1493
1494         if (oom_wait_info->mem == wake_mem)
1495                 goto wakeup;
1496         /* if no hierarchy, no match */
1497         if (!oom_wait_info->mem->use_hierarchy || !wake_mem->use_hierarchy)
1498                 return 0;
1499         /*
1500          * Both oom_wait_info->mem and wake_mem are stable under us,
1501          * so we can use css_is_ancestor() without taking care of RCU.
1502          */
1503         if (!css_is_ancestor(&oom_wait_info->mem->css, &wake_mem->css) &&
1504             !css_is_ancestor(&wake_mem->css, &oom_wait_info->mem->css))
1505                 return 0;
1506
1507 wakeup:
1508         return autoremove_wake_function(wait, mode, sync, arg);
1509 }
1510
1511 static void memcg_wakeup_oom(struct mem_cgroup *mem)
1512 {
1513         /* for filtering, pass "mem" as argument. */
1514         __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, mem);
1515 }
1516
1517 static void memcg_oom_recover(struct mem_cgroup *mem)
1518 {
1519         if (mem && atomic_read(&mem->oom_lock))
1520                 memcg_wakeup_oom(mem);
1521 }
1522
1523 /*
1524  * Try to call the OOM killer. Returns false if we should exit the memory-reclaim loop.
1525  */
1526 bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
1527 {
1528         struct oom_wait_info owait;
1529         bool locked, need_to_kill;
1530
1531         owait.mem = mem;
1532         owait.wait.flags = 0;
1533         owait.wait.func = memcg_oom_wake_function;
1534         owait.wait.private = current;
1535         INIT_LIST_HEAD(&owait.wait.task_list);
1536         need_to_kill = true;
1537         /* At first, try to take the OOM lock of the hierarchy under mem. */
1538         mutex_lock(&memcg_oom_mutex);
1539         locked = mem_cgroup_oom_lock(mem);
1540         /*
1541          * Even if signal_pending(), we can't quit the charge() loop without
1542          * accounting, so UNINTERRUPTIBLE would be appropriate. But SIGKILL
1543          * under OOM is always welcome, so use TASK_KILLABLE here.
1544          */
1545         prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1546         if (!locked || mem->oom_kill_disable)
1547                 need_to_kill = false;
1548         if (locked)
1549                 mem_cgroup_oom_notify(mem);
1550         mutex_unlock(&memcg_oom_mutex);
1551
1552         if (need_to_kill) {
1553                 finish_wait(&memcg_oom_waitq, &owait.wait);
1554                 mem_cgroup_out_of_memory(mem, mask);
1555         } else {
1556                 schedule();
1557                 finish_wait(&memcg_oom_waitq, &owait.wait);
1558         }
1559         mutex_lock(&memcg_oom_mutex);
1560         mem_cgroup_oom_unlock(mem);
1561         memcg_wakeup_oom(mem);
1562         mutex_unlock(&memcg_oom_mutex);
1563
1564         if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
1565                 return false;
1566         /* Give chance to dying process */
1567         schedule_timeout(1);
1568         return true;
1569 }
1570
1571 /*
1572  * Currently used to update mapped file statistics, but the routine can be
1573  * generalized to update other statistics as well.
1574  *
1575  * Notes: Race condition
1576  *
1577  * We usually use lock_page_cgroup() for accessing page_cgroup members, but
1578  * it tends to be costly. Under some conditions, however, we don't need
1579  * to take it _always_.
1580  *
1581  * Considering "charge", lock_page_cgroup() is not required because all
1582  * file-stat operations happen after a page is attached to the radix-tree.
1583  * There is no race with "charge".
1584  *
1585  * Considering "uncharge", we know that memcg intentionally doesn't clear
1586  * pc->mem_cgroup at "uncharge", so we always see a valid pc->mem_cgroup
1587  * even if we race with "uncharge". The statistics themselves are properly
1588  * handled by flags.
1589  *
1590  * Considering "move", this is the only case where we see a race. To make
1591  * the race window small, we check the MEM_CGROUP_ON_MOVE percpu value to
1592  * detect a possible race. If there is one, we take the lock.
1593  */
1594 void mem_cgroup_update_file_mapped(struct page *page, int val)
1595 {
1596         struct mem_cgroup *mem;
1597         struct page_cgroup *pc = lookup_page_cgroup(page);
1598         bool need_unlock = false;
1599
1600         if (unlikely(!pc))
1601                 return;
1602
1603         rcu_read_lock();
1604         mem = pc->mem_cgroup;
1605         if (unlikely(!mem || !PageCgroupUsed(pc)))
1606                 goto out;
1607         /* pc->mem_cgroup is unstable ? */
1608         if (unlikely(mem_cgroup_stealed(mem))) {
1609                 /* take the lock to access pc->mem_cgroup safely */
1610                 lock_page_cgroup(pc);
1611                 need_unlock = true;
1612                 mem = pc->mem_cgroup;
1613                 if (!mem || !PageCgroupUsed(pc))
1614                         goto out;
1615         }
1616         if (val > 0) {
1617                 this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
1618                 SetPageCgroupFileMapped(pc);
1619         } else {
1620                 this_cpu_dec(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
1621                 if (!page_mapped(page)) /* for race between dec->inc counter */
1622                         ClearPageCgroupFileMapped(pc);
1623         }
1624
1625 out:
1626         if (unlikely(need_unlock))
1627                 unlock_page_cgroup(pc);
1628         rcu_read_unlock();
1629         return;
1630 }
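
/*
 * Caller sketch (illustrative, not part of this file): the rmap code is
 * expected to call this helper whenever a file page's mapcount changes,
 * roughly as mm/rmap.c does:
 *
 *	void page_add_file_rmap(struct page *page)
 *	{
 *		if (atomic_inc_and_test(&page->_mapcount)) {
 *			__inc_zone_page_state(page, NR_FILE_MAPPED);
 *			mem_cgroup_update_file_mapped(page, 1);
 *		}
 *	}
 *
 * and symmetrically with val == -1 from page_remove_rmap() when the last
 * mapping goes away.
 */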
1631
1632 /*
1633  * Size of the first charge trial. "32" comes from vmscan.c's magic value.
1634  * TODO: it may be necessary to use a bigger value on big irons.
1635  */
1636 #define CHARGE_SIZE     (32 * PAGE_SIZE)
1637 struct memcg_stock_pcp {
1638         struct mem_cgroup *cached; /* this is never the root cgroup */
1639         int charge;
1640         struct work_struct work;
1641 };
1642 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1643 static atomic_t memcg_drain_count;
1644
1645 /*
1646  * Try to consume stocked charge on this cpu. On success, PAGE_SIZE is
1647  * consumed from the local stock and true is returned. If the stock is empty
1648  * or holds charges from a cgroup which is not the current target, return
1649  * false; the stock will be refilled later.
1650  */
1651 static bool consume_stock(struct mem_cgroup *mem)
1652 {
1653         struct memcg_stock_pcp *stock;
1654         bool ret = true;
1655
1656         stock = &get_cpu_var(memcg_stock);
1657         if (mem == stock->cached && stock->charge)
1658                 stock->charge -= PAGE_SIZE;
1659         else /* need to call res_counter_charge */
1660                 ret = false;
1661         put_cpu_var(memcg_stock);
1662         return ret;
1663 }
1664
1665 /*
1666  * Return stock cached in percpu to the res_counter and reset the cached information.
1667  */
1668 static void drain_stock(struct memcg_stock_pcp *stock)
1669 {
1670         struct mem_cgroup *old = stock->cached;
1671
1672         if (stock->charge) {
1673                 res_counter_uncharge(&old->res, stock->charge);
1674                 if (do_swap_account)
1675                         res_counter_uncharge(&old->memsw, stock->charge);
1676         }
1677         stock->cached = NULL;
1678         stock->charge = 0;
1679 }
1680
1681 /*
1682  * This must be called with preemption disabled, or by a thread
1683  * which is pinned to the local cpu.
1684  */
1685 static void drain_local_stock(struct work_struct *dummy)
1686 {
1687         struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
1688         drain_stock(stock);
1689 }
1690
1691 /*
1692  * Cache charges (val) taken from the res_counter into the local per-cpu area.
1693  * They will be consumed later by consume_stock().
1694  */
1695 static void refill_stock(struct mem_cgroup *mem, int val)
1696 {
1697         struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
1698
1699         if (stock->cached != mem) { /* reset if necessary */
1700                 drain_stock(stock);
1701                 stock->cached = mem;
1702         }
1703         stock->charge += val;
1704         put_cpu_var(memcg_stock);
1705 }
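
/*
 * Usage sketch of the per-cpu stock (a simplified view of what
 * __mem_cgroup_try_charge() below does; names are the ones defined here):
 *
 *	if (consume_stock(mem))
 *		return 0;			(fast path, no res_counter op)
 *	ret = res_counter_charge(&mem->res, CHARGE_SIZE, &fail_res);
 *	if (!ret)
 *		refill_stock(mem, CHARGE_SIZE - PAGE_SIZE);
 *
 * i.e. charge a whole batch from the res_counter and keep the surplus
 * cached on this cpu for later page-sized charges.
 */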
1706
1707 /*
1708  * Tries to drain stocked charges on other cpus. This function is asynchronous
1709  * and just schedules a work item per cpu to drain locally on each cpu. Callers
1710  * can expect some charges to come back to the res_counter later but cannot
1711  * wait for it.
1712  */
1713 static void drain_all_stock_async(void)
1714 {
1715         int cpu;
1716         /* This function schedules "drain" in an asynchronous way.
1717          * The result of "drain" is not directly handled by callers, so
1718          * if someone is already draining, we don't have to drain again.
1719          * Anyway, the WORK_STRUCT_PENDING check in queue_work_on() will catch
1720          * any race. We only do a loose check here.
1721          */
1722         if (atomic_read(&memcg_drain_count))
1723                 return;
1724         /* Notify other cpus that system-wide "drain" is running */
1725         atomic_inc(&memcg_drain_count);
1726         get_online_cpus();
1727         for_each_online_cpu(cpu) {
1728                 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
1729                 schedule_work_on(cpu, &stock->work);
1730         }
1731         put_online_cpus();
1732         atomic_dec(&memcg_drain_count);
1733         /* We don't wait for flush_work */
1734 }
1735
1736 /* This is a synchronous drain interface. */
1737 static void drain_all_stock_sync(void)
1738 {
1739         /* called when force_empty is called */
1740         atomic_inc(&memcg_drain_count);
1741         schedule_on_each_cpu(drain_local_stock);
1742         atomic_dec(&memcg_drain_count);
1743 }
1744
1745 /*
1746  * This function drains the percpu counter values from a DEAD cpu and
1747  * moves them to the local cpu. Note that this function can be preempted.
1748  */
1749 static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *mem, int cpu)
1750 {
1751         int i;
1752
1753         spin_lock(&mem->pcp_counter_lock);
1754         for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) {
1755                 s64 x = per_cpu(mem->stat->count[i], cpu);
1756
1757                 per_cpu(mem->stat->count[i], cpu) = 0;
1758                 mem->nocpu_base.count[i] += x;
1759         }
1760         /* need to clear ON_MOVE value, works as a kind of lock. */
1761         per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0;
1762         spin_unlock(&mem->pcp_counter_lock);
1763 }
1764
1765 static void synchronize_mem_cgroup_on_move(struct mem_cgroup *mem, int cpu)
1766 {
1767         int idx = MEM_CGROUP_ON_MOVE;
1768
1769         spin_lock(&mem->pcp_counter_lock);
1770         per_cpu(mem->stat->count[idx], cpu) = mem->nocpu_base.count[idx];
1771         spin_unlock(&mem->pcp_counter_lock);
1772 }
1773
1774 static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
1775                                         unsigned long action,
1776                                         void *hcpu)
1777 {
1778         int cpu = (unsigned long)hcpu;
1779         struct memcg_stock_pcp *stock;
1780         struct mem_cgroup *iter;
1781
1782         if (action == CPU_ONLINE) {
1783                 for_each_mem_cgroup_all(iter)
1784                         synchronize_mem_cgroup_on_move(iter, cpu);
1785                 return NOTIFY_OK;
1786         }
1787
1788         if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
1789                 return NOTIFY_OK;
1790
1791         for_each_mem_cgroup_all(iter)
1792                 mem_cgroup_drain_pcp_counter(iter, cpu);
1793
1794         stock = &per_cpu(memcg_stock, cpu);
1795         drain_stock(stock);
1796         return NOTIFY_OK;
1797 }
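
/*
 * Registration sketch: this callback is expected to be hooked up once at
 * memcg initialization (e.g. when the root cgroup is created), along the
 * lines of
 *
 *	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
 *
 * so that CPU_ONLINE re-arms the per-cpu MEM_CGROUP_ON_MOVE hint and
 * CPU_DEAD folds the dead cpu's counters into nocpu_base and drains its
 * stock.
 */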
1798
1799
1800 /* See __mem_cgroup_try_charge() for details */
1801 enum {
1802         CHARGE_OK,              /* success */
1803         CHARGE_RETRY,           /* need to retry but retry is not bad */
1804         CHARGE_NOMEM,           /* we can't do more. return -ENOMEM */
1805         CHARGE_WOULDBLOCK,      /* __GFP_WAIT wasn't set and not enough res. */
1806         CHARGE_OOM_DIE,         /* the current is killed because of OOM */
1807 };
1808
1809 static int __mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
1810                                 int csize, bool oom_check)
1811 {
1812         struct mem_cgroup *mem_over_limit;
1813         struct res_counter *fail_res;
1814         unsigned long flags = 0;
1815         int ret;
1816
1817         ret = res_counter_charge(&mem->res, csize, &fail_res);
1818
1819         if (likely(!ret)) {
1820                 if (!do_swap_account)
1821                         return CHARGE_OK;
1822                 ret = res_counter_charge(&mem->memsw, csize, &fail_res);
1823                 if (likely(!ret))
1824                         return CHARGE_OK;
1825
1826                 mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
1827                 flags |= MEM_CGROUP_RECLAIM_NOSWAP;
1828         } else
1829                 mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
1830
1831         if (csize > PAGE_SIZE) /* change csize and retry */
1832                 return CHARGE_RETRY;
1833
1834         if (!(gfp_mask & __GFP_WAIT))
1835                 return CHARGE_WOULDBLOCK;
1836
1837         ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
1838                                         gfp_mask, flags);
1839         /*
1840          * try_to_free_mem_cgroup_pages() might not give us a full
1841          * picture of reclaim. Some pages are reclaimed and might be
1842          * moved to swap cache or just unmapped from the cgroup.
1843          * Check the limit again to see if the reclaim reduced the
1844          * current usage of the cgroup before giving up
1845          */
1846         if (ret || mem_cgroup_check_under_limit(mem_over_limit))
1847                 return CHARGE_RETRY;
1848
1849         /*
1850          * During task move, charges can be double-counted, so it's better
1851          * to wait until the end of the task move if one is in progress.
1852          */
1853         if (mem_cgroup_wait_acct_move(mem_over_limit))
1854                 return CHARGE_RETRY;
1855
1856         /* If we don't need to call the oom-killer at all, return immediately */
1857         if (!oom_check)
1858                 return CHARGE_NOMEM;
1859         /* check OOM */
1860         if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask))
1861                 return CHARGE_OOM_DIE;
1862
1863         return CHARGE_RETRY;
1864 }
1865
1866 /*
1867  * Unlike the exported interface, an "oom" parameter is added. If oom==true,
1868  * the oom-killer can be invoked.
1869  */
1870 static int __mem_cgroup_try_charge(struct mm_struct *mm,
1871                 gfp_t gfp_mask, struct mem_cgroup **memcg, bool oom)
1872 {
1873         int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
1874         struct mem_cgroup *mem = NULL;
1875         int ret;
1876         int csize = CHARGE_SIZE;
1877
1878         /*
1879          * Unlike the global VM's OOM kill, we are not in a system-level
1880          * memory shortage, so allow a dying process to go ahead, in addition
1881          * to a MEMDIE process.
1882          */
1883         if (unlikely(test_thread_flag(TIF_MEMDIE)
1884                      || fatal_signal_pending(current)))
1885                 goto bypass;
1886
1887         /*
1888          * We always charge the cgroup the mm_struct belongs to.
1889          * The mm_struct's mem_cgroup changes on task migration if the
1890          * thread group leader migrates. It's possible that mm is not
1891          * set, if so charge the init_mm (happens for pagecache usage).
1892          */
1893         if (!*memcg && !mm)
1894                 goto bypass;
1895 again:
1896         if (*memcg) { /* css should be a valid one */
1897                 mem = *memcg;
1898                 VM_BUG_ON(css_is_removed(&mem->css));
1899                 if (mem_cgroup_is_root(mem))
1900                         goto done;
1901                 if (consume_stock(mem))
1902                         goto done;
1903                 css_get(&mem->css);
1904         } else {
1905                 struct task_struct *p;
1906
1907                 rcu_read_lock();
1908                 p = rcu_dereference(mm->owner);
1909                 VM_BUG_ON(!p);
1910                 /*
1911                  * Because we don't hold task_lock(), "p" can exit while
1912                  * we're here. In that case, "mem" may point to the root
1913                  * cgroup but is never NULL (task_struct itself is freed
1914                  * by RCU, and the cgroup is RCU safe). So there is a small
1915                  * risk of getting the wrong cgroup here, but such race-induced
1916                  * mis-accounting can always happen because we don't hold
1917                  * cgroup_mutex(). Taking it would be overkill, so we accept the race.
1918                  */
1919                 mem = mem_cgroup_from_task(p);
1920                 VM_BUG_ON(!mem);
1921                 if (mem_cgroup_is_root(mem)) {
1922                         rcu_read_unlock();
1923                         goto done;
1924                 }
1925                 if (consume_stock(mem)) {
1926                         /*
1927                          * It seems dangerous to access memcg without css_get().
1928                          * But considering how consume_stock works, it's not
1929                          * necessary. If consume_stock succeeds, some charges
1930                          * from this memcg are cached on this cpu. So, we
1931                          * don't need to call css_get()/css_tryget() before
1932                          * calling consume_stock().
1933                          */
1934                         rcu_read_unlock();
1935                         goto done;
1936                 }
1937                 /* after here, we may be blocked. we need to get refcnt */
1938                 if (!css_tryget(&mem->css)) {
1939                         rcu_read_unlock();
1940                         goto again;
1941                 }
1942                 rcu_read_unlock();
1943         }
1944
1945         do {
1946                 bool oom_check;
1947
1948                 /* If killed, bypass charge */
1949                 if (fatal_signal_pending(current)) {
1950                         css_put(&mem->css);
1951                         goto bypass;
1952                 }
1953
1954                 oom_check = false;
1955                 if (oom && !nr_oom_retries) {
1956                         oom_check = true;
1957                         nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
1958                 }
1959
1960                 ret = __mem_cgroup_do_charge(mem, gfp_mask, csize, oom_check);
1961
1962                 switch (ret) {
1963                 case CHARGE_OK:
1964                         break;
1965                 case CHARGE_RETRY: /* not in OOM situation but retry */
1966                         csize = PAGE_SIZE;
1967                         css_put(&mem->css);
1968                         mem = NULL;
1969                         goto again;
1970                 case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
1971                         css_put(&mem->css);
1972                         goto nomem;
1973                 case CHARGE_NOMEM: /* OOM routine works */
1974                         if (!oom) {
1975                                 css_put(&mem->css);
1976                                 goto nomem;
1977                         }
1978                         /* If oom, we never return -ENOMEM */
1979                         nr_oom_retries--;
1980                         break;
1981                 case CHARGE_OOM_DIE: /* Killed by OOM Killer */
1982                         css_put(&mem->css);
1983                         goto bypass;
1984                 }
1985         } while (ret != CHARGE_OK);
1986
1987         if (csize > PAGE_SIZE)
1988                 refill_stock(mem, csize - PAGE_SIZE);
1989         css_put(&mem->css);
1990 done:
1991         *memcg = mem;
1992         return 0;
1993 nomem:
1994         *memcg = NULL;
1995         return -ENOMEM;
1996 bypass:
1997         *memcg = NULL;
1998         return 0;
1999 }
2000
2001 /*
2002  * Sometimes we have to undo a charge we got by try_charge().
2003  * This function is for that: it uncharges what was charged
2004  * by try_charge().
2005  */
2006 static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem,
2007                                                         unsigned long count)
2008 {
2009         if (!mem_cgroup_is_root(mem)) {
2010                 res_counter_uncharge(&mem->res, PAGE_SIZE * count);
2011                 if (do_swap_account)
2012                         res_counter_uncharge(&mem->memsw, PAGE_SIZE * count);
2013         }
2014 }
2015
2016 static void mem_cgroup_cancel_charge(struct mem_cgroup *mem)
2017 {
2018         __mem_cgroup_cancel_charge(mem, 1);
2019 }
2020
2021 /*
2022  * A helper function to get a mem_cgroup from an ID. Must be called under
2023  * rcu_read_lock(). The caller must check css_is_removed() or similar if
2024  * that is a concern (dropping a refcount from swap can be called against
2025  * a removed memcg).
2026  */
2027 static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
2028 {
2029         struct cgroup_subsys_state *css;
2030
2031         /* ID 0 is unused ID */
2032         if (!id)
2033                 return NULL;
2034         css = css_lookup(&mem_cgroup_subsys, id);
2035         if (!css)
2036                 return NULL;
2037         return container_of(css, struct mem_cgroup, css);
2038 }
2039
2040 struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
2041 {
2042         struct mem_cgroup *mem = NULL;
2043         struct page_cgroup *pc;
2044         unsigned short id;
2045         swp_entry_t ent;
2046
2047         VM_BUG_ON(!PageLocked(page));
2048
2049         pc = lookup_page_cgroup(page);
2050         lock_page_cgroup(pc);
2051         if (PageCgroupUsed(pc)) {
2052                 mem = pc->mem_cgroup;
2053                 if (mem && !css_tryget(&mem->css))
2054                         mem = NULL;
2055         } else if (PageSwapCache(page)) {
2056                 ent.val = page_private(page);
2057                 id = lookup_swap_cgroup(ent);
2058                 rcu_read_lock();
2059                 mem = mem_cgroup_lookup(id);
2060                 if (mem && !css_tryget(&mem->css))
2061                         mem = NULL;
2062                 rcu_read_unlock();
2063         }
2064         unlock_page_cgroup(pc);
2065         return mem;
2066 }
2067
2068 /*
2069  * Commit a charge got by __mem_cgroup_try_charge() and make the page_cgroup
2070  * enter the USED state. If already USED, uncharge and return.
2071  */
2072
2073 static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
2074                                      struct page_cgroup *pc,
2075                                      enum charge_type ctype)
2076 {
2077         /* try_charge() can return NULL in *memcg; handle that case here. */
2078         if (!mem)
2079                 return;
2080
2081         lock_page_cgroup(pc);
2082         if (unlikely(PageCgroupUsed(pc))) {
2083                 unlock_page_cgroup(pc);
2084                 mem_cgroup_cancel_charge(mem);
2085                 return;
2086         }
2087
2088         pc->mem_cgroup = mem;
2089         /*
2090          * We access a page_cgroup asynchronously without lock_page_cgroup().
2091          * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
2092          * is accessed after testing USED bit. To make pc->mem_cgroup visible
2093          * before USED bit, we need memory barrier here.
2094          * See mem_cgroup_add_lru_list(), etc.
2095          */
2096         smp_wmb();
2097         switch (ctype) {
2098         case MEM_CGROUP_CHARGE_TYPE_CACHE:
2099         case MEM_CGROUP_CHARGE_TYPE_SHMEM:
2100                 SetPageCgroupCache(pc);
2101                 SetPageCgroupUsed(pc);
2102                 break;
2103         case MEM_CGROUP_CHARGE_TYPE_MAPPED:
2104                 ClearPageCgroupCache(pc);
2105                 SetPageCgroupUsed(pc);
2106                 break;
2107         default:
2108                 break;
2109         }
2110
2111         mem_cgroup_charge_statistics(mem, pc, true);
2112
2113         unlock_page_cgroup(pc);
2114         /*
2115          * "charge_statistics" updated the event counter, so check it now.
2116          * Insert the ancestor (and its ancestors) into the softlimit RB-tree
2117          * if they exceed their soft limit.
2118          */
2119         memcg_check_events(mem, pc->page);
2120 }
2121
2122 /**
2123  * __mem_cgroup_move_account - move account of the page
2124  * @pc: page_cgroup of the page.
2125  * @from: mem_cgroup which the page is moved from.
2126  * @to: mem_cgroup which the page is moved to. @from != @to.
2127  * @uncharge: whether we should call uncharge and css_put against @from.
2128  *
2129  * The caller must confirm following.
2130  * - page is not on LRU (isolate_page() is useful.)
2131  * - the pc is locked, used, and ->mem_cgroup points to @from.
2132  *
2133  * This function does neither "charge" nor css_get against the new cgroup;
2134  * that should be done by the caller (__mem_cgroup_try_charge would be useful).
2135  * If @uncharge is true, this function does "uncharge" from the old cgroup;
2136  * if @uncharge is false, the caller should do the "uncharge" itself.
2137  */
2138
2139 static void __mem_cgroup_move_account(struct page_cgroup *pc,
2140         struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
2141 {
2142         VM_BUG_ON(from == to);
2143         VM_BUG_ON(PageLRU(pc->page));
2144         VM_BUG_ON(!PageCgroupLocked(pc));
2145         VM_BUG_ON(!PageCgroupUsed(pc));
2146         VM_BUG_ON(pc->mem_cgroup != from);
2147
2148         if (PageCgroupFileMapped(pc)) {
2149                 /* Update mapped_file data for mem_cgroup */
2150                 preempt_disable();
2151                 __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
2152                 __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
2153                 preempt_enable();
2154         }
2155         mem_cgroup_charge_statistics(from, pc, false);
2156         if (uncharge)
2157                 /* This is not "cancel", but cancel_charge does all we need. */
2158                 mem_cgroup_cancel_charge(from);
2159
2160         /* caller should have done css_get */
2161         pc->mem_cgroup = to;
2162         mem_cgroup_charge_statistics(to, pc, true);
2163         /*
2164          * We charge against "to", which may not have any tasks, so "to"
2165          * could be under rmdir(). But in the current implementation the
2166          * callers of this function are only force_empty() and move-charge,
2167          * so it's guaranteed that "to" is never removed. Therefore we don't
2168          * check rmdir status here.
2169          */
2170 }
2171
2172 /*
2173  * check whether the @pc is valid for moving account and call
2174  * __mem_cgroup_move_account()
2175  */
2176 static int mem_cgroup_move_account(struct page_cgroup *pc,
2177                 struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
2178 {
2179         int ret = -EINVAL;
2180         lock_page_cgroup(pc);
2181         if (PageCgroupUsed(pc) && pc->mem_cgroup == from) {
2182                 __mem_cgroup_move_account(pc, from, to, uncharge);
2183                 ret = 0;
2184         }
2185         unlock_page_cgroup(pc);
2186         /*
2187          * check events
2188          */
2189         memcg_check_events(to, pc->page);
2190         memcg_check_events(from, pc->page);
2191         return ret;
2192 }
2193
2194 /*
2195  * move charges to its parent.
2196  */
2197
2198 static int mem_cgroup_move_parent(struct page_cgroup *pc,
2199                                   struct mem_cgroup *child,
2200                                   gfp_t gfp_mask)
2201 {
2202         struct page *page = pc->page;
2203         struct cgroup *cg = child->css.cgroup;
2204         struct cgroup *pcg = cg->parent;
2205         struct mem_cgroup *parent;
2206         int ret;
2207
2208         /* Is ROOT ? */
2209         if (!pcg)
2210                 return -EINVAL;
2211
2212         ret = -EBUSY;
2213         if (!get_page_unless_zero(page))
2214                 goto out;
2215         if (isolate_lru_page(page))
2216                 goto put;
2217
2218         parent = mem_cgroup_from_cont(pcg);
2219         ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
2220         if (ret || !parent)
2221                 goto put_back;
2222
2223         ret = mem_cgroup_move_account(pc, child, parent, true);
2224         if (ret)
2225                 mem_cgroup_cancel_charge(parent);
2226 put_back:
2227         putback_lru_page(page);
2228 put:
2229         put_page(page);
2230 out:
2231         return ret;
2232 }
2233
2234 /*
2235  * Charge the memory controller for page usage.
2236  * Return
2237  * 0 if the charge was successful
2238  * < 0 if the cgroup is over its limit
2239  */
2240 static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
2241                                 gfp_t gfp_mask, enum charge_type ctype)
2242 {
2243         struct mem_cgroup *mem = NULL;
2244         struct page_cgroup *pc;
2245         int ret;
2246
2247         pc = lookup_page_cgroup(page);
2248         /* can happen at boot */
2249         if (unlikely(!pc))
2250                 return 0;
2251         prefetchw(pc);
2252
2253         ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
2254         if (ret || !mem)
2255                 return ret;
2256
2257         __mem_cgroup_commit_charge(mem, pc, ctype);
2258         return 0;
2259 }
2260
2261 int mem_cgroup_newpage_charge(struct page *page,
2262                               struct mm_struct *mm, gfp_t gfp_mask)
2263 {
2264         if (mem_cgroup_disabled())
2265                 return 0;
2266         if (PageCompound(page))
2267                 return 0;
2268         /*
2269          * If already mapped, we don't have to account.
2270          * If it is page cache, page->mapping points to an address_space.
2271          * But page->mapping may hold a stale anon_vma pointer; detect that
2272          * with the PageAnon() check. A newly-mapped anon page's page->mapping
2273          * is NULL.
2274          */
2275         if (page_mapped(page) || (page->mapping && !PageAnon(page)))
2276                 return 0;
2277         if (unlikely(!mm))
2278                 mm = &init_mm;
2279         return mem_cgroup_charge_common(page, mm, gfp_mask,
2280                                 MEM_CGROUP_CHARGE_TYPE_MAPPED);
2281 }
2282
2283 static void
2284 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2285                                         enum charge_type ctype);
2286
2287 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
2288                                 gfp_t gfp_mask)
2289 {
2290         int ret;
2291
2292         if (mem_cgroup_disabled())
2293                 return 0;
2294         if (PageCompound(page))
2295                 return 0;
2296         /*
2297          * Corner case handling. This is usually called from
2298          * add_to_page_cache(), but some filesystems (shmem) precharge the page
2299          * before calling it and then call add_to_page_cache() with GFP_NOWAIT.
2300          *
2301          * In the GFP_NOWAIT case, the page may be pre-charged before calling
2302          * add_to_page_cache() (see shmem.c). Check it here and avoid charging
2303          * twice (it works but pays a slightly larger cost).
2304          * And when the page is SwapCache, swap information should be taken
2305          * into account. We are under lock_page() here.
2306          */
2307         if (!(gfp_mask & __GFP_WAIT)) {
2308                 struct page_cgroup *pc;
2309
2310                 pc = lookup_page_cgroup(page);
2311                 if (!pc)
2312                         return 0;
2313                 lock_page_cgroup(pc);
2314                 if (PageCgroupUsed(pc)) {
2315                         unlock_page_cgroup(pc);
2316                         return 0;
2317                 }
2318                 unlock_page_cgroup(pc);
2319         }
2320
2321         if (unlikely(!mm))
2322                 mm = &init_mm;
2323
2324         if (page_is_file_cache(page))
2325                 return mem_cgroup_charge_common(page, mm, gfp_mask,
2326                                 MEM_CGROUP_CHARGE_TYPE_CACHE);
2327
2328         /* shmem */
2329         if (PageSwapCache(page)) {
2330                 struct mem_cgroup *mem = NULL;
2331
2332                 ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
2333                 if (!ret)
2334                         __mem_cgroup_commit_charge_swapin(page, mem,
2335                                         MEM_CGROUP_CHARGE_TYPE_SHMEM);
2336         } else
2337                 ret = mem_cgroup_charge_common(page, mm, gfp_mask,
2338                                         MEM_CGROUP_CHARGE_TYPE_SHMEM);
2339
2340         return ret;
2341 }
2342
2343 /*
2344  * During swap-in (try_charge -> commit or cancel), the page is locked.
2345  * When try_charge() returns successfully, one refcount on the memcg, not
2346  * tied to a struct page_cgroup, has been acquired. This refcount is
2347  * consumed by "commit()" or dropped by "cancel()".
2348  */
2349 int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
2350                                  struct page *page,
2351                                  gfp_t mask, struct mem_cgroup **ptr)
2352 {
2353         struct mem_cgroup *mem;
2354         int ret;
2355
2356         if (mem_cgroup_disabled())
2357                 return 0;
2358
2359         if (!do_swap_account)
2360                 goto charge_cur_mm;
2361         /*
2362          * A racing thread's fault, or swapoff, may have already updated
2363          * the pte, and even removed page from swap cache: in those cases
2364          * do_swap_page()'s pte_same() test will fail; but there's also a
2365          * KSM case which does need to charge the page.
2366          */
2367         if (!PageSwapCache(page))
2368                 goto charge_cur_mm;
2369         mem = try_get_mem_cgroup_from_page(page);
2370         if (!mem)
2371                 goto charge_cur_mm;
2372         *ptr = mem;
2373         ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
2374         css_put(&mem->css);
2375         return ret;
2376 charge_cur_mm:
2377         if (unlikely(!mm))
2378                 mm = &init_mm;
2379         return __mem_cgroup_try_charge(mm, mask, ptr, true);
2380 }
2381
2382 static void
2383 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2384                                         enum charge_type ctype)
2385 {
2386         struct page_cgroup *pc;
2387
2388         if (mem_cgroup_disabled())
2389                 return;
2390         if (!ptr)
2391                 return;
2392         cgroup_exclude_rmdir(&ptr->css);
2393         pc = lookup_page_cgroup(page);
2394         mem_cgroup_lru_del_before_commit_swapcache(page);
2395         __mem_cgroup_commit_charge(ptr, pc, ctype);
2396         mem_cgroup_lru_add_after_commit_swapcache(page);
2397         /*
2398          * Now the swap content is in memory. This means this page may be
2399          * counted both as mem and as swap: a double count.
2400          * Fix it by uncharging from memsw. Basically, this SwapCache is stable
2401          * under lock_page(). But in do_swap_page() (memory.c), reuse_swap_page()
2402          * may call delete_from_swap_cache() before we reach here.
2403          */
2404         if (do_swap_account && PageSwapCache(page)) {
2405                 swp_entry_t ent = {.val = page_private(page)};
2406                 unsigned short id;
2407                 struct mem_cgroup *memcg;
2408
2409                 id = swap_cgroup_record(ent, 0);
2410                 rcu_read_lock();
2411                 memcg = mem_cgroup_lookup(id);
2412                 if (memcg) {
2413                         /*
2414                          * The recorded memcg may be an obsolete one, so
2415                          * avoid calling css_tryget().
2416                          */
2417                         if (!mem_cgroup_is_root(memcg))
2418                                 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
2419                         mem_cgroup_swap_statistics(memcg, false);
2420                         mem_cgroup_put(memcg);
2421                 }
2422                 rcu_read_unlock();
2423         }
2424         /*
2425          * At swap-in, we may charge against a cgroup which has no tasks,
2426          * so rmdir()->pre_destroy() can be called while we do this charge.
2427          * In that case, pre_destroy() needs to be called again; check it here.
2428          */
2429         cgroup_release_and_wakeup_rmdir(&ptr->css);
2430 }
2431
2432 void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
2433 {
2434         __mem_cgroup_commit_charge_swapin(page, ptr,
2435                                         MEM_CGROUP_CHARGE_TYPE_MAPPED);
2436 }
2437
2438 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
2439 {
2440         if (mem_cgroup_disabled())
2441                 return;
2442         if (!mem)
2443                 return;
2444         mem_cgroup_cancel_charge(mem);
2445 }
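
/*
 * Caller sketch for the swap-in charge triple (roughly what do_swap_page()
 * is expected to do; error labels and flags are illustrative only):
 *
 *	struct mem_cgroup *ptr = NULL;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
 *		goto out_fail;			(charge failed)
 *	...install the pte under the page table lock...
 *	if (pte_installed)
 *		mem_cgroup_commit_charge_swapin(page, ptr);
 *	else
 *		mem_cgroup_cancel_charge_swapin(ptr);	(drop charge + refcnt)
 */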
2446
2447 static void
2448 __do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype)
2449 {
2450         struct memcg_batch_info *batch = NULL;
2451         bool uncharge_memsw = true;
2452         /* If swapout, usage of swap doesn't decrease */
2453         if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2454                 uncharge_memsw = false;
2455
2456         batch = &current->memcg_batch;
2457         /*
2458          * Usually we do css_get() when we remember a memcg pointer.
2459          * But in this case, we keep res->usage until the end of a series of
2460          * uncharges, so it's ok to ignore the memcg's refcount.
2461          */
2462         if (!batch->memcg)
2463                 batch->memcg = mem;
2464         /*
2465          * do_batch > 0 when unmapping pages or during inode invalidate/truncate.
2466          * In those cases, all pages freed continuously can be expected to be in
2467          * the same cgroup and we have a chance to coalesce uncharges.
2468          * But we uncharge one by one if this task is killed by OOM (TIF_MEMDIE)
2469          * because we want to uncharge as soon as possible.
2470          */
2471
2472         if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
2473                 goto direct_uncharge;
2474
2475         /*
2476          * In the typical case, batch->memcg == mem. This means we can
2477          * merge a series of uncharges into one uncharge of the res_counter.
2478          * If not, we uncharge the res_counter one by one.
2479          */
2480         if (batch->memcg != mem)
2481                 goto direct_uncharge;
2482         /* remember freed charge and uncharge it later */
2483         batch->bytes += PAGE_SIZE;
2484         if (uncharge_memsw)
2485                 batch->memsw_bytes += PAGE_SIZE;
2486         return;
2487 direct_uncharge:
2488         res_counter_uncharge(&mem->res, PAGE_SIZE);
2489         if (uncharge_memsw)
2490                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
2491         if (unlikely(batch->memcg != mem))
2492                 memcg_oom_recover(mem);
2493         return;
2494 }
2495
2496 /*
2497  * uncharge if !page_mapped(page)
2498  */
2499 static struct mem_cgroup *
2500 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
2501 {
2502         struct page_cgroup *pc;
2503         struct mem_cgroup *mem = NULL;
2504
2505         if (mem_cgroup_disabled())
2506                 return NULL;
2507
2508         if (PageSwapCache(page))
2509                 return NULL;
2510
2511         /*
2512          * Check if our page_cgroup is valid
2513          */
2514         pc = lookup_page_cgroup(page);
2515         if (unlikely(!pc || !PageCgroupUsed(pc)))
2516                 return NULL;
2517
2518         lock_page_cgroup(pc);
2519
2520         mem = pc->mem_cgroup;
2521
2522         if (!PageCgroupUsed(pc))
2523                 goto unlock_out;
2524
2525         switch (ctype) {
2526         case MEM_CGROUP_CHARGE_TYPE_MAPPED:
2527         case MEM_CGROUP_CHARGE_TYPE_DROP:
2528                 /* See mem_cgroup_prepare_migration() */
2529                 if (page_mapped(page) || PageCgroupMigration(pc))
2530                         goto unlock_out;
2531                 break;
2532         case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
2533                 if (!PageAnon(page)) {  /* Shared memory */
2534                         if (page->mapping && !page_is_file_cache(page))
2535                                 goto unlock_out;
2536                 } else if (page_mapped(page)) /* Anon */
2537                                 goto unlock_out;
2538                 break;
2539         default:
2540                 break;
2541         }
2542
2543         mem_cgroup_charge_statistics(mem, pc, false);
2544
2545         ClearPageCgroupUsed(pc);
2546         /*
2547          * pc->mem_cgroup is not cleared here. It will be accessed when the page
2548          * is freed from the LRU. This is safe because an uncharged page is
2549          * expected not to be reused (it is freed soon). The exception is
2550          * SwapCache, which is handled by special functions.
2551          */
2552
2553         unlock_page_cgroup(pc);
2554         /*
2555          * Even after unlocking, we still hold mem->res.usage here, so this
2556          * memcg will not be freed.
2557          */
2558         memcg_check_events(mem, page);
2559         if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
2560                 mem_cgroup_swap_statistics(mem, true);
2561                 mem_cgroup_get(mem);
2562         }
2563         if (!mem_cgroup_is_root(mem))
2564                 __do_uncharge(mem, ctype);
2565
2566         return mem;
2567
2568 unlock_out:
2569         unlock_page_cgroup(pc);
2570         return NULL;
2571 }
2572
2573 void mem_cgroup_uncharge_page(struct page *page)
2574 {
2575         /* early check. */
2576         if (page_mapped(page))
2577                 return;
2578         if (page->mapping && !PageAnon(page))
2579                 return;
2580         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
2581 }
2582
2583 void mem_cgroup_uncharge_cache_page(struct page *page)
2584 {
2585         VM_BUG_ON(page_mapped(page));
2586         VM_BUG_ON(page->mapping);
2587         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
2588 }
2589
2590 /*
2591  * The uncharge start/end pair is called in unmap_page_range/invalidate/truncate.
2592  * In those cases, pages are freed continuously and we can expect the pages
2593  * to be in the same memcg. Each of these callers itself limits the number of
2594  * pages freed at once, so uncharge_start/end() is called properly.
2595  * This may be called several (nested) times in one context.
2596  */
2597
2598 void mem_cgroup_uncharge_start(void)
2599 {
2600         current->memcg_batch.do_batch++;
2601         /* Nesting is allowed. */
2602         if (current->memcg_batch.do_batch == 1) {
2603                 current->memcg_batch.memcg = NULL;
2604                 current->memcg_batch.bytes = 0;
2605                 current->memcg_batch.memsw_bytes = 0;
2606         }
2607 }
2608
2609 void mem_cgroup_uncharge_end(void)
2610 {
2611         struct memcg_batch_info *batch = &current->memcg_batch;
2612
2613         if (!batch->do_batch)
2614                 return;
2615
2616         batch->do_batch--;
2617         if (batch->do_batch) /* If stacked, do nothing. */
2618                 return;
2619
2620         if (!batch->memcg)
2621                 return;
2622         /*
2623          * This "batch->memcg" is valid without any css_get/put etc.,
2624          * because we hide the charges behind us.
2625          */
2626         if (batch->bytes)
2627                 res_counter_uncharge(&batch->memcg->res, batch->bytes);
2628         if (batch->memsw_bytes)
2629                 res_counter_uncharge(&batch->memcg->memsw, batch->memsw_bytes);
2630         memcg_oom_recover(batch->memcg);
2631         /* forget this pointer (for sanity check) */
2632         batch->memcg = NULL;
2633 }
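
/*
 * Caller pattern sketch for the uncharge batching above (as used by the
 * munmap/truncate paths): bracket a run of per-page uncharges so the
 * res_counter is touched only once at the end,
 *
 *	mem_cgroup_uncharge_start();
 *	for each page in the batch:
 *		mem_cgroup_uncharge_page(page);	(or _cache_page())
 *	mem_cgroup_uncharge_end();
 *
 * Nesting is fine; only the outermost mem_cgroup_uncharge_end() flushes
 * the accumulated bytes.
 */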
2634
2635 #ifdef CONFIG_SWAP
2636 /*
2637  * Called after __delete_from_swap_cache() to drop the "page" account.
2638  * The memcg information is recorded in the swap_cgroup of "ent".
2639  */
2640 void
2641 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
2642 {
2643         struct mem_cgroup *memcg;
2644         int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
2645
2646         if (!swapout) /* this was a swap cache but the swap is unused ! */
2647                 ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
2648
2649         memcg = __mem_cgroup_uncharge_common(page, ctype);
2650
2651         /*
2652          * Record memcg information. If swapout && memcg != NULL,
2653          * mem_cgroup_get() was called in uncharge().
2654          */
2655         if (do_swap_account && swapout && memcg)
2656                 swap_cgroup_record(ent, css_id(&memcg->css));
2657 }
2658 #endif
2659
2660 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2661 /*
2662  * Called from swap_entry_free(). Remove the record in swap_cgroup and
2663  * uncharge the "memsw" account.
2664  */
2665 void mem_cgroup_uncharge_swap(swp_entry_t ent)
2666 {
2667         struct mem_cgroup *memcg;
2668         unsigned short id;
2669
2670         if (!do_swap_account)
2671                 return;
2672
2673         id = swap_cgroup_record(ent, 0);
2674         rcu_read_lock();
2675         memcg = mem_cgroup_lookup(id);
2676         if (memcg) {
2677                 /*
2678                  * We uncharge this because the swap entry is freed.
2679                  * This memcg may be an obsolete one, so we avoid calling css_tryget().
2680                  */
2681                 if (!mem_cgroup_is_root(memcg))
2682                         res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
2683                 mem_cgroup_swap_statistics(memcg, false);
2684                 mem_cgroup_put(memcg);
2685         }
2686         rcu_read_unlock();
2687 }
2688
2689 /**
2690  * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2691  * @entry: swap entry to be moved
2692  * @from:  mem_cgroup which the entry is moved from
2693  * @to:  mem_cgroup which the entry is moved to
2694  * @need_fixup: whether we should fixup res_counters and refcounts.
2695  *
2696  * It succeeds only when the swap_cgroup's record for this entry is the same
2697  * as the mem_cgroup's id of @from.
2698  *
2699  * Returns 0 on success, -EINVAL on failure.
2700  *
2701  * The caller must have charged @to, IOW, called res_counter_charge() for
2702  * both res and memsw, and called css_get().
2703  */
2704 static int mem_cgroup_move_swap_account(swp_entry_t entry,
2705                 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
2706 {
2707         unsigned short old_id, new_id;
2708
2709         old_id = css_id(&from->css);
2710         new_id = css_id(&to->css);
2711
2712         if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
2713                 mem_cgroup_swap_statistics(from, false);
2714                 mem_cgroup_swap_statistics(to, true);
2715                 /*
2716                  * This function is only called from task migration context now.
2717                  * It postpones res_counter and refcount handling till the end
2718                  * of task migration(mem_cgroup_clear_mc()) for performance
2719                  * improvement. But we cannot postpone mem_cgroup_get(to)
2720                  * because if the process that has been moved to @to does
2721                  * swap-in, the refcount of @to might be decreased to 0.
2722                  */
2723                 mem_cgroup_get(to);
2724                 if (need_fixup) {
2725                         if (!mem_cgroup_is_root(from))
2726                                 res_counter_uncharge(&from->memsw, PAGE_SIZE);
2727                         mem_cgroup_put(from);
2728                         /*
2729                          * we charged both to->res and to->memsw, so we should
2730                          * uncharge to->res.
2731                          */
2732                         if (!mem_cgroup_is_root(to))
2733                                 res_counter_uncharge(&to->res, PAGE_SIZE);
2734                 }
2735                 return 0;
2736         }
2737         return -EINVAL;
2738 }
2739 #else
2740 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
2741                 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
2742 {
2743         return -EINVAL;
2744 }
2745 #endif
2746
2747 /*
2748  * Before starting migration, account PAGE_SIZE to the mem_cgroup that the
2749  * old page belongs to.
2750  */
2751 int mem_cgroup_prepare_migration(struct page *page,
2752         struct page *newpage, struct mem_cgroup **ptr)
2753 {
2754         struct page_cgroup *pc;
2755         struct mem_cgroup *mem = NULL;
2756         enum charge_type ctype;
2757         int ret = 0;
2758
2759         if (mem_cgroup_disabled())
2760                 return 0;
2761
2762         pc = lookup_page_cgroup(page);
2763         lock_page_cgroup(pc);
2764         if (PageCgroupUsed(pc)) {
2765                 mem = pc->mem_cgroup;
2766                 css_get(&mem->css);
2767                 /*
2768                  * When migrating an anonymous page, its mapcount goes down
2769                  * to 0 and uncharge() will be called. But even if it is fully
2770                  * unmapped, migration may fail and the page would have to be
2771                  * charged again. We set the MIGRATION flag here and delay the
2772                  * uncharge until end_migration() is called.
2773                  *
2774                  * Corner Case Thinking
2775                  * A)
2776                  * The old page was mapped as Anon and is unmapped-and-freed
2777                  * while migration is ongoing.
2778                  * If unmap finds the old page, its uncharge() will be delayed
2779                  * until end_migration(). If unmap finds the new page, it is
2780                  * uncharged when its mapcount goes from 1 to 0. If the unmap code
2781                  * finds a swap migration entry, the new page will not be mapped
2782                  * and end_migration() will find it (mapcount == 0).
2783                  *
2784                  * B)
2785                  * The old page was mapped but migration fails, so the kernel
2786                  * remaps it. A charge for it is kept by the MIGRATION flag even
2787                  * if its mapcount goes down to 0. We can remap it successfully
2788                  * without charging it again.
2789                  *
2790                  * C)
2791                  * The "old" page is under lock_page() until the end of
2792                  * migration, so the old page itself will not be swapped out.
2793                  * If the new page is swapped out before end_migration(), our
2794                  * hook into the usual swap-out path will catch the event.
2795                  */
2796                 if (PageAnon(page))
2797                         SetPageCgroupMigration(pc);
2798         }
2799         unlock_page_cgroup(pc);
2800         /*
2801          * If the page is not charged at this point,
2802          * we return here.
2803          */
2804         if (!mem)
2805                 return 0;
2806
2807         *ptr = mem;
2808         ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false);
2809         css_put(&mem->css);/* drop extra refcnt */
2810         if (ret || *ptr == NULL) {
2811                 if (PageAnon(page)) {
2812                         lock_page_cgroup(pc);
2813                         ClearPageCgroupMigration(pc);
2814                         unlock_page_cgroup(pc);
2815                         /*
2816                          * The old page may be fully unmapped while we kept it.
2817                          */
2818                         mem_cgroup_uncharge_page(page);
2819                 }
2820                 return -ENOMEM;
2821         }
2822         /*
2823          * We charge the new page before it is used/mapped. So, even if
2824          * unlock_page() is called before end_migration(), we can catch all
2825          * events on this new page. If the new page is migrated but not remapped,
2826          * its mapcount will finally be 0 and we uncharge it in end_migration().
2827          */
2828         pc = lookup_page_cgroup(newpage);
2829         if (PageAnon(page))
2830                 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
2831         else if (page_is_file_cache(page))
2832                 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
2833         else
2834                 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
2835         __mem_cgroup_commit_charge(mem, pc, ctype);
2836         return ret;
2837 }
2838
2839 /* remove redundant charge if migration failed */
2840 void mem_cgroup_end_migration(struct mem_cgroup *mem,
2841         struct page *oldpage, struct page *newpage)
2842 {
2843         struct page *used, *unused;
2844         struct page_cgroup *pc;
2845
2846         if (!mem)
2847                 return;
2848         /* blocks rmdir() */
2849         cgroup_exclude_rmdir(&mem->css);
2850         /* at migration success, oldpage->mapping is NULL. */
2851         if (oldpage->mapping) {
2852                 used = oldpage;
2853                 unused = newpage;
2854         } else {
2855                 used = newpage;
2856                 unused = oldpage;
2857         }
2858         /*
2859          * We disallowed uncharge of pages under migration because the mapcount
2860          * of the page temporarily goes down to zero.
2861          * Clear the flag and check whether the page should stay charged.
2862          */
2863         pc = lookup_page_cgroup(oldpage);
2864         lock_page_cgroup(pc);
2865         ClearPageCgroupMigration(pc);
2866         unlock_page_cgroup(pc);
2867
2868         __mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE);
2869
2870         /*
2871          * If the page is file cache, the radix-tree replacement is atomic
2872          * and we can skip this check. If it was an Anon page, its mapcount
2873          * goes down to 0, but because we added the MIGRATION flag it is not
2874          * uncharged yet. There are several cases, but the page->mapcount check
2875          * and the USED bit check in mem_cgroup_uncharge_page() are enough.
2876          * (See mem_cgroup_prepare_migration() as well.)
2877          */
2878         if (PageAnon(used))
2879                 mem_cgroup_uncharge_page(used);
2880         /*
2881          * At migration, we may charge against a cgroup which has no
2882          * tasks.
2883          * So, rmdir()->pre_destroy() can be called while we do this charge.
2884          * In that case, pre_destroy() needs to be called again; check it here.
2885          */
2886         cgroup_release_and_wakeup_rmdir(&mem->css);
2887 }
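
/*
 * Expected migration usage (a simplified sketch of the mm/migrate.c flow;
 * variable names and labels are illustrative):
 *
 *	struct mem_cgroup *mem = NULL;
 *
 *	if (mem_cgroup_prepare_migration(page, newpage, &mem))
 *		goto charge_failed;		(don't migrate this page)
 *	...try to move the page contents and mappings to newpage...
 *	mem_cgroup_end_migration(mem, page, newpage);
 *
 * end_migration() looks at oldpage->mapping to decide which of the two
 * pages kept the charge and uncharges the unused one.
 */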
2888
2889 /*
2890  * A call to try to shrink memory usage on charge failure at shmem's swap-in.
2891  * Calling hierarchical_reclaim is not enough because we should update
2892  * last_oom_jiffies to prevent pagefault_out_of_memory from invoking global OOM.
2893  * Moreover, considering the hierarchy, we should reclaim from the mem_over_limit,
2894  * not from the memcg which this page would be charged to.
2895  * try_charge_swapin does all of this work properly.
2896  */
2897 int mem_cgroup_shmem_charge_fallback(struct page *page,
2898                             struct mm_struct *mm,
2899                             gfp_t gfp_mask)
2900 {
2901         struct mem_cgroup *mem = NULL;
2902         int ret;
2903
2904         if (mem_cgroup_disabled())
2905                 return 0;
2906
2907         ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
2908         if (!ret)
2909                 mem_cgroup_cancel_charge_swapin(mem); /* it does !mem check */
2910
2911         return ret;
2912 }
2913
2914 static DEFINE_MUTEX(set_limit_mutex);
2915
2916 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2917                                 unsigned long long val)
2918 {
2919         int retry_count;
2920         u64 memswlimit, memlimit;
2921         int ret = 0;
2922         int children = mem_cgroup_count_children(memcg);
2923         u64 curusage, oldusage;
2924         int enlarge;
2925
2926         /*
2927          * To keep hierarchical_reclaim simple, how long we should retry
2928          * depends on the caller. We set our retry count as a function
2929          * of the number of children we should visit in this loop.
2930          */
2931         retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
2932
2933         oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
2934
2935         enlarge = 0;
2936         while (retry_count) {
2937                 if (signal_pending(current)) {
2938                         ret = -EINTR;
2939                         break;
2940                 }
2941                 /*
2942                  * Rather than hiding everything in some function, do this in
2943                  * an open-coded manner so you can see what it really does.
2944                  * We have to guarantee mem->res.limit < mem->memsw.limit.
2945                  */
2946                 mutex_lock(&set_limit_mutex);
2947                 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
2948                 if (memswlimit < val) {
2949                         ret = -EINVAL;
2950                         mutex_unlock(&set_limit_mutex);
2951                         break;
2952                 }
2953
2954                 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
2955                 if (memlimit < val)
2956                         enlarge = 1;
2957
2958                 ret = res_counter_set_limit(&memcg->res, val);
2959                 if (!ret) {
2960                         if (memswlimit == val)
2961                                 memcg->memsw_is_minimum = true;
2962                         else
2963                                 memcg->memsw_is_minimum = false;
2964                 }
2965                 mutex_unlock(&set_limit_mutex);
2966
2967                 if (!ret)
2968                         break;
2969
2970                 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
2971                                                 MEM_CGROUP_RECLAIM_SHRINK);
2972                 curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
2973                 /* Usage is reduced ? */
2974                 if (curusage >= oldusage)
2975                         retry_count--;
2976                 else
2977                         oldusage = curusage;
2978         }
2979         if (!ret && enlarge)
2980                 memcg_oom_recover(memcg);
2981
2982         return ret;
2983 }
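
/*
 * Illustrative usage (the mount point below is only an example; it depends
 * on where the memory controller is mounted):
 *
 *	# echo 512M > /sys/fs/cgroup/memory/foo/memory.limit_in_bytes
 *	# echo -1 > /sys/fs/cgroup/memory/foo/memory.limit_in_bytes	(unlimited)
 *
 * The write reaches mem_cgroup_resize_limit() via mem_cgroup_write() below.
 * If the current usage exceeds the new limit, the loop above reclaims with
 * mem_cgroup_hierarchical_reclaim() until the usage fits or the retry
 * budget (MEM_CGROUP_RECLAIM_RETRIES per child) runs out, in which case the
 * last res_counter_set_limit() error (-EBUSY) is returned to the writer.
 */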
2984
2985 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
2986                                         unsigned long long val)
2987 {
2988         int retry_count;
2989         u64 memlimit, memswlimit, oldusage, curusage;
2990         int children = mem_cgroup_count_children(memcg);
2991         int ret = -EBUSY;
2992         int enlarge = 0;
2993
2994         /* see mem_cgroup_resize_res_limit */
2995         retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
2996         oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
2997         while (retry_count) {
2998                 if (signal_pending(current)) {
2999                         ret = -EINTR;
3000                         break;
3001                 }
3002                 /*
3003                  * Rather than hiding all of this in some function, do it in
3004                  * an open-coded manner so it is clear what really happens.
3005                  * We have to guarantee mem->res.limit <= mem->memsw.limit.
3006                  */
3007                 mutex_lock(&set_limit_mutex);
3008                 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3009                 if (memlimit > val) {
3010                         ret = -EINVAL;
3011                         mutex_unlock(&set_limit_mutex);
3012                         break;
3013                 }
3014                 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3015                 if (memswlimit < val)
3016                         enlarge = 1;
3017                 ret = res_counter_set_limit(&memcg->memsw, val);
3018                 if (!ret) {
3019                         if (memlimit == val)
3020                                 memcg->memsw_is_minimum = true;
3021                         else
3022                                 memcg->memsw_is_minimum = false;
3023                 }
3024                 mutex_unlock(&set_limit_mutex);
3025
3026                 if (!ret)
3027                         break;
3028
3029                 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
3030                                                 MEM_CGROUP_RECLAIM_NOSWAP |
3031                                                 MEM_CGROUP_RECLAIM_SHRINK);
3032                 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
3033                 /* Usage is reduced ? */
3034                 if (curusage >= oldusage)
3035                         retry_count--;
3036                 else
3037                         oldusage = curusage;
3038         }
3039         if (!ret && enlarge)
3040                 memcg_oom_recover(memcg);
3041         return ret;
3042 }
3043
3044 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
3045                                             gfp_t gfp_mask)
3046 {
3047         unsigned long nr_reclaimed = 0;
3048         struct mem_cgroup_per_zone *mz, *next_mz = NULL;
3049         unsigned long reclaimed;
3050         int loop = 0;
3051         struct mem_cgroup_tree_per_zone *mctz;
3052         unsigned long long excess;
3053
3054         if (order > 0)
3055                 return 0;
3056
3057         mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
3058         /*
3059          * This loop can run for a while, especially if memory cgroups
3060          * continuously keep exceeding their soft limit and putting the
3061          * system under pressure.
3062          */
3063         do {
3064                 if (next_mz)
3065                         mz = next_mz;
3066                 else
3067                         mz = mem_cgroup_largest_soft_limit_node(mctz);
3068                 if (!mz)
3069                         break;
3070
3071                 reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone,
3072                                                 gfp_mask,
3073                                                 MEM_CGROUP_RECLAIM_SOFT);
3074                 nr_reclaimed += reclaimed;
3075                 spin_lock(&mctz->lock);
3076
3077                 /*
3078                  * If we failed to reclaim anything from this memory cgroup
3079                  * it is time to move on to the next cgroup
3080                  */
3081                 next_mz = NULL;
3082                 if (!reclaimed) {
3083                         do {
3084                                 /*
3085                                  * Loop until we find yet another one.
3086                                  *
3087                                  * By the time we get the soft_limit lock
3088                                  * again, someone might have added the
3089                                  * group back to the RB tree. Iterate to
3090                                  * make sure we get a different mem.
3091                                  * mem_cgroup_largest_soft_limit_node returns
3092                                  * NULL if no other cgroup is present on
3093                                  * the tree.
3094                                  */
3095                                 next_mz =
3096                                 __mem_cgroup_largest_soft_limit_node(mctz);
3097                                 if (next_mz == mz) {
3098                                         css_put(&next_mz->mem->css);
3099                                         next_mz = NULL;
3100                                 } else /* next_mz == NULL or other memcg */
3101                                         break;
3102                         } while (1);
3103                 }
3104                 __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
3105                 excess = res_counter_soft_limit_excess(&mz->mem->res);
3106                 /*
3107                  * One school of thought says that we should not add
3108                  * back the node to the tree if reclaim returns 0.
3109                  * But our reclaim could return 0 simply because, due
3110                  * to priority, we are exposing a smaller subset of
3111                  * memory to reclaim from. Consider this a longer
3112                  * term TODO.
3113                  */
3114                 /* If excess == 0, no tree ops */
3115                 __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
3116                 spin_unlock(&mctz->lock);
3117                 css_put(&mz->mem->css);
3118                 loop++;
3119                 /*
3120                  * Could not reclaim anything and there are no more
3121                  * mem cgroups to try or we seem to be looping without
3122                  * reclaiming anything.
3123                  */
3124                 if (!nr_reclaimed &&
3125                         (next_mz == NULL ||
3126                         loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3127                         break;
3128         } while (!nr_reclaimed);
3129         if (next_mz)
3130                 css_put(&next_mz->mem->css);
3131         return nr_reclaimed;
3132 }
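
/*
 * Soft limits only take effect under memory pressure: global reclaim (e.g.
 * kswapd) calls mem_cgroup_soft_limit_reclaim() above to pick, per zone,
 * the cgroup exceeding its soft limit the most (tracked in a per-zone RB
 * tree) and reclaims from it first.  Illustrative setup, the mount point
 * is only an example:
 *
 *	# echo 256M > /sys/fs/cgroup/memory/foo/memory.soft_limit_in_bytes
 *
 * The soft limit itself is set through mem_cgroup_write() below
 * (RES_SOFT_LIMIT); no reclaim happens at write time.
 */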
3133
3134 /*
3135  * This routine traverses the page_cgroups in the given list and drops them all.
3136  * *And* this routine doesn't reclaim the pages themselves, it just removes the page_cgroups.
3137  */
3138 static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
3139                                 int node, int zid, enum lru_list lru)
3140 {
3141         struct zone *zone;
3142         struct mem_cgroup_per_zone *mz;
3143         struct page_cgroup *pc, *busy;
3144         unsigned long flags, loop;
3145         struct list_head *list;
3146         int ret = 0;
3147
3148         zone = &NODE_DATA(node)->node_zones[zid];
3149         mz = mem_cgroup_zoneinfo(mem, node, zid);
3150         list = &mz->lists[lru];
3151
3152         loop = MEM_CGROUP_ZSTAT(mz, lru);
3153         /* give some margin against EBUSY etc...*/
3154         loop += 256;
3155         busy = NULL;
3156         while (loop--) {
3157                 ret = 0;
3158                 spin_lock_irqsave(&zone->lru_lock, flags);
3159                 if (list_empty(list)) {
3160                         spin_unlock_irqrestore(&zone->lru_lock, flags);
3161                         break;
3162                 }
3163                 pc = list_entry(list->prev, struct page_cgroup, lru);
3164                 if (busy == pc) {
3165                         list_move(&pc->lru, list);
3166                         busy = NULL;
3167                         spin_unlock_irqrestore(&zone->lru_lock, flags);
3168                         continue;
3169                 }
3170                 spin_unlock_irqrestore(&zone->lru_lock, flags);
3171
3172                 ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
3173                 if (ret == -ENOMEM)
3174                         break;
3175
3176                 if (ret == -EBUSY || ret == -EINVAL) {
3177                         /* found lock contention or "pc" is obsolete. */
3178                         busy = pc;
3179                         cond_resched();
3180                 } else
3181                         busy = NULL;
3182         }
3183
3184         if (!ret && !list_empty(list))
3185                 return -EBUSY;
3186         return ret;
3187 }
3188
3189 /*
3190  * Make the mem_cgroup's charge 0 if there are no tasks.
3191  * This enables deleting this mem_cgroup.
3192  */
3193 static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
3194 {
3195         int ret;
3196         int node, zid, shrink;
3197         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
3198         struct cgroup *cgrp = mem->css.cgroup;
3199
3200         css_get(&mem->css);
3201
3202         shrink = 0;
3203         /* should free all ? */
3204         if (free_all)
3205                 goto try_to_free;
3206 move_account:
3207         do {
3208                 ret = -EBUSY;
3209                 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
3210                         goto out;
3211                 ret = -EINTR;
3212                 if (signal_pending(current))
3213                         goto out;
3214                 /* This is for putting all *used* pages on the LRU. */
3215                 lru_add_drain_all();
3216                 drain_all_stock_sync();
3217                 ret = 0;
3218                 mem_cgroup_start_move(mem);
3219                 for_each_node_state(node, N_HIGH_MEMORY) {
3220                         for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
3221                                 enum lru_list l;
3222                                 for_each_lru(l) {
3223                                         ret = mem_cgroup_force_empty_list(mem,
3224                                                         node, zid, l);
3225                                         if (ret)
3226                                                 break;
3227                                 }
3228                         }
3229                         if (ret)
3230                                 break;
3231                 }
3232                 mem_cgroup_end_move(mem);
3233                 memcg_oom_recover(mem);
3234                 /* it seems the parent cgroup doesn't have enough memory */
3235                 if (ret == -ENOMEM)
3236                         goto try_to_free;
3237                 cond_resched();
3238         /* "ret" should also be checked to ensure all lists are empty. */
3239         } while (mem->res.usage > 0 || ret);
3240 out:
3241         css_put(&mem->css);
3242         return ret;
3243
3244 try_to_free:
3245         /* returns EBUSY if there is a task or if we come here twice. */
3246         if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
3247                 ret = -EBUSY;
3248                 goto out;
3249         }
3250         /* we call try-to-free pages to make this cgroup empty */
3251         lru_add_drain_all();
3252         /* try to free all pages in this cgroup */
3253         shrink = 1;
3254         while (nr_retries && mem->res.usage > 0) {
3255                 int progress;
3256
3257                 if (signal_pending(current)) {
3258                         ret = -EINTR;
3259                         goto out;
3260                 }
3261                 progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
3262                                                 false, get_swappiness(mem));
3263                 if (!progress) {
3264                         nr_retries--;
3265                         /* maybe some writeback is necessary */
3266                         congestion_wait(BLK_RW_ASYNC, HZ/10);
3267                 }
3268
3269         }
3270         lru_add_drain();
3271         /* try move_account...there may be some *locked* pages. */
3272         goto move_account;
3273 }
3274
3275 int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
3276 {
3277         return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
3278 }
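
/*
 * force_empty is triggered from userspace by writing any value to
 * memory.force_empty (the mount point is only an example):
 *
 *	# echo 0 > /sys/fs/cgroup/memory/foo/memory.force_empty
 *
 * It only succeeds for a cgroup with no tasks and no children; the code
 * first tries to reclaim the pages (try_to_free) and then moves any
 * remaining charges to the parent (move_account).  Typically used right
 * before rmdir of the cgroup directory.
 */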
3279
3280
3281 static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
3282 {
3283         return mem_cgroup_from_cont(cont)->use_hierarchy;
3284 }
3285
3286 static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
3287                                         u64 val)
3288 {
3289         int retval = 0;
3290         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
3291         struct cgroup *parent = cont->parent;
3292         struct mem_cgroup *parent_mem = NULL;
3293
3294         if (parent)
3295                 parent_mem = mem_cgroup_from_cont(parent);
3296
3297         cgroup_lock();
3298         /*
3299          * If parent's use_hierarchy is set, we can't make any modifications
3300          * in the child subtrees. If it is unset, then the change can
3301          * occur, provided the current cgroup has no children.
3302          *
3303          * For the root cgroup, parent_mem is NULL, so we allow the value to
3304          * be set if there are no children.
3305          */
3306         if ((!parent_mem || !parent_mem->use_hierarchy) &&
3307                                 (val == 1 || val == 0)) {
3308                 if (list_empty(&cont->children))
3309                         mem->use_hierarchy = val;
3310                 else
3311                         retval = -EBUSY;
3312         } else
3313                 retval = -EINVAL;
3314         cgroup_unlock();
3315
3316         return retval;
3317 }
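
/*
 * Illustrative use of the hierarchy switch (the mount point is only an
 * example):
 *
 *	# mkdir /sys/fs/cgroup/memory/foo
 *	# echo 1 > /sys/fs/cgroup/memory/foo/memory.use_hierarchy
 *	# mkdir /sys/fs/cgroup/memory/foo/bar	(bar now also charges foo)
 *
 * As enforced above, the value can only be changed while this cgroup has
 * no children and the parent has not already enabled use_hierarchy.
 */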
3318
3319
3320 static u64 mem_cgroup_get_recursive_idx_stat(struct mem_cgroup *mem,
3321                                 enum mem_cgroup_stat_index idx)
3322 {
3323         struct mem_cgroup *iter;
3324         s64 val = 0;
3325
3326         /* each per-cpu value can be negative, so use s64 */
3327         for_each_mem_cgroup_tree(iter, mem)
3328                 val += mem_cgroup_read_stat(iter, idx);
3329
3330         if (val < 0) /* race ? */
3331                 val = 0;
3332         return val;
3333 }
3334
3335 static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap)
3336 {
3337         u64 val;
3338
3339         if (!mem_cgroup_is_root(mem)) {
3340                 if (!swap)
3341                         return res_counter_read_u64(&mem->res, RES_USAGE);
3342                 else
3343                         return res_counter_read_u64(&mem->memsw, RES_USAGE);
3344         }
3345
3346         val = mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_CACHE);
3347         val += mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_RSS);
3348
3349         if (swap)
3350                 val += mem_cgroup_get_recursive_idx_stat(mem,
3351                                 MEM_CGROUP_STAT_SWAPOUT);
3352
3353         return val << PAGE_SHIFT;
3354 }
3355
3356 static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
3357 {
3358         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
3359         u64 val;
3360         int type, name;
3361
3362         type = MEMFILE_TYPE(cft->private);
3363         name = MEMFILE_ATTR(cft->private);
3364         switch (type) {
3365         case _MEM:
3366                 if (name == RES_USAGE)
3367                         val = mem_cgroup_usage(mem, false);
3368                 else
3369                         val = res_counter_read_u64(&mem->res, name);
3370                 break;
3371         case _MEMSWAP:
3372                 if (name == RES_USAGE)
3373                         val = mem_cgroup_usage(mem, true);
3374                 else
3375                         val = res_counter_read_u64(&mem->memsw, name);
3376                 break;
3377         default:
3378                 BUG();
3379                 break;
3380         }
3381         return val;
3382 }
3383 /*
3384  * This is the write handler for the limit files:
3385  * RES_LIMIT and RES_SOFT_LIMIT.
3386  */
3387 static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
3388                             const char *buffer)
3389 {
3390         struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
3391         int type, name;
3392         unsigned long long val;
3393         int ret;
3394
3395         type = MEMFILE_TYPE(cft->private);
3396         name = MEMFILE_ATTR(cft->private);
3397         switch (name) {
3398         case RES_LIMIT:
3399                 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3400                         ret = -EINVAL;
3401                         break;
3402                 }
3403                 /* This function does all the necessary parsing...reuse it */
3404                 ret = res_counter_memparse_write_strategy(buffer, &val);
3405                 if (ret)
3406                         break;
3407                 if (type == _MEM)
3408                         ret = mem_cgroup_resize_limit(memcg, val);
3409                 else
3410                         ret = mem_cgroup_resize_memsw_limit(memcg, val);
3411                 break;
3412         case RES_SOFT_LIMIT:
3413                 ret = res_counter_memparse_write_strategy(buffer, &val);
3414                 if (ret)
3415                         break;
3416                 /*
3417                  * For memsw, soft limits are hard to implement in terms
3418                  * of semantics; for now, we support soft limits only for
3419                  * memory control without swap.
3420                  */
3421                 if (type == _MEM)
3422                         ret = res_counter_set_soft_limit(&memcg->res, val);
3423                 else
3424                         ret = -EINVAL;
3425                 break;
3426         default:
3427                 ret = -EINVAL; /* should be BUG() ? */
3428                 break;
3429         }
3430         return ret;
3431 }
3432
3433 static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
3434                 unsigned long long *mem_limit, unsigned long long *memsw_limit)
3435 {
3436         struct cgroup *cgroup;
3437         unsigned long long min_limit, min_memsw_limit, tmp;
3438
3439         min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3440         min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3441         cgroup = memcg->css.cgroup;
3442         if (!memcg->use_hierarchy)
3443                 goto out;
3444
3445         while (cgroup->parent) {
3446                 cgroup = cgroup->parent;
3447                 memcg = mem_cgroup_from_cont(cgroup);
3448                 if (!memcg->use_hierarchy)
3449                         break;
3450                 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
3451                 min_limit = min(min_limit, tmp);
3452                 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3453                 min_memsw_limit = min(min_memsw_limit, tmp);
3454         }
3455 out:
3456         *mem_limit = min_limit;
3457         *memsw_limit = min_memsw_limit;
3458         return;
3459 }
3460
3461 static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
3462 {
3463         struct mem_cgroup *mem;
3464         int type, name;
3465
3466         mem = mem_cgroup_from_cont(cont);
3467         type = MEMFILE_TYPE(event);
3468         name = MEMFILE_ATTR(event);
3469         switch (name) {
3470         case RES_MAX_USAGE:
3471                 if (type == _MEM)
3472                         res_counter_reset_max(&mem->res);
3473                 else
3474                         res_counter_reset_max(&mem->memsw);
3475                 break;
3476         case RES_FAILCNT:
3477                 if (type == _MEM)
3478                         res_counter_reset_failcnt(&mem->res);
3479                 else
3480                         res_counter_reset_failcnt(&mem->memsw);
3481                 break;
3482         }
3483
3484         return 0;
3485 }
3486
3487 static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
3488                                         struct cftype *cft)
3489 {
3490         return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
3491 }
3492
3493 #ifdef CONFIG_MMU
3494 static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
3495                                         struct cftype *cft, u64 val)
3496 {
3497         struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3498
3499         if (val >= (1 << NR_MOVE_TYPE))
3500                 return -EINVAL;
3501         /*
3502          * We check this value several times in both can_attach() and
3503          * attach(), so we need cgroup lock to prevent this value from being
3504          * inconsistent.
3505          */
3506         cgroup_lock();
3507         mem->move_charge_at_immigrate = val;
3508         cgroup_unlock();
3509
3510         return 0;
3511 }
3512 #else
3513 static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
3514                                         struct cftype *cft, u64 val)
3515 {
3516         return -ENOSYS;
3517 }
3518 #endif
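
/*
 * move_charge_at_immigrate is a bitmask: bit 0 enables moving charges of
 * anonymous pages (and their swap), bit 1 enables moving charges of file
 * pages when a task migrates into this cgroup.  Illustrative, writing to
 * the destination cgroup (the mount point is only an example):
 *
 *	# echo 3 > /sys/fs/cgroup/memory/dst/memory.move_charge_at_immigrate
 *	# echo $PID > /sys/fs/cgroup/memory/dst/tasks	(charges follow the task)
 *
 * Without CONFIG_MMU the write handler above simply returns -ENOSYS.
 */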
3519
3520
3521 /* For read statistics */
3522 enum {
3523         MCS_CACHE,
3524         MCS_RSS,
3525         MCS_FILE_MAPPED,
3526         MCS_PGPGIN,
3527         MCS_PGPGOUT,
3528         MCS_SWAP,
3529         MCS_INACTIVE_ANON,
3530         MCS_ACTIVE_ANON,
3531         MCS_INACTIVE_FILE,
3532         MCS_ACTIVE_FILE,
3533         MCS_UNEVICTABLE,
3534         NR_MCS_STAT,
3535 };
3536
3537 struct mcs_total_stat {
3538         s64 stat[NR_MCS_STAT];
3539 };
3540
3541 struct {
3542         char *local_name;
3543         char *total_name;
3544 } memcg_stat_strings[NR_MCS_STAT] = {
3545         {"cache", "total_cache"},
3546         {"rss", "total_rss"},
3547         {"mapped_file", "total_mapped_file"},
3548         {"pgpgin", "total_pgpgin"},
3549         {"pgpgout", "total_pgpgout"},
3550         {"swap", "total_swap"},
3551         {"inactive_anon", "total_inactive_anon"},
3552         {"active_anon", "total_active_anon"},
3553         {"inactive_file", "total_inactive_file"},
3554         {"active_file", "total_active_file"},
3555         {"unevictable", "total_unevictable"}
3556 };
3557
3558
3559 static void
3560 mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
3561 {
3562         s64 val;
3563
3564         /* per cpu stat */
3565         val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
3566         s->stat[MCS_CACHE] += val * PAGE_SIZE;
3567         val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
3568         s->stat[MCS_RSS] += val * PAGE_SIZE;
3569         val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED);
3570         s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
3571         val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGIN_COUNT);
3572         s->stat[MCS_PGPGIN] += val;
3573         val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGOUT_COUNT);
3574         s->stat[MCS_PGPGOUT] += val;
3575         if (do_swap_account) {
3576                 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
3577                 s->stat[MCS_SWAP] += val * PAGE_SIZE;
3578         }
3579
3580         /* per zone stat */
3581         val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
3582         s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
3583         val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_ANON);
3584         s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
3585         val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_FILE);
3586         s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
3587         val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_FILE);
3588         s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
3589         val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE);
3590         s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
3591 }
3592
3593 static void
3594 mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
3595 {
3596         struct mem_cgroup *iter;
3597
3598         for_each_mem_cgroup_tree(iter, mem)
3599                 mem_cgroup_get_local_stat(iter, s);
3600 }
3601
3602 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
3603                                  struct cgroup_map_cb *cb)
3604 {
3605         struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
3606         struct mcs_total_stat mystat;
3607         int i;
3608
3609         memset(&mystat, 0, sizeof(mystat));
3610         mem_cgroup_get_local_stat(mem_cont, &mystat);
3611
3612         for (i = 0; i < NR_MCS_STAT; i++) {
3613                 if (i == MCS_SWAP && !do_swap_account)
3614                         continue;
3615                 cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
3616         }
3617
3618         /* Hierarchical information */
3619         {
3620                 unsigned long long limit, memsw_limit;
3621                 memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
3622                 cb->fill(cb, "hierarchical_memory_limit", limit);
3623                 if (do_swap_account)
3624                         cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
3625         }
3626
3627         memset(&mystat, 0, sizeof(mystat));
3628         mem_cgroup_get_total_stat(mem_cont, &mystat);
3629         for (i = 0; i < NR_MCS_STAT; i++) {
3630                 if (i == MCS_SWAP && !do_swap_account)
3631                         continue;
3632                 cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
3633         }
3634
3635 #ifdef CONFIG_DEBUG_VM
3636         cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
3637
3638         {
3639                 int nid, zid;
3640                 struct mem_cgroup_per_zone *mz;
3641                 unsigned long recent_rotated[2] = {0, 0};
3642                 unsigned long recent_scanned[2] = {0, 0};
3643
3644                 for_each_online_node(nid)
3645                         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
3646                                 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
3647
3648                                 recent_rotated[0] +=
3649                                         mz->reclaim_stat.recent_rotated[0];
3650                                 recent_rotated[1] +=
3651                                         mz->reclaim_stat.recent_rotated[1];
3652                                 recent_scanned[0] +=
3653                                         mz->reclaim_stat.recent_scanned[0];
3654                                 recent_scanned[1] +=
3655                                         mz->reclaim_stat.recent_scanned[1];
3656                         }
3657                 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
3658                 cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
3659                 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
3660                 cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
3661         }
3662 #endif
3663
3664         return 0;
3665 }
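
/*
 * The resulting memory.stat file shows the local counters first, then the
 * hierarchical limits, then the "total_*" counters summed over the subtree.
 * Roughly (values are only illustrative):
 *
 *	cache 1048576
 *	rss 4096
 *	...
 *	hierarchical_memory_limit 536870912
 *	total_cache 2097152
 *	...
 *
 * With CONFIG_DEBUG_VM, inactive_ratio and the recent_rotated/recent_scanned
 * reclaim statistics are appended as well.
 */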
3666
3667 static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
3668 {
3669         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3670
3671         return get_swappiness(memcg);
3672 }
3673
3674 static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
3675                                        u64 val)
3676 {
3677         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3678         struct mem_cgroup *parent;
3679
3680         if (val > 100)
3681                 return -EINVAL;
3682
3683         if (cgrp->parent == NULL)
3684                 return -EINVAL;
3685
3686         parent = mem_cgroup_from_cont(cgrp->parent);
3687
3688         cgroup_lock();
3689
3690         /* If under hierarchy, only a root with no children can set this value */
3691         if ((parent->use_hierarchy) ||
3692             (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
3693                 cgroup_unlock();
3694                 return -EINVAL;
3695         }
3696
3697         spin_lock(&memcg->reclaim_param_lock);
3698         memcg->swappiness = val;
3699         spin_unlock(&memcg->reclaim_param_lock);
3700
3701         cgroup_unlock();
3702
3703         return 0;
3704 }
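
/*
 * Per-memcg swappiness overrides the global vm.swappiness for reclaim done
 * on behalf of this cgroup (0..100).  Illustrative, the mount point is only
 * an example:
 *
 *	# echo 0 > /sys/fs/cgroup/memory/foo/memory.swappiness
 *
 * As checked above, the root cgroup cannot be tuned this way, and the value
 * cannot be changed for a cgroup whose parent has use_hierarchy set or which
 * has hierarchical children of its own.
 */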
3705
3706 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3707 {
3708         struct mem_cgroup_threshold_ary *t;
3709         u64 usage;
3710         int i;
3711
3712         rcu_read_lock();
3713         if (!swap)
3714                 t = rcu_dereference(memcg->thresholds.primary);
3715         else
3716                 t = rcu_dereference(memcg->memsw_thresholds.primary);
3717
3718         if (!t)
3719                 goto unlock;
3720
3721         usage = mem_cgroup_usage(memcg, swap);
3722
3723         /*
3724          * current_threshold points to the threshold just below or equal to usage.
3725          * If it's not true, a threshold was crossed after the last
3726          * call of __mem_cgroup_threshold().
3727          */
3728         i = t->current_threshold;
3729
3730         /*
3731          * Iterate backward over array of thresholds starting from
3732          * current_threshold and check if a threshold is crossed.
3733          * If none of thresholds below usage is crossed, we read
3734          * only one element of the array here.
3735          */
3736         for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3737                 eventfd_signal(t->entries[i].eventfd, 1);
3738
3739         /* i = current_threshold + 1 */
3740         i++;
3741
3742         /*
3743          * Iterate forward over array of thresholds starting from
3744          * current_threshold+1 and check if a threshold is crossed.
3745          * If none of thresholds above usage is crossed, we read
3746          * only one element of the array here.
3747          */
3748         for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3749                 eventfd_signal(t->entries[i].eventfd, 1);
3750
3751         /* Update current_threshold */
3752         t->current_threshold = i - 1;
3753 unlock:
3754         rcu_read_unlock();
3755 }
3756
3757 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3758 {
3759         while (memcg) {
3760                 __mem_cgroup_threshold(memcg, false);
3761                 if (do_swap_account)
3762                         __mem_cgroup_threshold(memcg, true);
3763
3764                 memcg = parent_mem_cgroup(memcg);
3765         }
3766 }
3767
3768 static int compare_thresholds(const void *a, const void *b)
3769 {
3770         const struct mem_cgroup_threshold *_a = a;
3771         const struct mem_cgroup_threshold *_b = b;
3772
3773         return (_a->threshold > _b->threshold) - (_a->threshold < _b->threshold);
3774 }
3775
3776 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem)
3777 {
3778         struct mem_cgroup_eventfd_list *ev;
3779
3780         list_for_each_entry(ev, &mem->oom_notify, list)
3781                 eventfd_signal(ev->eventfd, 1);
3782         return 0;
3783 }
3784
3785 static void mem_cgroup_oom_notify(struct mem_cgroup *mem)
3786 {
3787         struct mem_cgroup *iter;
3788
3789         for_each_mem_cgroup_tree(iter, mem)
3790                 mem_cgroup_oom_notify_cb(iter);
3791 }
3792
3793 static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
3794         struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
3795 {
3796         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3797         struct mem_cgroup_thresholds *thresholds;
3798         struct mem_cgroup_threshold_ary *new;
3799         int type = MEMFILE_TYPE(cft->private);
3800         u64 threshold, usage;
3801         int i, size, ret;
3802
3803         ret = res_counter_memparse_write_strategy(args, &threshold);
3804         if (ret)
3805                 return ret;
3806
3807         mutex_lock(&memcg->thresholds_lock);
3808
3809         if (type == _MEM)
3810                 thresholds = &memcg->thresholds;
3811         else if (type == _MEMSWAP)
3812                 thresholds = &memcg->memsw_thresholds;
3813         else
3814                 BUG();
3815
3816         usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
3817
3818         /* Check if a threshold was crossed before adding a new one */
3819         if (thresholds->primary)
3820                 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3821
3822         size = thresholds->primary ? thresholds->primary->size + 1 : 1;
3823
3824         /* Allocate memory for new array of thresholds */
3825         new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
3826                         GFP_KERNEL);
3827         if (!new) {
3828                 ret = -ENOMEM;
3829                 goto unlock;
3830         }
3831         new->size = size;
3832
3833         /* Copy thresholds (if any) to new array */
3834         if (thresholds->primary) {
3835                 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
3836                                 sizeof(struct mem_cgroup_threshold));
3837         }
3838
3839         /* Add new threshold */
3840         new->entries[size - 1].eventfd = eventfd;
3841         new->entries[size - 1].threshold = threshold;
3842
3843         /* Sort thresholds. Registering a new threshold isn't time-critical */
3844         sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
3845                         compare_thresholds, NULL);
3846
3847         /* Find current threshold */
3848         new->current_threshold = -1;
3849         for (i = 0; i < size; i++) {
3850                 if (new->entries[i].threshold < usage) {
3851                         /*
3852                          * new->current_threshold will not be used until
3853                          * rcu_assign_pointer(), so it's safe to increment
3854                          * it here.
3855                          */
3856                         ++new->current_threshold;
3857                 }
3858         }
3859
3860         /* Free old spare buffer and save old primary buffer as spare */
3861         kfree(thresholds->spare);
3862         thresholds->spare = thresholds->primary;
3863
3864         rcu_assign_pointer(thresholds->primary, new);
3865
3866         /* To be sure that nobody uses thresholds */
3867         synchronize_rcu();
3868
3869 unlock:
3870         mutex_unlock(&memcg->thresholds_lock);
3871
3872         return ret;
3873 }
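
/*
 * Userspace arms a threshold through the generic cgroup eventfd interface:
 * write "<event_fd> <fd of memory.usage_in_bytes> <threshold in bytes>" to
 * cgroup.event_control and then read() the eventfd.  A rough userspace
 * sketch (the cgroup path is only an example; needs <sys/eventfd.h>,
 * <fcntl.h>, <unistd.h>, <stdio.h>, <string.h>, <stdint.h>):
 *
 *	int efd = eventfd(0, 0);
 *	int ufd = open("/sys/fs/cgroup/memory/foo/memory.usage_in_bytes", O_RDONLY);
 *	int cfd = open("/sys/fs/cgroup/memory/foo/cgroup.event_control", O_WRONLY);
 *	char buf[64];
 *	uint64_t cnt;
 *
 *	snprintf(buf, sizeof(buf), "%d %d %llu", efd, ufd, 100ULL << 20);
 *	write(cfd, buf, strlen(buf));		arms a 100MB usage threshold
 *	read(efd, &cnt, sizeof(cnt));		blocks until a threshold is crossed
 *
 * The same write against memory.memsw.usage_in_bytes registers a
 * memory+swap threshold (type == _MEMSWAP above).
 */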
3874
3875 static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
3876         struct cftype *cft, struct eventfd_ctx *eventfd)
3877 {
3878         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3879         struct mem_cgroup_thresholds *thresholds;
3880         struct mem_cgroup_threshold_ary *new;
3881         int type = MEMFILE_TYPE(cft->private);
3882         u64 usage;
3883         int i, j, size;
3884
3885         mutex_lock(&memcg->thresholds_lock);
3886         if (type == _MEM)
3887                 thresholds = &memcg->thresholds;
3888         else if (type == _MEMSWAP)
3889                 thresholds = &memcg->memsw_thresholds;
3890         else
3891                 BUG();
3892
3893         /*
3894          * Something went wrong if we are trying to unregister a threshold
3895          * when we don't have any thresholds.
3896          */
3897         BUG_ON(!thresholds);
3898
3899         usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
3900
3901         /* Check if a threshold crossed before removing */
3902         /* Check if a threshold was crossed before removing */
3903
3904         /* Calculate the new number of thresholds */
3905         size = 0;
3906         for (i = 0; i < thresholds->primary->size; i++) {
3907                 if (thresholds->primary->entries[i].eventfd != eventfd)
3908                         size++;
3909         }
3910
3911         new = thresholds->spare;
3912
3913         /* Set thresholds array to NULL if we don't have thresholds */
3914         if (!size) {
3915                 kfree(new);
3916                 new = NULL;
3917                 goto swap_buffers;
3918         }
3919
3920         new->size = size;
3921
3922         /* Copy thresholds and find current threshold */
3923         new->current_threshold = -1;
3924         for (i = 0, j = 0; i < thresholds->primary->size; i++) {
3925                 if (thresholds->primary->entries[i].eventfd == eventfd)
3926                         continue;
3927
3928                 new->entries[j] = thresholds->primary->entries[i];
3929                 if (new->entries[j].threshold < usage) {
3930                         /*
3931                          * new->current_threshold will not be used
3932                          * until rcu_assign_pointer(), so it's safe to increment
3933                          * it here.
3934                          */
3935                         ++new->current_threshold;
3936                 }
3937                 j++;
3938         }
3939
3940 swap_buffers:
3941         /* Swap primary and spare array */
3942         thresholds->spare = thresholds->primary;
3943         rcu_assign_pointer(thresholds->primary, new);
3944
3945         /* To be sure that nobody uses thresholds */
3946         synchronize_rcu();
3947
3948         mutex_unlock(&memcg->thresholds_lock);
3949 }
3950
3951 static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
3952         struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
3953 {
3954         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3955         struct mem_cgroup_eventfd_list *event;
3956         int type = MEMFILE_TYPE(cft->private);
3957
3958         BUG_ON(type != _OOM_TYPE);
3959         event = kmalloc(sizeof(*event), GFP_KERNEL);
3960         if (!event)
3961                 return -ENOMEM;
3962
3963         mutex_lock(&memcg_oom_mutex);
3964
3965         event->eventfd = eventfd;
3966         list_add(&event->list, &memcg->oom_notify);
3967
3968         /* already in OOM ? */
3969         if (atomic_read(&memcg->oom_lock))
3970                 eventfd_signal(eventfd, 1);
3971         mutex_unlock(&memcg_oom_mutex);
3972
3973         return 0;
3974 }
3975
3976 static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
3977         struct cftype *cft, struct eventfd_ctx *eventfd)
3978 {
3979         struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3980         struct mem_cgroup_eventfd_list *ev, *tmp;
3981         int type = MEMFILE_TYPE(cft->private);
3982
3983         BUG_ON(type != _OOM_TYPE);
3984
3985         mutex_lock(&memcg_oom_mutex);
3986
3987         list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) {
3988                 if (ev->eventfd == eventfd) {
3989                         list_del(&ev->list);
3990                         kfree(ev);
3991                 }
3992         }
3993
3994         mutex_unlock(&memcg_oom_mutex);
3995 }
3996
3997 static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
3998         struct cftype *cft,  struct cgroup_map_cb *cb)
3999 {
4000         struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
4001
4002         cb->fill(cb, "oom_kill_disable", mem->oom_kill_disable);
4003
4004         if (atomic_read(&mem->oom_lock))
4005                 cb->fill(cb, "under_oom", 1);
4006         else
4007                 cb->fill(cb, "under_oom", 0);
4008         return 0;
4009 }
4010
4011 static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
4012         struct cftype *cft, u64 val)
4013 {
4014         struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
4015         struct mem_cgroup *parent;
4016
4017         /* cannot be set on the root cgroup, and only 0 and 1 are allowed */
4018         if (!cgrp->parent || !((val == 0) || (val == 1)))
4019                 return -EINVAL;
4020
4021         parent = mem_cgroup_from_cont(cgrp->parent);
4022
4023         cgroup_lock();
4024         /* oom-kill-disable is a flag for subhierarchy. */
4025         if ((parent->use_hierarchy) ||
4026             (mem->use_hierarchy && !list_empty(&cgrp->children))) {
4027                 cgroup_unlock();
4028                 return -EINVAL;
4029         }
4030         mem->oom_kill_disable = val;
4031         if (!val)
4032                 memcg_oom_recover(mem);
4033         cgroup_unlock();
4034         return 0;
4035 }
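
/*
 * Illustrative use of oom_control (the mount point is only an example):
 *
 *	# echo 1 > /sys/fs/cgroup/memory/foo/memory.oom_control
 *
 * This disables the OOM killer for the memcg: tasks that hit the limit
 * wait in the memcg's OOM waitqueue instead of being killed, until usage
 * drops or the limit is raised.  OOM notifications can be received by
 * registering an eventfd against memory.oom_control through
 * cgroup.event_control, the same mechanism as the usage thresholds above.
 * The "under_oom" field in the read handler reports whether the group is
 * currently in that state.
 */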
4036
4037 static struct cftype mem_cgroup_files[] = {
4038         {
4039                 .name = "usage_in_bytes",
4040                 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4041                 .read_u64 = mem_cgroup_read,
4042                 .register_event = mem_cgroup_usage_register_event,
4043                 .unregister_event = mem_cgroup_usage_unregister_event,
4044         },
4045         {
4046                 .name = "max_usage_in_bytes",
4047                 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4048                 .trigger = mem_cgroup_reset,
4049                 .read_u64 = mem_cgroup_read,
4050         },
4051         {
4052                 .name = "limit_in_bytes",
4053                 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4054                 .write_string = mem_cgroup_write,
4055                 .read_u64 = mem_cgroup_read,
4056         },
4057         {
4058                 .name = "soft_limit_in_bytes",
4059                 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4060                 .write_string = mem_cgroup_write,
4061                 .read_u64 = mem_cgroup_read,
4062         },
4063         {
4064                 .name = "failcnt",
4065                 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4066                 .trigger = mem_cgroup_reset,
4067                 .read_u64 = mem_cgroup_read,
4068         },
4069         {
4070                 .name = "stat",
4071                 .read_map = mem_control_stat_show,
4072         },
4073         {
4074                 .name = "force_empty",
4075                 .trigger = mem_cgroup_force_empty_write,
4076         },
4077         {
4078                 .name = "use_hierarchy",
4079                 .write_u64 = mem_cgroup_hierarchy_write,
4080                 .read_u64 = mem_cgroup_hierarchy_read,
4081         },
4082         {
4083                 .name = "swappiness",
4084                 .read_u64 = mem_cgroup_swappiness_read,
4085                 .write_u64 = mem_cgroup_swappiness_write,
4086         },
4087         {
4088                 .name = "move_charge_at_immigrate",
4089                 .read_u64 = mem_cgroup_move_charge_read,
4090                 .write_u64 = mem_cgroup_move_charge_write,
4091         },
4092         {
4093                 .name = "oom_control",
4094                 .read_map = mem_cgroup_oom_control_read,
4095                 .write_u64 = mem_cgroup_oom_control_write,
4096                 .register_event = mem_cgroup_oom_register_event,
4097                 .unregister_event = mem_cgroup_oom_unregister_event,
4098                 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4099         },
4100 };
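
/*
 * cgroup core prefixes each entry with the subsystem name, so the files
 * above appear in every memory cgroup directory as memory.usage_in_bytes,
 * memory.limit_in_bytes, memory.stat, memory.oom_control, and so on.
 * usage_in_bytes and oom_control additionally support eventfd notification
 * through their register_event/unregister_event handlers.
 */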
4101
4102 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4103 static struct cftype memsw_cgroup_files[] = {
4104         {
4105                 .name = "memsw.usage_in_bytes",
4106                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
4107                 .read_u64 = mem_cgroup_read,
4108                 .register_event = mem_cgroup_usage_register_event,
4109                 .unregister_event = mem_cgroup_usage_unregister_event,
4110         },
4111         {
4112                 .name = "memsw.max_usage_in_bytes",
4113                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
4114                 .trigger = mem_cgroup_reset,
4115                 .read_u64 = mem_cgroup_read,
4116         },
4117         {
4118                 .name = "memsw.limit_in_bytes",
4119                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
4120                 .write_string = mem_cgroup_write,
4121                 .read_u64 = mem_cgroup_read,
4122         },
4123         {
4124                 .name = "memsw.failcnt",
4125                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
4126                 .trigger = mem_cgroup_reset,
4127                 .read_u64 = mem_cgroup_read,
4128         },
4129 };
4130
4131 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
4132 {
4133         if (!do_swap_account)
4134                 return 0;
4135         return cgroup_add_files(cont, ss, memsw_cgroup_files,
4136                                 ARRAY_SIZE(memsw_cgroup_files));
4137 };
4138 #else
4139 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
4140 {
4141         return 0;
4142 }
4143 #endif
4144
4145 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
4146 {
4147         struct mem_cgroup_per_node *pn;
4148         struct mem_cgroup_per_zone *mz;
4149         enum lru_list l;
4150         int zone, tmp = node;
4151         /*
4152          * This routine is called against possible nodes.
4153          * But it's a BUG to call kmalloc() against an offline node.
4154          *
4155          * TODO: this routine can waste a lot of memory for nodes which will
4156          *       never be onlined. It's better to use a memory hotplug callback
4157          *       function.
4158          */
4159         if (!node_state(node, N_NORMAL_MEMORY))
4160                 tmp = -1;
4161         pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4162         if (!pn)
4163                 return 1;
4164
4165         mem->info.nodeinfo[node] = pn;
4166         memset(pn, 0, sizeof(*pn));
4167
4168         for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4169                 mz = &pn->zoneinfo[zone];
4170                 for_each_lru(l)
4171                         INIT_LIST_HEAD(&mz->lists[l]);
4172                 mz->usage_in_excess = 0;
4173                 mz->on_tree = false;
4174                 mz->mem = mem;
4175         }
4176         return 0;
4177 }
4178
4179 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
4180 {
4181         kfree(mem->info.nodeinfo[node]);
4182 }
4183
4184 static struct mem_cgroup *mem_cgroup_alloc(void)
4185 {
4186         struct mem_cgroup *mem;
4187         int size = sizeof(struct mem_cgroup);
4188
4189         /* Can be very big if MAX_NUMNODES is very big */
4190         if (size < PAGE_SIZE)
4191                 mem = kmalloc(size, GFP_KERNEL);
4192         else
4193                 mem = vmalloc(size);
4194
4195         if (!mem)
4196                 return NULL;
4197
4198         memset(mem, 0, size);
4199         mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4200         if (!mem->stat) {
4201                 if (size < PAGE_SIZE)
4202                         kfree(mem);
4203                 else
4204                         vfree(mem);
4205                 return NULL;	/* don't touch the freed mem below */
4206         }
4207         spin_lock_init(&mem->pcp_counter_lock);
4208         return mem;
4209 }
4210
4211 /*
4212  * When destroying a mem_cgroup, references from swap_cgroup can remain.
4213  * (scanning them all at force_empty is too costly...)
4214  *
4215  * Instead of clearing all references at force_empty, we remember
4216  * the number of references from swap_cgroup and free the mem_cgroup when
4217  * it goes down to 0.
4218  *
4219  * Removal of the cgroup itself succeeds regardless of refs from swap.
4220  */
4221
4222 static void __mem_cgroup_free(struct mem_cgroup *mem)
4223 {
4224         int node;
4225
4226         mem_cgroup_remove_from_trees(mem);
4227         free_css_id(&mem_cgroup_subsys, &mem->css);
4228
4229         for_each_node_state(node, N_POSSIBLE)
4230                 free_mem_cgroup_per_zone_info(mem, node);
4231
4232         free_percpu(mem->stat);
4233         if (sizeof(struct mem_cgroup) < PAGE_SIZE)
4234                 kfree(mem);
4235         else
4236                 vfree(mem);
4237 }
4238
4239 static void mem_cgroup_get(struct mem_cgroup *mem)
4240 {
4241         atomic_inc(&mem->refcnt);
4242 }
4243
4244 static void __mem_cgroup_put(struct mem_cgroup *mem, int count)
4245 {
4246         if (atomic_sub_and_test(count, &mem->refcnt)) {
4247                 struct mem_cgroup *parent = parent_mem_cgroup(mem);
4248                 __mem_cgroup_free(mem);
4249                 if (parent)
4250                         mem_cgroup_put(parent);
4251         }
4252 }
4253
4254 static void mem_cgroup_put(struct mem_cgroup *mem)
4255 {
4256         __mem_cgroup_put(mem, 1);
4257 }
4258
4259 /*
4260  * Returns the parent mem_cgroup in the memcg hierarchy when hierarchy is enabled.
4261  */
4262 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
4263 {
4264         if (!mem->res.parent)
4265                 return NULL;
4266         return mem_cgroup_from_res_counter(mem->res.parent, res);
4267 }
4268
4269 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4270 static void __init enable_swap_cgroup(void)
4271 {
4272         if (!mem_cgroup_disabled() && really_do_swap_account)
4273                 do_swap_account = 1;
4274 }
4275 #else
4276 static void __init enable_swap_cgroup(void)
4277 {
4278 }
4279 #endif
4280
4281 static int mem_cgroup_soft_limit_tree_init(void)
4282 {
4283         struct mem_cgroup_tree_per_node *rtpn;
4284         struct mem_cgroup_tree_per_zone *rtpz;
4285         int tmp, node, zone;
4286
4287         for_each_node_state(node, N_POSSIBLE) {
4288                 tmp = node;
4289                 if (!node_state(node, N_NORMAL_MEMORY))
4290                         tmp = -1;
4291                 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
4292                 if (!rtpn)
4293                         return 1;
4294
4295                 soft_limit_tree.rb_tree_per_node[node] = rtpn;
4296
4297                 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4298                         rtpz = &rtpn->rb_tree_per_zone[zone];
4299                         rtpz->rb_root = RB_ROOT;
4300                         spin_lock_init(&rtpz->lock);
4301                 }
4302         }
4303         return 0;
4304 }
4305
4306 static struct cgroup_subsys_state * __ref
4307 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
4308 {
4309         struct mem_cgroup *mem, *parent;
4310         long error = -ENOMEM;
4311         int node;
4312
4313         mem = mem_cgroup_alloc();
4314         if (!mem)
4315                 return ERR_PTR(error);
4316
4317         for_each_node_state(node, N_POSSIBLE)
4318                 if (alloc_mem_cgroup_per_zone_info(mem, node))
4319                         goto free_out;
4320
4321         /* root ? */
4322         if (cont->parent == NULL) {
4323                 int cpu;
4324                 enable_swap_cgroup();
4325                 parent = NULL;
4326                 root_mem_cgroup = mem;
4327                 if (mem_cgroup_soft_limit_tree_init())
4328                         goto free_out;
4329                 for_each_possible_cpu(cpu) {
4330                         struct memcg_stock_pcp *stock =
4331                                                 &per_cpu(memcg_stock, cpu);
4332                         INIT_WORK(&stock->work, drain_local_stock);
4333                 }
4334                 hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
4335         } else {
4336                 parent = mem_cgroup_from_cont(cont->parent);
4337                 mem->use_hierarchy = parent->use_hierarchy;
4338                 mem->oom_kill_disable = parent->oom_kill_disable;
4339         }
4340
4341         if (parent && parent->use_hierarchy) {
4342                 res_counter_init(&mem->res, &parent->res);
4343                 res_counter_init(&mem->memsw, &parent->memsw);
4344                 /*
4345                  * We increment refcnt of the parent to ensure that we can
4346                  * safely access it on res_counter_charge/uncharge.
4347                  * This refcnt will be decremented when freeing this
4348                  * mem_cgroup(see mem_cgroup_put).
4349                  */
4350                 mem_cgroup_get(parent);
4351         } else {
4352                 res_counter_init(&mem->res, NULL);
4353                 res_counter_init(&mem->memsw, NULL);
4354         }
4355         mem->last_scanned_child = 0;
4356         spin_lock_init(&mem->reclaim_param_lock);
4357         INIT_LIST_HEAD(&mem->oom_notify);
4358
4359         if (parent)
4360                 mem->swappiness = get_swappiness(parent);
4361         atomic_set(&mem->refcnt, 1);
4362         mem->move_charge_at_immigrate = 0;
4363         mutex_init(&mem->thresholds_lock);
4364         return &mem->css;
4365 free_out:
4366         __mem_cgroup_free(mem);
4367         root_mem_cgroup = NULL;
4368         return ERR_PTR(error);
4369 }
4370
4371 static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
4372                                         struct cgroup *cont)
4373 {
4374         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
4375
4376         return mem_cgroup_force_empty(mem, false);
4377 }
4378
4379 static void mem_cgroup_destroy(struct cgroup_subsys *ss,
4380                                 struct cgroup *cont)
4381 {
4382         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
4383
4384         mem_cgroup_put(mem);
4385 }
4386
4387 static int mem_cgroup_populate(struct cgroup_subsys *ss,
4388                                 struct cgroup *cont)
4389 {
4390         int ret;
4391
4392         ret = cgroup_add_files(cont, ss, mem_cgroup_files,
4393                                 ARRAY_SIZE(mem_cgroup_files));
4394
4395         if (!ret)
4396                 ret = register_memsw_files(cont, ss);
4397         return ret;
4398 }
4399
4400 #ifdef CONFIG_MMU
4401 /* Handlers for move charge at task migration. */
4402 #define PRECHARGE_COUNT_AT_ONCE 256
4403 static int mem_cgroup_do_precharge(unsigned long count)
4404 {
4405         int ret = 0;
4406         int batch_count = PRECHARGE_COUNT_AT_ONCE;
4407         struct mem_cgroup *mem = mc.to;
4408
4409         if (mem_cgroup_is_root(mem)) {
4410                 mc.precharge += count;
4411                 /* we don't need css_get for root */
4412                 return ret;
4413         }
4414         /* try to charge at once */
4415         if (count > 1) {
4416                 struct res_counter *dummy;
4417                 /*
4418                  * "mem" cannot be under rmdir() because cgroup_lock_live_cgroup()
4419                  * has already checked that it is not removed, and we are still
4420                  * under the same cgroup_mutex. So we can postpone
4421                  * css_get().
4422                  */
4423                 if (res_counter_charge(&mem->res, PAGE_SIZE * count, &dummy))
4424                         goto one_by_one;
4425                 if (do_swap_account && res_counter_charge(&mem->memsw,
4426                                                 PAGE_SIZE * count, &dummy)) {
4427                         res_counter_uncharge(&mem->res, PAGE_SIZE * count);
4428                         goto one_by_one;
4429                 }
4430                 mc.precharge += count;
4431                 return ret;
4432         }
4433 one_by_one:
4434         /* fall back to one by one charge */
4435         while (count--) {
4436                 if (signal_pending(current)) {
4437                         ret = -EINTR;
4438                         break;
4439                 }
4440                 if (!batch_count--) {
4441                         batch_count = PRECHARGE_COUNT_AT_ONCE;
4442                         cond_resched();
4443                 }
4444                 ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
4445                 if (ret || !mem)
4446                         /* mem_cgroup_clear_mc() will do uncharge later */
4447                         return -ENOMEM;
4448                 mc.precharge++;
4449         }
4450         return ret;
4451 }
4452
4453 /**
4454  * is_target_pte_for_mc - check whether a pte is a valid target for move charge
4455  * @vma: the vma the pte to be checked belongs to
4456  * @addr: the address corresponding to the pte to be checked
4457  * @ptent: the pte to be checked
4458  * @target: pointer where the target page or swap entry is stored (can be NULL)
4459  *
4460  * Returns
4461  *   0 (MC_TARGET_NONE): if the pte is not a target for move charge.
4462  *   1 (MC_TARGET_PAGE): if the page corresponding to this pte is a target for
4463  *     move charge. If @target is not NULL, the page is stored in target->page
4464  *     with an extra refcount taken (callers must handle it).
4465  *   2 (MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
4466  *     target for charge migration. If @target is not NULL, the entry is stored
4467  *     in target->ent.
4468  *
4469  * Called with pte lock held.
4470  */
4471 union mc_target {
4472         struct page     *page;
4473         swp_entry_t     ent;
4474 };
4475
4476 enum mc_target_type {
4477         MC_TARGET_NONE, /* not used */
4478         MC_TARGET_PAGE,
4479         MC_TARGET_SWAP,
4480 };
4481
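/*
 * For a present pte, return the mapped page if it is a move-charge
 * candidate: anonymous pages only when move_anon() is enabled and the page
 * is not shared, file pages only when move_file() is enabled.  A reference
 * is taken with get_page_unless_zero(); the caller must drop it.
 */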
4482 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4483                                                 unsigned long addr, pte_t ptent)
4484 {
4485         struct page *page = vm_normal_page(vma, addr, ptent);
4486
4487         if (!page || !page_mapped(page))
4488                 return NULL;
4489         if (PageAnon(page)) {
4490                 /* we don't move shared anon */
4491                 if (!move_anon() || page_mapcount(page) > 2)
4492                         return NULL;
4493         } else if (!move_file())
4494                 /* we ignore mapcount for file pages */
4495                 return NULL;
4496         if (!get_page_unless_zero(page))
4497                 return NULL;
4498
4499         return page;
4500 }
4501
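/*
 * For a swap pte, report the swap entry (and the swap cache page, if any)
 * as a move-charge candidate.  Entries with more than one user are treated
 * as shared and skipped, and *entry is only filled when swap accounting is
 * enabled.
 */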
4502 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4503                         unsigned long addr, pte_t ptent, swp_entry_t *entry)
4504 {
4505         int usage_count;
4506         struct page *page = NULL;
4507         swp_entry_t ent = pte_to_swp_entry(ptent);
4508
4509         if (!move_anon() || non_swap_entry(ent))
4510                 return NULL;
4511         usage_count = mem_cgroup_count_swap_user(ent, &page);
4512         if (usage_count > 1) { /* we don't move shared anon */
4513                 if (page)
4514                         put_page(page);
4515                 return NULL;
4516         }
4517         if (do_swap_account)
4518                 entry->val = ent.val;
4519
4520         return page;
4521 }
4522
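/*
 * For a none or file pte in a file-backed vma, look up the page cache page
 * (and, for shmem/tmpfs, the swap entry) at the corresponding offset so
 * that file pages can be moved even if this task never faulted them in.
 */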
4523 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4524                         unsigned long addr, pte_t ptent, swp_entry_t *entry)
4525 {
4526         struct page *page = NULL;
4527         struct inode *inode;
4528         struct address_space *mapping;
4529         pgoff_t pgoff;
4530
4531         if (!vma->vm_file) /* anonymous vma */
4532                 return NULL;
4533         if (!move_file())
4534                 return NULL;
4535
4536         inode = vma->vm_file->f_path.dentry->d_inode;
4537         mapping = vma->vm_file->f_mapping;
4538         if (pte_none(ptent))
4539                 pgoff = linear_page_index(vma, addr);
4540         else /* pte_file(ptent) is true */
4541                 pgoff = pte_to_pgoff(ptent);
4542
4543         /* the page is moved even if it's not RSS of this task (i.e., not yet faulted in by it). */
4544         if (!mapping_cap_swap_backed(mapping)) { /* normal file */
4545                 page = find_get_page(mapping, pgoff);
4546         } else { /* shmem/tmpfs file. we should take account of swap too. */
4547                 swp_entry_t ent;
4548                 mem_cgroup_get_shmem_target(inode, pgoff, &page, &ent);
4549                 if (do_swap_account)
4550                         entry->val = ent.val;
4551         }
4552
4553         return page;
4554 }
4555
4556 static int is_target_pte_for_mc(struct vm_area_struct *vma,
4557                 unsigned long addr, pte_t ptent, union mc_target *target)
4558 {
4559         struct page *page = NULL;
4560         struct page_cgroup *pc;
4561         int ret = 0;
4562         swp_entry_t ent = { .val = 0 };
4563
4564         if (pte_present(ptent))
4565                 page = mc_handle_present_pte(vma, addr, ptent);
4566         else if (is_swap_pte(ptent))
4567                 page = mc_handle_swap_pte(vma, addr, ptent, &ent);
4568         else if (pte_none(ptent) || pte_file(ptent))
4569                 page = mc_handle_file_pte(vma, addr, ptent, &ent);
4570
4571         if (!page && !ent.val)
4572                 return 0;
4573         if (page) {
4574                 pc = lookup_page_cgroup(page);
4575                 /*
4576                  * Do only a loose check without the page_cgroup lock;
4577                  * mem_cgroup_move_account() checks whether the pc is valid
4578                  * under the lock.
4579                  */
4580                 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
4581                         ret = MC_TARGET_PAGE;
4582                         if (target)
4583                                 target->page = page;
4584                 }
4585                 if (!ret || !target)
4586                         put_page(page);
4587         }
4588         /* There is a swap entry and the page either doesn't exist or isn't charged */
4589         if (ent.val && !ret &&
4590                         css_id(&mc.from->css) == lookup_swap_cgroup(ent)) {
4591                 ret = MC_TARGET_SWAP;
4592                 if (target)
4593                         target->ent = ent;
4594         }
4595         return ret;
4596 }
4597
4598 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
4599                                         unsigned long addr, unsigned long end,
4600                                         struct mm_walk *walk)
4601 {
4602         struct vm_area_struct *vma = walk->private;
4603         pte_t *pte;
4604         spinlock_t *ptl;
4605
4606         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4607         for (; addr != end; pte++, addr += PAGE_SIZE)
4608                 if (is_target_pte_for_mc(vma, addr, *pte, NULL))
4609                         mc.precharge++; /* increment precharge temporarily */
4610         pte_unmap_unlock(pte - 1, ptl);
4611         cond_resched();
4612
4613         return 0;
4614 }
4615
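/*
 * Walk every vma of @mm and count the ptes that are valid move-charge
 * targets.  The pte walker above accumulates the count in mc.precharge,
 * which is then returned to the caller and reset to 0.
 */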
4616 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4617 {
4618         unsigned long precharge;
4619         struct vm_area_struct *vma;
4620
4621         down_read(&mm->mmap_sem);
4622         for (vma = mm->mmap; vma; vma = vma->vm_next) {
4623                 struct mm_walk mem_cgroup_count_precharge_walk = {
4624                         .pmd_entry = mem_cgroup_count_precharge_pte_range,
4625                         .mm = mm,
4626                         .private = vma,
4627                 };
4628                 if (is_vm_hugetlb_page(vma))
4629                         continue;
4630                 walk_page_range(vma->vm_start, vma->vm_end,
4631                                         &mem_cgroup_count_precharge_walk);
4632         }
4633         up_read(&mm->mmap_sem);
4634
4635         precharge = mc.precharge;
4636         mc.precharge = 0;
4637
4638         return precharge;
4639 }
4640
4641 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
4642 {
4643         return mem_cgroup_do_precharge(mem_cgroup_count_precharge(mm));
4644 }
4645
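/*
 * Undo the move-charge state set up in can_attach(): cancel leftover
 * precharges against mc.to, uncharge pages already moved away from mc.from,
 * fix up swap charges and refcounts, then clear the mc fields and wake up
 * anyone waiting for the move to finish.
 */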
4646 static void mem_cgroup_clear_mc(void)
4647 {
4648         struct mem_cgroup *from = mc.from;
4649         struct mem_cgroup *to = mc.to;
4650
4651         /* we must uncharge all the leftover precharges from mc.to */
4652         if (mc.precharge) {
4653                 __mem_cgroup_cancel_charge(mc.to, mc.precharge);
4654                 mc.precharge = 0;
4655         }
4656         /*
4657          * we didn't uncharge from mc.from in mem_cgroup_move_account(), so
4658          * we must uncharge here.
4659          */
4660         if (mc.moved_charge) {
4661                 __mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
4662                 mc.moved_charge = 0;
4663         }
4664         /* we must fix up refcnts and charges */
4665         if (mc.moved_swap) {
4666                 /* uncharge swap account from the old cgroup */
4667                 if (!mem_cgroup_is_root(mc.from))
4668                         res_counter_uncharge(&mc.from->memsw,
4669                                                 PAGE_SIZE * mc.moved_swap);
4670                 __mem_cgroup_put(mc.from, mc.moved_swap);
4671
4672                 if (!mem_cgroup_is_root(mc.to)) {
4673                         /*
4674                          * we charged both to->res and to->memsw, so we should
4675                          * uncharge to->res.
4676                          */
4677                         res_counter_uncharge(&mc.to->res,
4678                                                 PAGE_SIZE * mc.moved_swap);
4679                 }
4680                 /* we've already done mem_cgroup_get(mc.to) */
4681
4682                 mc.moved_swap = 0;
4683         }
4684         spin_lock(&mc.lock);
4685         mc.from = NULL;
4686         mc.to = NULL;
4687         mc.moving_task = NULL;
4688         spin_unlock(&mc.lock);
4689         mem_cgroup_end_move(from);
4690         memcg_oom_recover(from);
4691         memcg_oom_recover(to);
4692         wake_up_all(&mc.waitq);
4693 }
4694
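/*
 * can_attach handler: when the destination cgroup has
 * move_charge_at_immigrate enabled (e.g. after something like
 * "echo 1 > memory.move_charge_at_immigrate" from userspace) and @p owns
 * its mm, record the source and destination in mc and precharge everything
 * that will be moved.  On failure the state is torn down again.
 */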
4695 static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
4696                                 struct cgroup *cgroup,
4697                                 struct task_struct *p,
4698                                 bool threadgroup)
4699 {
4700         int ret = 0;
4701         struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup);
4702
4703         if (mem->move_charge_at_immigrate) {
4704                 struct mm_struct *mm;
4705                 struct mem_cgroup *from = mem_cgroup_from_task(p);
4706
4707                 VM_BUG_ON(from == mem);
4708
4709                 mm = get_task_mm(p);
4710                 if (!mm)
4711                         return 0;
4712         /* We move charges only when we move the owner of the mm */
4713                 if (mm->owner == p) {
4714                         VM_BUG_ON(mc.from);
4715                         VM_BUG_ON(mc.to);
4716                         VM_BUG_ON(mc.precharge);
4717                         VM_BUG_ON(mc.moved_charge);
4718                         VM_BUG_ON(mc.moved_swap);
4719                         VM_BUG_ON(mc.moving_task);
4720                         mem_cgroup_start_move(from);
4721                         spin_lock(&mc.lock);
4722                         mc.from = from;
4723                         mc.to = mem;
4724                         mc.precharge = 0;
4725                         mc.moved_charge = 0;
4726                         mc.moved_swap = 0;
4727                         mc.moving_task = current;
4728                         spin_unlock(&mc.lock);
4729
4730                         ret = mem_cgroup_precharge_mc(mm);
4731                         if (ret)
4732                                 mem_cgroup_clear_mc();
4733                 }
4734                 mmput(mm);
4735         }
4736         return ret;
4737 }
4738
4739 static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
4740                                 struct cgroup *cgroup,
4741                                 struct task_struct *p,
4742                                 bool threadgroup)
4743 {
4744         mem_cgroup_clear_mc();
4745 }
4746
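/*
 * Move the charges for one pte range: target pages are isolated from the
 * LRU and moved with mem_cgroup_move_account(), swap entries are moved with
 * mem_cgroup_move_swap_account().  When the precharge pool runs dry, the
 * walk resumes after precharging one more page.
 */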
4747 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
4748                                 unsigned long addr, unsigned long end,
4749                                 struct mm_walk *walk)
4750 {
4751         int ret = 0;
4752         struct vm_area_struct *vma = walk->private;
4753         pte_t *pte;
4754         spinlock_t *ptl;
4755
4756 retry:
4757         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4758         for (; addr != end; addr += PAGE_SIZE) {
4759                 pte_t ptent = *(pte++);
4760                 union mc_target target;
4761                 int type;
4762                 struct page *page;
4763                 struct page_cgroup *pc;
4764                 swp_entry_t ent;
4765
4766                 if (!mc.precharge)
4767                         break;
4768
4769                 type = is_target_pte_for_mc(vma, addr, ptent, &target);
4770                 switch (type) {
4771                 case MC_TARGET_PAGE:
4772                         page = target.page;
4773                         if (isolate_lru_page(page))
4774                                 goto put;
4775                         pc = lookup_page_cgroup(page);
4776                         if (!mem_cgroup_move_account(pc,
4777                                                 mc.from, mc.to, false)) {
4778                                 mc.precharge--;
4779                                 /* we uncharge from mc.from later. */
4780                                 mc.moved_charge++;
4781                         }
4782                         putback_lru_page(page);
4783 put:                    /* is_target_pte_for_mc() gets the page */
4784                         put_page(page);
4785                         break;
4786                 case MC_TARGET_SWAP:
4787                         ent = target.ent;
4788                         if (!mem_cgroup_move_swap_account(ent,
4789                                                 mc.from, mc.to, false)) {
4790                                 mc.precharge--;
4791                                 /* we fix up refcnts and charges later. */
4792                                 mc.moved_swap++;
4793                         }
4794                         break;
4795                 default:
4796                         break;
4797                 }
4798         }
4799         pte_unmap_unlock(pte - 1, ptl);
4800         cond_resched();
4801
4802         if (addr != end) {
4803                 /*
4804                  * We have consumed all precharges we got in can_attach().
4805                  * We try to charge one page at a time, but stop doing any
4806                  * additional charges to mc.to once a charge has failed
4807                  * during the attach() phase.
4808                  */
4809                 ret = mem_cgroup_do_precharge(1);
4810                 if (!ret)
4811                         goto retry;
4812         }
4813
4814         return ret;
4815 }
4816
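/*
 * Walk all vmas of @mm (hugetlb vmas are skipped) and move the charges of
 * every target pte from mc.from to mc.to.  The walk is abandoned as soon as
 * the precharges are exhausted and an additional one-page charge fails.
 */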
4817 static void mem_cgroup_move_charge(struct mm_struct *mm)
4818 {
4819         struct vm_area_struct *vma;
4820
4821         lru_add_drain_all();
4822         down_read(&mm->mmap_sem);
4823         for (vma = mm->mmap; vma; vma = vma->vm_next) {
4824                 int ret;
4825                 struct mm_walk mem_cgroup_move_charge_walk = {
4826                         .pmd_entry = mem_cgroup_move_charge_pte_range,
4827                         .mm = mm,
4828                         .private = vma,
4829                 };
4830                 if (is_vm_hugetlb_page(vma))
4831                         continue;
4832                 ret = walk_page_range(vma->vm_start, vma->vm_end,
4833                                                 &mem_cgroup_move_charge_walk);
4834                 if (ret)
4835                         /*
4836                          * This means we have consumed all precharges and failed
4837                          * to do an additional charge. Just abandon here.
4838                          */
4839                         break;
4840         }
4841         up_read(&mm->mmap_sem);
4842 }
4843
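/*
 * attach handler: once the task has actually migrated, move its charges
 * (if a move was set up in can_attach()) and then clear the move state.
 */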
4844 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
4845                                 struct cgroup *cont,
4846                                 struct cgroup *old_cont,
4847                                 struct task_struct *p,
4848                                 bool threadgroup)
4849 {
4850         struct mm_struct *mm;
4851
4852         if (!mc.to)
4853                 /* no need to move charge */
4854                 return;
4855
4856         mm = get_task_mm(p);
4857         if (mm) {
4858                 mem_cgroup_move_charge(mm);
4859                 mmput(mm);
4860         }
4861         mem_cgroup_clear_mc();
4862 }
4863 #else   /* !CONFIG_MMU */
4864 static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
4865                                 struct cgroup *cgroup,
4866                                 struct task_struct *p,
4867                                 bool threadgroup)
4868 {
4869         return 0;
4870 }
4871 static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
4872                                 struct cgroup *cgroup,
4873                                 struct task_struct *p,
4874                                 bool threadgroup)
4875 {
4876 }
4877 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
4878                                 struct cgroup *cont,
4879                                 struct cgroup *old_cont,
4880                                 struct task_struct *p,
4881                                 bool threadgroup)
4882 {
4883 }
4884 #endif
4885
4886 struct cgroup_subsys mem_cgroup_subsys = {
4887         .name = "memory",
4888         .subsys_id = mem_cgroup_subsys_id,
4889         .create = mem_cgroup_create,
4890         .pre_destroy = mem_cgroup_pre_destroy,
4891         .destroy = mem_cgroup_destroy,
4892         .populate = mem_cgroup_populate,
4893         .can_attach = mem_cgroup_can_attach,
4894         .cancel_attach = mem_cgroup_cancel_attach,
4895         .attach = mem_cgroup_move_task,
4896         .early_init = 0,
4897         .use_id = 1,
4898 };
4899
4900 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4901
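/*
 * "noswapaccount" boot option: clear really_do_swap_account so that memsw
 * accounting stays disabled even when CONFIG_CGROUP_MEM_RES_CTLR_SWAP is
 * built in, e.g. booting with "... noswapaccount" avoids the overhead of
 * swap accounting.
 */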
4902 static int __init disable_swap_account(char *s)
4903 {
4904         really_do_swap_account = 0;
4905         return 1;
4906 }
4907 __setup("noswapaccount", disable_swap_account);
4908 #endif