/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>

struct mem_cgroup;
struct page;
struct mm_struct;

/* Stats that can be updated by the kernel. */
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions with a gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg does
 * not allocate memory itself; it only reclaims memory from all available
 * zones, so the "where do I want memory from" placement bits of gfp_mask
 * have no meaning. Any bits of that field could therefore be used, but
 * having a rule avoids ambiguity: a charge function's gfp_mask should be
 * either GFP_KERNEL or (gfp_mask & GFP_RECLAIM_MASK).
 * (Of course, if memcg ever allocates memory itself, GFP_KERNEL is the
 * sane choice.)
 */

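/*
 * Illustrative sketch only (not part of this header): a hypothetical
 * call site charging a page-cache page would mask the caller-supplied
 * flags before charging, along the lines of:
 *
 *	error = mem_cgroup_cache_charge(page, current->mm,
 *					gfp_mask & GFP_RECLAIM_MASK);
 *	if (error)
 *		return error;
 *
 * The placement bits are ignored by memcg, so masking with
 * GFP_RECLAIM_MASK (or passing plain GFP_KERNEL) keeps charge requests
 * uniform across callers.
 */
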
extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **ptr);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);

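/*
 * Illustrative sketch only: the swap-in helpers above form a
 * try/commit/cancel sequence for a swap-in fault path (variable and
 * label names here are assumptions):
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg))
 *		goto out_oom;
 *	...map the page into the faulting address space...
 *	mem_cgroup_commit_charge_swapin(page, memcg);
 *
 * and, on an error path after a successful try_charge:
 *
 *	mem_cgroup_cancel_charge_swapin(memcg);
 */
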
extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);

extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_rotate_reclaimable_page(struct page *page);
extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru(struct page *page);
extern void mem_cgroup_move_lists(struct page *page,
				  enum lru_list from, enum lru_list to);

/* For coalescing uncharges to reduce memcg overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);

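/*
 * Illustrative sketch only: a caller releasing many pages at once can
 * bracket the loop with the start/end helpers above so memcg may coalesce
 * the uncharges (the list and loop below are hypothetical):
 *
 *	mem_cgroup_uncharge_start();
 *	list_for_each_entry(page, &pages_to_free, lru)
 *		mem_cgroup_uncharge_cache_page(page);
 *	mem_cgroup_uncharge_end();
 */
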
extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);

/* Does the owner task of @mm belong to the mem_cgroup @cgroup? */
static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference((mm)->owner));
	rcu_read_unlock();
	return cgroup == memcg;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

extern int
mem_cgroup_prepare_migration(struct page *page,
	struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask);
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
	struct page *oldpage, struct page *newpage, bool migration_ok);

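/*
 * Illustrative sketch only: page migration is expected to wrap the copy
 * with the prepare/end pair above (error handling elided, variable names
 * are assumptions):
 *
 *	if (mem_cgroup_prepare_migration(page, newpage, &memcg, GFP_KERNEL))
 *		...back out and retry the migration later...
 *	...copy the page contents, rc == 0 on success...
 *	mem_cgroup_end_migration(memcg, page, newpage, rc == 0);
 */
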
/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg,
				    struct zone *zone);
int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg,
				    struct zone *zone);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
					int nid, int zid, unsigned int lru_mask);
struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone);
struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
					struct task_struct *p);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx, int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}

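/*
 * Illustrative sketch only: MEMCG_NR_FILE_MAPPED is the kind of per-memcg
 * statistic these wrappers maintain. A mapping-count update path would be
 * expected to call, for example:
 *
 *	mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *	...
 *	mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *
 * when a file page gains its first or loses its last mapping.
 */
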
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg);

void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif

#else /* CONFIG_CGROUP_MEM_RES_CTLR */

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{ return 0; }
static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{ return 0; }
static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr)
{ return 0; }
static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr)
{ }
static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
{ }

static inline void mem_cgroup_uncharge_start(void)
{ }
static inline void mem_cgroup_uncharge_end(void)
{ }
static inline void mem_cgroup_uncharge_page(struct page *page)
{ }
static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{ }

static inline void mem_cgroup_add_lru_list(struct page *page, int lru)
{ }
static inline void mem_cgroup_del_lru_list(struct page *page, int lru)
{ }
static inline void mem_cgroup_rotate_reclaimable_page(struct page *page)
{ }
static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru)
{ }
static inline void mem_cgroup_del_lru(struct page *page)
{ }
static inline void
mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
{ }

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{ return NULL; }
static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{ return NULL; }
static inline int mm_match_cgroup(struct mm_struct *mm,
		struct mem_cgroup *memcg)
{ return 1; }
static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *memcg)
{ return 1; }
static inline struct cgroup_subsys_state
		*mem_cgroup_css(struct mem_cgroup *memcg)
{ return NULL; }

static inline int
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
		struct mem_cgroup **ptr, gfp_t gfp_mask)
{ return 0; }
static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
		struct page *oldpage, struct page *newpage, bool migration_ok)
{ }
static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *memcg)
{ return 0; }
static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *memcg,
						int priority)
{ }
static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *memcg,
						int priority)
{ }
static inline bool mem_cgroup_disabled(void)
{ return true; }

static inline int
mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
{ return 1; }
static inline int
mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone)
{ return 1; }
static inline unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
			     unsigned int lru_mask)
{ return 0; }
static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
{ return NULL; }
static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{ return NULL; }
static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{ }

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{ }
static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{ }
static inline unsigned long
mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, gfp_t gfp_mask,
			      unsigned long *total_scanned)
{ return 0; }
static inline u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
{ return 0; }

static inline void mem_cgroup_split_huge_fixup(struct page *head,
						struct page *tail)
{ }
static inline void
mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{ }

#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
static inline bool mem_cgroup_bad_page_check(struct page *page)
{ return false; }
static inline void mem_cgroup_print_bad_page(struct page *page)
{ }
#endif

#endif /* _LINUX_MEMCONTROL_H */