1 #include <linux/module.h>
2 #include <linux/kernel.h>
3 #include <linux/mman.h>
4 #include <linux/init.h>
5 #include <linux/security.h>
6 #include <linux/sysctl.h>
7 #include <linux/swap.h>
8 #include <linux/kobject.h>
9 #include <linux/pagemap.h>
10 #include <linux/hugetlb.h>
11 #include <linux/sysfs.h>
12 #include <linux/oom.h>
/* Log prefix and LSM registration name for this module. */
14 #define MY_NAME "lowmem"
/* Capacity of the allowed_uids whitelist exposed via sysctl. */
16 #define LOWMEM_MAX_UIDS 8
/*
 * Binary sysctl ids for the vm.lowmem_* entries below.
 * NOTE(review): the enum opener and the VM_LOWMEM_DENY /
 * VM_LOWMEM_USED_PAGES members referenced by lowmem_table are not
 * visible in this excerpt — confirm they exist in the full file.
 */
20 VM_LOWMEM_LEVEL1_NOTIFY,
21 VM_LOWMEM_LEVEL2_NOTIFY,
22 VM_LOWMEM_NR_DECAY_PAGES,
23 VM_LOWMEM_ALLOWED_UIDS,
24 VM_LOWMEM_ALLOWED_PAGES,
/* Deny allocations once estimated usage reaches this % of RAM (0 = off). */
28 static unsigned int deny_percentage;
/* Low/high notification thresholds as % of RAM (0 = feature disabled). */
29 static unsigned int l1_notify, l2_notify;
/* Hysteresis: usage must fall nr_decay_pages below level2 to drop state. */
30 static unsigned int nr_decay_pages;
/* Published via sysctl: total accountable pages at last evaluation. */
31 static unsigned long allowed_pages;
/* Published via sysctl: last computed page-usage estimate. */
32 static long used_pages;
/* UIDs exempt from denial; zero-terminated vector, settable via sysctl. */
33 static unsigned int allowed_uids[LOWMEM_MAX_UIDS];
/* Range bounds for proc_dointvec_minmax on lowmem_allowed_uids. */
34 static unsigned int minuid = 1;
35 static unsigned int maxuid = 65535;
/*
 * /proc/sys entries (old-style ctl_table with binary ctl_name and
 * strategy callbacks).
 * NOTE(review): per-entry braces, .mode, and some .data lines are not
 * visible in this excerpt — confirm against the full file.
 */
37 static ctl_table lowmem_table[] = {
/* lowmem_deny_watermark: deny threshold in percent. */
39 .ctl_name = VM_LOWMEM_DENY,
40 .procname = "lowmem_deny_watermark",
41 .data = &deny_percentage,
42 .maxlen = sizeof(unsigned int),
45 .proc_handler = &proc_dointvec,
46 .strategy = &sysctl_intvec,
/* lowmem_notify_low: level-1 threshold (presumably &l1_notify — the
 * .data line is not visible here; verify). */
48 .ctl_name = VM_LOWMEM_LEVEL1_NOTIFY,
49 .procname = "lowmem_notify_low",
51 .maxlen = sizeof(unsigned int),
54 .proc_handler = &proc_dointvec,
55 .strategy = &sysctl_intvec,
/* lowmem_notify_high: level-2 threshold (presumably &l2_notify; verify). */
57 .ctl_name = VM_LOWMEM_LEVEL2_NOTIFY,
58 .procname = "lowmem_notify_high",
60 .maxlen = sizeof(unsigned int),
63 .proc_handler = &proc_dointvec,
64 .strategy = &sysctl_intvec,
/* lowmem_nr_decay_pages: hysteresis for the high watermark. */
66 .ctl_name = VM_LOWMEM_NR_DECAY_PAGES,
67 .procname = "lowmem_nr_decay_pages",
68 .data = &nr_decay_pages,
69 .maxlen = sizeof(unsigned int),
72 .proc_handler = &proc_dointvec,
73 .strategy = &sysctl_intvec,
/* lowmem_allowed_uids: UID whitelist, presumably clamped to
 * [minuid, maxuid] via .extra1/.extra2 (those lines not visible). */
75 .ctl_name = VM_LOWMEM_ALLOWED_UIDS,
76 .procname = "lowmem_allowed_uids",
77 .data = &allowed_uids,
78 .maxlen = LOWMEM_MAX_UIDS * sizeof(unsigned int),
81 .proc_handler = &proc_dointvec_minmax,
82 .strategy = &sysctl_intvec,
/*
 * lowmem_allowed_pages: view of total accountable pages.
 * NOTE(review): proc_dointvec reads/writes int-sized data, but .data
 * points to an unsigned long with maxlen sizeof(unsigned long) — on
 * 64-bit this is a type/size mismatch; proc_doulongvec_minmax would
 * match the declared type.  BUG to fix.
 */
86 .ctl_name = VM_LOWMEM_ALLOWED_PAGES,
87 .procname = "lowmem_allowed_pages",
88 .data = &allowed_pages,
89 .maxlen = sizeof(unsigned long),
92 .proc_handler = &proc_dointvec,
93 .strategy = &sysctl_intvec,
/* lowmem_used_pages: last usage estimate (long — same int-handler
 * size mismatch as noted above; .data line not visible here). */
95 .ctl_name = VM_LOWMEM_USED_PAGES,
96 .procname = "lowmem_used_pages",
98 .maxlen = sizeof(long),
101 .proc_handler = &proc_dointvec,
102 .strategy = &sysctl_intvec,
/*
 * Root table hooking lowmem_table into /proc/sys — presumably under
 * "vm" (the parent entry's ctl_name/procname lines are not visible in
 * this excerpt; verify).
 */
108 static ctl_table lowmem_root_table[] = {
113 .child = lowmem_table,
/*
 * Declare a read-only sysfs attribute <name>_attr whose show routine is
 * <name>_show (pre-2.6.25 struct subsystem / subsys_attribute API).
 */
119 #define KERNEL_ATTR_RO(_name) \
120 static struct subsys_attribute _name##_attr = __ATTR_RO(_name)
/* Current 0/1 state of each watermark, as last published to sysfs. */
122 static int low_watermark_reached, high_watermark_reached;
/* sysfs show: report the low-watermark state as "0\n" or "1\n". */
124 static ssize_t low_watermark_show(struct subsystem *subsys, char *page)
126 return sprintf(page, "%u\n", low_watermark_reached);
/* sysfs show: report the high-watermark state as "0\n" or "1\n". */
129 static ssize_t high_watermark_show(struct subsystem *subsys, char *page)
131 return sprintf(page, "%u\n", high_watermark_reached);
/* Instantiate the read-only low_watermark / high_watermark attributes. */
134 KERNEL_ATTR_RO(low_watermark);
135 KERNEL_ATTR_RO(high_watermark);
/*
 * Record a new low-watermark state; on a change, wake any pollers of
 * the "low_watermark" sysfs file via sysfs_notify().
 */
137 static void low_watermark_state(int new_state)
141 if (low_watermark_reached != new_state) {
142 low_watermark_reached = new_state;
147 sysfs_notify(&kernel_subsys.kset.kobj, NULL, "low_watermark");
/*
 * Record a new high-watermark state; on a change, wake any pollers of
 * the "high_watermark" sysfs file via sysfs_notify().
 */
150 static void high_watermark_state(int new_state)
154 if (high_watermark_reached != new_state) {
155 high_watermark_reached = new_state;
160 sysfs_notify(&kernel_subsys.kset.kobj, NULL, "high_watermark");
/*
 * vm_enough_memory LSM hook: estimate current memory usage and decide
 * whether a new commitment of @pages may proceed.  Also drives the
 * low/high watermark sysfs notifications as a side effect.
 * NOTE(review): several lines (return statements, the declaration of
 * loop variable `i`, closing braces) are missing from this excerpt.
 */
163 static int low_vm_enough_memory(long pages)
165 unsigned long free, allowed;
166 long deny_threshold, level1, level2, used;
167 int cap_sys_admin = 0, notify;
/* CAP_SYS_ADMIN callers get the more permissive accounting below. */
169 if (cap_capable(current, CAP_SYS_ADMIN) == 0)
172 /* We activate ourselves only after both parameters have been
174 if (deny_percentage == 0 || l1_notify == 0 || l2_notify == 0)
175 return __vm_enough_memory(pages, cap_sys_admin);
/* Thresholds recomputed on every call from RAM minus hugetlb pages. */
177 allowed = totalram_pages - hugetlb_total_pages();
178 deny_threshold = allowed * deny_percentage / 100;
179 level1 = allowed * l1_notify / 100;
180 level2 = allowed * l2_notify / 100;
/* Account the request up front; undone on the deny path below. */
182 vm_acct_memory(pages);
184 /* Easily freed pages when under VM pressure or direct reclaim */
185 free = global_page_state(NR_FILE_PAGES);
186 free += nr_swap_pages;
187 free += global_page_state(NR_SLAB_RECLAIMABLE);
/* NOTE(review): `used` is signed while allowed/free are unsigned —
 * the check below relies on the value fitting in a long. */
189 used = allowed - free;
190 if (unlikely(used < 0))
193 /* The hot path, plenty of memory */
194 if (likely(used < level1))
197 /* No luck, lets make it more expensive and try again.. */
198 used -= nr_free_pages();
/* Past the deny watermark: raise both states, then apply exemptions. */
200 if (used >= deny_threshold) {
203 allowed_pages = allowed;
205 low_watermark_state(1);
206 high_watermark_state(1);
207 /* Memory allocations by root are always allowed */
211 /* OOM unkillable process is allowed to consume memory */
212 if (current->oomkilladj == OOM_DISABLE)
215 /* uids from allowed_uids vector are also allowed no matter what */
216 for (i = 0; i < LOWMEM_MAX_UIDS && allowed_uids[i]; i++)
217 if (current->uid == allowed_uids[i])
/* Deny: roll back the accounting and log it (rate-limited). */
220 vm_unacct_memory(pages);
221 if (printk_ratelimit()) {
222 printk(MY_NAME ": denying memory allocation to process %d (%s)\n",
223 current->pid, current->comm);
229 /* See if we need to notify level 1 */
230 low_watermark_state(used >= level1);
/* NOTE(review): the opener of the comment block below is not visible
 * in this excerpt. */
233 * In the level 2 notification case things are more complicated,
234 * as the level that we drop the state and send a notification
235 * should be lower than when it is first triggered. Having this
236 * on the same watermark level ends up bouncing back and forth
237 * when applications are being stupid.
/* Hysteresis: only drop high state once nr_decay_pages below level2. */
239 notify = used >= level2;
240 if (notify || used + nr_decay_pages < level2)
241 high_watermark_state(notify);
243 /* We have plenty of memory */
244 allowed_pages = allowed;
/*
 * LSM hook table: stock capability behaviour for every hook except
 * vm_enough_memory, which this module overrides with its own policy.
 */
249 static struct security_operations lowmem_security_ops = {
250 /* Use the capability functions for some of the hooks */
251 .ptrace = cap_ptrace,
252 .capget = cap_capget,
253 .capset_check = cap_capset_check,
254 .capset_set = cap_capset_set,
255 .capable = cap_capable,
257 .bprm_apply_creds = cap_bprm_apply_creds,
258 .bprm_set_security = cap_bprm_set_security,
260 .task_post_setuid = cap_task_post_setuid,
261 .task_reparent_to_init = cap_task_reparent_to_init,
/* The one hook this module actually implements. */
262 .vm_enough_memory = low_vm_enough_memory,
/* Handle from register_sysctl_table(); needed to unregister on exit. */
265 static struct ctl_table_header *lowmem_table_header;
266 /* flag to keep track of how we were registered */
267 static int secondary;
/* Attribute group exported on the kernel subsystem's kobject; the
 * *_attr objects come from the KERNEL_ATTR_RO() uses earlier. */
269 static struct attribute *lowmem_attrs[] = {
270 &low_watermark_attr.attr,
271 &high_watermark_attr.attr,
275 static struct attribute_group lowmem_attr_group = {
276 .attrs = lowmem_attrs,
/*
 * Module init: register as primary LSM (falling back to secondary via
 * mod_reg_security), then register the sysctl table and the sysfs
 * attribute group.
 * NOTE(review): the error-return lines and the `secondary = 1`
 * assignment are not visible in this excerpt.
 */
279 static int __init lowmem_init(void)
283 /* register ourselves with the security framework */
284 if (register_security(&lowmem_security_ops)) {
285 printk(KERN_ERR MY_NAME ": Failure registering with the kernel\n");
286 /* try registering with primary module */
287 if (mod_reg_security(MY_NAME, &lowmem_security_ops)) {
/*
 * NOTE(review): BUG — this printk lacks the MY_NAME prefix every other
 * message uses, and the split string literal concatenates to
 * "...the primarysecurity module" (missing space between the parts).
 */
288 printk(KERN_ERR ": Failure registering with the primary"
289 "security module.\n");
295 /* initialize the uids vector */
296 memset(allowed_uids, 0, sizeof(allowed_uids));
298 lowmem_table_header = register_sysctl_table(lowmem_root_table);
299 if (unlikely(!lowmem_table_header))
/* Point the kernel subsystem kobject at its own kset before use. */
302 kernel_subsys.kset.kobj.kset = &kernel_subsys.kset;
/* Expose low_watermark/high_watermark under the kernel subsystem. */
304 r = sysfs_create_group(&kernel_subsys.kset.kobj,
309 printk(KERN_INFO MY_NAME ": Module initialized.\n");
/*
 * Module exit: undo the registrations done in lowmem_init.  Presumably
 * the `secondary` flag selects which unregister path runs — the branch
 * lines are not visible in this excerpt; verify against the full file.
 */
314 static void __exit lowmem_exit(void)
316 /* remove ourselves from the security framework */
318 if (mod_unreg_security(MY_NAME, &lowmem_security_ops))
319 printk(KERN_ERR MY_NAME ": Failure unregistering "
320 "with the primary security module.\n");
322 if (unregister_security(&lowmem_security_ops)) {
323 printk(KERN_ERR MY_NAME ": Failure unregistering "
324 "with the kernel.\n");
/* Tear down the sysctl entries and the sysfs attribute group. */
328 unregister_sysctl_table(lowmem_table_header);
330 sysfs_remove_group(&kernel_subsys.kset.kobj, &lowmem_attr_group);
332 printk(KERN_INFO MY_NAME ": Module removed.\n");
/* Standard module entry/exit points and metadata. */
335 module_init(lowmem_init);
336 module_exit(lowmem_exit);
338 MODULE_DESCRIPTION("Low watermark LSM module");
339 MODULE_LICENSE("GPL");