/*
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers. It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/module.h>
#include <linux/notifier.h>

int sysctl_panic_on_oom;
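
/*
 * sysctl_panic_on_oom is wired up as the vm.panic_on_oom sysctl
 * (/proc/sys/vm/panic_on_oom).  When it is set to a non-zero value, an
 * unconstrained OOM panics the box instead of killing a task:
 *
 *	echo 1 > /proc/sys/vm/panic_on_oom
 */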
/* #define DEBUG */

/**
 * badness - calculate a numeric value for how bad this task has been
 * @p: task struct of which task we should calculate
 * @uptime: current uptime in seconds
 *
 * The formula used is relatively simple and documented inline in the
 * function. The main rationale is that we want to select a good task
 * to kill when we run out of memory.
 *
 * Good in this context means that:
 * 1) we lose the minimum amount of work done
 * 2) we recover a large amount of memory
 * 3) we don't kill anything innocent of eating tons of memory
 * 4) we want to kill the minimum number of processes (one)
 * 5) we try to kill the process the user expects us to kill, this
 *    algorithm has been meticulously tuned to meet the principle
 *    of least surprise ... (be careful when you change it)
 */
unsigned long badness(struct task_struct *p, unsigned long uptime)
{
	unsigned long points, cpu_time, run_time, s;
	struct mm_struct *mm;
	struct task_struct *child;

	task_lock(p);
	mm = p->mm;
	if (!mm) {
		task_unlock(p);
		return 0;
	}
	/*
	 * swapoff can easily use up all memory, so kill those first.
	 */
	if (p->flags & PF_SWAPOFF)
		return ULONG_MAX;
	/*
	 * The memory size of the process is the basis for the badness.
	 */
	points = mm->total_vm;
	/*
	 * After this unlock we can no longer dereference local variable `mm'
	 */
	task_unlock(p);
	/*
	 * Processes which fork a lot of child processes are likely
	 * a good choice. We add half the vmsize of the children if they
	 * have an own mm. This prevents forking servers from flooding the
	 * machine with an endless number of children. In case a single
	 * child is eating the vast majority of memory, adding only half
	 * to the parent will make the child our kill candidate of choice.
	 */
	list_for_each_entry(child, &p->children, sibling) {
		task_lock(child);
		if (child->mm != mm && child->mm)
			points += child->mm->total_vm/2 + 1;
		task_unlock(child);
	}

	/*
	 * CPU time is in tens of seconds and run time is in thousands
	 * of seconds. There is no particular reason for this other than
	 * that it turned out to work very well in practice.
	 */
	cpu_time = (cputime_to_jiffies(p->utime) + cputime_to_jiffies(p->stime))
		>> (SHIFT_HZ + 3);
	if (uptime >= p->start_time.tv_sec)
		run_time = (uptime - p->start_time.tv_sec) >> 10;
	else
		run_time = 0;

	s = int_sqrt(cpu_time);
	if (s)
		points /= s;
	s = int_sqrt(int_sqrt(run_time));
	if (s)
		points /= s;

	/*
	 * Niced processes are most likely less important, so double
	 * their badness points.
	 */
	if (task_nice(p) > 0)
		points *= 2;

	/*
	 * Superuser processes are usually more important, so we make it
	 * less likely that we kill those.
	 */
	if (cap_t(p->cap_effective) & CAP_TO_MASK(CAP_SYS_ADMIN) ||
				p->uid == 0 || p->euid == 0)
		points /= 4;

	/*
	 * We don't want to kill a process with direct hardware access.
	 * Not only could that mess up the hardware, but usually users
	 * tend to only have this flag set on applications they think
	 * of as important.
	 */
	if (cap_t(p->cap_effective) & CAP_TO_MASK(CAP_SYS_RAWIO))
		points /= 4;

	/*
	 * If p's nodes don't overlap ours, it may still help to kill p
	 * because p may have allocated or otherwise mapped memory on
	 * this node before. However it will be less likely.
	 */
	if (!cpuset_excl_nodes_overlap(p))
		points /= 8;

	/*
	 * Adjust the score by oomkilladj.
	 */
	if (p->oomkilladj) {
		if (p->oomkilladj > 0)
			points <<= p->oomkilladj;
		else
			points >>= -(p->oomkilladj);
	}

#ifdef DEBUG
	printk(KERN_DEBUG "OOMkill: task %d (%s) got %lu points\n",
	p->pid, p->comm, points);
#endif
	return points;
}
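
/*
 * A rough worked example of the scoring above (illustrative numbers only,
 * assuming HZ=1000 and therefore SHIFT_HZ=10):
 *
 *	total_vm = 100000 pages                      -> points = 100000
 *	640 s of CPU: 640000 jiffies >> 13 = 78,
 *	int_sqrt(78) = 8                             -> points = 100000/8 = 12500
 *	up ~18 h: 65536 s >> 10 = 64,
 *	int_sqrt(int_sqrt(64)) = 2                   -> points = 12500/2  = 6250
 *	positively niced                             -> points *= 2       = 12500
 *	owned by root                                -> points /= 4       = 3125
 *	oomkilladj = +2 (via /proc/<pid>/oom_adj)    -> points <<= 2      = 12500
 *
 * The highest-scoring task is the one select_bad_process() below will pick;
 * writing OOM_DISABLE to /proc/<pid>/oom_adj exempts a task from selection.
 */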

/*
 * Types of limitations to the nodes from which allocations may occur
 */
#define CONSTRAINT_NONE 1
#define CONSTRAINT_MEMORY_POLICY 2
#define CONSTRAINT_CPUSET 3

/*
 * Determine the type of allocation constraint.
 */
static inline int constrained_alloc(struct zonelist *zonelist, gfp_t gfp_mask)
{
#ifdef CONFIG_NUMA
	struct zone **z;
	nodemask_t nodes = node_online_map;

	for (z = zonelist->zones; *z; z++)
		if (cpuset_zone_allowed(*z, gfp_mask))
			node_clear((*z)->zone_pgdat->node_id,
					nodes);
		else
			return CONSTRAINT_CPUSET;

	if (!nodes_empty(nodes))
		return CONSTRAINT_MEMORY_POLICY;
#endif

	return CONSTRAINT_NONE;
}
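
/*
 * For example, with two online nodes: if every zone in the zonelist is
 * allowed by current's cpuset, both node bits get cleared and the result
 * is CONSTRAINT_NONE.  If some zone in the zonelist is not cpuset-allowed,
 * the allocation was narrowed by a cpuset (CONSTRAINT_CPUSET).  If the
 * zonelist itself never covers one of the online nodes (e.g. an MPOL_BIND
 * mempolicy), that node's bit survives the loop and the result is
 * CONSTRAINT_MEMORY_POLICY.
 */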

/*
 * Simple selection loop. We choose the process with the highest
 * number of 'points'. We expect the caller will lock the tasklist.
 *
 * (not docbooked, we don't want this one cluttering up the manual)
 */
static struct task_struct *select_bad_process(unsigned long *ppoints)
{
	struct task_struct *g, *p;
	struct task_struct *chosen = NULL;
	struct timespec uptime;
	*ppoints = 0;

	do_posix_clock_monotonic_gettime(&uptime);
	do_each_thread(g, p) {
		unsigned long points;
		int releasing;

		/* skip the init task with pid == 1 */
		if (p->pid == 1)
			continue;

		/*
		 * This is in the process of releasing memory so wait for it
		 * to finish before killing some other task by mistake.
		 *
		 * However, if p is the current task, we allow the 'kill' to
		 * go ahead if it is exiting: this will simply set TIF_MEMDIE,
		 * which will allow it to gain access to memory reserves in
		 * the process of exiting and releasing its resources.
		 * Otherwise we could get an OOM deadlock.
		 */
		releasing = test_tsk_thread_flag(p, TIF_MEMDIE) ||
						p->flags & PF_EXITING;
		if (releasing) {
			/* PF_DEAD tasks have already released their mm */
			if (p->flags & PF_DEAD)
				continue;
			if (p->flags & PF_EXITING && p == current) {
				chosen = p;
				*ppoints = ULONG_MAX;
				break;
			}
			return ERR_PTR(-1UL);
		}
		if (p->oomkilladj == OOM_DISABLE)
			continue;

		points = badness(p, uptime.tv_sec);
		if (points > *ppoints || !chosen) {
			chosen = p;
			*ppoints = points;
		}
	} while_each_thread(g, p);

	return chosen;
}

/*
 * We must be careful though to never send SIGKILL to a process with
 * CAP_SYS_RAW_IO set, send SIGTERM instead (but it's unlikely that
 * we select a process with CAP_SYS_RAW_IO set).
 */
static void __oom_kill_task(struct task_struct *p, const char *message)
{
	if (p->pid == 1) {
		printk(KERN_WARNING "tried to kill init!\n");
		return;
	}

	task_lock(p);
	if (!p->mm || p->mm == &init_mm) {
		printk(KERN_WARNING "tried to kill an mm-less task!\n");
		task_unlock(p);
		return;
	}
	task_unlock(p);

	if (message)
		printk(KERN_ERR "%s: Killed process %d (%s).\n",
				message, p->pid, p->comm);

	/*
	 * We give our sacrificial lamb high priority and access to
	 * all the memory it needs. That way it should be able to
	 * exit() and clear out its resources quickly...
	 */
	p->time_slice = HZ;
	set_tsk_thread_flag(p, TIF_MEMDIE);

	force_sig(SIGKILL, p);
}
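
/*
 * TIF_MEMDIE is what actually buys the victim its extra memory: the page
 * allocator treats a TIF_MEMDIE task much like a PF_MEMALLOC one and lets
 * it allocate below the watermarks, along these lines (a paraphrase of the
 * __alloc_pages() path in mm/page_alloc.c, not a verbatim copy):
 *
 *	if (((p->flags & PF_MEMALLOC) ||
 *	     unlikely(test_thread_flag(TIF_MEMDIE))) && !in_interrupt())
 *		page = get_page_from_freelist(gfp_mask, order,
 *					zonelist, ALLOC_NO_WATERMARKS);
 *
 * That is what lets the dying task finish exit() while ordinary
 * allocations are already failing.
 */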

static int oom_kill_task(struct task_struct *p, const char *message)
{
	struct mm_struct *mm;
	struct task_struct *g, *q;

	mm = p->mm;

	/* WARNING: mm may not be dereferenced since we did not obtain its
	 * value from get_task_mm(p). This is OK since all we need to do is
	 * compare mm to q->mm below.
	 *
	 * Furthermore, even if mm contains a non-NULL value, p->mm may
	 * change to NULL at any time since we do not hold task_lock(p).
	 * However, this is of no concern to us.
	 */
	if (mm == NULL || mm == &init_mm)
		return 1;

	__oom_kill_task(p, message);
	/*
	 * kill all processes that share the ->mm (i.e. all threads),
	 * but are in a different thread group
	 */
	do_each_thread(g, q)
		if (q->mm == mm && q->tgid != p->tgid)
			__oom_kill_task(q, message);
	while_each_thread(g, q);

	return 0;
}

static int oom_kill_process(struct task_struct *p, unsigned long points,
		const char *message)
{
	struct task_struct *c;
	struct list_head *tsk;

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly
	 */
	if (p->flags & PF_EXITING) {
		__oom_kill_task(p, NULL);
		return 0;
	}

	printk(KERN_ERR "Out of Memory: Kill process %d (%s) score %li"
			" and children.\n", p->pid, p->comm, points);
	/* Try to kill a child first */
	list_for_each(tsk, &p->children) {
		c = list_entry(tsk, struct task_struct, sibling);
		if (c->mm == p->mm)
			continue;
		if (!oom_kill_task(c, message))
			return 0;
	}
	return oom_kill_task(p, message);
}

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);
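
/*
 * Example of a (hypothetical) user of this notifier chain: a driver that
 * can drop a private cache when the system is about to go OOM.  The
 * callback adds the number of pages it freed to the unsigned long passed
 * as the data pointer, which out_of_memory() below checks before picking
 * a victim:
 *
 *	static int example_oom_notify(struct notifier_block *nb,
 *				      unsigned long dummy, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *
 *		*freed += example_shrink_private_cache();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_oom_nb = {
 *		.notifier_call = example_oom_notify,
 *	};
 *
 *	register_oom_notifier(&example_oom_nb);
 *
 * example_shrink_private_cache() is a made-up helper standing in for
 * whatever memory the driver can actually give back.
 */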

/**
 * out_of_memory - kill the "best" process when we run out of memory
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR try to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
{
	struct task_struct *p;
	unsigned long points = 0;
	unsigned long freed = 0;

	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
	if (freed > 0)
		/* Got some memory back in the last second. */
		return;

	if (printk_ratelimit()) {
		printk("oom-killer: gfp_mask=0x%x, order=%d\n",
			gfp_mask, order);
		dump_stack();
		show_mem();
	}

	cpuset_lock();
	read_lock(&tasklist_lock);

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA) that may require different handling.
	 */
	switch (constrained_alloc(zonelist, gfp_mask)) {
	case CONSTRAINT_MEMORY_POLICY:
		oom_kill_process(current, points,
				"No available memory (MPOL_BIND)");
		break;
	case CONSTRAINT_CPUSET:
		oom_kill_process(current, points,
				"No available memory in cpuset");
		break;
	case CONSTRAINT_NONE:
		if (sysctl_panic_on_oom)
			panic("out of memory. panic_on_oom is selected\n");
retry:
		/*
		 * Rambo mode: Shoot down a process and hope it solves whatever
		 * issues we may have.
		 */
		p = select_bad_process(&points);

		if (PTR_ERR(p) == -1UL)
			goto out;

		/* Found nothing?!?! Either we hang forever, or we panic. */
		if (!p) {
			read_unlock(&tasklist_lock);
			cpuset_unlock();
			panic("Out of memory and no killable processes...\n");
		}

		if (oom_kill_process(p, points, "Out of memory"))
			goto retry;

		break;
	}

out:
	read_unlock(&tasklist_lock);
	cpuset_unlock();

	/*
	 * Give "p" a good chance of killing itself before we
	 * retry to allocate memory unless "p" is current
	 */
	if (!test_thread_flag(TIF_MEMDIE))
		schedule_timeout_uninterruptible(1);
}