if (cpuset_mems_allowed_intersects(current, tsk))
return true;
}
- tsk = next_thread(tsk);
- } while (tsk != start);
+ } while_each_thread(start, tsk);
+
return false;
}
#else
}
#endif /* CONFIG_NUMA */
+/*
+ * If this is a system OOM (not a memcg OOM) and the task selected to be
+ * killed is not already running at high (RT) priorities, speed up the
+ * recovery by boosting the dying task to the lowest FIFO priority.
+ * That helps with the recovery and avoids interfering with RT tasks.
+ */
+static void boost_dying_task_prio(struct task_struct *p,
+ struct mem_cgroup *mem)
+{
+ struct sched_param param = { .sched_priority = 1 };
+
+ if (mem)
+ return;
+
+ if (!rt_task(p))
+ sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
+}
+
/*
* The process p may have detached its own ->mm while exiting or through
* use_mm(), but one or more of its subthreads may still have a valid
}
#define K(x) ((x) << (PAGE_SHIFT-10))
-static int oom_kill_task(struct task_struct *p)
+static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
{
p = find_lock_task_mm(p);
- if (!p || p->signal->oom_adj == OOM_DISABLE) {
+ if (!p) {
task_unlock(p);
return 1;
}
K(get_mm_counter(p->mm, MM_FILEPAGES)));
task_unlock(p);
- p->rt.time_slice = HZ;
+
set_tsk_thread_flag(p, TIF_MEMDIE);
force_sig(SIGKILL, p);
+
+ /*
+ * We give our sacrificial lamb high priority and access to
+ * all the memory it needs. That way it should be able to
+ * exit() and clear out its resources quickly...
+ */
+ boost_dying_task_prio(p, mem);
+
return 0;
}
#undef K
*/
if (p->flags & PF_EXITING) {
set_tsk_thread_flag(p, TIF_MEMDIE);
+ boost_dying_task_prio(p, mem);
return 0;
}
list_for_each_entry(child, &t->children, sibling) {
unsigned long child_points;
- if (child->mm == p->mm)
- continue;
-
/* badness() returns 0 if the thread is unkillable */
child_points = badness(child, mem, nodemask,
uptime.tv_sec);
}
} while_each_thread(p, t);
- return oom_kill_task(victim);
+ return oom_kill_task(victim, mem);
}
/*
*/
if (fatal_signal_pending(current)) {
set_thread_flag(TIF_MEMDIE);
+ boost_dying_task_prio(current, NULL);
return;
}
read_lock(&tasklist_lock);
if (sysctl_oom_kill_allocating_task &&
- !oom_unkillable_task(current, NULL, nodemask)) {
+ !oom_unkillable_task(current, NULL, nodemask) &&
+ (current->signal->oom_adj != OOM_DISABLE)) {
/*
* oom_kill_process() needs tasklist_lock held. If it returns
* non-zero, current could not be killed so we must fallback to