From aee1f8b87e203b0525f340d2fde94f5a82f6c4bd Mon Sep 17 00:00:00 2001
From: Mike Galbraith
Date: Tue, 7 Aug 2012 10:02:38 +0200
Subject: [PATCH] sched,rt: fix isolated CPUs leaving root_task_group
 indefinitely throttled

commit e221d028bb08b47e624c5f0a31732c642db9d19a upstream.

Root task group bandwidth replenishment must service all CPUs, regardless
of where the timer was last started, and regardless of the isolation
mechanism, lest 'Quoth the Raven, "Nevermore"' become rt scheduling policy.

Signed-off-by: Mike Galbraith
Signed-off-by: Peter Zijlstra
Link: http://lkml.kernel.org/r/1344326558.6968.25.camel@marge.simpson.net
Signed-off-by: Thomas Gleixner
[bwh: Backported to 3.2: adjust filename]
Signed-off-by: Ben Hutchings
---
 kernel/sched_rt.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index ba756ab688a8..d0fff8139377 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -587,6 +587,19 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 		return 1;
 
 	span = sched_rt_period_mask();
+#ifdef CONFIG_RT_GROUP_SCHED
+	/*
+	 * FIXME: isolated CPUs should really leave the root task group,
+	 * whether they are isolcpus or were isolated via cpusets, lest
+	 * the timer run on a CPU which does not service all runqueues,
+	 * potentially leaving other CPUs indefinitely throttled. If
+	 * isolation is really required, the user will turn the throttle
+	 * off to kill the perturbations it causes anyway. Meanwhile,
+	 * this maintains functionality for boot and/or troubleshooting.
+	 */
+	if (rt_b == &root_task_group.rt_bandwidth)
+		span = cpu_online_mask;
+#endif
 	for_each_cpu(i, span) {
 		int enqueue = 0;
 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
-- 
2.39.5
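
The FIXME in the hunk above notes that, where CPU isolation is really required, the user will simply turn the RT throttle off. That knob is the standard kernel.sched_rt_runtime_us sysctl, where -1 means unlimited runtime. Below is a minimal, illustrative C sketch (not part of the patch) that reads the current value and then disables throttling, assuming the usual procfs path and root privileges.

/*
 * Illustrative sketch only, not part of the patch: inspect and disable
 * the RT throttle via kernel.sched_rt_runtime_us, the workaround the
 * FIXME above alludes to. Assumes the standard procfs path and must
 * run as root to write the setting.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/proc/sys/kernel/sched_rt_runtime_us";
	long runtime_us = 0;
	FILE *f = fopen(path, "r");

	if (!f || fscanf(f, "%ld", &runtime_us) != 1) {
		perror(path);
		return 1;
	}
	fclose(f);
	printf("current sched_rt_runtime_us: %ld\n", runtime_us);

	/* -1 disables RT throttling entirely; use with care. */
	f = fopen(path, "w");
	if (!f || fprintf(f, "-1\n") < 0 || fclose(f) != 0) {
		perror(path);
		return 1;
	}
	printf("RT throttling disabled\n");
	return 0;
}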