X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?p=pandora-kernel.git;a=blobdiff_plain;f=kernel%2Fsched.c;h=f4235a7abdd9781f0d48884281c8ac46ab5039c9;hp=cadc9586b807f57a0ef51028bf22fa9668e2183d;hb=a592f46075d79ff7348e624e7d871dca730e70f2;hpb=f7c3ff94a296f2daf8f5f2e8be3e459636c3fdfd

diff --git a/kernel/sched.c b/kernel/sched.c
index cadc9586b807..f4235a7abdd9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1016,8 +1016,10 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
 	 * We must ensure this doesn't happen until the switch is completely
 	 * finished.
+	 *
+	 * Pairs with the control dependency and rmb in try_to_wake_up().
 	 */
-	smp_wmb();
+	smp_mb();
 	prev->on_cpu = 0;
 #endif
 #ifdef CONFIG_DEBUG_SPINLOCK
@@ -2930,7 +2932,6 @@ out:
  */
 int wake_up_process(struct task_struct *p)
 {
-	WARN_ON(task_is_stopped_or_traced(p));
 	return try_to_wake_up(p, TASK_NORMAL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
@@ -3191,11 +3192,11 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
 	 * schedule one last time. The schedule call will never return, and
 	 * the scheduled task must drop that reference.
-	 * The test for TASK_DEAD must occur while the runqueue locks are
-	 * still held, otherwise prev could be scheduled on another cpu, die
-	 * there before we look at prev->state, and then the reference would
-	 * be dropped twice.
-	 *		Manfred Spraul <manfred@colorfullife.com>
+	 *
+	 * We must observe prev->state before clearing prev->on_cpu (in
+	 * finish_lock_switch), otherwise a concurrent wakeup can get prev
+	 * running on another CPU and we could race with its RUNNING -> DEAD
+	 * transition, resulting in a double drop.
 	 */
 	prev_state = prev->state;
 	finish_arch_switch(prev);
@@ -5224,8 +5225,11 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
 	if (rt_prio(prio))
 		p->sched_class = &rt_sched_class;
-	else
+	else {
+		if (rt_prio(oldprio))
+			p->rt.timeout = 0;
 		p->sched_class = &fair_sched_class;
+	}
 
 	p->prio = prio;
 
@@ -7167,11 +7171,11 @@ static int init_rootdomain(struct root_domain *rd)
 {
 	memset(rd, 0, sizeof(*rd));
 
-	if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
 		goto out;
-	if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
 		goto free_span;
-	if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
 		goto free_online;
 
 	if (cpupri_init(&rd->cpupri) != 0)
@@ -9105,6 +9109,12 @@ static inline int tg_has_rt_tasks(struct task_group *tg)
 {
 	struct task_struct *g, *p;
 
+	/*
+	 * Autogroups do not have RT tasks; see autogroup_create().
+	 */
+	if (task_group_is_autogroup(tg))
+		return 0;
+
 	do_each_thread(g, p) {
 		if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
 			return 1;
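
Note on the first hunk (finish_lock_switch): smp_wmb() only orders stores
against stores, so it cannot keep the earlier *load* of prev->state (the
TASK_DEAD sample that the finish_task_switch hunk documents) ahead of the
store prev->on_cpu = 0; a full smp_mb() can. Below is a toy model of that
handshake, not the kernel's code: struct toy_task, toy_finish_switch() and
toy_wake() are invented names, and the barrier primitives are assumed to
come from the usual <linux/...> headers.

	/*
	 * Toy model of the ->on_cpu handshake; illustrative only.
	 */
	struct toy_task {
		long	state;		/* e.g. TASK_RUNNING, TASK_DEAD */
		int	on_cpu;		/* 1 while still running on the old CPU */
	};

	/* CPU A: finishing the switch away from prev */
	static long toy_finish_switch(struct toy_task *prev)
	{
		long prev_state = prev->state;	/* the TASK_DEAD sample ... */

		smp_mb();		/* ... must not sink below the store: */
		prev->on_cpu = 0;	/* publish; wakeups may now proceed */

		return prev_state;	/* TASK_DEAD => this CPU drops the ref */
	}

	/* CPU B: waking prev, shaped like the 3.2-era try_to_wake_up() */
	static void toy_wake(struct toy_task *p)
	{
		while (p->on_cpu)	/* wait until the old CPU is done */
			cpu_relax();
		smp_rmb();		/* pairs with the smp_mb() on CPU A */
		p->state = TASK_RUNNING;
	}

With only a write barrier, CPU B could observe on_cpu == 0 while CPU A's
load of prev->state is still pending; prev could then run through its
RUNNING -> DEAD transition on CPU B, CPU A's late load would also see
TASK_DEAD, and both CPUs would drop the task reference.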
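Note on the rt_mutex_setprio() hunk: p->rt.timeout counts scheduler ticks
spent running as RT and feeds the RLIMIT_RTTIME watchdog, but nothing
cleared it when a PI-deboosted task dropped back to the fair class, so the
next boost resumed from the stale count and could trip the limit almost
immediately. Resetting it on the RT -> fair transition fixes that. For
reference, a paraphrased and simplified shape of the 3.2 watchdog (see
watchdog() in the RT scheduling class; comments added here):

	static void watchdog(struct rq *rq, struct task_struct *p)
	{
		unsigned long soft, hard;

		soft = task_rlimit(p, RLIMIT_RTTIME);
		hard = task_rlimit_max(p, RLIMIT_RTTIME);

		if (soft != RLIM_INFINITY) {
			/* one more tick as RT; before this hunk, never
			 * reset when the task was deboosted */
			p->rt.timeout++;
			if (p->rt.timeout > DIV_ROUND_UP(min(soft, hard),
							 USEC_PER_SEC/HZ))
				/* arm the posix-cpu-timer check that
				 * delivers SIGXCPU/SIGKILL */
				p->cputime_expires.sched_exp =
					p->se.sum_exec_runtime;
		}
	}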
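Note on the init_rootdomain() hunk: with CONFIG_CPUMASK_OFFSTACK=y,
alloc_cpumask_var() is a real allocation whose bits start out undefined,
and the memset(rd, 0, sizeof(*rd)) above it only zeroes the cpumask_var_t
handles inside struct root_domain, not the storage they point at.
zalloc_cpumask_var() allocates and zeroes in one step. A sketch of the
distinction (toy_init() is an invented name):

	static int toy_init(cpumask_var_t *maskp)
	{
		/*
		 * alloc_cpumask_var(): off-stack, this is a kmalloc() and
		 * the bits start out undefined, so clear them by hand ...
		 */
		if (!alloc_cpumask_var(maskp, GFP_KERNEL))
			return -ENOMEM;
		cpumask_clear(*maskp);

		/*
		 * ... which is exactly what zalloc_cpumask_var() bundles:
		 *	if (!zalloc_cpumask_var(maskp, GFP_KERNEL))
		 *		return -ENOMEM;
		 */
		return 0;
	}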