X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=kernel%2Fsched.c;h=abc5279e1774eb034ffcc8dd83f97542bb43e3ef;hb=33e870668c1a84825be845cfdae81d16f1ccb96c;hp=ea85b0d2bbf2cf8c725d504fc48494abb9f35cb8;hpb=4553dab7f3e57b70fee61739325b6ff7bb56ac9b;p=pandora-kernel.git

diff --git a/kernel/sched.c b/kernel/sched.c
index ea85b0d2bbf2..abc5279e1774 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1016,8 +1016,10 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
 	 * We must ensure this doesn't happen until the switch is completely
 	 * finished.
+	 *
+	 * Pairs with the control dependency and rmb in try_to_wake_up().
 	 */
-	smp_wmb();
+	smp_mb();
 	prev->on_cpu = 0;
 #endif
 #ifdef CONFIG_DEBUG_SPINLOCK
@@ -2082,6 +2084,19 @@ EXPORT_SYMBOL_GPL(account_system_vtime);
 
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
 
+static inline void account_reset_rq(struct rq *rq)
+{
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+	rq->prev_irq_time = 0;
+#endif
+#ifdef CONFIG_PARAVIRT
+	rq->prev_steal_time = 0;
+#endif
+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+	rq->prev_steal_time_rq = 0;
+#endif
+}
+
 #ifdef CONFIG_PARAVIRT
 static inline u64 steal_ticks(u64 steal)
 {
@@ -2930,7 +2945,6 @@ out:
  */
 int wake_up_process(struct task_struct *p)
 {
-	WARN_ON(task_is_stopped_or_traced(p));
 	return try_to_wake_up(p, TASK_NORMAL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
@@ -3191,11 +3205,11 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
 	 * schedule one last time. The schedule call will never return, and
 	 * the scheduled task must drop that reference.
-	 * The test for TASK_DEAD must occur while the runqueue locks are
-	 * still held, otherwise prev could be scheduled on another cpu, die
-	 * there before we look at prev->state, and then the reference would
-	 * be dropped twice.
-	 *		Manfred Spraul <manfred@colorfullife.com>
+	 *
+	 * We must observe prev->state before clearing prev->on_cpu (in
+	 * finish_lock_switch), otherwise a concurrent wakeup can get prev
+	 * running on another CPU and we could rave with its RUNNING -> DEAD
+	 * transition, resulting in a double drop.
 	 */
 	prev_state = prev->state;
 	finish_arch_switch(prev);
@@ -5224,8 +5238,11 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
 	if (rt_prio(prio))
 		p->sched_class = &rt_sched_class;
-	else
+	else {
+		if (rt_prio(oldprio))
+			p->rt.timeout = 0;
 		p->sched_class = &fair_sched_class;
+	}
 
 	p->prio = prio;
 
@@ -5300,6 +5317,7 @@ int can_nice(const struct task_struct *p, const int nice)
 	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
 		capable(CAP_SYS_NICE));
 }
+EXPORT_SYMBOL_GPL(can_nice);
 
 #ifdef __ARCH_WANT_SYS_NICE
 
@@ -6847,6 +6865,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 
 	case CPU_UP_PREPARE:
 		rq->calc_load_update = calc_load_update;
+		account_reset_rq(rq);
 		break;
 
 	case CPU_ONLINE:
@@ -7166,11 +7185,11 @@ static int init_rootdomain(struct root_domain *rd)
 {
 	memset(rd, 0, sizeof(*rd));
 
-	if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
 		goto out;
-	if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
 		goto free_span;
-	if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
 		goto free_online;
 
 	if (cpupri_init(&rd->cpupri) != 0)
@@ -8495,6 +8514,7 @@ void __init sched_init(void)
 #ifdef CONFIG_CGROUP_SCHED
 	list_add(&root_task_group.list, &task_groups);
 	INIT_LIST_HEAD(&root_task_group.children);
+	INIT_LIST_HEAD(&root_task_group.siblings);
 	autogroup_init(&init_task);
 #endif /* CONFIG_CGROUP_SCHED */
 
@@ -9104,6 +9124,12 @@ static inline int tg_has_rt_tasks(struct task_group *tg)
 {
 	struct task_struct *g, *p;
 
+	/*
+	 * Autogroups do not have RT tasks; see autogroup_create().
+	 */
+	if (task_group_is_autogroup(tg))
+		return 0;
+
 	do_each_thread(g, p) {
 		if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
 			return 1;
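
Note: the sketch below is a minimal user-space analogue (C11 atomics + pthreads), not kernel code. It illustrates the ordering that the smp_wmb() -> smp_mb() change in finish_lock_switch() is meant to enforce against the ->on_cpu spin and rmb in try_to_wake_up(): prev->state must be read before prev->on_cpu is cleared, otherwise a concurrent wakeup could make the dying task runnable on another CPU and the TASK_DEAD reference could be dropped twice. Every identifier in the sketch (fake_task, switcher, waker) is invented for illustration; only the ordering pattern comes from the patch. It builds with e.g. "cc -std=c11 -pthread".

/*
 * User-space sketch of the finish_lock_switch() vs try_to_wake_up() ordering.
 * Not kernel code; names are made up for illustration.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define TASK_RUNNING	0
#define TASK_DEAD	64

struct fake_task {
	atomic_int state;
	atomic_int on_cpu;
};

static struct fake_task prev;

/* Models the tail of finish_task_switch()/finish_lock_switch(). */
static void *switcher(void *arg)
{
	(void)arg;

	/* Read prev->state while prev->on_cpu is still set ... */
	int prev_state = atomic_load_explicit(&prev.state, memory_order_relaxed);

	/*
	 * ... and only then let wakeups migrate/run prev elsewhere.  The full
	 * fence stands in for smp_mb(): a store-only barrier (the old
	 * smp_wmb()) would not order the earlier *load* of prev->state
	 * against the store to prev->on_cpu, which is the race the patch
	 * closes.
	 */
	atomic_thread_fence(memory_order_seq_cst);
	atomic_store_explicit(&prev.on_cpu, 0, memory_order_relaxed);

	printf("switcher sampled state %d (expects %d)\n", prev_state, TASK_DEAD);
	return NULL;
}

/* Models the relevant part of try_to_wake_up() running on another CPU. */
static void *waker(void *arg)
{
	(void)arg;

	/* Spin until the context switch has completely finished ... */
	while (atomic_load_explicit(&prev.on_cpu, memory_order_relaxed))
		;

	/*
	 * ... then it is safe to make the task runnable; the acquire fence
	 * stands in for the control dependency + rmb, so the switcher is
	 * guaranteed to have sampled the old state before this store.
	 */
	atomic_thread_fence(memory_order_acquire);
	atomic_store_explicit(&prev.state, TASK_RUNNING, memory_order_relaxed);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	atomic_init(&prev.state, TASK_DEAD);
	atomic_init(&prev.on_cpu, 1);

	pthread_create(&a, NULL, switcher, NULL);
	pthread_create(&b, NULL, waker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}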