pandora: defconfig: update
[pandora-kernel.git] / kernel / sched.c
index 596b3ca..af9a268 100644
@@ -32,7 +32,7 @@
 #include <linux/init.h>
 #include <linux/uaccess.h>
 #include <linux/highmem.h>
-#include <asm/mmu_context.h>
+#include <linux/mmu_context.h>
 #include <linux/interrupt.h>
 #include <linux/capability.h>
 #include <linux/completion.h>
@@ -1016,8 +1016,10 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
         * After ->on_cpu is cleared, the task can be moved to a different CPU.
         * We must ensure this doesn't happen until the switch is completely
         * finished.
+        *
+        * Pairs with the control dependency and rmb in try_to_wake_up().
         */
-       smp_wmb();
+       smp_mb();
        prev->on_cpu = 0;
 #endif
 #ifdef CONFIG_DEBUG_SPINLOCK
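
The promotion from smp_wmb() to smp_mb() is needed because a write barrier
only orders stores against stores: the *load* of prev->state performed
earlier in finish_task_switch() must also complete before the store that
clears prev->on_cpu, and only a full barrier gives load->store ordering
(newer kernels express the same constraint with
smp_store_release(&prev->on_cpu, 0)). A rough userspace analogue in C11
atomics; the variable names here are illustrative, not the kernel's:

    #include <stdatomic.h>

    atomic_long prev_state;        /* stands in for prev->state  */
    atomic_int  prev_on_cpu = 1;   /* stands in for prev->on_cpu */

    long finish_switch_side(void)
    {
            /* finish_task_switch() reads prev->state first ... */
            long state = atomic_load_explicit(&prev_state,
                                              memory_order_relaxed);

            /* ... and this fence models smp_mb(): the load above may
             * not be reordered past the store below.  smp_wmb() only
             * constrains store->store ordering, so it leaves exactly
             * this reordering open. */
            atomic_thread_fence(memory_order_seq_cst);

            atomic_store_explicit(&prev_on_cpu, 0, memory_order_relaxed);
            return state;
    }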
@@ -2831,6 +2833,28 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
        success = 1; /* we're going to change ->state */
        cpu = task_cpu(p);
 
+       /*
+        * Ensure we load p->on_rq _after_ p->state, otherwise it would
+        * be possible to, falsely, observe p->on_rq == 0 and get stuck
+        * in smp_cond_load_acquire() below.
+        *
+        * sched_ttwu_pending()                 try_to_wake_up()
+        *   [S] p->on_rq = 1;                  [L] p->state
+        *       UNLOCK rq->lock  -----.
+        *                              \
+        *                               +---   RMB
+        * schedule()                   /
+        *       LOCK rq->lock    -----'
+        *       UNLOCK rq->lock
+        *
+        * [task p]
+        *   [S] p->state = UNINTERRUPTIBLE     [L] p->on_rq
+        *
+        * Pairs with the UNLOCK+LOCK on rq->lock from the
+        * last wakeup of our task and the schedule that got our task
+        * current.
+        */
+       smp_rmb();
        if (p->on_rq && ttwu_remote(p, wake_flags))
                goto stat;
 
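On the waker's side the requirement is the mirror image: the load of
p->state (implicit in the wakeup-condition check above) must complete
before the load of p->on_rq, otherwise the race in the diagram lets the
waker see a stale p->on_rq == 0. A matching C11 sketch of that read side,
again with illustrative names:

    #include <stdatomic.h>

    atomic_long task_state;   /* p->state */
    atomic_int  task_on_rq;   /* p->on_rq */

    int waker_side(long wanted_state)
    {
            long state = atomic_load_explicit(&task_state,
                                              memory_order_relaxed);
            if (!(state & wanted_state))
                    return 0;

            /* models the smp_rmb(): the p->on_rq load below must not
             * be satisfied before the p->state load above */
            atomic_thread_fence(memory_order_acquire);

            return atomic_load_explicit(&task_on_rq,
                                        memory_order_relaxed);
    }
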
@@ -2930,7 +2954,6 @@ out:
  */
 int wake_up_process(struct task_struct *p)
 {
-       WARN_ON(task_is_stopped_or_traced(p));
        return try_to_wake_up(p, TASK_NORMAL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
@@ -3191,11 +3214,11 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
         * If a task dies, then it sets TASK_DEAD in tsk->state and calls
         * schedule one last time. The schedule call will never return, and
         * the scheduled task must drop that reference.
-        * The test for TASK_DEAD must occur while the runqueue locks are
-        * still held, otherwise prev could be scheduled on another cpu, die
-        * there before we look at prev->state, and then the reference would
-        * be dropped twice.
-        *              Manfred Spraul <manfred@colorfullife.com>
+        *
+        * We must observe prev->state before clearing prev->on_cpu (in
+        * finish_lock_switch), otherwise a concurrent wakeup can get prev
+        * running on another CPU and we could race with its RUNNING -> DEAD
+        * transition, resulting in a double drop.
         */
        prev_state = prev->state;
        finish_arch_switch(prev);
@@ -3308,7 +3331,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
                atomic_inc(&oldmm->mm_count);
                enter_lazy_tlb(oldmm, next);
        } else
-               switch_mm(oldmm, mm, next);
+               switch_mm_irqs_off(oldmm, mm, next);
 
        if (!prev->mm) {
                prev->active_mm = NULL;
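
context_switch() runs with interrupts disabled under the rq lock, so the
IRQs-off variant lets architectures skip redundant IRQ-state save/restore
work inside the mm switch. Together with the <linux/mmu_context.h> include
swapped in at the top of this diff, upstream provides a fallback so that
architectures without a special implementation keep building; roughly:

    /* include/linux/mmu_context.h: architectures that implement a
     * cheaper IRQs-off variant override this define */
    #ifndef switch_mm_irqs_off
    # define switch_mm_irqs_off switch_mm
    #endif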
@@ -3493,10 +3516,13 @@ static long calc_load_fold_active(struct rq *this_rq)
 static unsigned long
 calc_load(unsigned long load, unsigned long exp, unsigned long active)
 {
-       load *= exp;
-       load += active * (FIXED_1 - exp);
-       load += 1UL << (FSHIFT - 1);
-       return load >> FSHIFT;
+       unsigned long newload;
+
+       newload = load * exp + active * (FIXED_1 - exp);
+       if (active >= load)
+               newload += FIXED_1 - 1;
+
+       return newload / FIXED_1;
 }
 
 #ifdef CONFIG_NO_HZ
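
The old calc_load() rounded to nearest (add half an LSB, then shift), which
strands the average one step short of its fixed point: with FIXED_1 = 2048
and the one-minute EXP_1 = 1884, a single always-runnable task converges to
2042 and stalls, displaying 0.99 forever, while a fully idle system never
decays all the way back to 0. Rounding toward active (up while rising or
holding, down while falling) lets the recurrence actually reach 2048 (1.00)
and 0. A standalone demo of both recurrences; the constants are the
kernel's, the driver code is ours:

    #include <stdio.h>

    #define FSHIFT  11
    #define FIXED_1 (1UL << FSHIFT)   /* 2048 */
    #define EXP_1   1884UL            /* 1-minute decay factor */

    static unsigned long calc_load_old(unsigned long load,
                                       unsigned long active)
    {
            load *= EXP_1;
            load += active * (FIXED_1 - EXP_1);
            load += 1UL << (FSHIFT - 1);      /* round to nearest */
            return load >> FSHIFT;
    }

    static unsigned long calc_load_new(unsigned long load,
                                       unsigned long active)
    {
            unsigned long newload;

            newload = load * EXP_1 + active * (FIXED_1 - EXP_1);
            if (active >= load)
                    newload += FIXED_1 - 1;   /* round toward active */
            return newload / FIXED_1;
    }

    int main(void)
    {
            unsigned long o = 0, n = 0;
            int i;

            for (i = 0; i < 500; i++) {       /* one steady task */
                    o = calc_load_old(o, FIXED_1);
                    n = calc_load_new(n, FIXED_1);
            }
            printf("busy: old=%lu new=%lu\n", o, n);  /* 2042 vs 2048 */

            for (i = 0; i < 500; i++) {       /* now fully idle */
                    o = calc_load_old(o, 0);
                    n = calc_load_new(n, 0);
            }
            printf("idle: old=%lu new=%lu\n", o, n);  /* 6 vs 0 */
            return 0;
    }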
@@ -3591,8 +3617,9 @@ void calc_load_exit_idle(void)
        struct rq *this_rq = this_rq();
 
        /*
-        * If we're still before the sample window, we're done.
+        * If we're still before the pending sample window, we're done.
         */
+       this_rq->calc_load_update = calc_load_update;
        if (time_before(jiffies, this_rq->calc_load_update))
                return;
 
@@ -3601,7 +3628,6 @@ void calc_load_exit_idle(void)
         * accounted through the nohz accounting, so skip the entire deal and
         * sync up for the next window.
         */
-       this_rq->calc_load_update = calc_load_update;
        if (time_before(jiffies, this_rq->calc_load_update + 10))
                this_rq->calc_load_update += LOAD_FREQ;
 }
@@ -6244,14 +6270,16 @@ void show_state_filter(unsigned long state_filter)
                /*
                 * reset the NMI-timeout, listing all files on a slow
                 * console might take a lot of time:
+                * Also, reset softlockup watchdogs on all CPUs, because
+                * another CPU might be blocked waiting for us to process
+                * an IPI.
                 */
                touch_nmi_watchdog();
+               touch_all_softlockup_watchdogs();
                if (!state_filter || (p->state & state_filter))
                        sched_show_task(p);
        } while_each_thread(g, p);
 
-       touch_all_softlockup_watchdogs();
-
 #ifdef CONFIG_SCHED_DEBUG
        sysrq_sched_debug_show();
 #endif
@@ -7170,11 +7198,11 @@ static int init_rootdomain(struct root_domain *rd)
 {
        memset(rd, 0, sizeof(*rd));
 
-       if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
+       if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
                goto out;
-       if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
+       if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
                goto free_span;
-       if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
+       if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
                goto free_online;
 
        if (cpupri_init(&rd->cpupri) != 0)
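
With CONFIG_CPUMASK_OFFSTACK these cpumasks are real heap allocations, so
plain alloc_cpumask_var() hands back uninitialized bits, and the
root-domain code assumes its masks (span, online, rto_mask) start out
empty. Upstream's zalloc variant is simply the allocation with __GFP_ZERO
added, roughly (lib/cpumask.c):

    bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
    {
            /* identical to alloc_cpumask_var() except that the mask
             * memory starts out zeroed: a no-op difference when the
             * mask is an embedded bitmap, essential when it is
             * kmalloc()ed */
            return alloc_cpumask_var(mask, flags | __GFP_ZERO);
    }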
@@ -7452,7 +7480,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 
        cpumask_clear(covered);
 
-       for_each_cpu(i, span) {
+       for_each_cpu_wrap(i, span, cpu) {
                struct cpumask *sg_span;
 
                if (cpumask_test_cpu(i, covered))
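
for_each_cpu_wrap() visits the same CPUs as for_each_cpu(), but begins the
walk at @cpu and wraps around the span, so the first group built is the one
containing the CPU this domain is constructed for. A userspace model of
just the traversal order (a bool array standing in for the kernel's bitmap
machinery):

    #include <stdio.h>
    #include <stdbool.h>

    /* visit every set bit of 'span' exactly once, starting at
     * 'start' and wrapping around, like for_each_cpu_wrap() */
    static void visit_wrapped(const bool span[], int nr_cpus, int start)
    {
            int off;

            for (off = 0; off < nr_cpus; off++) {
                    int i = (start + off) % nr_cpus;

                    if (span[i])
                            printf("visit cpu %d\n", i);
            }
    }

    int main(void)
    {
            bool span[6] = { true, true, true, true, false, true };

            visit_wrapped(span, 6, 3);   /* prints 3, 5, 0, 1, 2 */
            return 0;
    }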
@@ -8499,6 +8527,7 @@ void __init sched_init(void)
 #ifdef CONFIG_CGROUP_SCHED
        list_add(&root_task_group.list, &task_groups);
        INIT_LIST_HEAD(&root_task_group.children);
+       INIT_LIST_HEAD(&root_task_group.siblings);
        autogroup_init(&init_task);
 #endif /* CONFIG_CGROUP_SCHED */
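
Without the added INIT_LIST_HEAD(), root_task_group.siblings keeps its
static-storage value of NULL next/prev pointers: list_empty() on it would
wrongly report the list as non-empty (an empty list head points at itself,
not at NULL), and the first unlink or move touching it would dereference
NULL. For reference, the helper from include/linux/list.h:

    static inline void INIT_LIST_HEAD(struct list_head *list)
    {
            /* an empty list head is self-referential in both
             * directions */
            list->next = list;
            list->prev = list;
    }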