pandora: defconfig: update
diff --git a/kernel/sched.c b/kernel/sched.c
index b342168..af9a268 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -32,7 +32,7 @@
 #include <linux/init.h>
 #include <linux/uaccess.h>
 #include <linux/highmem.h>
-#include <asm/mmu_context.h>
+#include <linux/mmu_context.h>
 #include <linux/interrupt.h>
 #include <linux/capability.h>
 #include <linux/completion.h>
@@ -2084,19 +2084,6 @@ EXPORT_SYMBOL_GPL(account_system_vtime);
 
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
 
-static inline void account_reset_rq(struct rq *rq)
-{
-#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-       rq->prev_irq_time = 0;
-#endif
-#ifdef CONFIG_PARAVIRT
-       rq->prev_steal_time = 0;
-#endif
-#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
-       rq->prev_steal_time_rq = 0;
-#endif
-}
-
 #ifdef CONFIG_PARAVIRT
 static inline u64 steal_ticks(u64 steal)
 {
@@ -2846,6 +2833,28 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
        success = 1; /* we're going to change ->state */
        cpu = task_cpu(p);
 
+       /*
+        * Ensure we load p->on_rq _after_ p->state, otherwise it would
+        * be possible to, falsely, observe p->on_rq == 0 and skip the
+        * ttwu_remote() path below for a task that is still queued.
+        *
+        * sched_ttwu_pending()                 try_to_wake_up()
+        *   [S] p->on_rq = 1;                  [L] p->state
+        *       UNLOCK rq->lock  -----.
+        *                              \
+        *                               +---   RMB
+        * schedule()                   /
+        *       LOCK rq->lock    -----'
+        *       UNLOCK rq->lock
+        *
+        * [task p]
+        *   [S] p->state = UNINTERRUPTIBLE     [L] p->on_rq
+        *
+        * Pairs with the UNLOCK+LOCK on rq->lock from the
+        * last wakeup of our task and the schedule that got our task
+        * current.
+        */
+       smp_rmb();
        if (p->on_rq && ttwu_remote(p, wake_flags))
                goto stat;
 
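The barrier added above orders the waker's two loads (p->state, then p->on_rq) against the two stores made visible by the UNLOCK+LOCK sequence on rq->lock. A minimal userspace analogue of the same pattern, using C11 atomics in place of smp_rmb() and rq->lock (names mirror the comment; an illustrative sketch, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int task_state; /* 0 = TASK_RUNNING, 1 = TASK_UNINTERRUPTIBLE */
static atomic_int on_rq;      /* 1 while the task sits on a runqueue */

/* Task side: a previous wakeup queued us ([S] p->on_rq = 1), then we go
 * to sleep ([S] p->state). The release store orders the two writes, the
 * way the UNLOCK+LOCK on rq->lock does in the diagram above. */
void task_goes_to_sleep(void)
{
	atomic_store_explicit(&on_rq, 1, memory_order_relaxed);
	atomic_store_explicit(&task_state, 1, memory_order_release);
}

/* Waker side: without the acquire fence (the smp_rmb() analogue), the
 * load of on_rq could be satisfied before the load of task_state and
 * falsely report 0, which is exactly the race the patch closes. */
bool try_to_wake(void)
{
	if (atomic_load_explicit(&task_state, memory_order_relaxed) != 1)
		return false;                      /* [L] p->state */
	atomic_thread_fence(memory_order_acquire); /* smp_rmb()    */
	return atomic_load_explicit(&on_rq, memory_order_relaxed) == 1; /* [L] p->on_rq */
}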
@@ -3322,7 +3331,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
                atomic_inc(&oldmm->mm_count);
                enter_lazy_tlb(oldmm, next);
        } else
-               switch_mm(oldmm, mm, next);
+               switch_mm_irqs_off(oldmm, mm, next);
 
        if (!prev->mm) {
                prev->active_mm = NULL;
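switch_mm_irqs_off() is supplied by the linux/mmu_context.h header swapped in at the top of this diff; context_switch() already runs with interrupts disabled, so an IRQs-off variant can skip saving and restoring the interrupt state on architectures that provide one. Everyone else falls back to plain switch_mm(). A sketch of that fallback, assuming the upstream-style header (this tree's exact copy may differ):

/* linux/mmu_context.h, roughly: architectures with a cheaper
 * interrupts-already-off MM switch define switch_mm_irqs_off
 * themselves; everyone else transparently gets switch_mm(). */
#ifndef switch_mm_irqs_off
# define switch_mm_irqs_off switch_mm
#endif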
@@ -3608,8 +3617,9 @@ void calc_load_exit_idle(void)
        struct rq *this_rq = this_rq();
 
        /*
-        * If we're still before the sample window, we're done.
+        * If we're still before the pending sample window, we're done.
         */
+       this_rq->calc_load_update = calc_load_update;
        if (time_before(jiffies, this_rq->calc_load_update))
                return;
 
@@ -3618,7 +3628,6 @@ void calc_load_exit_idle(void)
         * accounted through the nohz accounting, so skip the entire deal and
         * sync up for the next window.
         */
-       this_rq->calc_load_update = calc_load_update;
        if (time_before(jiffies, this_rq->calc_load_update + 10))
                this_rq->calc_load_update += LOAD_FREQ;
 }
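Net effect of the two hunks above: the per-rq window is re-synced to the global calc_load_update before the early return, so a stale rq value left over from idle can no longer satisfy the time_before() check and skip the sync. The resulting function, reconstructed from the hunks (a sketch; surrounding context per this tree):

void calc_load_exit_idle(void)
{
	struct rq *this_rq = this_rq();

	/* Re-sync to the global window first, so the check below never
	 * tests a stale per-rq value carried over from idle. */
	this_rq->calc_load_update = calc_load_update;

	/* If we're still before the pending sample window, we're done. */
	if (time_before(jiffies, this_rq->calc_load_update))
		return;

	/* We woke inside or after the sample window: already accounted
	 * through the nohz accounting, so just sync up for the next one. */
	if (time_before(jiffies, this_rq->calc_load_update + 10))
		this_rq->calc_load_update += LOAD_FREQ;
}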
@@ -6870,7 +6879,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 
        case CPU_UP_PREPARE:
                rq->calc_load_update = calc_load_update;
-               account_reset_rq(rq);
                break;
 
        case CPU_ONLINE:
@@ -7472,7 +7480,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 
        cpumask_clear(covered);
 
-       for_each_cpu(i, span) {
+       for_each_cpu_wrap(i, span, cpu) {
                struct cpumask *sg_span;
 
                if (cpumask_test_cpu(i, covered))
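for_each_cpu_wrap() visits the same CPUs as for_each_cpu(), but starts the walk at the given cpu and wraps around the span, so every CPU seeds its overlapping groups from its own position instead of always from the lowest set bit. A freestanding sketch of that iteration order (plain C with hypothetical names, not the kernel macro):

#include <stdio.h>

#define NR_CPUS 8

/* Visit every set "bit" of mask starting at start, wrapping around,
 * mirroring the order for_each_cpu_wrap(i, span, cpu) produces. */
static void visit_wrapped(const int mask[NR_CPUS], int start)
{
	for (int n = 0; n < NR_CPUS; n++) {
		int cpu = (start + n) % NR_CPUS;
		if (mask[cpu])
			printf("cpu %d\n", cpu);
	}
}

int main(void)
{
	int span[NR_CPUS] = { 1, 1, 0, 1, 1, 1, 0, 1 };
	visit_wrapped(span, 4); /* prints 4, 5, 7, 0, 1, 3 */
	return 0;
}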