futex: Fix potential use-after-free in FUTEX_REQUEUE_PI
[pandora-kernel.git] kernel/sched_fair.c
index a78ed27..98e1039 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1527,6 +1527,8 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
        cfs_rq->throttled_timestamp = rq->clock;
        raw_spin_lock(&cfs_b->lock);
        list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
+       if (!cfs_b->timer_active)
+               __start_cfs_bandwidth(cfs_b);
        raw_spin_unlock(&cfs_b->lock);
 }
 
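The hunk above makes sure the bandwidth period timer is armed whenever a cfs_rq gets throttled, so the group is guaranteed to be unthrottled when the timer next fires. Below is a minimal userspace sketch of the same check-and-start-under-the-lock pattern; toy_bandwidth, toy_start_timer and toy_throttle are invented stand-ins, not the kernel's cfs_bandwidth API.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Toy stand-in for struct cfs_bandwidth: a timer flag guarded by a lock. */
    struct toy_bandwidth {
        pthread_mutex_t lock;
        bool timer_active;
    };

    /* Hypothetical helper standing in for the real timer-start routine. */
    static void toy_start_timer(struct toy_bandwidth *b)
    {
        b->timer_active = true;
        printf("period timer armed\n");
    }

    /* Same shape as the new code in throttle_cfs_rq(): test and start under
     * the lock so two concurrent throttlers cannot arm the timer twice. */
    static void toy_throttle(struct toy_bandwidth *b)
    {
        pthread_mutex_lock(&b->lock);
        if (!b->timer_active)
            toy_start_timer(b);
        pthread_mutex_unlock(&b->lock);
    }

    int main(void)
    {
        struct toy_bandwidth b = { PTHREAD_MUTEX_INITIALIZER, false };

        toy_throttle(&b);   /* arms the timer */
        toy_throttle(&b);   /* no-op, already active */
        return 0;
    }
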
@@ -2352,13 +2354,11 @@ again:
                if (!smt && (sd->flags & SD_SHARE_CPUPOWER))
                        continue;
 
-               if (!(sd->flags & SD_SHARE_PKG_RESOURCES)) {
-                       if (!smt) {
-                               smt = 1;
-                               goto again;
-                       }
+               if (smt && !(sd->flags & SD_SHARE_CPUPOWER))
+                       break;
+
+               if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
                        break;
-               }
 
                sg = sd->groups;
                do {
@@ -2378,6 +2378,10 @@ next:
                        sg = sg->next;
                } while (sg != sd->groups);
        }
+       if (!smt) {
+               smt = 1;
+               goto again;
+       }
 done:
        rcu_read_unlock();
 
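These two hunks restructure the search in select_idle_sibling(): the first pass over the domain hierarchy now skips SMT siblings outright, and only after the whole scan has failed does the `smt = 1; goto again;` retry run. The sketch below reproduces that two-pass control flow with invented arrays (cpu_busy, smt_sibling); it illustrates the shape of the loop, not the real sched_domain/sched_group walk.

    #include <stdbool.h>
    #include <stdio.h>

    #define NCPU 4

    /* Toy topology: cpu 1 is an SMT sibling of cpu 0; only cpu 1 is idle. */
    static bool cpu_busy[NCPU]    = { true, false, true, true };
    static bool smt_sibling[NCPU] = { false, true, false, false };

    /* Two-pass search mirroring the reworked flow: pass 1 refuses SMT
     * siblings, and only if it finds nothing do we retry with smt = 1. */
    static int toy_select_idle(void)
    {
        int cpu, smt = 0;

    again:
        for (cpu = 0; cpu < NCPU; cpu++) {
            if (!smt && smt_sibling[cpu])
                continue;           /* first pass: skip SMT siblings */
            if (!cpu_busy[cpu])
                return cpu;         /* idle candidate found */
        }
        if (!smt) {
            smt = 1;                /* nothing found: allow siblings, retry */
            goto again;
        }
        return -1;                  /* no idle cpu at all */
    }

    int main(void)
    {
        /* Pass 1 fails (only the SMT sibling is idle), pass 2 picks cpu 1. */
        printf("picked cpu %d\n", toy_select_idle());
        return 0;
    }
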
@@ -2787,6 +2791,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
         * 1) running (obviously), or
         * 2) cannot be migrated to this CPU due to cpus_allowed, or
-        * 3) are cache-hot on their current CPU.
+        * 3) are cache-hot on their current CPU, or
+        * 4) p->pi_lock is held.
         */
        if (!cpumask_test_cpu(this_cpu, tsk_cpus_allowed(p))) {
                schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
@@ -2799,6 +2804,14 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
                return 0;
        }
 
+       /*
+        * An rt -> fair class change may be in progress.  If we sneak in
+        * while double_lock_balance() has dropped rq->lock and move the task,
+        * switched_to_fair() will be handed an rq that is no longer valid.
+        */
+       if (raw_spin_is_locked(&p->pi_lock))
+               return 0;
+
        /*
         * Aggressive migration if:
         * 1) task is cache cold, or
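
The new bail-out in can_migrate_task() refuses to move a task whose pi_lock is held, since an rt -> fair class change may be mid-flight while double_lock_balance() has temporarily dropped rq->lock. The userspace sketch below approximates the "leave it alone if someone holds its lock" idea with a trylock; note that the kernel check is raw_spin_is_locked(), which only peeks at the lock, whereas a trylock briefly acquires it, and toy_task/toy_can_migrate are invented names.

    #include <pthread.h>
    #include <stdio.h>

    /* Toy task with a lock standing in for p->pi_lock. */
    struct toy_task {
        pthread_mutex_t pi_lock;
        const char *name;
    };

    /* Rough analogue of the new check: if another path holds the task's
     * lock, assume a class change may be in progress and refuse to migrate
     * rather than race with it. */
    static int toy_can_migrate(struct toy_task *p)
    {
        if (pthread_mutex_trylock(&p->pi_lock) != 0)
            return 0;               /* lock held elsewhere: don't touch */
        pthread_mutex_unlock(&p->pi_lock);
        return 1;
    }

    int main(void)
    {
        struct toy_task t = { PTHREAD_MUTEX_INITIALIZER, "worker" };

        printf("can migrate %s: %d\n", t.name, toy_can_migrate(&t));

        pthread_mutex_lock(&t.pi_lock);   /* simulate an in-flight change */
        printf("can migrate %s: %d\n", t.name, toy_can_migrate(&t));
        pthread_mutex_unlock(&t.pi_lock);
        return 0;
    }
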
@@ -4733,7 +4746,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
 
                raw_spin_lock_irq(&this_rq->lock);
                update_rq_clock(this_rq);
-               update_cpu_load(this_rq);
+               update_idle_cpu_load(this_rq);
                raw_spin_unlock_irq(&this_rq->lock);
 
                rebalance_domains(balance_cpu, CPU_IDLE);
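
The switch from update_cpu_load() to update_idle_cpu_load() matters because a NOHZ-idle cpu may have skipped many ticks, so its per-cpu load must be decayed for every missed interval in one go. The toy below keeps only that catch-up idea, with invented names (toy_rq, toy_update_idle_cpu_load) and a crude halve-per-tick rule; the real code decays the several cpu_load[] indices at different rates.

    #include <stdio.h>

    /* Toy runqueue: a load value plus the tick at which it was last updated. */
    struct toy_rq {
        unsigned long last_load_update_tick;
        unsigned long cpu_load;
    };

    /* Catch up on ticks missed while the cpu was tickless-idle by decaying
     * the load once per missed tick (here: simply halving it). */
    static void toy_update_idle_cpu_load(struct toy_rq *rq, unsigned long now_tick)
    {
        unsigned long missed = now_tick - rq->last_load_update_tick;

        while (missed--)
            rq->cpu_load /= 2;
        rq->last_load_update_tick = now_tick;
    }

    int main(void)
    {
        struct toy_rq rq = { .last_load_update_tick = 100, .cpu_load = 1024 };

        /* The cpu slept from tick 100 to tick 105. */
        toy_update_idle_cpu_load(&rq, 105);
        printf("load after 5 missed ticks: %lu\n", rq.cpu_load);   /* 32 */
        return 0;
    }
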
@@ -4844,6 +4857,9 @@ static void rq_online_fair(struct rq *rq)
 static void rq_offline_fair(struct rq *rq)
 {
        update_sysctl();
+
+       /* Ensure any throttled groups are reachable by pick_next_task */
+       unthrottle_offline_cfs_rqs(rq);
 }
 
 #else  /* CONFIG_SMP */
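
The call added to rq_offline_fair() makes every group on the dying runqueue runnable again, matching the in-line comment: a still-throttled cfs_rq would hide its tasks from pick_next_task() while they are being migrated off the cpu. A minimal sketch of that "drain the throttled groups on offline" idea with toy types (toy_group and toy_offline are invented):

    #include <stdbool.h>
    #include <stdio.h>

    #define NGROUPS 3

    /* Toy group: a throttled group is invisible to the toy scheduler. */
    struct toy_group {
        const char *name;
        bool throttled;
        int nr_tasks;
    };

    static struct toy_group groups[NGROUPS] = {
        { "A", false, 2 },
        { "B", true,  1 },      /* ran out of quota earlier */
        { "C", true,  3 },
    };

    /* Analogue of unthrottle_offline_cfs_rqs(): before the cpu's tasks are
     * moved away, make every group runnable so none of its tasks is left
     * stranded behind a throttle that will never be lifted. */
    static void toy_offline(void)
    {
        int i;

        for (i = 0; i < NGROUPS; i++) {
            if (groups[i].throttled) {
                groups[i].throttled = false;
                printf("unthrottled group %s (%d tasks)\n",
                       groups[i].name, groups[i].nr_tasks);
            }
        }
    }

    int main(void)
    {
        toy_offline();
        return 0;
    }
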
@@ -4888,11 +4904,15 @@ static void task_fork_fair(struct task_struct *p)
 
        update_rq_clock(rq);
 
-       if (unlikely(task_cpu(p) != this_cpu)) {
-               rcu_read_lock();
-               __set_task_cpu(p, this_cpu);
-               rcu_read_unlock();
-       }
+       /*
+        * Not only the cpu but also the task_group of the parent might have
+        * been changed after parent->se.parent,cfs_rq were copied to
+        * child->se.parent,cfs_rq. So call __set_task_cpu() to make those
+        * of child point to valid ones.
+        */
+       rcu_read_lock();
+       __set_task_cpu(p, this_cpu);
+       rcu_read_unlock();
 
        update_curr(cfs_rq);
 
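Dropping the task_cpu(p) != this_cpu test means the fork path now always re-runs __set_task_cpu(): as the new comment explains, the parent's cpu and even its task_group may have changed after its se.parent/cfs_rq pointers were copied into the child, so they must be re-derived rather than trusted. The sketch below shows the "refresh a copied pointer from the authoritative field" idea with invented structures (toy_task, toy_refresh); it is not the scheduler's data model.

    #include <stdio.h>

    struct toy_group { const char *name; };

    static struct toy_group group_a = { "A" }, group_b = { "B" };

    struct toy_task {
        struct toy_group *group;        /* authoritative placement */
        struct toy_group *cached_group; /* copied at fork, may go stale */
    };

    /* Analogue of the unconditional __set_task_cpu() call: rebuild the
     * cached pointer from the authoritative one instead of trusting the
     * value inherited from the parent. */
    static void toy_refresh(struct toy_task *child)
    {
        child->cached_group = child->group;
    }

    int main(void)
    {
        struct toy_task parent = { &group_a, &group_a };
        struct toy_task child  = parent;    /* fork copies both fields */

        parent.group = &group_b;            /* parent moved to group B...  */
        child.group  = &group_b;            /* ...and the child follows it */

        toy_refresh(&child);                /* without this, still group A */
        printf("child runs in group %s\n", child.cached_group->name);
        return 0;
    }
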
@@ -4942,15 +4962,15 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
        /*
-        * Ensure the task's vruntime is normalized, so that when its
+        * Ensure the task's vruntime is normalized, so that when it's
         * switched back to the fair class the enqueue_entity(.flags=0) will
         * do the right thing.
         *
-        * If it was on_rq, then the dequeue_entity(.flags=0) will already
-        * have normalized the vruntime, if it was !on_rq, then only when
+        * have normalized the vruntime; if it's !on_rq, then only when
+        * have normalized the vruntime, if it's !on_rq, then only when
         * the task is sleeping will it still have non-normalized vruntime.
         */
-       if (!se->on_rq && p->state != TASK_RUNNING) {
+       if (!p->on_rq && p->state != TASK_RUNNING) {
                /*
                 * Fix up our vruntime so that the current sleep doesn't
                 * cause 'unlimited' sleep bonus.
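
The reasoning in this hunk is about vruntime's two representations: while a task sleeps off the runqueue its vruntime is still absolute, so switched_from_fair() normalizes it against min_vruntime, and when the task later re-enters the fair class enqueue_entity(.flags=0) adds the then-current min_vruntime back. The toy below walks through that normalize-then-re-anchor step with invented names (toy_rq, toy_se); the real code also runs place_entity() first to cap the sleep bonus.

    #include <stdio.h>

    struct toy_rq { unsigned long long min_vruntime; };
    struct toy_se { unsigned long long vruntime; };

    /* What the fixed branch does for a sleeping task: turn the absolute
     * vruntime into an offset from min_vruntime (normalize it). */
    static void toy_switched_from_fair(struct toy_rq *rq, struct toy_se *se)
    {
        se->vruntime -= rq->min_vruntime;
    }

    /* What enqueue_entity(.flags=0) does when the task rejoins the fair
     * class: re-anchor the offset against the current min_vruntime. */
    static void toy_enqueue(struct toy_rq *rq, struct toy_se *se)
    {
        se->vruntime += rq->min_vruntime;
    }

    int main(void)
    {
        struct toy_rq rq = { .min_vruntime = 1000 };
        struct toy_se se = { .vruntime = 1040 };   /* sleeping, absolute */

        toy_switched_from_fair(&rq, &se);   /* keeps only the offset of 40    */
        rq.min_vruntime = 5000;             /* the queue ran on meanwhile     */
        toy_enqueue(&rq, &se);              /* rejoins at 5040, no huge bonus */

        printf("vruntime after re-entering fair: %llu\n", se.vruntime);
        return 0;
    }
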
@@ -5031,7 +5051,7 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task
         * idle runqueue:
         */
        if (rq->cfs.load.weight)
-               rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
+               rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
 
        return rr_interval;
 }
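
The final hunk matters with group scheduling: sched_slice() has to be handed the cfs_rq the entity is actually queued on, and for a task inside a task group that is its group's queue, not the root rq->cfs. The toy below only illustrates why the choice of queue changes the answer; it splits a fixed period evenly by a made-up nr_running count, whereas the real sched_slice() weights entities by load.

    #include <stdio.h>

    /* Toy cfs_rq: just a name and how many entities are queued on it. */
    struct toy_cfs_rq {
        const char *name;
        unsigned int nr_running;
    };

    /* Crude stand-in for sched_slice(): divide a fixed period between the
     * entities that share the queue the task actually sits on. */
    static unsigned int toy_slice_ms(const struct toy_cfs_rq *cfs_rq)
    {
        const unsigned int period_ms = 24;

        return period_ms / (cfs_rq->nr_running ? cfs_rq->nr_running : 1);
    }

    int main(void)
    {
        struct toy_cfs_rq root  = { "rq->cfs",       8 };  /* whole cpu    */
        struct toy_cfs_rq group = { "cfs_rq_of(se)", 2 };  /* task's group */

        /* Asking the root queue (the old code) reflects every entity on the
         * cpu; asking the task's own queue (the fix) reflects its siblings. */
        printf("slice from %s: %u ms\n", root.name,  toy_slice_ms(&root));
        printf("slice from %s: %u ms\n", group.name, toy_slice_ms(&group));
        return 0;
    }
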