diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 7e51b5b..c261da7 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1756,7 +1756,7 @@ static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 {
-       if (!cfs_rq->runtime_enabled || !cfs_rq->nr_running)
+       if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
                return;
 
        __return_cfs_rq_runtime(cfs_rq);
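
The hunk above inverts a broken guard: unused bandwidth may only be handed back to the global pool once the queue is empty, so the function must bail out while nr_running is non-zero, not when it is zero. A minimal sketch of the intended semantics (simplified, with the surrounding CFS bandwidth machinery omitted; helper and field names as in the hunk):

static void return_runtime_if_idle(struct cfs_rq *cfs_rq)
{
	if (!cfs_rq->runtime_enabled)		/* bandwidth control is off */
		return;
	if (cfs_rq->nr_running)			/* still has runnable tasks */
		return;
	__return_cfs_rq_runtime(cfs_rq);	/* queue is idle: give runtime back */
}
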
@@ -2326,7 +2326,8 @@ static int select_idle_sibling(struct task_struct *p, int target)
        int cpu = smp_processor_id();
        int prev_cpu = task_cpu(p);
        struct sched_domain *sd;
-       int i;
+       struct sched_group *sg;
+       int i, smt = 0;
 
        /*
         * If the task is going to be woken-up on this cpu and if it is
@@ -2346,25 +2347,40 @@ static int select_idle_sibling(struct task_struct *p, int target)
         * Otherwise, iterate the domains and find an eligible idle cpu.
         */
        rcu_read_lock();
+again:
        for_each_domain(target, sd) {
+               if (!smt && (sd->flags & SD_SHARE_CPUPOWER))
+                       continue;
+
+               if (smt && !(sd->flags & SD_SHARE_CPUPOWER))
+                       break;
+
                if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
                        break;
 
-               for_each_cpu_and(i, sched_domain_span(sd), tsk_cpus_allowed(p)) {
-                       if (idle_cpu(i)) {
-                               target = i;
-                               break;
+               sg = sd->groups;
+               do {
+                       if (!cpumask_intersects(sched_group_cpus(sg),
+                                               tsk_cpus_allowed(p)))
+                               goto next;
+
+                       for_each_cpu(i, sched_group_cpus(sg)) {
+                               if (!idle_cpu(i))
+                                       goto next;
                        }
-               }
 
-               /*
-                * Lets stop looking for an idle sibling when we reached
-                * the domain that spans the current cpu and prev_cpu.
-                */
-               if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
-                   cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
-                       break;
+                       target = cpumask_first_and(sched_group_cpus(sg),
+                                       tsk_cpus_allowed(p));
+                       goto done;
+next:
+                       sg = sg->next;
+               } while (sg != sd->groups);
        }
+       if (!smt) {
+               smt = 1;
+               goto again;
+       }
+done:
        rcu_read_unlock();
 
        return target;
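
This hunk turns select_idle_sibling()'s single scan into two passes: the first pass skips SD_SHARE_CPUPOWER domains so SMT siblings of a busy core are avoided, and only if that finds nothing does the smt = 1 retry consider them. Within each SD_SHARE_PKG_RESOURCES domain the walk now looks for a sched_group whose cpus are all idle, rather than the first idle cpu. The core of that group walk, as a standalone sketch (helper names are the kernel's own for this era; the allowed-mask check mirrors the hunk):

static int first_fully_idle_group_cpu(struct sched_domain *sd,
				      struct task_struct *p, int fallback)
{
	struct sched_group *sg = sd->groups;
	int i;

	do {
		if (!cpumask_intersects(sched_group_cpus(sg),
					tsk_cpus_allowed(p)))
			goto next;		/* task may not run in this group */

		for_each_cpu(i, sched_group_cpus(sg)) {
			if (!idle_cpu(i))
				goto next;	/* one busy cpu disqualifies the group */
		}

		return cpumask_first_and(sched_group_cpus(sg),
					 tsk_cpus_allowed(p));
next:
		sg = sg->next;			/* groups form a circular list */
	} while (sg != sd->groups);

	return fallback;			/* no fully idle group found */
}
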
@@ -4719,7 +4735,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
 
                raw_spin_lock_irq(&this_rq->lock);
                update_rq_clock(this_rq);
-               update_cpu_load(this_rq);
+               update_idle_cpu_load(this_rq);
                raw_spin_unlock_irq(&this_rq->lock);
 
                rebalance_domains(balance_cpu, CPU_IDLE);
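
Here the remote rebalance path switches to an idle-aware load update. update_cpu_load() assumes it runs once per tick, but a nohz-idle cpu has missed an arbitrary number of ticks, so its cpu_load[] history must be decayed once per missed jiffy. Roughly, the idle variant has to do something like the following (a sketch of the shape, not the kernel's exact code; a zero load sample is folded in for each missed tick):

static void sketch_update_idle_cpu_load(struct rq *this_rq)
{
	unsigned long curr_jiffies = jiffies;
	unsigned long pending_updates;

	if (curr_jiffies == this_rq->last_load_update_tick)
		return;				/* no ticks were missed */

	pending_updates = curr_jiffies - this_rq->last_load_update_tick;
	this_rq->last_load_update_tick = curr_jiffies;

	/* decay cpu_load[] once per missed jiffy, sampling zero load */
	__update_cpu_load(this_rq, 0, pending_updates);
}
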
@@ -4874,11 +4890,15 @@ static void task_fork_fair(struct task_struct *p)
 
        update_rq_clock(rq);
 
-       if (unlikely(task_cpu(p) != this_cpu)) {
-               rcu_read_lock();
-               __set_task_cpu(p, this_cpu);
-               rcu_read_unlock();
-       }
+       /*
+        * Not only the cpu but also the task_group of the parent might
+        * have changed after parent->se.{parent,cfs_rq} were copied to
+        * child->se.{parent,cfs_rq}. Call __set_task_cpu() unconditionally
+        * so that the child's pointers refer to valid ones.
+        */
+       rcu_read_lock();
+       __set_task_cpu(p, this_cpu);
+       rcu_read_unlock();
 
        update_curr(cfs_rq);
 
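
The task_fork_fair() change drops the task_cpu(p) != this_cpu shortcut: even when the cpu matches, the parent may have been moved to another task_group between the time its se.parent/se.cfs_rq were copied into the child and now, leaving the child's pointers stale. __set_task_cpu() repairs that because it re-derives the group pointers from the current task_group. Conceptually, its set_task_rq() step does the following (a simplified sketch for a group-scheduling build; field names follow kernels of this era):

static void sketch_set_task_rq(struct task_struct *p, unsigned int cpu)
{
	struct task_group *tg = task_group(p);	/* looked up now, not the stale copy */

	p->se.cfs_rq = tg->cfs_rq[cpu];		/* group's runqueue on this cpu */
	p->se.parent = tg->se[cpu];		/* group's entity on this cpu */
}
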
@@ -5017,7 +5037,7 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task
         * idle runqueue:
         */
        if (rq->cfs.load.weight)
-               rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
+               rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
 
        return rr_interval;
 }
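
Finally, get_rr_interval_fair() asked sched_slice() about the root runqueue (&rq->cfs) even though, with group scheduling, se may be enqueued on a nested cfs_rq with a very different load; cfs_rq_of(se) resolves the queue the entity actually sits on. For reference, in a CONFIG_FAIR_GROUP_SCHED build of this era that helper is simply:

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}
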