sched: stop wake_affine from causing serious imbalance
author	Mike Galbraith <efault@gmx.de>
Thu, 29 May 2008 09:11:41 +0000 (11:11 +0200)
committer	Ingo Molnar <mingo@elte.hu>
Thu, 29 May 2008 09:29:20 +0000 (11:29 +0200)
Prevent short-running wakers of short-running threads from overloading a single
CPU via wakeup affinity, and wire up the previously disconnected AFFINE_WAKEUPS
debug feature.

Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
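
For context, a minimal userspace sketch of the check this patch hoists into the
"balanced" variable and reuses; the function, its parameters, and the sample
imbalance percentage of 125 are hypothetical stand-ins for the scheduler's
internal state, not kernel API:

	/*
	 * Standalone sketch (not kernel code) of the hoisted test:
	 * wake_balanced() mirrors the patch's
	 * 100*(tl + p->se.load.weight) <= imbalance*load condition,
	 * including the sync discount that now runs before it.
	 */
	#include <stdio.h>

	static int wake_balanced(unsigned long this_load, unsigned long prev_load,
				 unsigned long waker_weight, unsigned long wakee_weight,
				 unsigned long imbalance_pct, int sync)
	{
		unsigned long tl = this_load;

		/*
		 * A sync waker is about to sleep, so discount its (maximum
		 * possible) contribution to this CPU's load before judging
		 * whether pulling the wakee here would overload the CPU.
		 */
		if (sync)
			tl -= waker_weight;

		return 100 * (tl + wakee_weight) <= imbalance_pct * prev_load;
	}

	int main(void)
	{
		unsigned long nice0 = 1024;	/* nice-0 load weight of this era */

		/* Lightly loaded waking CPU, busier prev CPU: pull allowed. */
		printf("%d\n", wake_balanced(nice0, 2 * nice0, nice0, nice0, 125, 1));

		/*
		 * Loaded waking CPU, idle prev CPU: pull refused, so a
		 * short-running waker can no longer pile wakees onto itself.
		 */
		printf("%d\n", wake_balanced(3 * nice0, 0, nice0, 125, 0, 0));

		return 0;
	}

With balanced computed up front, the avg_overlap fast path at the top of
wake_affine() is gated on it, so short-running wakers can no longer attract
wakees onto an already overloaded CPU; that is the imbalance the changelog
describes.
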
kernel/sched_fair.c

index f0f25fc..08ae848 100644
@@ -996,16 +996,27 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
        struct task_struct *curr = this_rq->curr;
        unsigned long tl = this_load;
        unsigned long tl_per_task;
+       int balanced;
 
-       if (!(this_sd->flags & SD_WAKE_AFFINE))
+       if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
                return 0;
 
+       /*
+        * If sync wakeup then subtract the (maximum possible)
+        * effect of the currently running task from the load
+        * of the current CPU:
+        */
+       if (sync)
+               tl -= current->se.load.weight;
+
+       balanced = 100*(tl + p->se.load.weight) <= imbalance*load;
+
        /*
         * If the currently running task will sleep within
         * a reasonable amount of time then attract this newly
         * woken task:
         */
-       if (sync && curr->sched_class == &fair_sched_class) {
+       if (sync && balanced && curr->sched_class == &fair_sched_class) {
                if (curr->se.avg_overlap < sysctl_sched_migration_cost &&
                                p->se.avg_overlap < sysctl_sched_migration_cost)
                        return 1;
@@ -1014,16 +1025,8 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
        schedstat_inc(p, se.nr_wakeups_affine_attempts);
        tl_per_task = cpu_avg_load_per_task(this_cpu);
 
-       /*
-        * If sync wakeup then subtract the (maximum possible)
-        * effect of the currently running task from the load
-        * of the current CPU:
-        */
-       if (sync)
-               tl -= current->se.load.weight;
-
        if ((tl <= load && tl + target_load(prev_cpu, idx) <= tl_per_task) ||
-                       100*(tl + p->se.load.weight) <= imbalance*load) {
+                       balanced) {
                /*
                 * This domain has SD_WAKE_AFFINE and
                 * p is cache cold in this domain, and