[PATCH] sched: less newidle locking
author		Nick Piggin <nickpiggin@yahoo.com.au>
		Sat, 10 Sep 2005 07:26:16 +0000 (00:26 -0700)
committer	Linus Torvalds <torvalds@g5.osdl.org>
		Sat, 10 Sep 2005 17:06:23 +0000 (10:06 -0700)
Similarly to the earlier change in load_balance, only lock the runqueue in
load_balance_newidle if the busiest queue found has nr_running > 1.  This
will reduce the frequency of expensive remote runqueue lock acquisitions in
the schedule() path on some workloads.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
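
For readers outside the scheduler code, here is a minimal user-space sketch
of the pattern the patch applies: read the remote queue's task count without
its lock, and only pay for the (potentially contended) lock acquisition when
the unlocked read suggests there is something to pull.  All names here
(run_queue, pull_one_task, balance_newidle) are hypothetical stand-ins, not
kernel APIs; in the real code it is move_tasks() that re-verifies everything
under the lock, which is what makes the racy unlocked check safe.

	/*
	 * Illustrative sketch only (NOT kernel code): check before locking.
	 * The unlocked read of nr_running may race with updates, but the
	 * worst case is a wasted lock round-trip or one missed balance
	 * pass, never a correctness problem, because the work is re-checked
	 * under the lock.
	 */
	#include <pthread.h>

	struct run_queue {
		pthread_mutex_t lock;
		int nr_running;		/* queued tasks; written under lock */
	};

	/* Hypothetical stand-in for move_tasks(): re-checks under the lock. */
	static int pull_one_task(struct run_queue *busiest)
	{
		if (busiest->nr_running > 1) {
			busiest->nr_running--;
			return 1;
		}
		return 0;
	}

	static int balance_newidle(struct run_queue *busiest)
	{
		int nr_moved = 0;

		/* Racy but safe unlocked check, as in the patch. */
		if (busiest->nr_running > 1) {
			pthread_mutex_lock(&busiest->lock);
			nr_moved = pull_one_task(busiest);
			pthread_mutex_unlock(&busiest->lock);
		}
		return nr_moved;
	}

	int main(void)
	{
		struct run_queue rq = { PTHREAD_MUTEX_INITIALIZER, 3 };

		while (balance_newidle(&rq))
			;	/* pull tasks until only one remains */
		return rq.nr_running == 1 ? 0 : 1;
	}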
kernel/sched.c

index c61ee34..9301895 100644
@@ -2104,8 +2104,7 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
                 */
                double_lock_balance(this_rq, busiest);
                nr_moved = move_tasks(this_rq, this_cpu, busiest,
-                                               imbalance, sd, idle,
-                                               &all_pinned);
+                                       imbalance, sd, idle, &all_pinned);
                spin_unlock(&busiest->lock);
 
                /* All tasks on this runqueue were pinned by CPU affinity */
@@ -2200,18 +2199,22 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
 
        BUG_ON(busiest == this_rq);
 
-       /* Attempt to move tasks */
-       double_lock_balance(this_rq, busiest);
-
        schedstat_add(sd, lb_imbalance[NEWLY_IDLE], imbalance);
-       nr_moved = move_tasks(this_rq, this_cpu, busiest,
+
+       nr_moved = 0;
+       if (busiest->nr_running > 1) {
+               /* Attempt to move tasks */
+               double_lock_balance(this_rq, busiest);
+               nr_moved = move_tasks(this_rq, this_cpu, busiest,
                                        imbalance, sd, NEWLY_IDLE, NULL);
+               spin_unlock(&busiest->lock);
+       }
+
        if (!nr_moved)
                schedstat_inc(sd, lb_failed[NEWLY_IDLE]);
        else
                sd->nr_balance_failed = 0;
 
-       spin_unlock(&busiest->lock);
        return nr_moved;
 
 out_balanced: