lockdep: re-annotate scheduler runqueues
author     Peter Zijlstra <a.p.zijlstra@chello.nl>
           Mon, 11 Aug 2008 07:30:22 +0000 (09:30 +0200)
committer  Ingo Molnar <mingo@elte.hu>
           Mon, 11 Aug 2008 07:30:22 +0000 (09:30 +0200)
Instead of using a per-rq lock class, use the regular nesting operations.

However, take extra care with double_lock_balance() as it can release the
already held rq->lock (and therefore change its nesting subclass).

So what can happen is:

 spin_lock(rq->lock); // this rq subclass 0

 double_lock_balance(rq, other_rq);
   // release rq
   // acquire other_rq->lock subclass 0
   // acquire rq->lock subclass 1

 spin_unlock(other_rq->lock);

leaving you with rq->lock in subclass 1

So a subsequent double_lock_balance() call can try to nest a subclass 1
lock while already holding a subclass 1 lock.
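
For reference, the subclass switch comes from the lock-ordering branch of
double_lock_balance(); roughly (paraphrased sketch for illustration, not
part of this patch and possibly differing in detail from the tree):

 static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 {
        int ret = 0;

        if (unlikely(!spin_trylock(&busiest->lock))) {
                if (busiest < this_rq) {
                        /* drop this_rq->lock to respect the rq lock order */
                        spin_unlock(&this_rq->lock);
                        spin_lock(&busiest->lock);
                        /* re-take this_rq->lock, now as subclass 1 */
                        spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
                        ret = 1;
                } else
                        spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
        }
        return ret;
 }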

Fix this by introducing double_unlock_balance(), which releases the other
rq's lock but also resets the subclass of this rq's lock to 0.
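
The intended pairing in a caller then looks roughly like this (illustrative
sketch only, not taken from the diff below):

 spin_lock(&this_rq->lock);               /* this_rq->lock, subclass 0 */

 double_lock_balance(this_rq, busiest);   /* may drop and re-take
                                           * this_rq->lock as subclass 1 */

 /* ... move/push/pull tasks with both locks held ... */

 double_unlock_balance(this_rq, busiest); /* releases busiest->lock and,
                                           * via lock_set_subclass(), puts
                                           * this_rq->lock back in subclass 0 */
 spin_unlock(&this_rq->lock);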

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/sched.c
kernel/sched_rt.c

diff --git a/kernel/sched.c b/kernel/sched.c
index 655f1db..9b2b6a8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2812,6 +2812,13 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
        return ret;
 }
 
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+       __releases(busiest->lock)
+{
+       spin_unlock(&busiest->lock);
+       lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+}
+
 /*
  * If dest_cpu is allowed for this process, migrate the task to it.
  * This is accomplished by forcing the cpu_allowed mask to only
@@ -3636,7 +3643,7 @@ redo:
                ld_moved = move_tasks(this_rq, this_cpu, busiest,
                                        imbalance, sd, CPU_NEWLY_IDLE,
                                        &all_pinned);
-               spin_unlock(&busiest->lock);
+               double_unlock_balance(this_rq, busiest);
 
                if (unlikely(all_pinned)) {
                        cpu_clear(cpu_of(busiest), *cpus);
@@ -3751,7 +3758,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
                else
                        schedstat_inc(sd, alb_failed);
        }
-       spin_unlock(&target_rq->lock);
+       double_unlock_balance(busiest_rq, target_rq);
 }
 
 #ifdef CONFIG_NO_HZ
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 908c04f..6163e4c 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -861,6 +861,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 #define RT_MAX_TRIES 3
 
 static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest);
+
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
@@ -1022,7 +1024,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
                        break;
 
                /* try again */
-               spin_unlock(&lowest_rq->lock);
+               double_unlock_balance(rq, lowest_rq);
                lowest_rq = NULL;
        }
 
@@ -1091,7 +1093,7 @@ static int push_rt_task(struct rq *rq)
 
        resched_task(lowest_rq->curr);
 
-       spin_unlock(&lowest_rq->lock);
+       double_unlock_balance(rq, lowest_rq);
 
        ret = 1;
 out:
@@ -1197,7 +1199,7 @@ static int pull_rt_task(struct rq *this_rq)
 
                }
  skip:
-               spin_unlock(&src_rq->lock);
+               double_unlock_balance(this_rq, src_rq);
        }
 
        return ret;