sched: fix RCU lockdep splat from task_group()
[pandora-kernel.git] / kernel/sched.c
index ed09d4f..ae8f75a 100644
@@ -3513,9 +3513,9 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
        rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
 
        if (total) {
-               u64 temp;
+               u64 temp = rtime;
 
-               temp = (u64)(rtime * utime);
+               temp *= utime;
                do_div(temp, total);
                utime = (cputime_t)temp;
        } else
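
Note on the hunk above: rtime and utime are cputime_t (typically an unsigned long, so 32 bits on 32-bit architectures). The old expression (u64)(rtime * utime) performed the multiplication in that narrower type and only widened the already-wrapped result; initializing a u64 from rtime first keeps the whole product in 64 bits. A minimal user-space sketch of the difference, assuming a 32-bit cputime_t and using made-up tick counts:

    /* Stand-alone sketch, not kernel code: shows why the cast placement matters. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t rtime = 80000, utime = 70000;  /* made-up tick counts */

            /* Old pattern: the 32x32 multiply wraps before the widening cast. */
            uint64_t bad = (uint64_t)(rtime * utime);

            /* New pattern: widen first, then multiply in 64 bits. */
            uint64_t good = rtime;
            good *= utime;

            printf("old: %llu  new: %llu\n",
                   (unsigned long long)bad, (unsigned long long)good);
            return 0;
    }
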
@@ -3546,9 +3546,9 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
        rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
 
        if (total) {
-               u64 temp;
+               u64 temp = rtime;
 
-               temp = (u64)(rtime * cputime.utime);
+               temp *= cputime.utime;
                do_div(temp, total);
                utime = (cputime_t)temp;
        } else
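
The same widening fix is applied to the thread-group path here. In both hunks the product is then scaled with do_div(temp, total), the kernel helper that divides a 64-bit value by a 32-bit divisor in place (and evaluates to the remainder), since 32-bit architectures cannot rely on a native 64-bit division. A user-space stand-in for that contract, using a hypothetical demo_do_div() helper rather than the real macro:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in mirroring do_div(): quotient in place, remainder returned. */
    static uint32_t demo_do_div(uint64_t *n, uint32_t base)
    {
            uint32_t rem = (uint32_t)(*n % base);
            *n /= base;
            return rem;
    }

    int main(void)
    {
            uint64_t temp = (uint64_t)80000 * 70000;        /* rtime * utime, widened */
            uint32_t total = 150000;                        /* utime + stime */

            demo_do_div(&temp, total);
            printf("scaled utime: %llu\n", (unsigned long long)temp);
            return 0;
    }
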
@@ -5337,7 +5337,19 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
        idle->se.exec_start = sched_clock();
 
        cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
+       /*
+        * We're having a chicken-and-egg problem: even though we are
+        * holding rq->lock, the cpu isn't yet set to this cpu, so the
+        * lockdep check in task_group() will fail.
+        *
+        * Similar case to sched_fork(). / Alternatively we could
+        * use task_rq_lock() here and obtain the other rq->lock.
+        *
+        * Silence PROVE_RCU
+        */
+       rcu_read_lock();
        __set_task_cpu(idle, cpu);
+       rcu_read_unlock();
 
        rq->curr = rq->idle = idle;
 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
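
For the init_idle() hunk: __set_task_cpu() reaches task_group() via set_task_rq(), and it is the RCU annotation there that PROVE_RCU trips over. An illustrative sketch of the check being silenced, based on the mainline scheduler of that era (helper names such as task_subsys_state_check() are an assumption and may differ in this tree):

    /*
     * Sketch only: task_group() dereferences the task's cgroup state under
     * an rcu_dereference_check(), which accepts either an RCU read-side
     * critical section or the lock of the task's current runqueue.
     */
    static inline struct task_group *task_group(struct task_struct *p)
    {
            struct cgroup_subsys_state *css;

            css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
                            lockdep_is_held(&task_rq(p)->lock));
            return container_of(css, struct task_group, css);
    }

In init_idle() the idle task still carries its previous cpu when __set_task_cpu() is called, so task_rq(p) is not the runqueue whose lock is held and lockdep_is_held() fails; taking rcu_read_lock() around the call satisfies the check instead, which is exactly what the added lines do.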