Merge branch 'linus' into cpus4096
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index a38895a..6f8696c 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -106,7 +106,7 @@ static void force_quiescent_state(struct rcu_data *rdp,
                 */
                cpus_and(cpumask, rcp->cpumask, cpu_online_map);
                cpu_clear(rdp->cpu, cpumask);
-               for_each_cpu_mask(cpu, cpumask)
+               for_each_cpu_mask_nr(cpu, cpumask)
                        smp_send_reschedule(cpu);
        }
 }
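
The switch to for_each_cpu_mask_nr() is what ties this file to the cpus4096 work: the _nr variant stops scanning at nr_cpu_ids (the number of possible CPUs found at boot) instead of walking all NR_CPUS bit positions, which matters once NR_CPUS is 4096. A minimal user-space sketch of the difference, with plain arrays standing in for struct cpumask and made-up NR_CPUS/nr_cpu_ids values, not the kernel's real implementation:

    #include <stdio.h>

    #define NR_CPUS 4096            /* compile-time maximum */
    static int nr_cpu_ids = 8;      /* CPUs actually possible at boot */

    static int mask[NR_CPUS];       /* 1 = bit set in the "cpumask" */

    /* Old iterator: scans every one of the NR_CPUS bit positions. */
    #define for_each_cpu_mask(cpu, m) \
            for ((cpu) = 0; (cpu) < NR_CPUS; (cpu)++) if ((m)[cpu])

    /* New iterator: stops at nr_cpu_ids, so with 8 possible CPUs it
     * examines 8 positions instead of 4096. */
    #define for_each_cpu_mask_nr(cpu, m) \
            for ((cpu) = 0; (cpu) < nr_cpu_ids; (cpu)++) if ((m)[cpu])

    int main(void)
    {
            int cpu;

            mask[1] = mask[3] = 1;  /* CPUs 1 and 3 still need prodding */
            for_each_cpu_mask_nr(cpu, mask)
                    printf("send reschedule IPI to CPU %d\n", cpu);
            return 0;
    }
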
@@ -387,6 +387,10 @@ static void __rcu_offline_cpu(struct rcu_data *this_rdp,
        rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
        rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
        rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
+
+       local_irq_disable();
+       this_rdp->qlen += rdp->qlen;
+       local_irq_enable();
 }
 
 static void rcu_offline_cpu(int cpu)
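
The new irq-off window around the qlen merge is needed because call_rcu() can run from interrupt context on this CPU and does this_rdp->qlen++ there; an interrupt landing inside the non-atomic += would lose one of the two updates. A rough user-space analogy with a blocked signal standing in for local_irq_disable() — the handler, the qlen variable, and the count of 42 are all invented:

    #include <signal.h>
    #include <stdio.h>

    static volatile long qlen;      /* per-CPU callback count in the model */

    /* Stands in for call_rcu() firing from interrupt context. */
    static void fake_irq(int sig)
    {
            (void)sig;
            qlen++;                 /* enqueue one callback */
    }

    int main(void)
    {
            sigset_t irqs;
            long dead_cpu_qlen = 42;        /* count taken from the dead CPU */

            signal(SIGALRM, fake_irq);
            sigemptyset(&irqs);
            sigaddset(&irqs, SIGALRM);

            /* local_irq_disable(): the += below is load-modify-store, so
             * a fake_irq() between the load and the store would be lost. */
            sigprocmask(SIG_BLOCK, &irqs, NULL);
            qlen += dead_cpu_qlen;
            sigprocmask(SIG_UNBLOCK, &irqs, NULL);  /* local_irq_enable() */

            printf("qlen = %ld\n", qlen);
            return 0;
    }
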
@@ -516,10 +520,38 @@ void rcu_check_callbacks(int cpu, int user)
        if (user ||
            (idle_cpu(cpu) && !in_softirq() &&
                                hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
+
+               /*
+                * Get here if this CPU took its interrupt from user
+                * mode or from the idle loop, and if this is not a
+                * nested interrupt.  In this case, the CPU is in
+                * a quiescent state, so count it.
+                *
+                * Also do a memory barrier.  This is needed to handle
+                * the case where writes from a preempt-disable section
+                * of code get reordered into schedule() by this CPU's
+                * write buffer.  The memory barrier makes sure that
+                * the rcu_qsctr_inc() and rcu_bh_qsctr_inc() are seen
+                * by other CPUs to happen after any such write.
+                */
+
+               smp_mb();  /* See above block comment. */
                rcu_qsctr_inc(cpu);
                rcu_bh_qsctr_inc(cpu);
-       } else if (!in_softirq())
+
+       } else if (!in_softirq()) {
+
+               /*
+                * Get here if this CPU did not take its interrupt from
+                * softirq, in other words, if it is not interrupting
+                * an rcu_bh read-side critical section.  This is an _bh
+                * quiescent state, so count it.  The memory barrier
+                * is needed for the same reason as the one above.
+                */
+
+               smp_mb();  /* See above block comment. */
                rcu_bh_qsctr_inc(cpu);
+       }
        raise_rcu_softirq();
 }
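
The smp_mb() calls added above are there so that writes done inside a preempt-disabled region are seen by other CPUs as happening before the quiescent-state counter bump; the grace-period machinery pairs this with ordering on its own side. A hedged C11 sketch of that publish-then-count ordering — the kernel uses smp_mb() rather than C11 fences, and shared_data, qsctr, and quiescent_state() are invented names for illustration:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int shared_data;  /* write from a preempt-disabled region */
    static atomic_int qsctr;        /* stand-in for the quiescent-state counter */

    static void quiescent_state(void)
    {
            atomic_store_explicit(&shared_data, 1, memory_order_relaxed);

            /* Full fence, as smp_mb(): orders the store above before the
             * counter increment below; the detection side supplies the
             * matching barrier before reading shared_data. */
            atomic_thread_fence(memory_order_seq_cst);

            atomic_fetch_add_explicit(&qsctr, 1, memory_order_relaxed);
    }

    int main(void)
    {
            quiescent_state();
            printf("qsctr=%d shared_data=%d\n",
                   atomic_load(&qsctr), atomic_load(&shared_data));
            return 0;
    }
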
 
@@ -543,7 +575,7 @@ static void __cpuinit rcu_online_cpu(int cpu)
 
        rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
        rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
-       open_softirq(RCU_SOFTIRQ, rcu_process_callbacks, NULL);
+       open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 }
 
 static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
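
The open_softirq() hunk tracks the interface change that dropped the registration's always-NULL void *data argument; the handler itself keeps its struct softirq_action * parameter. A small user-space model of the post-change registration, where softirq_vec, the enum, and the direct call in main() are simplifications rather than the kernel's real softirq dispatch:

    #include <stdio.h>

    struct softirq_action {
            void (*action)(struct softirq_action *);
            /* the old 'void *data' member is gone */
    };

    enum { RCU_SOFTIRQ, NR_SOFTIRQS };
    static struct softirq_action softirq_vec[NR_SOFTIRQS];

    /* Post-change signature: just the vector index and the handler. */
    static void open_softirq(int nr, void (*action)(struct softirq_action *))
    {
            softirq_vec[nr].action = action;
    }

    static void rcu_process_callbacks(struct softirq_action *unused)
    {
            (void)unused;
            printf("running RCU callbacks\n");
    }

    int main(void)
    {
            open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
            softirq_vec[RCU_SOFTIRQ].action(&softirq_vec[RCU_SOFTIRQ]);
            return 0;
    }
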