rcu: refactor RCU's context-switch handling
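
The addition of preemptible RCU left rcu_sched_qs() doing double duty:
recording an RCU-sched quiescent state and also notifying preemptible
RCU of the context switch.  Split these concerns: rcu_sched_qs() now
only records the quiescent state, while a new rcu_note_context_switch()
invokes both rcu_sched_qs() and rcu_preempt_note_context_switch().  In
addition, force_quiescent_state() performs its compile-time dead-code
check before dropping rnp->lock, __rcu_pending() forces a local
reschedule when a force_quiescent_state() scan is imminent and a
non-preemptible flavor still needs a quiescent state from this CPU, and
the per-CPU ->mynode setup and rcu_boot_init_percpu_data() calls move
from the RCU_INIT_FLAVOR() macro into rcu_init_one().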
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 3ec8160..e336313 100644
@@ -97,25 +97,32 @@ static int rcu_gp_in_progress(struct rcu_state *rsp)
  */
 void rcu_sched_qs(int cpu)
 {
-       struct rcu_data *rdp;
+       struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);
 
-       rdp = &per_cpu(rcu_sched_data, cpu);
        rdp->passed_quiesc_completed = rdp->gpnum - 1;
        barrier();
        rdp->passed_quiesc = 1;
-       rcu_preempt_note_context_switch(cpu);
 }
 
 void rcu_bh_qs(int cpu)
 {
-       struct rcu_data *rdp;
+       struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
 
-       rdp = &per_cpu(rcu_bh_data, cpu);
        rdp->passed_quiesc_completed = rdp->gpnum - 1;
        barrier();
        rdp->passed_quiesc = 1;
 }
 
+/*
+ * Note a context switch.  This is a quiescent state for RCU-sched,
+ * and requires special handling for preemptible RCU.
+ */
+void rcu_note_context_switch(int cpu)
+{
+       rcu_sched_qs(cpu);
+       rcu_preempt_note_context_switch(cpu);
+}
+
 #ifdef CONFIG_NO_HZ
 DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
        .dynticks_nesting = 1,
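
For context, the caller side is not part of this diff; presumably the
scheduler's context-switch path now invokes the combined hook instead of
calling rcu_sched_qs() directly.  A minimal sketch of that hypothetical
call site (the helper name is invented for illustration):

	/*
	 * Hypothetical caller in the context-switch path; not part of
	 * this patch.  One call now covers both RCU-sched and
	 * preemptible RCU.
	 */
	static inline void sched_note_context_switch(int cpu)
	{
		rcu_note_context_switch(cpu);	/* was: rcu_sched_qs(cpu) */
	}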
@@ -1236,11 +1243,11 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
                break; /* grace period idle or initializing, ignore. */
 
        case RCU_SAVE_DYNTICK:
-
-               raw_spin_unlock(&rnp->lock);  /* irqs remain disabled */
                if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK)
                        break; /* So gcc recognizes the dead code. */
 
+               raw_spin_unlock(&rnp->lock);  /* irqs remain disabled */
+
                /* Record dyntick-idle state. */
                force_qs_rnp(rsp, dyntick_save_progress_counter);
                raw_spin_lock(&rnp->lock);  /* irqs already disabled */
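
The reordering matters for gcc's dead-code elimination: RCU_SIGNAL_INIT
presumably equals RCU_SAVE_DYNTICK only in some configurations (e.g.
with CONFIG_NO_HZ), so in the others the break fires and everything
after it is dead code.  Moving the unlock below the check keeps the
break-out path's locking balanced as written and lets the entire live
body be discarded together.  A standalone sketch of the constant-folded
dead-code idiom (enumerators invented for illustration):

	#include <stdio.h>

	enum fqs_state { FQS_SAVE_DYNTICK = 1, FQS_SIGNAL_INIT = 2 };

	int main(void)
	{
		switch (FQS_SAVE_DYNTICK) {
		case FQS_SAVE_DYNTICK:
			/* Compile-time constant comparison: when the two
			 * enumerators differ, gcc folds this test and drops
			 * everything after the break as dead code. */
			if (FQS_SIGNAL_INIT != FQS_SAVE_DYNTICK)
				break;
			puts("only reached when the constants are equal");
		}
		puts("done");
		return 0;
	}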
@@ -1499,6 +1506,16 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 
        /* Is the RCU core waiting for a quiescent state from this CPU? */
        if (rdp->qs_pending) {
+
+               /*
+                * If force_quiescent_state() is coming soon and this CPU
+                * needs a quiescent state, and this is either RCU-sched
+                * or RCU-bh, force a local reschedule.
+                */
+               if (!rdp->preemptable &&
+                   ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs) - 1,
+                                jiffies))
+                       set_need_resched();
                rdp->n_rp_qs_pending++;
                return 1;
        }
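
ULONG_CMP_LT() above is the kernel's wraparound-safe ordering test for
jiffies-style counters; the condition reads "has jiffies reached
rsp->jiffies_force_qs", i.e. a force_quiescent_state() scan is due.  A
self-contained sketch, assuming the macro matches its rcupdate.h
definition:

	#include <limits.h>
	#include <stdio.h>

	/* Assumed to mirror the kernel's definition in rcupdate.h:
	 * "a < b" in modular arithmetic, so the result stays correct
	 * when the counter wraps past ULONG_MAX. */
	#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))

	int main(void)
	{
		unsigned long deadline = ULONG_MAX - 5;	/* set just before wrap */
		unsigned long now = 5;			/* counter has wrapped */

		/* Plain "<" gets the answer backwards across the wrap. */
		printf("plain:   %d\n", deadline < now);		/* 0 */
		printf("modular: %d\n", ULONG_CMP_LT(deadline, now));	/* 1 */
		return 0;
	}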
@@ -1849,6 +1866,14 @@ static void __init rcu_init_one(struct rcu_state *rsp)
                        INIT_LIST_HEAD(&rnp->blocked_tasks[3]);
                }
        }
+
+       rnp = rsp->level[NUM_RCU_LVLS - 1];
+       for_each_possible_cpu(i) {
+               if (i > rnp->grphi)
+                       rnp++;
+               rsp->rda[i]->mynode = rnp;
+               rcu_boot_init_percpu_data(i, rsp);
+       }
 }
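
The per-CPU loop added above walks the leaf level of the rcu_node tree
exactly once, stepping to the next leaf whenever the CPU number passes
the current leaf's ->grphi bound.  A toy version of the same mapping
(structure and ranges invented for illustration):

	#include <stdio.h>

	struct leaf { int grplo, grphi; };

	int main(void)
	{
		struct leaf leaves[] = { { 0, 3 }, { 4, 7 } };
		struct leaf *rnp = leaves;
		int i;

		/* Stand-in for for_each_possible_cpu(): map each CPU to the
		 * leaf whose [grplo, grphi] range covers it, in one pass. */
		for (i = 0; i < 8; i++) {
			if (i > rnp->grphi)
				rnp++;	/* this CPU belongs to the next leaf */
			printf("cpu %d -> leaf [%d, %d]\n",
			       i, rnp->grplo, rnp->grphi);
		}
		return 0;
	}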
 
 /*
@@ -1859,19 +1884,11 @@ static void __init rcu_init_one(struct rcu_state *rsp)
 #define RCU_INIT_FLAVOR(rsp, rcu_data) \
 do { \
        int i; \
-       int j; \
-       struct rcu_node *rnp; \
        \
-       rcu_init_one(rsp); \
-       rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \
-       j = 0; \
        for_each_possible_cpu(i) { \
-               if (i > rnp[j].grphi) \
-                       j++; \
-               per_cpu(rcu_data, i).mynode = &rnp[j]; \
                (rsp)->rda[i] = &per_cpu(rcu_data, i); \
-               rcu_boot_init_percpu_data(i, rsp); \
        } \
+       rcu_init_one(rsp); \
 } while (0)
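
Note the ordering flip in the macro: the rda[] pointers are populated
before rcu_init_one() runs, because rcu_init_one() now dereferences
rsp->rda[i] when wiring up ->mynode.  The resulting boot-time sequence,
derived from the hunks above:

	/*
	 * RCU_INIT_FLAVOR(rsp, rcu_data)
	 *   for_each_possible_cpu(i)
	 *     rsp->rda[i] = &per_cpu(rcu_data, i);   <-- pointers first
	 *   rcu_init_one(rsp)
	 *     ... build the rcu_node tree ...
	 *     for_each_possible_cpu(i)
	 *       rsp->rda[i]->mynode = rnp;            <-- rda[] now valid
	 *       rcu_boot_init_percpu_data(i, rsp);
	 */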
 
 void __init rcu_init(void)