Check to make sure that there are no blocked tasks for the previous
grace period while initializing for the next grace period, and verify
that rcu_preempt_qs() is given the correct CPU number and is never
called for an offline CPU.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: akpm@linux-foundation.org
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
LKML-Reference: <12528585111986-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
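
For reference, here is a minimal userspace sketch (not kernel code; the
structure, counters, and function names below are hypothetical stand-ins
for the real rcu_node fields) of the invariant the new
rcu_preempt_check_blocked_tasks() debug check enforces: readers preempted
in an RCU read-side critical section are queued by grace-period parity,
so the list indexed by the parity of the just-completed grace period must
already be empty before ->gpnum is advanced for the next one.

/* Illustrative sketch only; fake_rcu_node loosely approximates struct rcu_node. */
#include <stdio.h>

struct fake_rcu_node {
	unsigned long gpnum;		/* number of the current grace period */
	int blocked_tasks[2];		/* blocked-reader count, indexed by GP parity */
};

/* Models the new debug check: must run before ->gpnum is advanced. */
static void check_blocked_tasks(struct fake_rcu_node *rnp)
{
	if (rnp->blocked_tasks[rnp->gpnum & 0x1] != 0)
		fprintf(stderr, "bug: completed grace period still has blocked readers\n");
}

static void start_new_gp(struct fake_rcu_node *rnp)
{
	check_blocked_tasks(rnp);	/* previous GP's list must be empty */
	rnp->gpnum++;			/* begin the next grace period */
}

int main(void)
{
	struct fake_rcu_node rnp = { .gpnum = 4, .blocked_tasks = { 0, 0 } };

	/* A reader blocks the upcoming grace period (number 5). */
	rnp.blocked_tasks[(rnp.gpnum + 1) & 0x1] = 1;
	start_new_gp(&rnp);	/* OK: grace period 4's list is empty */
	start_new_gp(&rnp);	/* complains: GP 5 ended with a reader still queued */
	return 0;
}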
/* Special-case the common single-level case. */
if (NUM_RCU_NODES == 1) {
rnp->qsmask = rnp->qsmaskinit;
+ rcu_preempt_check_blocked_tasks(rnp);
rnp->gpnum = rsp->gpnum;
rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
spin_unlock_irqrestore(&rnp->lock, flags);
for (rnp_cur = &rsp->node[0]; rnp_cur < rnp_end; rnp_cur++) {
spin_lock(&rnp_cur->lock); /* irqs already disabled. */
rnp_cur->qsmask = rnp_cur->qsmaskinit;
+ rcu_preempt_check_blocked_tasks(rnp);
rnp->gpnum = rsp->gpnum;
spin_unlock(&rnp_cur->lock); /* irqs already disabled. */
}
if (t->rcu_read_lock_nesting &&
(t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
+ WARN_ON_ONCE(cpu != smp_processor_id());
/* Possibly blocking in an RCU read-side critical section. */
rdp = rcu_preempt_state.rda[cpu];
* state for the current grace period), then as long
* as that task remains queued, the current grace period
* cannot end.
+ *
+ * But first, note that the current CPU must still be
+ * on line!
*/
+ WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
phase = !(rnp->qsmask & rdp->grpmask) ^ (rnp->gpnum & 0x1);
list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]);
smp_mb(); /* Ensure later ctxt swtch seen after above. */
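
As an aside, the WARN_ON_ONCE() just added relies on each CPU owning a
single bit (->grpmask) in its leaf rcu_node, while ->qsmaskinit has,
roughly speaking, bits set only for CPUs that are currently on line; a
zero result therefore means this code path was entered on an offline CPU.
A standalone sketch, with made-up mask values (not taken from the
kernel), is:

/* Illustrative sketch only; the mask values below are invented. */
#include <stdio.h>

int main(void)
{
	unsigned long qsmaskinit = 0x0b;	/* bits 0, 1, 3: CPUs currently on line */
	unsigned long grpmask_cpu1 = 1UL << 1;	/* CPU 1's bit in its leaf node */
	unsigned long grpmask_cpu2 = 1UL << 2;	/* CPU 2's bit; CPU 2 is off line */

	/* Mirrors WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0). */
	printf("CPU 1: %s\n", (grpmask_cpu1 & qsmaskinit) ? "on line" : "WARN: off line");
	printf("CPU 2: %s\n", (grpmask_cpu2 & qsmaskinit) ? "on line" : "WARN: off line");
	return 0;
}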
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+/*
+ * Check that the list of blocked tasks for the newly completed grace
+ * period is in fact empty. It is a serious bug to complete a grace
+ * period that still has RCU readers blocked! This function must be
+ * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
+ * must be held by the caller.
+ */
+static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
+{
+ WARN_ON_ONCE(!list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]));
+}
+
/*
* Check for preempted RCU readers for the specified rcu_node structure.
* If the caller needs a reliable answer, it must hold the rcu_node's
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+/*
+ * Because there is no preemptable RCU, there can be no readers blocked,
+ * so there is no need to check for blocked tasks.
+ */
+static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
+{
+}
+
/*
* Because preemptable RCU does not exist, there are never any preempted
* RCU readers.