#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
+#include <linux/prefetch.h>
#include "rcutree.h"
* handle all flavors of RCU.
*/
static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
+DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
+DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
+DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
static DEFINE_PER_CPU(wait_queue_head_t, rcu_cpu_wq);
-static DEFINE_PER_CPU(char, rcu_cpu_has_work);
+DEFINE_PER_CPU(char, rcu_cpu_has_work);
static char rcu_kthreads_spawnable;
static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
#define RCU_KTHREAD_PRIO 1 /* RT priority for per-CPU kthreads. */
+/*
+ * Track the rcutorture test sequence number and the update version
+ * number within a given test. The rcutorture_testseq is incremented
+ * on every rcutorture module load and unload, so has an odd value
+ * when a test is running. The rcutorture_vernum is set to zero
+ * when rcutorture starts and is incremented on each rcutorture update.
+ * These variables enable correlating rcutorture output with the
+ * RCU tracing information.
+ */
+unsigned long rcutorture_testseq;
+unsigned long rcutorture_vernum;
+
/*
* Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
* permit this function to be invoked without holding the root rcu_node
rcu_sched_qs(cpu);
rcu_preempt_note_context_switch(cpu);
}
+EXPORT_SYMBOL_GPL(rcu_note_context_switch);
#ifdef CONFIG_NO_HZ
DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
.dynticks_nesting = 1,
- .dynticks = ATOMIC_INIT(1),
+ .dynticks = 1,
};
#endif /* #ifdef CONFIG_NO_HZ */
}
EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
+/*
+ * Record the number of times rcutorture tests have been initiated and
+ * terminated. This information allows the debugfs tracing stats to be
+ * correlated to the rcutorture messages, even when the rcutorture module
+ * is being repeatedly loaded and unloaded. In other words, we cannot
+ * store this state in rcutorture itself.
+ */
+void rcutorture_record_test_transition(void)
+{
+ rcutorture_testseq++;
+ rcutorture_vernum = 0;
+}
+EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);
+
+/*
+ * Record the number of writer passes through the current rcutorture test.
+ * This is also used to correlate debugfs tracing stats with the rcutorture
+ * messages.
+ */
+void rcutorture_record_progress(unsigned long vernum)
+{
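+	/* Note that the vernum argument is not used here; the shared counter tracks progress. */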
+ rcutorture_vernum++;
+}
+EXPORT_SYMBOL_GPL(rcutorture_record_progress);
+
/*
* Force a quiescent state for RCU-sched.
*/
return 1;
}
- /* If preemptable RCU, no point in sending reschedule IPI. */
- if (rdp->preemptable)
+ /* If preemptible RCU, no point in sending reschedule IPI. */
+ if (rdp->preemptible)
return 0;
/* The CPU is online, so send it a reschedule IPI. */
unsigned long flags;
struct rcu_dynticks *rdtp;
+ smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
local_irq_save(flags);
rdtp = &__get_cpu_var(rcu_dynticks);
- if (--rdtp->dynticks_nesting) {
- local_irq_restore(flags);
- return;
- }
- /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
- smp_mb__before_atomic_inc(); /* See above. */
- atomic_inc(&rdtp->dynticks);
- smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
+ rdtp->dynticks++;
+ rdtp->dynticks_nesting--;
+ WARN_ON_ONCE(rdtp->dynticks & 0x1);
local_irq_restore(flags);
-
- /* If the interrupt queued a callback, get out of dyntick mode. */
- if (in_irq() &&
- (__get_cpu_var(rcu_sched_data).nxtlist ||
- __get_cpu_var(rcu_bh_data).nxtlist ||
- rcu_preempt_needs_cpu(smp_processor_id())))
- set_need_resched();
}
/*
local_irq_save(flags);
rdtp = &__get_cpu_var(rcu_dynticks);
- if (rdtp->dynticks_nesting++) {
- local_irq_restore(flags);
- return;
- }
- smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
- atomic_inc(&rdtp->dynticks);
- /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
- smp_mb__after_atomic_inc(); /* See above. */
- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+ rdtp->dynticks++;
+ rdtp->dynticks_nesting++;
+ WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
local_irq_restore(flags);
+ smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
}
/**
{
struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
- if (rdtp->dynticks_nmi_nesting == 0 &&
- (atomic_read(&rdtp->dynticks) & 0x1))
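+	/* If the CPU is not in dyntick-idle mode, RCU is already watching, so this NMI needs no counter update. */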
+ if (rdtp->dynticks & 0x1)
return;
- rdtp->dynticks_nmi_nesting++;
- smp_mb__before_atomic_inc(); /* Force delay from prior write. */
- atomic_inc(&rdtp->dynticks);
- /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
- smp_mb__after_atomic_inc(); /* See above. */
- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+ rdtp->dynticks_nmi++;
+ WARN_ON_ONCE(!(rdtp->dynticks_nmi & 0x1));
+ smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
}
/**
{
struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
- if (rdtp->dynticks_nmi_nesting == 0 ||
- --rdtp->dynticks_nmi_nesting != 0)
+ if (rdtp->dynticks & 0x1)
return;
- /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
- smp_mb__before_atomic_inc(); /* See above. */
- atomic_inc(&rdtp->dynticks);
- smp_mb__after_atomic_inc(); /* Force delay to next write. */
- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
+ smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
+ rdtp->dynticks_nmi++;
+ WARN_ON_ONCE(rdtp->dynticks_nmi & 0x1);
}
/**
*/
void rcu_irq_enter(void)
{
- rcu_exit_nohz();
+ struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+
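+	/* Nothing to do if this CPU was already non-idle (e.g., nested interrupt). */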
+ if (rdtp->dynticks_nesting++)
+ return;
+ rdtp->dynticks++;
+ WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
+ smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
}
/**
*/
void rcu_irq_exit(void)
{
- rcu_enter_nohz();
+ struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+
+ if (--rdtp->dynticks_nesting)
+ return;
+ smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
+ rdtp->dynticks++;
+ WARN_ON_ONCE(rdtp->dynticks & 0x1);
+
+ /* If the interrupt queued a callback, get out of dyntick mode. */
+ if (__this_cpu_read(rcu_sched_data.nxtlist) ||
+ __this_cpu_read(rcu_bh_data.nxtlist))
+ set_need_resched();
}
#ifdef CONFIG_SMP
*/
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
- return 0;
+ int ret;
+ int snap;
+ int snap_nmi;
+
+ snap = rdp->dynticks->dynticks;
+ snap_nmi = rdp->dynticks->dynticks_nmi;
+ smp_mb(); /* Order sampling of snap with end of grace period. */
+ rdp->dynticks_snap = snap;
+ rdp->dynticks_nmi_snap = snap_nmi;
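+	/* Both snapshots even => the CPU was in dyntick-idle, an extended quiescent state. */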
+ ret = ((snap & 0x1) == 0) && ((snap_nmi & 0x1) == 0);
+ if (ret)
+ rdp->dynticks_fqs++;
+ return ret;
}
/*
*/
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
{
- unsigned long curr;
- unsigned long snap;
+ long curr;
+ long curr_nmi;
+ long snap;
+ long snap_nmi;
- curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
- snap = (unsigned long)rdp->dynticks_snap;
+ curr = rdp->dynticks->dynticks;
+ snap = rdp->dynticks_snap;
+ curr_nmi = rdp->dynticks->dynticks_nmi;
+ snap_nmi = rdp->dynticks_nmi_snap;
+ smp_mb(); /* force ordering with cpu entering/leaving dynticks. */
/*
* If the CPU passed through or entered a dynticks idle phase with
* read-side critical section that started before the beginning
* of the current RCU grace period.
*/
- if ((curr & 0x1) == 0 || ULONG_CMP_GE(curr, snap + 2)) {
+ if ((curr != snap || (curr & 0x1) == 0) &&
+ (curr_nmi != snap_nmi || (curr_nmi & 0x1) == 0)) {
rdp->dynticks_fqs++;
return 1;
}
static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
{
- long delta;
+ unsigned long j;
+ unsigned long js;
struct rcu_node *rnp;
if (rcu_cpu_stall_suppress)
return;
- delta = jiffies - ACCESS_ONCE(rsp->jiffies_stall);
+ j = ACCESS_ONCE(jiffies);
+ js = ACCESS_ONCE(rsp->jiffies_stall);
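+	/* The ULONG_CMP_GE() comparisons below allow for jiffies counter wrap. */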
rnp = rdp->mynode;
- if ((ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && delta >= 0) {
+ if ((ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && ULONG_CMP_GE(j, js)) {
/* We haven't checked in, so go dump stack. */
print_cpu_stall(rsp);
- } else if (rcu_gp_in_progress(rsp) && delta >= RCU_STALL_RAT_DELAY) {
+ } else if (rcu_gp_in_progress(rsp) &&
+ ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
- /* They had two time units to dump stack, so complain. */
+ /* They had a few time units to dump stack, so complain. */
print_other_cpu_stall(rsp);
}
}
static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
__releases(rcu_get_root(rsp)->lock)
{
- WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
+ unsigned long gp_duration;
- /*
- * Ensure that all grace-period and pre-grace-period activity
- * is seen before the assignment to rsp->completed.
- */
- smp_mb(); /* See above block comment. */
+ WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
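+	/* Track the longest grace-period duration seen so far. */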
+ gp_duration = jiffies - rsp->gp_start;
+ if (gp_duration > rsp->gp_max)
+ rsp->gp_max = gp_duration;
rsp->completed = rsp->gpnum;
rsp->signaled = RCU_GP_IDLE;
rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
raw_spin_unlock_irqrestore(&rnp->lock, flags);
if (need_report & RCU_OFL_TASKS_EXP_GP)
rcu_report_exp_rnp(rsp, rnp);
-
- /*
- * If there are no more online CPUs for this rcu_node structure,
- * kill the rcu_node structure's kthread. Otherwise, adjust its
- * affinity.
- */
- t = rnp->node_kthread_task;
- if (t != NULL &&
- rnp->qsmaskinit == 0) {
- raw_spin_lock_irqsave(&rnp->lock, flags);
- rnp->node_kthread_task = NULL;
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
- kthread_stop(t);
- rcu_stop_boost_kthread(rnp);
- } else
- rcu_node_kthread_setaffinity(rnp, -1);
+ rcu_node_kthread_setaffinity(rnp, -1);
}
/*
next = list->next;
prefetch(next);
debug_rcu_head_unqueue(list);
- list->func(list);
+ __rcu_reclaim(list);
list = next;
if (++count >= rdp->blimit)
break;
return;
}
if (rnp->qsmask == 0) {
- rcu_initiate_boost(rnp);
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
+ rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
continue;
}
cpu = rnp->grplo;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
rnp = rcu_get_root(rsp);
- raw_spin_lock_irqsave(&rnp->lock, flags);
- if (rnp->qsmask == 0)
- rcu_initiate_boost(rnp);
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
+ if (rnp->qsmask == 0) {
+ raw_spin_lock_irqsave(&rnp->lock, flags);
+ rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
+ }
}
/*
*/
static void rcu_process_callbacks(void)
{
+ /*
+ * Memory references from any prior RCU read-side critical sections
+ * executed by the interrupted code must be seen before any RCU
+ * grace-period manipulations below.
+ */
+ smp_mb(); /* See above block comment. */
+
__rcu_process_callbacks(&rcu_sched_state,
&__get_cpu_var(rcu_sched_data));
__rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
rcu_preempt_process_callbacks();
+ /*
+ * Memory references from any later RCU read-side critical sections
+ * executed by the interrupted code must be seen after any RCU
+ * grace-period manipulations above.
+ */
+ smp_mb(); /* See above block comment. */
+
/* If we are last CPU on way to dyntick-idle mode, accelerate it. */
rcu_needs_cpu_flush();
}
static void invoke_rcu_cpu_kthread(void)
{
unsigned long flags;
- wait_queue_head_t *q;
- int cpu;
local_irq_save(flags);
- cpu = smp_processor_id();
- per_cpu(rcu_cpu_has_work, cpu) = 1;
- if (per_cpu(rcu_cpu_kthread_task, cpu) == NULL) {
+ __this_cpu_write(rcu_cpu_has_work, 1);
+ if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
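+		/* The kthread has not yet been spawned; leave the work flag set for it to find. */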
local_irq_restore(flags);
return;
}
- q = &per_cpu(rcu_cpu_wq, cpu);
- wake_up(q);
+ wake_up(&__get_cpu_var(rcu_cpu_wq));
local_irq_restore(flags);
}
/*
* Wake up the specified per-rcu_node-structure kthread.
- * The caller must hold ->lock.
+ * Because the per-rcu_node kthreads are immortal, we don't need
+ * to do anything to keep them alive.
*/
static void invoke_rcu_node_kthread(struct rcu_node *rnp)
{
raw_spin_lock_irqsave(&rnp->lock, flags);
rnp->wakemask |= rdp->grpmask;
- invoke_rcu_node_kthread(rnp);
raw_spin_unlock_irqrestore(&rnp->lock, flags);
+ invoke_rcu_node_kthread(rnp);
}
/*
mod_timer(&yield_timer, jiffies + 2);
sp.sched_priority = 0;
sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
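+	/* Also drop to the lowest nice level while yielding the CPU. */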
+ set_user_nice(current, 19);
schedule();
sp.sched_priority = RCU_KTHREAD_PRIO;
sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
smp_processor_id() != cpu) {
if (kthread_should_stop())
return 1;
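+		/* Note, for tracing, that this kthread is currently running on the wrong CPU. */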
+ per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
+ per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
local_bh_enable();
schedule_timeout_uninterruptible(1);
if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
set_cpus_allowed_ptr(current, cpumask_of(cpu));
local_bh_disable();
}
+ per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
return 0;
}
int cpu = (int)(long)arg;
unsigned long flags;
int spincnt = 0;
+ unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
wait_queue_head_t *wqp = &per_cpu(rcu_cpu_wq, cpu);
char work;
char *workp = &per_cpu(rcu_cpu_has_work, cpu);
for (;;) {
+ *statusp = RCU_KTHREAD_WAITING;
wait_event_interruptible(*wqp,
*workp != 0 || kthread_should_stop());
local_bh_disable();
local_bh_enable();
break;
}
+ *statusp = RCU_KTHREAD_RUNNING;
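+		/* Count each pass through the work loop for the tracing counters. */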
+ per_cpu(rcu_cpu_kthread_loops, cpu)++;
local_irq_save(flags);
work = *workp;
*workp = 0;
else
spincnt = 0;
if (spincnt > 10) {
+ *statusp = RCU_KTHREAD_YIELDING;
rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
spincnt = 0;
}
}
+ *statusp = RCU_KTHREAD_STOPPED;
return 0;
}
if (IS_ERR(t))
return PTR_ERR(t);
kthread_bind(t, cpu);
+ per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
per_cpu(rcu_cpu_kthread_task, cpu) = t;
wake_up_process(t);
struct task_struct *t;
for (;;) {
- wait_event_interruptible(rnp->node_wq, rnp->wakemask != 0 ||
- kthread_should_stop());
- if (kthread_should_stop())
- break;
+ rnp->node_kthread_status = RCU_KTHREAD_WAITING;
+ wait_event_interruptible(rnp->node_wq, rnp->wakemask != 0);
+ rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
raw_spin_lock_irqsave(&rnp->lock, flags);
mask = rnp->wakemask;
rnp->wakemask = 0;
- rcu_initiate_boost(rnp);
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
+ rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
if ((mask & 0x1) == 0)
continue;
preempt_enable();
}
}
+ /* NOTREACHED */
+ rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
return 0;
}
int cpu;
unsigned long mask = rnp->qsmaskinit;
- if (rnp->node_kthread_task == NULL || mask == 0)
+ if (rnp->node_kthread_task == NULL)
return;
if (!alloc_cpumask_var(&cm, GFP_KERNEL))
return;
/* Add the callback to our list. */
*rdp->nxttail[RCU_NEXT_TAIL] = head;
rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
+ rdp->qlen++;
+
+ /* If interrupts were disabled, don't dive into RCU core. */
+ if (irqs_disabled_flags(flags)) {
+ local_irq_restore(flags);
+ return;
+ }
/*
* Force the grace period if too many callbacks or too long waiting.
* invoking force_quiescent_state() if the newly enqueued callback
* is the only one waiting for a grace period to complete.
*/
- if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
+ if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
/* Are we ignoring a completed grace period? */
rcu_process_gp_end(rsp, rdp);
* or RCU-bh, force a local reschedule.
*/
rdp->n_rp_qs_pending++;
- if (!rdp->preemptable &&
+ if (!rdp->preemptible &&
ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs) - 1,
jiffies))
set_need_resched();
* that this CPU cannot possibly have any RCU callbacks in flight yet.
*/
static void __cpuinit
-rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
+rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
{
unsigned long flags;
unsigned long mask;
rdp->passed_quiesc = 0; /* We could be racing with new GP, */
rdp->qs_pending = 1; /* so set up to respond to current GP. */
rdp->beenonline = 1; /* We have now been online. */
- rdp->preemptable = preemptable;
+ rdp->preemptible = preemptible;
rdp->qlen_last_fqs_check = 0;
rdp->n_force_qs_snap = rsp->n_force_qs;
rdp->blimit = blimit;