2 * Read-Copy Update mechanism for mutual exclusion
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 * Copyright IBM Corporation, 2008
20 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
21 * Manfred Spraul <manfred@colorfullife.com>
22 * Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
24 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
25 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
27 * For detailed explanation of Read-Copy Update mechanism see Documentation/RCU.
30 #include <linux/types.h>
31 #include <linux/kernel.h>
32 #include <linux/init.h>
33 #include <linux/spinlock.h>
34 #include <linux/smp.h>
35 #include <linux/rcupdate.h>
36 #include <linux/interrupt.h>
37 #include <linux/sched.h>
38 #include <linux/nmi.h>
39 #include <linux/atomic.h>
40 #include <linux/bitops.h>
41 #include <linux/export.h>
42 #include <linux/completion.h>
43 #include <linux/moduleparam.h>
44 #include <linux/percpu.h>
45 #include <linux/notifier.h>
46 #include <linux/cpu.h>
47 #include <linux/mutex.h>
48 #include <linux/time.h>
49 #include <linux/kernel_stat.h>
50 #include <linux/wait.h>
51 #include <linux/kthread.h>
52 #include <linux/prefetch.h>
55 #include <trace/events/rcu.h>
59 /* Data structures. */
61 static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];
63 #define RCU_STATE_INITIALIZER(structname) { \
64 .level = { &structname##_state.node[0] }, \
66 NUM_RCU_LVL_0, /* root of hierarchy. */ \
70 NUM_RCU_LVL_4, /* == MAX_RCU_LVLS */ \
72 .fqs_state = RCU_GP_IDLE, \
75 .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname##_state.onofflock), \
76 .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&structname##_state.fqslock), \
78 .n_force_qs_ngp = 0, \
79 .name = #structname, \
82 struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched);
83 DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
85 struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh);
86 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
88 static struct rcu_state *rcu_state;
91 * The rcu_scheduler_active variable transitions from zero to one just
92 * before the first task is spawned. So when this variable is zero, RCU
93 * can assume that there is but one task, allowing RCU to (for example)
94 * optimize synchronize_sched() to a simple barrier(). When this variable
95 * is one, RCU must actually do all the hard work required to detect real
96 * grace periods. This variable is also used to suppress boot-time false
97 * positives from lockdep-RCU error checking.
99 int rcu_scheduler_active __read_mostly;
100 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
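/*
 * A minimal sketch of the optimization described above, kept out of the
 * build: while rcu_scheduler_active is zero there is but one task, so
 * nothing can be inside a read-side critical section concurrently and a
 * simple barrier() already acts as a grace period.  The function name is
 * made up for the example; the synchronize_sched() defined later in this
 * file reaches its fast path via rcu_blocking_is_gp() instead.
 */
#if 0
static void example_synchronize_sched(void)
{
        if (!rcu_scheduler_active) {
                barrier();      /* single task: trivially a grace period */
                return;
        }
        wait_rcu_gp(call_rcu_sched);    /* otherwise do the real wait */
}
#endif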
103 * The rcu_scheduler_fully_active variable transitions from zero to one
104 * during the early_initcall() processing, which is after the scheduler
105 * is capable of creating new tasks. So RCU processing (for example,
106 * creating tasks for RCU priority boosting) must be delayed until after
107 * rcu_scheduler_fully_active transitions from zero to one. We also
108 * currently delay invocation of any RCU callbacks until after this point.
110 * It might later prove better for people registering RCU callbacks during
111 * early boot to take responsibility for these callbacks, but one step at a time.
114 static int rcu_scheduler_fully_active __read_mostly;
116 #ifdef CONFIG_RCU_BOOST
119 * Control variables for per-CPU and per-rcu_node kthreads. These
120 * handle all flavors of RCU.
122 static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
123 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
124 DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
125 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
126 DEFINE_PER_CPU(char, rcu_cpu_has_work);
128 #endif /* #ifdef CONFIG_RCU_BOOST */
130 static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
131 static void invoke_rcu_core(void);
132 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
135 * Track the rcutorture test sequence number and the update version
136 * number within a given test. The rcutorture_testseq is incremented
137 * on every rcutorture module load and unload, so has an odd value
138 * when a test is running. The rcutorture_vernum is set to zero
139 * when rcutorture starts and is incremented on each rcutorture update.
140 * These variables enable correlating rcutorture output with the
141 * RCU tracing information.
143 unsigned long rcutorture_testseq;
144 unsigned long rcutorture_vernum;
147 * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
148 * permit this function to be invoked without holding the root rcu_node
149 * structure's ->lock, but of course results can be subject to change.
151 static int rcu_gp_in_progress(struct rcu_state *rsp)
153 return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
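/*
 * A standalone model (kept out of the build) of the two counters this
 * test relies on: ->gpnum advances when a grace period starts and
 * ->completed is brought up to match it when that grace period ends, so
 * "completed != gpnum" is precisely "a grace period is in progress".
 */
#if 0
#include <assert.h>

int main(void)
{
        unsigned long gpnum = 0, completed = 0;

        assert(completed == gpnum);     /* no grace period in progress */
        gpnum++;                        /* a grace period starts */
        assert(completed != gpnum);     /* ...so one is now in progress */
        completed = gpnum;              /* the grace period completes */
        assert(completed == gpnum);     /* idle again */
        return 0;
}
#endif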
157 * Note a quiescent state. Because we do not need to know
158 * how many quiescent states passed, just if there was at least
159 * one since the start of the grace period, this just sets a flag.
160 * The caller must have disabled preemption.
162 void rcu_sched_qs(int cpu)
164 struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);
166 rdp->passed_quiesce_gpnum = rdp->gpnum;
168 if (rdp->passed_quiesce == 0)
169 trace_rcu_grace_period("rcu_sched", rdp->gpnum, "cpuqs");
170 rdp->passed_quiesce = 1;
173 void rcu_bh_qs(int cpu)
175 struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
177 rdp->passed_quiesce_gpnum = rdp->gpnum;
179 if (rdp->passed_quiesce == 0)
180 trace_rcu_grace_period("rcu_bh", rdp->gpnum, "cpuqs");
181 rdp->passed_quiesce = 1;
185 * Note a context switch. This is a quiescent state for RCU-sched,
186 * and requires special handling for preemptible RCU.
187 * The caller must have disabled preemption.
189 void rcu_note_context_switch(int cpu)
191 trace_rcu_utilization("Start context switch");
193 rcu_preempt_note_context_switch(cpu);
194 trace_rcu_utilization("End context switch");
196 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
198 DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
199 .dynticks_nesting = DYNTICK_TASK_NESTING,
200 .dynticks = ATOMIC_INIT(1),
203 static int blimit = 10; /* Maximum callbacks per rcu_do_batch. */
204 static int qhimark = 10000; /* If this many pending, ignore blimit. */
205 static int qlowmark = 100; /* Once only this many pending, use blimit. */
207 module_param(blimit, int, 0);
208 module_param(qhimark, int, 0);
209 module_param(qlowmark, int, 0);
211 int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
212 int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
214 module_param(rcu_cpu_stall_suppress, int, 0644);
215 module_param(rcu_cpu_stall_timeout, int, 0644);
217 static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
218 static int rcu_pending(int cpu);
221 * Return the number of RCU-sched batches processed thus far for debug & stats.
223 long rcu_batches_completed_sched(void)
225 return rcu_sched_state.completed;
227 EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
230 * Return the number of RCU BH batches processed thus far for debug & stats.
232 long rcu_batches_completed_bh(void)
234 return rcu_bh_state.completed;
236 EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
239 * Force a quiescent state for RCU BH.
241 void rcu_bh_force_quiescent_state(void)
243 force_quiescent_state(&rcu_bh_state, 0);
245 EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
248 * Record the number of times rcutorture tests have been initiated and
249 * terminated. This information allows the debugfs tracing stats to be
250 * correlated to the rcutorture messages, even when the rcutorture module
251 * is being repeatedly loaded and unloaded. In other words, we cannot
252 * store this state in rcutorture itself.
254 void rcutorture_record_test_transition(void)
256 rcutorture_testseq++;
257 rcutorture_vernum = 0;
259 EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);
262 * Record the number of writer passes through the current rcutorture test.
263 * This is also used to correlate debugfs tracing stats with the rcutorture messages.
266 void rcutorture_record_progress(unsigned long vernum)
270 EXPORT_SYMBOL_GPL(rcutorture_record_progress);
273 * Force a quiescent state for RCU-sched.
275 void rcu_sched_force_quiescent_state(void)
277 force_quiescent_state(&rcu_sched_state, 0);
279 EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
282 * Does the CPU have callbacks ready to be invoked?
285 cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
287 return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL];
291 * Does the current CPU require an as-yet-unscheduled grace period?
294 cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
296 return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp);
300 * Return the root node of the specified rcu_state structure.
302 static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
304 return &rsp->node[0];
308 * If the specified CPU is offline, tell the caller that it is in
309 * a quiescent state. Otherwise, whack it with a reschedule IPI.
310 * Grace periods can end up waiting on an offline CPU when that
311 * CPU is in the process of coming online -- it will be added to the
312 * rcu_node bitmasks before it actually makes it online. The same thing
313 * can happen while a CPU is in the process of going offline. Because this
314 * race is quite rare, we check for it after detecting that the grace
315 * period has been delayed rather than checking each and every CPU
316 * each and every time we start a new grace period.
318 static int rcu_implicit_offline_qs(struct rcu_data *rdp)
321 * If the CPU is offline, it is in a quiescent state. We can
322 * trust its state not to change because interrupts are disabled.
324 if (cpu_is_offline(rdp->cpu)) {
325 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "ofl");
331 * The CPU is online, so send it a reschedule IPI. This forces
332 * it through the scheduler, and (inefficiently) also handles cases
333 * where idle loops fail to inform RCU about the CPU being idle.
335 if (rdp->cpu != smp_processor_id())
336 smp_send_reschedule(rdp->cpu);
344 * rcu_idle_enter_common - inform RCU that current CPU is moving towards idle
346 * If the new value of the ->dynticks_nesting counter now is zero,
347 * we really have entered idle, and must do the appropriate accounting.
348 * The caller must have disabled interrupts.
350 static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
352 trace_rcu_dyntick("Start", oldval, 0);
353 if (!is_idle_task(current)) {
354 struct task_struct *idle = idle_task(smp_processor_id());
356 trace_rcu_dyntick("Error on entry: not idle task", oldval, 0);
357 ftrace_dump(DUMP_ALL);
358 WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
359 current->pid, current->comm,
360 idle->pid, idle->comm); /* must be idle task! */
362 rcu_prepare_for_idle(smp_processor_id());
363 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
364 smp_mb__before_atomic_inc(); /* See above. */
365 atomic_inc(&rdtp->dynticks);
366 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
367 WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
370 * The idle task is not permitted to enter the idle loop while
371 * in an RCU read-side critical section.
373 rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
374 "Illegal idle entry in RCU read-side critical section.");
375 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),
376 "Illegal idle entry in RCU-bh read-side critical section.");
377 rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),
378 "Illegal idle entry in RCU-sched read-side critical section.");
382 * rcu_idle_enter - inform RCU that current CPU is entering idle
384 * Enter idle mode, in other words, -leave- the mode in which RCU
385 * read-side critical sections can occur. (Though RCU read-side
386 * critical sections can occur in irq handlers in idle, a possibility
387 * handled by irq_enter() and irq_exit().)
389 * We crowbar the ->dynticks_nesting field to zero to allow for
390 * the possibility of usermode upcalls having messed up our count
391 * of interrupt nesting level during the prior busy period.
393 void rcu_idle_enter(void)
397 struct rcu_dynticks *rdtp;
399 local_irq_save(flags);
400 rdtp = &__get_cpu_var(rcu_dynticks);
401 oldval = rdtp->dynticks_nesting;
402 rdtp->dynticks_nesting = 0;
403 rcu_idle_enter_common(rdtp, oldval);
404 local_irq_restore(flags);
408 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
410 * Exit from an interrupt handler, which might possibly result in entering
411 * idle mode, in other words, leaving the mode in which read-side critical
412 * sections can occur.
414 * This code assumes that the idle loop never does anything that might
415 * result in unbalanced calls to irq_enter() and irq_exit(). If your
416 * architecture violates this assumption, RCU will give you what you
417 * deserve, good and hard. But very infrequently and irreproducibly.
419 * Use things like work queues to work around this limitation.
421 * You have been warned.
423 void rcu_irq_exit(void)
427 struct rcu_dynticks *rdtp;
429 local_irq_save(flags);
430 rdtp = &__get_cpu_var(rcu_dynticks);
431 oldval = rdtp->dynticks_nesting;
432 rdtp->dynticks_nesting--;
433 WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
434 if (rdtp->dynticks_nesting)
435 trace_rcu_dyntick("--=", oldval, rdtp->dynticks_nesting);
437 rcu_idle_enter_common(rdtp, oldval);
438 local_irq_restore(flags);
442 * rcu_idle_exit_common - inform RCU that current CPU is moving away from idle
444 * If the new value of the ->dynticks_nesting counter was previously zero,
445 * we really have exited idle, and must do the appropriate accounting.
446 * The caller must have disabled interrupts.
448 static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
450 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
451 atomic_inc(&rdtp->dynticks);
452 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
453 smp_mb__after_atomic_inc(); /* See above. */
454 WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
455 rcu_cleanup_after_idle(smp_processor_id());
456 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
457 if (!is_idle_task(current)) {
458 struct task_struct *idle = idle_task(smp_processor_id());
460 trace_rcu_dyntick("Error on exit: not idle task",
461 oldval, rdtp->dynticks_nesting);
462 ftrace_dump(DUMP_ALL);
463 WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
464 current->pid, current->comm,
465 idle->pid, idle->comm); /* must be idle task! */
470 * rcu_idle_exit - inform RCU that current CPU is leaving idle
472 * Exit idle mode, in other words, -enter- the mode in which RCU
473 * read-side critical sections can occur.
475 * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NESTING to
476 * allow for the possibility of usermode upcalls messing up our count
477 * of interrupt nesting level during the busy period that is just now starting.
480 void rcu_idle_exit(void)
483 struct rcu_dynticks *rdtp;
486 local_irq_save(flags);
487 rdtp = &__get_cpu_var(rcu_dynticks);
488 oldval = rdtp->dynticks_nesting;
489 WARN_ON_ONCE(oldval != 0);
490 rdtp->dynticks_nesting = DYNTICK_TASK_NESTING;
491 rcu_idle_exit_common(rdtp, oldval);
492 local_irq_restore(flags);
496 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
498 * Enter an interrupt handler, which might possibly result in exiting
499 * idle mode, in other words, entering the mode in which read-side critical
500 * sections can occur.
502 * Note that the Linux kernel is fully capable of entering an interrupt
503 * handler that it never exits, for example when doing upcalls to
504 * user mode! This code assumes that the idle loop never does upcalls to
505 * user mode. If your architecture does do upcalls from the idle loop (or
506 * does anything else that results in unbalanced calls to the irq_enter()
507 * and irq_exit() functions), RCU will give you what you deserve, good
508 * and hard. But very infrequently and irreproducibly.
510 * Use things like work queues to work around this limitation.
512 * You have been warned.
514 void rcu_irq_enter(void)
517 struct rcu_dynticks *rdtp;
520 local_irq_save(flags);
521 rdtp = &__get_cpu_var(rcu_dynticks);
522 oldval = rdtp->dynticks_nesting;
523 rdtp->dynticks_nesting++;
524 WARN_ON_ONCE(rdtp->dynticks_nesting == 0);
526 trace_rcu_dyntick("++=", oldval, rdtp->dynticks_nesting);
528 rcu_idle_exit_common(rdtp, oldval);
529 local_irq_restore(flags);
533 * rcu_nmi_enter - inform RCU of entry to NMI context
535 * If the CPU was idle with dynamic ticks active, and there is no
536 * irq handler running, this updates rdtp->dynticks to let the
537 * RCU grace-period handling know that the CPU is active.
539 void rcu_nmi_enter(void)
541 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
543 if (rdtp->dynticks_nmi_nesting == 0 &&
544 (atomic_read(&rdtp->dynticks) & 0x1))
546 rdtp->dynticks_nmi_nesting++;
547 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
548 atomic_inc(&rdtp->dynticks);
549 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
550 smp_mb__after_atomic_inc(); /* See above. */
551 WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
555 * rcu_nmi_exit - inform RCU of exit from NMI context
557 * If the CPU was idle with dynamic ticks active, and there is no
558 * irq handler running, this updates rdtp->dynticks to let the
559 * RCU grace-period handling know that the CPU is no longer active.
561 void rcu_nmi_exit(void)
563 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
565 if (rdtp->dynticks_nmi_nesting == 0 ||
566 --rdtp->dynticks_nmi_nesting != 0)
568 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
569 smp_mb__before_atomic_inc(); /* See above. */
570 atomic_inc(&rdtp->dynticks);
571 smp_mb__after_atomic_inc(); /* Force delay to next write. */
572 WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
575 #ifdef CONFIG_PROVE_RCU
578 * rcu_is_cpu_idle - see if RCU thinks that the current CPU is idle
580 * If the current CPU is in its idle loop and is not in an interrupt
581 * or NMI handler, return true.
583 int rcu_is_cpu_idle(void)
588 ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
592 EXPORT_SYMBOL(rcu_is_cpu_idle);
594 #endif /* #ifdef CONFIG_PROVE_RCU */
597 * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
599 * If the current CPU is idle or running at a first-level (not nested)
600 * interrupt from idle, return true. The caller must have at least
601 * disabled preemption.
603 int rcu_is_cpu_rrupt_from_idle(void)
605 return __get_cpu_var(rcu_dynticks).dynticks_nesting <= 1;
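/*
 * A standalone sketch (kept out of the build) of what the "<= 1" test
 * above means: rcu_idle_enter() forces ->dynticks_nesting to zero and
 * each irq_enter() increments it, so 0 is the idle loop itself, 1 is a
 * first-level interrupt taken from idle, and anything larger is either
 * a nested interrupt or ordinary task context (which carries the large
 * DYNTICK_TASK_NESTING bias).  The bias value below is only a stand-in.
 */
#if 0
#include <limits.h>
#include <stdio.h>

#define MODEL_TASK_NESTING      (LLONG_MAX / 2) /* stand-in for DYNTICK_TASK_NESTING */

static int model_rrupt_from_idle(long long nesting)
{
        return nesting <= 1;
}

int main(void)
{
        printf("%d\n", model_rrupt_from_idle(0));                   /* 1: idle loop */
        printf("%d\n", model_rrupt_from_idle(1));                   /* 1: first-level irq from idle */
        printf("%d\n", model_rrupt_from_idle(2));                   /* 0: nested irq */
        printf("%d\n", model_rrupt_from_idle(MODEL_TASK_NESTING));  /* 0: ordinary task context */
        return 0;
}
#endif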
609 * Snapshot the specified CPU's dynticks counter so that we can later
610 * credit them with an implicit quiescent state. Return 1 if this CPU
611 * is in dynticks idle mode, which is an extended quiescent state.
613 static int dyntick_save_progress_counter(struct rcu_data *rdp)
615 rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
616 return (rdp->dynticks_snap & 0x1) == 0;
620 * Return true if the specified CPU has passed through a quiescent
621 * state by virtue of being in or having passed through a dynticks
622 * idle state since the last call to dyntick_save_progress_counter() for this same CPU.
625 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
630 curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
631 snap = (unsigned int)rdp->dynticks_snap;
634 * If the CPU passed through or entered a dynticks idle phase with
635 * no active irq/NMI handlers, then we can safely pretend that the CPU
636 * already acknowledged the request to pass through a quiescent
637 * state. Either way, that CPU cannot possibly be in an RCU
638 * read-side critical section that started before the beginning
639 * of the current RCU grace period.
641 if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) {
642 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "dti");
647 /* Go check for the CPU being offline. */
648 return rcu_implicit_offline_qs(rdp);
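/*
 * A standalone sketch (kept out of the build) of why the test above
 * treats "counter advanced by at least two" as a quiescent state: the
 * counter is odd while the CPU is active, so going from one odd
 * snapshot to a value at least two larger means the CPU must have
 * passed through an even (idle) value in between.  The comparison
 * macro below is only a stand-in for the kernel's wraparound-tolerant
 * UINT_CMP_GE().
 */
#if 0
#include <limits.h>
#include <stdio.h>

#define MODEL_UINT_CMP_GE(a, b) (UINT_MAX / 2 >= (unsigned int)((a) - (b)))

static int model_cpu_was_quiescent(unsigned int curr, unsigned int snap)
{
        return (curr & 0x1) == 0 || MODEL_UINT_CMP_GE(curr, snap + 2);
}

int main(void)
{
        printf("%d\n", model_cpu_was_quiescent(5, 5));  /* 0: still active, no idle since snapshot */
        printf("%d\n", model_cpu_was_quiescent(6, 5));  /* 1: idle right now */
        printf("%d\n", model_cpu_was_quiescent(7, 5));  /* 1: was idle at some point since snapshot */
        return 0;
}
#endif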
651 static int jiffies_till_stall_check(void)
653 int till_stall_check = ACCESS_ONCE(rcu_cpu_stall_timeout);
656 * Limit check must be consistent with the Kconfig limits
657 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
659 if (till_stall_check < 3) {
660 ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
661 till_stall_check = 3;
662 } else if (till_stall_check > 300) {
663 ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
664 till_stall_check = 300;
666 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
669 static void record_gp_stall_check_time(struct rcu_state *rsp)
671 rsp->gp_start = jiffies;
672 rsp->jiffies_stall = jiffies + jiffies_till_stall_check();
675 static void print_other_cpu_stall(struct rcu_state *rsp)
681 struct rcu_node *rnp = rcu_get_root(rsp);
683 /* Only let one CPU complain about others per time interval. */
685 raw_spin_lock_irqsave(&rnp->lock, flags);
686 delta = jiffies - rsp->jiffies_stall;
687 if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
688 raw_spin_unlock_irqrestore(&rnp->lock, flags);
691 rsp->jiffies_stall = jiffies + 3 * jiffies_till_stall_check() + 3;
694 * Now rat on any tasks that got kicked up to the root rcu_node
695 * due to CPU offlining.
697 ndetected = rcu_print_task_stall(rnp);
698 raw_spin_unlock_irqrestore(&rnp->lock, flags);
701 * OK, time to rat on our buddy...
702 * See Documentation/RCU/stallwarn.txt for info on how to debug
703 * RCU CPU stall warnings.
705 printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks: {",
707 rcu_for_each_leaf_node(rsp, rnp) {
708 raw_spin_lock_irqsave(&rnp->lock, flags);
709 ndetected += rcu_print_task_stall(rnp);
710 raw_spin_unlock_irqrestore(&rnp->lock, flags);
711 if (rnp->qsmask == 0)
713 for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
714 if (rnp->qsmask & (1UL << cpu)) {
715 printk(" %d", rnp->grplo + cpu);
719 printk("} (detected by %d, t=%ld jiffies)\n",
720 smp_processor_id(), (long)(jiffies - rsp->gp_start));
722 printk(KERN_ERR "INFO: Stall ended before state dump start\n");
723 else if (!trigger_all_cpu_backtrace())
726 /* If so configured, complain about tasks blocking the grace period. */
728 rcu_print_detail_task_stall(rsp);
730 force_quiescent_state(rsp, 0); /* Kick them all. */
733 static void print_cpu_stall(struct rcu_state *rsp)
736 struct rcu_node *rnp = rcu_get_root(rsp);
739 * OK, time to rat on ourselves...
740 * See Documentation/RCU/stallwarn.txt for info on how to debug
741 * RCU CPU stall warnings.
743 printk(KERN_ERR "INFO: %s detected stall on CPU %d (t=%lu jiffies)\n",
744 rsp->name, smp_processor_id(), jiffies - rsp->gp_start);
745 if (!trigger_all_cpu_backtrace())
748 raw_spin_lock_irqsave(&rnp->lock, flags);
749 if (ULONG_CMP_GE(jiffies, rsp->jiffies_stall))
750 rsp->jiffies_stall = jiffies +
751 3 * jiffies_till_stall_check() + 3;
752 raw_spin_unlock_irqrestore(&rnp->lock, flags);
754 set_need_resched(); /* kick ourselves to get things going. */
757 static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
761 struct rcu_node *rnp;
763 if (rcu_cpu_stall_suppress)
765 j = ACCESS_ONCE(jiffies);
766 js = ACCESS_ONCE(rsp->jiffies_stall);
768 if ((ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && ULONG_CMP_GE(j, js)) {
770 /* We haven't checked in, so go dump stack. */
771 print_cpu_stall(rsp);
773 } else if (rcu_gp_in_progress(rsp) &&
774 ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
776 /* They had a few time units to dump stack, so complain. */
777 print_other_cpu_stall(rsp);
781 static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
783 rcu_cpu_stall_suppress = 1;
788 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
790 * Set the stall-warning timeout way off into the future, thus preventing
791 * any RCU CPU stall-warning messages from appearing in the current set of
794 * The caller must disable hard irqs.
796 void rcu_cpu_stall_reset(void)
798 rcu_sched_state.jiffies_stall = jiffies + ULONG_MAX / 2;
799 rcu_bh_state.jiffies_stall = jiffies + ULONG_MAX / 2;
800 rcu_preempt_stall_reset();
803 static struct notifier_block rcu_panic_block = {
804 .notifier_call = rcu_panic,
807 static void __init check_cpu_stall_init(void)
809 atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
813 * Update CPU-local rcu_data state to record the newly noticed grace period.
814 * This is used both when we started the grace period and when we notice
815 * that someone else started the grace period. The caller must hold the
816 * ->lock of the leaf rcu_node structure corresponding to the current CPU,
817 * and must have irqs disabled.
819 static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
821 if (rdp->gpnum != rnp->gpnum) {
823 * If the current grace period is waiting for this CPU,
824 * set up to detect a quiescent state, otherwise don't
825 * go looking for one.
827 rdp->gpnum = rnp->gpnum;
828 trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpustart");
829 if (rnp->qsmask & rdp->grpmask) {
831 rdp->passed_quiesce = 0;
837 static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
840 struct rcu_node *rnp;
842 local_irq_save(flags);
844 if (rdp->gpnum == ACCESS_ONCE(rnp->gpnum) || /* outside lock. */
845 !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
846 local_irq_restore(flags);
849 __note_new_gpnum(rsp, rnp, rdp);
850 raw_spin_unlock_irqrestore(&rnp->lock, flags);
854 * Did someone else start a new RCU grace period since we last
855 * checked? Update local state appropriately if so. Must be called
856 * on the CPU corresponding to rdp.
859 check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
864 local_irq_save(flags);
865 if (rdp->gpnum != rsp->gpnum) {
866 note_new_gpnum(rsp, rdp);
869 local_irq_restore(flags);
874 * Advance this CPU's callbacks, but only if the current grace period
875 * has ended. This may be called only from the CPU to whom the rdp
876 * belongs. In addition, the corresponding leaf rcu_node structure's
877 * ->lock must be held by the caller, with irqs disabled.
880 __rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
882 /* Did another grace period end? */
883 if (rdp->completed != rnp->completed) {
885 /* Advance callbacks. No harm if list empty. */
886 rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
887 rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
888 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
890 /* Remember that we saw this grace-period completion. */
891 rdp->completed = rnp->completed;
892 trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuend");
895 * If we were in an extended quiescent state, we may have
896 * missed some grace periods that other CPUs handled on
897 * our behalf. Catch up with this state to avoid noting
898 * spurious new grace periods. If another grace period
899 * has started, then rnp->gpnum will have advanced, so
900 * we will detect this later on.
902 if (ULONG_CMP_LT(rdp->gpnum, rdp->completed))
903 rdp->gpnum = rdp->completed;
906 * If RCU does not need a quiescent state from this CPU,
907 * then make sure that this CPU doesn't go looking for one.
909 if ((rnp->qsmask & rdp->grpmask) == 0)
915 * Advance this CPU's callbacks, but only if the current grace period
916 * has ended. This may be called only from the CPU to whom the rdp belongs.
920 rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
923 struct rcu_node *rnp;
925 local_irq_save(flags);
927 if (rdp->completed == ACCESS_ONCE(rnp->completed) || /* outside lock. */
928 !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
929 local_irq_restore(flags);
932 __rcu_process_gp_end(rsp, rnp, rdp);
933 raw_spin_unlock_irqrestore(&rnp->lock, flags);
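/*
 * A standalone model (kept out of the build) of the segmented callback
 * list manipulated by __rcu_process_gp_end() above: the per-CPU
 * callbacks live on one singly linked list (->nxtlist) carved into
 * segments by an array of tail pointers (->nxttail[]).  New callbacks
 * are appended at *nxttail[RCU_NEXT_TAIL] (as __call_rcu() does later in
 * this file), and "advancing" callbacks when a grace period ends never
 * walks the list; it only copies tail pointers.
 */
#if 0
#include <stddef.h>
#include <stdio.h>

enum { MODEL_DONE_TAIL, MODEL_WAIT_TAIL, MODEL_NEXT_READY_TAIL, MODEL_NEXT_TAIL, MODEL_NEXT_SIZE };

struct model_head { struct model_head *next; };

struct model_data {
        struct model_head *nxtlist;
        struct model_head **nxttail[MODEL_NEXT_SIZE];
};

static void model_init(struct model_data *d)
{
        int i;

        d->nxtlist = NULL;
        for (i = 0; i < MODEL_NEXT_SIZE; i++)
                d->nxttail[i] = &d->nxtlist;
}

/* Enqueue a new callback; it lands in the not-yet-associated segment. */
static void model_enqueue(struct model_data *d, struct model_head *h)
{
        h->next = NULL;
        *d->nxttail[MODEL_NEXT_TAIL] = h;
        d->nxttail[MODEL_NEXT_TAIL] = &h->next;
}

/* A grace period ended: slide every segment one step toward DONE. */
static void model_advance(struct model_data *d)
{
        d->nxttail[MODEL_DONE_TAIL] = d->nxttail[MODEL_WAIT_TAIL];
        d->nxttail[MODEL_WAIT_TAIL] = d->nxttail[MODEL_NEXT_READY_TAIL];
        d->nxttail[MODEL_NEXT_READY_TAIL] = d->nxttail[MODEL_NEXT_TAIL];
}

int main(void)
{
        struct model_data d;
        struct model_head a, b;

        model_init(&d);
        model_enqueue(&d, &a);
        model_enqueue(&d, &b);
        model_advance(&d);      /* a, b now ready for the next grace period */
        model_advance(&d);      /* a, b now waiting for the current grace period */
        model_advance(&d);      /* that grace period ended: a, b are done */
        printf("%d\n", &d.nxtlist != d.nxttail[MODEL_DONE_TAIL]);  /* prints 1: done segment non-empty */
        return 0;
}
#endif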
937 * Do per-CPU grace-period initialization for running CPU. The caller
938 * must hold the lock of the leaf rcu_node structure corresponding to this CPU.
942 rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
944 /* Prior grace period ended, so advance callbacks for current CPU. */
945 __rcu_process_gp_end(rsp, rnp, rdp);
948 * Because this CPU just now started the new grace period, we know
949 * that all of its callbacks will be covered by this upcoming grace
950 * period, even the ones that were registered arbitrarily recently.
951 * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL.
953 * Other CPUs cannot be sure exactly when the grace period started.
954 * Therefore, their recently registered callbacks must pass through
955 * an additional RCU_NEXT_READY stage, so that they will be handled
956 * by the next RCU grace period.
958 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
959 rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
961 /* Set state so that this CPU will detect the next quiescent state. */
962 __note_new_gpnum(rsp, rnp, rdp);
966 * Start a new RCU grace period if warranted, re-initializing the hierarchy
967 * in preparation for detecting the next grace period. The caller must hold
968 * the root node's ->lock, which is released before return. Hard irqs must be disabled.
971 * Note that it is legal for a dying CPU (which is marked as offline) to
972 * invoke this function. This can happen when the dying CPU reports its quiescent state.
976 rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
977 __releases(rcu_get_root(rsp)->lock)
979 struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
980 struct rcu_node *rnp = rcu_get_root(rsp);
982 if (!rcu_scheduler_fully_active ||
983 !cpu_needs_another_gp(rsp, rdp)) {
985 * Either the scheduler hasn't yet spawned the first
986 * non-idle task or this CPU does not need another
987 * grace period. Either way, don't start a new grace
990 raw_spin_unlock_irqrestore(&rnp->lock, flags);
994 if (rsp->fqs_active) {
996 * This CPU needs a grace period, but force_quiescent_state()
997 * is running. Tell it to start one on this CPU's behalf.
999 rsp->fqs_need_gp = 1;
1000 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1004 /* Advance to a new grace period and initialize state. */
1006 trace_rcu_grace_period(rsp->name, rsp->gpnum, "start");
1007 WARN_ON_ONCE(rsp->fqs_state == RCU_GP_INIT);
1008 rsp->fqs_state = RCU_GP_INIT; /* Hold off force_quiescent_state. */
1009 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
1010 record_gp_stall_check_time(rsp);
1011 raw_spin_unlock(&rnp->lock); /* leave irqs disabled. */
1013 /* Exclude any concurrent CPU-hotplug operations. */
1014 raw_spin_lock(&rsp->onofflock); /* irqs already disabled. */
1017 * Set the quiescent-state-needed bits in all the rcu_node
1018 * structures for all currently online CPUs in breadth-first
1019 * order, starting from the root rcu_node structure. This
1020 * operation relies on the layout of the hierarchy within the
1021 * rsp->node[] array. Note that other CPUs will access only
1022 * the leaves of the hierarchy, which still indicate that no
1023 * grace period is in progress, at least until the corresponding
1024 * leaf node has been initialized. In addition, we have excluded
1025 * CPU-hotplug operations.
1027 * Note that the grace period cannot complete until we finish
1028 * the initialization process, as there will be at least one
1029 * qsmask bit set in the root node until that time, namely the
1030 * one corresponding to this CPU, due to the fact that we have irqs disabled.
1033 rcu_for_each_node_breadth_first(rsp, rnp) {
1034 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
1035 rcu_preempt_check_blocked_tasks(rnp);
1036 rnp->qsmask = rnp->qsmaskinit;
1037 rnp->gpnum = rsp->gpnum;
1038 rnp->completed = rsp->completed;
1039 if (rnp == rdp->mynode)
1040 rcu_start_gp_per_cpu(rsp, rnp, rdp);
1041 rcu_preempt_boost_start_gp(rnp);
1042 trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
1043 rnp->level, rnp->grplo,
1044 rnp->grphi, rnp->qsmask);
1045 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
1048 rnp = rcu_get_root(rsp);
1049 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
1050 rsp->fqs_state = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
1051 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
1052 raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
1056 * Report a full set of quiescent states to the specified rcu_state
1057 * data structure. This involves cleaning up after the prior grace
1058 * period and letting rcu_start_gp() start up the next grace period
1059 * if one is needed. Note that the caller must hold rnp->lock, as
1060 * required by rcu_start_gp(), which will release it.
1062 static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
1063 __releases(rcu_get_root(rsp)->lock)
1065 unsigned long gp_duration;
1066 struct rcu_node *rnp = rcu_get_root(rsp);
1067 struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
1069 WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
1072 * Ensure that all grace-period and pre-grace-period activity
1073 * is seen before the assignment to rsp->completed.
1075 smp_mb(); /* See above block comment. */
1076 gp_duration = jiffies - rsp->gp_start;
1077 if (gp_duration > rsp->gp_max)
1078 rsp->gp_max = gp_duration;
1081 * We know the grace period is complete, but to everyone else
1082 * it appears to still be ongoing. But it is also the case
1083 * that to everyone else it looks like there is nothing that
1084 * they can do to advance the grace period. It is therefore
1085 * safe for us to drop the lock in order to mark the grace
1086 * period as completed in all of the rcu_node structures.
1088 * But if this CPU needs another grace period, it will take
1089 * care of this while initializing the next grace period.
1090 * We use RCU_WAIT_TAIL instead of the usual RCU_DONE_TAIL
1091 * because the callbacks have not yet been advanced: Those
1092 * callbacks are waiting on the grace period that just now completed.
1095 if (*rdp->nxttail[RCU_WAIT_TAIL] == NULL) {
1096 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
1099 * Propagate new ->completed value to rcu_node structures
1100 * so that other CPUs don't have to wait until the start
1101 * of the next grace period to process their callbacks.
1103 rcu_for_each_node_breadth_first(rsp, rnp) {
1104 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
1105 rnp->completed = rsp->gpnum;
1106 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
1108 rnp = rcu_get_root(rsp);
1109 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
1112 rsp->completed = rsp->gpnum; /* Declare the grace period complete. */
1113 trace_rcu_grace_period(rsp->name, rsp->completed, "end");
1114 rsp->fqs_state = RCU_GP_IDLE;
1115 rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
1119 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
1120 * Allows quiescent states for a group of CPUs to be reported at one go
1121 * to the specified rcu_node structure, though all the CPUs in the group
1122 * must be represented by the same rcu_node structure (which need not be
1123 * a leaf rcu_node structure, though it often will be). That structure's
1124 * lock must be held upon entry, and it is released before return.
1127 rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
1128 struct rcu_node *rnp, unsigned long flags)
1129 __releases(rnp->lock)
1131 struct rcu_node *rnp_c;
1133 /* Walk up the rcu_node hierarchy. */
1135 if (!(rnp->qsmask & mask)) {
1137 /* Our bit has already been cleared, so done. */
1138 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1141 rnp->qsmask &= ~mask;
1142 trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
1143 mask, rnp->qsmask, rnp->level,
1144 rnp->grplo, rnp->grphi,
1146 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
1148 /* Other bits still set at this level, so done. */
1149 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1152 mask = rnp->grpmask;
1153 if (rnp->parent == NULL) {
1155 /* No more levels. Exit loop holding root lock. */
1159 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1162 raw_spin_lock_irqsave(&rnp->lock, flags);
1163 WARN_ON_ONCE(rnp_c->qsmask);
1167 * Get here if we are the last CPU to pass through a quiescent
1168 * state for this grace period. Invoke rcu_report_qs_rsp()
1169 * to clean up and start the next grace period if one is needed.
1171 rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
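/*
 * A standalone model (kept out of the build) of how rcu_report_qs_rnp()
 * above walks quiescent-state reports up the rcu_node tree: each node
 * tracks the children (or CPUs) it is still waiting on in a bitmask;
 * clearing the last bit at one level clears this node's bit in its
 * parent, and clearing the root's last bit lets the grace period end.
 * The check for preempted readers blocking the grace period is omitted,
 * and the structure names are simplified stand-ins.
 */
#if 0
#include <stddef.h>
#include <stdio.h>

struct model_node {
        unsigned long qsmask;           /* children/CPUs still owing a QS */
        unsigned long grpmask;          /* this node's bit in parent->qsmask */
        struct model_node *parent;
};

/* Report that the child/CPU identified by @mask under @rnp is quiescent. */
static void model_report_qs(struct model_node *rnp, unsigned long mask)
{
        for (;;) {
                if (!(rnp->qsmask & mask))
                        return;                 /* already cleared: nothing to do */
                rnp->qsmask &= ~mask;
                if (rnp->qsmask != 0)
                        return;                 /* still waiting on siblings */
                if (rnp->parent == NULL) {
                        printf("grace period can end\n");
                        return;
                }
                mask = rnp->grpmask;            /* propagate one level up */
                rnp = rnp->parent;
        }
}

int main(void)
{
        struct model_node root  = { .qsmask = 0x3 };
        struct model_node leaf0 = { .qsmask = 0x3, .grpmask = 0x1, .parent = &root };
        struct model_node leaf1 = { .qsmask = 0x1, .grpmask = 0x2, .parent = &root };

        model_report_qs(&leaf0, 0x1);
        model_report_qs(&leaf0, 0x2);   /* leaf0 empty -> clears bit 0 in root */
        model_report_qs(&leaf1, 0x1);   /* leaf1 empty -> clears bit 1, GP can end */
        return 0;
}
#endif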
1175 * Record a quiescent state for the specified CPU to that CPU's rcu_data
1176 * structure. This must be either called from the specified CPU, or
1177 * called when the specified CPU is known to be offline (and when it is
1178 * also known that no other CPU is concurrently trying to help the offline
1179 * CPU). The lastcomp argument is used to make sure we are still in the
1180 * grace period of interest. We don't want to end the current grace period
1181 * based on quiescent states detected in an earlier grace period!
1184 rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastgp)
1186 unsigned long flags;
1188 struct rcu_node *rnp;
1191 raw_spin_lock_irqsave(&rnp->lock, flags);
1192 if (lastgp != rnp->gpnum || rnp->completed == rnp->gpnum) {
1195 * The grace period in which this quiescent state was
1196 * recorded has ended, so don't report it upwards.
1197 * We will instead need a new quiescent state that lies
1198 * within the current grace period.
1200 rdp->passed_quiesce = 0; /* need qs for new gp. */
1201 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1204 mask = rdp->grpmask;
1205 if ((rnp->qsmask & mask) == 0) {
1206 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1208 rdp->qs_pending = 0;
1211 * This GP can't end until this CPU checks in, so all of our
1212 * callbacks can be processed during the next GP.
1214 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
1216 rcu_report_qs_rnp(mask, rsp, rnp, flags); /* releases rnp->lock */
1221 * Check to see if there is a new grace period of which this CPU
1222 * is not yet aware, and if so, set up local rcu_data state for it.
1223 * Otherwise, see if this CPU has just passed through its first
1224 * quiescent state for this grace period, and record that fact if so.
1227 rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
1229 /* If there is now a new grace period, record and return. */
1230 if (check_for_new_grace_period(rsp, rdp))
1234 * Does this CPU still need to do its part for current grace period?
1235 * If no, return and let the other CPUs do their part as well.
1237 if (!rdp->qs_pending)
1241 * Was there a quiescent state since the beginning of the grace
1242 * period? If no, then exit and wait for the next call.
1244 if (!rdp->passed_quiesce)
1248 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
1251 rcu_report_qs_rdp(rdp->cpu, rsp, rdp, rdp->passed_quiesce_gpnum);
1254 #ifdef CONFIG_HOTPLUG_CPU
1257 * Move a dying CPU's RCU callbacks to an online CPU's callback list.
1258 * Also record a quiescent state for this CPU for the current grace period.
1259 * Synchronization and interrupt disabling are not required because
1260 * this function executes in stop_machine() context. Therefore, cleanup
1261 * operations that might block must be done later from the CPU_DEAD
1264 * Note that the outgoing CPU's bit has already been cleared in the
1265 * cpu_online_mask. This allows us to randomly pick a callback
1266 * destination from the bits set in that mask.
1268 static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
1270 unsigned long flags;
1274 int receive_cpu = cpumask_any(cpu_online_mask);
1275 struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
1276 struct rcu_data *receive_rdp = per_cpu_ptr(rsp->rda, receive_cpu);
1277 struct rcu_node *rnp = rdp->mynode; /* For dying CPU. */
1279 /* First, adjust the counts. */
1280 if (rdp->nxtlist != NULL) {
1281 receive_rdp->qlen_lazy += rdp->qlen_lazy;
1282 receive_rdp->qlen += rdp->qlen;
1288 * Next, move ready-to-invoke callbacks to be invoked on some
1289 * other CPU. These will not be required to pass through another
1290 * grace period: They are done, regardless of CPU.
1292 if (rdp->nxtlist != NULL &&
1293 rdp->nxttail[RCU_DONE_TAIL] != &rdp->nxtlist) {
1294 struct rcu_head *oldhead;
1295 struct rcu_head **oldtail;
1296 struct rcu_head **newtail;
1298 oldhead = rdp->nxtlist;
1299 oldtail = receive_rdp->nxttail[RCU_DONE_TAIL];
1300 rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
1301 *rdp->nxttail[RCU_DONE_TAIL] = *oldtail;
1302 *receive_rdp->nxttail[RCU_DONE_TAIL] = oldhead;
1303 newtail = rdp->nxttail[RCU_DONE_TAIL];
1304 for (i = RCU_DONE_TAIL; i < RCU_NEXT_SIZE; i++) {
1305 if (receive_rdp->nxttail[i] == oldtail)
1306 receive_rdp->nxttail[i] = newtail;
1307 if (rdp->nxttail[i] == newtail)
1308 rdp->nxttail[i] = &rdp->nxtlist;
1313 * Finally, put the rest of the callbacks at the end of the list.
1314 * The ones that made it partway through get to start over: We
1315 * cannot assume that grace periods are synchronized across CPUs.
1316 * (We could splice RCU_WAIT_TAIL into RCU_NEXT_READY_TAIL, but
1317 * this does not seem compelling. Not yet, anyway.)
1319 if (rdp->nxtlist != NULL) {
1320 *receive_rdp->nxttail[RCU_NEXT_TAIL] = rdp->nxtlist;
1321 receive_rdp->nxttail[RCU_NEXT_TAIL] =
1322 rdp->nxttail[RCU_NEXT_TAIL];
1323 receive_rdp->n_cbs_adopted += rdp->qlen;
1324 rdp->n_cbs_orphaned += rdp->qlen;
1326 rdp->nxtlist = NULL;
1327 for (i = 0; i < RCU_NEXT_SIZE; i++)
1328 rdp->nxttail[i] = &rdp->nxtlist;
1332 * Record a quiescent state for the dying CPU. This is safe
1333 * only because we have already cleared out the callbacks.
1334 * (Otherwise, the RCU core might try to schedule the invocation
1335 * of callbacks on this now-offline CPU, which would be bad.)
1337 mask = rdp->grpmask; /* rnp->grplo is constant. */
1338 trace_rcu_grace_period(rsp->name,
1339 rnp->gpnum + 1 - !!(rnp->qsmask & mask),
1341 rcu_report_qs_rdp(smp_processor_id(), rsp, rdp, rsp->gpnum);
1342 /* Note that rcu_report_qs_rdp() might call trace_rcu_grace_period(). */
1345 * Remove the dying CPU from the bitmasks in the rcu_node
1346 * hierarchy. Because we are in stop_machine() context, we
1347 * automatically exclude ->onofflock critical sections.
1350 raw_spin_lock_irqsave(&rnp->lock, flags);
1351 rnp->qsmaskinit &= ~mask;
1352 if (rnp->qsmaskinit != 0) {
1353 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1356 if (rnp == rdp->mynode) {
1357 need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
1358 if (need_report & RCU_OFL_TASKS_NORM_GP)
1359 rcu_report_unblock_qs_rnp(rnp, flags);
1361 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1362 if (need_report & RCU_OFL_TASKS_EXP_GP)
1363 rcu_report_exp_rnp(rsp, rnp, true);
1365 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1366 mask = rnp->grpmask;
1368 } while (rnp != NULL);
1372 * The CPU has been completely removed, and some other CPU is reporting
1373 * this fact from process context. Do the remainder of the cleanup.
1374 * There can only be one CPU hotplug operation at a time, so no other
1375 * CPU can be attempting to update rcu_cpu_kthread_task.
1377 static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
1379 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
1380 struct rcu_node *rnp = rdp->mynode;
1382 rcu_stop_cpu_kthread(cpu);
1383 rcu_node_kthread_setaffinity(rnp, -1);
1386 #else /* #ifdef CONFIG_HOTPLUG_CPU */
1388 static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
1392 static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
1396 #endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
1399 * Invoke any RCU callbacks that have made it to the end of their grace
1400 * period. Throttle as specified by rdp->blimit.
1402 static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
1404 unsigned long flags;
1405 struct rcu_head *next, *list, **tail;
1406 int bl, count, count_lazy;
1408 /* If no callbacks are ready, just return. */
1409 if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
1410 trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0);
1411 trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist),
1412 need_resched(), is_idle_task(current),
1413 rcu_is_callbacks_kthread());
1418 * Extract the list of ready callbacks, disabling irqs to prevent
1419 * races with call_rcu() from interrupt handlers.
1421 local_irq_save(flags);
1422 WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
1424 trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, bl);
1425 list = rdp->nxtlist;
1426 rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
1427 *rdp->nxttail[RCU_DONE_TAIL] = NULL;
1428 tail = rdp->nxttail[RCU_DONE_TAIL];
1429 for (count = RCU_NEXT_SIZE - 1; count >= 0; count--)
1430 if (rdp->nxttail[count] == rdp->nxttail[RCU_DONE_TAIL])
1431 rdp->nxttail[count] = &rdp->nxtlist;
1432 local_irq_restore(flags);
1434 /* Invoke callbacks. */
1435 count = count_lazy = 0;
1439 debug_rcu_head_unqueue(list);
1440 if (__rcu_reclaim(rsp->name, list))
1443 /* Stop only if limit reached and CPU has something to do. */
1444 if (++count >= bl &&
1446 (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
1450 local_irq_save(flags);
1451 trace_rcu_batch_end(rsp->name, count, !!list, need_resched(),
1452 is_idle_task(current),
1453 rcu_is_callbacks_kthread());
1455 /* Update count, and requeue any remaining callbacks. */
1456 rdp->qlen_lazy -= count_lazy;
1458 rdp->n_cbs_invoked += count;
1460 *tail = rdp->nxtlist;
1461 rdp->nxtlist = list;
1462 for (count = 0; count < RCU_NEXT_SIZE; count++)
1463 if (&rdp->nxtlist == rdp->nxttail[count])
1464 rdp->nxttail[count] = tail;
1469 /* Reinstate batch limit if we have worked down the excess. */
1470 if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
1471 rdp->blimit = blimit;
1473 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
1474 if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
1475 rdp->qlen_last_fqs_check = 0;
1476 rdp->n_force_qs_snap = rsp->n_force_qs;
1477 } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
1478 rdp->qlen_last_fqs_check = rdp->qlen;
1480 local_irq_restore(flags);
1482 /* Re-invoke RCU core processing if there are callbacks remaining. */
1483 if (cpu_has_callbacks_ready_to_invoke(rdp))
1488 * Check to see if this CPU is in a non-context-switch quiescent state
1489 * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
1490 * Also schedule RCU core processing.
1492 * This function must be called from hardirq context. It is normally
1493 * invoked from the scheduling-clock interrupt. If rcu_pending returns
1494 * false, there is no point in invoking rcu_check_callbacks().
1496 void rcu_check_callbacks(int cpu, int user)
1498 trace_rcu_utilization("Start scheduler-tick");
1499 if (user || rcu_is_cpu_rrupt_from_idle()) {
1502 * Get here if this CPU took its interrupt from user
1503 * mode or from the idle loop, and if this is not a
1504 * nested interrupt. In this case, the CPU is in
1505 * a quiescent state, so note it.
1507 * No memory barrier is required here because both
1508 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
1509 * variables that other CPUs neither access nor modify,
1510 * at least not while the corresponding CPU is online.
1516 } else if (!in_softirq()) {
1519 * Get here if this CPU did not take its interrupt from
1520 * softirq, in other words, if it is not interrupting
1521 * a rcu_bh read-side critical section. This is an _bh
1522 * critical section, so note it.
1527 rcu_preempt_check_callbacks(cpu);
1528 if (rcu_pending(cpu))
1530 trace_rcu_utilization("End scheduler-tick");
1534 * Scan the leaf rcu_node structures, processing dyntick state for any that
1535 * have not yet encountered a quiescent state, using the function specified.
1536 * Also initiate boosting for any threads blocked on the root rcu_node.
1538 * The caller must have suppressed start of new grace periods.
1540 static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
1544 unsigned long flags;
1546 struct rcu_node *rnp;
1548 rcu_for_each_leaf_node(rsp, rnp) {
1550 raw_spin_lock_irqsave(&rnp->lock, flags);
1551 if (!rcu_gp_in_progress(rsp)) {
1552 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1555 if (rnp->qsmask == 0) {
1556 rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
1561 for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
1562 if ((rnp->qsmask & bit) != 0 &&
1563 f(per_cpu_ptr(rsp->rda, cpu)))
1568 /* rcu_report_qs_rnp() releases rnp->lock. */
1569 rcu_report_qs_rnp(mask, rsp, rnp, flags);
1572 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1574 rnp = rcu_get_root(rsp);
1575 if (rnp->qsmask == 0) {
1576 raw_spin_lock_irqsave(&rnp->lock, flags);
1577 rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
1582 * Force quiescent states on reluctant CPUs, and also detect which
1583 * CPUs are in dyntick-idle mode.
1585 static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
1587 unsigned long flags;
1588 struct rcu_node *rnp = rcu_get_root(rsp);
1590 trace_rcu_utilization("Start fqs");
1591 if (!rcu_gp_in_progress(rsp)) {
1592 trace_rcu_utilization("End fqs");
1593 return; /* No grace period in progress, nothing to force. */
1595 if (!raw_spin_trylock_irqsave(&rsp->fqslock, flags)) {
1596 rsp->n_force_qs_lh++; /* Inexact, can lose counts. Tough! */
1597 trace_rcu_utilization("End fqs");
1598 return; /* Someone else is already on the job. */
1600 if (relaxed && ULONG_CMP_GE(rsp->jiffies_force_qs, jiffies))
1601 goto unlock_fqs_ret; /* no emergency and done recently. */
1603 raw_spin_lock(&rnp->lock); /* irqs already disabled */
1604 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
1605 if (!rcu_gp_in_progress(rsp)) {
1606 rsp->n_force_qs_ngp++;
1607 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
1608 goto unlock_fqs_ret; /* no GP in progress, time updated. */
1610 rsp->fqs_active = 1;
1611 switch (rsp->fqs_state) {
1615 break; /* grace period idle or initializing, ignore. */
1617 case RCU_SAVE_DYNTICK:
1618 if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK)
1619 break; /* So gcc recognizes the dead code. */
1621 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
1623 /* Record dyntick-idle state. */
1624 force_qs_rnp(rsp, dyntick_save_progress_counter);
1625 raw_spin_lock(&rnp->lock); /* irqs already disabled */
1626 if (rcu_gp_in_progress(rsp))
1627 rsp->fqs_state = RCU_FORCE_QS;
1632 /* Check dyntick-idle state, send IPI to laggards. */
1633 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
1634 force_qs_rnp(rsp, rcu_implicit_dynticks_qs);
1636 /* Leave state in case more forcing is required. */
1638 raw_spin_lock(&rnp->lock); /* irqs already disabled */
1641 rsp->fqs_active = 0;
1642 if (rsp->fqs_need_gp) {
1643 raw_spin_unlock(&rsp->fqslock); /* irqs remain disabled */
1644 rsp->fqs_need_gp = 0;
1645 rcu_start_gp(rsp, flags); /* releases rnp->lock */
1646 trace_rcu_utilization("End fqs");
1649 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
1651 raw_spin_unlock_irqrestore(&rsp->fqslock, flags);
1652 trace_rcu_utilization("End fqs");
1656 * This does the RCU core processing work for the specified rcu_state
1657 * and rcu_data structures. This may be called only from the CPU to
1658 * whom the rdp belongs.
1661 __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
1663 unsigned long flags;
1665 WARN_ON_ONCE(rdp->beenonline == 0);
1668 * If an RCU GP has gone long enough, go check for dyntick
1669 * idle CPUs and, if needed, send resched IPIs.
1671 if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
1672 force_quiescent_state(rsp, 1);
1675 * Advance callbacks in response to end of earlier grace
1676 * period that some other CPU ended.
1678 rcu_process_gp_end(rsp, rdp);
1680 /* Update RCU state based on any recent quiescent states. */
1681 rcu_check_quiescent_state(rsp, rdp);
1683 /* Does this CPU require a not-yet-started grace period? */
1684 if (cpu_needs_another_gp(rsp, rdp)) {
1685 raw_spin_lock_irqsave(&rcu_get_root(rsp)->lock, flags);
1686 rcu_start_gp(rsp, flags); /* releases above lock */
1689 /* If there are callbacks ready, invoke them. */
1690 if (cpu_has_callbacks_ready_to_invoke(rdp))
1691 invoke_rcu_callbacks(rsp, rdp);
1695 * Do RCU core processing for the current CPU.
1697 static void rcu_process_callbacks(struct softirq_action *unused)
1699 trace_rcu_utilization("Start RCU core");
1700 __rcu_process_callbacks(&rcu_sched_state,
1701 &__get_cpu_var(rcu_sched_data));
1702 __rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
1703 rcu_preempt_process_callbacks();
1704 trace_rcu_utilization("End RCU core");
1708 * Schedule RCU callback invocation. If the specified type of RCU
1709 * does not support RCU priority boosting, just do a direct call,
1710 * otherwise wake up the per-CPU kernel kthread. Note that because we
1711 * are running on the current CPU with interrupts disabled, the
1712 * rcu_cpu_kthread_task cannot disappear out from under us.
1714 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
1716 if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
1718 if (likely(!rsp->boost)) {
1719 rcu_do_batch(rsp, rdp);
1722 invoke_rcu_callbacks_kthread();
1725 static void invoke_rcu_core(void)
1727 raise_softirq(RCU_SOFTIRQ);
1731 __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
1732 struct rcu_state *rsp, bool lazy)
1734 unsigned long flags;
1735 struct rcu_data *rdp;
1737 WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
1738 debug_rcu_head_queue(head);
1742 smp_mb(); /* Ensure RCU update seen before callback registry. */
1745 * Opportunistically note grace-period endings and beginnings.
1746 * Note that we might see a beginning right after we see an
1747 * end, but never vice versa, since this CPU has to pass through
1748 * a quiescent state betweentimes.
1750 local_irq_save(flags);
1751 WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
1752 rdp = this_cpu_ptr(rsp->rda);
1754 /* Add the callback to our list. */
1755 *rdp->nxttail[RCU_NEXT_TAIL] = head;
1756 rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
1761 if (__is_kfree_rcu_offset((unsigned long)func))
1762 trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
1763 rdp->qlen_lazy, rdp->qlen);
1765 trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);
1767 /* If interrupts were disabled, don't dive into RCU core. */
1768 if (irqs_disabled_flags(flags)) {
1769 local_irq_restore(flags);
1774 * Force the grace period if too many callbacks or too long waiting.
1775 * Enforce hysteresis, and don't invoke force_quiescent_state()
1776 * if some other CPU has recently done so. Also, don't bother
1777 * invoking force_quiescent_state() if the newly enqueued callback
1778 * is the only one waiting for a grace period to complete.
1780 if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
1782 /* Are we ignoring a completed grace period? */
1783 rcu_process_gp_end(rsp, rdp);
1784 check_for_new_grace_period(rsp, rdp);
1786 /* Start a new grace period if one not already started. */
1787 if (!rcu_gp_in_progress(rsp)) {
1788 unsigned long nestflag;
1789 struct rcu_node *rnp_root = rcu_get_root(rsp);
1791 raw_spin_lock_irqsave(&rnp_root->lock, nestflag);
1792 rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock */
1794 /* Give the grace period a kick. */
1795 rdp->blimit = LONG_MAX;
1796 if (rsp->n_force_qs == rdp->n_force_qs_snap &&
1797 *rdp->nxttail[RCU_DONE_TAIL] != head)
1798 force_quiescent_state(rsp, 0);
1799 rdp->n_force_qs_snap = rsp->n_force_qs;
1800 rdp->qlen_last_fqs_check = rdp->qlen;
1802 } else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
1803 force_quiescent_state(rsp, 1);
1804 local_irq_restore(flags);
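/*
 * A standalone sketch (kept out of the build) of the batch-limit
 * hysteresis shared between __call_rcu() above and rcu_do_batch()
 * earlier in this file: normally at most blimit callbacks are invoked
 * per pass; when the queue grows past qhimark the limit is removed, and
 * it is put back only after the queue has drained to qlowmark.  The
 * real trigger compares against ->qlen_last_fqs_check; this model uses
 * the raw queue length for simplicity.
 */
#if 0
#include <limits.h>
#include <stdio.h>

static long model_blimit = 10, model_qhimark = 10000, model_qlowmark = 100;

static long model_pick_batch_limit(long *cur_limit, long qlen)
{
        if (qlen > model_qhimark)
                *cur_limit = LONG_MAX;          /* emergency: no throttling */
        else if (*cur_limit == LONG_MAX && qlen <= model_qlowmark)
                *cur_limit = model_blimit;      /* excess worked off: throttle again */
        return *cur_limit;
}

int main(void)
{
        long limit = model_blimit;

        printf("%ld\n", model_pick_batch_limit(&limit, 50));    /* 10 */
        printf("%ld\n", model_pick_batch_limit(&limit, 20000)); /* LONG_MAX */
        printf("%ld\n", model_pick_batch_limit(&limit, 5000));  /* still LONG_MAX */
        printf("%ld\n", model_pick_batch_limit(&limit, 80));    /* back to 10 */
        return 0;
}
#endif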
1808 * Queue an RCU-sched callback for invocation after a grace period.
1810 void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
1812 __call_rcu(head, func, &rcu_sched_state, 0);
1814 EXPORT_SYMBOL_GPL(call_rcu_sched);
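/*
 * A sketch of the usual way call_rcu_sched() is used (kept out of the
 * build): embed an rcu_head in the structure being retired and free it
 * from the callback.  struct model_item, model_free_rcu(), and
 * model_retire() are made-up names, and the kfree() call assumes
 * <linux/slab.h>.
 */
#if 0
struct model_item {
        int data;
        struct rcu_head rcu;
};

static void model_free_rcu(struct rcu_head *head)
{
        struct model_item *item = container_of(head, struct model_item, rcu);

        kfree(item);            /* runs after all pre-existing readers finish */
}

static void model_retire(struct model_item *item)
{
        /* Unlink @item from any RCU-protected structure first, then: */
        call_rcu_sched(&item->rcu, model_free_rcu);
}
#endif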
1817 * Queue an RCU callback for invocation after a quicker grace period.
1819 void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
1821 __call_rcu(head, func, &rcu_bh_state, 0);
1823 EXPORT_SYMBOL_GPL(call_rcu_bh);
1826 * synchronize_sched - wait until an rcu-sched grace period has elapsed.
1828 * Control will return to the caller some time after a full rcu-sched
1829 * grace period has elapsed, in other words after all currently executing
1830 * rcu-sched read-side critical sections have completed. These read-side
1831 * critical sections are delimited by rcu_read_lock_sched() and
1832 * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(),
1833 * local_irq_disable(), and so on may be used in place of
1834 * rcu_read_lock_sched().
1836 * This means that all preempt_disable code sequences, including NMI and
1837 * hardware-interrupt handlers, in progress on entry will have completed
1838 * before this primitive returns. However, this does not guarantee that
1839 * softirq handlers will have completed, since in some kernels, these
1840 * handlers can run in process context, and can block.
1842 * This primitive provides the guarantees made by the (now removed)
1843 * synchronize_kernel() API. In contrast, synchronize_rcu() only
1844 * guarantees that rcu_read_lock() sections will have completed.
1845 * In "classic RCU", these two guarantees happen to be one and
1846 * the same, but can differ in realtime RCU implementations.
1848 void synchronize_sched(void)
1850 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
1851 !lock_is_held(&rcu_lock_map) &&
1852 !lock_is_held(&rcu_sched_lock_map),
1853 "Illegal synchronize_sched() in RCU-sched read-side critical section");
1854 if (rcu_blocking_is_gp())
1856 wait_rcu_gp(call_rcu_sched);
1858 EXPORT_SYMBOL_GPL(synchronize_sched);
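/*
 * Illustrative usage sketch (not part of this file): synchronize_sched()
 * lets an updater free memory directly once all pre-existing
 * preempt-disabled readers have finished, instead of queueing a callback.
 * The names "gp", "struct foo", and "update_foo" are hypothetical and
 * used only for this example; the caller is assumed to hold an
 * update-side lock serializing writers, and readers are assumed to
 * access gp via rcu_dereference_sched() under rcu_read_lock_sched().
 *
 *	static struct foo *gp;
 *
 *	static void update_foo(struct foo *newp)
 *	{
 *		struct foo *oldp = gp;
 *
 *		rcu_assign_pointer(gp, newp);
 *		synchronize_sched();
 *		kfree(oldp);
 *	}
 */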
1861 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
1863 * Control will return to the caller some time after a full rcu_bh grace
1864 * period has elapsed, in other words after all currently executing rcu_bh
1865 * read-side critical sections have completed. RCU read-side critical
1866 * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
1867 * and may be nested.
1869 void synchronize_rcu_bh(void)
1871 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
1872 !lock_is_held(&rcu_lock_map) &&
1873 !lock_is_held(&rcu_sched_lock_map),
1874 "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
1875 if (rcu_blocking_is_gp())
1877 wait_rcu_gp(call_rcu_bh);
1879 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
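/*
 * Illustrative reader sketch (not part of this file): readers that pair
 * with synchronize_rcu_bh() bracket their accesses with
 * rcu_read_lock_bh()/rcu_read_unlock_bh().  The names "gp", "p", and
 * "do_something_with" are hypothetical and used only for this example.
 *
 *	rcu_read_lock_bh();
 *	p = rcu_dereference_bh(gp);
 *	if (p)
 *		do_something_with(p->data);
 *	rcu_read_unlock_bh();
 */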
1882 * Check to see if there is any immediate RCU-related work to be done
1883 * by the current CPU, for the specified type of RCU, returning 1 if so.
1884 * The checks are in order of increasing expense: checks that can be
1885 * carried out against CPU-local state are performed first. However,
1886 * we must check for CPU stalls first, else we might not get a chance.
1888 static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
1890 struct rcu_node *rnp = rdp->mynode;
1892 rdp->n_rcu_pending++;
1894 /* Check for CPU stalls, if enabled. */
1895 check_cpu_stall(rsp, rdp);
1897 /* Is the RCU core waiting for a quiescent state from this CPU? */
1898 if (rcu_scheduler_fully_active &&
1899 rdp->qs_pending && !rdp->passed_quiesce) {
1902 * If force_quiescent_state() is coming soon and this CPU
1903 * needs a quiescent state, and this is either RCU-sched
1904 * or RCU-bh, force a local reschedule.
1906 rdp->n_rp_qs_pending++;
1907 if (!rdp->preemptible &&
1908 ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs) - 1,
1911 } else if (rdp->qs_pending && rdp->passed_quiesce) {
1912 rdp->n_rp_report_qs++;
1916 /* Does this CPU have callbacks ready to invoke? */
1917 if (cpu_has_callbacks_ready_to_invoke(rdp)) {
1918 rdp->n_rp_cb_ready++;
1922 /* Has RCU gone idle with this CPU needing another grace period? */
1923 if (cpu_needs_another_gp(rsp, rdp)) {
1924 rdp->n_rp_cpu_needs_gp++;
1928 /* Has another RCU grace period completed? */
1929 if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
1930 rdp->n_rp_gp_completed++;
1934 /* Has a new RCU grace period started? */
1935 if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */
1936 rdp->n_rp_gp_started++;
1940 /* Has an RCU GP gone long enough to send resched IPIs &c? */
1941 if (rcu_gp_in_progress(rsp) &&
1942 ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies)) {
1943 rdp->n_rp_need_fqs++;
1948 rdp->n_rp_need_nothing++;
1953 * Check to see if there is any immediate RCU-related work to be done
1954 * by the current CPU, returning 1 if so. This function is part of the
1955 * RCU implementation; it is -not- an exported member of the RCU API.
1957 static int rcu_pending(int cpu)
1959 return __rcu_pending(&rcu_sched_state, &per_cpu(rcu_sched_data, cpu)) ||
1960 __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)) ||
1961 rcu_preempt_pending(cpu);
1965 * Check to see if any future RCU-related work will need to be done
1966 * by the current CPU, even if none need be done immediately, returning 1 if so.
1969 static int rcu_cpu_has_callbacks(int cpu)
1971 /* RCU callbacks either ready or pending? */
1972 return per_cpu(rcu_sched_data, cpu).nxtlist ||
1973 per_cpu(rcu_bh_data, cpu).nxtlist ||
1974 rcu_preempt_cpu_has_callbacks(cpu);
1977 static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
1978 static atomic_t rcu_barrier_cpu_count;
1979 static DEFINE_MUTEX(rcu_barrier_mutex);
1980 static struct completion rcu_barrier_completion;
1982 static void rcu_barrier_callback(struct rcu_head *notused)
1984 if (atomic_dec_and_test(&rcu_barrier_cpu_count))
1985 complete(&rcu_barrier_completion);
1989 * Called with preemption disabled, and from cross-cpu IRQ context.
1991 static void rcu_barrier_func(void *type)
1993 int cpu = smp_processor_id();
1994 struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
1995 void (*call_rcu_func)(struct rcu_head *head,
1996 void (*func)(struct rcu_head *head));
1998 atomic_inc(&rcu_barrier_cpu_count);
1999 call_rcu_func = type;
2000 call_rcu_func(head, rcu_barrier_callback);
2004 * Orchestrate the specified type of RCU barrier, waiting for all
2005 * RCU callbacks of the specified type to complete.
2007 static void _rcu_barrier(struct rcu_state *rsp,
2008 void (*call_rcu_func)(struct rcu_head *head,
2009 void (*func)(struct rcu_head *head)))
2011 BUG_ON(in_interrupt());
2012 /* Take mutex to serialize concurrent rcu_barrier() requests. */
2013 mutex_lock(&rcu_barrier_mutex);
2014 init_completion(&rcu_barrier_completion);
2016 * Initialize rcu_barrier_cpu_count to 1, then invoke
2017 * rcu_barrier_func() on each CPU, so that each CPU also has
2018 * incremented rcu_barrier_cpu_count. Only then is it safe to
2019 * decrement rcu_barrier_cpu_count -- otherwise the first CPU
2020 * might complete its grace period before all of the other CPUs
2021 * did their increment, causing this function to return too
2022 * early. Note that on_each_cpu() disables irqs, which prevents
2023 * any CPUs from coming online or going offline until each online
2024 * CPU has queued its RCU-barrier callback.
2026 atomic_set(&rcu_barrier_cpu_count, 1);
2027 on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1);
2028 if (atomic_dec_and_test(&rcu_barrier_cpu_count))
2029 complete(&rcu_barrier_completion);
2030 wait_for_completion(&rcu_barrier_completion);
2031 mutex_unlock(&rcu_barrier_mutex);
2035 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
2037 void rcu_barrier_bh(void)
2039 _rcu_barrier(&rcu_bh_state, call_rcu_bh);
2041 EXPORT_SYMBOL_GPL(rcu_barrier_bh);
2044 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
2046 void rcu_barrier_sched(void)
2048 _rcu_barrier(&rcu_sched_state, call_rcu_sched);
2050 EXPORT_SYMBOL_GPL(rcu_barrier_sched);
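/*
 * Illustrative usage sketch (not part of this file): a module that has
 * posted callbacks with call_rcu_sched() must wait for all of them to be
 * invoked before its code and data disappear, typically in its exit
 * handler.  The names "foo_exit" and "foo_list_teardown" are hypothetical
 * and used only for this example; foo_list_teardown() is assumed to post
 * call_rcu_sched() callbacks, and rcu_barrier_sched() then blocks until
 * every one of them has run.
 *
 *	static void __exit foo_exit(void)
 *	{
 *		foo_list_teardown();
 *		rcu_barrier_sched();
 *	}
 */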
2053 * Do boot-time initialization of a CPU's per-CPU RCU data.
2056 rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
2058 unsigned long flags;
2060 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2061 struct rcu_node *rnp = rcu_get_root(rsp);
2063 /* Set up local state, ensuring consistent view of global state. */
2064 raw_spin_lock_irqsave(&rnp->lock, flags);
2065 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
2066 rdp->nxtlist = NULL;
2067 for (i = 0; i < RCU_NEXT_SIZE; i++)
2068 rdp->nxttail[i] = &rdp->nxtlist;
2071 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
2072 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_NESTING);
2073 WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
2076 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2080 * Initialize a CPU's per-CPU RCU data. Note that only one online or
2081 * offline event can be happening at a given time. Note also that we
2082 * can accept some slop in the rsp->completed access due to the fact
2083 * that this CPU cannot possibly have any RCU callbacks in flight yet.
2085 static void __cpuinit
2086 rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
2088 unsigned long flags;
2090 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2091 struct rcu_node *rnp = rcu_get_root(rsp);
2093 /* Set up local state, ensuring consistent view of global state. */
2094 raw_spin_lock_irqsave(&rnp->lock, flags);
2095 rdp->beenonline = 1; /* We have now been online. */
2096 rdp->preemptible = preemptible;
2097 rdp->qlen_last_fqs_check = 0;
2098 rdp->n_force_qs_snap = rsp->n_force_qs;
2099 rdp->blimit = blimit;
2100 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_NESTING;
2101 atomic_set(&rdp->dynticks->dynticks,
2102 (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
2103 rcu_prepare_for_idle_init(cpu);
2104 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
2107 * A new grace period might start here. If so, we won't be part
2108 * of it, but that is OK, as we are currently in a quiescent state.
2111 /* Exclude any attempts to start a new GP on large systems. */
2112 raw_spin_lock(&rsp->onofflock); /* irqs already disabled. */
2114 /* Add CPU to rcu_node bitmasks. */
2116 mask = rdp->grpmask;
2118 /* Exclude any attempts to start a new GP on small systems. */
2119 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
2120 rnp->qsmaskinit |= mask;
2121 mask = rnp->grpmask;
2122 if (rnp == rdp->mynode) {
2124 * If there is a grace period in progress, we will
2125 * set up to wait for it next time we run the RCU core code.
2128 rdp->gpnum = rnp->completed;
2129 rdp->completed = rnp->completed;
2130 rdp->passed_quiesce = 0;
2131 rdp->qs_pending = 0;
2132 rdp->passed_quiesce_gpnum = rnp->gpnum - 1;
2133 trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuonl");
2135 raw_spin_unlock(&rnp->lock); /* irqs already disabled. */
2137 } while (rnp != NULL && !(rnp->qsmaskinit & mask));
2139 raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
2142 static void __cpuinit rcu_prepare_cpu(int cpu)
2144 rcu_init_percpu_data(cpu, &rcu_sched_state, 0);
2145 rcu_init_percpu_data(cpu, &rcu_bh_state, 0);
2146 rcu_preempt_init_percpu_data(cpu);
2150 * Handle CPU online/offline notification events.
2152 static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
2153 unsigned long action, void *hcpu)
2155 long cpu = (long)hcpu;
2156 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
2157 struct rcu_node *rnp = rdp->mynode;
2159 trace_rcu_utilization("Start CPU hotplug");
2161 case CPU_UP_PREPARE:
2162 case CPU_UP_PREPARE_FROZEN:
2163 rcu_prepare_cpu(cpu);
2164 rcu_prepare_kthreads(cpu);
2167 case CPU_DOWN_FAILED:
2168 rcu_node_kthread_setaffinity(rnp, -1);
2169 rcu_cpu_kthread_setrt(cpu, 1);
2171 case CPU_DOWN_PREPARE:
2172 rcu_node_kthread_setaffinity(rnp, cpu);
2173 rcu_cpu_kthread_setrt(cpu, 0);
2176 case CPU_DYING_FROZEN:
2178 * The whole machine is "stopped" except this CPU, so we can
2179 * touch any data without introducing corruption. We send the
2180 * dying CPU's callbacks to an arbitrarily chosen online CPU.
2182 rcu_cleanup_dying_cpu(&rcu_bh_state);
2183 rcu_cleanup_dying_cpu(&rcu_sched_state);
2184 rcu_preempt_cleanup_dying_cpu();
2185 rcu_cleanup_after_idle(cpu);
2188 case CPU_DEAD_FROZEN:
2189 case CPU_UP_CANCELED:
2190 case CPU_UP_CANCELED_FROZEN:
2191 rcu_cleanup_dead_cpu(cpu, &rcu_bh_state);
2192 rcu_cleanup_dead_cpu(cpu, &rcu_sched_state);
2193 rcu_preempt_cleanup_dead_cpu(cpu);
2198 trace_rcu_utilization("End CPU hotplug");
2203 * This function is invoked towards the end of the scheduler's initialization
2204 * process. Before this is called, the idle task might contain
2205 * RCU read-side critical sections (during which time, this idle
2206 * task is booting the system). After this function is called, the
2207 * idle tasks are prohibited from containing RCU read-side critical
2208 * sections. This function also enables RCU lockdep checking.
2210 void rcu_scheduler_starting(void)
2212 WARN_ON(num_online_cpus() != 1);
2213 WARN_ON(nr_context_switches() > 0);
2214 rcu_scheduler_active = 1;
2218 * Compute the per-level fanout, either using the exact fanout specified
2219 * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT.
2221 #ifdef CONFIG_RCU_FANOUT_EXACT
2222 static void __init rcu_init_levelspread(struct rcu_state *rsp)
2226 for (i = NUM_RCU_LVLS - 1; i > 0; i--)
2227 rsp->levelspread[i] = CONFIG_RCU_FANOUT;
2228 rsp->levelspread[0] = RCU_FANOUT_LEAF;
2230 #else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
2231 static void __init rcu_init_levelspread(struct rcu_state *rsp)
2238 for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
2239 ccur = rsp->levelcnt[i];
2240 rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
2244 #endif /* #else #ifdef CONFIG_RCU_FANOUT_EXACT */
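/*
 * Worked example (illustrative only, assuming a hypothetical two-level
 * tree with NR_CPUS = 96 and levelcnt[] = { 1, 6 }): the balanced
 * computation above is a ceiling division, levelspread[i] =
 * ceil(cprv / ccur), taken from the leaves toward the root:
 *
 *	i = 1: ccur = 6, cprv = 96  ->  levelspread[1] = (96 + 5) / 6 = 16
 *	i = 0: ccur = 1, cprv = 6   ->  levelspread[0] = (6 + 0) / 1  = 6
 *
 * That is, each of the six leaf rcu_node structures covers up to 16 CPUs,
 * and the single root rcu_node has six children.
 */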
2247 * Helper function for rcu_init() that initializes one rcu_state structure.
2249 static void __init rcu_init_one(struct rcu_state *rsp,
2250 struct rcu_data __percpu *rda)
2252 static char *buf[] = { "rcu_node_level_0",
2255 "rcu_node_level_3" }; /* Match MAX_RCU_LVLS */
2259 struct rcu_node *rnp;
2261 BUILD_BUG_ON(MAX_RCU_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */
2263 /* Initialize the level-tracking arrays. */
2265 for (i = 1; i < NUM_RCU_LVLS; i++)
2266 rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
2267 rcu_init_levelspread(rsp);
2269 /* Initialize the elements themselves, starting from the leaves. */
2271 for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
2272 cpustride *= rsp->levelspread[i];
2273 rnp = rsp->level[i];
2274 for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
2275 raw_spin_lock_init(&rnp->lock);
2276 lockdep_set_class_and_name(&rnp->lock,
2277 &rcu_node_class[i], buf[i]);
2280 rnp->qsmaskinit = 0;
2281 rnp->grplo = j * cpustride;
2282 rnp->grphi = (j + 1) * cpustride - 1;
2283 if (rnp->grphi >= NR_CPUS)
2284 rnp->grphi = NR_CPUS - 1;
2290 rnp->grpnum = j % rsp->levelspread[i - 1];
2291 rnp->grpmask = 1UL << rnp->grpnum;
2292 rnp->parent = rsp->level[i - 1] +
2293 j / rsp->levelspread[i - 1];
2296 INIT_LIST_HEAD(&rnp->blkd_tasks);
2301 rnp = rsp->level[NUM_RCU_LVLS - 1];
2302 for_each_possible_cpu(i) {
2303 while (i > rnp->grphi)
2305 per_cpu_ptr(rsp->rda, i)->mynode = rnp;
2306 rcu_boot_init_percpu_data(i, rsp);
2310 void __init rcu_init(void)
2314 rcu_bootup_announce();
2315 rcu_init_one(&rcu_sched_state, &rcu_sched_data);
2316 rcu_init_one(&rcu_bh_state, &rcu_bh_data);
2317 __rcu_init_preempt();
2318 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
2321 * We don't need protection against CPU-hotplug here because
2322 * this is called early in boot, before either interrupts
2323 * or the scheduler are operational.
2325 cpu_notifier(rcu_cpu_notify, 0);
2326 for_each_online_cpu(cpu)
2327 rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
2328 check_cpu_stall_init();
2331 #include "rcutree_plugin.h"