/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>

#ifdef CONFIG_RCU_TRACE
#include <trace/events/rcu.h>
#endif /* #ifdef CONFIG_RCU_TRACE */

#include "rcu.h"

/* Forward declarations for rcutiny_plugin.h. */
struct rcu_ctrlblk;
static void invoke_rcu_callbacks(void);
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static void rcu_process_callbacks(struct softirq_action *unused);
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp);

#include "rcutiny_plugin.h"

static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;

/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */
static void rcu_idle_enter_common(long long oldval)
{
	if (rcu_dynticks_nesting) {
		RCU_TRACE(trace_rcu_dyntick("--=",
					    oldval, rcu_dynticks_nesting));
		return;
	}
	RCU_TRACE(trace_rcu_dyntick("Start", oldval, rcu_dynticks_nesting));
	if (!is_idle_task(current)) {
		struct task_struct *idle = idle_task(smp_processor_id());

		RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task",
					    oldval, rcu_dynticks_nesting));
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
	rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
}

/*
 * Enter idle, which is an extended quiescent state if we have fully
 * entered that mode (i.e., if the new value of dynticks_nesting is zero).
 */
void rcu_idle_enter(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	WARN_ON_ONCE((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
	if ((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) ==
	    DYNTICK_TASK_NEST_VALUE)
		rcu_dynticks_nesting = 0;
	else
		rcu_dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
	rcu_idle_enter_common(oldval);
	local_irq_restore(flags);
}

/*
 * Exit an interrupt handler towards idle.
 */
void rcu_irq_exit(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	rcu_dynticks_nesting--;
	WARN_ON_ONCE(rcu_dynticks_nesting < 0);
	rcu_idle_enter_common(oldval);
	local_irq_restore(flags);
}

/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcutree.c. */
static void rcu_idle_exit_common(long long oldval)
{
	if (oldval) {
		RCU_TRACE(trace_rcu_dyntick("++=",
					    oldval, rcu_dynticks_nesting));
		return;
	}
	RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting));
	if (!is_idle_task(current)) {
		struct task_struct *idle = idle_task(smp_processor_id());

		RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task",
					    oldval, rcu_dynticks_nesting));
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
}

/*
 * Exit idle, so that we are no longer in an extended quiescent state.
 */
void rcu_idle_exit(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	WARN_ON_ONCE(rcu_dynticks_nesting < 0);
	if (rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK)
		rcu_dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
	else
		rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
	rcu_idle_exit_common(oldval);
	local_irq_restore(flags);
}

/*
 * Enter an interrupt handler, moving away from idle.
 */
void rcu_irq_enter(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	rcu_dynticks_nesting++;
	WARN_ON_ONCE(rcu_dynticks_nesting == 0);
	rcu_idle_exit_common(oldval);
	local_irq_restore(flags);
}

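/*
 * Illustrative sketch, not part of Tiny RCU itself: the four hooks above
 * are meant to be paired.  An architecture's idle loop brackets its
 * low-power wait with rcu_idle_enter()/rcu_idle_exit(), and the irq
 * entry/exit paths bracket handlers with rcu_irq_enter()/rcu_irq_exit(),
 * so that RCU sees a handler as non-idle even when it interrupts the idle
 * task.  cpu_idle_loop() and arch_cpu_wait() below are hypothetical
 * placeholders for that arch-specific code.
 */
#if 0	/* usage sketch only */
static void cpu_idle_loop(void)			/* hypothetical idle loop */
{
	for (;;) {
		rcu_idle_enter();		/* enter extended quiescent state */
		while (!need_resched())
			arch_cpu_wait();	/* hypothetical low-power wait */
		rcu_idle_exit();		/* leave extended quiescent state */
		schedule_preempt_disabled();
	}
}
#endif
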
#ifdef CONFIG_PROVE_RCU

/*
 * Test whether RCU thinks that the current CPU is idle.
 */
int rcu_is_cpu_idle(void)
{
	return !rcu_dynticks_nesting;
}
EXPORT_SYMBOL(rcu_is_cpu_idle);

#endif /* #ifdef CONFIG_PROVE_RCU */

/*
 * Test whether the current CPU was interrupted from idle.  Nested
 * interrupts don't count, we must be running at the first interrupt level.
 */
int rcu_is_cpu_rrupt_from_idle(void)
{
	return rcu_dynticks_nesting <= 0;
}

/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().
 * Also irqs are disabled to avoid confusion due to interrupt handlers
 * invoking call_rcu().
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
	if (rcp->rcucblist != NULL &&
	    rcp->donetail != rcp->curtail) {
		rcp->donetail = rcp->curtail;
		return 1;
	}
	return 0;
}

/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
 */
void rcu_sched_qs(int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
	    rcu_qsctr_help(&rcu_bh_ctrlblk))
		invoke_rcu_callbacks();
	local_irq_restore(flags);
}

/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
		invoke_rcu_callbacks();
	local_irq_restore(flags);
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_check_callbacks(int cpu, int user)
{
	if (user || rcu_is_cpu_rrupt_from_idle())
		rcu_sched_qs(cpu);
	else if (!in_softirq())
		rcu_bh_qs(cpu);
	rcu_preempt_check_callbacks();
}

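/*
 * Illustrative sketch, not part of this file: rcu_check_callbacks() is
 * normally driven from the scheduling-clock path (update_process_times()
 * runs in hardirq context), with "user" indicating whether the tick
 * interrupted user-mode execution, which is itself a quiescent state.
 * The wrapper below is a hypothetical stand-in for that tick path.
 */
#if 0	/* usage sketch only */
static void tick_handler(int user_tick)	/* hypothetical per-tick hook */
{
	/* Runs once per scheduling-clock interrupt, in hardirq context. */
	rcu_check_callbacks(smp_processor_id(), user_tick);
}
#endif
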
/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
	char *rn = NULL;
	struct rcu_head *next, *list;
	unsigned long flags;
	RCU_TRACE(int cb_count = 0);

	/* If no RCU callbacks ready to invoke, just return. */
	if (&rcp->rcucblist == rcp->donetail) {
		RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, 0, -1));
		RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
					      ACCESS_ONCE(rcp->rcucblist),
					      need_resched(),
					      is_idle_task(current),
					      rcu_is_callbacks_kthread()));
		return;
	}

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1));
	list = rcp->rcucblist;
	rcp->rcucblist = *rcp->donetail;
	*rcp->donetail = NULL;
	if (rcp->curtail == rcp->donetail)
		rcp->curtail = &rcp->rcucblist;
	rcu_preempt_remove_callbacks(rcp);
	rcp->donetail = &rcp->rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	RCU_TRACE(rn = rcp->name);
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		__rcu_reclaim(rn, list);
		local_bh_enable();
		list = next;
		RCU_TRACE(cb_count++);
	}
	RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
	RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count, 0, need_resched(),
				      is_idle_task(current),
				      rcu_is_callbacks_kthread()));
}

static void rcu_process_callbacks(struct softirq_action *unused)
{
	__rcu_process_callbacks(&rcu_sched_ctrlblk);
	__rcu_process_callbacks(&rcu_bh_ctrlblk);
	rcu_preempt_process_callbacks();
}

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 *
 * But we want to make this a static inline later.  The cond_resched()
 * currently makes this problematic.
 */
void synchronize_sched(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_sched() in RCU read-side critical section");
	cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);

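/*
 * Illustrative sketch, not part of this file: a typical use of
 * synchronize_sched() as a grace-period barrier.  Readers run inside
 * rcu_read_lock_sched()/rcu_read_unlock_sched() (i.e., with preemption
 * disabled), so once synchronize_sched() returns, every reader that might
 * still reference the old data has finished.  "struct foo", gbl_foo and
 * do_something_with() are hypothetical.
 */
#if 0	/* usage sketch only */
static void foo_reader(void)
{
	struct foo *p;

	rcu_read_lock_sched();
	p = rcu_dereference_sched(gbl_foo);
	if (p)
		do_something_with(p->val);	/* hypothetical consumer */
	rcu_read_unlock_sched();
}

static void foo_retire(struct foo *old_fp)
{
	synchronize_sched();	/* wait for all pre-existing readers */
	kfree(old_fp);		/* now safe: no reader can still see old_fp */
}
#endif
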
/*
 * Helper function for call_rcu() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcp->curtail = head;
	rcp->curtail = &head->next;
	RCU_TRACE(rcp->qlen++);
	local_irq_restore(flags);
}

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);

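/*
 * Illustrative sketch, not part of this file: the usual asynchronous
 * counterpart to the sketch above, deferring the free to a callback
 * instead of blocking for a grace period.  "struct foo", gbl_foo and
 * foo_release() are hypothetical.
 */
#if 0	/* usage sketch only */
struct foo {
	int val;
	struct rcu_head rcu;
};

static struct foo __rcu *gbl_foo;

static void foo_release(struct rcu_head *rcu)
{
	/* Invoked after a grace period; no reader can still hold a reference. */
	kfree(container_of(rcu, struct foo, rcu));
}

static void foo_update(int new_val)
{
	struct foo *new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
	struct foo *old_fp;

	if (!new_fp)
		return;
	new_fp->val = new_val;
	old_fp = rcu_dereference_protected(gbl_foo, 1);
	rcu_assign_pointer(gbl_foo, new_fp);		/* publish new version */
	if (old_fp)
		call_rcu_sched(&old_fp->rcu, foo_release);	/* defer free */
}
#endif
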
/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);