/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2010 Linaro
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/kthread.h>

#ifdef CONFIG_TINY_PREEMPT_RCU

#include <linux/delay.h>

/* Global control variables for preemptible RCU. */
struct rcu_preempt_ctrlblk {
	struct rcu_ctrlblk rcb;	/* curtail: ->next ptr of last CB for GP. */
	struct rcu_head **nexttail;
				/* Tasks blocked in a preemptible RCU */
				/*  read-side critical section while a */
				/*  preemptible-RCU grace period is in */
				/*  progress must wait for a later grace */
				/*  period.  This pointer points to the */
				/*  ->next pointer of the last task that */
				/*  must wait for a later grace period, or */
				/*  to &->rcb.rcucblist if there is no */
				/*  such task. */
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/*  section.  Tasks are placed at the head */
				/*  of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/*  current grace period, or NULL if there */
				/*  is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to the first task blocking the */
				/*  current expedited grace period, or NULL */
				/*  if there is no such task.  If there */
				/*  is no current expedited grace period, */
				/*  then there cannot be any such task. */
	u8 gpnum;		/* Current grace period. */
	u8 gpcpu;		/* Last grace period blocked by the CPU. */
	u8 completed;		/* Last grace period completed. */
				/* If all three are equal, RCU is idle. */
};

static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
	.rcb.donetail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.rcb.curtail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.nexttail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.blkd_tasks = LIST_HEAD_INIT(rcu_preempt_ctrlblk.blkd_tasks),
};

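/*
 * Illustrative sketch (not from the original sources): the single
 * callback list hanging off ->rcucblist is divided into three segments
 * by the tail pointers above.  Callbacks before *->donetail have waited
 * a full grace period and are ready to invoke, callbacks up to
 * *->curtail are waiting for the current grace period, and callbacks up
 * to *->nexttail must wait for a later grace period:
 *
 *	rcucblist --> [done] --> [current GP] --> [next GP] --> NULL
 *	                   ^donetail         ^curtail      ^nexttail
 *
 * When RCU is idle, all three tail pointers reference ->rcucblist
 * itself, as set up by the initializer above.
 */
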
static int rcu_preempted_readers_exp(void);
static void rcu_report_exp_done(void);

/*
 * Return true if the CPU has not yet responded to the current grace period.
 */
static int rcu_cpu_blocking_cur_gp(void)
{
	return rcu_preempt_ctrlblk.gpcpu != rcu_preempt_ctrlblk.gpnum;
}

/*
 * Check for a running RCU reader.  Because there is only one CPU,
 * there can be but one running RCU reader at a time.  ;-)
 */
static int rcu_preempt_running_reader(void)
{
	return current->rcu_read_lock_nesting;
}

/*
 * Check for preempted RCU readers blocking any grace period.
 * If the caller needs a reliable answer, it must disable hard irqs.
 */
static int rcu_preempt_blocked_readers_any(void)
{
	return !list_empty(&rcu_preempt_ctrlblk.blkd_tasks);
}

/*
 * Check for preempted RCU readers blocking the current grace period.
 * If the caller needs a reliable answer, it must disable hard irqs.
 */
static int rcu_preempt_blocked_readers_cgp(void)
{
	return rcu_preempt_ctrlblk.gp_tasks != NULL;
}

/*
 * Return true if another preemptible-RCU grace period is needed.
 */
static int rcu_preempt_needs_another_gp(void)
{
	return *rcu_preempt_ctrlblk.rcb.curtail != NULL;
}

/*
 * Return true if a preemptible-RCU grace period is in progress.
 * The caller must disable hardirqs.
 */
static int rcu_preempt_gp_in_progress(void)
{
	return rcu_preempt_ctrlblk.completed != rcu_preempt_ctrlblk.gpnum;
}

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers of this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 *
 * Because this is a single-CPU implementation, the only way a grace
 * period can end is if the CPU is in a quiescent state.  The reason is
 * that a blocked preemptible-RCU reader can exit its critical section
 * only if the CPU is running it at the time.  Therefore, when the
 * last task blocking the current grace period exits its RCU read-side
 * critical section, neither the CPU nor blocked tasks will be stopping
 * the current grace period.  (In contrast, SMP implementations
 * might have CPUs running in RCU read-side critical sections that
 * block later grace periods -- but this is not possible given only
 * one CPU.)
 */
static void rcu_preempt_cpu_qs(void)
{
	/* Record both CPU and task as having responded to current GP. */
	rcu_preempt_ctrlblk.gpcpu = rcu_preempt_ctrlblk.gpnum;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;

	/*
	 * If there is no GP, or if blocked readers are still blocking GP,
	 * then there is nothing more to do.
	 */
	if (!rcu_preempt_gp_in_progress() || rcu_preempt_blocked_readers_cgp())
		return;

	/* Advance callbacks. */
	rcu_preempt_ctrlblk.completed = rcu_preempt_ctrlblk.gpnum;
	rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.rcb.curtail;
	rcu_preempt_ctrlblk.rcb.curtail = rcu_preempt_ctrlblk.nexttail;

	/* If there are no blocked readers, next GP is done instantly. */
	if (!rcu_preempt_blocked_readers_any())
		rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.nexttail;

	/* If there are done callbacks, cause them to be invoked. */
	if (*rcu_preempt_ctrlblk.rcb.donetail != NULL)
		invoke_rcu_kthread();
}

/*
 * Start a new RCU grace period if warranted.  Hard irqs must be disabled.
 */
static void rcu_preempt_start_gp(void)
{
	if (!rcu_preempt_gp_in_progress() && rcu_preempt_needs_another_gp()) {

		/* Official start of GP. */
		rcu_preempt_ctrlblk.gpnum++;

		/* Any blocked RCU readers block new GP. */
		if (rcu_preempt_blocked_readers_any())
			rcu_preempt_ctrlblk.gp_tasks =
				rcu_preempt_ctrlblk.blkd_tasks.next;

		/* If there is no running reader, CPU is done with GP. */
		if (!rcu_preempt_running_reader())
			rcu_preempt_cpu_qs();
	}
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * If the task started after the current grace period began, as recorded
 * by ->gpcpu, we enqueue it at the beginning of the list.  Otherwise,
 * we enqueue it before the element referenced by ->gp_tasks (or at the
 * tail if ->gp_tasks is NULL) and point ->gp_tasks at the newly added
 * element.  The task will dequeue itself when it exits the outermost
 * enclosing RCU read-side critical section.  Therefore, the current
 * grace period cannot be permitted to complete until the ->gp_tasks
 * pointer becomes NULL.
 *
 * Caller must disable preemption.
 */
void rcu_preempt_note_context_switch(void)
{
	struct task_struct *t = current;
	unsigned long flags;

	local_irq_save(flags); /* must exclude scheduler_tick(). */
	if (rcu_preempt_running_reader() &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.
		 */
		list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks);
		if (rcu_cpu_blocking_cur_gp())
			rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry;
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that the current grace period continues to be blocked.
	 */
	rcu_preempt_cpu_qs();
	local_irq_restore(flags);
}

/*
 * Tiny-preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting; shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* needed if we ever invoke rcu_read_lock in rcutiny.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	unsigned long flags;
	struct list_head *np;
	int special;

	/*
	 * NMI handlers cannot block and cannot safely manipulate state.
	 * They therefore cannot possibly be special, so just leave.
	 */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit its critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS)
		rcu_preempt_cpu_qs();

	/* Hardware IRQ handlers cannot block. */
	if (in_irq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the ->blkd_tasks list and adjust
		 * any pointers that might have been referencing it.
		 */
		empty = !rcu_preempt_blocked_readers_cgp();
		empty_exp = rcu_preempt_ctrlblk.exp_tasks == NULL;
		np = t->rcu_node_entry.next;
		if (np == &rcu_preempt_ctrlblk.blkd_tasks)
			np = NULL;
		list_del(&t->rcu_node_entry);
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.gp_tasks)
			rcu_preempt_ctrlblk.gp_tasks = np;
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.exp_tasks)
			rcu_preempt_ctrlblk.exp_tasks = np;
		INIT_LIST_HEAD(&t->rcu_node_entry);

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on the CPU, report the quiescent state
		 * and start a new grace period if needed.
		 */
		if (!empty && !rcu_preempt_blocked_readers_cgp()) {
			rcu_preempt_cpu_qs();
			rcu_preempt_start_gp();
		}

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to wake up the waiting task.
		 */
		if (!empty_exp && rcu_preempt_ctrlblk.exp_tasks == NULL)
			rcu_report_exp_done();
	}
	local_irq_restore(flags);
}

/*
 * Tiny-preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutiny.c */
	--t->rcu_read_lock_nesting;
	barrier();  /* decrement before load of ->rcu_read_unlock_special */
	if (t->rcu_read_lock_nesting == 0 &&
	    unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
		rcu_read_unlock_special(t);
#ifdef CONFIG_PROVE_LOCKING
	WARN_ON_ONCE(t->rcu_read_lock_nesting < 0);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

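/*
 * Illustrative reader-side sketch (the "gbl_foo" pointer and
 * do_something_with() are hypothetical, not part of this file):
 * the reader brackets its critical section with rcu_read_lock() and
 * rcu_read_unlock(), and the dereferenced structure is guaranteed
 * to remain valid until the matching unlock:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gbl_foo);
 *	if (p != NULL)
 *		do_something_with(p->a);
 *	rcu_read_unlock();
 */
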
/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the rcu_preempt_ctrlblk structure, which is
 * checked elsewhere.  This is called from the scheduling-clock interrupt.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(void)
{
	struct task_struct *t = current;

	if (rcu_preempt_gp_in_progress() &&
	    (!rcu_preempt_running_reader() ||
	     !rcu_cpu_blocking_cur_gp()))
		rcu_preempt_cpu_qs();
	if (&rcu_preempt_ctrlblk.rcb.rcucblist !=
	    rcu_preempt_ctrlblk.rcb.donetail)
		invoke_rcu_kthread();
	if (rcu_preempt_gp_in_progress() &&
	    rcu_cpu_blocking_cur_gp() &&
	    rcu_preempt_running_reader())
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

/*
 * TINY_PREEMPT_RCU has an extra callback-list tail pointer to
 * update, so this is invoked from rcu_process_callbacks() to
 * handle that case.  Of course, it is invoked for all flavors of
 * RCU, but RCU callbacks can appear only on one of the lists, and
 * neither ->nexttail nor ->donetail can possibly be NULL, so there
 * is no need for an explicit check.
 */
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
	if (rcu_preempt_ctrlblk.nexttail == rcp->donetail)
		rcu_preempt_ctrlblk.nexttail = &rcp->rcucblist;
}

/*
 * Process callbacks for preemptible RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
	rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb);
}

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcu_preempt_ctrlblk.nexttail = head;
	rcu_preempt_ctrlblk.nexttail = &head->next;
	rcu_preempt_start_gp();  /* checks to see if GP needed. */
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(call_rcu);

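/*
 * Illustrative call_rcu() usage sketch (the "foo" structure and
 * foo_reclaim() are hypothetical): embed an rcu_head in the protected
 * structure and free the structure from the callback, which runs only
 * after a full grace period has elapsed:
 *
 *	struct foo {
 *		struct rcu_head rcu;
 *		int a;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		struct foo *fp = container_of(rhp, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	call_rcu(&fp->rcu, foo_reclaim);
 */
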
void rcu_barrier(void)
{
	struct rcu_synchronize rcu;

	init_rcu_head_on_stack(&rcu.head);
	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	call_rcu(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void synchronize_rcu(void)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (!rcu_scheduler_active)
		return;
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

	WARN_ON_ONCE(rcu_preempt_running_reader());
	if (!rcu_preempt_blocked_readers_any())
		return;

	/* Once we get past the fastpath checks, same code as rcu_barrier(). */
	rcu_barrier();
}
EXPORT_SYMBOL_GPL(synchronize_rcu);

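/*
 * Illustrative updater-side sketch (the "gbl_foo" pointer, update_lock,
 * and new_a are hypothetical): publish a new version of the structure,
 * wait for all pre-existing readers to finish, then free the old one:
 *
 *	struct foo *new_fp, *old_fp;
 *
 *	new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
 *	spin_lock(&update_lock);
 *	old_fp = gbl_foo;
 *	*new_fp = *old_fp;
 *	new_fp->a = new_a;
 *	rcu_assign_pointer(gbl_foo, new_fp);
 *	spin_unlock(&update_lock);
 *	synchronize_rcu();
 *	kfree(old_fp);
 */
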
static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static unsigned long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(void)
{
	return rcu_preempt_ctrlblk.exp_tasks != NULL;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.
 */
static void rcu_report_exp_done(void)
{
	wake_up(&sync_rcu_preempt_exp_wq);
}

/*
 * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
 * is to rely on the fact that there is but one CPU, and that it is
 * illegal for a task to invoke synchronize_rcu_expedited() while in a
 * preemptible-RCU read-side critical section.  Therefore, any such
 * critical sections must correspond to blocked tasks, which must therefore
 * be on the ->blkd_tasks list.  So just record the current head of the
 * list in the ->exp_tasks pointer, and wait for all tasks including and
 * after the task pointed to by ->exp_tasks to drain.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_preempt_ctrlblk *rpcp = &rcu_preempt_ctrlblk;
	unsigned long snap;

	barrier(); /* ensure prior action seen before grace period. */

	WARN_ON_ONCE(rcu_preempt_running_reader());

	/*
	 * Acquire lock so that there is only one preemptible RCU grace
	 * period in flight.  Of course, if someone does the expedited
	 * grace period for us while we are acquiring the lock, just leave.
	 */
	snap = sync_rcu_preempt_exp_count + 1;
	mutex_lock(&sync_rcu_preempt_exp_mutex);
	if (ULONG_CMP_LT(snap, sync_rcu_preempt_exp_count))
		goto unlock_mb_ret; /* Others did our work for us. */

	local_irq_save(flags);

	/*
	 * All RCU readers must already be on blkd_tasks because
	 * we cannot legally be executing in an RCU read-side critical
	 * section.
	 */

	/* Snapshot current head of ->blkd_tasks list. */
	rpcp->exp_tasks = rpcp->blkd_tasks.next;
	if (rpcp->exp_tasks == &rpcp->blkd_tasks)
		rpcp->exp_tasks = NULL;
	local_irq_restore(flags);

	/* Wait for tail of ->blkd_tasks list to drain. */
	if (rcu_preempted_readers_exp())
		wait_event(sync_rcu_preempt_exp_wq,
			   !rcu_preempted_readers_exp());

	/* Clean up and exit. */
	barrier(); /* ensure expedited GP seen before counter increment. */
	sync_rcu_preempt_exp_count++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
	barrier(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

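/*
 * Usage note: synchronize_rcu_expedited() is a drop-in replacement for
 * synchronize_rcu() when grace-period latency matters more than CPU
 * overhead, for example in a hypothetical latency-sensitive update path:
 *
 *	rcu_assign_pointer(gbl_foo, new_fp);
 *	synchronize_rcu_expedited();
 *	kfree(old_fp);
 */
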
/*
 * Does preemptible RCU need the CPU to stay out of dynticks mode?
 */
int rcu_preempt_needs_cpu(void)
{
	if (!rcu_preempt_running_reader())
		rcu_preempt_cpu_qs();
	return rcu_preempt_ctrlblk.rcb.rcucblist != NULL;
}

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0)
		return;
	t->rcu_read_lock_nesting = 1;
	rcu_read_unlock();
}

#else /* #ifdef CONFIG_TINY_PREEMPT_RCU */

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(void)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to remove.
 */
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}

#endif /* #else #ifdef CONFIG_TINY_PREEMPT_RCU */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#include <linux/kernel_stat.h>

/*
 * During boot, we forgive RCU lockdep issues.  After this function is
 * invoked, we start taking RCU lockdep issues seriously.
 */
void __init rcu_scheduler_starting(void)
{
	WARN_ON(nr_context_switches() > 0);
	rcu_scheduler_active = 1;
}

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */