/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2010 Linaro
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */
#include <linux/kthread.h>
/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last CB. */
};
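
/*
 * Layout sketch, for illustration only:
 *
 *	rcucblist -> CB1 -> CB2 -> CB3 -> NULL
 *	                ^donetail      ^curtail
 *
 * Here donetail == &CB1->next and curtail == &CB3->next: CB1 has waited
 * out its grace period and may be invoked, while CB2 and CB3 are still
 * waiting.  On an empty list, both tail pointers reference &rcucblist
 * itself, as in the initializers below.
 */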
/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
	.donetail	= &rcu_sched_ctrlblk.rcucblist,
	.curtail	= &rcu_sched_ctrlblk.rcucblist,
};

static struct rcu_ctrlblk rcu_bh_ctrlblk = {
	.donetail	= &rcu_bh_ctrlblk.rcucblist,
	.curtail	= &rcu_bh_ctrlblk.rcucblist,
};
#ifdef CONFIG_DEBUG_LOCK_ALLOC
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
#ifdef CONFIG_TINY_PREEMPT_RCU

#include <linux/delay.h>
/* Global control variables for preemptible RCU. */
struct rcu_preempt_ctrlblk {
	struct rcu_ctrlblk rcb;	/* curtail: ->next ptr of last CB for GP. */
	struct rcu_head **nexttail;
				/* Tasks blocked in a preemptible RCU */
				/*  read-side critical section while a */
				/*  preemptible-RCU grace period is in */
				/*  progress must wait for a later grace */
				/*  period.  This pointer points to the */
				/*  ->next pointer of the last task that */
				/*  must wait for a later grace period, or */
				/*  to &->rcb.rcucblist if there is no */
				/*  such task. */
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/*  section.  Tasks are placed at the head */
				/*  of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/*  current grace period, or NULL if there */
				/*  is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to first task blocking the */
				/*  current expedited grace period, or NULL */
				/*  if there is no such task.  If there */
				/*  is no current expedited grace period, */
				/*  then there cannot be any such task. */
#ifdef CONFIG_RCU_BOOST
	struct list_head *boost_tasks;
				/* Pointer to first task that needs to be */
				/*  priority-boosted, or NULL if no priority */
				/*  boosting is needed.  If there is no */
				/*  current or expedited grace period, there */
				/*  can be no such task. */
#endif /* #ifdef CONFIG_RCU_BOOST */
	u8 gpnum;		/* Current grace period. */
	u8 gpcpu;		/* Last grace period blocked by the CPU. */
	u8 completed;		/* Last grace period completed. */
				/* If all three are equal, RCU is idle. */
	s8 boosted_this_gp;	/* Has boosting already happened? */
	unsigned long boost_time; /* When to start boosting (jiffies). */
};
static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
	.rcb.donetail	= &rcu_preempt_ctrlblk.rcb.rcucblist,
	.rcb.curtail	= &rcu_preempt_ctrlblk.rcb.rcucblist,
	.nexttail	= &rcu_preempt_ctrlblk.rcb.rcucblist,
	.blkd_tasks	= LIST_HEAD_INIT(rcu_preempt_ctrlblk.blkd_tasks),
};
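
/*
 * Sketch for illustration: TINY_PREEMPT_RCU segments the one callback
 * list three ways.  Callbacks up to the one referenced through
 * ->rcb.donetail have had their grace period elapse and are ready to
 * invoke; callbacks from there through ->rcb.curtail await the current
 * grace period; and callbacks from there through ->nexttail arrived
 * after the current grace period began, so must wait for the next one.
 * With all tail pointers equal, as initialized above, every segment
 * is empty.
 */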
static int rcu_preempted_readers_exp(void);
static void rcu_report_exp_done(void);
/*
 * Return true if the CPU has not yet responded to the current grace period.
 */
static int rcu_cpu_blocking_cur_gp(void)
{
	return rcu_preempt_ctrlblk.gpcpu != rcu_preempt_ctrlblk.gpnum;
}
/*
 * Check for a running RCU reader.  Because there is only one CPU,
 * there can be but one running RCU reader at a time.  ;-)
 */
static int rcu_preempt_running_reader(void)
{
	return current->rcu_read_lock_nesting;
}
/*
 * Check for preempted RCU readers blocking any grace period.
 * If the caller needs a reliable answer, it must disable hard irqs.
 */
static int rcu_preempt_blocked_readers_any(void)
{
	return !list_empty(&rcu_preempt_ctrlblk.blkd_tasks);
}
/*
 * Check for preempted RCU readers blocking the current grace period.
 * If the caller needs a reliable answer, it must disable hard irqs.
 */
static int rcu_preempt_blocked_readers_cgp(void)
{
	return rcu_preempt_ctrlblk.gp_tasks != NULL;
}
/*
 * Return true if another preemptible-RCU grace period is needed.
 */
static int rcu_preempt_needs_another_gp(void)
{
	return *rcu_preempt_ctrlblk.rcb.curtail != NULL;
}
/*
 * Return true if a preemptible-RCU grace period is in progress.
 * The caller must disable hardirqs.
 */
static int rcu_preempt_gp_in_progress(void)
{
	return rcu_preempt_ctrlblk.completed != rcu_preempt_ctrlblk.gpnum;
}
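
/*
 * Worked example of the counter scheme, for illustration: with
 * gpnum == gpcpu == completed == 0, RCU is idle.  Starting a grace
 * period sets gpnum to 1, so completed != gpnum means a GP is in
 * progress.  When the CPU passes through a quiescent state, gpcpu
 * becomes 1; once any blocked readers drain, completed becomes 1
 * and all three are again equal.  The u8 arithmetic wraps harmlessly
 * because only equality is ever tested.
 */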
/*
 * Advance a ->blkd_tasks-list pointer to the next entry, returning
 * NULL if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rcu_preempt_ctrlblk.blkd_tasks)
		np = NULL;
	return np;
}
#ifdef CONFIG_RCU_BOOST

#include "rtmutex_common.h"
/*
 * Carry out RCU priority boosting on the task indicated by ->boost_tasks,
 * and advance ->boost_tasks to the next task in the ->blkd_tasks list.
 */
static int rcu_boost(void)
{
	unsigned long flags;
	struct rt_mutex mtx;
	struct list_head *np;
	struct task_struct *t;

	if (rcu_preempt_ctrlblk.boost_tasks == NULL)
		return 0;  /* Nothing to boost. */
	raw_local_irq_save(flags);
	rcu_preempt_ctrlblk.boosted_this_gp++;
	t = container_of(rcu_preempt_ctrlblk.boost_tasks, struct task_struct,
			 rcu_node_entry);
	np = rcu_next_node_entry(t);
	rt_mutex_init_proxy_locked(&mtx, t);
	t->rcu_boost_mutex = &mtx;
	t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BOOSTED;
	raw_local_irq_restore(flags);
	rt_mutex_lock(&mtx);  /* Block until the boosted reader releases. */
	rt_mutex_unlock(&mtx);
	return rcu_preempt_ctrlblk.boost_tasks != NULL;
}
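
/*
 * Sequence sketch of the boost handshake above, for illustration: the
 * rt_mutex is created already held by the blocked reader task via
 * rt_mutex_init_proxy_locked(), so the subsequent rt_mutex_lock() here
 * blocks and lends this kthread's priority to the reader by ordinary
 * priority inheritance.  When that reader finally leaves its RCU
 * read-side critical section, rcu_read_unlock_special() unlocks the
 * mutex, un-boosting the reader and allowing rcu_boost() to proceed.
 */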
/*
 * Check to see if it is now time to start boosting RCU readers blocking
 * the current grace period, and, if so, tell the rcu_kthread_task to
 * start boosting them.  If there is an expedited boost in progress,
 * we wait for it to complete.
 */
static void rcu_initiate_boost(void)
{
	if (rcu_preempt_ctrlblk.gp_tasks != NULL &&
	    rcu_preempt_ctrlblk.boost_tasks == NULL &&
	    rcu_preempt_ctrlblk.boosted_this_gp == 0 &&
	    ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time)) {
		rcu_preempt_ctrlblk.boost_tasks = rcu_preempt_ctrlblk.gp_tasks;
		invoke_rcu_kthread();
	}
}
/*
 * Initiate boosting for an expedited grace period.
 */
static void rcu_initiate_expedited_boost(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	if (!list_empty(&rcu_preempt_ctrlblk.blkd_tasks)) {
		rcu_preempt_ctrlblk.boost_tasks =
			rcu_preempt_ctrlblk.blkd_tasks.next;
		rcu_preempt_ctrlblk.boosted_this_gp = -1;
		invoke_rcu_kthread();
	}
	raw_local_irq_restore(flags);
}
#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(void)
{
	rcu_preempt_ctrlblk.boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
	if (rcu_preempt_ctrlblk.boosted_this_gp > 0)
		rcu_preempt_ctrlblk.boosted_this_gp = 0;
}
#else /* #ifdef CONFIG_RCU_BOOST */

/*
 * If there is no RCU priority boosting, we don't boost.
 */
static int rcu_boost(void)
{
	return 0;
}

/*
 * If there is no RCU priority boosting, we don't initiate boosting.
 */
static void rcu_initiate_boost(void)
{
}

/*
 * If there is no RCU priority boosting, we don't initiate expedited boosting.
 */
static void rcu_initiate_expedited_boost(void)
{
}

/*
 * If there is no RCU priority boosting, nothing to do at grace-period start.
 */
static void rcu_preempt_boost_start_gp(void)
{
}

#endif /* else #ifdef CONFIG_RCU_BOOST */
/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 *
 * Because this is a single-CPU implementation, the only way a grace
 * period can end is if the CPU is in a quiescent state.  The reason is
 * that a blocked preemptible-RCU reader can exit its critical section
 * only if the CPU is running it at the time.  Therefore, when the
 * last task blocking the current grace period exits its RCU read-side
 * critical section, neither the CPU nor blocked tasks will be stopping
 * the current grace period.  (In contrast, SMP implementations
 * might have CPUs running in RCU read-side critical sections that
 * block later grace periods -- but this is not possible given only
 * one CPU.)
 */
static void rcu_preempt_cpu_qs(void)
{
	/* Record both CPU and task as having responded to current GP. */
	rcu_preempt_ctrlblk.gpcpu = rcu_preempt_ctrlblk.gpnum;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;

	/* If there is no GP then there is nothing more to do. */
	if (!rcu_preempt_gp_in_progress())
		return;
	/* If there are blocked readers, go check up on boosting. */
	if (rcu_preempt_blocked_readers_cgp()) {
		rcu_initiate_boost();
		return;
	}

	/* Advance callbacks. */
	rcu_preempt_ctrlblk.completed = rcu_preempt_ctrlblk.gpnum;
	rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.rcb.curtail;
	rcu_preempt_ctrlblk.rcb.curtail = rcu_preempt_ctrlblk.nexttail;

	/* If there are no blocked readers, next GP is done instantly. */
	if (!rcu_preempt_blocked_readers_any())
		rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.nexttail;

	/* If there are done callbacks, cause them to be invoked. */
	if (*rcu_preempt_ctrlblk.rcb.donetail != NULL)
		invoke_rcu_kthread();
}
/*
 * Start a new RCU grace period if warranted.  Hard irqs must be disabled.
 */
static void rcu_preempt_start_gp(void)
{
	if (!rcu_preempt_gp_in_progress() && rcu_preempt_needs_another_gp()) {

		/* Official start of GP. */
		rcu_preempt_ctrlblk.gpnum++;

		/* Any blocked RCU readers block new GP. */
		if (rcu_preempt_blocked_readers_any())
			rcu_preempt_ctrlblk.gp_tasks =
				rcu_preempt_ctrlblk.blkd_tasks.next;

		/* Set up for RCU priority boosting. */
		rcu_preempt_boost_start_gp();

		/* If there is no running reader, CPU is done with GP. */
		if (!rcu_preempt_running_reader())
			rcu_preempt_cpu_qs();
	}
}
/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * If the task started after the current grace period began, as recorded
 * by ->gpcpu, we enqueue at the beginning of the list.  Otherwise, we
 * enqueue before the element referenced by ->gp_tasks (or at the tail
 * if ->gp_tasks is NULL) and point ->gp_tasks at the newly added element.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the ->gp_tasks pointer becomes
 * NULL.
 *
 * Caller must disable preemption.
 */
void rcu_preempt_note_context_switch(void)
{
	struct task_struct *t = current;
	unsigned long flags;

	local_irq_save(flags); /* must exclude scheduler_tick(). */
	if (rcu_preempt_running_reader() &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.
		 */
		list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks);
		if (rcu_cpu_blocking_cur_gp())
			rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry;
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that current grace period continues to be blocked.
	 */
	rcu_preempt_cpu_qs();
	local_irq_restore(flags);
}
/*
 * Tiny-preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* needed if we ever invoke rcu_read_lock in rcutiny.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);
/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	unsigned long flags;
	struct list_head *np;
	int special;

	/*
	 * NMI handlers cannot block and cannot safely manipulate state.
	 * They therefore cannot possibly be special, so just leave.
	 */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS)
		rcu_preempt_cpu_qs();

	/* Hardware IRQ handlers cannot block. */
	if (in_irq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the ->blkd_tasks list and adjust
		 * any pointers that might have been referencing it.
		 */
		empty = !rcu_preempt_blocked_readers_cgp();
		empty_exp = rcu_preempt_ctrlblk.exp_tasks == NULL;
		np = rcu_next_node_entry(t);
		list_del(&t->rcu_node_entry);
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.gp_tasks)
			rcu_preempt_ctrlblk.gp_tasks = np;
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.exp_tasks)
			rcu_preempt_ctrlblk.exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.boost_tasks)
			rcu_preempt_ctrlblk.boost_tasks = np;
#endif /* #ifdef CONFIG_RCU_BOOST */
		INIT_LIST_HEAD(&t->rcu_node_entry);

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on the CPU, report the quiescent state
		 * and start a new grace period if needed.
		 */
		if (!empty && !rcu_preempt_blocked_readers_cgp()) {
			rcu_preempt_cpu_qs();
			rcu_preempt_start_gp();
		}

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to wake up the waiting task.
		 */
		if (!empty_exp && rcu_preempt_ctrlblk.exp_tasks == NULL)
			rcu_report_exp_done();
	}
#ifdef CONFIG_RCU_BOOST
	/* Unboost self if was boosted. */
	if (special & RCU_READ_UNLOCK_BOOSTED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BOOSTED;
		rt_mutex_unlock(t->rcu_boost_mutex);
		t->rcu_boost_mutex = NULL;
	}
#endif /* #ifdef CONFIG_RCU_BOOST */
	local_irq_restore(flags);
}
/*
 * Tiny-preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutiny.c */
	--t->rcu_read_lock_nesting;
	barrier();  /* decrement before load of ->rcu_read_unlock_special */
	if (t->rcu_read_lock_nesting == 0 &&
	    unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
		rcu_read_unlock_special(t);
#ifdef CONFIG_PROVE_LOCKING
	WARN_ON_ONCE(t->rcu_read_lock_nesting < 0);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
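
/*
 * Reader-side usage sketch, for illustration only; "struct foo",
 * "foo_head", and do_stuff() are hypothetical, not part of this file.
 * Readers normally reach the functions above through the
 * rcu_read_lock()/rcu_read_unlock() wrappers:
 *
 *	struct foo *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(foo_head);
 *	if (p != NULL)
 *		do_stuff(p);
 *	rcu_read_unlock();
 */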
/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the rcu_preempt_ctrlblk structure, which is
 * checked elsewhere.  This is called from the scheduling-clock interrupt.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(void)
{
	struct task_struct *t = current;

	if (rcu_preempt_gp_in_progress() &&
	    (!rcu_preempt_running_reader() ||
	     !rcu_cpu_blocking_cur_gp()))
		rcu_preempt_cpu_qs();
	if (&rcu_preempt_ctrlblk.rcb.rcucblist !=
	    rcu_preempt_ctrlblk.rcb.donetail)
		invoke_rcu_kthread();
	if (rcu_preempt_gp_in_progress() &&
	    rcu_cpu_blocking_cur_gp() &&
	    rcu_preempt_running_reader())
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}
/*
 * TINY_PREEMPT_RCU has an extra callback-list tail pointer to
 * update, so this is invoked from rcu_process_callbacks() to
 * handle that case.  Of course, it is invoked for all flavors of
 * RCU, but RCU callbacks can appear only on one of the lists, and
 * neither ->nexttail nor ->donetail can possibly be NULL, so there
 * is no need for an explicit check.
 */
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
	if (rcu_preempt_ctrlblk.nexttail == rcp->donetail)
		rcu_preempt_ctrlblk.nexttail = &rcp->rcucblist;
}
/*
 * Process callbacks for preemptible RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
	rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb);
}
/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcu_preempt_ctrlblk.nexttail = head;
	rcu_preempt_ctrlblk.nexttail = &head->next;
	rcu_preempt_start_gp();  /* checks to see if GP needed. */
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(call_rcu);
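
/*
 * Usage sketch, for illustration only; "struct foo" and foo_reclaim()
 * are hypothetical.  The rcu_head is embedded in the protected
 * structure, and the callback runs only after a full grace period:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		kfree(container_of(rcu, struct foo, rcu));
 *	}
 *
 *	call_rcu(&p->rcu, foo_reclaim);		(p already unlinked)
 */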
void rcu_barrier(void)
{
	struct rcu_synchronize rcu;

	init_rcu_head_on_stack(&rcu.head);
	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	call_rcu(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
/*
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void synchronize_rcu(void)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (!rcu_scheduler_active)
		return;
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

	WARN_ON_ONCE(rcu_preempt_running_reader());
	if (!rcu_preempt_blocked_readers_any())
		return;

	/* Once we get past the fastpath checks, same code as rcu_barrier(). */
	rcu_barrier();
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
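
/*
 * Updater-side usage sketch, for illustration only; foo_head and the
 * surrounding locking are hypothetical.  Unpublish the old element,
 * wait out pre-existing readers, then reclaim:
 *
 *	old = foo_head;
 *	rcu_assign_pointer(foo_head, new);
 *	synchronize_rcu();	(no reader can still hold a reference to old)
 *	kfree(old);
 */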
static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static unsigned long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(void)
{
	return rcu_preempt_ctrlblk.exp_tasks != NULL;
}
/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.
 */
static void rcu_report_exp_done(void)
{
	wake_up(&sync_rcu_preempt_exp_wq);
}
/*
 * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
 * is to rely on the fact that there is but one CPU, and that it is
 * illegal for a task to invoke synchronize_rcu_expedited() while in a
 * preemptible-RCU read-side critical section.  Therefore, any such
 * critical sections must correspond to blocked tasks, which must therefore
 * be on the ->blkd_tasks list.  So just record the current head of the
 * list in the ->exp_tasks pointer, and wait for all tasks including and
 * after the task pointed to by ->exp_tasks to drain.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_preempt_ctrlblk *rpcp = &rcu_preempt_ctrlblk;
	unsigned long snap;

	barrier(); /* ensure prior action seen before grace period. */

	WARN_ON_ONCE(rcu_preempt_running_reader());

	/*
	 * Acquire lock so that there is only one preemptible RCU grace
	 * period in flight.  Of course, if someone does the expedited
	 * grace period for us while we are acquiring the lock, just leave.
	 */
	snap = sync_rcu_preempt_exp_count + 1;
	mutex_lock(&sync_rcu_preempt_exp_mutex);
	if (ULONG_CMP_LT(snap, sync_rcu_preempt_exp_count))
		goto unlock_mb_ret; /* Others did our work for us. */

	local_irq_save(flags);

	/*
	 * All RCU readers have to already be on blkd_tasks because
	 * we cannot legally be executing in an RCU read-side critical
	 * section.
	 */

	/* Snapshot current head of ->blkd_tasks list. */
	rpcp->exp_tasks = rpcp->blkd_tasks.next;
	if (rpcp->exp_tasks == &rpcp->blkd_tasks)
		rpcp->exp_tasks = NULL;
	local_irq_restore(flags);

	/* Wait for tail of ->blkd_tasks list to drain. */
	if (rcu_preempted_readers_exp())
		rcu_initiate_expedited_boost();
	wait_event(sync_rcu_preempt_exp_wq,
		   !rcu_preempted_readers_exp());

	/* Clean up and exit. */
	barrier(); /* ensure expedited GP seen before counter increment. */
	sync_rcu_preempt_exp_count++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
	barrier(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
/*
 * Does preemptible RCU need the CPU to stay out of dynticks mode?
 */
int rcu_preempt_needs_cpu(void)
{
	if (!rcu_preempt_running_reader())
		rcu_preempt_cpu_qs();
	return rcu_preempt_ctrlblk.rcb.rcucblist != NULL;
}
/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0)
		return;
	t->rcu_read_lock_nesting = 1;
	rcu_read_unlock();
}
#else /* #ifdef CONFIG_TINY_PREEMPT_RCU */

/*
 * Because preemptible RCU does not exist, it is never necessary to
 * boost preempted RCU readers.
 */
static int rcu_boost(void)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(void)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to remove.
 */
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}

#endif /* #else #ifdef CONFIG_TINY_PREEMPT_RCU */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#include <linux/kernel_stat.h>

/*
 * During boot, we forgive RCU lockdep issues.  After this function is
 * invoked, we start taking RCU lockdep issues seriously.
 */
void __init rcu_scheduler_starting(void)
{
	WARN_ON(nr_context_switches() > 0);
	rcu_scheduler_active = 1;
}

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
#ifdef CONFIG_RCU_BOOST
#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
#else /* #ifdef CONFIG_RCU_BOOST */
#define RCU_BOOST_PRIO 1
#endif /* #else #ifdef CONFIG_RCU_BOOST */