/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/tick.h>
#define CREATE_TRACE_POINTS

#include "rcu.h"
MODULE_ALIAS("rcupdate");

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."
#ifndef CONFIG_TINY_RCU
module_param(rcu_expedited, int, 0);
module_param(rcu_normal, int, 0);
static int rcu_normal_after_boot;
module_param(rcu_normal_after_boot, int, 0);
#endif /* #ifndef CONFIG_TINY_RCU */
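
/*
 * With MODULE_PARAM_PREFIX set to "rcupdate." above, these knobs show up
 * on the kernel command line under that prefix, for example (illustrative
 * values only):
 *
 *	rcupdate.rcu_expedited=1 rcupdate.rcu_normal_after_boot=1
 */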

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 * RCU-sched read-side critical section.  In the absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.  This is useful for debug checks in functions
 * that require that they be called within an RCU-sched read-side
 * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that if the CPU is in the idle loop from an RCU point of
 * view (ie: that we are in the section between rcu_idle_enter() and
 * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
 * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
 * that are in such a section, considering these as in extended quiescent
 * state, so such a CPU is effectively never in an RCU read-side critical
 * section regardless of what RCU primitives it invokes.  This state of
 * affairs is required --- we need to keep an RCU-free window in idle
 * where the CPU may possibly enter into low power mode.  This way we can
 * report an extended quiescent state to other CPUs that started a grace
 * period.  Otherwise we would delay any grace period as long as we run
 * in the idle task.
 *
 * Similarly, we avoid claiming an SRCU read lock held if the current
 * CPU is offline.
 */
int rcu_read_lock_sched_held(void)
{
	int lockdep_opinion = 0;

	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	if (debug_locks)
		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
	return lockdep_opinion || !preemptible();
}
EXPORT_SYMBOL(rcu_read_lock_sched_held);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
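
/*
 * Sketch of a typical debug check built on rcu_read_lock_sched_held()
 * (illustrative only; struct foo and do_something_sched() are not part
 * of this file):
 *
 *	static void do_something_sched(struct foo *p)
 *	{
 *		RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
 *				 "do_something_sched() needs rcu_read_lock_sched()");
 *		...
 *	}
 */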

#ifndef CONFIG_TINY_RCU

/*
 * Should expedited grace-period primitives always fall back to their
 * non-expedited counterparts?  Intended for use within RCU.  Note
 * that if the user specifies both rcu_expedited and rcu_normal, then
 * rcu_normal wins.
 */
bool rcu_gp_is_normal(void)
{
	return READ_ONCE(rcu_normal);
}
EXPORT_SYMBOL_GPL(rcu_gp_is_normal);

static atomic_t rcu_expedited_nesting =
	ATOMIC_INIT(IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT) ? 1 : 0);

/*
 * Should normal grace-period primitives be expedited?  Intended for
 * use within RCU.  Note that this function takes the rcu_expedited
 * sysfs/boot variable into account as well as the rcu_expedite_gp()
 * nesting.  So looping on rcu_unexpedite_gp() until rcu_gp_is_expedited()
 * returns false is a -really- bad idea.
 */
bool rcu_gp_is_expedited(void)
{
	return rcu_expedited || atomic_read(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);

/**
 * rcu_expedite_gp - Expedite future RCU grace periods
 *
 * After a call to this function, future calls to synchronize_rcu() and
 * friends act as if the corresponding synchronize_rcu_expedited() function
 * had instead been called.
 */
void rcu_expedite_gp(void)
{
	atomic_inc(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_expedite_gp);

/**
 * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
 *
 * Undo a prior call to rcu_expedite_gp().  If all prior calls to
 * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
 * and if the rcu_expedited sysfs/boot parameter is not set, then all
 * subsequent calls to synchronize_rcu() and friends will return to
 * their normal non-expedited behavior.
 */
void rcu_unexpedite_gp(void)
{
	atomic_dec(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
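
/*
 * Typical pairing (illustrative sketch, not a caller in this file): a
 * subsystem that temporarily needs fast grace periods brackets the region
 * with the two functions above:
 *
 *	rcu_expedite_gp();
 *	... region where synchronize_rcu() and friends are expedited ...
 *	rcu_unexpedite_gp();
 */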

/*
 * Inform RCU of the end of the in-kernel boot sequence.
 */
void rcu_end_inkernel_boot(void)
{
	if (IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT))
		rcu_unexpedite_gp();
	if (rcu_normal_after_boot)
		WRITE_ONCE(rcu_normal, 1);
}

#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_PREEMPT_RCU

/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting != 1) {
		--t->rcu_read_lock_nesting;
	} else {
		barrier();  /* critical section before exit code. */
		t->rcu_read_lock_nesting = INT_MIN;
		barrier();  /* assign before ->rcu_read_unlock_special load */
		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before assign */
		t->rcu_read_lock_nesting = 0;
	}
#ifdef CONFIG_PROVE_LOCKING
	{
		int rrln = READ_ONCE(t->rcu_read_lock_nesting);

		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
	}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);

int notrace debug_lockdep_rcu_enabled(void)
{
	return rcu_scheduler_active && debug_locks &&
	       current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);

/**
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section.  In the absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.  This is useful for debug checks in functions that
 * require that they be called within an RCU read-side critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
 * occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock() in process context if the matching rcu_read_lock()
 * was invoked from within an irq handler.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return lock_is_held(&rcu_lock_map);
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);

/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and not cases.  Note that if someone uses
 * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * wakeme_after_rcu() - Callback function to awaken a task after grace period
 * @head: Pointer to rcu_head member within rcu_synchronize structure
 *
 * Awaken the corresponding task now that a grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}
EXPORT_SYMBOL_GPL(wakeme_after_rcu);
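
/*
 * Illustrative sketch of the pattern that wakeme_after_rcu() supports;
 * __wait_rcu_gp() below does essentially this for each requested flavor:
 *
 *	struct rcu_synchronize rcu;
 *
 *	init_rcu_head_on_stack(&rcu.head);
 *	init_completion(&rcu.completion);
 *	call_rcu(&rcu.head, wakeme_after_rcu);
 *	wait_for_completion(&rcu.completion);
 *	destroy_rcu_head_on_stack(&rcu.head);
 */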

void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
		   struct rcu_synchronize *rs_array)
{
	int i;

	/* Initialize and register callbacks for each flavor specified. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu ||
		     crcu_array[i] == call_rcu_bh)) {
			might_sleep();
			continue;
		}
		init_rcu_head_on_stack(&rs_array[i].head);
		init_completion(&rs_array[i].completion);
		(crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
	}

	/* Wait for all callbacks to be invoked. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu ||
		     crcu_array[i] == call_rcu_bh))
			continue;
		wait_for_completion(&rs_array[i].completion);
		destroy_rcu_head_on_stack(&rs_array[i].head);
	}
}
EXPORT_SYMBOL_GPL(__wait_rcu_gp);

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head)
{
	debug_object_init(head, &rcuhead_debug_descr);
}

void destroy_rcu_head(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}

static bool rcuhead_is_static_object(void *addr)
{
	return true;
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure that is about to go out of scope
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);

struct debug_obj_descr rcuhead_debug_descr = {
	.name = "rcu_head",
	.is_static_object = rcuhead_is_static_object,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old, unsigned long c)
{
	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif

#ifdef CONFIG_RCU_STALL_COMMON

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA	(5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA	0
#endif

int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;

module_param(rcu_cpu_stall_suppress, int, 0644);
module_param(rcu_cpu_stall_timeout, int, 0644);

int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
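
/*
 * Worked example (illustrative): with rcu_cpu_stall_timeout left at, say,
 * 21 seconds and HZ=1000, the function above returns 21 * 1000 jiffies,
 * plus an extra 5 * HZ of slack when CONFIG_PROVE_RCU defines
 * RCU_STALL_DELAY_DELTA as (5 * HZ).
 */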

void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}

static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

#ifdef CONFIG_TASKS_RCU

/*
 * Simple variant of RCU whose quiescent states are voluntary context switch,
 * user-space execution, and idle.  As such, grace periods can take one good
 * long time.  There are no read-side primitives similar to rcu_read_lock()
 * and rcu_read_unlock() because this implementation is intended to get
 * the system into a safe state for some of the manipulations involved in
 * tracing and the like.  Finally, this implementation does not support
 * high call_rcu_tasks() rates from multiple CPUs.  If this is required,
 * per-CPU callback lists will be needed.
 */

/* Global list of callbacks and associated lock. */
static struct rcu_head *rcu_tasks_cbs_head;
static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);

/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_SRCU(tasks_rcu_exit_srcu);

/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
static int rcu_task_stall_timeout __read_mostly = HZ * 60 * 10;
module_param(rcu_task_stall_timeout, int, 0644);

static void rcu_spawn_tasks_kthread(void);
static struct task_struct *rcu_tasks_kthread_ptr;

/*
 * Post an RCU-tasks callback.  First call must be from process context
 * after the scheduler is fully operational.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
	unsigned long flags;
	bool needwake;
	bool havetask = READ_ONCE(rcu_tasks_kthread_ptr);

	rhp->next = NULL;
	rhp->func = func;
	raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
	needwake = !rcu_tasks_cbs_head;
	*rcu_tasks_cbs_tail = rhp;
	rcu_tasks_cbs_tail = &rhp->next;
	raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
	/* We can't create the thread unless interrupts are enabled. */
	if ((needwake && havetask) ||
	    (!havetask && !irqs_disabled_flags(flags))) {
		rcu_spawn_tasks_kthread();
		wake_up(&rcu_tasks_cbs_wq);
	}
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);
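
/*
 * Illustrative use of call_rcu_tasks() (sketch only; "struct trampoline",
 * its ->rh field, and tramp_free_cb() are hypothetical, not part of this
 * file):
 *
 *	static void tramp_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct trampoline, rh));
 *	}
 *
 *	... detach the trampoline from every visible hook, then: ...
 *	call_rcu_tasks(&tramp->rh, tramp_free_cb);
 *
 * The callback is invoked only after every task has passed through a
 * voluntary context switch, usermode execution, or the idle loop.
 */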

/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have elapsed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * Note that this guarantee implies further memory-ordering guarantees.
 * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
 * each CPU is guaranteed to have executed a full memory barrier since the
 * end of its last RCU-tasks read-side critical section whose beginning
 * preceded the call to synchronize_rcu_tasks().  In addition, each CPU
 * having an RCU-tasks read-side critical section that extends beyond
 * the return from synchronize_rcu_tasks() is guaranteed to have executed
 * a full memory barrier after the beginning of synchronize_rcu_tasks()
 * and before the beginning of that RCU-tasks read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
 * (but again only if the system has more than one CPU).
 */
void synchronize_rcu_tasks(void)
{
	/* Complain if the scheduler has not started. */
	RCU_LOCKDEP_WARN(!rcu_scheduler_active,
			 "synchronize_rcu_tasks called too soon");

	/* Wait for the grace period. */
	wait_rcu_gp(call_rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
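
/*
 * Typical usage sketch (hypothetical caller, for example a tracer removing
 * a dynamically allocated trampoline; the helpers named here are
 * illustrative only):
 *
 *	unpatch_all_call_sites(tramp);	- no task can newly enter it
 *	synchronize_rcu_tasks();	- wait out tasks already inside it
 *	free_trampoline(tramp);		- now safe to free
 */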

/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);

/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{
	int cpu;

	if (!READ_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
	    !READ_ONCE(t->on_rq) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
		WRITE_ONCE(t->rcu_tasks_holdout, false);
		list_del_init(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
		return;
	}
	if (!needreport)
		return;
	if (*firstreport) {
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 t->rcu_tasks_idle_cpu, cpu);
	sched_show_task(t);
}

/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
{
	unsigned long flags;
	struct task_struct *g, *t;
	unsigned long lastreport;
	struct rcu_head *list;
	struct rcu_head *next;
	LIST_HEAD(rcu_tasks_holdouts);

	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
	housekeeping_affine(current);

	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down.  ;-)
	 */
	for (;;) {

		/* Pick up any new callbacks. */
		raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
		list = rcu_tasks_cbs_head;
		rcu_tasks_cbs_head = NULL;
		rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
		raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);

		/* If there were none, wait a bit and start over. */
		if (!list) {
			wait_event_interruptible(rcu_tasks_cbs_wq,
						 rcu_tasks_cbs_head);
			if (!rcu_tasks_cbs_head) {
				WARN_ON(signal_pending(current));
				schedule_timeout_interruptible(HZ/10);
			}
			continue;
		}

		/*
		 * Wait for all pre-existing t->on_rq and t->nvcsw
		 * transitions to complete.  Invoking synchronize_sched()
		 * suffices because all these transitions occur with
		 * interrupts disabled.  Without this synchronize_sched(),
		 * a read-side critical section that started before the
		 * grace period might be incorrectly seen as having started
		 * after the grace period.
		 *
		 * This synchronize_sched() also dispenses with the
		 * need for a memory barrier on the first store to
		 * ->rcu_tasks_holdout, as it forces the store to happen
		 * after the beginning of the grace period.
		 */
		synchronize_sched();

		/*
		 * There were callbacks, so we need to wait for an
		 * RCU-tasks grace period.  Start off by scanning
		 * the task list for tasks that are not already
		 * voluntarily blocked.  Mark these tasks and make
		 * a list of them in rcu_tasks_holdouts.
		 */
		rcu_read_lock();
		for_each_process_thread(g, t) {
			if (t != current && READ_ONCE(t->on_rq) &&
			    !is_idle_task(t)) {
				get_task_struct(t);
				t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
				WRITE_ONCE(t->rcu_tasks_holdout, true);
				list_add(&t->rcu_tasks_holdout_list,
					 &rcu_tasks_holdouts);
			}
		}
		rcu_read_unlock();

		/*
		 * Wait for tasks that are in the process of exiting.
		 * This does only part of the job, ensuring that all
		 * tasks that were previously exiting reach the point
		 * where they have disabled preemption, allowing the
		 * later synchronize_sched() to finish the job.
		 */
		synchronize_srcu(&tasks_rcu_exit_srcu);

		/*
		 * Each pass through the following loop scans the list
		 * of holdout tasks, removing any that are no longer
		 * holdouts.  When the list is empty, we are done.
		 */
		lastreport = jiffies;
		while (!list_empty(&rcu_tasks_holdouts)) {
			bool firstreport;
			bool needreport;
			int rtst;
			struct task_struct *t1;

			schedule_timeout_interruptible(HZ);
			rtst = READ_ONCE(rcu_task_stall_timeout);
			needreport = rtst > 0 &&
				     time_after(jiffies, lastreport + rtst);
			if (needreport)
				lastreport = jiffies;
			firstreport = true;
			WARN_ON(signal_pending(current));
			list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
						 rcu_tasks_holdout_list) {
				check_holdout_task(t, needreport, &firstreport);
				cond_resched();
			}
		}

		/*
		 * Because ->on_rq and ->nvcsw are not guaranteed
		 * to have a full memory barrier prior to them in the
		 * schedule() path, memory reordering on other CPUs could
		 * cause their RCU-tasks read-side critical sections to
		 * extend past the end of the grace period.  However,
		 * because these ->nvcsw updates are carried out with
		 * interrupts disabled, we can use synchronize_sched()
		 * to force the needed ordering on all such CPUs.
		 *
		 * This synchronize_sched() also confines all
		 * ->rcu_tasks_holdout accesses to be within the grace
		 * period, avoiding the need for memory barriers for
		 * ->rcu_tasks_holdout accesses.
		 *
		 * In addition, this synchronize_sched() waits for exiting
		 * tasks to complete their final preempt_disable() region
		 * of execution, cleaning up after the synchronize_srcu()
		 * above.
		 */
		synchronize_sched();

		/* Invoke the callbacks. */
		while (list) {
			next = list->next;
			local_bh_disable();
			list->func(list);
			local_bh_enable();
			list = next;
			cond_resched();
		}
		schedule_timeout_uninterruptible(HZ/10);
	}
}

/* Spawn rcu_tasks_kthread() at first call to call_rcu_tasks(). */
static void rcu_spawn_tasks_kthread(void)
{
	static DEFINE_MUTEX(rcu_tasks_kthread_mutex);
	struct task_struct *t;

	if (READ_ONCE(rcu_tasks_kthread_ptr)) {
		smp_mb(); /* Ensure caller sees full kthread. */
		return;
	}
	mutex_lock(&rcu_tasks_kthread_mutex);
	if (rcu_tasks_kthread_ptr) {
		mutex_unlock(&rcu_tasks_kthread_mutex);
		return;
	}
	t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
	BUG_ON(IS_ERR(t));
	smp_mb(); /* Ensure others see full kthread. */
	WRITE_ONCE(rcu_tasks_kthread_ptr, t);
	mutex_unlock(&rcu_tasks_kthread_mutex);
}

#endif /* #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_PROVE_RCU

/*
 * Early boot self test parameters, one for each flavor
 */
static bool rcu_self_test;
static bool rcu_self_test_bh;
static bool rcu_self_test_sched;

module_param(rcu_self_test, bool, 0444);
module_param(rcu_self_test_bh, bool, 0444);
module_param(rcu_self_test_sched, bool, 0444);

static int rcu_self_test_counter;

static void test_callback(struct rcu_head *r)
{
	rcu_self_test_counter++;
	pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
}

static void early_boot_test_call_rcu(void)
{
	static struct rcu_head head;

	call_rcu(&head, test_callback);
}

static void early_boot_test_call_rcu_bh(void)
{
	static struct rcu_head head;

	call_rcu_bh(&head, test_callback);
}

static void early_boot_test_call_rcu_sched(void)
{
	static struct rcu_head head;

	call_rcu_sched(&head, test_callback);
}

void rcu_early_boot_tests(void)
{
	pr_info("Running RCU self tests\n");

	if (rcu_self_test)
		early_boot_test_call_rcu();
	if (rcu_self_test_bh)
		early_boot_test_call_rcu_bh();
	if (rcu_self_test_sched)
		early_boot_test_call_rcu_sched();
}

static int rcu_verify_early_boot_tests(void)
{
	int ret = 0;
	int early_boot_test_counter = 0;

	if (rcu_self_test) {
		early_boot_test_counter++;
		rcu_barrier();
	}
	if (rcu_self_test_bh) {
		early_boot_test_counter++;
		rcu_barrier_bh();
	}
	if (rcu_self_test_sched) {
		early_boot_test_counter++;
		rcu_barrier_sched();
	}

	if (rcu_self_test_counter != early_boot_test_counter) {
		WARN_ON(1);
		ret = -1;
	}

	return ret;
}
late_initcall(rcu_verify_early_boot_tests);
#else
void rcu_early_boot_tests(void) {}
#endif /* CONFIG_PROVE_RCU */