kernel/rcutree_plugin.h
1 /*
2 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
3 * Internal non-public definitions that provide either classic
4 * or preemptible semantics.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 *
20 * Copyright Red Hat, 2009
21 * Copyright IBM Corporation, 2009
22 *
23 * Author: Ingo Molnar <mingo@elte.hu>
24 * Paul E. McKenney <paulmck@linux.vnet.ibm.com>
25 */
26
27 #include <linux/delay.h>
28 #include <linux/gfp.h>
29 #include <linux/oom.h>
30 #include <linux/smpboot.h>
31 #include <linux/tick.h>
32
33 #define RCU_KTHREAD_PRIO 1
34
35 #ifdef CONFIG_RCU_BOOST
36 #define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
37 #else
38 #define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
39 #endif
40
41 #ifdef CONFIG_RCU_NOCB_CPU
42 static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
43 static bool have_rcu_nocb_mask; /* Was rcu_nocb_mask allocated? */
44 static bool __read_mostly rcu_nocb_poll; /* Offload kthreads are to poll. */
45 static char __initdata nocb_buf[NR_CPUS * 5];
46 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
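
/*
 * Illustrative note (not part of the original file): these variables
 * are normally populated from the boot command line, for example:
 *
 *	rcu_nocbs=1-7 rcu_nocb_poll
 *
 * which offloads callbacks for CPUs 1-7 to per-CPU kthreads and asks
 * those kthreads to poll for callbacks rather than wait for wakeups.
 */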
47
48 /*
49 * Check the RCU kernel configuration parameters and print informative
50 * messages about anything out of the ordinary. If you like #ifdef, you
51 * will love this function.
52 */
53 static void __init rcu_bootup_announce_oddness(void)
54 {
55 #ifdef CONFIG_RCU_TRACE
56 pr_info("\tRCU debugfs-based tracing is enabled.\n");
57 #endif
58 #if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
59 pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
60 CONFIG_RCU_FANOUT);
61 #endif
62 #ifdef CONFIG_RCU_FANOUT_EXACT
63 pr_info("\tHierarchical RCU autobalancing is disabled.\n");
64 #endif
65 #ifdef CONFIG_RCU_FAST_NO_HZ
66 pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
67 #endif
68 #ifdef CONFIG_PROVE_RCU
69 pr_info("\tRCU lockdep checking is enabled.\n");
70 #endif
71 #ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
72 pr_info("\tRCU torture testing starts during boot.\n");
73 #endif
74 #if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
75 pr_info("\tDump stacks of tasks blocking RCU-preempt GP.\n");
76 #endif
77 #if defined(CONFIG_RCU_CPU_STALL_INFO)
78 pr_info("\tAdditional per-CPU info printed with stalls.\n");
79 #endif
80 #if NUM_RCU_LVL_4 != 0
81 pr_info("\tFour-level hierarchy is enabled.\n");
82 #endif
83 if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
84 pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
85 if (nr_cpu_ids != NR_CPUS)
86 pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
87 #ifdef CONFIG_RCU_NOCB_CPU
88 #ifndef CONFIG_RCU_NOCB_CPU_NONE
89 if (!have_rcu_nocb_mask) {
90 zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL);
91 have_rcu_nocb_mask = true;
92 }
93 #ifdef CONFIG_RCU_NOCB_CPU_ZERO
94 pr_info("\tOffload RCU callbacks from CPU 0\n");
95 cpumask_set_cpu(0, rcu_nocb_mask);
96 #endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */
97 #ifdef CONFIG_RCU_NOCB_CPU_ALL
98 pr_info("\tOffload RCU callbacks from all CPUs\n");
99 cpumask_setall(rcu_nocb_mask);
100 #endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */
101 #endif /* #ifndef CONFIG_RCU_NOCB_CPU_NONE */
102 if (have_rcu_nocb_mask) {
103 cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask);
104 pr_info("\tOffload RCU callbacks from CPUs: %s.\n", nocb_buf);
105 if (rcu_nocb_poll)
106 pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
107 }
108 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
109 }
110
111 #ifdef CONFIG_TREE_PREEMPT_RCU
112
113 struct rcu_state rcu_preempt_state =
114 RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
115 DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
116 static struct rcu_state *rcu_state = &rcu_preempt_state;
117
118 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
119
120 /*
121 * Tell them what RCU they are running.
122 */
123 static void __init rcu_bootup_announce(void)
124 {
125 pr_info("Preemptible hierarchical RCU implementation.\n");
126 rcu_bootup_announce_oddness();
127 }
128
129 /*
130 * Return the number of RCU-preempt batches processed thus far
131 * for debug and statistics.
132 */
133 long rcu_batches_completed_preempt(void)
134 {
135 return rcu_preempt_state.completed;
136 }
137 EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);
138
139 /*
140 * Return the number of RCU batches processed thus far for debug & stats.
141 */
142 long rcu_batches_completed(void)
143 {
144 return rcu_batches_completed_preempt();
145 }
146 EXPORT_SYMBOL_GPL(rcu_batches_completed);
147
148 /*
149 * Force a quiescent state for preemptible RCU.
150 */
151 void rcu_force_quiescent_state(void)
152 {
153 force_quiescent_state(&rcu_preempt_state);
154 }
155 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
156
157 /*
158 * Record a preemptible-RCU quiescent state for the specified CPU. Note
159 * that this does not necessarily mean that the CPU is fully quiescent:
160 * there might be any number of tasks blocked while within an RCU
161 * read-side critical section that began on this CPU.
162 *
163 * Unlike the other rcu_*_qs() functions, callers to this function
164 * must disable irqs in order to protect the assignment to
165 * ->rcu_read_unlock_special.
166 */
167 static void rcu_preempt_qs(int cpu)
168 {
169 struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
170
171 if (rdp->passed_quiesce == 0)
172 trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
173 rdp->passed_quiesce = 1;
174 current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
175 }
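
/*
 * Minimal sketch (hypothetical reader, not part of this file) of the
 * handshake that ends in rcu_preempt_qs(): the scheduling-clock
 * interrupt sets RCU_READ_UNLOCK_NEED_QS while the task is within a
 * reader, and the outermost rcu_read_unlock() then reports the
 * quiescent state:
 *
 *	rcu_read_lock();
 *	...			// irq sets RCU_READ_UNLOCK_NEED_QS
 *	rcu_read_unlock();	// sees the flag, so rcu_preempt_qs()
 *				//  runs via rcu_read_unlock_special()
 */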
176
177 /*
178 * We have entered the scheduler, and the current task might soon be
179 * context-switched away from. If this task is in an RCU read-side
180 * critical section, we will no longer be able to rely on the CPU to
181 * record that fact, so we enqueue the task on the blkd_tasks list.
182 * The task will dequeue itself when it exits the outermost enclosing
183 * RCU read-side critical section. Therefore, the current grace period
184 * cannot be permitted to complete until the blkd_tasks list entries
185 * predating the current grace period drain, in other words, until
186 * rnp->gp_tasks becomes NULL.
187 *
188 * Caller must disable preemption.
189 */
190 static void rcu_preempt_note_context_switch(int cpu)
191 {
192 struct task_struct *t = current;
193 unsigned long flags;
194 struct rcu_data *rdp;
195 struct rcu_node *rnp;
196
197 if (t->rcu_read_lock_nesting > 0 &&
198 (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
199
200 /* Possibly blocking in an RCU read-side critical section. */
201 rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
202 rnp = rdp->mynode;
203 raw_spin_lock_irqsave(&rnp->lock, flags);
204 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
205 t->rcu_blocked_node = rnp;
206
207 /*
208 * If this CPU has already checked in, then this task
209 * will hold up the next grace period rather than the
210 * current grace period. Queue the task accordingly.
211 * If the task is queued for the current grace period
212 * (i.e., this CPU has not yet passed through a quiescent
213 * state for the current grace period), then as long
214 * as that task remains queued, the current grace period
215 * cannot end. Note that there is some uncertainty as
216 * to exactly when the current grace period started.
217 * We take a conservative approach, which can result
218 * in unnecessarily waiting on tasks that started very
219 * slightly after the current grace period began. C'est
220 * la vie!!!
221 *
222 * But first, note that the current CPU must still be
223 * online!
224 */
225 WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
226 WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
227 if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
228 list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
229 rnp->gp_tasks = &t->rcu_node_entry;
230 #ifdef CONFIG_RCU_BOOST
231 if (rnp->boost_tasks != NULL)
232 rnp->boost_tasks = rnp->gp_tasks;
233 #endif /* #ifdef CONFIG_RCU_BOOST */
234 } else {
235 list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
236 if (rnp->qsmask & rdp->grpmask)
237 rnp->gp_tasks = &t->rcu_node_entry;
238 }
239 trace_rcu_preempt_task(rdp->rsp->name,
240 t->pid,
241 (rnp->qsmask & rdp->grpmask)
242 ? rnp->gpnum
243 : rnp->gpnum + 1);
244 raw_spin_unlock_irqrestore(&rnp->lock, flags);
245 } else if (t->rcu_read_lock_nesting < 0 &&
246 t->rcu_read_unlock_special) {
247
248 /*
249 * Complete exit from RCU read-side critical section on
250 * behalf of preempted instance of __rcu_read_unlock().
251 */
252 rcu_read_unlock_special(t);
253 }
254
255 /*
256 * Either we were not in an RCU read-side critical section to
257 * begin with, or we have now recorded that critical section
258 * globally. Either way, we can now note a quiescent state
259 * for this CPU. Again, if we were in an RCU read-side critical
260 * section, and if that critical section was blocking the current
261 * grace period, then the fact that the task has been enqueued
262 * means that we continue to block the current grace period.
263 */
264 local_irq_save(flags);
265 rcu_preempt_qs(cpu);
266 local_irq_restore(flags);
267 }
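
/*
 * Resulting ->blkd_tasks layout, assuming (illustratively) that tasks
 * A and B blocked while the current grace period was waiting on this
 * CPU and that task C blocked afterward:
 *
 *	blkd_tasks -> C -> B -> A
 *	                   ^
 *	                   +--- gp_tasks
 *
 * Tasks blocking the current grace period sit from ->gp_tasks to the
 * tail of the list; tasks blocking only the next grace period are
 * added at the head.
 */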
268
269 /*
270 * Check for preempted RCU readers blocking the current grace period
271 * for the specified rcu_node structure. If the caller needs a reliable
272 * answer, it must hold the rcu_node's ->lock.
273 */
274 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
275 {
276 return rnp->gp_tasks != NULL;
277 }
278
279 /*
280 * Record a quiescent state for all tasks that were previously queued
281 * on the specified rcu_node structure and that were blocking the current
282 * RCU grace period. The caller must hold the specified rnp->lock with
283 * irqs disabled, and this lock is released upon return, but irqs remain
284 * disabled.
285 */
286 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
287 __releases(rnp->lock)
288 {
289 unsigned long mask;
290 struct rcu_node *rnp_p;
291
292 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
293 raw_spin_unlock_irqrestore(&rnp->lock, flags);
294 return; /* Still need more quiescent states! */
295 }
296
297 rnp_p = rnp->parent;
298 if (rnp_p == NULL) {
299 /*
300 * Either there is only one rcu_node in the tree,
301 * or tasks were kicked up to root rcu_node due to
302 * CPUs going offline.
303 */
304 rcu_report_qs_rsp(&rcu_preempt_state, flags);
305 return;
306 }
307
308 /* Report up the rest of the hierarchy. */
309 mask = rnp->grpmask;
310 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
311 raw_spin_lock(&rnp_p->lock); /* irqs already disabled. */
312 rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
313 }
314
315 /*
316 * Advance a ->blkd_tasks-list pointer to the next entry, returning
317 * NULL instead if it is at the end of the list.
318 */
319 static struct list_head *rcu_next_node_entry(struct task_struct *t,
320 struct rcu_node *rnp)
321 {
322 struct list_head *np;
323
324 np = t->rcu_node_entry.next;
325 if (np == &rnp->blkd_tasks)
326 np = NULL;
327 return np;
328 }
329
330 /*
331 * Handle special cases during rcu_read_unlock(), such as needing to
332 * notify RCU core processing or the task having blocked during the RCU
333 * read-side critical section.
334 */
335 void rcu_read_unlock_special(struct task_struct *t)
336 {
337 int empty;
338 int empty_exp;
339 int empty_exp_now;
340 unsigned long flags;
341 struct list_head *np;
342 #ifdef CONFIG_RCU_BOOST
343 struct rt_mutex *rbmp = NULL;
344 #endif /* #ifdef CONFIG_RCU_BOOST */
345 struct rcu_node *rnp;
346 int special;
347
348 /* NMI handlers cannot block and cannot safely manipulate state. */
349 if (in_nmi())
350 return;
351
352 local_irq_save(flags);
353
354 /*
355 * If RCU core is waiting for this CPU to exit critical section,
356 * let it know that we have done so.
357 */
358 special = t->rcu_read_unlock_special;
359 if (special & RCU_READ_UNLOCK_NEED_QS) {
360 rcu_preempt_qs(smp_processor_id());
361 }
362
363 /* Hardware IRQ handlers cannot block. */
364 if (in_irq() || in_serving_softirq()) {
365 local_irq_restore(flags);
366 return;
367 }
368
369 /* Clean up if blocked during RCU read-side critical section. */
370 if (special & RCU_READ_UNLOCK_BLOCKED) {
371 t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;
372
373 /*
374 * Remove this task from the list it blocked on. The
375 * task can migrate while we acquire the lock, but at
376 * most one time. So at most two passes through the loop.
377 */
378 for (;;) {
379 rnp = t->rcu_blocked_node;
380 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
381 if (rnp == t->rcu_blocked_node)
382 break;
383 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
384 }
385 empty = !rcu_preempt_blocked_readers_cgp(rnp);
386 empty_exp = !rcu_preempted_readers_exp(rnp);
387 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
388 np = rcu_next_node_entry(t, rnp);
389 list_del_init(&t->rcu_node_entry);
390 t->rcu_blocked_node = NULL;
391 trace_rcu_unlock_preempted_task("rcu_preempt",
392 rnp->gpnum, t->pid);
393 if (&t->rcu_node_entry == rnp->gp_tasks)
394 rnp->gp_tasks = np;
395 if (&t->rcu_node_entry == rnp->exp_tasks)
396 rnp->exp_tasks = np;
397 #ifdef CONFIG_RCU_BOOST
398 if (&t->rcu_node_entry == rnp->boost_tasks)
399 rnp->boost_tasks = np;
400 /* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
401 if (t->rcu_boost_mutex) {
402 rbmp = t->rcu_boost_mutex;
403 t->rcu_boost_mutex = NULL;
404 }
405 #endif /* #ifdef CONFIG_RCU_BOOST */
406
407 /*
408 * If this was the last task on the current list, and if
409 * we aren't waiting on any CPUs, report the quiescent state.
410 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
411 * so we must take a snapshot of the expedited state.
412 */
413 empty_exp_now = !rcu_preempted_readers_exp(rnp);
414 if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
415 trace_rcu_quiescent_state_report("preempt_rcu",
416 rnp->gpnum,
417 0, rnp->qsmask,
418 rnp->level,
419 rnp->grplo,
420 rnp->grphi,
421 !!rnp->gp_tasks);
422 rcu_report_unblock_qs_rnp(rnp, flags);
423 } else {
424 raw_spin_unlock_irqrestore(&rnp->lock, flags);
425 }
426
427 #ifdef CONFIG_RCU_BOOST
428 /* Unboost if we were boosted. */
429 if (rbmp)
430 rt_mutex_unlock(rbmp);
431 #endif /* #ifdef CONFIG_RCU_BOOST */
432
433 /*
434 * If this was the last task on the expedited lists,
435 * then we need to report up the rcu_node hierarchy.
436 */
437 if (!empty_exp && empty_exp_now)
438 rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
439 } else {
440 local_irq_restore(flags);
441 }
442 }
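
/*
 * Illustrative reader (not part of this file) that exercises the
 * RCU_READ_UNLOCK_BLOCKED path above:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);	// preempted here: context switch
 *	do_something_with(p);		//  queues us on rnp->blkd_tasks
 *	rcu_read_unlock();		// outermost unlock lands in
 *					//  rcu_read_unlock_special()
 */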
443
444 #ifdef CONFIG_RCU_CPU_STALL_VERBOSE
445
446 /*
447 * Dump detailed information for all tasks blocking the current RCU
448 * grace period on the specified rcu_node structure.
449 */
450 static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
451 {
452 unsigned long flags;
453 struct task_struct *t;
454
455 raw_spin_lock_irqsave(&rnp->lock, flags);
456 if (!rcu_preempt_blocked_readers_cgp(rnp)) {
457 raw_spin_unlock_irqrestore(&rnp->lock, flags);
458 return;
459 }
460 t = list_entry(rnp->gp_tasks,
461 struct task_struct, rcu_node_entry);
462 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
463 sched_show_task(t);
464 raw_spin_unlock_irqrestore(&rnp->lock, flags);
465 }
466
467 /*
468 * Dump detailed information for all tasks blocking the current RCU
469 * grace period.
470 */
471 static void rcu_print_detail_task_stall(struct rcu_state *rsp)
472 {
473 struct rcu_node *rnp = rcu_get_root(rsp);
474
475 rcu_print_detail_task_stall_rnp(rnp);
476 rcu_for_each_leaf_node(rsp, rnp)
477 rcu_print_detail_task_stall_rnp(rnp);
478 }
479
480 #else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
481
482 static void rcu_print_detail_task_stall(struct rcu_state *rsp)
483 {
484 }
485
486 #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
487
488 #ifdef CONFIG_RCU_CPU_STALL_INFO
489
490 static void rcu_print_task_stall_begin(struct rcu_node *rnp)
491 {
492 pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
493 rnp->level, rnp->grplo, rnp->grphi);
494 }
495
496 static void rcu_print_task_stall_end(void)
497 {
498 pr_cont("\n");
499 }
500
501 #else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
502
503 static void rcu_print_task_stall_begin(struct rcu_node *rnp)
504 {
505 }
506
507 static void rcu_print_task_stall_end(void)
508 {
509 }
510
511 #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */
512
513 /*
514 * Scan the current list of tasks blocked within RCU read-side critical
515 * sections, printing out the tid of each.
516 */
517 static int rcu_print_task_stall(struct rcu_node *rnp)
518 {
519 struct task_struct *t;
520 int ndetected = 0;
521
522 if (!rcu_preempt_blocked_readers_cgp(rnp))
523 return 0;
524 rcu_print_task_stall_begin(rnp);
525 t = list_entry(rnp->gp_tasks,
526 struct task_struct, rcu_node_entry);
527 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
528 pr_cont(" P%d", t->pid);
529 ndetected++;
530 }
531 rcu_print_task_stall_end();
532 return ndetected;
533 }
534
535 /*
536 * Check that the list of blocked tasks for the newly completed grace
537 * period is in fact empty. It is a serious bug to complete a grace
538 * period that still has RCU readers blocked! This function must be
539 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
540 * must be held by the caller.
541 *
542 * Also, if there are blocked tasks on the list, they automatically
543 * block the newly created grace period, so set up ->gp_tasks accordingly.
544 */
545 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
546 {
547 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
548 if (!list_empty(&rnp->blkd_tasks))
549 rnp->gp_tasks = rnp->blkd_tasks.next;
550 WARN_ON_ONCE(rnp->qsmask);
551 }
552
553 #ifdef CONFIG_HOTPLUG_CPU
554
555 /*
556 * Handle tasklist migration for case in which all CPUs covered by the
557 * specified rcu_node have gone offline. Move them up to the root
558 * rcu_node. The reason for not just moving them to the immediate
559 * parent is to remove the need for rcu_read_unlock_special() to
560 * make more than two attempts to acquire the target rcu_node's lock.
561 * Returns a bitmask indicating which grace periods, if any, were
562 * previously blocked by tasks on the specified rcu_node structure:
563 * RCU_OFL_TASKS_NORM_GP if tasks were blocking the normal grace
564 * period, RCU_OFL_TASKS_EXP_GP if tasks were blocking the expedited
565 * grace period, and zero if no tasks were blocking either.
566 *
567 * The caller must hold rnp->lock with irqs disabled.
568 */
569 static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
570 struct rcu_node *rnp,
571 struct rcu_data *rdp)
572 {
573 struct list_head *lp;
574 struct list_head *lp_root;
575 int retval = 0;
576 struct rcu_node *rnp_root = rcu_get_root(rsp);
577 struct task_struct *t;
578
579 if (rnp == rnp_root) {
580 WARN_ONCE(1, "Last CPU thought to be offlined?");
581 return 0; /* Shouldn't happen: at least one CPU online. */
582 }
583
584 /* If we are on an internal node, complain bitterly. */
585 WARN_ON_ONCE(rnp != rdp->mynode);
586
587 /*
588 * Move tasks up to root rcu_node. Don't try to get fancy for
589 * this corner-case operation -- just put this node's tasks
590 * at the head of the root node's list, and update the root node's
591 * ->gp_tasks and ->exp_tasks pointers to those of this node's,
592 * if non-NULL. This might result in waiting for more tasks than
593 * absolutely necessary, but this is a good performance/complexity
594 * tradeoff.
595 */
596 if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
597 retval |= RCU_OFL_TASKS_NORM_GP;
598 if (rcu_preempted_readers_exp(rnp))
599 retval |= RCU_OFL_TASKS_EXP_GP;
600 lp = &rnp->blkd_tasks;
601 lp_root = &rnp_root->blkd_tasks;
602 while (!list_empty(lp)) {
603 t = list_entry(lp->next, typeof(*t), rcu_node_entry);
604 raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
605 list_del(&t->rcu_node_entry);
606 t->rcu_blocked_node = rnp_root;
607 list_add(&t->rcu_node_entry, lp_root);
608 if (&t->rcu_node_entry == rnp->gp_tasks)
609 rnp_root->gp_tasks = rnp->gp_tasks;
610 if (&t->rcu_node_entry == rnp->exp_tasks)
611 rnp_root->exp_tasks = rnp->exp_tasks;
612 #ifdef CONFIG_RCU_BOOST
613 if (&t->rcu_node_entry == rnp->boost_tasks)
614 rnp_root->boost_tasks = rnp->boost_tasks;
615 #endif /* #ifdef CONFIG_RCU_BOOST */
616 raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
617 }
618
619 rnp->gp_tasks = NULL;
620 rnp->exp_tasks = NULL;
621 #ifdef CONFIG_RCU_BOOST
622 rnp->boost_tasks = NULL;
623 /*
624 * It is possible that the root is being boosted even though the
625 * leaf was not. Make sure that we boost the tasks blocking the
626 * current grace period in that case.
627 */
628 raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
629 if (rnp_root->boost_tasks != NULL &&
630 rnp_root->boost_tasks != rnp_root->gp_tasks &&
631 rnp_root->boost_tasks != rnp_root->exp_tasks)
632 rnp_root->boost_tasks = rnp_root->gp_tasks;
633 raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
634 #endif /* #ifdef CONFIG_RCU_BOOST */
635
636 return retval;
637 }
638
639 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
640
641 /*
642 * Check for a quiescent state from the current CPU. When a task blocks,
643 * the task is recorded in the corresponding CPU's rcu_node structure,
644 * which is checked elsewhere.
645 *
646 * Caller must disable hard irqs.
647 */
648 static void rcu_preempt_check_callbacks(int cpu)
649 {
650 struct task_struct *t = current;
651
652 if (t->rcu_read_lock_nesting == 0) {
653 rcu_preempt_qs(cpu);
654 return;
655 }
656 if (t->rcu_read_lock_nesting > 0 &&
657 per_cpu(rcu_preempt_data, cpu).qs_pending)
658 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
659 }
660
661 #ifdef CONFIG_RCU_BOOST
662
663 static void rcu_preempt_do_callbacks(void)
664 {
665 rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
666 }
667
668 #endif /* #ifdef CONFIG_RCU_BOOST */
669
670 /*
671 * Queue a preemptible-RCU callback for invocation after a grace period.
672 */
673 void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
674 {
675 __call_rcu(head, func, &rcu_preempt_state, -1, 0);
676 }
677 EXPORT_SYMBOL_GPL(call_rcu);
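
/*
 * Typical usage (illustrative; struct foo and foo_reclaim() are
 * hypothetical caller-side code):
 *
 *	struct foo {
 *		struct rcu_head rcu;
 *		int data;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct foo, rcu));
 *	}
 *
 *	call_rcu(&old_fp->rcu, foo_reclaim);
 */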
678
679 /*
680 * Queue an RCU callback for lazy invocation after a grace period.
681 * This will likely be later named something like "call_rcu_lazy()",
682 * but this change will require some way of tagging the lazy RCU
683 * callbacks in the list of pending callbacks. Until then, this
684 * function may only be called from __kfree_rcu().
685 */
686 void kfree_call_rcu(struct rcu_head *head,
687 void (*func)(struct rcu_head *rcu))
688 {
689 __call_rcu(head, func, &rcu_preempt_state, -1, 1);
690 }
691 EXPORT_SYMBOL_GPL(kfree_call_rcu);
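
/*
 * Callers normally reach this via the kfree_rcu() macro rather than
 * calling it directly, for example (illustrative):
 *
 *	kfree_rcu(old_fp, rcu);	// queues a callback that kfree()s
 *				//  old_fp after a grace period
 */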
692
693 /**
694 * synchronize_rcu - wait until a grace period has elapsed.
695 *
696 * Control will return to the caller some time after a full grace
697 * period has elapsed, in other words after all currently executing RCU
698 * read-side critical sections have completed. Note, however, that
699 * upon return from synchronize_rcu(), the caller might well be executing
700 * concurrently with new RCU read-side critical sections that began while
701 * synchronize_rcu() was waiting. RCU read-side critical sections are
702 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
703 *
704 * See the description of synchronize_sched() for more detailed information
705 * on memory ordering guarantees.
706 */
707 void synchronize_rcu(void)
708 {
709 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
710 !lock_is_held(&rcu_lock_map) &&
711 !lock_is_held(&rcu_sched_lock_map),
712 "Illegal synchronize_rcu() in RCU read-side critical section");
713 if (!rcu_scheduler_active)
714 return;
715 if (rcu_expedited)
716 synchronize_rcu_expedited();
717 else
718 wait_rcu_gp(call_rcu);
719 }
720 EXPORT_SYMBOL_GPL(synchronize_rcu);
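
/*
 * The classic update-side pattern built on this primitive
 * (illustrative; gp and gp_lock are hypothetical caller-side state):
 *
 *	spin_lock(&gp_lock);
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, new);
 *	spin_unlock(&gp_lock);
 *	synchronize_rcu();	// wait for pre-existing readers
 *	kfree(old);		// now safe to reclaim
 */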
721
722 static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
723 static unsigned long sync_rcu_preempt_exp_count;
724 static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
725
726 /*
727 * Return non-zero if there are any tasks in RCU read-side critical
728 * sections blocking the current preemptible-RCU expedited grace period.
729 * If there is no preemptible-RCU expedited grace period currently in
730 * progress, returns zero unconditionally.
731 */
732 static int rcu_preempted_readers_exp(struct rcu_node *rnp)
733 {
734 return rnp->exp_tasks != NULL;
735 }
736
737 /*
738 * Return non-zero if there is no RCU expedited grace period in progress
739 * for the specified rcu_node structure, in other words, if all CPUs and
740 * tasks covered by the specified rcu_node structure have done their bit
741 * for the current expedited grace period. Works only for preemptible
742 * RCU -- other RCU implementation use other means.
743 *
744 * Caller must hold sync_rcu_preempt_exp_mutex.
745 */
746 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
747 {
748 return !rcu_preempted_readers_exp(rnp) &&
749 ACCESS_ONCE(rnp->expmask) == 0;
750 }
751
752 /*
753 * Report the exit from RCU read-side critical section for the last task
754 * that queued itself during or before the current expedited preemptible-RCU
755 * grace period. This event is reported either to the rcu_node structure on
756 * which the task was queued or to one of that rcu_node structure's ancestors,
757 * recursively up the tree. (Calm down, calm down, we do the recursion
758 * iteratively!)
759 *
760 * Most callers will set the "wake" flag, but the task initiating the
761 * expedited grace period need not wake itself.
762 *
763 * Caller must hold sync_rcu_preempt_exp_mutex.
764 */
765 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
766 bool wake)
767 {
768 unsigned long flags;
769 unsigned long mask;
770
771 raw_spin_lock_irqsave(&rnp->lock, flags);
772 for (;;) {
773 if (!sync_rcu_preempt_exp_done(rnp)) {
774 raw_spin_unlock_irqrestore(&rnp->lock, flags);
775 break;
776 }
777 if (rnp->parent == NULL) {
778 raw_spin_unlock_irqrestore(&rnp->lock, flags);
779 if (wake)
780 wake_up(&sync_rcu_preempt_exp_wq);
781 break;
782 }
783 mask = rnp->grpmask;
784 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
785 rnp = rnp->parent;
786 raw_spin_lock(&rnp->lock); /* irqs already disabled */
787 rnp->expmask &= ~mask;
788 }
789 }
790
791 /*
792 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
793 * grace period for the specified rcu_node structure. If there are no such
794 * tasks, report it up the rcu_node hierarchy.
795 *
796 * Caller must hold sync_rcu_preempt_exp_mutex and must exclude
797 * CPU hotplug operations.
798 */
799 static void
800 sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
801 {
802 unsigned long flags;
803 int must_wait = 0;
804
805 raw_spin_lock_irqsave(&rnp->lock, flags);
806 if (list_empty(&rnp->blkd_tasks)) {
807 raw_spin_unlock_irqrestore(&rnp->lock, flags);
808 } else {
809 rnp->exp_tasks = rnp->blkd_tasks.next;
810 rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
811 must_wait = 1;
812 }
813 if (!must_wait)
814 rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
815 }
816
817 /**
818 * synchronize_rcu_expedited - Brute-force RCU grace period
819 *
820 * Wait for an RCU-preempt grace period, but expedite it. The basic
821 * idea is to invoke synchronize_sched_expedited() to push all the tasks to
822 * the ->blkd_tasks lists and wait for this list to drain. This consumes
823 * significant time on all CPUs and is unfriendly to real-time workloads,
824 * so is thus not recommended for any sort of common-case code.
825 * In fact, if you are using synchronize_rcu_expedited() in a loop,
826 * please restructure your code to batch your updates, and then Use a
827 * single synchronize_rcu() instead.
828 *
829 * Note that it is illegal to call this function while holding any lock
830 * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
831 * to call this function from a CPU-hotplug notifier. Failing to observe
832 * these restrictions will result in deadlock.
833 */
834 void synchronize_rcu_expedited(void)
835 {
836 unsigned long flags;
837 struct rcu_node *rnp;
838 struct rcu_state *rsp = &rcu_preempt_state;
839 unsigned long snap;
840 int trycount = 0;
841
842 smp_mb(); /* Caller's modifications seen first by other CPUs. */
843 snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
844 smp_mb(); /* Above access cannot bleed into critical section. */
845
846 /*
847 * Block CPU-hotplug operations. This means that any CPU-hotplug
848 * operation that finds an rcu_node structure with tasks in the
849 * process of being boosted will know that all tasks blocking
850 * this expedited grace period will already be in the process of
851 * being boosted. This simplifies the process of moving tasks
852 * from leaf to root rcu_node structures.
853 */
854 get_online_cpus();
855
856 /*
857 * Acquire lock, falling back to synchronize_rcu() if too many
858 * lock-acquisition failures. Of course, if someone does the
859 * expedited grace period for us, just leave.
860 */
861 while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
862 if (ULONG_CMP_LT(snap,
863 ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
864 put_online_cpus();
865 goto mb_ret; /* Others did our work for us. */
866 }
867 if (trycount++ < 10) {
868 udelay(trycount * num_online_cpus());
869 } else {
870 put_online_cpus();
871 wait_rcu_gp(call_rcu);
872 return;
873 }
874 }
875 if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
876 put_online_cpus();
877 goto unlock_mb_ret; /* Others did our work for us. */
878 }
879
880 /* Force all RCU readers onto ->blkd_tasks lists. */
881 synchronize_sched_expedited();
882
883 /* Initialize ->expmask for all non-leaf rcu_node structures. */
884 rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
885 raw_spin_lock_irqsave(&rnp->lock, flags);
886 rnp->expmask = rnp->qsmaskinit;
887 raw_spin_unlock_irqrestore(&rnp->lock, flags);
888 }
889
890 /* Snapshot current state of ->blkd_tasks lists. */
891 rcu_for_each_leaf_node(rsp, rnp)
892 sync_rcu_preempt_exp_init(rsp, rnp);
893 if (NUM_RCU_NODES > 1)
894 sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));
895
896 put_online_cpus();
897
898 /* Wait for snapshotted ->blkd_tasks lists to drain. */
899 rnp = rcu_get_root(rsp);
900 wait_event(sync_rcu_preempt_exp_wq,
901 sync_rcu_preempt_exp_done(rnp));
902
903 /* Clean up and exit. */
904 smp_mb(); /* ensure expedited GP seen before counter increment. */
905 ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
906 unlock_mb_ret:
907 mutex_unlock(&sync_rcu_preempt_exp_mutex);
908 mb_ret:
909 smp_mb(); /* ensure subsequent action seen after grace period. */
910 }
911 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
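
/*
 * Worked example of the counter snapshots above (illustrative): if
 * sync_rcu_preempt_exp_count is 10 at snapshot time, then snap == 11,
 * and the "others did our work for us" checks fire only once the
 * counter exceeds 11, that is, after two increments. A single
 * increment might belong to an expedited grace period that was already
 * in progress at snapshot time, which need not cover this caller's
 * updates; two increments guarantee that a full expedited grace
 * period began and ended after the snapshot.
 */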
912
913 /**
914 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
915 *
916 * Note that this primitive does not necessarily wait for an RCU grace period
917 * to complete. For example, if there are no RCU callbacks queued anywhere
918 * in the system, then rcu_barrier() is within its rights to return
919 * immediately, without waiting for anything, much less an RCU grace period.
920 */
921 void rcu_barrier(void)
922 {
923 _rcu_barrier(&rcu_preempt_state);
924 }
925 EXPORT_SYMBOL_GPL(rcu_barrier);
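
/*
 * Typical use is on a module-unload path (illustrative; foo_exit()
 * and foo_cachep are hypothetical):
 *
 *	static void __exit foo_exit(void)
 *	{
 *		// No new call_rcu() invocations after this point.
 *		rcu_barrier();	// wait for outstanding callbacks
 *		kmem_cache_destroy(foo_cachep);
 *	}
 */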
926
927 /*
928 * Initialize preemptible RCU's state structures.
929 */
930 static void __init __rcu_init_preempt(void)
931 {
932 rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
933 }
934
935 /*
936 * Check for a task exiting while in a preemptible-RCU read-side
937 * critical section, clean up if so. No need to issue warnings,
938 * as debug_check_no_locks_held() already does this if lockdep
939 * is enabled.
940 */
941 void exit_rcu(void)
942 {
943 struct task_struct *t = current;
944
945 if (likely(list_empty(&current->rcu_node_entry)))
946 return;
947 t->rcu_read_lock_nesting = 1;
948 barrier();
949 t->rcu_read_unlock_special = RCU_READ_UNLOCK_BLOCKED;
950 __rcu_read_unlock();
951 }
952
953 #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
954
955 static struct rcu_state *rcu_state = &rcu_sched_state;
956
957 /*
958 * Tell them what RCU they are running.
959 */
960 static void __init rcu_bootup_announce(void)
961 {
962 pr_info("Hierarchical RCU implementation.\n");
963 rcu_bootup_announce_oddness();
964 }
965
966 /*
967 * Return the number of RCU batches processed thus far for debug & stats.
968 */
969 long rcu_batches_completed(void)
970 {
971 return rcu_batches_completed_sched();
972 }
973 EXPORT_SYMBOL_GPL(rcu_batches_completed);
974
975 /*
976 * Force a quiescent state for RCU, which, because there is no preemptible
977 * RCU, becomes the same as rcu-sched.
978 */
979 void rcu_force_quiescent_state(void)
980 {
981 rcu_sched_force_quiescent_state();
982 }
983 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
984
985 /*
986 * Because preemptible RCU does not exist, we never have to check for
987 * CPUs being in quiescent states.
988 */
989 static void rcu_preempt_note_context_switch(int cpu)
990 {
991 }
992
993 /*
994 * Because preemptible RCU does not exist, there are never any preempted
995 * RCU readers.
996 */
997 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
998 {
999 return 0;
1000 }
1001
1002 #ifdef CONFIG_HOTPLUG_CPU
1003
1004 /* Because preemptible RCU does not exist, no quieting of tasks. */
1005 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
1006 {
1007 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1008 }
1009
1010 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
1011
1012 /*
1013 * Because preemptible RCU does not exist, we never have to check for
1014 * tasks blocked within RCU read-side critical sections.
1015 */
1016 static void rcu_print_detail_task_stall(struct rcu_state *rsp)
1017 {
1018 }
1019
1020 /*
1021 * Because preemptible RCU does not exist, we never have to check for
1022 * tasks blocked within RCU read-side critical sections.
1023 */
1024 static int rcu_print_task_stall(struct rcu_node *rnp)
1025 {
1026 return 0;
1027 }
1028
1029 /*
1030 * Because there is no preemptible RCU, there can be no readers blocked,
1031 * so there is no need to check for blocked tasks; check only for
1032 * bogus qsmask values.
1033 */
1034 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
1035 {
1036 WARN_ON_ONCE(rnp->qsmask);
1037 }
1038
1039 #ifdef CONFIG_HOTPLUG_CPU
1040
1041 /*
1042 * Because preemptible RCU does not exist, it never needs to migrate
1043 * tasks that were blocked within RCU read-side critical sections, and
1044 * such non-existent tasks cannot possibly have been blocking the current
1045 * grace period.
1046 */
1047 static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
1048 struct rcu_node *rnp,
1049 struct rcu_data *rdp)
1050 {
1051 return 0;
1052 }
1053
1054 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
1055
1056 /*
1057 * Because preemptible RCU does not exist, it never has any callbacks
1058 * to check.
1059 */
1060 static void rcu_preempt_check_callbacks(int cpu)
1061 {
1062 }
1063
1064 /*
1065 * Queue an RCU callback for lazy invocation after a grace period.
1066 * This will likely be later named something like "call_rcu_lazy()",
1067 * but this change will require some way of tagging the lazy RCU
1068 * callbacks in the list of pending callbacks. Until then, this
1069 * function may only be called from __kfree_rcu().
1070 *
1071 * Because there is no preemptible RCU, we use RCU-sched instead.
1072 */
1073 void kfree_call_rcu(struct rcu_head *head,
1074 void (*func)(struct rcu_head *rcu))
1075 {
1076 __call_rcu(head, func, &rcu_sched_state, -1, 1);
1077 }
1078 EXPORT_SYMBOL_GPL(kfree_call_rcu);
1079
1080 /*
1081 * Wait for an rcu-preempt grace period, but make it happen quickly.
1082 * Because preemptible RCU does not exist, map to rcu-sched.
1083 */
1084 void synchronize_rcu_expedited(void)
1085 {
1086 synchronize_sched_expedited();
1087 }
1088 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
1089
1090 #ifdef CONFIG_HOTPLUG_CPU
1091
1092 /*
1093 * Because preemptible RCU does not exist, there is never any need to
1094 * report on tasks preempted in RCU read-side critical sections during
1095 * expedited RCU grace periods.
1096 */
1097 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
1098 bool wake)
1099 {
1100 }
1101
1102 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
1103
1104 /*
1105 * Because preemptible RCU does not exist, rcu_barrier() is just
1106 * another name for rcu_barrier_sched().
1107 */
1108 void rcu_barrier(void)
1109 {
1110 rcu_barrier_sched();
1111 }
1112 EXPORT_SYMBOL_GPL(rcu_barrier);
1113
1114 /*
1115 * Because preemptible RCU does not exist, it need not be initialized.
1116 */
1117 static void __init __rcu_init_preempt(void)
1118 {
1119 }
1120
1121 /*
1122 * Because preemptible RCU does not exist, tasks cannot possibly exit
1123 * while in preemptible RCU read-side critical sections.
1124 */
1125 void exit_rcu(void)
1126 {
1127 }
1128
1129 #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
1130
1131 #ifdef CONFIG_RCU_BOOST
1132
1133 #include "rtmutex_common.h"
1134
1135 #ifdef CONFIG_RCU_TRACE
1136
1137 static void rcu_initiate_boost_trace(struct rcu_node *rnp)
1138 {
1139 if (list_empty(&rnp->blkd_tasks))
1140 rnp->n_balk_blkd_tasks++;
1141 else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
1142 rnp->n_balk_exp_gp_tasks++;
1143 else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
1144 rnp->n_balk_boost_tasks++;
1145 else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
1146 rnp->n_balk_notblocked++;
1147 else if (rnp->gp_tasks != NULL &&
1148 ULONG_CMP_LT(jiffies, rnp->boost_time))
1149 rnp->n_balk_notyet++;
1150 else
1151 rnp->n_balk_nos++;
1152 }
1153
1154 #else /* #ifdef CONFIG_RCU_TRACE */
1155
1156 static void rcu_initiate_boost_trace(struct rcu_node *rnp)
1157 {
1158 }
1159
1160 #endif /* #else #ifdef CONFIG_RCU_TRACE */
1161
1162 static void rcu_wake_cond(struct task_struct *t, int status)
1163 {
1164 /*
1165 * If the thread is yielding, only wake it when this
1166 * is invoked from idle
1167 */
1168 if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
1169 wake_up_process(t);
1170 }
1171
1172 /*
1173 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
1174 * or ->boost_tasks, advancing the pointer to the next task in the
1175 * ->blkd_tasks list.
1176 *
1177 * Note that irqs must be enabled: boosting the task can block.
1178 * Returns 1 if there are more tasks needing to be boosted.
1179 */
1180 static int rcu_boost(struct rcu_node *rnp)
1181 {
1182 unsigned long flags;
1183 struct rt_mutex mtx;
1184 struct task_struct *t;
1185 struct list_head *tb;
1186
1187 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
1188 return 0; /* Nothing left to boost. */
1189
1190 raw_spin_lock_irqsave(&rnp->lock, flags);
1191
1192 /*
1193 * Recheck under the lock: all tasks in need of boosting
1194 * might exit their RCU read-side critical sections on their own.
1195 */
1196 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
1197 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1198 return 0;
1199 }
1200
1201 /*
1202 * Preferentially boost tasks blocking expedited grace periods.
1203 * This cannot starve the normal grace periods because a second
1204 * expedited grace period must boost all blocked tasks, including
1205 * those blocking the pre-existing normal grace period.
1206 */
1207 if (rnp->exp_tasks != NULL) {
1208 tb = rnp->exp_tasks;
1209 rnp->n_exp_boosts++;
1210 } else {
1211 tb = rnp->boost_tasks;
1212 rnp->n_normal_boosts++;
1213 }
1214 rnp->n_tasks_boosted++;
1215
1216 /*
1217 * We boost task t by manufacturing an rt_mutex that appears to
1218 * be held by task t. We leave a pointer to that rt_mutex where
1219 * task t can find it, and task t will release the mutex when it
1220 * exits its outermost RCU read-side critical section. Then
1221 * simply acquiring this artificial rt_mutex will boost task
1222 * t's priority. (Thanks to tglx for suggesting this approach!)
1223 *
1224 * Note that task t must acquire rnp->lock to remove itself from
1225 * the ->blkd_tasks list, which it will do from exit() if from
1226 * nowhere else. We therefore are guaranteed that task t will
1227 * stay around at least until we drop rnp->lock. Note that
1228 * rnp->lock also resolves races between our priority boosting
1229 * and task t's exiting its outermost RCU read-side critical
1230 * section.
1231 */
1232 t = container_of(tb, struct task_struct, rcu_node_entry);
1233 rt_mutex_init_proxy_locked(&mtx, t);
1234 t->rcu_boost_mutex = &mtx;
1235 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1236 rt_mutex_lock(&mtx); /* Side effect: boosts task t's priority. */
1237 rt_mutex_unlock(&mtx); /* Keep lockdep happy. */
1238
1239 return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
1240 ACCESS_ONCE(rnp->boost_tasks) != NULL;
1241 }
1242
1243 /*
1244 * Priority-boosting kthread. One per leaf rcu_node and one for the
1245 * root rcu_node.
1246 */
1247 static int rcu_boost_kthread(void *arg)
1248 {
1249 struct rcu_node *rnp = (struct rcu_node *)arg;
1250 int spincnt = 0;
1251 int more2boost;
1252
1253 trace_rcu_utilization("Start boost kthread@init");
1254 for (;;) {
1255 rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
1256 trace_rcu_utilization("End boost kthread@rcu_wait");
1257 rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
1258 trace_rcu_utilization("Start boost kthread@rcu_wait");
1259 rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
1260 more2boost = rcu_boost(rnp);
1261 if (more2boost)
1262 spincnt++;
1263 else
1264 spincnt = 0;
1265 if (spincnt > 10) {
1266 rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
1267 trace_rcu_utilization("End boost kthread@rcu_yield");
1268 schedule_timeout_interruptible(2);
1269 trace_rcu_utilization("Start boost kthread@rcu_yield");
1270 spincnt = 0;
1271 }
1272 }
1273 /* NOTREACHED */
1274 trace_rcu_utilization("End boost kthread@notreached");
1275 return 0;
1276 }
1277
1278 /*
1279 * Check to see if it is time to start boosting RCU readers that are
1280 * blocking the current grace period, and, if so, tell the per-rcu_node
1281 * kthread to start boosting them. If there is an expedited grace
1282 * period in progress, it is always time to boost.
1283 *
1284 * The caller must hold rnp->lock, which this function releases.
1285 * The ->boost_kthread_task is immortal, so we don't need to worry
1286 * about it going away.
1287 */
1288 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1289 {
1290 struct task_struct *t;
1291
1292 if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
1293 rnp->n_balk_exp_gp_tasks++;
1294 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1295 return;
1296 }
1297 if (rnp->exp_tasks != NULL ||
1298 (rnp->gp_tasks != NULL &&
1299 rnp->boost_tasks == NULL &&
1300 rnp->qsmask == 0 &&
1301 ULONG_CMP_GE(jiffies, rnp->boost_time))) {
1302 if (rnp->exp_tasks == NULL)
1303 rnp->boost_tasks = rnp->gp_tasks;
1304 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1305 t = rnp->boost_kthread_task;
1306 if (t)
1307 rcu_wake_cond(t, rnp->boost_kthread_status);
1308 } else {
1309 rcu_initiate_boost_trace(rnp);
1310 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1311 }
1312 }
1313
1314 /*
1315 * Wake up the per-CPU kthread to invoke RCU callbacks.
1316 */
1317 static void invoke_rcu_callbacks_kthread(void)
1318 {
1319 unsigned long flags;
1320
1321 local_irq_save(flags);
1322 __this_cpu_write(rcu_cpu_has_work, 1);
1323 if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
1324 current != __this_cpu_read(rcu_cpu_kthread_task)) {
1325 rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
1326 __this_cpu_read(rcu_cpu_kthread_status));
1327 }
1328 local_irq_restore(flags);
1329 }
1330
1331 /*
1332 * Is the current CPU running the RCU-callbacks kthread?
1333 * Caller must have preemption disabled.
1334 */
1335 static bool rcu_is_callbacks_kthread(void)
1336 {
1337 return __get_cpu_var(rcu_cpu_kthread_task) == current;
1338 }
1339
1340 #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
1341
1342 /*
1343 * Do priority-boost accounting for the start of a new grace period.
1344 */
1345 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1346 {
1347 rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
1348 }
1349
1350 /*
1351 * Create an RCU-boost kthread for the specified node if one does not
1352 * already exist. We only create this kthread for preemptible RCU.
1353 * Returns zero if all is well, a negated errno otherwise.
1354 */
1355 static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
1356 struct rcu_node *rnp)
1357 {
1358 int rnp_index = rnp - &rsp->node[0];
1359 unsigned long flags;
1360 struct sched_param sp;
1361 struct task_struct *t;
1362
1363 if (&rcu_preempt_state != rsp)
1364 return 0;
1365
1366 if (!rcu_scheduler_fully_active || rnp->qsmaskinit == 0)
1367 return 0;
1368
1369 rsp->boost = 1;
1370 if (rnp->boost_kthread_task != NULL)
1371 return 0;
1372 t = kthread_create(rcu_boost_kthread, (void *)rnp,
1373 "rcub/%d", rnp_index);
1374 if (IS_ERR(t))
1375 return PTR_ERR(t);
1376 raw_spin_lock_irqsave(&rnp->lock, flags);
1377 rnp->boost_kthread_task = t;
1378 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1379 sp.sched_priority = RCU_BOOST_PRIO;
1380 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1381 wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1382 return 0;
1383 }
1384
1385 static void rcu_kthread_do_work(void)
1386 {
1387 rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
1388 rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
1389 rcu_preempt_do_callbacks();
1390 }
1391
1392 static void rcu_cpu_kthread_setup(unsigned int cpu)
1393 {
1394 struct sched_param sp;
1395
1396 sp.sched_priority = RCU_KTHREAD_PRIO;
1397 sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1398 }
1399
1400 static void rcu_cpu_kthread_park(unsigned int cpu)
1401 {
1402 per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
1403 }
1404
1405 static int rcu_cpu_kthread_should_run(unsigned int cpu)
1406 {
1407 return __get_cpu_var(rcu_cpu_has_work);
1408 }
1409
1410 /*
1411 * Per-CPU kernel thread that invokes RCU callbacks. This replaces
1412 * the RCU softirq that is used instead in kernels that do not
1413 * support RCU priority boosting.
1414 */
1415 static void rcu_cpu_kthread(unsigned int cpu)
1416 {
1417 unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status);
1418 char work, *workp = &__get_cpu_var(rcu_cpu_has_work);
1419 int spincnt;
1420
1421 for (spincnt = 0; spincnt < 10; spincnt++) {
1422 trace_rcu_utilization("Start CPU kthread@rcu_wait");
1423 local_bh_disable();
1424 *statusp = RCU_KTHREAD_RUNNING;
1425 this_cpu_inc(rcu_cpu_kthread_loops);
1426 local_irq_disable();
1427 work = *workp;
1428 *workp = 0;
1429 local_irq_enable();
1430 if (work)
1431 rcu_kthread_do_work();
1432 local_bh_enable();
1433 if (*workp == 0) {
1434 trace_rcu_utilization("End CPU kthread@rcu_wait");
1435 *statusp = RCU_KTHREAD_WAITING;
1436 return;
1437 }
1438 }
1439 *statusp = RCU_KTHREAD_YIELDING;
1440 trace_rcu_utilization("Start CPU kthread@rcu_yield");
1441 schedule_timeout_interruptible(2);
1442 trace_rcu_utilization("End CPU kthread@rcu_yield");
1443 *statusp = RCU_KTHREAD_WAITING;
1444 }
1445
1446 /*
1447 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
1448 * served by the rcu_node in question. The CPU hotplug lock is still
1449 * held, so the value of rnp->qsmaskinit will be stable.
1450 *
1451 * We don't include outgoingcpu in the affinity set; use -1 if there is
1452 * no outgoing CPU. If there are no CPUs left in the affinity set,
1453 * this function allows the kthread to execute on any CPU.
1454 */
1455 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1456 {
1457 struct task_struct *t = rnp->boost_kthread_task;
1458 unsigned long mask = rnp->qsmaskinit;
1459 cpumask_var_t cm;
1460 int cpu;
1461
1462 if (!t)
1463 return;
1464 if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
1465 return;
1466 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
1467 if ((mask & 0x1) && cpu != outgoingcpu)
1468 cpumask_set_cpu(cpu, cm);
1469 if (cpumask_weight(cm) == 0) {
1470 cpumask_setall(cm);
1471 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
1472 cpumask_clear_cpu(cpu, cm);
1473 WARN_ON_ONCE(cpumask_weight(cm) == 0);
1474 }
1475 set_cpus_allowed_ptr(t, cm);
1476 free_cpumask_var(cm);
1477 }
1478
1479 static struct smp_hotplug_thread rcu_cpu_thread_spec = {
1480 .store = &rcu_cpu_kthread_task,
1481 .thread_should_run = rcu_cpu_kthread_should_run,
1482 .thread_fn = rcu_cpu_kthread,
1483 .thread_comm = "rcuc/%u",
1484 .setup = rcu_cpu_kthread_setup,
1485 .park = rcu_cpu_kthread_park,
1486 };
1487
1488 /*
1489 * Spawn all kthreads -- called as soon as the scheduler is running.
1490 */
1491 static int __init rcu_spawn_kthreads(void)
1492 {
1493 struct rcu_node *rnp;
1494 int cpu;
1495
1496 rcu_scheduler_fully_active = 1;
1497 for_each_possible_cpu(cpu)
1498 per_cpu(rcu_cpu_has_work, cpu) = 0;
1499 BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
1500 rnp = rcu_get_root(rcu_state);
1501 (void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
1502 if (NUM_RCU_NODES > 1) {
1503 rcu_for_each_leaf_node(rcu_state, rnp)
1504 (void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
1505 }
1506 return 0;
1507 }
1508 early_initcall(rcu_spawn_kthreads);
1509
1510 static void __cpuinit rcu_prepare_kthreads(int cpu)
1511 {
1512 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
1513 struct rcu_node *rnp = rdp->mynode;
1514
1515 /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
1516 if (rcu_scheduler_fully_active)
1517 (void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
1518 }
1519
1520 #else /* #ifdef CONFIG_RCU_BOOST */
1521
1522 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1523 {
1524 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1525 }
1526
1527 static void invoke_rcu_callbacks_kthread(void)
1528 {
1529 WARN_ON_ONCE(1);
1530 }
1531
1532 static bool rcu_is_callbacks_kthread(void)
1533 {
1534 return false;
1535 }
1536
1537 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1538 {
1539 }
1540
1541 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1542 {
1543 }
1544
1545 static int __init rcu_scheduler_really_started(void)
1546 {
1547 rcu_scheduler_fully_active = 1;
1548 return 0;
1549 }
1550 early_initcall(rcu_scheduler_really_started);
1551
1552 static void __cpuinit rcu_prepare_kthreads(int cpu)
1553 {
1554 }
1555
1556 #endif /* #else #ifdef CONFIG_RCU_BOOST */
1557
1558 #if !defined(CONFIG_RCU_FAST_NO_HZ)
1559
1560 /*
1561 * Check to see if any future RCU-related work will need to be done
1562 * by the current CPU, even if none need be done immediately, returning
1563 * 1 if so. This function is part of the RCU implementation; it is -not-
1564 * an exported member of the RCU API.
1565 *
1566 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
1567 * any flavor of RCU.
1568 */
1569 int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
1570 {
1571 *delta_jiffies = ULONG_MAX;
1572 return rcu_cpu_has_callbacks(cpu, NULL);
1573 }
1574
1575 /*
1576 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
1577 * after it.
1578 */
1579 static void rcu_cleanup_after_idle(int cpu)
1580 {
1581 }
1582
1583 /*
1584 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
1585 * is nothing.
1586 */
1587 static void rcu_prepare_for_idle(int cpu)
1588 {
1589 }
1590
1591 /*
1592 * Don't bother keeping a running count of the number of RCU callbacks
1593 * posted because CONFIG_RCU_FAST_NO_HZ=n.
1594 */
1595 static void rcu_idle_count_callbacks_posted(void)
1596 {
1597 }
1598
1599 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
1600
1601 /*
1602 * This code is invoked when a CPU goes idle, at which point we want
1603 * to have the CPU do everything required for RCU so that it can enter
1604 * the energy-efficient dyntick-idle mode. This is handled by a
1605 * state machine implemented by rcu_prepare_for_idle() below.
1606 *
1607 * The following two preprocessor symbols control this state machine:
1608 *
1609 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
1610 * to sleep in dyntick-idle mode with RCU callbacks pending. This
1611 * is sized to be roughly one RCU grace period. Those energy-efficiency
1612 * benchmarkers who might otherwise be tempted to set this to a large
1613 * number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
1614 * system. And if you are -that- concerned about energy efficiency,
1615 * just power the system down and be done with it!
1616 * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
1617 * permitted to sleep in dyntick-idle mode with only lazy RCU
1618 * callbacks pending. Setting this too high can OOM your system.
1619 *
1620 * The values below work well in practice. If future workloads require
1621 * adjustment, they can be converted into kernel config parameters, though
1622 * making the state machine smarter might be a better option.
1623 */
1624 #define RCU_IDLE_GP_DELAY 4 /* Roughly one grace period. */
1625 #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */
1626
1627 static int rcu_idle_gp_delay = RCU_IDLE_GP_DELAY;
1628 module_param(rcu_idle_gp_delay, int, 0644);
1629 static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
1630 module_param(rcu_idle_lazy_gp_delay, int, 0644);
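
/*
 * Both delays may therefore be adjusted at boot time, for example
 * (illustrative; the "rcutree." prefix assumes this file is built
 * into rcutree.o as usual):
 *
 *	rcutree.rcu_idle_gp_delay=8 rcutree.rcu_idle_lazy_gp_delay=12000
 *
 * with both values expressed in jiffies.
 */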
1631
1632 extern int tick_nohz_enabled;
1633
1634 /*
1635 * Try to advance callbacks for all flavors of RCU on the current CPU.
1636 * Afterwards, if there are any callbacks ready for immediate invocation,
1637 * return true.
1638 */
1639 static bool rcu_try_advance_all_cbs(void)
1640 {
1641 bool cbs_ready = false;
1642 struct rcu_data *rdp;
1643 struct rcu_node *rnp;
1644 struct rcu_state *rsp;
1645
1646 for_each_rcu_flavor(rsp) {
1647 rdp = this_cpu_ptr(rsp->rda);
1648 rnp = rdp->mynode;
1649
1650 /*
1651 * Don't bother checking unless a grace period has
1652 * completed since we last checked and there are
1653 * callbacks not yet ready to invoke.
1654 */
1655 if (rdp->completed != rnp->completed &&
1656 rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL])
1657 note_gp_changes(rsp, rdp);
1658
1659 if (cpu_has_callbacks_ready_to_invoke(rdp))
1660 cbs_ready = true;
1661 }
1662 return cbs_ready;
1663 }
1664
1665 /*
1666 * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
1667 * to invoke. If the CPU has callbacks, try to advance them. Tell the
1668 * caller to set the timeout based on whether or not there are non-lazy
1669 * callbacks.
1670 *
1671 * The caller must have disabled interrupts.
1672 */
1673 int rcu_needs_cpu(int cpu, unsigned long *dj)
1674 {
1675 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
1676
1677 /* Snapshot to detect later posting of non-lazy callback. */
1678 rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
1679
1680 /* If no callbacks, RCU doesn't need the CPU. */
1681 if (!rcu_cpu_has_callbacks(cpu, &rdtp->all_lazy)) {
1682 *dj = ULONG_MAX;
1683 return 0;
1684 }
1685
1686 /* Attempt to advance callbacks. */
1687 if (rcu_try_advance_all_cbs()) {
1688 /* Some ready to invoke, so initiate later invocation. */
1689 invoke_rcu_core();
1690 return 1;
1691 }
1692 rdtp->last_accelerate = jiffies;
1693
1694 /* Request timer delay depending on laziness, and round. */
1695 if (!rdtp->all_lazy) {
1696 *dj = round_up(rcu_idle_gp_delay + jiffies,
1697 rcu_idle_gp_delay) - jiffies;
1698 } else {
1699 *dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
1700 }
1701 return 0;
1702 }
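
/*
 * Worked example of the rounding above (illustrative): with
 * rcu_idle_gp_delay == 4, a CPU calling this at jiffies == 1002 gets
 * *dj = round_up(1006, 4) - 1002 = 6, so every CPU going idle within
 * the same four-jiffy interval is told to wake at the same jiffy
 * (1008), batching wakeups for energy efficiency.
 */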
1703
1704 /*
1705 * Prepare a CPU for idle from an RCU perspective. The first major task
1706 * is to sense whether nohz mode has been enabled or disabled via sysfs.
1707 * The second major task is to check to see if a non-lazy callback has
1708 * arrived at a CPU that previously had only lazy callbacks. The third
1709 * major task is to accelerate (that is, assign grace-period numbers to)
1710 * any recently arrived callbacks.
1711 *
1712 * The caller must have disabled interrupts.
1713 */
1714 static void rcu_prepare_for_idle(int cpu)
1715 {
1716 struct rcu_data *rdp;
1717 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
1718 struct rcu_node *rnp;
1719 struct rcu_state *rsp;
1720 int tne;
1721
1722 /* Handle nohz enablement switches conservatively. */
1723 tne = ACCESS_ONCE(tick_nohz_enabled);
1724 if (tne != rdtp->tick_nohz_enabled_snap) {
1725 if (rcu_cpu_has_callbacks(cpu, NULL))
1726 invoke_rcu_core(); /* force nohz to see update. */
1727 rdtp->tick_nohz_enabled_snap = tne;
1728 return;
1729 }
1730 if (!tne)
1731 return;
1732
1733 /* If this is a no-CBs CPU, no callbacks, just return. */
1734 if (rcu_is_nocb_cpu(cpu))
1735 return;
1736
1737 /*
1738 * If a non-lazy callback arrived at a CPU having only lazy
1739 * callbacks, invoke RCU core for the side-effect of recalculating
1740 * idle duration on re-entry to idle.
1741 */
1742 if (rdtp->all_lazy &&
1743 rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) {
1744 invoke_rcu_core();
1745 return;
1746 }
1747
1748 /*
1749 * If we have not yet accelerated this jiffy, accelerate all
1750 * callbacks on this CPU.
1751 */
1752 if (rdtp->last_accelerate == jiffies)
1753 return;
1754 rdtp->last_accelerate = jiffies;
1755 for_each_rcu_flavor(rsp) {
1756 rdp = per_cpu_ptr(rsp->rda, cpu);
1757 if (!*rdp->nxttail[RCU_DONE_TAIL])
1758 continue;
1759 rnp = rdp->mynode;
1760 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
1761 rcu_accelerate_cbs(rsp, rnp, rdp);
1762 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
1763 }
1764 }
1765
1766 /*
1767 * Clean up for exit from idle. Attempt to advance callbacks based on
1768 * any grace periods that elapsed while the CPU was idle, and if any
1769 * callbacks are now ready to invoke, initiate invocation.
1770 */
1771 static void rcu_cleanup_after_idle(int cpu)
1772 {
1773 struct rcu_data *rdp;
1774 struct rcu_state *rsp;
1775
1776 if (rcu_is_nocb_cpu(cpu))
1777 return;
1778 rcu_try_advance_all_cbs();
1779 for_each_rcu_flavor(rsp) {
1780 rdp = per_cpu_ptr(rsp->rda, cpu);
1781 if (cpu_has_callbacks_ready_to_invoke(rdp))
1782 invoke_rcu_core();
1783 }
1784 }
1785
1786 /*
1787 * Keep a running count of the number of non-lazy callbacks posted
1788 * on this CPU. This running counter (which is never decremented) allows
1789 * rcu_prepare_for_idle() to detect when something out of the idle loop
1790 * posts a callback, even if an equal number of callbacks are invoked.
1791 * Of course, callbacks should only be posted from within a trace event
1792 * designed to be called from idle or from within RCU_NONIDLE().
1793 */
1794 static void rcu_idle_count_callbacks_posted(void)
1795 {
1796 __this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
1797 }
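/*
 * A minimal user-space sketch of the snapshot idiom (hypothetical
 * names): because the counter is never decremented, comparing it with
 * a snapshot taken at idle entry detects that *something* non-lazy was
 * posted in between, even if an equal number of callbacks were invoked
 * meanwhile and the queue length looks unchanged.
 */
#include <stdbool.h>

static unsigned long sketch_nonlazy_posted;	/* only ever increases */
static unsigned long sketch_nonlazy_snap;

static void sketch_enter_idle(void)
{
	sketch_nonlazy_snap = sketch_nonlazy_posted;	/* take snapshot */
}

static void sketch_post_nonlazy_cb(void)
{
	sketch_nonlazy_posted++;	/* count, never uncount */
}

static bool sketch_nonlazy_arrived(void)
{
	return sketch_nonlazy_posted != sketch_nonlazy_snap;
}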
1798
1799 /*
1800 * Data for flushing lazy RCU callbacks at OOM time.
1801 */
1802 static atomic_t oom_callback_count;
1803 static DECLARE_WAIT_QUEUE_HEAD(oom_callback_wq);
1804
1805 /*
1806 * RCU OOM callback -- decrement the outstanding count and deliver the
1807 * wake-up if we are the last one.
1808 */
1809 static void rcu_oom_callback(struct rcu_head *rhp)
1810 {
1811 if (atomic_dec_and_test(&oom_callback_count))
1812 wake_up(&oom_callback_wq);
1813 }
1814
1815 /*
1816 * Post an rcu_oom_notify callback on the current CPU if it has at
1817 * least one lazy callback. This will unnecessarily post callbacks
1818 * to CPUs that already have a non-lazy callback at the end of their
1819 * callback list, but this is an infrequent operation, so accept some
1820 * extra overhead to keep things simple.
1821 */
1822 static void rcu_oom_notify_cpu(void *unused)
1823 {
1824 struct rcu_state *rsp;
1825 struct rcu_data *rdp;
1826
1827 for_each_rcu_flavor(rsp) {
1828 rdp = __this_cpu_ptr(rsp->rda);
1829 if (rdp->qlen_lazy != 0) {
1830 atomic_inc(&oom_callback_count);
1831 rsp->call(&rdp->oom_head, rcu_oom_callback);
1832 }
1833 }
1834 }
1835
1836 /*
1837 * If low on memory, ensure that each CPU has a non-lazy callback.
1838 * This will wake up CPUs that have only lazy callbacks, in turn
1839 * ensuring that they free up the corresponding memory in a timely manner.
1840 * Because an uncertain amount of memory will be freed in some uncertain
1841 * timeframe, we do not claim to have freed anything.
1842 */
1843 static int rcu_oom_notify(struct notifier_block *self,
1844 unsigned long notused, void *nfreed)
1845 {
1846 int cpu;
1847
1848 /* Wait for callbacks from earlier instance to complete. */
1849 wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0);
1850
1851 /*
1852 * Prevent premature wakeup: ensure that all increments happen
1853 * before there is a chance of the counter reaching zero.
1854 */
1855 atomic_set(&oom_callback_count, 1);
1856
1857 get_online_cpus();
1858 for_each_online_cpu(cpu) {
1859 smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
1860 cond_resched();
1861 }
1862 put_online_cpus();
1863
1864 /* Unconditionally decrement: no need to wake ourselves up. */
1865 atomic_dec(&oom_callback_count);
1866
1867 return NOTIFY_OK;
1868 }
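/*
 * A sketch of the count-priming pattern above (user space, C11
 * atomics, hypothetical names). Storing 1 before any work is posted
 * guarantees that the count cannot reach zero until the poster's final
 * decrement, so a fast worker cannot deliver a premature wakeup.
 */
#include <stdatomic.h>

static atomic_long sketch_pending = 0;

static void sketch_worker_done(void)	/* plays rcu_oom_callback() */
{
	if (atomic_fetch_sub(&sketch_pending, 1) == 1) {
		/* Count hit zero: we were last, wake the waiter here. */
	}
}

static void sketch_post_all(int nr)	/* plays rcu_oom_notify() */
{
	atomic_store(&sketch_pending, 1);	/* prime: hold our own count */
	for (int i = 0; i < nr; i++) {
		atomic_fetch_add(&sketch_pending, 1);
		/* ... hand work unit i to a worker ... */
	}
	sketch_worker_done();			/* drop the prime */
}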
1869
1870 static struct notifier_block rcu_oom_nb = {
1871 .notifier_call = rcu_oom_notify
1872 };
1873
1874 static int __init rcu_register_oom_notifier(void)
1875 {
1876 register_oom_notifier(&rcu_oom_nb);
1877 return 0;
1878 }
1879 early_initcall(rcu_register_oom_notifier);
1880
1881 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
1882
1883 #ifdef CONFIG_RCU_CPU_STALL_INFO
1884
1885 #ifdef CONFIG_RCU_FAST_NO_HZ
1886
1887 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
1888 {
1889 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
1890 unsigned long nlpd = rdtp->nonlazy_posted - rdtp->nonlazy_posted_snap;
1891
1892 sprintf(cp, "last_accelerate: %04lx/%04lx, nonlazy_posted: %ld, %c%c",
1893 rdtp->last_accelerate & 0xffff, jiffies & 0xffff,
1894 ulong2long(nlpd),
1895 rdtp->all_lazy ? 'L' : '.',
1896 rdtp->tick_nohz_enabled_snap ? '.' : 'D');
1897 }
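/*
 * A hypothetical example of the resulting fragment:
 *
 *	last_accelerate: 3a41/3b7c, nonlazy_posted: 25, L.
 *
 * The two hex fields are the low 16 bits of the last-accelerate time
 * and of the current jiffies counter, "L" means every callback was
 * lazy at the last idle entry, and the final character would be "D"
 * if nohz had been seen as disabled ("." here means enabled).
 */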
1898
1899 #else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
1900
1901 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
1902 {
1903 *cp = '\0';
1904 }
1905
1906 #endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
1907
1908 /* Initiate the stall-info list. */
1909 static void print_cpu_stall_info_begin(void)
1910 {
1911 pr_cont("\n");
1912 }
1913
1914 /*
1915 * Print out diagnostic information for the specified stalled CPU.
1916 *
1917 * If the specified CPU is aware of the current RCU grace period
1918 * (flavor specified by rsp), then print the number of scheduling
1919 * clock interrupts the CPU has taken during the time that it has
1920 * been aware. Otherwise, print the number of RCU grace periods
1921 * that this CPU is ignorant of, for example, "1" if the CPU was
1922 * aware of the previous grace period.
1923 *
1924 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
1925 */
1926 static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
1927 {
1928 char fast_no_hz[72];
1929 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
1930 struct rcu_dynticks *rdtp = rdp->dynticks;
1931 char *ticks_title;
1932 unsigned long ticks_value;
1933
1934 if (rsp->gpnum == rdp->gpnum) {
1935 ticks_title = "ticks this GP";
1936 ticks_value = rdp->ticks_this_gp;
1937 } else {
1938 ticks_title = "GPs behind";
1939 ticks_value = rsp->gpnum - rdp->gpnum;
1940 }
1941 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
1942 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
1943 cpu, ticks_value, ticks_title,
1944 atomic_read(&rdtp->dynticks) & 0xfff,
1945 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
1946 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
1947 fast_no_hz);
1948 }
1949
1950 /* Terminate the stall-info list. */
1951 static void print_cpu_stall_info_end(void)
1952 {
1953 pr_err("\t");
1954 }
1955
1956 /* Zero ->ticks_this_gp for all flavors of RCU. */
1957 static void zero_cpu_stall_ticks(struct rcu_data *rdp)
1958 {
1959 rdp->ticks_this_gp = 0;
1960 rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
1961 }
1962
1963 /* Increment ->ticks_this_gp for all flavors of RCU. */
1964 static void increment_cpu_stall_ticks(void)
1965 {
1966 struct rcu_state *rsp;
1967
1968 for_each_rcu_flavor(rsp)
1969 __this_cpu_ptr(rsp->rda)->ticks_this_gp++;
1970 }
1971
1972 #else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
1973
1974 static void print_cpu_stall_info_begin(void)
1975 {
1976 pr_cont(" {");
1977 }
1978
1979 static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
1980 {
1981 pr_cont(" %d", cpu);
1982 }
1983
1984 static void print_cpu_stall_info_end(void)
1985 {
1986 pr_cont("} ");
1987 }
1988
1989 static void zero_cpu_stall_ticks(struct rcu_data *rdp)
1990 {
1991 }
1992
1993 static void increment_cpu_stall_ticks(void)
1994 {
1995 }
1996
1997 #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */
1998
1999 #ifdef CONFIG_RCU_NOCB_CPU
2000
2001 /*
2002 * Offload callback processing from the set of CPUs specified at boot
2003 * time by rcu_nocb_mask. For each CPU in the set, there is a
2004 * kthread created that pulls the callbacks from the corresponding CPU,
2005 * waits for a grace period to elapse, and invokes the callbacks.
2006 * The no-CBs CPUs do a wake_up() on their kthread when they insert
2007 * a callback into any empty list, unless the rcu_nocb_poll boot parameter
2008 * has been specified, in which case each kthread actively polls its
2009 * CPU. (Which isn't so great for energy efficiency, but which does
2010 * reduce RCU's overhead on that CPU.)
2011 *
2012 * This is intended to be used in conjunction with Frederic Weisbecker's
2013 * adaptive-idle work, which would seriously reduce OS jitter on CPUs
2014 * running CPU-bound user-mode computations.
2015 *
2016 * Offloading of callback processing could also in theory be used as
2017 * an energy-efficiency measure because CPUs with no RCU callbacks
2018 * queued are more aggressive about entering dyntick-idle mode.
2019 */
2020
2021
2022 /* Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters. */
2023 static int __init rcu_nocb_setup(char *str)
2024 {
2025 alloc_bootmem_cpumask_var(&rcu_nocb_mask);
2026 have_rcu_nocb_mask = true;
2027 cpulist_parse(str, rcu_nocb_mask);
2028 return 1;
2029 }
2030 __setup("rcu_nocbs=", rcu_nocb_setup);
2031
2032 static int __init parse_rcu_nocb_poll(char *arg)
2033 {
2034 rcu_nocb_poll = 1;
2035 return 0;
2036 }
2037 early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
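/*
 * A hypothetical boot-line example combining the two parameters:
 * offload callbacks from CPUs 1-3 and 7, and make the servicing
 * "rcuo" kthreads poll their CPUs rather than sleep:
 *
 *	rcu_nocbs=1-3,7 rcu_nocb_poll
 */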
2038
2039 /*
2040 * Do any no-CBs CPUs need another grace period?
2041 *
2042 * Interrupts must be disabled. If the caller does not hold the root
2043 * rcu_node structure's ->lock, the results are advisory only.
2044 */
2045 static int rcu_nocb_needs_gp(struct rcu_state *rsp)
2046 {
2047 struct rcu_node *rnp = rcu_get_root(rsp);
2048
2049 return rnp->need_future_gp[(ACCESS_ONCE(rnp->completed) + 1) & 0x1];
2050 }
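/*
 * A small sketch of the parity indexing (hypothetical values): the
 * two-element ->need_future_gp[] and ->nocb_gp_wq[] arrays are indexed
 * by the low bit of a grace-period number, so "completed + 1" selects
 * the slot for the next grace period while "completed" selects the
 * slot for the one that just ended.
 */
static void sketch_parity_slots(void)
{
	unsigned long completed = 42;		/* hypothetical GP number */
	int next_slot = (completed + 1) & 0x1;	/* == 1: slot for GP 43 */
	int done_slot = completed & 0x1;	/* == 0: slot for GP 42 */

	(void)next_slot;
	(void)done_slot;
}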
2051
2052 /*
2053 * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
2054 * grace period.
2055 */
2056 static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
2057 {
2058 wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
2059 }
2060
2061 /*
2062 * Set the root rcu_node structure's ->need_future_gp field
2063 * based on the sum of those of all rcu_node structures. This does
2064 * double-count the root rcu_node structure's requests, but this
2065 * is necessary to handle the possibility of a rcu_nocb_kthread()
2066 * having awakened during the time that the rcu_node structures
2067 * were being updated for the end of the previous grace period.
2068 */
2069 static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
2070 {
2071 rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq;
2072 }
2073
2074 static void rcu_init_one_nocb(struct rcu_node *rnp)
2075 {
2076 init_waitqueue_head(&rnp->nocb_gp_wq[0]);
2077 init_waitqueue_head(&rnp->nocb_gp_wq[1]);
2078 }
2079
2080 /* Is the specified CPU a no-CBs CPU? */
2081 bool rcu_is_nocb_cpu(int cpu)
2082 {
2083 if (have_rcu_nocb_mask)
2084 return cpumask_test_cpu(cpu, rcu_nocb_mask);
2085 return false;
2086 }
2087
2088 /*
2089 * Enqueue the specified string of rcu_head structures onto the specified
2090 * CPU's no-CBs lists. The CPU is specified by rdp, the head of the
2091 * string by rhp, and the tail of the string by rhtp. The non-lazy/lazy
2092 * counts are supplied by rhcount and rhcount_lazy.
2093 *
2094 * If warranted, also wake up the kthread servicing this CPU's queues.
2095 */
2096 static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
2097 struct rcu_head *rhp,
2098 struct rcu_head **rhtp,
2099 int rhcount, int rhcount_lazy)
2100 {
2101 int len;
2102 struct rcu_head **old_rhpp;
2103 struct task_struct *t;
2104
2105 /* Enqueue the callback on the nocb list and update counts. */
2106 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
2107 ACCESS_ONCE(*old_rhpp) = rhp;
2108 atomic_long_add(rhcount, &rdp->nocb_q_count);
2109 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
2110
2111 /* If we are not being polled and there is a kthread, awaken it ... */
2112 t = ACCESS_ONCE(rdp->nocb_kthread);
2113 if (rcu_nocb_poll || !t)
2114 return;
2115 len = atomic_long_read(&rdp->nocb_q_count);
2116 if (old_rhpp == &rdp->nocb_head) {
2117 wake_up(&rdp->nocb_wq); /* ... only if queue was empty ... */
2118 rdp->qlen_last_fqs_check = 0;
2119 } else if (len > rdp->qlen_last_fqs_check + qhimark) {
2120 wake_up_process(t); /* ... or if many callbacks queued. */
2121 rdp->qlen_last_fqs_check = LONG_MAX / 2;
2122 }
2123 return;
2124 }
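/*
 * A user-space sketch of the tail-exchange enqueue above (GCC atomic
 * builtins, hypothetical node type). The atomic swap of the tail
 * pointer claims a link slot in a single step, so concurrent producers
 * never need a lock; the subsequent store through the old tail makes
 * the new node reachable. Between the two steps a consumer can observe
 * a non-tail node with a NULL ->next, which is exactly the window the
 * servicing kthread below spins on.
 */
struct sketch_node {
	struct sketch_node *next;
};

static struct sketch_node *sketch_head;
static struct sketch_node **sketch_tail = &sketch_head;

static void sketch_enqueue(struct sketch_node *n)
{
	struct sketch_node **old_tail;

	n->next = NULL;
	old_tail = __atomic_exchange_n(&sketch_tail, &n->next,
				       __ATOMIC_ACQ_REL);
	__atomic_store_n(old_tail, n, __ATOMIC_RELEASE);
}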
2125
2126 /*
2127 * This is a helper for __call_rcu(), which invokes this when the normal
2128 * callback queue is inoperable. If this is not a no-CBs CPU, this
2129 * function returns failure back to __call_rcu(), which can complain
2130 * appropriately.
2131 *
2132 * Otherwise, this function queues the callback where the corresponding
2133 * "rcuo" kthread can find it.
2134 */
2135 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
2136 bool lazy)
2137 {
2138
2139 if (!rcu_is_nocb_cpu(rdp->cpu))
2140 return 0;
2141 __call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy);
2142 if (__is_kfree_rcu_offset((unsigned long)rhp->func))
2143 trace_rcu_kfree_callback(rdp->rsp->name, rhp,
2144 (unsigned long)rhp->func,
2145 rdp->qlen_lazy, rdp->qlen);
2146 else
2147 trace_rcu_callback(rdp->rsp->name, rhp,
2148 rdp->qlen_lazy, rdp->qlen);
2149 return 1;
2150 }
2151
2152 /*
2153 * Adopt orphaned callbacks on a no-CBs CPU, or return 0 if this is
2154 * not a no-CBs CPU.
2155 */
2156 static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
2157 struct rcu_data *rdp)
2158 {
2159 long ql = rsp->qlen;
2160 long qll = rsp->qlen_lazy;
2161
2162 /* If this is not a no-CBs CPU, tell the caller to do it the old way. */
2163 if (!rcu_is_nocb_cpu(smp_processor_id()))
2164 return 0;
2165 rsp->qlen = 0;
2166 rsp->qlen_lazy = 0;
2167
2168 /* First, enqueue the donelist, if any. This preserves CB ordering. */
2169 if (rsp->orphan_donelist != NULL) {
2170 __call_rcu_nocb_enqueue(rdp, rsp->orphan_donelist,
2171 rsp->orphan_donetail, ql, qll);
2172 ql = qll = 0;
2173 rsp->orphan_donelist = NULL;
2174 rsp->orphan_donetail = &rsp->orphan_donelist;
2175 }
2176 if (rsp->orphan_nxtlist != NULL) {
2177 __call_rcu_nocb_enqueue(rdp, rsp->orphan_nxtlist,
2178 rsp->orphan_nxttail, ql, qll);
2179 ql = qll = 0;
2180 rsp->orphan_nxtlist = NULL;
2181 rsp->orphan_nxttail = &rsp->orphan_nxtlist;
2182 }
2183 return 1;
2184 }
2185
2186 /*
2187 * If necessary, kick off a new grace period, and either way wait
2188 * for a subsequent grace period to complete.
2189 */
2190 static void rcu_nocb_wait_gp(struct rcu_data *rdp)
2191 {
2192 unsigned long c;
2193 bool d;
2194 unsigned long flags;
2195 struct rcu_node *rnp = rdp->mynode;
2196
2197 raw_spin_lock_irqsave(&rnp->lock, flags);
2198 c = rcu_start_future_gp(rnp, rdp);
2199 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2200
2201 /*
2202 * Wait for the grace period. Do so interruptibly to avoid messing
2203 * up the load average.
2204 */
2205 trace_rcu_future_gp(rnp, rdp, c, "StartWait");
2206 for (;;) {
2207 wait_event_interruptible(
2208 rnp->nocb_gp_wq[c & 0x1],
2209 (d = ULONG_CMP_GE(ACCESS_ONCE(rnp->completed), c)));
2210 if (likely(d))
2211 break;
2212 flush_signals(current);
2213 trace_rcu_future_gp(rnp, rdp, c, "ResumeWait");
2214 }
2215 trace_rcu_future_gp(rnp, rdp, c, "EndWait");
2216 smp_mb(); /* Ensure that CB invocation happens after GP end. */
2217 }
2218
2219 /*
2220 * Per-rcu_data kthread, but only for no-CBs CPUs. Each kthread invokes
2221 * callbacks queued by the corresponding no-CBs CPU.
2222 */
2223 static int rcu_nocb_kthread(void *arg)
2224 {
2225 int c, cl;
2226 struct rcu_head *list;
2227 struct rcu_head *next;
2228 struct rcu_head **tail;
2229 struct rcu_data *rdp = arg;
2230
2231 /* Each pass through this loop invokes one batch of callbacks. */
2232 for (;;) {
2233 /* If not polling, wait for next batch of callbacks. */
2234 if (!rcu_nocb_poll)
2235 wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
2236 list = ACCESS_ONCE(rdp->nocb_head);
2237 if (!list) {
2238 schedule_timeout_interruptible(1);
2239 flush_signals(current);
2240 continue;
2241 }
2242
2243 /*
2244 * Extract queued callbacks, update counts, and wait
2245 * for a grace period to elapse.
2246 */
2247 ACCESS_ONCE(rdp->nocb_head) = NULL;
2248 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
2249 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
2250 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
2251 ACCESS_ONCE(rdp->nocb_p_count) += c;
2252 ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
2253 rcu_nocb_wait_gp(rdp);
2254
2255 /* Each pass through the following loop invokes a callback. */
2256 trace_rcu_batch_start(rdp->rsp->name, cl, c, -1);
2257 c = cl = 0;
2258 while (list) {
2259 next = list->next;
2260 /* Wait for enqueuing to complete, if needed. */
2261 while (next == NULL && &list->next != tail) {
2262 schedule_timeout_interruptible(1);
2263 next = list->next;
2264 }
2265 debug_rcu_head_unqueue(list);
2266 local_bh_disable();
2267 if (__rcu_reclaim(rdp->rsp->name, list))
2268 cl++;
2269 c++;
2270 local_bh_enable();
2271 list = next;
2272 }
2273 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
2274 ACCESS_ONCE(rdp->nocb_p_count) -= c;
2275 ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
2276 rdp->n_nocbs_invoked += c;
2277 }
2278 return 0;
2279 }
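/*
 * The matching consumer-side sketch (user space, GCC builtins, same
 * hypothetical layout as the enqueue sketch above; one consumer per
 * queue, like the one "rcuo" kthread per CPU). Detach everything by
 * resetting the head and swapping the tail back; while walking, a NULL
 * ->next on a node that is not the detached tail means a producer has
 * swapped the tail but not yet stored the link, so re-read until the
 * link appears.
 */
struct sketch_node {
	struct sketch_node *next;
};

static struct sketch_node *sketch_head;
static struct sketch_node **sketch_tail = &sketch_head;

static void sketch_drain_and_invoke(void (*invoke)(struct sketch_node *))
{
	struct sketch_node *list, *next, **old_tail;

	list = __atomic_load_n(&sketch_head, __ATOMIC_ACQUIRE);
	if (!list)
		return;		/* nothing visible yet; try again later */
	__atomic_store_n(&sketch_head, NULL, __ATOMIC_RELAXED);
	old_tail = __atomic_exchange_n(&sketch_tail, &sketch_head,
				       __ATOMIC_ACQ_REL);
	while (list) {
		next = __atomic_load_n(&list->next, __ATOMIC_ACQUIRE);
		while (next == NULL && &list->next != old_tail)
			next = __atomic_load_n(&list->next,
					       __ATOMIC_ACQUIRE);
		invoke(list);	/* play __rcu_reclaim() on this node */
		list = next;
	}
}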
2280
2281 /* Initialize per-rcu_data variables for no-CBs CPUs. */
2282 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2283 {
2284 rdp->nocb_tail = &rdp->nocb_head;
2285 init_waitqueue_head(&rdp->nocb_wq);
2286 }
2287
2288 /* Create a kthread for each RCU flavor for each no-CBs CPU. */
2289 static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
2290 {
2291 int cpu;
2292 struct rcu_data *rdp;
2293 struct task_struct *t;
2294
2295 if (rcu_nocb_mask == NULL)
2296 return;
2297 for_each_cpu(cpu, rcu_nocb_mask) {
2298 rdp = per_cpu_ptr(rsp->rda, cpu);
2299 t = kthread_run(rcu_nocb_kthread, rdp,
2300 "rcuo%c/%d", rsp->abbr, cpu);
2301 BUG_ON(IS_ERR(t));
2302 ACCESS_ONCE(rdp->nocb_kthread) = t;
2303 }
2304 }
2305
2306 /* Prevent __call_rcu() from enqueuing callbacks on no-CBs CPUs */
2307 static bool init_nocb_callback_list(struct rcu_data *rdp)
2308 {
2309 if (rcu_nocb_mask == NULL ||
2310 !cpumask_test_cpu(rdp->cpu, rcu_nocb_mask))
2311 return false;
2312 rdp->nxttail[RCU_NEXT_TAIL] = NULL;
2313 return true;
2314 }
2315
2316 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
2317
2318 static int rcu_nocb_needs_gp(struct rcu_state *rsp)
2319 {
2320 return 0;
2321 }
2322
2323 static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
2324 {
2325 }
2326
2327 static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
2328 {
2329 }
2330
2331 static void rcu_init_one_nocb(struct rcu_node *rnp)
2332 {
2333 }
2334
2335 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
2336 bool lazy)
2337 {
2338 return 0;
2339 }
2340
2341 static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
2342 struct rcu_data *rdp)
2343 {
2344 return 0;
2345 }
2346
2347 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2348 {
2349 }
2350
2351 static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
2352 {
2353 }
2354
2355 static bool init_nocb_callback_list(struct rcu_data *rdp)
2356 {
2357 return false;
2358 }
2359
2360 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
2361
2362 /*
2363 * An adaptive-ticks CPU can potentially execute in kernel mode for an
2364 * arbitrarily long period of time with the scheduling-clock tick turned
2365 * off. RCU will be paying attention to this CPU because it is in the
2366 * kernel, but the CPU cannot be guaranteed to be executing the RCU state
2367 * machine because the scheduling-clock tick has been disabled. Therefore,
2368 * if an adaptive-ticks CPU is failing to respond to the current grace
2369 * period and has not been idle from an RCU perspective, kick it.
2370 */
2371 static void rcu_kick_nohz_cpu(int cpu)
2372 {
2373 #ifdef CONFIG_NO_HZ_FULL
2374 if (tick_nohz_full_cpu(cpu))
2375 smp_send_reschedule(cpu);
2376 #endif /* #ifdef CONFIG_NO_HZ_FULL */
2377 }