/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptable semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/delay.h>

#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);

static int rcu_preempted_readers_exp(struct rcu_node *rnp);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO
	       "Experimental preemptable hierarchical RCU implementation.\n");
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
	return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Record a preemptable-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 */
static void rcu_preempt_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
	rdp->passed_quiesc_completed = rdp->gpnum - 1;
	barrier();
	rdp->passed_quiesc = 1;
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the appropriate entry
 * of the blocked_tasks[] array.  The task will dequeue itself when
 * it exits the outermost enclosing RCU read-side critical section.
 * Therefore, the current grace period cannot be permitted to complete
 * until the blocked_tasks[] entry indexed by the low-order bit of
 * rnp->gpnum empties.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
	struct task_struct *t = current;
	unsigned long flags;
	int phase;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (t->rcu_read_lock_nesting &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = rcu_preempt_state.rda[cpu];
		rnp = rdp->mynode;
		spin_lock_irqsave(&rnp->lock, flags);
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
		t->rcu_blocked_node = rnp;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.
		 *
		 * But first, note that the current CPU must still be
		 * on line!
		 */
		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		phase = (rnp->gpnum + !(rnp->qsmask & rdp->grpmask)) & 0x1;
		list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]);
		spin_unlock_irqrestore(&rnp->lock, flags);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	rcu_preempt_qs(cpu);
	local_irq_save(flags);
	t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
	local_irq_restore(flags);
}

/*
 * Tree-preemptable RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	ACCESS_ONCE(current->rcu_read_lock_nesting)++;
	barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempted_readers(struct rcu_node *rnp)
{
	int phase = rnp->gpnum & 0x1;

	return !list_empty(&rnp->blocked_tasks[phase]) ||
	       !list_empty(&rnp->blocked_tasks[phase + 2]);
}
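
/*
 * Illustrative note (not part of the original file): how the phase
 * arithmetic above and in rcu_preempt_note_context_switch() fits
 * together.  Suppose rnp->gpnum == 5:
 *
 *   - If the blocking CPU has not yet checked in for grace period 5
 *     (its bit is still set in rnp->qsmask), the task is queued on
 *     blocked_tasks[(5 + 0) & 0x1] == blocked_tasks[1] and blocks the
 *     current grace period.
 *
 *   - If the blocking CPU has already checked in (its qsmask bit is
 *     clear), the task is queued on blocked_tasks[(5 + 1) & 0x1] ==
 *     blocked_tasks[0] and can block only the next grace period.
 *
 * rcu_preempted_readers() then checks blocked_tasks[gpnum & 0x1] and,
 * for expedited grace periods, the corresponding list at index + 2.
 */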

/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;
	struct rcu_node *rnp_p;

	if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {
		spin_unlock_irqrestore(&rnp->lock, flags);
		return;  /* Still need more quiescent states! */
	}

	rnp_p = rnp->parent;
	if (rnp_p == NULL) {
		/*
		 * Either there is only one rcu_node in the tree,
		 * or tasks were kicked up to root rcu_node due to
		 * CPUs going offline.
		 */
		rcu_report_qs_rsp(&rcu_preempt_state, flags);
		return;
	}

	/* Report up the rest of the hierarchy. */
	mask = rnp->grpmask;
	spin_unlock(&rnp->lock);	/* irqs remain disabled. */
	spin_lock(&rnp_p->lock);	/* irqs already disabled. */
	rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	unsigned long flags;
	struct rcu_node *rnp;
	int special;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
		rcu_preempt_qs(smp_processor_id());
	}

	/* Hardware IRQ handlers cannot block. */
	if (in_irq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the list it blocked on.  The
		 * task can migrate while we acquire the lock, but at
		 * most one time.  So at most two passes through loop.
		 */
		for (;;) {
			rnp = t->rcu_blocked_node;
			spin_lock(&rnp->lock);  /* irqs already disabled. */
			if (rnp == t->rcu_blocked_node)
				break;
			spin_unlock(&rnp->lock);  /* irqs remain disabled. */
		}
		empty = !rcu_preempted_readers(rnp);
		empty_exp = !rcu_preempted_readers_exp(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
		 */
		if (empty)
			spin_unlock_irqrestore(&rnp->lock, flags);
		else
			rcu_report_unblock_qs_rnp(rnp, flags);

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && !rcu_preempted_readers_exp(rnp))
			rcu_report_exp_rnp(&rcu_preempt_state, rnp);
	} else {
		local_irq_restore(flags);
	}
}

/*
 * Tree-preemptable RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
	if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
	    unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
		rcu_read_unlock_special(t);
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
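
/*
 * Illustrative sketch (not part of the original file): what a typical
 * reader of this API looks like.  The structure and function names
 * below are hypothetical, and the block is excluded from compilation.
 */
#if 0	/* example only */
struct example_data {
	int value;
	struct rcu_head rcu;
};

static struct example_data *example_ptr;

static int example_read_value(void)
{
	struct example_data *p;
	int val = -1;

	rcu_read_lock();	/* ends up in __rcu_read_lock() above */
	p = rcu_dereference(example_ptr);
	if (p != NULL)
		val = p->value;	/* p cannot be freed until we unlock */
	rcu_read_unlock();	/* may invoke rcu_read_unlock_special() */
	return val;
}
#endif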

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
	unsigned long flags;
	struct list_head *lp;
	int phase;
	struct task_struct *t;

	if (rcu_preempted_readers(rnp)) {
		spin_lock_irqsave(&rnp->lock, flags);
		phase = rnp->gpnum & 0x1;
		lp = &rnp->blocked_tasks[phase];
		list_for_each_entry(t, lp, rcu_node_entry)
			printk(" P%d", t->pid);
		spin_unlock_irqrestore(&rnp->lock, flags);
	}
}

#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rcu_preempted_readers(rnp));
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for case in which all CPUs covered by the
 * specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 *
 * Returns a non-zero value if there were tasks blocking the current
 * normal or expedited grace period on the specified rcu_node structure,
 * and zero otherwise.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	int i;
	struct list_head *lp;
	struct list_head *lp_root;
	int retval = 0;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	struct task_struct *tp;

	if (rnp == rnp_root) {
		WARN_ONCE(1, "Last CPU thought to be offlined?");
		return 0;  /* Shouldn't happen: at least one CPU online. */
	}
	WARN_ON_ONCE(rnp != rdp->mynode &&
		     (!list_empty(&rnp->blocked_tasks[0]) ||
		      !list_empty(&rnp->blocked_tasks[1]) ||
		      !list_empty(&rnp->blocked_tasks[2]) ||
		      !list_empty(&rnp->blocked_tasks[3])));

	/*
	 * Move tasks up to root rcu_node.  Rely on the fact that the
	 * root rcu_node can be at most one ahead of the rest of the
	 * rcu_nodes in terms of ->gpnum value.  This fact allows us to
	 * move the blocked_tasks[] array directly, element by element.
	 */
	if (rcu_preempted_readers(rnp))
		retval |= RCU_OFL_TASKS_NORM_GP;
	if (rcu_preempted_readers_exp(rnp))
		retval |= RCU_OFL_TASKS_EXP_GP;
	for (i = 0; i < 4; i++) {
		lp = &rnp->blocked_tasks[i];
		lp_root = &rnp_root->blocked_tasks[i];
		while (!list_empty(lp)) {
			tp = list_entry(lp->next, typeof(*tp), rcu_node_entry);
			spin_lock(&rnp_root->lock); /* irqs already disabled */
			list_del(&tp->rcu_node_entry);
			tp->rcu_blocked_node = rnp_root;
			list_add(&tp->rcu_node_entry, lp_root);
			spin_unlock(&rnp_root->lock); /* irqs remain disabled */
		}
	}
	return retval;
}

/*
 * Do CPU-offline processing for preemptable RCU.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
	__rcu_offline_cpu(cpu, &rcu_preempt_state);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
		rcu_preempt_qs(cpu);
		return;
	}
	if (per_cpu(rcu_preempt_data, cpu).qs_pending)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

/*
 * Process callbacks for preemptable RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
	__rcu_process_callbacks(&rcu_preempt_state,
				&__get_cpu_var(rcu_preempt_data));
}

/*
 * Queue a preemptable-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(call_rcu);
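
/*
 * Illustrative sketch (not part of the original file): a typical
 * call_rcu() user embeds an rcu_head in its structure and frees that
 * structure from the callback once a grace period has elapsed.  Names
 * continue the hypothetical example_data sketch above, and the block
 * is excluded from compilation.
 */
#if 0	/* example only */
static void example_free_rcu(struct rcu_head *head)
{
	struct example_data *p = container_of(head, struct example_data, rcu);

	kfree(p);
}

static void example_retire(struct example_data *p)
{
	/* Caller has already unlinked p from all reader-visible paths. */
	call_rcu(&p->rcu, example_free_rcu);
}
#endif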

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void synchronize_rcu(void)
{
	struct rcu_synchronize rcu;

	if (!rcu_scheduler_active)
		return;

	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	call_rcu(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
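
/*
 * Illustrative sketch (not part of the original file): a synchronous
 * updater publishes a new version, waits for a grace period, and only
 * then frees the old version.  Names continue the hypothetical sketch
 * above, example_lock is assumed to serialize updaters, and the block
 * is excluded from compilation.
 */
#if 0	/* example only */
static DEFINE_SPINLOCK(example_lock);

static int example_update_value(int new_value)
{
	struct example_data *newp, *oldp;

	newp = kmalloc(sizeof(*newp), GFP_KERNEL);
	if (newp == NULL)
		return -ENOMEM;
	newp->value = new_value;

	spin_lock(&example_lock);
	oldp = example_ptr;
	rcu_assign_pointer(example_ptr, newp);	/* publish new version */
	spin_unlock(&example_lock);

	synchronize_rcu();	/* wait for pre-existing readers to finish */
	kfree(oldp);
	return 0;
}
#endif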

static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
	return !list_empty(&rnp->blocked_tasks[2]) ||
	       !list_empty(&rnp->blocked_tasks[3]);
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	return !rcu_preempted_readers_exp(rnp) &&
	       ACCESS_ONCE(rnp->expmask) == 0;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
{
	unsigned long flags;
	unsigned long mask;

	spin_lock_irqsave(&rnp->lock, flags);
	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp))
			break;
		if (rnp->parent == NULL) {
			wake_up(&sync_rcu_preempt_exp_wq);
			break;
		}
		mask = rnp->grpmask;
		spin_unlock(&rnp->lock);  /* irqs remain disabled */
		rnp = rnp->parent;
		spin_lock(&rnp->lock);  /* irqs already disabled */
		rnp->expmask &= ~mask;
	}
	spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 * grace period for the specified rcu_node structure.  If there are no such
 * tasks, report it up the rcu_node hierarchy.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
 */
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
{
	int must_wait;

	spin_lock(&rnp->lock);  /* irqs already disabled */
	list_splice_init(&rnp->blocked_tasks[0], &rnp->blocked_tasks[2]);
	list_splice_init(&rnp->blocked_tasks[1], &rnp->blocked_tasks[3]);
	must_wait = rcu_preempted_readers_exp(rnp);
	spin_unlock(&rnp->lock);  /* irqs remain disabled */
	if (!must_wait)
		rcu_report_exp_rnp(rsp, rnp);
}

/*
 * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
 * is to invoke synchronize_sched_expedited() to push all the tasks to
 * the ->blocked_tasks[] lists, move all entries from the first set of
 * ->blocked_tasks[] lists to the second set, and finally wait for this
 * second set to drain.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_node *rnp;
	struct rcu_state *rsp = &rcu_preempt_state;
	long snap;
	int trycount = 0;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
	smp_mb(); /* Above access cannot bleed into critical section. */

	/*
	 * Acquire lock, falling back to synchronize_rcu() if too many
	 * lock-acquisition failures.  Of course, if someone does the
	 * expedited grace period for us, just leave.
	 */
	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
		if (trycount++ < 10)
			udelay(trycount * num_online_cpus());
		else {
			synchronize_rcu();
			return;
		}
		if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
			goto mb_ret; /* Others did our work for us. */
	}
	if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
		goto unlock_mb_ret; /* Others did our work for us. */

	/* Force all RCU readers onto blocked_tasks[]. */
	synchronize_sched_expedited();

	spin_lock_irqsave(&rsp->onofflock, flags);

	/* Initialize ->expmask for all non-leaf rcu_node structures. */
	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
		spin_lock(&rnp->lock); /* irqs already disabled. */
		rnp->expmask = rnp->qsmaskinit;
		spin_unlock(&rnp->lock); /* irqs remain disabled. */
	}

	/* Snapshot current state of ->blocked_tasks[] lists. */
	rcu_for_each_leaf_node(rsp, rnp)
		sync_rcu_preempt_exp_init(rsp, rnp);
	if (NUM_RCU_NODES > 1)
		sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

	spin_unlock_irqrestore(&rsp->onofflock, flags);

	/* Wait for snapshotted ->blocked_tasks[] lists to drain. */
	rnp = rcu_get_root(rsp);
	wait_event(sync_rcu_preempt_exp_wq,
		   sync_rcu_preempt_exp_done(rnp));

	/* Clean up and exit. */
	smp_mb(); /* ensure expedited GP seen before counter increment. */
	ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
	smp_mb(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
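
/*
 * Illustrative note (not part of the original file): callers use
 * synchronize_rcu_expedited() exactly like synchronize_rcu() when
 * grace-period latency matters more than CPU overhead; for instance,
 * the hypothetical example_update_value() sketch earlier could
 * substitute it for synchronize_rcu() before the kfree().
 */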

/*
 * Check to see if there is any immediate preemptable-RCU-related work
 * to be done.
 */
static int rcu_preempt_pending(int cpu)
{
	return __rcu_pending(&rcu_preempt_state,
			     &per_cpu(rcu_preempt_data, cpu));
}

/*
 * Does preemptable RCU need the CPU to stay out of dynticks mode?
 */
static int rcu_preempt_needs_cpu(int cpu)
{
	return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
}

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 */
void rcu_barrier(void)
{
	_rcu_barrier(&rcu_preempt_state, call_rcu);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
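
/*
 * Illustrative sketch (not part of the original file): a module that
 * posts callbacks with call_rcu() must wait for them in its exit
 * handler, otherwise a callback could run after the module text has
 * been unloaded.  The names are hypothetical and the block is excluded
 * from compilation.
 */
#if 0	/* example only */
static void __exit example_module_exit(void)
{
	example_stop_posting_callbacks();	/* no new call_rcu() after this */
	rcu_barrier();		/* wait for callbacks already posted */
}
#endif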

/*
 * Initialize preemptable RCU's per-CPU data.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
	rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
}

/*
 * Move preemptable RCU's callbacks to ->orphan_cbs_list.
 */
static void rcu_preempt_send_cbs_to_orphanage(void)
{
	rcu_send_cbs_to_orphanage(&rcu_preempt_state);
}

/*
 * Initialize preemptable RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
	RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data);
}

/*
 * Check for a task exiting while in a preemptable-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0)
		return;
	t->rcu_read_lock_nesting = 1;
	rcu_read_unlock();
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Hierarchical RCU implementation.\n");
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Because preemptable RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/*
 * Because preemptable RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempted_readers(struct rcu_node *rnp)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
{
	spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

/*
 * Because preemptable RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
}

#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/*
 * Because there is no preemptable RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptable RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the current
 * grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	return 0;
}

/*
 * Because preemptable RCU does not exist, it never needs CPU-offline
 * processing.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptable RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Because preemptable RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}

/*
 * In classic RCU, call_rcu() is just call_rcu_sched().
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	call_rcu_sched(head, func);
}
EXPORT_SYMBOL_GPL(call_rcu);

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptable RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptable RCU does not exist, there is never any need to
 * report on tasks preempted in RCU read-side critical sections during
 * expedited RCU grace periods.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
{
	return;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptable RCU does not exist, it never has any work to do.
 */
static int rcu_preempt_pending(int cpu)
{
	return 0;
}

/*
 * Because preemptable RCU does not exist, it never needs any CPU.
 */
static int rcu_preempt_needs_cpu(int cpu)
{
	return 0;
}

/*
 * Because preemptable RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
	rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Because preemptable RCU does not exist, there is no per-CPU
 * data to initialize.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
}

/*
 * Because there is no preemptable RCU, there are no callbacks to move.
 */
static void rcu_preempt_send_cbs_to_orphanage(void)
{
}

/*
 * Because preemptable RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */