kernel/rcu/srcu.c
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/srcu.h>

#include "rcu.h"

/*
 * Initialize an rcu_batch structure to empty.
 */
static inline void rcu_batch_init(struct rcu_batch *b)
{
	b->head = NULL;
	b->tail = &b->head;
}

/*
 * Enqueue a callback onto the tail of the specified rcu_batch structure.
 */
static inline void rcu_batch_queue(struct rcu_batch *b, struct rcu_head *head)
{
	*b->tail = head;
	b->tail = &head->next;
}

/*
 * Is the specified rcu_batch structure empty?
 */
static inline bool rcu_batch_empty(struct rcu_batch *b)
{
	return b->tail == &b->head;
}

/*
 * Remove the callback at the head of the specified rcu_batch structure
 * and return a pointer to it, or return NULL if the structure is empty.
 */
static inline struct rcu_head *rcu_batch_dequeue(struct rcu_batch *b)
{
	struct rcu_head *head;

	if (rcu_batch_empty(b))
		return NULL;

	head = b->head;
	b->head = head->next;
	if (b->tail == &head->next)
		rcu_batch_init(b);

	return head;
}

/*
 * Move all callbacks from the rcu_batch structure specified by "from" to
 * the structure specified by "to".
 */
static inline void rcu_batch_move(struct rcu_batch *to, struct rcu_batch *from)
{
	if (!rcu_batch_empty(from)) {
		*to->tail = from->head;
		to->tail = from->tail;
		rcu_batch_init(from);
	}
}

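/*
 * Illustration (editorial note, not upstream text): rcu_batch is a
 * singly-linked queue using the tail-pointer-to-pointer idiom, so
 * enqueue and "move all" are O(1) with no special empty-queue case.
 * Starting from an empty batch b (b->head == NULL, b->tail == &b->head),
 * queueing h1 and then h2 yields:
 *
 *	b->head -> h1 -> h2 -> NULL,  b->tail == &h2->next
 *
 * and rcu_batch_empty() is simply "does ->tail still point at ->head?".
 */
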
static int init_srcu_struct_fields(struct srcu_struct *sp)
{
	sp->completed = 0;
	spin_lock_init(&sp->queue_lock);
	sp->running = false;
	rcu_batch_init(&sp->batch_queue);
	rcu_batch_init(&sp->batch_check0);
	rcu_batch_init(&sp->batch_check1);
	rcu_batch_init(&sp->batch_done);
	INIT_DELAYED_WORK(&sp->work, process_srcu);
	sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array);
	return sp->per_cpu_ref ? 0 : -ENOMEM;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

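/*
 * Example lifecycle (editorial sketch; "my_srcu" is a hypothetical name):
 *
 *	static struct srcu_struct my_srcu;
 *
 *	if (init_srcu_struct(&my_srcu))
 *		return -ENOMEM;
 *	...
 *	cleanup_srcu_struct(&my_srcu);
 *
 * init_srcu_struct() returns -ENOMEM if the per-CPU array cannot be
 * allocated, and cleanup_srcu_struct() must not be called until all
 * readers and pending callbacks for the domain have finished.
 * Statically allocated domains can instead be declared with
 * DEFINE_SRCU() or DEFINE_STATIC_SRCU() from <linux/srcu.h>.
 */
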
/*
 * Returns approximate total of the readers' ->seq[] values for the
 * rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_seq_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;
	unsigned long t;

	for_each_possible_cpu(cpu) {
		t = READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->seq[idx]);
		sum += t;
	}
	return sum;
}

/*
 * Returns approximate number of readers active on the specified rank
 * of the per-CPU ->c[] counters.
 */
static unsigned long srcu_readers_active_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;
	unsigned long t;

	for_each_possible_cpu(cpu) {
		t = READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx]);
		sum += t;
	}
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be stably zero.  An example unstable zero can occur if the call
 * to srcu_readers_active_idx() misses an __srcu_read_lock() increment,
 * but due to task migration, sees the corresponding __srcu_read_unlock()
 * decrement.  This can happen because srcu_readers_active_idx() takes
 * time to sum the array, and might in fact be interrupted or preempted
 * partway through the summation.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
{
	unsigned long seq;

	seq = srcu_readers_seq_idx(sp, idx);

	/*
	 * The following smp_mb() A pairs with the smp_mb() B located in
	 * __srcu_read_lock().  This pairing ensures that if an
	 * __srcu_read_lock() increments its counter after the summation
	 * in srcu_readers_active_idx(), then the corresponding SRCU read-side
	 * critical section will see any changes made prior to the start
	 * of the current SRCU grace period.
	 *
	 * Also, if the above call to srcu_readers_seq_idx() saw the
	 * increment of ->seq[], then the call to srcu_readers_active_idx()
	 * must see the increment of ->c[].
	 */
	smp_mb(); /* A */

	/*
	 * Note that srcu_readers_active_idx() can incorrectly return
	 * zero even though there is a pre-existing reader throughout.
	 * To see this, suppose that task A is in a very long SRCU
	 * read-side critical section that started on CPU 0, and that
	 * no other reader exists, so that the sum of the counters
	 * is equal to one.  Then suppose that task B starts executing
	 * srcu_readers_active_idx(), summing up to CPU 1, and then that
	 * task C starts reading on CPU 0, so that its increment is not
	 * summed, but finishes reading on CPU 2, so that its decrement
	 * -is- summed.  Then when task B completes its sum, it will
	 * incorrectly get zero, despite the fact that task A has been
	 * in its SRCU read-side critical section the whole time.
	 *
	 * We therefore do a validation step should srcu_readers_active_idx()
	 * return zero.
	 */
	if (srcu_readers_active_idx(sp, idx) != 0)
		return false;

	/*
	 * The remainder of this function is the validation step.
	 * The following smp_mb() D pairs with the smp_mb() C in
	 * __srcu_read_unlock().  If the __srcu_read_unlock() was seen
	 * by srcu_readers_active_idx() above, then any destructive
	 * operation performed after the grace period will happen after
	 * the corresponding SRCU read-side critical section.
	 *
	 * Note that there can be at most NR_CPUS worth of readers using
	 * the old index, which is not enough to overflow even a 32-bit
	 * integer.  (Yes, this does mean that systems having more than
	 * a billion or so CPUs need to be 64-bit systems.)  Therefore,
	 * the sum of the ->seq[] counters cannot possibly overflow.
	 * Therefore, the only way that the return values of the two
	 * calls to srcu_readers_seq_idx() can be equal is if there were
	 * no increments of the corresponding rank of ->seq[] counts
	 * in the interim.  But the missed-increment scenario laid out
	 * above includes an increment of the ->seq[] counter by
	 * the corresponding __srcu_read_lock().  Therefore, if this
	 * scenario occurs, the return values from the two calls to
	 * srcu_readers_seq_idx() will differ, and thus the validation
	 * step below suffices.
	 */
	smp_mb(); /* D */

	return srcu_readers_seq_idx(sp, idx) == seq;
}

/**
 * srcu_readers_active - returns approximate number of readers.
 * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static int srcu_readers_active(struct srcu_struct *sp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		sum += READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[0]);
		sum += READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[1]);
	}
	return sum;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @sp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *sp)
{
	if (WARN_ON(srcu_readers_active(sp)))
		return; /* Leakage unless caller handles error. */
	free_percpu(sp->per_cpu_ref);
	sp->per_cpu_ref = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.  Must be called from process context.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *sp)
{
	int idx;

	idx = READ_ONCE(sp->completed) & 0x1;
	preempt_disable();
	__this_cpu_inc(sp->per_cpu_ref->c[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	__this_cpu_inc(sp->per_cpu_ref->seq[idx]);
	preempt_enable();
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 * Must be called from process context.
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_dec(sp->per_cpu_ref->c[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);

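/*
 * Example read-side usage (editorial sketch; callers normally use the
 * srcu_read_lock()/srcu_read_unlock() wrappers from <linux/srcu.h>
 * rather than these __-prefixed functions, since the wrappers supply
 * lockdep annotations):
 *
 *	int idx;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(gp, &my_srcu);
 *	if (p)
 *		do_something_with(p);
 *	srcu_read_unlock(&my_srcu, idx);
 *
 * Here "my_srcu", "gp", and do_something_with() are hypothetical.
 * Unlike plain RCU readers, SRCU readers may block between the lock
 * and the unlock.
 */
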
/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after 10 microseconds,
 * we repeatedly block for 1-millisecond time periods.  This approach
 * has done well in testing, so there is no need for a config parameter.
 */
#define SRCU_RETRY_CHECK_DELAY		5
#define SYNCHRONIZE_SRCU_TRYCOUNT	2
#define SYNCHRONIZE_SRCU_EXP_TRYCOUNT	12
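
/*
 * Editorial note on the arithmetic: try_check_zero() makes "trycount"
 * checks with a udelay(SRCU_RETRY_CHECK_DELAY) between successive
 * checks, so a normal synchronize_srcu() (trycount == 2) spins on the
 * order of 10 microseconds before giving up and falling back to the
 * blocking workqueue path, while the expedited variant (trycount == 12)
 * is willing to spin roughly an order of magnitude longer.
 */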

/*
 * Wait until all pre-existing readers complete.  Such readers will
 * have used the index specified by "idx".  The caller must ensure
 * that ->completed does not change while this check runs and that
 * idx == (->completed & 1) ^ 1.
 */
static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(sp, idx))
			return true;
		if (--trycount <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}

/*
 * Increment the ->completed counter so that future SRCU readers will
 * use the other rank of the ->c[] and ->seq[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *sp)
{
	sp->completed++;
}

/*
 * Enqueue an SRCU callback on the specified srcu_struct structure,
 * initiating grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
	       void (*func)(struct rcu_head *head))
{
	unsigned long flags;

	head->next = NULL;
	head->func = func;
	spin_lock_irqsave(&sp->queue_lock, flags);
	rcu_batch_queue(&sp->batch_queue, head);
	if (!sp->running) {
		sp->running = true;
		queue_delayed_work(system_power_efficient_wq, &sp->work, 0);
	}
	spin_unlock_irqrestore(&sp->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(call_srcu);
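
/*
 * Example asynchronous usage (editorial sketch; "struct foo", "my_srcu",
 * and foo_reclaim() are hypothetical):
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		...
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rh)
 *	{
 *		kfree(container_of(rh, struct foo, rh));
 *	}
 *
 *	...
 *	call_srcu(&my_srcu, &p->rh, foo_reclaim);
 *
 * The callback runs only after a subsequent SRCU grace period elapses,
 * but it may run concurrently with readers that started after the
 * call_srcu() invocation, and must tolerate doing so.
 */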

static void srcu_advance_batches(struct srcu_struct *sp, int trycount);
static void srcu_reschedule(struct srcu_struct *sp);

/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
{
	struct rcu_synchronize rcu;
	struct rcu_head *head = &rcu.head;
	bool done = false;

	rcu_lockdep_assert(!lock_is_held(&sp->dep_map) &&
			   !lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section");

	might_sleep();
	init_completion(&rcu.completion);

	head->next = NULL;
	head->func = wakeme_after_rcu;
	spin_lock_irq(&sp->queue_lock);
	if (!sp->running) {
		/* steal the processing owner */
		sp->running = true;
		rcu_batch_queue(&sp->batch_check0, head);
		spin_unlock_irq(&sp->queue_lock);

		srcu_advance_batches(sp, trycount);
		if (!rcu_batch_empty(&sp->batch_done)) {
			BUG_ON(sp->batch_done.head != head);
			rcu_batch_dequeue(&sp->batch_done);
			done = true;
		}
		/* give the processing owner to work_struct */
		srcu_reschedule(sp);
	} else {
		rcu_batch_queue(&sp->batch_queue, head);
		spin_unlock_irq(&sp->queue_lock);
	}

	if (!done)
		wait_for_completion(&rcu.completion);
}

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for the counts of both index ranks to drain to zero.  To avoid
 * starving synchronize_srcu(), it first waits for the count of the
 * index = ((->completed & 1) ^ 1) rank to drain to zero, then flips
 * ->completed and waits for the count of the other rank to drain.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.  Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 */
void synchronize_srcu(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, rcu_gp_is_expedited()
			   ? SYNCHRONIZE_SRCU_EXP_TRYCOUNT
			   : SYNCHRONIZE_SRCU_TRYCOUNT);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
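
/*
 * Example synchronous update (editorial sketch; "gp", "new", "my_lock",
 * "my_srcu", and struct foo are hypothetical, with gp an SRCU-protected
 * pointer updated under my_lock):
 *
 *	struct foo *old;
 *
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(gp, new);
 *	synchronize_srcu(&my_srcu);
 *	kfree(old);
 *
 * Once synchronize_srcu() returns, no reader that could have obtained
 * "old" via srcu_dereference() can still be inside its read-side
 * critical section, so freeing it is safe.
 */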

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, SYNCHRONIZE_SRCU_EXP_TRYCOUNT);
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @sp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *sp)
{
	synchronize_srcu(sp);
}
EXPORT_SYMBOL_GPL(srcu_barrier);

/**
 * srcu_batches_completed - return batches completed.
 * @sp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
unsigned long srcu_batches_completed(struct srcu_struct *sp)
{
	return sp->completed;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);

#define SRCU_CALLBACK_BATCH	10
#define SRCU_INTERVAL		1

/*
 * Move any new SRCU callbacks to the first stage of the SRCU grace
 * period pipeline.
 */
static void srcu_collect_new(struct srcu_struct *sp)
{
	if (!rcu_batch_empty(&sp->batch_queue)) {
		spin_lock_irq(&sp->queue_lock);
		rcu_batch_move(&sp->batch_check0, &sp->batch_queue);
		spin_unlock_irq(&sp->queue_lock);
	}
}

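/*
 * Grace-period pipeline overview (editorial note, not upstream text):
 *
 *	call_srcu() -> batch_queue -> batch_check0 -> batch_check1
 *						-> batch_done -> invocation
 *
 * srcu_collect_new() pulls newly queued callbacks into ->batch_check0.
 * A callback advances out of ->batch_check0 once the readers on one
 * index rank have drained, and out of ->batch_check1 once the readers
 * on the other rank have drained as well, at which point it sits in
 * ->batch_done awaiting invocation by srcu_invoke_callbacks().
 */
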
/*
 * Core SRCU state machine.  Advance callbacks from ->batch_check0 to
 * ->batch_check1 and then to ->batch_done as readers drain.
 */
static void srcu_advance_batches(struct srcu_struct *sp, int trycount)
{
	int idx = 1 ^ (sp->completed & 1);

	/*
	 * Because readers might be delayed for an extended period after
	 * fetching ->completed for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1.  We therefore
	 * need to wait for readers to clear from both index values before
	 * invoking a callback.
	 */

	if (rcu_batch_empty(&sp->batch_check0) &&
	    rcu_batch_empty(&sp->batch_check1))
		return; /* no callbacks need to be advanced */

	if (!try_check_zero(sp, idx, trycount))
		return; /* failed to advance, will try after SRCU_INTERVAL */

	/*
	 * The callbacks in ->batch_check1 have already completed their
	 * first zero check and flip, back when they were enqueued on
	 * ->batch_check0 in a previous invocation of srcu_advance_batches().
	 * (Presumably try_check_zero() returned false during that
	 * invocation, leaving the callbacks stranded on ->batch_check1.)
	 * They are therefore ready to invoke, so move them to ->batch_done.
	 */
	rcu_batch_move(&sp->batch_done, &sp->batch_check1);

	if (rcu_batch_empty(&sp->batch_check0))
		return; /* no callbacks need to be advanced */
	srcu_flip(sp);

	/*
	 * The callbacks in ->batch_check0 just finished their
	 * first zero check and flip, so move them to ->batch_check1
	 * for future checking on the other idx.
	 */
	rcu_batch_move(&sp->batch_check1, &sp->batch_check0);

	/*
	 * SRCU read-side critical sections are normally short, so check
	 * at least twice in quick succession after a flip.
	 */
	trycount = trycount < 2 ? 2 : trycount;
	if (!try_check_zero(sp, idx^1, trycount))
		return; /* failed to advance, will try after SRCU_INTERVAL */

	/*
	 * The callbacks in ->batch_check1 have now waited for all
	 * pre-existing readers using both idx values.  They are therefore
	 * ready to invoke, so move them to ->batch_done.
	 */
	rcu_batch_move(&sp->batch_done, &sp->batch_check1);
}

/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.
 */
static void srcu_invoke_callbacks(struct srcu_struct *sp)
{
	int i;
	struct rcu_head *head;

	for (i = 0; i < SRCU_CALLBACK_BATCH; i++) {
		head = rcu_batch_dequeue(&sp->batch_done);
		if (!head)
			break;
		local_bh_disable();
		head->func(head);
		local_bh_enable();
	}
}

/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *sp)
{
	bool pending = true;

	if (rcu_batch_empty(&sp->batch_done) &&
	    rcu_batch_empty(&sp->batch_check1) &&
	    rcu_batch_empty(&sp->batch_check0) &&
	    rcu_batch_empty(&sp->batch_queue)) {
		spin_lock_irq(&sp->queue_lock);
		if (rcu_batch_empty(&sp->batch_done) &&
		    rcu_batch_empty(&sp->batch_check1) &&
		    rcu_batch_empty(&sp->batch_check0) &&
		    rcu_batch_empty(&sp->batch_queue)) {
			sp->running = false;
			pending = false;
		}
		spin_unlock_irq(&sp->queue_lock);
	}

	if (pending)
		queue_delayed_work(system_power_efficient_wq,
				   &sp->work, SRCU_INTERVAL);
}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
void process_srcu(struct work_struct *work)
{
	struct srcu_struct *sp;

	sp = container_of(work, struct srcu_struct, work.work);

	srcu_collect_new(sp);
	srcu_advance_batches(sp, 1);
	srcu_invoke_callbacks(sp);
	srcu_reschedule(sp);
}
EXPORT_SYMBOL_GPL(process_srcu);