| 1 | /* |
| 2 | * Sleepable Read-Copy Update mechanism for mutual exclusion. |
| 3 | * |
| 4 | * This program is free software; you can redistribute it and/or modify |
| 5 | * it under the terms of the GNU General Public License as published by |
| 6 | * the Free Software Foundation; either version 2 of the License, or |
| 7 | * (at your option) any later version. |
| 8 | * |
| 9 | * This program is distributed in the hope that it will be useful, |
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 12 | * GNU General Public License for more details. |
| 13 | * |
| 14 | * You should have received a copy of the GNU General Public License |
| 15 | * along with this program; if not, write to the Free Software |
| 16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
| 17 | * |
| 18 | * Copyright (C) IBM Corporation, 2006 |
| 19 | * |
| 20 | * Author: Paul McKenney <paulmck@us.ibm.com> |
| 21 | * |
| 22 | * For detailed explanation of Read-Copy Update mechanism see - |
| 23 | * Documentation/RCU/ *.txt |
| 24 | * |
| 25 | */ |
| 26 | |
| 27 | #include <linux/export.h> |
| 28 | #include <linux/mutex.h> |
| 29 | #include <linux/percpu.h> |
| 30 | #include <linux/preempt.h> |
| 31 | #include <linux/rcupdate.h> |
| 32 | #include <linux/sched.h> |
| 33 | #include <linux/smp.h> |
| 34 | #include <linux/delay.h> |
| 35 | #include <linux/srcu.h> |
| 36 | |
| 37 | static int init_srcu_struct_fields(struct srcu_struct *sp) |
| 38 | { |
| 39 | sp->completed = 0; |
| 40 | mutex_init(&sp->mutex); |
| 41 | sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array); |
| 42 | return sp->per_cpu_ref ? 0 : -ENOMEM; |
| 43 | } |
| 44 | |
| 45 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 46 | |
| 47 | int __init_srcu_struct(struct srcu_struct *sp, const char *name, |
| 48 | struct lock_class_key *key) |
| 49 | { |
| 50 | /* Don't re-initialize a lock while it is held. */ |
| 51 | debug_check_no_locks_freed((void *)sp, sizeof(*sp)); |
| 52 | lockdep_init_map(&sp->dep_map, name, key, 0); |
| 53 | return init_srcu_struct_fields(sp); |
| 54 | } |
| 55 | EXPORT_SYMBOL_GPL(__init_srcu_struct); |
| 56 | |
| 57 | #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
| 58 | |
| 59 | /** |
| 60 | * init_srcu_struct - initialize a sleep-RCU structure |
| 61 | * @sp: structure to initialize. |
| 62 | * |
| 63 | * Must invoke this on a given srcu_struct before passing that srcu_struct |
| 64 | * to any other function. Each srcu_struct represents a separate domain |
| 65 | * of SRCU protection. |
| 66 | */ |
| 67 | int init_srcu_struct(struct srcu_struct *sp) |
| 68 | { |
| 69 | return init_srcu_struct_fields(sp); |
| 70 | } |
| 71 | EXPORT_SYMBOL_GPL(init_srcu_struct); |
| 72 | |
| 73 | #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
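
/*
 * Example lifecycle (a sketch; "my_srcu", my_init(), and my_exit()
 * are hypothetical, not part of this file).  Each SRCU domain must be
 * initialized once before use and cleaned up after its last reader
 * and updater have finished:
 *
 *	static struct srcu_struct my_srcu;
 *
 *	static int __init my_init(void)
 *	{
 *		return init_srcu_struct(&my_srcu);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		cleanup_srcu_struct(&my_srcu);
 *	}
 */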
| 74 | |
| 75 | /* |
| 76 | * Returns approximate number of readers active on the specified rank |
| 77 | * of per-CPU counters. Also snapshots each counter's value in the |
 * corresponding element of sp->snap[] for later use in validating
| 79 | * the sum. |
| 80 | */ |
| 81 | static unsigned long srcu_readers_active_idx(struct srcu_struct *sp, int idx) |
| 82 | { |
| 83 | int cpu; |
| 84 | unsigned long sum = 0; |
| 85 | unsigned long t; |
| 86 | |
| 87 | for_each_possible_cpu(cpu) { |
| 88 | t = ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx]); |
| 89 | sum += t; |
| 90 | sp->snap[cpu] = t; |
| 91 | } |
| 92 | return sum & SRCU_REF_MASK; |
| 93 | } |
| 94 | |
| 95 | /* |
| 96 | * To be called from the update side after an index flip. Returns true |
| 97 | * if the modulo sum of the counters is stably zero, false if there is |
| 98 | * some possibility of non-zero. |
| 99 | */ |
| 100 | static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx) |
| 101 | { |
| 102 | int cpu; |
| 103 | |
| 104 | /* |
| 105 | * Note that srcu_readers_active_idx() can incorrectly return |
| 106 | * zero even though there is a pre-existing reader throughout. |
| 107 | * To see this, suppose that task A is in a very long SRCU |
| 108 | * read-side critical section that started on CPU 0, and that |
| 109 | * no other reader exists, so that the modulo sum of the counters |
| 110 | * is equal to one. Then suppose that task B starts executing |
| 111 | * srcu_readers_active_idx(), summing up to CPU 1, and then that |
| 112 | * task C starts reading on CPU 0, so that its increment is not |
| 113 | * summed, but finishes reading on CPU 2, so that its decrement |
| 114 | * -is- summed. Then when task B completes its sum, it will |
| 115 | * incorrectly get zero, despite the fact that task A has been |
| 116 | * in its SRCU read-side critical section the whole time. |
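	 *
	 * In timeline form (a compressed restatement of the above):
	 *
	 *	A: CPU 0's c[idx]++		(A reads throughout)
	 *	B: sum += CPU 0's c[idx]	(sum == 1)
	 *	C: CPU 0's c[idx]++		(B is already past CPU 0)
	 *	C: CPU 2's c[idx]--		(C's read side ends)
	 *	B: sum += CPU 1, CPU 2		(sum == 0, wrongly)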
| 117 | * |
| 118 | * We therefore do a validation step should srcu_readers_active_idx() |
| 119 | * return zero. |
| 120 | */ |
| 121 | if (srcu_readers_active_idx(sp, idx) != 0) |
| 122 | return false; |
| 123 | |
| 124 | /* |
| 125 | * Since the caller recently flipped ->completed, we can see at |
| 126 | * most one increment of each CPU's counter from this point |
| 127 | * forward. The reason for this is that the reader CPU must have |
	 * fetched the index before srcu_readers_active_idx() checked
| 129 | * that CPU's counter, but not yet incremented its counter. |
| 130 | * Its eventual counter increment will follow the read in |
| 131 | * srcu_readers_active_idx(), and that increment is immediately |
| 132 | * followed by smp_mb() B. Because smp_mb() D is between |
| 133 | * the ->completed flip and srcu_readers_active_idx()'s read, |
| 134 | * that CPU's subsequent load of ->completed must see the new |
| 135 | * value, and therefore increment the counter in the other rank. |
| 136 | */ |
| 137 | smp_mb(); /* A */ |
| 138 | |
| 139 | /* |
| 140 | * Now, we check the ->snap array that srcu_readers_active_idx() |
| 141 | * filled in from the per-CPU counter values. Since |
| 142 | * __srcu_read_lock() increments the upper bits of the per-CPU |
| 143 | * counter, an increment/decrement pair will change the value |
| 144 | * of the counter. Since there is only one possible increment, |
| 145 | * the only way to wrap the counter is to have a huge number of |
| 146 | * counter decrements, which requires a huge number of tasks and |
| 147 | * huge SRCU read-side critical-section nesting levels, even on |
| 148 | * 32-bit systems. |
| 149 | * |
| 150 | * All of the ways of confusing the readings require that the scan |
| 151 | * in srcu_readers_active_idx() see the read-side task's decrement, |
| 152 | * but not its increment. However, between that decrement and |
	 * increment are smp_mb() B and C.  Either or both of these pair
	 * with smp_mb() A above to ensure that the scan below will see
	 * the read-side task's increment, thus noting a difference in
| 156 | * the counter values between the two passes. |
| 157 | * |
| 158 | * Therefore, if srcu_readers_active_idx() returned zero, and |
| 159 | * none of the counters changed, we know that the zero was the |
| 160 | * correct sum. |
| 161 | * |
| 162 | * Of course, it is possible that a task might be delayed |
| 163 | * for a very long time in __srcu_read_lock() after fetching |
| 164 | * the index but before incrementing its counter. This |
| 165 | * possibility will be dealt with in __synchronize_srcu(). |
| 166 | */ |
| 167 | for_each_possible_cpu(cpu) |
| 168 | if (sp->snap[cpu] != |
| 169 | ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx])) |
| 170 | return false; /* False zero reading! */ |
| 171 | return true; |
| 172 | } |
| 173 | |
| 174 | /** |
| 175 | * srcu_readers_active - returns approximate number of readers. |
| 176 | * @sp: which srcu_struct to count active readers (holding srcu_read_lock). |
| 177 | * |
| 178 | * Note that this is not an atomic primitive, and can therefore suffer |
| 179 | * severe errors when invoked on an active srcu_struct. That said, it |
| 180 | * can be useful as an error check at cleanup time. |
| 181 | */ |
| 182 | static int srcu_readers_active(struct srcu_struct *sp) |
| 183 | { |
| 184 | return srcu_readers_active_idx(sp, 0) + srcu_readers_active_idx(sp, 1); |
| 185 | } |
| 186 | |
| 187 | /** |
| 188 | * cleanup_srcu_struct - deconstruct a sleep-RCU structure |
| 189 | * @sp: structure to clean up. |
| 190 | * |
| 191 | * Must invoke this after you are finished using a given srcu_struct that |
| 192 | * was initialized via init_srcu_struct(), else you leak memory. |
| 193 | */ |
| 194 | void cleanup_srcu_struct(struct srcu_struct *sp) |
| 195 | { |
| 196 | int sum; |
| 197 | |
| 198 | sum = srcu_readers_active(sp); |
| 199 | WARN_ON(sum); /* Leakage unless caller handles error. */ |
| 200 | if (sum != 0) |
| 201 | return; |
| 202 | free_percpu(sp->per_cpu_ref); |
| 203 | sp->per_cpu_ref = NULL; |
| 204 | } |
| 205 | EXPORT_SYMBOL_GPL(cleanup_srcu_struct); |
| 206 | |
| 207 | /* |
| 208 | * Counts the new reader in the appropriate per-CPU element of the |
| 209 | * srcu_struct. Must be called from process context. |
| 210 | * Returns an index that must be passed to the matching srcu_read_unlock(). |
| 211 | */ |
| 212 | int __srcu_read_lock(struct srcu_struct *sp) |
| 213 | { |
| 214 | int idx; |
| 215 | |
| 216 | preempt_disable(); |
| 217 | idx = rcu_dereference_index_check(sp->completed, |
| 218 | rcu_read_lock_sched_held()) & 0x1; |
| 219 | ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += |
| 220 | SRCU_USAGE_COUNT + 1; |
| 221 | smp_mb(); /* B */ /* Avoid leaking the critical section. */ |
| 222 | preempt_enable(); |
| 223 | return idx; |
| 224 | } |
| 225 | EXPORT_SYMBOL_GPL(__srcu_read_lock); |
| 226 | |
| 227 | /* |
| 228 | * Removes the count for the old reader from the appropriate per-CPU |
| 229 | * element of the srcu_struct. Note that this may well be a different |
| 230 | * CPU than that which was incremented by the corresponding srcu_read_lock(). |
| 231 | * Must be called from process context. |
| 232 | */ |
| 233 | void __srcu_read_unlock(struct srcu_struct *sp, int idx) |
| 234 | { |
| 235 | preempt_disable(); |
| 236 | smp_mb(); /* C */ /* Avoid leaking the critical section. */ |
| 237 | ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1; |
| 238 | preempt_enable(); |
| 239 | } |
| 240 | EXPORT_SYMBOL_GPL(__srcu_read_unlock); |
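
/*
 * Example read-side usage (a sketch; "my_srcu", "my_data", and
 * struct foo are hypothetical).  Readers normally go through the
 * srcu_read_lock() and srcu_read_unlock() wrappers in srcu.h rather
 * than calling the above directly, and may block in between:
 *
 *	int idx;
 *	struct foo *p;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(my_data, &my_srcu);
 *	... use p; sleeping is legal here ...
 *	srcu_read_unlock(&my_srcu, idx);
 */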
| 241 | |
| 242 | /* |
| 243 | * We use an adaptive strategy for synchronize_srcu() and especially for |
| 244 | * synchronize_srcu_expedited(). We spin for a fixed time period |
| 245 | * (defined below) to allow SRCU readers to exit their read-side critical |
 * sections.  If there are still some readers after that delay,
 * we repeatedly block for one-jiffy time periods.  This approach
| 248 | * has done well in testing, so there is no need for a config parameter. |
| 249 | */ |
| 250 | #define SYNCHRONIZE_SRCU_READER_DELAY 5 |
| 251 | |
| 252 | /* |
| 253 | * Wait until all pre-existing readers complete. Such readers |
| 254 | * will have used the index specified by "idx". |
| 255 | */ |
| 256 | static void wait_idx(struct srcu_struct *sp, int idx, bool expedited) |
| 257 | { |
| 258 | int trycount = 0; |
| 259 | |
| 260 | /* |
| 261 | * If a reader fetches the index before the ->completed increment, |
| 262 | * but increments its counter after srcu_readers_active_idx_check() |
| 263 | * sums it, then smp_mb() D will pair with __srcu_read_lock()'s |
| 264 | * smp_mb() B to ensure that the SRCU read-side critical section |
| 265 | * will see any updates that the current task performed before its |
| 266 | * call to synchronize_srcu(), or to synchronize_srcu_expedited(), |
| 267 | * as the case may be. |
| 268 | */ |
| 269 | smp_mb(); /* D */ |
| 270 | |
| 271 | /* |
| 272 | * SRCU read-side critical sections are normally short, so wait |
| 273 | * a small amount of time before possibly blocking. |
| 274 | */ |
| 275 | if (!srcu_readers_active_idx_check(sp, idx)) { |
| 276 | udelay(SYNCHRONIZE_SRCU_READER_DELAY); |
| 277 | while (!srcu_readers_active_idx_check(sp, idx)) { |
			if (expedited && ++trycount < 10)
| 279 | udelay(SYNCHRONIZE_SRCU_READER_DELAY); |
| 280 | else |
| 281 | schedule_timeout_interruptible(1); |
| 282 | } |
| 283 | } |
| 284 | |
| 285 | /* |
| 286 | * The following smp_mb() E pairs with srcu_read_unlock()'s |
	 * smp_mb() C to ensure that if srcu_readers_active_idx_check()
| 288 | * sees srcu_read_unlock()'s counter decrement, then any |
| 289 | * of the current task's subsequent code will happen after |
| 290 | * that SRCU read-side critical section. |
| 291 | * |
	 * It also orders the above waiting with respect to the
	 * subsequent index flip.
| 294 | */ |
| 295 | smp_mb(); /* E */ |
| 296 | } |
| 297 | |
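/*
 * Flip the bottom bit of ->completed, which serves as the index into
 * the per-CPU counter pairs; a plain increment therefore suffices.
 * The caller must hold ->mutex, which makes the non-atomic increment
 * safe against concurrent grace periods.
 */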
| 298 | static void srcu_flip(struct srcu_struct *sp) |
| 299 | { |
| 300 | sp->completed++; |
| 301 | } |
| 302 | |
| 303 | /* |
| 304 | * Helper function for synchronize_srcu() and synchronize_srcu_expedited(). |
| 305 | */ |
| 306 | static void __synchronize_srcu(struct srcu_struct *sp, bool expedited) |
| 307 | { |
| 308 | int busy_idx; |
| 309 | |
| 310 | rcu_lockdep_assert(!lock_is_held(&sp->dep_map) && |
| 311 | !lock_is_held(&rcu_bh_lock_map) && |
| 312 | !lock_is_held(&rcu_lock_map) && |
| 313 | !lock_is_held(&rcu_sched_lock_map), |
| 314 | "Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section"); |
| 315 | |
| 316 | mutex_lock(&sp->mutex); |
	busy_idx = sp->completed & 0x1UL;
| 318 | |
| 319 | /* |
| 320 | * If we recently flipped the index, there will be some readers |
| 321 | * using idx=0 and others using idx=1. Therefore, two calls to |
	 * wait_idx() suffice to ensure that all pre-existing readers
| 323 | * have completed: |
| 324 | * |
| 325 | * __synchronize_srcu() { |
| 326 | * wait_idx(sp, 0, expedited); |
| 327 | * wait_idx(sp, 1, expedited); |
| 328 | * } |
| 329 | * |
| 330 | * Starvation is prevented by the fact that we flip the index. |
| 331 | * While we wait on one index to clear out, almost all new readers |
| 332 | * will be using the other index. The number of new readers using the |
| 333 | * index we are waiting on is sharply bounded by roughly the number |
| 334 | * of CPUs. |
| 335 | * |
	 * How can new readers possibly be using the old pre-flip value
	 * of the index?  Consider the following sequence of events:
| 338 | * |
| 339 | * Suppose that during the previous grace period, a reader |
| 340 | * picked up the old value of the index, but did not increment |
| 341 | * its counter until after the previous instance of |
| 342 | * __synchronize_srcu() did the counter summation and recheck. |
| 343 | * That previous grace period was OK because the reader did |
| 344 | * not start until after the grace period started, so the grace |
| 345 | * period was not obligated to wait for that reader. |
| 346 | * |
| 347 | * However, this sequence of events is quite improbable, so |
	 * this first call to wait_idx(), which waits on the really old
	 * readers described above, will almost never need to wait.
| 350 | */ |
| 351 | wait_idx(sp, 1 - busy_idx, expedited); |
| 352 | |
| 353 | /* Flip the index to avoid reader-induced starvation. */ |
| 354 | srcu_flip(sp); |
| 355 | |
| 356 | /* Wait for recent pre-existing readers. */ |
| 357 | wait_idx(sp, busy_idx, expedited); |
| 358 | |
| 359 | mutex_unlock(&sp->mutex); |
| 360 | } |
| 361 | |
| 362 | /** |
| 363 | * synchronize_srcu - wait for prior SRCU read-side critical-section completion |
| 364 | * @sp: srcu_struct with which to synchronize. |
| 365 | * |
| 366 | * Flip the completed counter, and wait for the old count to drain to zero. |
| 367 | * As with classic RCU, the updater must use some separate means of |
| 368 | * synchronizing concurrent updates. Can block; must be called from |
| 369 | * process context. |
| 370 | * |
| 371 | * Note that it is illegal to call synchronize_srcu() from the corresponding |
| 372 | * SRCU read-side critical section; doing so will result in deadlock. |
| 373 | * However, it is perfectly legal to call synchronize_srcu() on one |
| 374 | * srcu_struct from some other srcu_struct's read-side critical section. |
| 375 | */ |
| 376 | void synchronize_srcu(struct srcu_struct *sp) |
| 377 | { |
	__synchronize_srcu(sp, false);
| 379 | } |
| 380 | EXPORT_SYMBOL_GPL(synchronize_srcu); |
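
/*
 * Example update-side usage (a sketch; "my_srcu", "my_lock", "gp",
 * and the foo pointers are hypothetical).  The updater publishes a
 * new version, waits out pre-existing readers, then frees the old:
 *
 *	spin_lock(&my_lock);
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(gp, new);
 *	spin_unlock(&my_lock);
 *	synchronize_srcu(&my_srcu);
 *	kfree(old);
 */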
| 381 | |
| 382 | /** |
| 383 | * synchronize_srcu_expedited - Brute-force SRCU grace period |
| 384 | * @sp: srcu_struct with which to synchronize. |
| 385 | * |
| 386 | * Wait for an SRCU grace period to elapse, but be more aggressive about |
| 387 | * spinning rather than blocking when waiting. |
| 388 | * |
| 389 | * Note that it is illegal to call this function while holding any lock |
| 390 | * that is acquired by a CPU-hotplug notifier. It is also illegal to call |
| 391 | * synchronize_srcu_expedited() from the corresponding SRCU read-side |
| 392 | * critical section; doing so will result in deadlock. However, it is |
| 393 | * perfectly legal to call synchronize_srcu_expedited() on one srcu_struct |
| 394 | * from some other srcu_struct's read-side critical section, as long as |
| 395 | * the resulting graph of srcu_structs is acyclic. |
| 396 | */ |
| 397 | void synchronize_srcu_expedited(struct srcu_struct *sp) |
| 398 | { |
	__synchronize_srcu(sp, true);
| 400 | } |
| 401 | EXPORT_SYMBOL_GPL(synchronize_srcu_expedited); |
| 402 | |
| 403 | /** |
| 404 | * srcu_batches_completed - return batches completed. |
| 405 | * @sp: srcu_struct on which to report batch completion. |
| 406 | * |
| 407 | * Report the number of batches, correlated with, but not necessarily |
| 408 | * precisely the same as, the number of grace periods that have elapsed. |
| 409 | */ |
| 410 | |
| 411 | long srcu_batches_completed(struct srcu_struct *sp) |
| 412 | { |
| 413 | return sp->completed; |
| 414 | } |
| 415 | EXPORT_SYMBOL_GPL(srcu_batches_completed); |