#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes. There are two types
 * of readers:
 * 1. Sequence readers which never block a writer but may have to retry
 *    if a writer is in progress, detected by a change in the sequence number.
 *    Writers do not wait for a sequence reader.
 * 2. Locking readers which will wait if a writer or another locking reader
 *    is in progress. A locking reader in progress will also block a writer
 *    from going forward. Unlike the regular rwlock, the read lock here is
 *    exclusive so that only one locking reader can get it.
 *
 * This is not as cache friendly as brlock. Also, this may not work well
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected non-blocking reader usage:
 *
 *      do {
 *              seq = read_seqbegin(&foo);
 *              ...
 *      } while (read_seqretry(&foo, seq));
 *
 * On non-SMP the spin locks disappear but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 */

#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <asm/processor.h>

/*
 * Version using sequence counter only.
 * This can be used when the code has its own mutex protecting the
 * update, which starts before the write_seqcount_begin() and ends
 * after the write_seqcount_end().
 */
typedef struct seqcount {
        unsigned sequence;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
#endif
} seqcount_t;

static inline void __seqcount_init(seqcount_t *s, const char *name,
                                   struct lock_class_key *key)
{
        /*
         * Make sure we are not reinitializing a held lock:
         */
        lockdep_init_map(&s->dep_map, name, key, 0);
        s->sequence = 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define SEQCOUNT_DEP_MAP_INIT(lockname) \
                .dep_map = { .name = #lockname }

# define seqcount_init(s)                                       \
        do {                                                    \
                static struct lock_class_key __key;             \
                __seqcount_init((s), #s, &__key);               \
        } while (0)

static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
{
        seqcount_t *l = (seqcount_t *)s;
        unsigned long flags;

        local_irq_save(flags);
        seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
        seqcount_release(&l->dep_map, 1, _RET_IP_);
        local_irq_restore(flags);
}

#else
# define SEQCOUNT_DEP_MAP_INIT(lockname)
# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
# define seqcount_lockdep_reader_access(x)
#endif

#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname) }

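/*
 * Illustrative sketch (not part of the API above): declaring and
 * initializing a seqcount_t, statically or at runtime. "foo_seq" and
 * "foo" are hypothetical names used only for this example.
 *
 *      static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
 *
 * or, for a dynamically allocated structure:
 *
 *      seqcount_init(&foo->seq);
 */
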
/**
 * __read_seqcount_begin - begin a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline unsigned __read_seqcount_begin(const seqcount_t *s)
{
        unsigned ret;

repeat:
        ret = READ_ONCE(s->sequence);
        if (unlikely(ret & 1)) {
                cpu_relax();
                goto repeat;
        }
        return ret;
}

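/*
 * A minimal sketch (not from the original header) of the calling
 * convention __read_seqcount_begin() expects: the caller supplies the
 * smp_rmb() ordering itself. "foo_seq", "X" and "x" are hypothetical.
 *
 *      unsigned seq;
 *
 *      do {
 *              seq = __read_seqcount_begin(&foo_seq);
 *              smp_rmb();      <- order the protected loads after "sequence"
 *              x = X;
 *              smp_rmb();      <- order the protected loads before the retry
 *      } while (__read_seqcount_retry(&foo_seq, seq));
 */
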
/**
 * raw_read_seqcount - Read the raw seqcount
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_read_seqcount opens a read critical section of the given
 * seqcount without any lockdep checking and without checking or
 * masking the LSB. Calling code is responsible for handling that.
 */
static inline unsigned raw_read_seqcount(const seqcount_t *s)
{
        unsigned ret = READ_ONCE(s->sequence);
        smp_rmb();
        return ret;
}

/**
 * raw_read_seqcount_begin - start seq-read critical section w/o lockdep
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_read_seqcount_begin opens a read critical section of the given
 * seqcount, but without any lockdep checking. Validity of the critical
 * section is tested by calling read_seqcount_retry().
 */
static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
{
        unsigned ret = __read_seqcount_begin(s);
        smp_rmb();
        return ret;
}

/**
 * read_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * read_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling read_seqcount_retry().
 */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
        seqcount_lockdep_reader_access(s);
        return raw_read_seqcount_begin(s);
}

/**
 * raw_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling read_seqcount_retry().
 *
 * Unlike read_seqcount_begin(), this function will not wait for the count
 * to stabilize. If a writer is active when we begin, we will fail the
 * read_seqcount_retry() instead of stabilizing at the beginning of the
 * critical section.
 */
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
        unsigned ret = READ_ONCE(s->sequence);
        smp_rmb();
        return ret & ~1;
}

/**
 * __read_seqcount_retry - end a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
        return unlikely(s->sequence != start);
}

/**
 * read_seqcount_retry - end a seq-read critical section
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * read_seqcount_retry closes a read critical section of the given seqcount.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
        smp_rmb();
        return __read_seqcount_retry(s, start);
}

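/*
 * For reference, the canonical lockless read-side pattern built from
 * read_seqcount_begin() and read_seqcount_retry() ("foo_seq" and the
 * protected data are hypothetical):
 *
 *      unsigned seq;
 *
 *      do {
 *              seq = read_seqcount_begin(&foo_seq);
 *              ... read the protected data ...
 *      } while (read_seqcount_retry(&foo_seq, seq));
 */
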
static inline void raw_write_seqcount_begin(seqcount_t *s)
{
        s->sequence++;
        smp_wmb();
}

static inline void raw_write_seqcount_end(seqcount_t *s)
{
        smp_wmb();
        s->sequence++;
}

/**
 * raw_write_seqcount_barrier - do a seq write barrier
 * @s: pointer to seqcount_t
 *
 * This can be used to provide an ordering guarantee instead of the
 * usual consistency guarantee. It is one wmb cheaper, because we can
 * collapse the two back-to-back wmb()s.
 *
 *      seqcount_t seq;
 *      bool X = true, Y = false;
 *
 *      void read(void)
 *      {
 *              bool x, y;
 *
 *              do {
 *                      unsigned s = read_seqcount_begin(&seq);
 *
 *                      x = X; y = Y;
 *
 *              } while (read_seqcount_retry(&seq, s));
 *
 *              BUG_ON(!x && !y);
 *      }
 *
 *      void write(void)
 *      {
 *              Y = true;
 *
 *              raw_write_seqcount_barrier(&seq);
 *
 *              X = false;
 *      }
 */
static inline void raw_write_seqcount_barrier(seqcount_t *s)
{
        s->sequence++;
        smp_wmb();
        s->sequence++;
}

/*
 * raw_write_seqcount_latch - redirect readers to even/odd copy
 * @s: pointer to seqcount_t
 */
static inline void raw_write_seqcount_latch(seqcount_t *s)
{
        smp_wmb();      /* prior stores before incrementing "sequence" */
        s->sequence++;
        smp_wmb();      /* increment "sequence" before following stores */
}

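/*
 * A sketch of the even/odd latch pattern this helper enables; the
 * struct, modify() and query() below are hypothetical, and the writer
 * must still be serialized externally:
 *
 *      struct latch_struct {
 *              seqcount_t      seq;
 *              struct data     data[2];
 *      };
 *
 *      void latch_modify(struct latch_struct *latch, ...)
 *      {
 *              raw_write_seqcount_latch(&latch->seq);
 *              modify(&latch->data[0], ...);   <- readers now use data[1]
 *              raw_write_seqcount_latch(&latch->seq);
 *              modify(&latch->data[1], ...);   <- readers now use data[0]
 *      }
 *
 *      struct entry *latch_query(struct latch_struct *latch, ...)
 *      {
 *              struct entry *entry;
 *              unsigned seq;
 *
 *              do {
 *                      seq = raw_read_seqcount(&latch->seq);
 *                      entry = query(&latch->data[seq & 1], ...);
 *                      smp_rmb();
 *              } while (seq != READ_ONCE(latch->seq.sequence));
 *
 *              return entry;
 *      }
 */
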
/*
 * Sequence counter only version assumes that callers are using their
 * own mutexing.
 */
static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
        raw_write_seqcount_begin(s);
        seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
}

static inline void write_seqcount_begin(seqcount_t *s)
{
        write_seqcount_begin_nested(s, 0);
}

static inline void write_seqcount_end(seqcount_t *s)
{
        seqcount_release(&s->dep_map, 1, _RET_IP_);
        raw_write_seqcount_end(s);
}

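/*
 * A sketch of the expected write side, with the caller's own mutex
 * serializing writers ("foo_lock" and "foo_seq" are hypothetical):
 *
 *      mutex_lock(&foo_lock);
 *      write_seqcount_begin(&foo_seq);
 *      ... update the protected data ...
 *      write_seqcount_end(&foo_seq);
 *      mutex_unlock(&foo_lock);
 */
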
/**
 * write_seqcount_invalidate - invalidate in-progress read-side seq operations
 * @s: pointer to seqcount_t
 *
 * After write_seqcount_invalidate, no read-side seq operations will complete
 * successfully and see data older than this.
 */
static inline void write_seqcount_invalidate(seqcount_t *s)
{
        smp_wmb();
        s->sequence += 2;
}

typedef struct {
        struct seqcount seqcount;
        spinlock_t lock;
} seqlock_t;

/*
 * These macros triggered gcc-3.x compile-time problems. We think these are
 * OK now. Be cautious.
 */
#define __SEQLOCK_UNLOCKED(lockname)                    \
        {                                               \
                .seqcount = SEQCNT_ZERO(lockname),      \
                .lock = __SPIN_LOCK_UNLOCKED(lockname)  \
        }

#define seqlock_init(x)                                 \
        do {                                            \
                seqcount_init(&(x)->seqcount);          \
                spin_lock_init(&(x)->lock);             \
        } while (0)

#define DEFINE_SEQLOCK(x) \
        seqlock_t x = __SEQLOCK_UNLOCKED(x)

/*
 * Read side functions for starting and finalizing a read side section.
 */
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
        return read_seqcount_begin(&sl->seqcount);
}

static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
        return read_seqcount_retry(&sl->seqcount, start);
}

/*
 * Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * There is no need for preempt_disable() because spin_lock already
 * disables preemption.
 */
static inline void write_seqlock(seqlock_t *sl)
{
        spin_lock(&sl->lock);
        write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock(seqlock_t *sl)
{
        write_seqcount_end(&sl->seqcount);
        spin_unlock(&sl->lock);
}

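/*
 * Typical seqlock_t usage, for illustration ("foo" is a hypothetical
 * seqlock). The read side retries, the write side just locks:
 *
 *      unsigned seq;
 *
 *      do {
 *              seq = read_seqbegin(&foo);
 *              ... read the protected data ...
 *      } while (read_seqretry(&foo, seq));
 *
 *      write_seqlock(&foo);
 *      ... update the protected data ...
 *      write_sequnlock(&foo);
 */
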
static inline void write_seqlock_bh(seqlock_t *sl)
{
        spin_lock_bh(&sl->lock);
        write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_bh(seqlock_t *sl)
{
        write_seqcount_end(&sl->seqcount);
        spin_unlock_bh(&sl->lock);
}

static inline void write_seqlock_irq(seqlock_t *sl)
{
        spin_lock_irq(&sl->lock);
        write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_irq(seqlock_t *sl)
{
        write_seqcount_end(&sl->seqcount);
        spin_unlock_irq(&sl->lock);
}

static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
        unsigned long flags;

        spin_lock_irqsave(&sl->lock, flags);
        write_seqcount_begin(&sl->seqcount);
        return flags;
}

#define write_seqlock_irqsave(lock, flags)                      \
        do { flags = __write_seqlock_irqsave(lock); } while (0)

static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
        write_seqcount_end(&sl->seqcount);
        spin_unlock_irqrestore(&sl->lock, flags);
}

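/*
 * Sketch of the irqsave variant, for writers that can race with
 * readers or writers in hardirq context ("foo" is hypothetical):
 *
 *      unsigned long flags;
 *
 *      write_seqlock_irqsave(&foo, flags);
 *      ... update the protected data ...
 *      write_sequnlock_irqrestore(&foo, flags);
 */
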
/*
 * A locking reader exclusively locks out other writers and locking readers,
 * but doesn't update the sequence number. Acts like a normal spin_lock/unlock.
 * There is no need for preempt_disable() because spin_lock already
 * disables preemption.
 */
static inline void read_seqlock_excl(seqlock_t *sl)
{
        spin_lock(&sl->lock);
}

static inline void read_sequnlock_excl(seqlock_t *sl)
{
        spin_unlock(&sl->lock);
}

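/*
 * Sketch of a locking reader ("foo" is hypothetical). There is no retry
 * loop, at the price of excluding writers and other locking readers, so
 * following pointers in the protected data is safe here:
 *
 *      read_seqlock_excl(&foo);
 *      ... read the protected data ...
 *      read_sequnlock_excl(&foo);
 */
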
/**
 * read_seqbegin_or_lock - begin a sequence number check or locking block
 * @lock: sequence lock
 * @seq: sequence number to be checked
 *
 * First try it once optimistically without taking the lock. If that fails,
 * take the lock. The sequence number is also used as a marker for deciding
 * whether to be a lockless reader (even) or a locking reader (odd).
 * N.B. seq must be initialized to an even number to begin with.
 */
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
{
        if (!(*seq & 1))        /* Even */
                *seq = read_seqbegin(lock);
        else                    /* Odd */
                read_seqlock_excl(lock);
}

static inline int need_seqretry(seqlock_t *lock, int seq)
{
        return !(seq & 1) && read_seqretry(lock, seq);
}

static inline void done_seqretry(seqlock_t *lock, int seq)
{
        if (seq & 1)
                read_sequnlock_excl(lock);
}

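/*
 * The three helpers above combine into the following pattern (a sketch;
 * "foo" is hypothetical). seq must start even; callers that want to
 * force the locking pass on a retry set seq to 1 (odd) before looping
 * again:
 *
 *      int seq = 0;
 *
 *      do {
 *              read_seqbegin_or_lock(&foo, &seq);
 *              ... read the protected data ...
 *      } while (need_seqretry(&foo, seq));
 *      done_seqretry(&foo, seq);
 */
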
static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
        spin_lock_bh(&sl->lock);
}

static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
        spin_unlock_bh(&sl->lock);
}

static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
        spin_lock_irq(&sl->lock);
}

static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
        spin_unlock_irq(&sl->lock);
}

static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
{
        unsigned long flags;

        spin_lock_irqsave(&sl->lock, flags);
        return flags;
}

#define read_seqlock_excl_irqsave(lock, flags)                  \
        do { flags = __read_seqlock_excl_irqsave(lock); } while (0)

static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
        spin_unlock_irqrestore(&sl->lock, flags);
}

static inline unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
{
        unsigned long flags = 0;

        if (!(*seq & 1))        /* Even */
                *seq = read_seqbegin(lock);
        else                    /* Odd */
                read_seqlock_excl_irqsave(lock, flags);

        return flags;
}

static inline void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
{
        if (seq & 1)
                read_sequnlock_excl_irqrestore(lock, flags);
}
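
/*
 * Sketch of the irqsave form of the same pattern ("foo" is
 * hypothetical); the saved flags are only restored if the locking
 * (odd) pass was taken:
 *
 *      unsigned long flags;
 *      int seq = 0;
 *
 *      do {
 *              flags = read_seqbegin_or_lock_irqsave(&foo, &seq);
 *              ... read the protected data ...
 *      } while (need_seqretry(&foo, seq));
 *      done_seqretry_irqrestore(&foo, seq, flags);
 */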
#endif /* __LINUX_SEQLOCK_H */