#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_type_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>


/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)		\
	".subsection 1\n\t"			\
	extra					\
	".ifndef " LOCK_SECTION_NAME "\n\t"	\
	LOCK_SECTION_NAME ":\n\t"		\
	".endif\n"

#define LOCK_SECTION_END			\
	".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				   struct lock_class_key *key);
# define raw_spin_lock_init(lock)			\
do {							\
	static struct lock_class_key __key;		\
							\
	__raw_spin_lock_init((lock), #lock, &__key);	\
} while (0)

#else
# define raw_spin_lock_init(lock)			\
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif

#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)	(((void)(lock), 0))
#endif /*arch_spin_is_contended*/
#endif

/*
 * Despite its name it doesn't necessarily have to be a full barrier.
 * It should only guarantee that a STORE before the critical section
 * cannot be reordered with a LOAD inside this section.
 * spin_lock() is already a one-way barrier, so this LOAD cannot escape
 * out of the region. The default implementation therefore simply
 * ensures that a STORE cannot move into the critical section; smp_wmb()
 * should serialize it with another STORE done by spin_lock().
 */
#ifndef smp_mb__before_spinlock
#define smp_mb__before_spinlock()	smp_wmb()
#endif

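/*
 * Illustrative sketch (not part of this header; 'ready' and the
 * function are made up for the example): keep a STORE done before
 * taking the lock ordered against LOADs performed inside the
 * critical section, as described above.
 */
#if 0
static int ready;

static void publish_then_inspect(spinlock_t *lock, int *seen)
{
	ready = 1;			/* STORE before the critical section */
	smp_mb__before_spinlock();	/* order it against LOADs below */
	spin_lock(lock);
	*seen = ready;			/* LOAD inside the critical section */
	spin_unlock(lock);
}
#endif
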
/*
 * Place this after a lock-acquisition primitive to guarantee that
 * an UNLOCK+LOCK pair acts as a full barrier.  This guarantee applies
 * if the UNLOCK and LOCK are executed by the same CPU or if the
 * UNLOCK and LOCK operate on the same lock variable.
 */
#ifndef smp_mb__after_unlock_lock
#define smp_mb__after_unlock_lock()	do { } while (0)
#endif

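/*
 * Illustrative sketch (not part of this header; the lock names are
 * made up): upgrading an UNLOCK+LOCK sequence executed on the same
 * CPU to a full memory barrier.
 */
#if 0
static void unlock_then_lock(spinlock_t *a, spinlock_t *b)
{
	spin_unlock(a);
	spin_lock(b);
	smp_mb__after_unlock_lock();	/* UNLOCK+LOCK now acts as a full barrier */
}
#endif
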
/**
 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
 * @lock: the spinlock in question.
 */
#define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
}

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock_flags(&lock->raw_lock, *flags);
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	return arch_spin_trylock(&(lock)->raw_lock);
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
	arch_spin_unlock(&lock->raw_lock);
	__release(lock);
}
#endif

/*
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
 * various methods are defined as nops when they are not required.
 */
#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)	_raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
	_raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)			\
	 do {								\
		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	 } while (0)
#else
/*
 * Always evaluate the 'subclass' argument so that the compiler does
 * not warn about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)		\
	_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
#endif

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _raw_spin_lock_irqsave(lock);	\
	} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
	} while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	do {							\
		typecheck(unsigned long, flags);		\
		flags = _raw_spin_lock_irqsave(lock);		\
	} while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		_raw_spin_lock_irqsave(lock, flags);	\
	} while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)		\
	do {							\
		typecheck(unsigned long, flags);		\
		_raw_spin_unlock_irqrestore(lock, flags);	\
	} while (0)
#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
	__cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})

/**
 * raw_spin_can_lock - would raw_spin_trylock() succeed?
 * @lock: the spinlock in question.
 */
#define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

#define spin_lock_init(_lock)				\
do {							\
	spinlock_check(_lock);				\
	raw_spin_lock_init(&(_lock)->rlock);		\
} while (0)

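/*
 * Illustrative sketch (not part of this header; 'struct my_counter'
 * is made up for the example): declaring, initializing and using an
 * embedded spinlock.
 */
#if 0
struct my_counter {
	spinlock_t lock;
	unsigned long count;
};

static void my_counter_init(struct my_counter *c)
{
	spin_lock_init(&c->lock);
	c->count = 0;
}

static void my_counter_inc(struct my_counter *c)
{
	spin_lock(&c->lock);
	c->count++;
	spin_unlock(&c->lock);
}
#endif
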
static inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

static inline void spin_lock_bh(spinlock_t *lock)
{
	raw_spin_lock_bh(&lock->rlock);
}

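/*
 * Illustrative sketch (not part of this header; the function name is
 * made up): the _bh variant protects data shared between process
 * context and softirq (e.g. timer or networking) context.
 */
#if 0
static void update_from_process_context(spinlock_t *lock)
{
	spin_lock_bh(lock);	/* also disables softirqs on this CPU */
	/* ... touch data that a softirq handler also touches ... */
	spin_unlock_bh(lock);
}
#endif
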
static inline int spin_trylock(spinlock_t *lock)
{
	return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)			\
do {								\
	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)				\
do {									\
	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
} while (0)

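/*
 * Illustrative sketch (not part of this header; the function is made
 * up, SINGLE_DEPTH_NESTING comes from linux/lockdep.h): taking two
 * locks of the same lock class, in a well-defined order, without a
 * false lockdep report.
 */
#if 0
static void move_item(spinlock_t *src, spinlock_t *dst)
{
	spin_lock(src);
	spin_lock_nested(dst, SINGLE_DEPTH_NESTING);
	/* ... move the item from src's list to dst's list ... */
	spin_unlock(dst);
	spin_unlock(src);
}
#endif
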
static inline void spin_lock_irq(spinlock_t *lock)
{
	raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)				\
do {								\
	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)			\
do {									\
	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

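/*
 * Illustrative sketch (not part of this header; the function name is
 * made up): the canonical pattern for data shared with a hardware
 * interrupt handler, safe regardless of whether IRQs were already
 * disabled in the caller.
 */
#if 0
static void update_from_any_context(spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);		/* disables IRQs, saves prior state */
	/* ... touch data that an IRQ handler also touches ... */
	spin_unlock_irqrestore(lock, flags);	/* restores the saved IRQ state */
}
#endif
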
static inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

static inline void spin_unlock_bh(spinlock_t *lock)
{
	raw_spin_unlock_bh(&lock->rlock);
}

static inline void spin_unlock_irq(spinlock_t *lock)
{
	raw_spin_unlock_irq(&lock->rlock);
}

static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static inline int spin_trylock_bh(spinlock_t *lock)
{
	return raw_spin_trylock_bh(&lock->rlock);
}

static inline int spin_trylock_irq(spinlock_t *lock)
{
	return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)			\
({ \
	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})

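/*
 * Illustrative sketch (not part of this header; the function name is
 * made up): a trylock for opportunistic work that can simply be
 * skipped when the lock is contended.
 */
#if 0
static void try_flush(spinlock_t *lock)
{
	unsigned long flags;

	if (!spin_trylock_irqsave(lock, flags))
		return;		/* contended: give up rather than spin */
	/* ... do the optional work ... */
	spin_unlock_irqrestore(lock, flags);
}
#endif
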
static inline void spin_unlock_wait(spinlock_t *lock)
{
	raw_spin_unlock_wait(&lock->rlock);
}

static inline int spin_is_locked(spinlock_t *lock)
{
	return raw_spin_is_locked(&lock->rlock);
}

static inline int spin_is_contended(spinlock_t *lock)
{
	return raw_spin_is_contended(&lock->rlock);
}

static inline int spin_can_lock(spinlock_t *lock)
{
	return raw_spin_can_lock(&lock->rlock);
}

#define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

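/*
 * Illustrative sketch (not part of this header; 'struct my_obj' and
 * the list lock are made up, DEFINE_SPINLOCK/list_del/kfree come from
 * other kernel headers): the classic use of atomic_dec_and_lock() to
 * drop the last reference and unlink an object without racing lookups.
 */
#if 0
struct my_obj {
	atomic_t refcount;
	struct list_head node;
};

static DEFINE_SPINLOCK(my_obj_list_lock);

static void my_obj_put(struct my_obj *obj)
{
	/* Only takes the lock when the count actually hits zero */
	if (atomic_dec_and_lock(&obj->refcount, &my_obj_list_lock)) {
		list_del(&obj->node);
		spin_unlock(&my_obj_list_lock);
		kfree(obj);
	}
}
#endif
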
#endif /* __LINUX_SPINLOCK_H */