Merge tag 'v3.18-rc1' into for_next
[deliverable/linux.git] include/linux/spinlock.h
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. versions for UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>


/*
 * Must define these before including other files; inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                                   struct lock_class_key *key);
# define raw_spin_lock_init(lock)                               \
do {                                                            \
        static struct lock_class_key __key;                     \
                                                                \
        __raw_spin_lock_init((lock), #lock, &__key);            \
} while (0)

#else
# define raw_spin_lock_init(lock)                               \
        do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
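
/*
 * Illustrative sketch (not part of the original header, excluded from the
 * build by "#if 0"): the two usual ways to obtain an initialized
 * raw_spinlock_t -- static definition via DEFINE_RAW_SPINLOCK() from
 * linux/spinlock_types.h, and runtime initialization via raw_spin_lock_init(),
 * which sets up a lock_class_key when CONFIG_DEBUG_SPINLOCK is enabled.
 * The names "example_lock" and "example_dynamic" are hypothetical.
 */
#if 0
static DEFINE_RAW_SPINLOCK(example_lock);       /* statically initialized */

static void example_init_dynamic(raw_spinlock_t *example_dynamic)
{
        raw_spin_lock_init(example_dynamic);    /* runtime initialization */
}
#endif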

#define raw_spin_is_locked(lock)        arch_spin_is_locked(&(lock)->raw_lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)     arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)     (((void)(lock), 0))
#endif /*arch_spin_is_contended*/
#endif

/*
 * Despite its name it doesn't necessarily have to be a full barrier.
 * It only has to guarantee that a STORE issued before the critical section
 * cannot be reordered with a LOAD inside that section.
 * spin_lock() itself is a one-way barrier, so such a LOAD cannot escape out
 * of the region. The default implementation therefore only needs to ensure
 * that the STORE cannot move into the critical section; smp_wmb() is enough
 * to serialize it against the STORE done by spin_lock().
 */
#ifndef smp_mb__before_spinlock
#define smp_mb__before_spinlock()       smp_wmb()
#endif
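
/*
 * Illustrative sketch (hypothetical, excluded from the build): the kind of
 * sequence the comment above is about. A plain STORE issued before taking
 * the lock must not be reordered with LOADs performed inside the critical
 * section; smp_mb__before_spinlock() provides that ordering. All names
 * below ("example_*") are made up for this sketch.
 */
#if 0
static DEFINE_RAW_SPINLOCK(example_lock);
static int example_flag, example_state;

static void example_store_then_lock(void)
{
        example_flag = 1;               /* STORE before the critical section */
        smp_mb__before_spinlock();      /* order it against the LOAD below   */
        raw_spin_lock(&example_lock);
        if (example_state)              /* LOAD inside the critical section  */
                example_flag = 2;
        raw_spin_unlock(&example_lock);
}
#endif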

/*
 * Place this after a lock-acquisition primitive to guarantee that
 * an UNLOCK+LOCK pair act as a full barrier.  This guarantee applies
 * if the UNLOCK and LOCK are executed by the same CPU or if the
 * UNLOCK and LOCK operate on the same lock variable.
 */
#ifndef smp_mb__after_unlock_lock
#define smp_mb__after_unlock_lock()     do { } while (0)
#endif
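
/*
 * Illustrative sketch (hypothetical, excluded from the build): placing
 * smp_mb__after_unlock_lock() directly after a lock acquisition so that the
 * preceding UNLOCK and this LOCK together act as a full memory barrier.
 * "example_a"/"example_b" are made-up locks; example_a is assumed to have
 * been taken earlier by the same CPU.
 */
#if 0
static DEFINE_RAW_SPINLOCK(example_a);
static DEFINE_RAW_SPINLOCK(example_b);

static void example_unlock_lock(void)
{
        raw_spin_unlock(&example_a);
        raw_spin_lock(&example_b);
        smp_mb__after_unlock_lock();    /* UNLOCK(a)+LOCK(b) == full barrier */
        /* accesses here are ordered against accesses before the unlock */
}
#endif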

/**
 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
 * @lock: the spinlock in question.
 */
#define raw_spin_unlock_wait(lock)      arch_spin_unlock_wait(&(lock)->raw_lock)

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock(&lock->raw_lock);
}

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock_flags(&lock->raw_lock, *flags);
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
        return arch_spin_trylock(&(lock)->raw_lock);
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
        arch_spin_unlock(&lock->raw_lock);
        __release(lock);
}
#endif

/*
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
 * various methods are defined as nops when they are not required.
 */
#define raw_spin_trylock(lock)  __cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)     _raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
        _raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)                       \
         do {                                                           \
                 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
                 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
         } while (0)
#else
/*
 * Always evaluate the 'subclass' argument so that the compiler does not
 * warn about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass) \
        _raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
#endif
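
/*
 * Illustrative sketch (hypothetical, excluded from the build): using the
 * _nested variant when two locks of the same lockdep class are legitimately
 * held at once, so lockdep does not flag it as recursive locking.
 * "struct example_node" and the parent/child ordering are assumptions made
 * for this sketch; SINGLE_DEPTH_NESTING comes from linux/lockdep.h.
 */
#if 0
struct example_node {
        raw_spinlock_t lock;
};

static void example_lock_pair(struct example_node *parent,
                              struct example_node *child)
{
        raw_spin_lock(&parent->lock);
        /* same class as parent->lock: annotate the inner acquisition */
        raw_spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
        /* ... operate on both nodes ... */
        raw_spin_unlock(&child->lock);
        raw_spin_unlock(&parent->lock);
}
#endif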

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                flags = _raw_spin_lock_irqsave(lock);   \
        } while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)             \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _raw_spin_lock_irqsave_nested(lock, subclass);  \
        } while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)             \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _raw_spin_lock_irqsave(lock);                   \
        } while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _raw_spin_lock_irqsave(lock, flags);    \
        } while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)     \
        raw_spin_lock_irqsave(lock, flags)

#endif
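
/*
 * Illustrative sketch (hypothetical, excluded from the build): the irqsave
 * variant for data shared with interrupt context. "flags" must be an
 * unsigned long -- the typecheck() in the macros above enforces this.
 * "example_lock" and "example_count" are made-up names.
 */
#if 0
static DEFINE_RAW_SPINLOCK(example_lock);
static unsigned int example_count;

static void example_irqsave(void)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&example_lock, flags);    /* disables local IRQs */
        example_count++;
        raw_spin_unlock_irqrestore(&example_lock, flags);
}
#endif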

#define raw_spin_lock_irq(lock)         _raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)          _raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)           _raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)       _raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)                 \
        do {                                                    \
                typecheck(unsigned long, flags);                \
                _raw_spin_unlock_irqrestore(lock, flags);       \
        } while (0)
#define raw_spin_unlock_bh(lock)        _raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
        __cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
        local_irq_disable(); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
        local_irq_save(flags); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_restore(flags); 0; }); \
})
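
/*
 * Illustrative sketch (hypothetical, excluded from the build): the
 * trylock-with-irqsave pattern. On failure the macro above has already
 * restored the saved interrupt state, so the caller only backs off.
 * All names ("example_*") are made up.
 */
#if 0
static DEFINE_RAW_SPINLOCK(example_lock);

static int example_try(void)
{
        unsigned long flags;

        if (!raw_spin_trylock_irqsave(&example_lock, flags))
                return 0;       /* contended; IRQ state already restored */
        /* ... short critical section ... */
        raw_spin_unlock_irqrestore(&example_lock, flags);
        return 1;
}
#endif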

/**
 * raw_spin_can_lock - would raw_spin_trylock() succeed?
 * @lock: the spinlock in question.
 */
#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
        return &lock->rlock;
}

#define spin_lock_init(_lock)                           \
do {                                                    \
        spinlock_check(_lock);                          \
        raw_spin_lock_init(&(_lock)->rlock);            \
} while (0)
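
/*
 * Illustrative sketch (hypothetical, excluded from the build): the usual
 * pattern for the non-raw API -- embed a spinlock_t in a structure, call
 * spin_lock_init() once during setup, then use spin_lock()/spin_unlock()
 * (defined below) around accesses. "struct example_dev" is a made-up type.
 */
#if 0
struct example_dev {
        spinlock_t      lock;
        unsigned int    pending;
};

static void example_dev_setup(struct example_dev *dev)
{
        spin_lock_init(&dev->lock);
}

static void example_dev_touch(struct example_dev *dev)
{
        spin_lock(&dev->lock);
        dev->pending++;
        spin_unlock(&dev->lock);
}
#endif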

static inline void spin_lock(spinlock_t *lock)
{
        raw_spin_lock(&lock->rlock);
}

static inline void spin_lock_bh(spinlock_t *lock)
{
        raw_spin_lock_bh(&lock->rlock);
}

static inline int spin_trylock(spinlock_t *lock)
{
        return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)                        \
do {                                                            \
        raw_spin_lock_nested(spinlock_check(lock), subclass);   \
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)                            \
do {                                                                    \
        raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);       \
} while (0)

static inline void spin_lock_irq(spinlock_t *lock)
{
        raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)                          \
do {                                                            \
        raw_spin_lock_irqsave(spinlock_check(lock), flags);     \
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)                 \
do {                                                                    \
        raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static inline void spin_unlock(spinlock_t *lock)
{
        raw_spin_unlock(&lock->rlock);
}

static inline void spin_unlock_bh(spinlock_t *lock)
{
        raw_spin_unlock_bh(&lock->rlock);
}

static inline void spin_unlock_irq(spinlock_t *lock)
{
        raw_spin_unlock_irq(&lock->rlock);
}

static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
        raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static inline int spin_trylock_bh(spinlock_t *lock)
{
        return raw_spin_trylock_bh(&lock->rlock);
}

static inline int spin_trylock_irq(spinlock_t *lock)
{
        return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)                       \
({                                                              \
        raw_spin_trylock_irqsave(spinlock_check(lock), flags);  \
})

static inline void spin_unlock_wait(spinlock_t *lock)
{
        raw_spin_unlock_wait(&lock->rlock);
}

static inline int spin_is_locked(spinlock_t *lock)
{
        return raw_spin_is_locked(&lock->rlock);
}

static inline int spin_is_contended(spinlock_t *lock)
{
        return raw_spin_is_contended(&lock->rlock);
}

static inline int spin_can_lock(spinlock_t *lock)
{
        return raw_spin_can_lock(&lock->rlock);
}

#define assert_spin_locked(lock)        assert_raw_spin_locked(&(lock)->rlock)

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
                __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
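
/*
 * Illustrative sketch (hypothetical, excluded from the build): the classic
 * refcount-drop pattern atomic_dec_and_lock() exists for -- the lock is
 * taken only when the final reference goes away, so the unlink and free can
 * happen atomically with respect to lookups. "struct example_obj" and its
 * fields are made-up names.
 */
#if 0
struct example_obj {
        atomic_t        refcount;
        spinlock_t      list_lock;      /* protects the containing list */
};

static void example_obj_put(struct example_obj *obj)
{
        if (atomic_dec_and_lock(&obj->refcount, &obj->list_lock)) {
                /* ... unlink obj from the list ... */
                spin_unlock(&obj->list_lock);
                /* ... free obj ... */
        }
}
#endif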

#endif /* __LINUX_SPINLOCK_H */