Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef __LINUX_SPINLOCK_H |
2 | #define __LINUX_SPINLOCK_H | |
3 | ||
4 | /* | |
5 | * include/linux/spinlock.h - generic locking declarations | |
6 | */ | |
7 | ||
8 | #include <linux/config.h> | |
9 | #include <linux/preempt.h> | |
10 | #include <linux/linkage.h> | |
11 | #include <linux/compiler.h> | |
12 | #include <linux/thread_info.h> | |
13 | #include <linux/kernel.h> | |
14 | #include <linux/stringify.h> | |
15 | ||
16 | #include <asm/processor.h> /* for cpu relax */ | |
17 | #include <asm/system.h> | |
18 | ||
19 | /* | |
20 | * Must define these before including other files, inline functions need them | |
21 | */ | |
22 | #define LOCK_SECTION_NAME \ | |
23 | ".text.lock." __stringify(KBUILD_BASENAME) | |
24 | ||
25 | #define LOCK_SECTION_START(extra) \ | |
26 | ".subsection 1\n\t" \ | |
27 | extra \ | |
28 | ".ifndef " LOCK_SECTION_NAME "\n\t" \ | |
29 | LOCK_SECTION_NAME ":\n\t" \ | |
30 | ".endif\n" | |
31 | ||
32 | #define LOCK_SECTION_END \ | |
33 | ".previous\n\t" | |
34 | ||
35 | #define __lockfunc fastcall __attribute__((section(".spinlock.text"))) | |
36 | ||
37 | /* | |
38 | * If CONFIG_SMP is set, pull in the _raw_* definitions | |
39 | */ | |
40 | #ifdef CONFIG_SMP | |
41 | ||
42 | #define assert_spin_locked(x) BUG_ON(!spin_is_locked(x)) | |
43 | #include <asm/spinlock.h> | |
44 | ||
45 | int __lockfunc _spin_trylock(spinlock_t *lock); | |
46 | int __lockfunc _read_trylock(rwlock_t *lock); | |
47 | int __lockfunc _write_trylock(rwlock_t *lock); | |
48 | ||
49 | void __lockfunc _spin_lock(spinlock_t *lock) __acquires(spinlock_t); | |
50 | void __lockfunc _read_lock(rwlock_t *lock) __acquires(rwlock_t); | |
51 | void __lockfunc _write_lock(rwlock_t *lock) __acquires(rwlock_t); | |
52 | ||
53 | void __lockfunc _spin_unlock(spinlock_t *lock) __releases(spinlock_t); | |
54 | void __lockfunc _read_unlock(rwlock_t *lock) __releases(rwlock_t); | |
55 | void __lockfunc _write_unlock(rwlock_t *lock) __releases(rwlock_t); | |
56 | ||
57 | unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) __acquires(spinlock_t); | |
58 | unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) __acquires(rwlock_t); | |
59 | unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) __acquires(rwlock_t); | |
60 | ||
61 | void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(spinlock_t); | |
62 | void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(spinlock_t); | |
63 | void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(rwlock_t); | |
64 | void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(rwlock_t); | |
65 | void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(rwlock_t); | |
66 | void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(rwlock_t); | |
67 | ||
68 | void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) __releases(spinlock_t); | |
69 | void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(spinlock_t); | |
70 | void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(spinlock_t); | |
71 | void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) __releases(rwlock_t); | |
72 | void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(rwlock_t); | |
73 | void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(rwlock_t); | |
74 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) __releases(rwlock_t); | |
75 | void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(rwlock_t); | |
76 | void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(rwlock_t); | |
77 | ||
78 | int __lockfunc _spin_trylock_bh(spinlock_t *lock); | |
79 | int __lockfunc generic_raw_read_trylock(rwlock_t *lock); | |
80 | int in_lock_functions(unsigned long addr); | |
81 | ||
82 | #else | |
83 | ||
/* On UP there is no out-of-line lock code, so no address is ever
 * "in" a lock function. */
#define in_lock_functions(ADDR) 0

#if !defined(CONFIG_PREEMPT) && !defined(CONFIG_DEBUG_SPINLOCK)
/* UP, no preemption, no debug: the lock is a no-op, so dec-and-lock
 * degenerates to a plain atomic decrement-and-test. */
# define _atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
# define ATOMIC_DEC_AND_LOCK
#endif
90 | ||
91 | #ifdef CONFIG_DEBUG_SPINLOCK | |
92 | ||
/* Magic value stamped into every initialized debug spinlock; checked
 * by CHECK_LOCK() to catch use of uninitialized locks. */
#define SPINLOCK_MAGIC	0x1D244B3C
typedef struct {
	unsigned long magic;		/* SPINLOCK_MAGIC when initialized */
	volatile unsigned long lock;	/* 0 = unlocked, 1 = locked */
	volatile unsigned int babble;	/* remaining warnings before going quiet */
	const char *module;		/* file that initialized the lock */
	char *owner;			/* file that currently holds the lock */
	int oline;			/* line number of current holder */
} spinlock_t;
/* babble starts at 10 for static initializers, 5 for runtime init below. */
#define SPIN_LOCK_UNLOCKED (spinlock_t) { SPINLOCK_MAGIC, 0, 10, __FILE__ , NULL, 0}

#define spin_lock_init(x) \
	do { \
		(x)->magic = SPINLOCK_MAGIC; \
		(x)->lock = 0; \
		(x)->babble = 5; \
		(x)->module = __FILE__; \
		(x)->owner = NULL; \
		(x)->oline = 0; \
	} while (0)
113 | ||
/*
 * Sanity-check the magic of a debug spinlock before any operation.
 * Fix: the old message claimed the caller was spin_is_locked(), but
 * CHECK_LOCK() is invoked from lock/trylock/unlock/unlock_wait as
 * well, so the diagnostic now names no specific operation.
 */
#define CHECK_LOCK(x) \
	do { \
	 	if ((x)->magic != SPINLOCK_MAGIC) { \
			printk(KERN_ERR "%s:%d: spinlock operation on uninitialized spinlock %p.\n", \
					__FILE__, __LINE__, (x)); \
		} \
	} while(0)
121 | ||
/* UP debug lock: warns (up to ->babble times) if the lock is already
 * held, then records the new owner's file/line. No actual spinning —
 * on UP a second locker would deadlock anyway. */
#define _raw_spin_lock(x)		\
	do { \
	 	CHECK_LOCK(x); \
		if ((x)->lock&&(x)->babble) { \
			(x)->babble--; \
			printk("%s:%d: spin_lock(%s:%p) already locked by %s/%d\n", \
					__FILE__,__LINE__, (x)->module, \
					(x), (x)->owner, (x)->oline); \
		} \
		(x)->lock = 1; \
		(x)->owner = __FILE__; \
		(x)->oline = __LINE__; \
	} while (0)

/* without debugging, spin_is_locked on UP always says
 * FALSE. --> printk if already locked. */
/* Note: still evaluates to 0 even when held — the warning is the only
 * debug behavior; callers must not rely on the value. */
#define spin_is_locked(x) \
	({ \
	 	CHECK_LOCK(x); \
		if ((x)->lock&&(x)->babble) { \
			(x)->babble--; \
			printk("%s:%d: spin_is_locked(%s:%p) already locked by %s/%d\n", \
					__FILE__,__LINE__, (x)->module, \
					(x), (x)->owner, (x)->oline); \
		} \
		0; \
	})

/* with debugging, assert_spin_locked() on UP does check
 * the lock value properly */
#define assert_spin_locked(x) \
	({ \
		CHECK_LOCK(x); \
		BUG_ON(!(x)->lock); \
	})

/* without debugging, spin_trylock on UP always says
 * TRUE. --> printk if already locked. */
/* Always "succeeds" (evaluates to 1) and takes ownership. */
#define _raw_spin_trylock(x) \
	({ \
	 	CHECK_LOCK(x); \
		if ((x)->lock&&(x)->babble) { \
			(x)->babble--; \
			printk("%s:%d: spin_trylock(%s:%p) already locked by %s/%d\n", \
					__FILE__,__LINE__, (x)->module, \
					(x), (x)->owner, (x)->oline); \
		} \
		(x)->lock = 1; \
		(x)->owner = __FILE__; \
		(x)->oline = __LINE__; \
		1; \
	})

/* On UP nothing can be waited for; just warn if the lock is held. */
#define spin_unlock_wait(x)	\
	do { \
	 	CHECK_LOCK(x); \
		if ((x)->lock&&(x)->babble) { \
			(x)->babble--; \
			printk("%s:%d: spin_unlock_wait(%s:%p) owned by %s/%d\n", \
					__FILE__,__LINE__, (x)->module, (x), \
					(x)->owner, (x)->oline); \
		}\
	} while (0)

/* Warn on unlock of a lock that is not held, then mark it free. */
#define _raw_spin_unlock(x) \
	do { \
	 	CHECK_LOCK(x); \
		if (!(x)->lock&&(x)->babble) { \
			(x)->babble--; \
			printk("%s:%d: spin_unlock(%s:%p) not locked\n", \
					__FILE__,__LINE__, (x)->module, (x));\
		} \
		(x)->lock = 0; \
	} while (0)
196 | #else | |
197 | /* | |
198 | * gcc versions before ~2.95 have a nasty bug with empty initializers. | |
199 | */ | |
200 | #if (__GNUC__ > 2) | |
201 | typedef struct { } spinlock_t; | |
202 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { } | |
203 | #else | |
204 | typedef struct { int gcc_is_buggy; } spinlock_t; | |
205 | #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } | |
206 | #endif | |
207 | ||
208 | /* | |
209 | * If CONFIG_SMP is unset, declare the _raw_* definitions as nops | |
210 | */ | |
211 | #define spin_lock_init(lock) do { (void)(lock); } while(0) | |
212 | #define _raw_spin_lock(lock) do { (void)(lock); } while(0) | |
213 | #define spin_is_locked(lock) ((void)(lock), 0) | |
214 | #define assert_spin_locked(lock) do { (void)(lock); } while(0) | |
215 | #define _raw_spin_trylock(lock) (((void)(lock), 1)) | |
216 | #define spin_unlock_wait(lock) (void)(lock) | |
217 | #define _raw_spin_unlock(lock) do { (void)(lock); } while(0) | |
218 | #endif /* CONFIG_DEBUG_SPINLOCK */ | |
219 | ||
/* RW spinlocks: No debug version */
/* On UP, readers/writers never contend; all ops are type-checked nops,
 * trylocks always succeed, can_lock always says yes. */

#if (__GNUC__ > 2)
typedef struct { } rwlock_t;
#define RW_LOCK_UNLOCKED (rwlock_t) { }
#else
typedef struct { int gcc_is_buggy; } rwlock_t;
#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
#endif

#define rwlock_init(lock)	do { (void)(lock); } while(0)
#define _raw_read_lock(lock)	do { (void)(lock); } while(0)
#define _raw_read_unlock(lock)	do { (void)(lock); } while(0)
#define _raw_write_lock(lock)	do { (void)(lock); } while(0)
#define _raw_write_unlock(lock)	do { (void)(lock); } while(0)
#define read_can_lock(lock)	(((void)(lock), 1))
#define write_can_lock(lock)	(((void)(lock), 1))
#define _raw_read_trylock(lock) ({ (void)(lock); (1); })
#define _raw_write_trylock(lock) ({ (void)(lock); (1); })
239 | ||
/* UP trylock wrappers: disable preemption first; on (impossible-here)
 * failure re-enable it so preempt count stays balanced. */
#define _spin_trylock(lock)	({preempt_disable(); _raw_spin_trylock(lock) ? \
				1 : ({preempt_enable(); 0;});})

#define _read_trylock(lock)	({preempt_disable();_raw_read_trylock(lock) ? \
				1 : ({preempt_enable(); 0;});})

#define _write_trylock(lock)	({preempt_disable(); _raw_write_trylock(lock) ? \
				1 : ({preempt_enable(); 0;});})

/* _bh variant also disables bottom halves; on failure, preemption is
 * re-enabled without rescheduling because local_bh_enable() may
 * schedule itself — keep this exact order. */
#define _spin_trylock_bh(lock)	({preempt_disable(); local_bh_disable(); \
				_raw_spin_trylock(lock) ? \
				1 : ({preempt_enable_no_resched(); local_bh_enable(); 0;});})
1da177e4 LT |
252 | |
/* UP lock/unlock: the "lock" is just a preemption-disable bracket.
 * __acquire/__release are sparse-only lock balance annotations. */
#define _spin_lock(lock)	\
do { \
	preempt_disable(); \
	_raw_spin_lock(lock); \
	__acquire(lock); \
} while(0)

#define _write_lock(lock) \
do { \
	preempt_disable(); \
	_raw_write_lock(lock); \
	__acquire(lock); \
} while(0)

#define _read_lock(lock)	\
do { \
	preempt_disable(); \
	_raw_read_lock(lock); \
	__acquire(lock); \
} while(0)

/* Unlock before preempt_enable(): re-enabling preemption may
 * reschedule immediately, and the lock must already be free. */
#define _spin_unlock(lock) \
do { \
	_raw_spin_unlock(lock); \
	preempt_enable(); \
	__release(lock); \
} while (0)

#define _write_unlock(lock) \
do { \
	_raw_write_unlock(lock); \
	preempt_enable(); \
	__release(lock); \
} while(0)

#define _read_unlock(lock)	\
do { \
	_raw_read_unlock(lock); \
	preempt_enable(); \
	__release(lock); \
} while(0)
294 | ||
/* UP irq/bh lock variants. Ordering is deliberate in every one:
 * first mask the interrupt source (irq save/disable or bh disable),
 * then disable preemption, then take the (nop) raw lock. */
#define _spin_lock_irqsave(lock, flags) \
do {	\
	local_irq_save(flags); \
	preempt_disable(); \
	_raw_spin_lock(lock); \
	__acquire(lock); \
} while (0)

#define _spin_lock_irq(lock) \
do { \
	local_irq_disable(); \
	preempt_disable(); \
	_raw_spin_lock(lock); \
	__acquire(lock); \
} while (0)

#define _spin_lock_bh(lock) \
do { \
	local_bh_disable(); \
	preempt_disable(); \
	_raw_spin_lock(lock); \
	__acquire(lock); \
} while (0)

#define _read_lock_irqsave(lock, flags) \
do {	\
	local_irq_save(flags); \
	preempt_disable(); \
	_raw_read_lock(lock); \
	__acquire(lock); \
} while (0)

#define _read_lock_irq(lock) \
do { \
	local_irq_disable(); \
	preempt_disable(); \
	_raw_read_lock(lock); \
	__acquire(lock); \
} while (0)

#define _read_lock_bh(lock) \
do { \
	local_bh_disable(); \
	preempt_disable(); \
	_raw_read_lock(lock); \
	__acquire(lock); \
} while (0)

#define _write_lock_irqsave(lock, flags) \
do {	\
	local_irq_save(flags); \
	preempt_disable(); \
	_raw_write_lock(lock); \
	__acquire(lock); \
} while (0)

#define _write_lock_irq(lock) \
do { \
	local_irq_disable(); \
	preempt_disable(); \
	_raw_write_lock(lock); \
	__acquire(lock); \
} while (0)

#define _write_lock_bh(lock) \
do { \
	local_bh_disable(); \
	preempt_disable(); \
	_raw_write_lock(lock); \
	__acquire(lock); \
} while (0)
366 | ||
/* UP irq/bh unlock variants — mirror of the lock ordering: release the
 * raw lock first, restore the interrupt state, then re-enable
 * preemption last. In the _bh variants preempt_enable_no_resched() is
 * used because local_bh_enable() will itself handle rescheduling. */
#define _spin_unlock_irqrestore(lock, flags) \
do { \
	_raw_spin_unlock(lock); \
	local_irq_restore(flags); \
	preempt_enable(); \
	__release(lock); \
} while (0)

#define _spin_unlock_irq(lock) \
do { \
	_raw_spin_unlock(lock); \
	local_irq_enable(); \
	preempt_enable(); \
	__release(lock); \
} while (0)

#define _spin_unlock_bh(lock) \
do { \
	_raw_spin_unlock(lock); \
	preempt_enable_no_resched(); \
	local_bh_enable(); \
	__release(lock); \
} while (0)

#define _write_unlock_bh(lock) \
do { \
	_raw_write_unlock(lock); \
	preempt_enable_no_resched(); \
	local_bh_enable(); \
	__release(lock); \
} while (0)

#define _read_unlock_irqrestore(lock, flags) \
do { \
	_raw_read_unlock(lock); \
	local_irq_restore(flags); \
	preempt_enable(); \
	__release(lock); \
} while (0)

#define _write_unlock_irqrestore(lock, flags) \
do { \
	_raw_write_unlock(lock); \
	local_irq_restore(flags); \
	preempt_enable(); \
	__release(lock); \
} while (0)

#define _read_unlock_irq(lock)	\
do { \
	_raw_read_unlock(lock);	\
	local_irq_enable();	\
	preempt_enable();	\
	__release(lock); \
} while (0)

#define _read_unlock_bh(lock)	\
do { \
	_raw_read_unlock(lock);	\
	preempt_enable_no_resched();	\
	local_bh_enable();	\
	__release(lock); \
} while (0)

#define _write_unlock_irq(lock)	\
do { \
	_raw_write_unlock(lock);	\
	local_irq_enable();	\
	preempt_enable();	\
	__release(lock); \
} while (0)
438 | ||
439 | #endif /* !SMP */ | |
440 | ||
441 | /* | |
442 | * Define the various spin_lock and rw_lock methods. Note we define these | |
443 | * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various | |
444 | * methods are defined as nops in the case they are not required. | |
445 | */ | |
446 | #define spin_trylock(lock) __cond_lock(_spin_trylock(lock)) | |
447 | #define read_trylock(lock) __cond_lock(_read_trylock(lock)) | |
448 | #define write_trylock(lock) __cond_lock(_write_trylock(lock)) | |
449 | ||
450 | #define spin_lock(lock) _spin_lock(lock) | |
451 | #define write_lock(lock) _write_lock(lock) | |
452 | #define read_lock(lock) _read_lock(lock) | |
453 | ||
454 | #ifdef CONFIG_SMP | |
455 | #define spin_lock_irqsave(lock, flags) flags = _spin_lock_irqsave(lock) | |
456 | #define read_lock_irqsave(lock, flags) flags = _read_lock_irqsave(lock) | |
457 | #define write_lock_irqsave(lock, flags) flags = _write_lock_irqsave(lock) | |
458 | #else | |
459 | #define spin_lock_irqsave(lock, flags) _spin_lock_irqsave(lock, flags) | |
460 | #define read_lock_irqsave(lock, flags) _read_lock_irqsave(lock, flags) | |
461 | #define write_lock_irqsave(lock, flags) _write_lock_irqsave(lock, flags) | |
462 | #endif | |
463 | ||
464 | #define spin_lock_irq(lock) _spin_lock_irq(lock) | |
465 | #define spin_lock_bh(lock) _spin_lock_bh(lock) | |
466 | ||
467 | #define read_lock_irq(lock) _read_lock_irq(lock) | |
468 | #define read_lock_bh(lock) _read_lock_bh(lock) | |
469 | ||
470 | #define write_lock_irq(lock) _write_lock_irq(lock) | |
471 | #define write_lock_bh(lock) _write_lock_bh(lock) | |
472 | ||
473 | #define spin_unlock(lock) _spin_unlock(lock) | |
474 | #define write_unlock(lock) _write_unlock(lock) | |
475 | #define read_unlock(lock) _read_unlock(lock) | |
476 | ||
477 | #define spin_unlock_irqrestore(lock, flags) _spin_unlock_irqrestore(lock, flags) | |
478 | #define spin_unlock_irq(lock) _spin_unlock_irq(lock) | |
479 | #define spin_unlock_bh(lock) _spin_unlock_bh(lock) | |
480 | ||
481 | #define read_unlock_irqrestore(lock, flags) _read_unlock_irqrestore(lock, flags) | |
482 | #define read_unlock_irq(lock) _read_unlock_irq(lock) | |
483 | #define read_unlock_bh(lock) _read_unlock_bh(lock) | |
484 | ||
485 | #define write_unlock_irqrestore(lock, flags) _write_unlock_irqrestore(lock, flags) | |
486 | #define write_unlock_irq(lock) _write_unlock_irq(lock) | |
487 | #define write_unlock_bh(lock) _write_unlock_bh(lock) | |
488 | ||
489 | #define spin_trylock_bh(lock) __cond_lock(_spin_trylock_bh(lock)) | |
490 | ||
491 | #define spin_trylock_irq(lock) \ | |
492 | ({ \ | |
493 | local_irq_disable(); \ | |
494 | _spin_trylock(lock) ? \ | |
495 | 1 : ({local_irq_enable(); 0; }); \ | |
496 | }) | |
497 | ||
498 | #define spin_trylock_irqsave(lock, flags) \ | |
499 | ({ \ | |
500 | local_irq_save(flags); \ | |
501 | _spin_trylock(lock) ? \ | |
502 | 1 : ({local_irq_restore(flags); 0;}); \ | |
503 | }) | |
504 | ||
505 | #ifdef CONFIG_LOCKMETER | |
506 | extern void _metered_spin_lock (spinlock_t *lock); | |
507 | extern void _metered_spin_unlock (spinlock_t *lock); | |
508 | extern int _metered_spin_trylock(spinlock_t *lock); | |
509 | extern void _metered_read_lock (rwlock_t *lock); | |
510 | extern void _metered_read_unlock (rwlock_t *lock); | |
511 | extern void _metered_write_lock (rwlock_t *lock); | |
512 | extern void _metered_write_unlock (rwlock_t *lock); | |
513 | extern int _metered_read_trylock (rwlock_t *lock); | |
514 | extern int _metered_write_trylock(rwlock_t *lock); | |
515 | #endif | |
516 | ||
517 | /* "lock on reference count zero" */ | |
518 | #ifndef ATOMIC_DEC_AND_LOCK | |
519 | #include <asm/atomic.h> | |
520 | extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); | |
521 | #endif | |
522 | ||
523 | #define atomic_dec_and_lock(atomic,lock) __cond_lock(_atomic_dec_and_lock(atomic,lock)) | |
524 | ||
525 | /* | |
526 | * bit-based spin_lock() | |
527 | * | |
528 | * Don't use this unless you really need to: spin_lock() and spin_unlock() | |
529 | * are significantly faster. | |
530 | */ | |
/*
 * Acquire the bit-spinlock at bit `bitnum` of the word at `addr`,
 * spinning until it is free. Preemption is disabled while the lock
 * is held (and on UP that disable IS the lock).
 */
static inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
	/*
	 * Assuming the lock is uncontended, this never enters
	 * the body of the outer loop. If it is contended, then
	 * within the inner loop a non-atomic test is used to
	 * busywait with less bus contention for a good time to
	 * attempt to acquire the lock bit.
	 */
	preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	while (test_and_set_bit(bitnum, addr)) {
		while (test_bit(bitnum, addr)) {
			/* Allow preemption while busy-waiting, but
			 * re-disable it before retrying the atomic op. */
			preempt_enable();
			cpu_relax();
			preempt_disable();
		}
	}
#endif
	__acquire(bitlock);
}
552 | ||
553 | /* | |
554 | * Return true if it was acquired | |
555 | */ | |
/*
 * Return true if it was acquired
 */
/* Non-blocking bit-spinlock attempt; on failure preemption is
 * re-enabled so the preempt count stays balanced. On UP (non-debug)
 * it always succeeds. */
static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
{
	preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	if (test_and_set_bit(bitnum, addr)) {
		preempt_enable();
		return 0;
	}
#endif
	__acquire(bitlock);
	return 1;
}
568 | ||
569 | /* | |
570 | * bit-based spin_unlock() | |
571 | */ | |
/*
 * bit-based spin_unlock()
 */
static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	/* Unlocking a bit that is not set is a caller bug. */
	BUG_ON(!test_bit(bitnum, addr));
	/* Barrier: critical-section stores must be visible before the
	 * lock bit is cleared. */
	smp_mb__before_clear_bit();
	clear_bit(bitnum, addr);
#endif
	preempt_enable();
	__release(bitlock);
}
582 | ||
583 | /* | |
584 | * Return true if the lock is held. | |
585 | */ | |
/*
 * Return true if the lock is held.
 */
/* On UP+PREEMPT the bit carries no state, so "held" is approximated
 * by preemption being disabled; on plain UP it is always reported
 * held (suitable only for assertions). */
static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	return test_bit(bitnum, addr);
#elif defined CONFIG_PREEMPT
	return preempt_count();
#else
	return 1;
#endif
}
596 | ||
/* Convenience definers for statically-initialized (unlocked) locks. */
#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
#define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED

/**
 *  spin_can_lock - would spin_trylock() succeed?
 *  @lock: the spinlock in question.
 */
#define spin_can_lock(lock)	(!spin_is_locked(lock))
605 | ||
606 | #endif /* __LINUX_SPINLOCK_H */ |