/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * see Documentation/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+3*4)

#define MAX_LOCKDEP_SUBCLASSES		8UL

/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and the single-depth subclass
 * are cached in lockdep_map. This optimization mainly targets rq->lock:
 * double_rq_lock() acquires it both at subclass 0 and at single depth.
 */
#define NR_LOCKDEP_CACHING_CLASSES	2

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};

extern struct lock_class_key __lockdep_no_validate__;

#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct list_head		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 * that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	u8				irq_context;
	u8				depth;
	u16				base;
	struct list_head		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;					/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:2;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:11;				/* 32 bits */
};

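/*
 * Illustrative sketch only (the real implementation lives in
 * kernel/lockdep.c): the chain key is folded step by step as each lock
 * is taken, roughly along the lines of
 *
 *	new_chain_key = hash_64(prev_chain_key ^ class_idx, 64);
 *
 * so that every distinct sequence of lock classes maps, with high
 * probability, to a distinct 64-bit key.
 */
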
/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }

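/*
 * Example (illustrative; the names are made up): a subsystem can declare
 * a stand-alone dependency map that is not embedded in a lock object,
 * similar to what the RCU and workqueue code do:
 *
 *	static struct lock_class_key my_dep_key;
 *	struct lockdep_map my_dep_map =
 *		STATIC_LOCKDEP_MAP_INIT("my_dep", &my_dep_key);
 */
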
/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub) \
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class(lock, &__lockdep_no_validate__)
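/*
 * Example (illustrative; the names are made up): two instances of the
 * same lock type that may legitimately nest can be split into separate
 * classes, so the validator does not report a false self-deadlock:
 *
 *	static struct lock_class_key nested_dev_key;
 *
 *	spin_lock_init(&dev->lock);
 *	if (dev->has_parent)
 *		lockdep_set_class(&dev->lock, &nested_dev_key);
 */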
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

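/*
 * Usage sketch (made-up names, following the example above): verify that
 * a lock was re-keyed as expected before relying on it:
 *
 *	WARN_ON(!lockdep_match_class(&dev->lock, &nested_dev_key));
 */
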
/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: disabled
 *   1: simple checks (freeing, held-at-exit-time, etc.)
 *   2: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);

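/*
 * Sketch of annotating a home-grown locking primitive (illustrative;
 * "my_lock" and its low-level helpers are made up): forward acquire and
 * release events so the custom lock takes part in validation:
 *
 *	struct my_lock {
 *		arch_lock_t		raw;
 *		struct lockdep_map	dep_map;
 *	};
 *
 *	void my_lock_fn(struct my_lock *l)
 *	{
 *		lock_acquire(&l->dep_map, 0, 0, 0, 2, NULL, _RET_IP_);
 *		arch_lock(&l->raw);
 *	}
 *
 *	void my_unlock_fn(struct my_lock *l)
 *	{
 *		lock_release(&l->dep_map, 1, _RET_IP_);
 *		arch_unlock(&l->raw);
 *	}
 */
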
#define lockdep_is_held(lock)	lock_is_held(&(lock)->dep_map)

extern int lock_is_held(struct lockdep_map *lock);

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

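/*
 * Usage sketch (modeled on the scheduler's double-lock code): after
 * having taken a lock at a nesting subclass, switch it back to
 * subclass 0 once the ordering constraint has passed:
 *
 *	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
 */
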
extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);

# define INIT_LOCKDEP	.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held(l));	\
	} while (0)

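/*
 * Example (illustrative; the names are made up): document and enforce
 * the locking precondition of a function that must be called with a
 * lock held:
 *
 *	static void my_dev_update(struct my_dev *dev)
 *	{
 *		lockdep_assert_held(&dev->lock);
 *		...
 *	}
 */
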
#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#else /* !LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_set_current_reclaim_state(g)	do { } while (0)
# define lockdep_clear_current_reclaim_state()	do { } while (0)
# define lockdep_trace_alloc(g)			do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case, since the result is not well defined and the caller
 * should rather #ifdef the call themselves.
 */

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)

#define lockdep_recursing(tsk)			(0)

#endif /* !LOCKDEP */

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

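/*
 * Usage sketch (modeled on the spinlock implementation): wrap the
 * slowpath with a trylock fastpath, so a contention event is recorded
 * only when the lock could not be taken immediately:
 *
 *	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 */
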
#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_TRACE_IRQFLAGS
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1

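/*
 * Example (illustrative): taking two locks of the same class in a
 * well-defined parent/child order:
 *
 *	mutex_lock(&parent->mutex);
 *	mutex_lock_nested(&child->mutex, SINGLE_DEPTH_NESTING);
 */
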
/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
# define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
# define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
# else
# define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
# define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, NULL, i)
# endif
# define spin_release(l, n, i)			lock_release(l, n, i)
#else
# define spin_acquire(l, s, t, i)		do { } while (0)
# define spin_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
# define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
# define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, NULL, i)
# else
# define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
# define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, NULL, i)
# endif
# define rwlock_release(l, n, i)		lock_release(l, n, i)
#else
# define rwlock_acquire(l, s, t, i)		do { } while (0)
# define rwlock_acquire_read(l, s, t, i)	do { } while (0)
# define rwlock_release(l, n, i)		do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
# define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
# define mutex_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
# else
# define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
# define mutex_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, n, i)
# endif
# define mutex_release(l, n, i)			lock_release(l, n, i)
#else
# define mutex_acquire(l, s, t, i)		do { } while (0)
# define mutex_acquire_nest(l, s, t, n, i)	do { } while (0)
# define mutex_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
# define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
# define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
# define rwsem_acquire_read(l, s, t, i)		lock_acquire(l, s, t, 1, 2, NULL, i)
# else
# define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
# define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, n, i)
# define rwsem_acquire_read(l, s, t, i)		lock_acquire(l, s, t, 1, 1, NULL, i)
# endif
# define rwsem_release(l, n, i)			lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i)		do { } while (0)
# define rwsem_acquire_nest(l, s, t, n, i)	do { } while (0)
# define rwsem_acquire_read(l, s, t, i)		do { } while (0)
# define rwsem_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
# define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
# define lock_map_acquire_read(l)	lock_acquire(l, 0, 0, 2, 2, NULL, _THIS_IP_)
# else
# define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
# define lock_map_acquire_read(l)	lock_acquire(l, 0, 0, 2, 1, NULL, _THIS_IP_)
# endif
# define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
#else
# define lock_map_acquire(l)			do { } while (0)
# define lock_map_acquire_read(l)		do { } while (0)
# define lock_map_release(l)			do { } while (0)
#endif

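/*
 * Usage sketch (modeled on the workqueue code): pseudo-acquire a
 * stand-alone dependency map around execution of a callback, making
 * flush-vs-lock deadlocks visible to the validator:
 *
 *	lock_map_acquire(&work->lockdep_map);
 *	f(work);
 *	lock_map_release(&work->lockdep_map);
 */
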
#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock) 						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock) 						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
#endif

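/*
 * Example (modeled on __might_fault()): a function that only sometimes
 * takes mmap_sem for reading can assert the dependency on every call,
 * so ordering bugs surface even on paths that skip the lock:
 *
 *	might_lock_read(&current->mm->mmap_sem);
 */
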
#ifdef CONFIG_PROVE_RCU
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#endif

#endif /* __LINUX_LOCKDEP_H */