include/linux/lockdep.h
/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES (1+3*4)

#define MAX_LOCKDEP_SUBCLASSES 8UL

/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and the single depth subclass
 * are cached in lockdep_map. This optimization mainly targets rq->lock:
 * double_rq_lock() acquires this highly contended lock at both subclass 0
 * and single depth.
 */
#define NR_LOCKDEP_CACHING_CLASSES 2

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
        char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
        struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
};
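
/*
 * Illustration (a sketch, not part of this header): the statically
 * allocated key is what gives each lock class its unique address. An
 * init macro for a hypothetical my_lock type with an embedded dep_map
 * could supply one per call site like this:
 *
 *        #define my_lock_init(l)                                       \
 *        do {                                                          \
 *                static struct lock_class_key __key;                   \
 *                lockdep_init_map(&(l)->dep_map, #l, &__key, 0);       \
 *        } while (0)
 *
 * Every textual call site of my_lock_init() then gets its own key in
 * .data, and therefore its own lock class.
 */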

extern struct lock_class_key __lockdep_no_validate__;

#define LOCKSTAT_POINTS 4

/*
 * The lock-class itself:
 */
struct lock_class {
        /*
         * class-hash:
         */
        struct hlist_node hash_entry;

        /*
         * global list of all lock-classes:
         */
        struct list_head lock_entry;

        struct lockdep_subclass_key *key;
        unsigned int subclass;
        unsigned int dep_gen_id;

        /*
         * IRQ/softirq usage tracking bits:
         */
        unsigned long usage_mask;
        struct stack_trace usage_traces[XXX_LOCK_USAGE_STATES];

        /*
         * These fields represent a directed graph of lock dependencies,
         * to every node we attach a list of "forward" and a list of
         * "backward" graph nodes.
         */
        struct list_head locks_after, locks_before;

        /*
         * Generation counter, when doing certain classes of graph walking,
         * to ensure that we check one node only once:
         */
        unsigned int version;

        /*
         * Statistics counter:
         */
        unsigned long ops;

        const char *name;
        int name_version;

#ifdef CONFIG_LOCK_STAT
        unsigned long contention_point[LOCKSTAT_POINTS];
        unsigned long contending_point[LOCKSTAT_POINTS];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
        s64 min;
        s64 max;
        s64 total;
        unsigned long nr;
};

enum bounce_type {
        bounce_acquired_write,
        bounce_acquired_read,
        bounce_contended_write,
        bounce_contended_read,
        nr_bounce_types,

        bounce_acquired = bounce_acquired_write,
        bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
        unsigned long contention_point[LOCKSTAT_POINTS];
        unsigned long contending_point[LOCKSTAT_POINTS];
        struct lock_time read_waittime;
        struct lock_time write_waittime;
        struct lock_time read_holdtime;
        struct lock_time write_holdtime;
        unsigned long bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
        struct lock_class_key *key;
        struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
        const char *name;
#ifdef CONFIG_LOCK_STAT
        int cpu;
        unsigned long ip;
#endif
};
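
/*
 * Illustration (assumed layout, not from this file): a locking
 * primitive embeds a lockdep_map next to its architecture state,
 * roughly the way the real spinlock and mutex types do:
 *
 *        struct my_lock {
 *                arch_spinlock_t raw_lock;
 *        #ifdef CONFIG_DEBUG_LOCK_ALLOC
 *                struct lockdep_map dep_map;
 *        #endif
 *        };
 *
 * The dep_map is what the lock_acquire()/lock_release() hooks below
 * operate on; lockdep never inspects the lock word itself.
 */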

static inline void lockdep_copy_map(struct lockdep_map *to,
                                    struct lockdep_map *from)
{
        int i;

        *to = *from;
        /*
         * Since the class cache can be modified concurrently we could observe
         * half pointers (64bit arch using 32bit copy insns). Therefore clear
         * the caches and take the performance hit.
         *
         * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
         * that relies on cache abuse.
         */
        for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
                to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
        struct list_head entry;
        struct lock_class *class;
        struct stack_trace trace;
        int distance;

        /*
         * The parent field is used to implement breadth-first search, and the
         * bit 0 is reused to indicate if the lock has been accessed in BFS.
         */
        struct lock_list *parent;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
        u8 irq_context;
        u8 depth;
        u16 base;
        struct hlist_node entry;
        u64 chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS 13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

struct held_lock {
        /*
         * One-way hash of the dependency chain up to this point. We
         * hash the hashes step by step as the dependency chain grows.
         *
         * We use it for dependency-caching and we skip detection
         * passes and dependency-updates if there is a cache-hit, so
         * it is absolutely critical for 100% coverage of the validator
         * to have a unique key value for every unique dependency path
         * that can occur in the system, to make a unique hash value
         * as likely as possible - hence the 64-bit width.
         *
         * The task struct holds the current hash value (initialized
         * with zero), here we store the previous hash value:
         */
        u64 prev_chain_key;
        unsigned long acquire_ip;
        struct lockdep_map *instance;
        struct lockdep_map *nest_lock;
#ifdef CONFIG_LOCK_STAT
        u64 waittime_stamp;
        u64 holdtime_stamp;
#endif
        unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS;
        /*
         * The lock-stack is unified in that the lock chains of interrupt
         * contexts nest on top of process context chains, but we 'separate'
         * the hashes by starting with 0 if we cross into an interrupt
         * context, and we also do not add cross-context lock
         * dependencies - the lock usage graph walking covers that area
         * anyway, and we'd just unnecessarily increase the number of
         * dependencies otherwise. [Note: hardirq and softirq contexts
         * are separated from each other too.]
         *
         * The following field is used to detect when we cross into an
         * interrupt context:
         */
        unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
        unsigned int trylock:1; /* 16 bits */

        unsigned int read:2; /* see lock_acquire() comment */
        unsigned int check:1; /* see lock_acquire() comment */
        unsigned int hardirqs_off:1;
        unsigned int references:12; /* 32 bits */
        unsigned int pin_count;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
                             struct lock_class_key *key, int subclass);

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
        { .name = (_name), .key = (void *)(_key), }
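
/*
 * Usage sketch (illustrative; my_map/my_map_key are made-up names): a
 * statically defined map can be wired up at build time with a
 * file-scope key:
 *
 *        static struct lock_class_key my_map_key;
 *        static struct lockdep_map my_map =
 *                STATIC_LOCKDEP_MAP_INIT("my_map", &my_map_key);
 *
 * The class_cache[] entries are left zeroed and get filled in lazily
 * on the first lock_acquire() against the map.
 */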

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
                lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
                lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
                lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub) \
                lockdep_init_map(&(lock)->dep_map, #lock, \
                                 (lock)->dep_map.key, sub)
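
/*
 * Illustrative class-split sketch (my_node and leaf_node_lock_key are
 * assumptions, not real kernel API): if one init path creates locks
 * that are later used in two genuinely different roles, give the
 * special role its own key so the validator does not merge their
 * dependency graphs:
 *
 *        static struct lock_class_key leaf_node_lock_key;
 *
 *        void my_node_init(struct my_node *node)
 *        {
 *                spin_lock_init(&node->lock);
 *                if (node->is_leaf)
 *                        lockdep_set_class(&node->lock, &leaf_node_lock_key);
 *        }
 *
 * lockdep_set_class_and_name() works the same way while also choosing
 * the name that shows up in lockdep reports.
 */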

#define lockdep_set_novalidate_class(lock) \
        lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
                                    struct lock_class_key *key)
{
        return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                         int trylock, int read, int check,
                         struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
                         unsigned long ip);
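
/*
 * Sketch of how a locking primitive forwards events to the hooks above
 * (illustrative; my_lock/do_raw_my_lock are assumed helpers, not real
 * kernel API). read=0 marks an exclusive acquire, check=1 requests
 * full validation, and nest_lock is NULL for an ordinary acquisition:
 *
 *        static inline void my_lock(struct my_lock *l)
 *        {
 *                lock_acquire(&l->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *                do_raw_my_lock(l);
 *        }
 *
 *        static inline void my_unlock(struct my_lock *l)
 *        {
 *                lock_release(&l->dep_map, 0, _RET_IP_);
 *                do_raw_my_unlock(l);
 *        }
 *
 * The acquire is reported *before* the low-level lock is taken so that
 * a deadlock is flagged instead of silently hanging.
 */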

#define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map)

extern int lock_is_held(struct lockdep_map *lock);

extern void lock_set_class(struct lockdep_map *lock, const char *name,
                           struct lock_class_key *key, unsigned int subclass,
                           unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
                unsigned int subclass, unsigned long ip)
{
        lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);

extern void lock_pin_lock(struct lockdep_map *lock);
extern void lock_unpin_lock(struct lockdep_map *lock);

# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,

#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l) do {                             \
                WARN_ON(debug_locks && !lockdep_is_held(l));    \
        } while (0)

#define lockdep_assert_held_once(l) do {                                \
                WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));       \
        } while (0)
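
/*
 * Typical use (illustrative; my_dev is a made-up type): functions that
 * rely on the caller holding a lock can document and verify the
 * precondition:
 *
 *        static void update_counters(struct my_dev *dev)
 *        {
 *                lockdep_assert_held(&dev->lock);
 *                dev->stats++;
 *        }
 *
 * This only warns while debug_locks is still active, and it turns into
 * a no-op without CONFIG_LOCKDEP (see the stubs below).
 */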

#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l) lock_pin_lock(&(l)->dep_map)
#define lockdep_unpin_lock(l) lock_unpin_lock(&(l)->dep_map)

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, n, i) do { } while (0)
# define lock_release(l, n, i) do { } while (0)
# define lock_set_class(l, n, k, s, i) do { } while (0)
# define lock_set_subclass(l, s, i) do { } while (0)
# define lockdep_set_current_reclaim_state(g) do { } while (0)
# define lockdep_clear_current_reclaim_state() do { } while (0)
# define lockdep_trace_alloc(g) do { } while (0)
# define lockdep_info() do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
                do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key) do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
                do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
                do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub) do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined and the caller
 * should guard such calls with #ifdef itself.
 */

# define INIT_LOCKDEP
# define lockdep_reset() do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size) do { } while (0)
# define lockdep_sys_exit() do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk) (0)

#define lockdep_assert_held(l) do { (void)(l); } while (0)
#define lockdep_assert_held_once(l) do { (void)(l); } while (0)

#define lockdep_recursing(tsk) (0)

#define lockdep_pin_lock(l) do { (void)(l); } while (0)
#define lockdep_unpin_lock(l) do { (void)(l); } while (0)

#endif /* !LOCKDEP */

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)                        \
do {                                                            \
        if (!try(_lock)) {                                      \
                lock_contended(&(_lock)->dep_map, _RET_IP_);    \
                lock(_lock);                                    \
        }                                                       \
        lock_acquired(&(_lock)->dep_map, _RET_IP_);             \
} while (0)
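
/*
 * How a lock slowpath can use this (illustrative sketch; my_lock,
 * __my_trylock and __my_lock_slowpath are assumed helpers): the
 * trylock probe decides whether the acquisition counts as contended,
 * and the contended/acquired events feed /proc/lock_stat:
 *
 *        void my_lock(struct my_lock *l)
 *        {
 *                lock_acquire(&l->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *                LOCK_CONTENDED(l, __my_trylock, __my_lock_slowpath);
 *        }
 *
 * "try" and "lock" are expanded as function-like names applied to the
 * lock pointer, so plain functions taking that pointer fit directly.
 */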

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
        lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
        LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
        lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_TRACE_IRQFLAGS
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING 1
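
/*
 * Example (illustrative): when two locks of the same class are nested
 * by design, the inner acquisition is annotated with this subclass so
 * the validator does not report a false self-deadlock:
 *
 *        mutex_lock(&parent->mutex);
 *        mutex_lock_nested(&child->mutex, SINGLE_DEPTH_NESTING);
 *        ...
 *        mutex_unlock(&child->mutex);
 *        mutex_unlock(&parent->mutex);
 */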

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, n, i) lock_release(l, n, i)

#define rwlock_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, n, i) lock_release(l, n, i)

#define seqcount_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, n, i) lock_release(l, n, i)

#define mutex_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, n, i) lock_release(l, n, i)

#define rwsem_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i) lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, n, i) lock_release(l, n, i)

#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l) lock_release(l, 1, _THIS_IP_)

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)                                               \
do {                                                                    \
        typecheck(struct lockdep_map *, &(lock)->dep_map);              \
        lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);    \
        lock_release(&(lock)->dep_map, 0, _THIS_IP_);                   \
} while (0)
# define might_lock_read(lock)                                          \
do {                                                                    \
        typecheck(struct lockdep_map *, &(lock)->dep_map);              \
        lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);    \
        lock_release(&(lock)->dep_map, 0, _THIS_IP_);                   \
} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
#endif
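
/*
 * Typical use (illustrative; my_dev and my_dev_lock_and_flush are
 * made-up names): a function that takes a lock only on some paths can
 * still declare the dependency unconditionally, so the validator sees
 * it even when that path is not exercised:
 *
 *        void my_flush(struct my_dev *dev)
 *        {
 *                might_lock(&dev->lock);
 *                if (dev->dirty)
 *                        my_dev_lock_and_flush(dev);
 *        }
 *
 * This mirrors might_sleep(): the acquire/release pair is reported to
 * lockdep but no lock is actually taken.
 */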

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */