include/linux/lockdep.h
/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * see Documentation/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit
{
	LOCK_USED = 0,
	LOCK_USED_IN_HARDIRQ,
	LOCK_USED_IN_SOFTIRQ,
	LOCK_ENABLED_SOFTIRQS,
	LOCK_ENABLED_HARDIRQS,
	LOCK_USED_IN_HARDIRQ_READ,
	LOCK_USED_IN_SOFTIRQ_READ,
	LOCK_ENABLED_SOFTIRQS_READ,
	LOCK_ENABLED_HARDIRQS_READ,
	LOCK_USAGE_STATES
};

/*
 * Usage-state bitmasks:
 */
#define LOCKF_USED			(1 << LOCK_USED)
#define LOCKF_USED_IN_HARDIRQ		(1 << LOCK_USED_IN_HARDIRQ)
#define LOCKF_USED_IN_SOFTIRQ		(1 << LOCK_USED_IN_SOFTIRQ)
#define LOCKF_ENABLED_HARDIRQS		(1 << LOCK_ENABLED_HARDIRQS)
#define LOCKF_ENABLED_SOFTIRQS		(1 << LOCK_ENABLED_SOFTIRQS)

#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_USED_IN_HARDIRQ_READ	(1 << LOCK_USED_IN_HARDIRQ_READ)
#define LOCKF_USED_IN_SOFTIRQ_READ	(1 << LOCK_USED_IN_SOFTIRQ_READ)
#define LOCKF_ENABLED_HARDIRQS_READ	(1 << LOCK_ENABLED_HARDIRQS_READ)
#define LOCKF_ENABLED_SOFTIRQS_READ	(1 << LOCK_ENABLED_SOFTIRQS_READ)

#define LOCKF_ENABLED_IRQS_READ \
		(LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)

#define MAX_LOCKDEP_SUBCLASSES		8UL

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};
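
/*
 * Usage sketch (illustrative, not part of this header): a static key
 * gives a lock instance its own class, distinct from other locks
 * initialized elsewhere.  The "mydev" structure below is hypothetical.
 *
 *	static struct lock_class_key mydev_tx_lock_key;
 *
 *	spin_lock_init(&mydev->tx_lock);
 *	lockdep_set_class(&mydev->tx_lock, &mydev_tx_lock_key);
 *
 * Because the key occupies a unique .data (or module) address, every
 * lock registered against it shares one class, and locks registered
 * against any other key get a different class.
 */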

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct list_head		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies;
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, used during certain kinds of graph
	 * walking to ensure that we visit each node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[4];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[4];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache;
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
#endif
};
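
/*
 * Sketch of how a lock type embeds this map (illustrative; the real
 * spinlock/rwlock/mutex/rwsem types do the equivalent in their own
 * headers, and "my_lock" here is a made-up name):
 *
 *	struct my_lock {
 *		raw_spinlock_t		raw_lock;
 *	#ifdef CONFIG_DEBUG_LOCK_ALLOC
 *		struct lockdep_map	dep_map;
 *	#endif
 *	};
 *
 * dep_map is the conventional field name that the lockdep_set_*()
 * helpers below expect.
 */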

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	struct list_head		entry;
	u64				chain_key;
};

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * for full coverage of the validator it is critical that every
	 * unique dependency path that can occur in the system maps to
	 * a unique hash value with as high a probability as possible;
	 * hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	struct lock_class		*class;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;

#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	int				irq_context;
	int				trylock;
	int				read;
	int				check;
	int				hardirqs_off;
};
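
/*
 * Conceptual sketch of the chain-key computation (the real mix
 * function is iterate_chain_key() in kernel/lockdep.c):
 *
 *	chain_key = 0;			fresh value per context
 *	for each lock acquired, in order:
 *		chain_key = mix(chain_key, class_key);
 *
 * Each held_lock stores the value the chain key had *before* its own
 * acquisition, so that releases can rewind the hash to any depth.
 */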

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);
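
/*
 * A locking primitive typically calls lockdep_init_map() from its
 * init path, passing a static key (sketch; "my_lock" and its fields
 * are hypothetical):
 *
 *	static struct lock_class_key my_lock_key;
 *
 *	void my_lock_init(struct my_lock *l)
 *	{
 *		init_the_raw_lock(&l->raw_lock);
 *		lockdep_init_map(&l->dep_map, "my_lock", &my_lock_key, 0);
 *	}
 */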

/*
 * Re-register a lock's class key - for cases where special locking or
 * special initialization of locks causes the validator to get the
 * scope of dependencies wrong: classes are either too broad (they need
 * a class split) or too narrow (they suffer from a false class split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub) \
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)
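
/*
 * Example (sketch): all locks initialized at one init site share a
 * class by default.  If one instance plays a genuinely different role
 * (say, a per-superblock lock used only for journalling), split its
 * class explicitly - "sbi" and the key name are hypothetical:
 *
 *	static struct lock_class_key journal_lock_key;
 *
 *	spin_lock_init(&sbi->lock);
 *	lockdep_set_class_and_name(&sbi->lock, &journal_lock_key,
 *				   "sbi->lock-journal");
 */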

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for "check":
 *
 *   0: disabled
 *   1: simple checks (freeing, held-at-exit-time, etc.)
 *   2: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
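
/*
 * Sketch of annotating a custom primitive directly (hypothetical
 * names).  An exclusive acquire with full validation, and the
 * matching release:
 *
 *	void my_lock(struct my_lock *l)
 *	{
 *		lock_acquire(&l->dep_map, 0, 0, 0, 2, _RET_IP_);
 *		really_take_the_lock(l);
 *	}
 *
 *	void my_unlock(struct my_lock *l)
 *	{
 *		lock_release(&l->dep_map, 1, _RET_IP_);
 *		really_drop_the_lock(l);
 *	}
 *
 * Here subclass == 0, trylock == 0, read == 0 (exclusive) and
 * check == 2 (full validation); nested == 1 takes the fast path for
 * a perfectly nested (LIFO) release.
 */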

# define INIT_LOCKDEP				.lockdep_recursion = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#else /* !LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, i)		do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub)	do { (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); } while (0)
# define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
# define lockdep_set_subclass(lock, sub)	do { } while (0)

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#endif /* !LOCKDEP */

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map);			\
} while (0)

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip)		do {} while (0)
#define lock_acquired(lockdep_map)		do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */
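
/*
 * Sketch of how a lock implementation threads LOCK_CONTENDED through
 * its slow path (compare the real spinlock code; "my_*" names are
 * hypothetical):
 *
 *	void my_lock(struct my_lock *l)
 *	{
 *		lock_acquire(&l->dep_map, 0, 0, 0, 2, _RET_IP_);
 *		LOCK_CONTENDED(l, my_trylock, my_lock_slowpath);
 *	}
 *
 * With CONFIG_LOCK_STAT the contended/acquired events feed the
 * lock_time statistics above; without it, the macro degrades to a
 * plain my_lock_slowpath(l) call.
 */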

#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
extern void early_init_irq_lock_class(void);
#else
static inline void early_init_irq_lock_class(void)
{
}
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
extern void early_boot_irqs_off(void);
extern void early_boot_irqs_on(void);
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void early_boot_irqs_off(void)
{
}
static inline void early_boot_irqs_on(void)
{
}
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
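
/*
 * Example (sketch): taking two locks of the same class in a known,
 * safe parent->child order.  Without the nested annotation, the
 * second acquire would look like a recursive deadlock to the
 * validator:
 *
 *	spin_lock(&parent->lock);
 *	spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 */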

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
# else
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
# endif
# define spin_release(l, n, i)			lock_release(l, n, i)
#else
# define spin_acquire(l, s, t, i)		do { } while (0)
# define spin_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, i)
# else
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, i)
# endif
# define rwlock_release(l, n, i)		lock_release(l, n, i)
#else
# define rwlock_acquire(l, s, t, i)		do { } while (0)
# define rwlock_acquire_read(l, s, t, i)	do { } while (0)
# define rwlock_release(l, n, i)		do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
# else
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
# endif
# define mutex_release(l, n, i)			lock_release(l, n, i)
#else
# define mutex_acquire(l, s, t, i)		do { } while (0)
# define mutex_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, i)
# else
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, i)
# endif
# define rwsem_release(l, n, i)			lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i)		do { } while (0)
# define rwsem_acquire_read(l, s, t, i)		do { } while (0)
# define rwsem_release(l, n, i)			do { } while (0)
#endif
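
/*
 * Sketch of how a primitive uses these wrappers (compare the real
 * mutex implementation in kernel/mutex.c; "my_mutex_*" names are
 * hypothetical):
 *
 *	void my_mutex_lock(struct mutex *lock)
 *	{
 *		mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 *		...actually take the lock...
 *	}
 *
 *	void my_mutex_unlock(struct mutex *lock)
 *	{
 *		mutex_release(&lock->dep_map, 1, _RET_IP_);
 *		...actually drop the lock...
 *	}
 *
 * With CONFIG_DEBUG_LOCK_ALLOC disabled, all of these wrappers
 * compile to nothing, so the annotations cost nothing in production
 * builds.
 */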

#endif /* __LINUX_LOCKDEP_H */