/*
 * kernel/mutex-debug.c
 *
 * Debugging code for mutexes
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * lock debugging, locking tree, deadlock detection started by:
 *
 *  Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
 *  Released under the General Public License (GPL).
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>

#include "mutex-debug.h"

/*
 * We need a global lock when we walk through the multi-process
 * lock tree. Only used in the deadlock-debugging case.
 */
DEFINE_SPINLOCK(debug_mutex_lock);

/*
 * All locks held by all tasks, in a single global list:
 */
LIST_HEAD(debug_mutex_held_locks);

/*
 * In the debug case we carry the caller's instruction pointer into
 * other functions, but we don't want the function argument overhead
 * in the nondebug case - hence these macros:
 */
#define __IP_DECL__		, unsigned long ip
#define __IP__			, ip
#define __RET_IP__		, (unsigned long)__builtin_return_address(0)
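/*
 * Illustrative usage (see debug_mutex_set_owner() below): a debug-aware
 * prototype written as
 *
 *	void debug_mutex_set_owner(struct mutex *lock,
 *				   struct thread_info *new_owner __IP_DECL__);
 *
 * grows an extra "unsigned long ip" argument here, and callers pass
 * __RET_IP__ so their return address ends up in lock->acquire_ip. In the
 * nondebug build these macros are assumed to expand to nothing, so no
 * extra argument is passed at all.
 */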
44 | ||
45 | /* | |
46 | * "mutex debugging enabled" flag. We turn it off when we detect | |
47 | * the first problem because we dont want to recurse back | |
48 | * into the tracing code when doing error printk or | |
49 | * executing a BUG(): | |
50 | */ | |
51 | int debug_mutex_on = 1; | |
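/*
 * (The flag is presumably cleared by the DEBUG_OFF() helper from
 * mutex-debug.h, which the error paths below invoke before printing
 * their reports.)
 */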
52 | ||
53 | static void printk_task(struct task_struct *p) | |
54 | { | |
55 | if (p) | |
56 | printk("%16s:%5d [%p, %3d]", p->comm, p->pid, p, p->prio); | |
57 | else | |
58 | printk("<none>"); | |
59 | } | |
60 | ||
61 | static void printk_ti(struct thread_info *ti) | |
62 | { | |
63 | if (ti) | |
64 | printk_task(ti->task); | |
65 | else | |
66 | printk("<none>"); | |
67 | } | |
68 | ||
69 | static void printk_task_short(struct task_struct *p) | |
70 | { | |
71 | if (p) | |
72 | printk("%s/%d [%p, %3d]", p->comm, p->pid, p, p->prio); | |
73 | else | |
74 | printk("<none>"); | |
75 | } | |
76 | ||
static void printk_lock(struct mutex *lock, int print_owner)
{
	printk(" [%p] {%s}\n", lock, lock->name);

	if (print_owner && lock->owner) {
		printk(".. held by: ");
		printk_ti(lock->owner);
		printk("\n");
	}
	if (lock->owner) {
		printk("... acquired at: ");
		print_symbol("%s\n", lock->acquire_ip);
	}
}

/*
 * printk locks held by a task:
 */
static void show_task_locks(struct task_struct *p)
{
	switch (p->state) {
	case TASK_RUNNING:		printk("R"); break;
	case TASK_INTERRUPTIBLE:	printk("S"); break;
	case TASK_UNINTERRUPTIBLE:	printk("D"); break;
	case TASK_STOPPED:		printk("T"); break;
	case EXIT_ZOMBIE:		printk("Z"); break;
	case EXIT_DEAD:			printk("X"); break;
	default:			printk("?"); break;
	}
	printk_task(p);
	if (p->blocked_on) {
		struct mutex *lock = p->blocked_on->lock;

		printk(" blocked on mutex:");
		printk_lock(lock, 1);
	} else
		printk(" (not blocked on mutex)\n");
}

/*
 * printk all locks held in the system (if filter == NULL),
 * or all locks belonging to a single task (if filter != NULL):
 */
void show_held_locks(struct task_struct *filter)
{
	struct list_head *curr, *cursor = NULL;
	struct mutex *lock;
	struct thread_info *t;
	unsigned long flags;
	int count = 0;

	if (filter) {
		printk("------------------------------\n");
		printk("| showing all locks held by: | (");
		printk_task_short(filter);
		printk("):\n");
		printk("------------------------------\n");
	} else {
		printk("---------------------------\n");
		printk("| showing all locks held: |\n");
		printk("---------------------------\n");
	}

	/*
	 * Play safe and acquire the global trace lock. We
	 * cannot printk with that lock held so we iterate
	 * very carefully:
	 */
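	/*
	 * Note: the loop below restarts from the list head each time;
	 * "cursor" remembers the next entry so that debug_mutex_lock can
	 * be dropped around each printk_lock() call and the walk resumed
	 * afterwards.
	 */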
next:
	debug_spin_lock_save(&debug_mutex_lock, flags);
	list_for_each(curr, &debug_mutex_held_locks) {
		if (cursor && curr != cursor)
			continue;
		lock = list_entry(curr, struct mutex, held_list);
		t = lock->owner;
		if (filter && (t != filter->thread_info))
			continue;
		count++;
		cursor = curr->next;
		debug_spin_lock_restore(&debug_mutex_lock, flags);

		printk("\n#%03d: ", count);
		printk_lock(lock, filter ? 0 : 1);
		goto next;
	}
	debug_spin_lock_restore(&debug_mutex_lock, flags);
	printk("\n");
}

void mutex_debug_show_all_locks(void)
{
	struct task_struct *g, *p;
	int count = 10;
	int unlock = 1;

	printk("\nShowing all blocking locks in the system:\n");

	/*
	 * Here we try to get the tasklist_lock as hard as possible,
	 * if not successful after 2 seconds we ignore it (but keep
	 * trying). This is to enable a debug printout even if a
	 * tasklist_lock-holding task deadlocks or crashes.
	 */
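	/* 10 attempts x mdelay(200) below gives the 2 second budget. */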
retry:
	if (!read_trylock(&tasklist_lock)) {
		if (count == 10)
			printk("hm, tasklist_lock locked, retrying... ");
		if (count) {
			count--;
			printk(" #%d", 10-count);
			mdelay(200);
			goto retry;
		}
		printk(" ignoring it.\n");
		unlock = 0;
	}
	if (count != 10)
		printk(" locked it.\n");

	do_each_thread(g, p) {
		show_task_locks(p);
		if (!unlock)
			if (read_trylock(&tasklist_lock))
				unlock = 1;
	} while_each_thread(g, p);

	printk("\n");
	show_held_locks(NULL);
	printk("=============================================\n\n");

	if (unlock)
		read_unlock(&tasklist_lock);
}

static void report_deadlock(struct task_struct *task, struct mutex *lock,
			    struct mutex *lockblk, unsigned long ip)
{
	printk("\n%s/%d is trying to acquire this lock:\n",
		current->comm, current->pid);
	printk_lock(lock, 1);
	printk("... trying at: ");
	print_symbol("%s\n", ip);
	show_held_locks(current);

	if (lockblk) {
		printk("but %s/%d is deadlocking current task %s/%d!\n\n",
			task->comm, task->pid, current->comm, current->pid);
		printk("\n%s/%d is blocked on this lock:\n",
			task->comm, task->pid);
		printk_lock(lockblk, 1);

		show_held_locks(task);

		printk("\n%s/%d's [blocked] stackdump:\n\n",
			task->comm, task->pid);
		show_stack(task, NULL);
	}

	printk("\n%s/%d's [current] stackdump:\n\n",
		current->comm, current->pid);
	dump_stack();
	mutex_debug_show_all_locks();
	printk("[ turning off deadlock detection. Please report this. ]\n\n");
	local_irq_disable();
}

/*
 * Recursively check for mutex deadlocks:
 */
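/*
 * Starting from the mutex we are about to wait on, follow the chain
 * lock -> owner task -> mutex that owner is itself blocked on -> ...
 * and report one of three conditions: the current task already owns a
 * lock in the chain (recursion/self-deadlock), the chain is implausibly
 * deep (> 20, likely corrupted lock data), or a cycle back to the
 * current task is found (circular deadlock).
 */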
static int check_deadlock(struct mutex *lock, int depth,
			  struct thread_info *ti, unsigned long ip)
{
	struct mutex *lockblk;
	struct task_struct *task;

	if (!debug_mutex_on)
		return 0;

	ti = lock->owner;
	if (!ti)
		return 0;

	task = ti->task;
	lockblk = NULL;
	if (task->blocked_on)
		lockblk = task->blocked_on->lock;

	/* Self-deadlock: */
	if (current == task) {
		DEBUG_OFF();
		if (depth)
			return 1;
		printk("\n==========================================\n");
		printk(  "[ BUG: lock recursion deadlock detected! ]\n");
		printk(  "------------------------------------------\n");
		report_deadlock(task, lock, NULL, ip);
		return 0;
	}

	/* Ugh, something corrupted the lock data structure? */
	if (depth > 20) {
		DEBUG_OFF();
		printk("\n===========================================\n");
		printk(  "[ BUG: infinite lock dependency detected!? ]\n");
		printk(  "-------------------------------------------\n");
		report_deadlock(task, lock, lockblk, ip);
		return 0;
	}

	/* Recursively check for dependencies: */
	if (lockblk && check_deadlock(lockblk, depth+1, ti, ip)) {
		printk("\n============================================\n");
		printk(  "[ BUG: circular locking deadlock detected! ]\n");
		printk(  "--------------------------------------------\n");
		report_deadlock(task, lock, lockblk, ip);
		return 0;
	}
	return 0;
}

296 | ||
297 | /* | |
298 | * Called when a task exits, this function checks whether the | |
299 | * task is holding any locks, and reports the first one if so: | |
300 | */ | |
301 | void mutex_debug_check_no_locks_held(struct task_struct *task) | |
302 | { | |
303 | struct list_head *curr, *next; | |
304 | struct thread_info *t; | |
305 | unsigned long flags; | |
306 | struct mutex *lock; | |
307 | ||
308 | if (!debug_mutex_on) | |
309 | return; | |
310 | ||
311 | debug_spin_lock_save(&debug_mutex_lock, flags); | |
312 | list_for_each_safe(curr, next, &debug_mutex_held_locks) { | |
313 | lock = list_entry(curr, struct mutex, held_list); | |
314 | t = lock->owner; | |
315 | if (t != task->thread_info) | |
316 | continue; | |
317 | list_del_init(curr); | |
318 | DEBUG_OFF(); | |
319 | debug_spin_lock_restore(&debug_mutex_lock, flags); | |
320 | ||
321 | printk("BUG: %s/%d, lock held at task exit time!\n", | |
322 | task->comm, task->pid); | |
323 | printk_lock(lock, 1); | |
324 | if (lock->owner != task->thread_info) | |
325 | printk("exiting task is not even the owner??\n"); | |
326 | return; | |
327 | } | |
328 | debug_spin_lock_restore(&debug_mutex_lock, flags); | |
329 | } | |
330 | ||
/*
 * Called when kernel memory is freed (or unmapped), or if a mutex
 * is destroyed or reinitialized - this code checks whether there is
 * any held lock in the memory range of <from> to <to>:
 */
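/*
 * A held mutex found inside [from, from+len) means memory that still
 * contains a live, locked mutex is about to be freed or reused - i.e. a
 * likely use-after-free; the lock is reported and dropped from the
 * held-locks list.
 */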
void mutex_debug_check_no_locks_freed(const void *from, unsigned long len)
{
	struct list_head *curr, *next;
	const void *to = from + len;
	unsigned long flags;
	struct mutex *lock;
	void *lock_addr;

	if (!debug_mutex_on)
		return;

	debug_spin_lock_save(&debug_mutex_lock, flags);
	list_for_each_safe(curr, next, &debug_mutex_held_locks) {
		lock = list_entry(curr, struct mutex, held_list);
		lock_addr = lock;
		if (lock_addr < from || lock_addr >= to)
			continue;
		list_del_init(curr);
		DEBUG_OFF();
		debug_spin_lock_restore(&debug_mutex_lock, flags);

		printk("BUG: %s/%d, active lock [%p(%p-%p)] freed!\n",
			current->comm, current->pid, lock, from, to);
		dump_stack();
		printk_lock(lock, 1);
		if (lock->owner != current_thread_info())
			printk("freeing task is not even the owner??\n");
		return;
	}
	debug_spin_lock_restore(&debug_mutex_lock, flags);
}

/*
 * Must be called with lock->wait_lock held.
 */
void debug_mutex_set_owner(struct mutex *lock,
			   struct thread_info *new_owner __IP_DECL__)
{
	lock->owner = new_owner;
	DEBUG_WARN_ON(!list_empty(&lock->held_list));
	if (debug_mutex_on) {
		list_add_tail(&lock->held_list, &debug_mutex_held_locks);
		lock->acquire_ip = ip;
	}
}

void debug_mutex_init_waiter(struct mutex_waiter *waiter)
{
	memset(waiter, 0x11, sizeof(*waiter));
	waiter->magic = waiter;
	INIT_LIST_HEAD(&waiter->list);
}

void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
	SMP_DEBUG_WARN_ON(!spin_is_locked(&lock->wait_lock));
	DEBUG_WARN_ON(list_empty(&lock->wait_list));
	DEBUG_WARN_ON(waiter->magic != waiter);
	DEBUG_WARN_ON(list_empty(&waiter->list));
}

void debug_mutex_free_waiter(struct mutex_waiter *waiter)
{
	DEBUG_WARN_ON(!list_empty(&waiter->list));
	memset(waiter, 0x22, sizeof(*waiter));
}

void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
			    struct thread_info *ti __IP_DECL__)
{
	SMP_DEBUG_WARN_ON(!spin_is_locked(&lock->wait_lock));
	check_deadlock(lock, 0, ti, ip);
	/* Mark the current thread as blocked on the lock: */
	ti->task->blocked_on = waiter;
	waiter->lock = lock;
}

void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
			 struct thread_info *ti)
{
	DEBUG_WARN_ON(list_empty(&waiter->list));
	DEBUG_WARN_ON(waiter->task != ti->task);
	DEBUG_WARN_ON(ti->task->blocked_on != waiter);
	ti->task->blocked_on = NULL;

	list_del_init(&waiter->list);
	waiter->task = NULL;
}

void debug_mutex_unlock(struct mutex *lock)
{
	DEBUG_WARN_ON(lock->magic != lock);
	DEBUG_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
	DEBUG_WARN_ON(lock->owner != current_thread_info());
	if (debug_mutex_on) {
		DEBUG_WARN_ON(list_empty(&lock->held_list));
		list_del_init(&lock->held_list);
	}
}

void debug_mutex_init(struct mutex *lock, const char *name)
{
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	mutex_debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lock->owner = NULL;
	INIT_LIST_HEAD(&lock->held_list);
	lock->name = name;
	lock->magic = lock;
}

/***
 * mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void fastcall mutex_destroy(struct mutex *lock)
{
	DEBUG_WARN_ON(mutex_is_locked(lock));
	lock->magic = NULL;
}

EXPORT_SYMBOL_GPL(mutex_destroy);