/*
 *  linux/kernel/panic.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * This function is used throughout the kernel (including mm and fs)
 * to indicate a major problem.
 */
#include <linux/debug_locks.h>
#include <linux/interrupt.h>
#include <linux/kmsg_dump.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/ftrace.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/init.h>
#include <linux/nmi.h>

#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18

int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE;
static unsigned long tainted_mask;
static int pause_on_oops;
static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);
static bool crash_kexec_post_notifiers;
int panic_on_warn __read_mostly;

int panic_timeout = CONFIG_PANIC_TIMEOUT;
EXPORT_SYMBOL_GPL(panic_timeout);

ATOMIC_NOTIFIER_HEAD(panic_notifier_list);

EXPORT_SYMBOL(panic_notifier_list);
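
/*
 * Illustrative sketch (not part of this file): code that wants to run
 * last-gasp work at panic time hooks this chain with a notifier block.
 * The handler and message names below are hypothetical; the data
 * argument is the message buffer that panic() passes to the chain.
 *
 *	static int my_panic_handler(struct notifier_block *nb,
 *				    unsigned long action, void *data)
 *	{
 *		const char *msg = data;		// panic message from panic()
 *		// put hardware into a safe state, record msg, etc.
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_panic_nb = {
 *		.notifier_call = my_panic_handler,
 *	};
 *
 *	// typically from driver/module init:
 *	atomic_notifier_chain_register(&panic_notifier_list, &my_panic_nb);
 */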

static long no_blink(int state)
{
	return 0;
}

/* Returns how long it waited in ms */
long (*panic_blink)(int state);
EXPORT_SYMBOL(panic_blink);
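
/*
 * Illustrative sketch (not part of this file): a board file can point
 * panic_blink at a routine that toggles an LED during the panic delay
 * loops below.  MY_LED_GPIO is a hypothetical GPIO number; the return
 * value reports how many ms the blink routine itself consumed.
 *
 *	static long my_panic_blink(int state)
 *	{
 *		gpio_set_value(MY_LED_GPIO, state);	// hypothetical GPIO
 *		return 0;	// we spent no measurable time
 *	}
 *
 *	// from board setup code:
 *	panic_blink = my_panic_blink;
 */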

/*
 * Stop ourselves in panic -- architecture code may override this
 */
void __weak panic_smp_self_stop(void)
{
	while (1)
		cpu_relax();
}
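
/*
 * Illustrative sketch (not part of this file): an architecture can
 * override the weak symbol above to park the CPU more efficiently,
 * e.g. in a low-power wait.  cpu_do_idle() stands in here for an
 * arch-specific wait primitive and is only an assumption.
 *
 *	void panic_smp_self_stop(void)
 *	{
 *		while (1)
 *			cpu_do_idle();
 *	}
 */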

/**
 * panic - halt the system
 * @fmt: The text string to print
 *
 * Display a message, then perform cleanups.
 *
 * This function never returns.
 */
void panic(const char *fmt, ...)
{
	static DEFINE_SPINLOCK(panic_lock);
	static char buf[1024];
	va_list args;
	long i, i_next = 0;
	int state = 0;

	/*
	 * Disable local interrupts. This will prevent panic_smp_self_stop
	 * from deadlocking the first cpu that invokes the panic, since
	 * there is nothing to prevent an interrupt handler (that runs
	 * after the panic_lock is acquired) from invoking panic again.
	 */
	local_irq_disable();

	/*
	 * It's possible to come here directly from a panic-assertion and
	 * not have preempt disabled. Some functions called from here want
	 * preempt to be disabled. No point enabling it later though...
	 *
	 * Only one CPU is allowed to execute the panic code from here. For
	 * multiple parallel invocations of panic, all other CPUs either
	 * stop themselves or will wait until they are stopped by the 1st CPU
	 * with smp_send_stop().
	 */
	if (!spin_trylock(&panic_lock))
		panic_smp_self_stop();

	console_verbose();
	bust_spinlocks(1);
	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	pr_emerg("Kernel panic - not syncing: %s\n", buf);
#ifdef CONFIG_DEBUG_BUGVERBOSE
	/*
	 * Avoid nested stack-dumping if a panic occurs during oops processing
	 */
	if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
		dump_stack();
#endif

	/*
	 * If we have crashed and we have a crash kernel loaded let it handle
	 * everything else.
	 * If we want to run this after calling panic_notifiers, pass
	 * the "crash_kexec_post_notifiers" option to the kernel.
	 */
	if (!crash_kexec_post_notifiers)
		crash_kexec(NULL);

	/*
	 * Note smp_send_stop is the usual smp shutdown function, which
	 * unfortunately means it may not be hardened to work in a panic
	 * situation.
	 */
	smp_send_stop();

	/*
	 * Run any panic handlers, including those that might need to
	 * add information to the kmsg dump output.
	 */
	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);

	kmsg_dump(KMSG_DUMP_PANIC);

	/*
	 * If you are not confident that kdump works in every situation,
	 * the "crash_kexec_post_notifiers" option gives you a chance to
	 * run the panic notifiers and dump the kmsg before kdump.
	 * Note: since some panic notifiers can make the crashed kernel
	 * more unstable, this can also increase the risk of kdump failing.
	 */
	crash_kexec(NULL);

	bust_spinlocks(0);

	if (!panic_blink)
		panic_blink = no_blink;

	if (panic_timeout > 0) {
		/*
		 * Delay timeout seconds before rebooting the machine.
		 * We can't use the "normal" timers since we just panicked.
		 */
		pr_emerg("Rebooting in %d seconds..", panic_timeout);

		for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
			touch_nmi_watchdog();
			if (i >= i_next) {
				i += panic_blink(state ^= 1);
				i_next = i + 3600 / PANIC_BLINK_SPD;
			}
			mdelay(PANIC_TIMER_STEP);
		}
	}
	if (panic_timeout != 0) {
		/*
		 * This will not be a clean reboot, with everything
		 * shutting down.  But if there is a chance of
		 * rebooting the system it will be rebooted.
		 */
		emergency_restart();
	}
#ifdef __sparc__
	{
		extern int stop_a_enabled;
		/* Make sure the user can actually press Stop-A (L1-A) */
		stop_a_enabled = 1;
		pr_emerg("Press Stop-A (L1-A) to return to the boot prom\n");
	}
#endif
#if defined(CONFIG_S390)
	{
		unsigned long caller;

		caller = (unsigned long)__builtin_return_address(0);
		disabled_wait(caller);
	}
#endif
	pr_emerg("---[ end Kernel panic - not syncing: %s\n", buf);
	local_irq_enable();
	for (i = 0; ; i += PANIC_TIMER_STEP) {
		touch_softlockup_watchdog();
		if (i >= i_next) {
			i += panic_blink(state ^= 1);
			i_next = i + 3600 / PANIC_BLINK_SPD;
		}
		mdelay(PANIC_TIMER_STEP);
	}
}

EXPORT_SYMBOL(panic);
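
/*
 * Typical call site (illustrative, condensed from the style used all
 * over the tree; the condition and variable names are hypothetical):
 * callers pass a printf-style message describing the unrecoverable
 * condition, and control never returns.
 *
 *	if (!root_mountable)
 *		panic("VFS: Unable to mount root fs on %s", root_name);
 */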


struct tnt {
	u8	bit;
	char	true;
	char	false;
};

static const struct tnt tnts[] = {
	{ TAINT_PROPRIETARY_MODULE,	'P', 'G' },
	{ TAINT_FORCED_MODULE,		'F', ' ' },
	{ TAINT_CPU_OUT_OF_SPEC,	'S', ' ' },
	{ TAINT_FORCED_RMMOD,		'R', ' ' },
	{ TAINT_MACHINE_CHECK,		'M', ' ' },
	{ TAINT_BAD_PAGE,		'B', ' ' },
	{ TAINT_USER,			'U', ' ' },
	{ TAINT_DIE,			'D', ' ' },
	{ TAINT_OVERRIDDEN_ACPI_TABLE,	'A', ' ' },
	{ TAINT_WARN,			'W', ' ' },
	{ TAINT_CRAP,			'C', ' ' },
	{ TAINT_FIRMWARE_WORKAROUND,	'I', ' ' },
	{ TAINT_OOT_MODULE,		'O', ' ' },
	{ TAINT_UNSIGNED_MODULE,	'E', ' ' },
	{ TAINT_SOFTLOCKUP,		'L', ' ' },
	{ TAINT_LIVEPATCH,		'K', ' ' },
};

/**
 * print_tainted - return a string to represent the kernel taint state.
 *
 * 'P' - Proprietary module has been loaded.
 * 'F' - Module has been forcibly loaded.
 * 'S' - SMP with CPUs not designed for SMP.
 * 'R' - User forced a module unload.
 * 'M' - System experienced a machine check exception.
 * 'B' - System has hit bad_page.
 * 'U' - Userspace-defined naughtiness.
 * 'D' - Kernel has oopsed before.
 * 'A' - ACPI table overridden.
 * 'W' - Taint on warning.
 * 'C' - Modules from drivers/staging are loaded.
 * 'I' - Working around severe firmware bug.
 * 'O' - Out-of-tree module has been loaded.
 * 'E' - Unsigned module has been loaded.
 * 'L' - A soft lockup has previously occurred.
 * 'K' - Kernel has been live patched.
 *
 * The string is overwritten by the next call to print_tainted().
 */
const char *print_tainted(void)
{
	static char buf[ARRAY_SIZE(tnts) + sizeof("Tainted: ")];

	if (tainted_mask) {
		char *s;
		int i;

		s = buf + sprintf(buf, "Tainted: ");
		for (i = 0; i < ARRAY_SIZE(tnts); i++) {
			const struct tnt *t = &tnts[i];
			*s++ = test_bit(t->bit, &tainted_mask) ?
					t->true : t->false;
		}
		*s = 0;
	} else
		snprintf(buf, sizeof(buf), "Not tainted");

	return buf;
}
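
/*
 * Example output (illustrative): each position in the string holds the
 * flag letter when set, otherwise the "false" character -- a space for
 * every flag except the first, which shows 'G' when no proprietary
 * module is loaded.  An untainted kernel returns "Not tainted"; one
 * tainted with TAINT_DIE and TAINT_WARN returns (trailing spaces for
 * the unset flags omitted here):
 *
 *	"Tainted: G      D W"
 */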

int test_taint(unsigned flag)
{
	return test_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(test_taint);

unsigned long get_taint(void)
{
	return tainted_mask;
}

/**
 * add_taint: add a taint flag if not already set.
 * @flag: one of the TAINT_* constants.
 * @lockdep_ok: whether lock debugging is still OK.
 *
 * If something bad has gone wrong, you'll want @lockdep_ok = false, but for
 * some noteworthy-but-not-corrupting cases, it can be set to true.
 */
void add_taint(unsigned flag, enum lockdep_ok lockdep_ok)
{
	if (lockdep_ok == LOCKDEP_NOW_UNRELIABLE && __debug_locks_off())
		pr_warn("Disabling lock debugging due to kernel taint\n");

	set_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(add_taint);
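
/*
 * Typical usage (illustrative): code that may have corrupted state
 * passes LOCKDEP_NOW_UNRELIABLE so lock debugging is switched off,
 * while a serious but non-corrupting condition keeps lockdep alive:
 *
 *	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
 *	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
 */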

static void spin_msec(int msecs)
{
	int i;

	for (i = 0; i < msecs; i++) {
		touch_nmi_watchdog();
		mdelay(1);
	}
}

/*
 * It just happens that oops_enter() and oops_exit() are identically
 * implemented...
 */
static void do_oops_enter_exit(void)
{
	unsigned long flags;
	static int spin_counter;

	if (!pause_on_oops)
		return;

	spin_lock_irqsave(&pause_on_oops_lock, flags);
	if (pause_on_oops_flag == 0) {
		/* This CPU may now print the oops message */
		pause_on_oops_flag = 1;
	} else {
		/* We need to stall this CPU */
		if (!spin_counter) {
			/* This CPU gets to do the counting */
			spin_counter = pause_on_oops;
			do {
				spin_unlock(&pause_on_oops_lock);
				spin_msec(MSEC_PER_SEC);
				spin_lock(&pause_on_oops_lock);
			} while (--spin_counter);
			pause_on_oops_flag = 0;
		} else {
			/* This CPU waits for a different one */
			while (spin_counter) {
				spin_unlock(&pause_on_oops_lock);
				spin_msec(1);
				spin_lock(&pause_on_oops_lock);
			}
		}
	}
	spin_unlock_irqrestore(&pause_on_oops_lock, flags);
}

/*
 * Return true if the calling CPU is allowed to print oops-related info.
 * This is a bit racy..
 */
int oops_may_print(void)
{
	return pause_on_oops_flag == 0;
}

/*
 * Called when the architecture enters its oops handler, before it prints
 * anything.  If this is the first CPU to oops, and it's oopsing the first
 * time, then let it proceed.
 *
 * This is all enabled by the pause_on_oops kernel boot option.  We do all
 * this to ensure that oopses don't scroll off the screen.  It has the
 * side-effect of preventing later-oopsing CPUs from mucking up the display,
 * too.
 *
 * It turns out that the CPU which is allowed to print ends up pausing for
 * the right duration, whereas all the other CPUs pause for twice as long:
 * once in oops_enter(), once in oops_exit().
 */
void oops_enter(void)
{
	tracing_off();
	/* can't trust the integrity of the kernel anymore: */
	debug_locks_off();
	do_oops_enter_exit();
}

/*
 * 64-bit random ID for oopses:
 */
static u64 oops_id;

static int init_oops_id(void)
{
	if (!oops_id)
		get_random_bytes(&oops_id, sizeof(oops_id));
	else
		oops_id++;

	return 0;
}
late_initcall(init_oops_id);

void print_oops_end_marker(void)
{
	init_oops_id();
	pr_warn("---[ end trace %016llx ]---\n", (unsigned long long)oops_id);
}
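
/*
 * Example output (illustrative ID value): this is the marker that
 * closes every oops or warning backtrace in the log, carrying the
 * random 64-bit oops_id from above:
 *
 *	---[ end trace 4eaa2a86a8e2da22 ]---
 */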

/*
 * Called when the architecture exits its oops handler, after printing
 * everything.
 */
void oops_exit(void)
{
	do_oops_enter_exit();
	print_oops_end_marker();
	kmsg_dump(KMSG_DUMP_OOPS);
}

#ifdef WANT_WARN_ON_SLOWPATH
struct slowpath_args {
	const char *fmt;
	va_list args;
};

static void warn_slowpath_common(const char *file, int line, void *caller,
				 unsigned taint, struct slowpath_args *args)
{
	disable_trace_on_warning();

	pr_warn("------------[ cut here ]------------\n");
	pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
		raw_smp_processor_id(), current->pid, file, line, caller);

	if (args)
		vprintk(args->fmt, args->args);

	if (panic_on_warn) {
		/*
		 * This thread may hit another WARN() in the panic path.
		 * Resetting this prevents additional WARN() from panicking the
		 * system on this thread.  Other threads are blocked by the
		 * panic_lock spinlock in panic().
		 */
		panic_on_warn = 0;
		panic("panic_on_warn set ...\n");
	}

	print_modules();
	dump_stack();
	print_oops_end_marker();
	/* Just a warning, don't kill lockdep. */
	add_taint(taint, LOCKDEP_STILL_OK);
}

void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...)
{
	struct slowpath_args args;

	args.fmt = fmt;
	va_start(args.args, fmt);
	warn_slowpath_common(file, line, __builtin_return_address(0),
			     TAINT_WARN, &args);
	va_end(args.args);
}
EXPORT_SYMBOL(warn_slowpath_fmt);
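
/*
 * Callers normally reach this through the WARN() family of macros
 * rather than directly.  Illustrative, simplified expansion (the real
 * macro lives in include/asm-generic/bug.h; dev and name here are
 * hypothetical):
 *
 *	WARN(dev == NULL, "missing device for %s\n", name);
 *	// roughly becomes:
 *	if (unlikely(dev == NULL))
 *		warn_slowpath_fmt(__FILE__, __LINE__,
 *				  "missing device for %s\n", name);
 */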

void warn_slowpath_fmt_taint(const char *file, int line,
			     unsigned taint, const char *fmt, ...)
{
	struct slowpath_args args;

	args.fmt = fmt;
	va_start(args.args, fmt);
	warn_slowpath_common(file, line, __builtin_return_address(0),
			     taint, &args);
	va_end(args.args);
}
EXPORT_SYMBOL(warn_slowpath_fmt_taint);

void warn_slowpath_null(const char *file, int line)
{
	warn_slowpath_common(file, line, __builtin_return_address(0),
			     TAINT_WARN, NULL);
}
EXPORT_SYMBOL(warn_slowpath_null);
#endif

#ifdef CONFIG_CC_STACKPROTECTOR

/*
 * Called when gcc's -fstack-protector feature is used, and
 * gcc detects corruption of the on-stack canary value
 */
__visible void __stack_chk_fail(void)
{
	panic("stack-protector: Kernel stack is corrupted in: %p\n",
	      __builtin_return_address(0));
}
EXPORT_SYMBOL(__stack_chk_fail);

#endif
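
/*
 * What trips __stack_chk_fail() (illustrative, hypothetical function):
 * with -fstack-protector, gcc plants a canary between a function's
 * locals and its return address and checks it in the epilogue.  An
 * overflow like the one below rewrites the canary, and the check calls
 * __stack_chk_fail() instead of returning.
 *
 *	static void overflow_me(const char *untrusted)
 *	{
 *		char buf[8];
 *		strcpy(buf, untrusted);		// may write past buf[7]
 *	}
 */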

core_param(panic, panic_timeout, int, 0644);
core_param(pause_on_oops, pause_on_oops, int, 0644);
core_param(panic_on_warn, panic_on_warn, int, 0644);
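
/*
 * Usage (illustrative): the core_param() lines above expose these as
 * boot parameters and as writable files under
 * /sys/module/kernel/parameters/.  For example, booting with
 *
 *	panic=30 pause_on_oops=5 panic_on_warn=1
 *
 * reboots 30 seconds after a panic, holds oops output on screen for
 * 5 seconds, and escalates every WARN() into a panic.
 */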

static int __init setup_crash_kexec_post_notifiers(char *s)
{
	crash_kexec_post_notifiers = true;
	return 0;
}
early_param("crash_kexec_post_notifiers", setup_crash_kexec_post_notifiers);

static int __init oops_setup(char *s)
{
	if (!s)
		return -EINVAL;
	if (!strcmp(s, "panic"))
		panic_on_oops = 1;
	return 0;
}
early_param("oops", oops_setup);
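
/*
 * Usage (illustrative): the early_param() hooks above consume plain
 * boot-command-line flags.  "oops=panic" makes every oops fatal, and
 * "crash_kexec_post_notifiers" delays kdump until after the panic
 * notifiers and the kmsg dump have run:
 *
 *	oops=panic crash_kexec_post_notifiers
 */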