/*
 *  linux/kernel/panic.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * This function is used throughout the kernel (including mm and fs)
 * to indicate a major problem.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/sysrq.h>
#include <linux/interrupt.h>
#include <linux/nmi.h>
#include <linux/kexec.h>
#include <linux/debug_locks.h>
#include <linux/random.h>
#include <linux/kallsyms.h>
#include <linux/dmi.h>

int panic_on_oops;
static unsigned long tainted_mask;
static int pause_on_oops;
static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);

int panic_timeout;

ATOMIC_NOTIFIER_HEAD(panic_notifier_list);

EXPORT_SYMBOL(panic_notifier_list);

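/*
 * Illustrative sketch (not part of the original file): other kernel code
 * hooks the panic path by registering on panic_notifier_list; the chain is
 * run in panic() with the formatted panic message as the void * argument.
 * The names my_panic_event and my_panic_nb are made up for the example.
 *
 *        static int my_panic_event(struct notifier_block *nb,
 *                                  unsigned long event, void *buf)
 *        {
 *                return NOTIFY_DONE;
 *        }
 *
 *        static struct notifier_block my_panic_nb = {
 *                .notifier_call = my_panic_event,
 *        };
 *
 *        atomic_notifier_chain_register(&panic_notifier_list, &my_panic_nb);
 */
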
static long no_blink(long time)
{
        return 0;
}

/* Returns how long it waited in ms */
long (*panic_blink)(long time);
EXPORT_SYMBOL(panic_blink);

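/*
 * Note: panic_blink above is a hook.  Platform or driver code may install
 * its own handler (for example to flash an LED while the machine is down)
 * by assigning to panic_blink; no_blink is merely the fallback that
 * panic() installs when nothing else has registered.
 */
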
/**
 *      panic - halt the system
 *      @fmt: The text string to print
 *
 *      Display a message, then perform cleanups.
 *
 *      This function never returns.
 */

NORET_TYPE void panic(const char * fmt, ...)
{
        long i;
        static char buf[1024];
        va_list args;
#if defined(CONFIG_S390)
        unsigned long caller = (unsigned long) __builtin_return_address(0);
#endif

        /*
         * It's possible to come here directly from a panic-assertion and not
         * have preempt disabled. Some functions called from here want
         * preempt to be disabled. No point enabling it later though...
         */
        preempt_disable();

        bust_spinlocks(1);
        va_start(args, fmt);
        vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);
        printk(KERN_EMERG "Kernel panic - not syncing: %s\n", buf);
#ifdef CONFIG_DEBUG_BUGVERBOSE
        dump_stack();
#endif

        /*
         * If we have crashed and we have a crash kernel loaded let it handle
         * everything else.
         * Do we want to call this before we try to display a message?
         */
        crash_kexec(NULL);

        /*
         * Note smp_send_stop is the usual smp shutdown function, which
         * unfortunately means it may not be hardened to work in a panic
         * situation.
         */
        smp_send_stop();

        atomic_notifier_call_chain(&panic_notifier_list, 0, buf);

        if (!panic_blink)
                panic_blink = no_blink;

        if (panic_timeout > 0) {
                /*
                 * Delay timeout seconds before rebooting the machine.
                 * We can't use the "normal" timers since we just panicked..
                 */
                printk(KERN_EMERG "Rebooting in %d seconds..", panic_timeout);
                for (i = 0; i < panic_timeout*1000; ) {
                        touch_nmi_watchdog();
                        i += panic_blink(i);
                        mdelay(1);
                        i++;
                }
                /* This will not be a clean reboot, with everything
                 * shutting down.  But if there is a chance of
                 * rebooting the system it will be rebooted.
                 */
                emergency_restart();
        }
#ifdef __sparc__
        {
                extern int stop_a_enabled;
                /* Make sure the user can actually press Stop-A (L1-A) */
                stop_a_enabled = 1;
                printk(KERN_EMERG "Press Stop-A (L1-A) to return to the boot prom\n");
        }
#endif
#if defined(CONFIG_S390)
        disabled_wait(caller);
#endif
        local_irq_enable();
        for (i = 0;;) {
                touch_softlockup_watchdog();
                i += panic_blink(i);
                mdelay(1);
                i++;
        }
        bust_spinlocks(0);
}

EXPORT_SYMBOL(panic);


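/*
 * Each entry maps a TAINT_* bit to the character print_tainted() emits:
 * 'true' when the bit is set, 'false' when it is clear.  The clear case is
 * a space for every flag except the proprietary-module slot, which shows
 * 'G' while no proprietary module has been loaded.
 */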
struct tnt {
        u8 bit;
        char true;
        char false;
};

static const struct tnt tnts[] = {
        { TAINT_PROPRIETARY_MODULE, 'P', 'G' },
        { TAINT_FORCED_MODULE, 'F', ' ' },
        { TAINT_UNSAFE_SMP, 'S', ' ' },
        { TAINT_FORCED_RMMOD, 'R', ' ' },
        { TAINT_MACHINE_CHECK, 'M', ' ' },
        { TAINT_BAD_PAGE, 'B', ' ' },
        { TAINT_USER, 'U', ' ' },
        { TAINT_DIE, 'D', ' ' },
        { TAINT_OVERRIDDEN_ACPI_TABLE, 'A', ' ' },
        { TAINT_WARN, 'W', ' ' },
        { TAINT_CRAP, 'C', ' ' },
};

/**
 *      print_tainted - return a string to represent the kernel taint state.
 *
 *  'P' - Proprietary module has been loaded.
 *  'F' - Module has been forcibly loaded.
 *  'S' - SMP with CPUs not designed for SMP.
 *  'R' - User forced a module unload.
 *  'M' - System experienced a machine check exception.
 *  'B' - System has hit bad_page.
 *  'U' - Userspace-defined naughtiness.
 *  'D' - Kernel has oopsed before.
 *  'A' - ACPI table overridden.
 *  'W' - Taint on warning.
 *  'C' - Modules from drivers/staging are loaded.
 *
 *      The string is overwritten by the next call to print_tainted().
 */
const char *print_tainted(void)
{
        static char buf[ARRAY_SIZE(tnts) + sizeof("Tainted: ") + 1];

        if (tainted_mask) {
                char *s;
                int i;

                s = buf + sprintf(buf, "Tainted: ");
                for (i = 0; i < ARRAY_SIZE(tnts); i++) {
                        const struct tnt *t = &tnts[i];
                        *s++ = test_bit(t->bit, &tainted_mask) ?
                                        t->true : t->false;
                }
                *s = 0;
        } else
                snprintf(buf, sizeof(buf), "Not tainted");
        return buf;
}

int test_taint(unsigned flag)
{
        return test_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(test_taint);

unsigned long get_taint(void)
{
        return tainted_mask;
}

void add_taint(unsigned flag)
{
        debug_locks = 0; /* can't trust the integrity of the kernel anymore */
        set_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(add_taint);

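/*
 * Busy-wait for roughly msecs milliseconds, touching the NMI watchdog once
 * per millisecond so that the deliberate stall is not itself reported as a
 * lockup.
 */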
static void spin_msec(int msecs)
{
        int i;

        for (i = 0; i < msecs; i++) {
                touch_nmi_watchdog();
                mdelay(1);
        }
}

/*
 * It just happens that oops_enter() and oops_exit() are identically
 * implemented...
 */
static void do_oops_enter_exit(void)
{
        unsigned long flags;
        static int spin_counter;

        if (!pause_on_oops)
                return;

        spin_lock_irqsave(&pause_on_oops_lock, flags);
        if (pause_on_oops_flag == 0) {
                /* This CPU may now print the oops message */
                pause_on_oops_flag = 1;
        } else {
                /* We need to stall this CPU */
                if (!spin_counter) {
                        /* This CPU gets to do the counting */
                        spin_counter = pause_on_oops;
                        do {
                                spin_unlock(&pause_on_oops_lock);
                                spin_msec(MSEC_PER_SEC);
                                spin_lock(&pause_on_oops_lock);
                        } while (--spin_counter);
                        pause_on_oops_flag = 0;
                } else {
                        /* This CPU waits for a different one */
                        while (spin_counter) {
                                spin_unlock(&pause_on_oops_lock);
                                spin_msec(1);
                                spin_lock(&pause_on_oops_lock);
                        }
                }
        }
        spin_unlock_irqrestore(&pause_on_oops_lock, flags);
}

/*
 * Return true if the calling CPU is allowed to print oops-related info.
 * This is a bit racy..
 */
int oops_may_print(void)
{
        return pause_on_oops_flag == 0;
}

/*
 * Called when the architecture enters its oops handler, before it prints
 * anything.  If this is the first CPU to oops, and it's oopsing the first
 * time then let it proceed.
 *
 * This is all enabled by the pause_on_oops kernel boot option.  We do all
 * this to ensure that oopses don't scroll off the screen.  It has the
 * side-effect of preventing later-oopsing CPUs from mucking up the
 * display, too.
 *
 * It turns out that the CPU which is allowed to print ends up pausing for
 * the right duration, whereas all the other CPUs pause for twice as long:
 * once in oops_enter(), once in oops_exit().
 */
void oops_enter(void)
{
        debug_locks_off(); /* can't trust the integrity of the kernel anymore */
        do_oops_enter_exit();
}

/*
 * 64-bit random ID for oopses:
 */
static u64 oops_id;

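/*
 * The first call (normally the late_initcall below) seeds oops_id with
 * random bytes; every later call from print_oops_end_marker() just
 * increments it, so each "end trace" marker carries a distinct value
 * within a single boot.
 */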
static int init_oops_id(void)
{
        if (!oops_id)
                get_random_bytes(&oops_id, sizeof(oops_id));
        else
                oops_id++;

        return 0;
}
late_initcall(init_oops_id);

static void print_oops_end_marker(void)
{
        init_oops_id();
        printk(KERN_WARNING "---[ end trace %016llx ]---\n",
                (unsigned long long)oops_id);
}

/*
 * Called when the architecture exits its oops handler, after printing
 * everything.
 */
void oops_exit(void)
{
        do_oops_enter_exit();
        print_oops_end_marker();
}

#ifdef WANT_WARN_ON_SLOWPATH
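/*
 * Out-of-line slow path behind the generic WARN()/WARN_ON() macros;
 * architectures that do not supply their own __WARN() reach it through
 * include/asm-generic/bug.h, which is also where WANT_WARN_ON_SLOWPATH is
 * expected to come from.
 */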
void warn_slowpath(const char *file, int line, const char *fmt, ...)
{
        va_list args;
        char function[KSYM_SYMBOL_LEN];
        unsigned long caller = (unsigned long)__builtin_return_address(0);
        const char *board;

        sprint_symbol(function, caller);

        printk(KERN_WARNING "------------[ cut here ]------------\n");
        printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file,
                line, function);
        board = dmi_get_system_info(DMI_PRODUCT_NAME);
        if (board)
                printk(KERN_WARNING "Hardware name: %s\n", board);

        if (fmt) {
                va_start(args, fmt);
                vprintk(fmt, args);
                va_end(args);
        }

        print_modules();
        dump_stack();
        print_oops_end_marker();
        add_taint(TAINT_WARN);
}
EXPORT_SYMBOL(warn_slowpath);
#endif

#ifdef CONFIG_CC_STACKPROTECTOR

/*
 * Called when gcc's -fstack-protector feature is used, and
 * gcc detects corruption of the on-stack canary value
 */
void __stack_chk_fail(void)
{
        panic("stack-protector: Kernel stack is corrupted in: %p\n",
                __builtin_return_address(0));
}
EXPORT_SYMBOL(__stack_chk_fail);

#endif

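/*
 * panic_timeout and pause_on_oops are core kernel parameters: "panic=<secs>"
 * and "pause_on_oops=<secs>" on the boot command line and, with mode 0644,
 * writable at run time under /sys/module/kernel/parameters/.
 */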
core_param(panic, panic_timeout, int, 0644);
core_param(pause_on_oops, pause_on_oops, int, 0644);