/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
5 #include <linux/kallsyms.h>
6 #include <linux/kprobes.h>
7 #include <linux/uaccess.h>
8 #include <linux/utsname.h>
9 #include <linux/hardirq.h>
10 #include <linux/kdebug.h>
11 #include <linux/module.h>
12 #include <linux/ptrace.h>
13 #include <linux/ftrace.h>
14 #include <linux/kexec.h>
15 #include <linux/bug.h>
16 #include <linux/nmi.h>
17 #include <linux/sysfs.h>
19 #include <asm/stacktrace.h>
22 int panic_on_unrecovered_nmi
;
24 unsigned int code_bytes
= 64;
25 int kstack_depth_to_print
= 3 * STACKSLOTS_PER_LINE
;
26 static int die_counter
;
/*
 * Print one backtrace entry at the given log level.
 *
 * @address:  kernel text address to print
 * @reliable: non-zero if the unwinder is confident this is a real
 *            return address; unreliable entries are prefixed with "? "
 * @log_lvl:  printk log-level prefix string
 *
 * %pB prints the symbol adjusted for backtraces (address - 1), which
 * gives the correct symbol for the call site rather than the return
 * point.
 */
static void printk_stack_address(unsigned long address, int reliable,
				 char *log_lvl)
{
	touch_nmi_watchdog();
	printk("%s [<%p>] %s%pB\n",
	       log_lvl, (void *)address, reliable ? "" : "? ",
	       (void *)address);
}
/*
 * Print a single address with its symbolic name, continuing the
 * current printk line (used e.g. for the "RIP " summary in __die).
 */
void printk_address(unsigned long address)
{
	pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
}
43 * x86-64 can have up to three kernel stacks:
46 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
49 static inline int valid_stack_ptr(struct task_struct
*task
,
50 void *p
, unsigned int size
, void *end
)
52 void *t
= task_stack_page(task
);
54 if (p
< end
&& p
>= (end
-THREAD_SIZE
))
59 return p
>= t
&& p
< t
+ THREAD_SIZE
- size
;
63 print_context_stack(struct task_struct
*task
,
64 unsigned long *stack
, unsigned long bp
,
65 const struct stacktrace_ops
*ops
, void *data
,
66 unsigned long *end
, int *graph
)
68 struct stack_frame
*frame
= (struct stack_frame
*)bp
;
71 * If we overflowed the stack into a guard page, jump back to the
72 * bottom of the usable stack.
74 if ((unsigned long)task_stack_page(task
) - (unsigned long)stack
<
76 stack
= (unsigned long *)task_stack_page(task
);
78 while (valid_stack_ptr(task
, stack
, sizeof(*stack
), end
)) {
79 unsigned long addr
= *stack
;
81 if (__kernel_text_address(addr
)) {
82 unsigned long real_addr
;
85 if ((unsigned long) stack
== bp
+ sizeof(long)) {
87 frame
= frame
->next_frame
;
88 bp
= (unsigned long) frame
;
92 * When function graph tracing is enabled for a
93 * function, its return address on the stack is
94 * replaced with the address of an ftrace handler
95 * (return_to_handler). In that case, before printing
96 * the "real" address, we want to print the handler
97 * address as an "unreliable" hint that function graph
98 * tracing was involved.
100 real_addr
= ftrace_graph_ret_addr(task
, graph
, addr
,
102 if (real_addr
!= addr
)
103 ops
->address(data
, addr
, 0);
105 ops
->address(data
, real_addr
, reliable
);
111 EXPORT_SYMBOL_GPL(print_context_stack
);
114 print_context_stack_bp(struct task_struct
*task
,
115 unsigned long *stack
, unsigned long bp
,
116 const struct stacktrace_ops
*ops
, void *data
,
117 unsigned long *end
, int *graph
)
119 struct stack_frame
*frame
= (struct stack_frame
*)bp
;
120 unsigned long *retp
= &frame
->return_address
;
122 while (valid_stack_ptr(task
, retp
, sizeof(*retp
), end
)) {
123 unsigned long addr
= *retp
;
124 unsigned long real_addr
;
126 if (!__kernel_text_address(addr
))
129 real_addr
= ftrace_graph_ret_addr(task
, graph
, addr
, retp
);
130 if (ops
->address(data
, real_addr
, 1))
133 frame
= frame
->next_frame
;
134 retp
= &frame
->return_address
;
137 return (unsigned long)frame
;
139 EXPORT_SYMBOL_GPL(print_context_stack_bp
);
/*
 * stacktrace_ops .stack callback: announce a transition onto a named
 * stack (e.g. "<IRQ>") at the current log level carried in @data.
 * Returns 0 to continue the walk.
 */
static int print_trace_stack(void *data, char *name)
{
	printk("%s <%s> ", (char *)data, name);
	return 0;
}
/*
 * Print one address/symbol entry per line.
 *
 * stacktrace_ops .address callback: @data carries the log-level prefix
 * string.  Returns 0 to continue the walk.
 */
static int print_trace_address(void *data, unsigned long addr, int reliable)
{
	printk_stack_address(addr, reliable, data);
	return 0;
}
156 static const struct stacktrace_ops print_trace_ops
= {
157 .stack
= print_trace_stack
,
158 .address
= print_trace_address
,
159 .walk_stack
= print_context_stack
,
163 show_trace_log_lvl(struct task_struct
*task
, struct pt_regs
*regs
,
164 unsigned long *stack
, unsigned long bp
, char *log_lvl
)
166 printk("%sCall Trace:\n", log_lvl
);
167 dump_trace(task
, regs
, stack
, bp
, &print_trace_ops
, log_lvl
);
170 void show_stack(struct task_struct
*task
, unsigned long *sp
)
172 unsigned long bp
= 0;
175 * Stack frames below this one aren't interesting. Don't show them
176 * if we're printing for %current.
178 if (!sp
&& (!task
|| task
== current
)) {
179 sp
= get_stack_pointer(current
, NULL
);
180 bp
= (unsigned long)get_frame_pointer(current
, NULL
);
183 show_stack_log_lvl(task
, NULL
, sp
, bp
, "");
186 void show_stack_regs(struct pt_regs
*regs
)
188 show_stack_log_lvl(current
, regs
, NULL
, 0, "");
191 static arch_spinlock_t die_lock
= __ARCH_SPIN_LOCK_UNLOCKED
;
192 static int die_owner
= -1;
193 static unsigned int die_nest_count
;
195 unsigned long oops_begin(void)
202 /* racy, but better than risking deadlock. */
203 raw_local_irq_save(flags
);
204 cpu
= smp_processor_id();
205 if (!arch_spin_trylock(&die_lock
)) {
206 if (cpu
== die_owner
)
207 /* nested oops. should stop eventually */;
209 arch_spin_lock(&die_lock
);
217 EXPORT_SYMBOL_GPL(oops_begin
);
218 NOKPROBE_SYMBOL(oops_begin
);
220 void __noreturn
rewind_stack_do_exit(int signr
);
222 void oops_end(unsigned long flags
, struct pt_regs
*regs
, int signr
)
224 if (regs
&& kexec_should_crash(current
))
229 add_taint(TAINT_DIE
, LOCKDEP_NOW_UNRELIABLE
);
232 /* Nest count reaches zero, release the lock. */
233 arch_spin_unlock(&die_lock
);
234 raw_local_irq_restore(flags
);
240 panic("Fatal exception in interrupt");
242 panic("Fatal exception");
245 * We're not going to return, but we might be on an IST stack or
246 * have very little stack space left. Rewind the stack and kill
249 rewind_stack_do_exit(signr
);
251 NOKPROBE_SYMBOL(oops_end
);
253 int __die(const char *str
, struct pt_regs
*regs
, long err
)
260 "%s: %04lx [#%d]%s%s%s%s\n", str
, err
& 0xffff, ++die_counter
,
261 IS_ENABLED(CONFIG_PREEMPT
) ? " PREEMPT" : "",
262 IS_ENABLED(CONFIG_SMP
) ? " SMP" : "",
263 debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
264 IS_ENABLED(CONFIG_KASAN
) ? " KASAN" : "");
266 if (notify_die(DIE_OOPS
, str
, regs
, err
,
267 current
->thread
.trap_nr
, SIGSEGV
) == NOTIFY_STOP
)
273 if (user_mode(regs
)) {
275 ss
= regs
->ss
& 0xffff;
277 sp
= kernel_stack_pointer(regs
);
280 printk(KERN_EMERG
"EIP: [<%08lx>] ", regs
->ip
);
281 print_symbol("%s", regs
->ip
);
282 printk(" SS:ESP %04x:%08lx\n", ss
, sp
);
284 /* Executive summary in case the oops scrolled away */
285 printk(KERN_ALERT
"RIP ");
286 printk_address(regs
->ip
);
287 printk(" RSP <%016lx>\n", regs
->sp
);
291 NOKPROBE_SYMBOL(__die
);
294 * This is gone through when something in the kernel has done something bad
295 * and is about to be terminated:
297 void die(const char *str
, struct pt_regs
*regs
, long err
)
299 unsigned long flags
= oops_begin();
302 if (!user_mode(regs
))
303 report_bug(regs
->ip
, regs
);
305 if (__die(str
, regs
, err
))
307 oops_end(flags
, regs
, sig
);
310 static int __init
kstack_setup(char *s
)
318 ret
= kstrtoul(s
, 0, &val
);
321 kstack_depth_to_print
= val
;
324 early_param("kstack", kstack_setup
);
326 static int __init
code_bytes_setup(char *s
)
334 ret
= kstrtoul(s
, 0, &val
);
339 if (code_bytes
> 8192)
344 __setup("code_bytes=", code_bytes_setup
);