Commit | Line | Data |
---|---|---|
6fcbede3 AH |
1 | /* |
2 | * Copyright (C) 1991, 1992 Linus Torvalds | |
3 | * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs | |
4 | */ | |
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/kexec.h>
#include <linux/sysfs.h>
#include <linux/bug.h>
#include <linux/nmi.h>

#include <asm/stacktrace.h>

#include "dumpstack.h"
6fcbede3 | 20 | |
b8030906 IM |
/*
 * Number of the last "#DB[?]" sub-stack slot: the IST debug stack is
 * DEBUG_STKSZ bytes but is accounted in EXCEPTION_STKSZ-sized pieces,
 * so it may occupy several ID slots beyond N_EXCEPTION_STACKS.
 */
#define N_EXCEPTION_STACKS_END \
	(N_EXCEPTION_STACKS + DEBUG_STKSZ/EXCEPTION_STKSZ - 2)

/*
 * Human-readable ID for each x86-64 IST exception stack.  Deliberately
 * NOT const: when the debug stack is larger than a regular exception
 * stack, in_exception_stack() patches the '?' in the "#DB[?]" entries
 * below with the sub-stack number at runtime.
 */
static char x86_stack_ids[][8] = {
	[ DEBUG_STACK-1 ] = "#DB",
	[ NMI_STACK-1 ] = "NMI",
	[ DOUBLEFAULT_STACK-1 ] = "#DF",
	[ STACKFAULT_STACK-1 ] = "#SS",
	[ MCE_STACK-1 ] = "#MC",
#if DEBUG_STKSZ > EXCEPTION_STKSZ
	/* extra slots naming the lower portions of the larger #DB stack */
	[ N_EXCEPTION_STACKS ...
	  N_EXCEPTION_STACKS_END ] = "#DB[?]"
#endif
};
0406ca6d FW |
35 | |
36 | int x86_is_stack_id(int id, char *name) | |
37 | { | |
38 | return x86_stack_ids[id - 1] == name; | |
39 | } | |
40 | ||
/*
 * in_exception_stack() - classify 'stack' against this CPU's IST
 * exception stacks.
 *
 * @cpu:   CPU whose orig_ist table to consult
 * @stack: the stack address being classified
 * @usedp: in/out bitmap of exception stacks already walked; a repeat
 *         visit means the stack chain loops (corruption), so bail out
 * @idp:   out parameter: name of the matched stack (entry of
 *         x86_stack_ids[])
 *
 * Returns the (exclusive) end address of the exception stack that
 * contains 'stack', or NULL if it is in none of them.
 */
static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
					unsigned *usedp, char **idp)
{
	unsigned k;

	/*
	 * Iterate over all exception stacks, and figure out whether
	 * 'stack' is in one of them:
	 */
	for (k = 0; k < N_EXCEPTION_STACKS; k++) {
		/* ist[k] holds the TOP (end) of exception stack k */
		unsigned long end = per_cpu(orig_ist, cpu).ist[k];
		/*
		 * Is 'stack' above this exception frame's end?
		 * If yes then skip to the next frame.
		 */
		if (stack >= end)
			continue;
		/*
		 * Is 'stack' above this exception frame's start address?
		 * If yes then we found the right frame.
		 */
		if (stack >= end - EXCEPTION_STKSZ) {
			/*
			 * Make sure we only iterate through an exception
			 * stack once. If it comes up for the second time
			 * then there's something wrong going on - just
			 * break out and return NULL:
			 */
			if (*usedp & (1U << k))
				break;
			*usedp |= 1U << k;
			*idp = x86_stack_ids[k];
			return (unsigned long *)end;
		}
		/*
		 * If this is a debug stack, and if it has a larger size than
		 * the usual exception stacks, then 'stack' might still
		 * be within the lower portion of the debug stack:
		 */
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
			unsigned j = N_EXCEPTION_STACKS - 1;

			/*
			 * Black magic. A large debug stack is composed of
			 * multiple exception stack entries, which we
			 * iterate through now. Dont look:
			 */
			do {
				++j;
				end -= EXCEPTION_STKSZ;
				/* patch the '?' of "#DB[?]" with the sub-stack number */
				x86_stack_ids[j][4] = '1' +
						(j - N_EXCEPTION_STACKS);
			} while (stack < end - EXCEPTION_STKSZ);
			if (*usedp & (1U << j))
				break;
			*usedp |= 1U << j;
			*idp = x86_stack_ids[j];
			return (unsigned long *)end;
		}
#endif
	}
	return NULL;
}
105 | ||
af2d8289 FW |
/*
 * Is 'stack' inside the per-cpu hardirq stack, i.e. in the half-open
 * range [irq_stack, irq_stack_end)?
 */
static inline int
in_irq_stack(unsigned long *stack, unsigned long *irq_stack,
	     unsigned long *irq_stack_end)
{
	if (stack < irq_stack)
		return 0;
	return stack < irq_stack_end;
}
112 | ||
/*
 * Fix up the frame-pointer link when leaving the irq stack for the
 * previous (interrupted) one.  If the previous frame also lives on the
 * irq stack, bp in the irq stack's first frame already points at it.
 * Otherwise the entry code (see save_args()) first saved the old bp
 * and then built a fresh bp linking to that save slot, so we must
 * follow one extra link here.  Without frame pointers there is nothing
 * to fix up.
 */
static inline unsigned long
fixup_bp_irq_link(unsigned long bp, unsigned long *stack,
		  unsigned long *irq_stack, unsigned long *irq_stack_end)
{
#ifdef CONFIG_FRAME_POINTER
	if (!in_irq_stack(stack, irq_stack, irq_stack_end)) {
		struct stack_frame *frame = (struct stack_frame *)bp;

		return (unsigned long)frame->next_frame;
	}
#endif
	return bp;
}
134 | ||
6fcbede3 AH |
/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */

/*
 * dump_trace() - walk a kernel backtrace across every stack it passes
 * through (IST exception stacks, the per-cpu irq stack, and finally the
 * process stack), reporting stack switches and addresses through the
 * caller-supplied 'ops' callbacks.
 *
 * @task:  task to trace; NULL means current
 * @regs:  register state at the point of the event (may be NULL)
 * @stack: starting stack pointer; NULL means "figure it out"
 * @bp:    starting frame pointer (0 means "figure it out" when frame
 *         pointers are compiled in)
 * @ops:   callbacks: ->stack() on each stack switch (negative return
 *         aborts the walk), ->walk_stack()/print_context_stack() per
 *         stack segment
 * @data:  opaque cookie passed through to the callbacks
 */
void dump_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data)
{
	/* get_cpu() disables preemption so the per-cpu reads stay valid */
	const unsigned cpu = get_cpu();
	unsigned long *irq_stack_end =
		(unsigned long *)per_cpu(irq_stack_ptr, cpu);
	unsigned used = 0;	/* bitmap of exception stacks already walked */
	struct thread_info *tinfo;
	int graph = 0;

	if (!task)
		task = current;

	if (!stack) {
		/* for current we start right here, on our own stack */
		unsigned long dummy;
		stack = &dummy;
		if (task && task != current)
			stack = (unsigned long *)task->thread.sp;
	}

#ifdef CONFIG_FRAME_POINTER
	if (!bp) {
		if (task == current) {
			/* Grab bp right from our regs */
			get_bp(bp);
		} else {
			/* bp is the last reg pushed by switch_to */
			bp = *(unsigned long *) task->thread.sp;
		}
	}
#endif

	/*
	 * Print function call entries in all stacks, starting at the
	 * current stack address. If the stacks consist of nested
	 * exceptions
	 */
	tinfo = task_thread_info(task);
	for (;;) {
		char *id;
		unsigned long *estack_end;
		estack_end = in_exception_stack(cpu, (unsigned long)stack,
						&used, &id);

		if (estack_end) {
			if (ops->stack(data, id) < 0)
				break;

			bp = ops->walk_stack(tinfo, stack, bp, ops,
					     data, estack_end, &graph);
			ops->stack(data, "<EOE>");
			/*
			 * We link to the next stack via the
			 * second-to-last pointer (index -2 to end) in the
			 * exception stack:
			 */
			stack = (unsigned long *) estack_end[-2];
			continue;
		}
		if (irq_stack_end) {
			unsigned long *irq_stack;
			/* 64 bytes at the top are reserved; see entry code */
			irq_stack = irq_stack_end -
				(IRQ_STACK_SIZE - 64) / sizeof(*irq_stack);

			if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
				if (ops->stack(data, "IRQ") < 0)
					break;
				bp = print_context_stack(tinfo, stack, bp,
					ops, data, irq_stack_end, &graph);
				/*
				 * We link to the next stack (which would be
				 * the process stack normally) the last
				 * pointer (index -1 to end) in the IRQ stack:
				 */
				stack = (unsigned long *) (irq_stack_end[-1]);
				bp = fixup_bp_irq_link(bp, stack, irq_stack,
						       irq_stack_end);
				/* visit the irq stack at most once */
				irq_stack_end = NULL;
				ops->stack(data, "EOI");
				continue;
			}
		}
		/* neither an exception stack nor the irq stack: done */
		break;
	}

	/*
	 * This handles the process stack:
	 */
	bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
	put_cpu();
}
EXPORT_SYMBOL(dump_trace);
235 | ||
878719e8 | 236 | void |
6fcbede3 | 237 | show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, |
b8030906 | 238 | unsigned long *sp, unsigned long bp, char *log_lvl) |
6fcbede3 | 239 | { |
67f2de0b IM |
240 | unsigned long *irq_stack_end; |
241 | unsigned long *irq_stack; | |
6fcbede3 | 242 | unsigned long *stack; |
67f2de0b | 243 | int cpu; |
6fcbede3 | 244 | int i; |
67f2de0b IM |
245 | |
246 | preempt_disable(); | |
247 | cpu = smp_processor_id(); | |
248 | ||
249 | irq_stack_end = (unsigned long *)(per_cpu(irq_stack_ptr, cpu)); | |
250 | irq_stack = (unsigned long *)(per_cpu(irq_stack_ptr, cpu) - IRQ_STACK_SIZE); | |
6fcbede3 AH |
251 | |
252 | /* | |
67f2de0b IM |
253 | * Debugging aid: "show_stack(NULL, NULL);" prints the |
254 | * back trace for this cpu: | |
6fcbede3 | 255 | */ |
6fcbede3 AH |
256 | if (sp == NULL) { |
257 | if (task) | |
258 | sp = (unsigned long *)task->thread.sp; | |
259 | else | |
260 | sp = (unsigned long *)&sp; | |
261 | } | |
262 | ||
263 | stack = sp; | |
264 | for (i = 0; i < kstack_depth_to_print; i++) { | |
26f80bd6 BG |
265 | if (stack >= irq_stack && stack <= irq_stack_end) { |
266 | if (stack == irq_stack_end) { | |
267 | stack = (unsigned long *) (irq_stack_end[-1]); | |
6fcbede3 AH |
268 | printk(" <EOI> "); |
269 | } | |
270 | } else { | |
271 | if (((long) stack & (THREAD_SIZE-1)) == 0) | |
272 | break; | |
273 | } | |
8a541665 | 274 | if (i && ((i % STACKSLOTS_PER_LINE) == 0)) |
ca0a8164 | 275 | printk("\n%s", log_lvl); |
6fcbede3 AH |
276 | printk(" %016lx", *stack++); |
277 | touch_nmi_watchdog(); | |
278 | } | |
67f2de0b IM |
279 | preempt_enable(); |
280 | ||
6fcbede3 AH |
281 | printk("\n"); |
282 | show_trace_log_lvl(task, regs, sp, bp, log_lvl); | |
283 | } | |
284 | ||
6fcbede3 AH |
/*
 * show_registers() - oops-time dump: registers, process info, and (for
 * faults taken in kernel mode) the kernel stack plus the code bytes
 * surrounding the faulting instruction.
 */
void show_registers(struct pt_regs *regs)
{
	int i;
	unsigned long sp;
	const int cpu = smp_processor_id();
	struct task_struct *cur = current;

	sp = regs->sp;
	printk("CPU %d ", cpu);
	__show_regs(regs, 1);
	printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
		cur->comm, cur->pid, task_thread_info(cur), cur);

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (!user_mode(regs)) {
		/* dump ~2/3 of code_bytes before RIP, the rest after it */
		unsigned int code_prologue = code_bytes * 43 / 64;
		unsigned int code_len = code_bytes;
		unsigned char c;
		u8 *ip;

		printk(KERN_EMERG "Stack:\n");
		show_stack_log_lvl(NULL, regs, (unsigned long *)sp,
				regs->bp, KERN_EMERG);

		printk(KERN_EMERG "Code: ");

		ip = (u8 *)regs->ip - code_prologue;
		/* fall back if the prologue window isn't a readable kernel address */
		if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
			/* try starting at IP */
			ip = (u8 *)regs->ip;
			code_len = code_len - code_prologue + 1;
		}
		for (i = 0; i < code_len; i++, ip++) {
			if (ip < (u8 *)PAGE_OFFSET ||
			    probe_kernel_address(ip, c)) {
				printk(" Bad RIP value.");
				break;
			}
			/* the byte at RIP itself is bracketed: <xx> */
			if (ip == (u8 *)regs->ip)
				printk("<%02x> ", c);
			else
				printk("%02x ", c);
		}
	}
	printk("\n");
}
334 | ||
/*
 * BUG() on x86 is a ud2 instruction (bytes 0x0f 0x0b, i.e. 0x0b0f as a
 * little-endian u16).  Report whether @ip points at one, so the
 * invalid-opcode trap handler can tell a real BUG() from a stray fault.
 *
 * @ip is a *kernel* text address, so read it with probe_kernel_address()
 * — the same safe-kernel-read helper show_registers() above uses —
 * instead of __copy_from_user(), which is specified for user pointers
 * and may reject kernel addresses.
 */
int is_valid_bugaddr(unsigned long ip)
{
	unsigned short ud2;

	if (probe_kernel_address((unsigned short *)ip, ud2))
		return 0;

	return ud2 == 0x0b0f;
}