/*
 *  linux/arch/x86-64/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/nmi.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/unwind.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/kdebug.h>
#include <asm/processor.h>
#include <asm/unwind.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void double_fault(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void reserved(void);
asmlinkage void alignment_check(void);
asmlinkage void machine_check(void);
asmlinkage void spurious_interrupt_bug(void);
ATOMIC_NOTIFIER_HEAD(die_chain);
EXPORT_SYMBOL(die_chain);

int register_die_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&die_chain, nb);
}
EXPORT_SYMBOL(register_die_notifier); /* used modularly by kdb */

int unregister_die_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&die_chain, nb);
}
EXPORT_SYMBOL(unregister_die_notifier); /* used modularly by kdb */
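/*
 * Usage sketch (not part of this file): a debugger module can hook the
 * die chain like this. The callback runs atomically and must not sleep;
 * "my_die_handler" and "my_nb" are hypothetical names.
 *
 *	static int my_die_handler(struct notifier_block *nb,
 *				  unsigned long val, void *data)
 *	{
 *		struct die_args *args = data;
 *		if (val == DIE_OOPS)
 *			printk(KERN_ERR "oops at rip %lx\n", args->regs->rip);
 *		return NOTIFY_DONE;
 *	}
 *	static struct notifier_block my_nb = { .notifier_call = my_die_handler };
 *	...
 *	register_die_notifier(&my_nb);
 */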
static inline void conditional_sti(struct pt_regs *regs)
{
	if (regs->eflags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
	preempt_disable();
	if (regs->eflags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
	if (regs->eflags & X86_EFLAGS_IF)
		local_irq_disable();
	/* Make sure to not schedule here because we could be running
	   on an exception stack. */
	preempt_enable_no_resched();
}
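/*
 * Note: preempt_conditional_sti()/preempt_conditional_cli() bracket code
 * running on an IST exception stack. They only re-enable interrupts when
 * the interrupted context had them enabled, and they hold off preemption
 * for the whole window so the handler cannot be scheduled away from the
 * per-cpu exception stack.
 */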
static int kstack_depth_to_print = 12;
static int call_trace = 1;
#ifdef CONFIG_KALLSYMS
# include <linux/kallsyms.h>
void printk_address(unsigned long address)
{
	unsigned long offset = 0, symsize;
	const char *symname;
	char *modname;
	char *delim = ":";
	static char namebuf[128];

	symname = kallsyms_lookup(address, &symsize, &offset,
					&modname, namebuf);
	if (!symname) {
		printk(" [<%016lx>]\n", address);
		return;
	}
	if (!modname)
		modname = delim = "";
	printk(" [<%016lx>] %s%s%s%s+0x%lx/0x%lx\n",
		address, delim, modname, delim, symname, offset, symsize);
}
#else
void printk_address(unsigned long address)
{
	printk(" [<%016lx>]\n", address);
}
#endif
static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
					unsigned *usedp, const char **idp)
{
	static char ids[][8] = {
		[DEBUG_STACK - 1] = "#DB",
		[NMI_STACK - 1] = "NMI",
		[DOUBLEFAULT_STACK - 1] = "#DF",
		[STACKFAULT_STACK - 1] = "#SS",
		[MCE_STACK - 1] = "#MC",
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		[N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
#endif
	};
	unsigned k;

	/*
	 * Iterate over all exception stacks, and figure out whether
	 * 'stack' is in one of them:
	 */
	for (k = 0; k < N_EXCEPTION_STACKS; k++) {
		unsigned long end;

		/*
		 * set 'end' to the end of the exception stack.
		 */
		switch (k + 1) {
		/*
		 * TODO: this block is not needed, I think, because
		 * setup64.c:cpu_init() sets up t->ist[DEBUG_STACK]
		 * properly too.
		 */
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		case DEBUG_STACK:
			end = cpu_pda(cpu)->debugstack + DEBUG_STKSZ;
			break;
#endif
		default:
			end = per_cpu(init_tss, cpu).ist[k];
			break;
		}
		/*
		 * Is 'stack' above this exception frame's end?
		 * If yes then skip to the next frame.
		 */
		if (stack >= end)
			continue;
		/*
		 * Is 'stack' above this exception frame's start address?
		 * If yes then we found the right frame.
		 */
		if (stack >= end - EXCEPTION_STKSZ) {
			/*
			 * Make sure we only iterate through an exception
			 * stack once. If it comes up for the second time
			 * then there's something wrong going on - just
			 * break out and return NULL:
			 */
			if (*usedp & (1U << k))
				break;
			*usedp |= 1U << k;
			*idp = ids[k];
			return (unsigned long *)end;
		}
		/*
		 * If this is a debug stack, and if it has a larger size than
		 * the usual exception stacks, then 'stack' might still
		 * be within the lower portion of the debug stack:
		 */
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
			unsigned j = N_EXCEPTION_STACKS - 1;

			/*
			 * Black magic. A large debug stack is composed of
			 * multiple exception stack entries, which we
			 * iterate through now. Don't look:
			 */
			do {
				++j;
				end -= EXCEPTION_STKSZ;
				ids[j][4] = '1' + (j - N_EXCEPTION_STACKS);
			} while (stack < end - EXCEPTION_STKSZ);
			if (*usedp & (1U << j))
				break;
			*usedp |= 1U << j;
			*idp = ids[j];
			return (unsigned long *)end;
		}
#endif
	}
	return NULL;
}
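/*
 * Layout note for the #if branch above: when DEBUG_STKSZ is a multiple of
 * EXCEPTION_STKSZ, the debug stack spans several EXCEPTION_STKSZ-sized
 * slots. The "#DB[?]" template entries get their '?' (index 4) patched to
 * '1', '2', ... so each slot reports a distinct name in the trace.
 */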
static int show_trace_unwind(struct unwind_frame_info *info, void *context)
{
	int n = 0;

	while (unwind(info) == 0 && UNW_PC(info)) {
		++n;
		printk_address(UNW_PC(info));
		if (arch_unw_user_mode(info))
			break;
	}
	return n;
}
/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */
void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack)
{
	const unsigned cpu = safe_smp_processor_id();
	unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
	unsigned used = 0;

	printk("\nCall Trace:\n");

	if (!tsk)
		tsk = current;

	if (call_trace >= 0) {
		int unw_ret = 0;
		struct unwind_frame_info info;

		if (regs) {
			if (unwind_init_frame_info(&info, tsk, regs) == 0)
				unw_ret = show_trace_unwind(&info, NULL);
		} else if (tsk == current)
			unw_ret = unwind_init_running(&info, show_trace_unwind, NULL);
		else {
			if (unwind_init_blocked(&info, tsk) == 0)
				unw_ret = show_trace_unwind(&info, NULL);
		}
		if (unw_ret > 0) {
			if (call_trace > 0)
				return;
			printk("Legacy call trace:");
		}
	}
	/*
	 * Print function call entries within a stack. 'cond' is the
	 * "end of stackframe" condition, that the 'stack++'
	 * iteration will eventually trigger.
	 */
#define HANDLE_STACK(cond) \
	do while (cond) { \
		unsigned long addr = *stack++; \
		if (kernel_text_address(addr)) { \
			/* \
			 * If the address is either in the text segment of the \
			 * kernel, or in the region which contains vmalloc'ed \
			 * memory, it *may* be the address of a calling \
			 * routine; if so, print it so that someone tracing \
			 * down the cause of the crash will be able to figure \
			 * out the call path that was taken. \
			 */ \
			printk_address(addr); \
		} \
	} while (0)
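/*
 * For illustration: HANDLE_STACK (stack < estack_end) below expands to a
 * loop that pops one word at a time off the current stack and prints it
 * whenever it looks like a kernel text address. The surrounding
 * do { ... } while (0) only makes the macro statement-safe.
 */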
	/*
	 * Print function call entries in all stacks, starting at the
	 * current stack address. If the stacks consist of nested
	 * exceptions
	 */
	for ( ; ; ) {
		const char *id;
		unsigned long *estack_end;
		estack_end = in_exception_stack(cpu, (unsigned long)stack,
						&used, &id);

		if (estack_end) {
			printk(" <%s>", id);
			HANDLE_STACK (stack < estack_end);
			printk(" <EOE>");
			/*
			 * We link to the next stack via the
			 * second-to-last pointer (index -2 to end) in the
			 * exception stack:
			 */
			stack = (unsigned long *) estack_end[-2];
			continue;
		}
		if (irqstack_end) {
			unsigned long *irqstack;
			irqstack = irqstack_end -
				(IRQSTACKSIZE - 64) / sizeof(*irqstack);

			if (stack >= irqstack && stack < irqstack_end) {
				printk(" <IRQ>");
				HANDLE_STACK (stack < irqstack_end);
				/*
				 * We link to the next stack (which would be
				 * the process stack normally) via the last
				 * pointer (index -1 to end) in the IRQ stack:
				 */
				stack = (unsigned long *) (irqstack_end[-1]);
				irqstack_end = NULL;
				printk(" <EOI>");
				continue;
			}
		}
		break;
	}

	/*
	 * This prints the process stack:
	 */
	HANDLE_STACK (((long) stack & (THREAD_SIZE-1)) != 0);
#undef HANDLE_STACK

	printk("\n");
}
static void _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *rsp)
{
	unsigned long *stack;
	int i;
	const int cpu = safe_smp_processor_id();
	unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr);
	unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);

	// debugging aid: "show_stack(NULL, NULL);" prints the
	// back trace for this cpu.

	if (rsp == NULL) {
		if (tsk)
			rsp = (unsigned long *)tsk->thread.rsp;
		else
			rsp = (unsigned long *)&rsp;
	}

	stack = rsp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (stack >= irqstack && stack <= irqstack_end) {
			if (stack == irqstack_end) {
				stack = (unsigned long *) (irqstack_end[-1]);
				printk(" <EOI> ");
			}
		} else {
			if (((long) stack & (THREAD_SIZE-1)) == 0)
				break;
		}
		if (i && ((i % 4) == 0))
			printk("\n");
		printk(" %016lx", *stack++);
		touch_nmi_watchdog();
	}
	show_trace(tsk, regs, rsp);
}

void show_stack(struct task_struct *tsk, unsigned long *rsp)
{
	_show_stack(tsk, NULL, rsp);
}
/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	unsigned long dummy;
	show_trace(NULL, NULL, &dummy);
}

EXPORT_SYMBOL(dump_stack);
void show_registers(struct pt_regs *regs)
{
	int i;
	int in_kernel = !user_mode(regs);
	unsigned long rsp;
	const int cpu = safe_smp_processor_id();
	struct task_struct *cur = cpu_pda(cpu)->pcurrent;

	rsp = regs->rsp;

	printk("CPU %d ", cpu);
	__show_regs(regs);
	printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
		cur->comm, cur->pid, task_thread_info(cur), cur);

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (in_kernel) {
		printk("Stack: ");
		_show_stack(NULL, regs, (unsigned long*)rsp);

		printk("\nCode: ");
		if (regs->rip < PAGE_OFFSET)
			goto bad;

		for (i = 0; i < 20; i++) {
			unsigned char c;
			if (__get_user(c, &((unsigned char*)regs->rip)[i])) {
bad:
				printk(" Bad RIP value.");
				break;
			}
			printk("%02x ", c);
		}
	}
	printk("\n");
}
void handle_BUG(struct pt_regs *regs)
{
	struct bug_frame f;
	long len;
	const char *prefix = "";

	if (user_mode(regs))
		return;
	if (__copy_from_user(&f, (const void __user *) regs->rip,
			     sizeof(struct bug_frame)))
		return;
	if (f.filename >= 0 ||
	    f.ud2[0] != 0x0f || f.ud2[1] != 0x0b)
		return;
	len = __strnlen_user((char *)(long)f.filename, PATH_MAX) - 1;
	if (len < 0 || len >= PATH_MAX)
		f.filename = (int)(long)"unmapped filename";
	else if (len > 50) {
		f.filename += len - 50;
		prefix = "...";
	}
	printk("----------- [cut here ] --------- [please bite here ] ---------\n");
	printk(KERN_ALERT "Kernel BUG at %s%.50s:%d\n", prefix, (char *)(long)f.filename, f.line);
}
#ifdef CONFIG_BUG
void out_of_line_bug(void)
{
	BUG();
}
EXPORT_SYMBOL(out_of_line_bug);
#endif
static DEFINE_SPINLOCK(die_lock);
static int die_owner = -1;
static unsigned int die_nest_count;

unsigned __kprobes long oops_begin(void)
{
	int cpu = safe_smp_processor_id();
	unsigned long flags;

	/* racy, but better than risking deadlock. */
	local_irq_save(flags);
	if (!spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}

void __kprobes oops_end(unsigned long flags)
{
	die_owner = -1;
	bust_spinlocks(0);
	die_nest_count--;
	if (die_nest_count)
		/* We still own the lock */
		local_irq_restore(flags);
	else
		/* Nest count reaches zero, release the lock. */
		spin_unlock_irqrestore(&die_lock, flags);
	if (panic_on_oops)
		panic("Oops");
}
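/*
 * Locking note: oops_begin()/oops_end() serialize oops output across CPUs
 * with die_lock, but deliberately let the owning CPU re-enter (tracked by
 * die_nest_count) so that an oops taken while already oopsing still makes
 * it to the console instead of deadlocking.
 */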
void __kprobes __die(const char * str, struct pt_regs * regs, long err)
{
	static int die_counter;
	printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
	show_registers(regs);
	/* Executive summary in case the oops scrolled away */
	printk(KERN_ALERT "RIP ");
	printk_address(regs->rip);
	printk(" RSP <%016lx>\n", regs->rsp);
	if (kexec_should_crash(current))
		crash_kexec(regs);
}
void die(const char * str, struct pt_regs * regs, long err)
{
	unsigned long flags = oops_begin();

	handle_BUG(regs);
	__die(str, regs, err);
	oops_end(flags);
	do_exit(SIGSEGV);
}
void __kprobes die_nmi(char *str, struct pt_regs *regs)
{
	unsigned long flags = oops_begin();

	/*
	 * We are in trouble anyway, let's at least try
	 * to get a message out.
	 */
	printk(str, safe_smp_processor_id());
	show_registers(regs);
	if (kexec_should_crash(current))
		crash_kexec(regs);
	if (panic_on_timeout || panic_on_oops)
		panic("nmi watchdog");
	printk("console shuts up ...\n");
	oops_end(flags);
	nmi_exit();
	local_irq_enable();
	do_exit(SIGSEGV);
}
static void __kprobes do_trap(int trapnr, int signr, char *str,
			      struct pt_regs * regs, long error_code,
			      siginfo_t *info)
{
	struct task_struct *tsk = current;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = trapnr;

	if (user_mode(regs)) {
		if (exception_trace && unhandled_signal(tsk, signr))
			printk(KERN_INFO
			       "%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
			       tsk->comm, tsk->pid, str,
			       regs->rip, regs->rsp, error_code);

		if (info)
			force_sig_info(signr, info, tsk);
		else
			force_sig(signr, tsk);
		return;
	}

	/* kernel trap */
	{
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->rip);
		if (fixup)
			regs->rip = fixup->fixup;
		else
			die(str, regs, error_code);
		return;
	}
}
#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
							== NOTIFY_STOP) \
		return; \
	conditional_sti(regs); \
	do_trap(trapnr, signr, str, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void __user *)siaddr; \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
							== NOTIFY_STOP) \
		return; \
	conditional_sti(regs); \
	do_trap(trapnr, signr, str, regs, error_code, &info); \
}
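/*
 * For example, DO_ERROR( 4, SIGSEGV, "overflow", overflow) below expands
 * into an asmlinkage do_overflow() handler that runs the die chain,
 * conditionally re-enables interrupts, and then delivers SIGSEGV via
 * do_trap(). DO_ERROR_INFO additionally fills in a siginfo_t.
 */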
DO_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->rip)
DO_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->rip)
DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
DO_ERROR(18, SIGSEGV, "reserved", reserved)
/* Runs on IST stack */
asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
			12, SIGBUS) == NOTIFY_STOP)
		return;
	preempt_conditional_sti(regs);
	do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}
asmlinkage void do_double_fault(struct pt_regs * regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

	/* Return not checked because a double fault cannot be ignored */
	notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 8;

	/* This is always a kernel trap and never fixable (and thus must
	   never return). */
	for (;;)
		die(str, regs, error_code);
}
asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
						long error_code)
{
	struct task_struct *tsk = current;

	conditional_sti(regs);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 13;

	if (user_mode(regs)) {
		if (exception_trace && unhandled_signal(tsk, SIGSEGV))
			printk(KERN_INFO
		       "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
			       tsk->comm, tsk->pid,
			       regs->rip, regs->rsp, error_code);

		force_sig(SIGSEGV, tsk);
		return;
	}

	/* kernel gp */
	{
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->rip);
		if (fixup) {
			regs->rip = fixup->fixup;
			return;
		}
		if (notify_die(DIE_GPF, "general protection fault", regs,
				error_code, 13, SIGSEGV) == NOTIFY_STOP)
			return;
		die("general protection fault", regs, error_code);
	}
}
static __kprobes void
mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
	printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
	printk("You probably have a hardware problem with your RAM chips\n");

	/* Clear and disable the memory parity error line. */
	reason = (reason & 0xf) | 4;
	outb(reason, 0x61);
}
static __kprobes void
io_check_error(unsigned char reason, struct pt_regs * regs)
{
	printk("NMI: IOCK error (debug interrupt?)\n");
	show_registers(regs);

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & 0xf) | 8;
	outb(reason, 0x61);
	mdelay(2000);
	reason &= ~8;
	outb(reason, 0x61);
}
static __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
	printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
	printk("Dazed and confused, but trying to continue\n");
	printk("Do you have a strange power saving mode enabled?\n");
}
/* Runs on IST stack. This code must keep interrupts off all the time.
   Nested NMIs are prevented by the CPU. */
asmlinkage __kprobes void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int cpu;

	cpu = smp_processor_id();

	/* Only the BSP gets external NMIs from the system. */
	if (!cpu)
		reason = get_nmi_reason();

	if (!(reason & 0xc0)) {
		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
								== NOTIFY_STOP)
			return;
#ifdef CONFIG_X86_LOCAL_APIC
		/*
		 * Ok, so this is none of the documented NMI sources,
		 * so it must be the NMI watchdog.
		 */
		if (nmi_watchdog > 0) {
			nmi_watchdog_tick(regs, reason);
			return;
		}
#endif
		unknown_nmi_error(reason, regs);
		return;
	}
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
		return;

	/* AK: following checks seem to be broken on modern chipsets. FIXME */

	if (reason & 0x80)
		mem_parity_error(reason, regs);
	if (reason & 0x40)
		io_check_error(reason, regs);
}
/* runs on IST stack. */
asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
{
	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
		return;
	}
	preempt_conditional_sti(regs);
	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}
/* Help handler running on IST stack to switch back to user stack
   for scheduling or signal handling. The actual stack switch is done in
   entry.S */
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = eregs;
	/* Did already sync */
	if (eregs == (struct pt_regs *)eregs->rsp)
		;
	/* Exception from user space */
	else if (user_mode(eregs))
		regs = task_pt_regs(current);
	/* Exception from kernel and interrupts are enabled. Move to
	   kernel process stack. */
	else if (eregs->eflags & X86_EFLAGS_IF)
		regs = (struct pt_regs *)(eregs->rsp -= sizeof(struct pt_regs));
	if (eregs != regs)
		*regs = *eregs;
	return regs;
}
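/*
 * Note on sync_regs(): the final copy (*regs = *eregs) moves the saved
 * register frame off the per-cpu IST stack onto the chosen stack, so the
 * C handler can safely schedule or deliver signals afterwards.
 */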
/* runs on IST stack. */
asmlinkage void __kprobes do_debug(struct pt_regs * regs,
				   unsigned long error_code)
{
	unsigned long condition;
	struct task_struct *tsk = current;
	siginfo_t info;

	get_debugreg(condition, 6);

	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
						SIGTRAP) == NOTIFY_STOP)
		return;

	preempt_conditional_sti(regs);

	/* Mask out spurious debug traps due to lazy DR7 setting */
	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
		if (!tsk->thread.debugreg7) {
			goto clear_dr7;
		}
	}

	tsk->thread.debugreg6 = condition;

	/* Mask out spurious TF errors due to lazy TF clearing */
	if (condition & DR_STEP) {
		/*
		 * The TF error should be masked out only if the current
		 * process is not traced and if the TRAP flag has been set
		 * previously by a tracing process (condition detected by
		 * the PT_DTRACE flag); remember that the i386 TRAP flag
		 * can be modified by the process itself in user mode,
		 * allowing programs to debug themselves without the ptrace()
		 * interface.
		 */
		if (!user_mode(regs))
			goto clear_TF_reenable;
		/*
		 * Was the TF flag set by a debugger? If so, clear it now,
		 * so that register information is correct.
		 */
		if (tsk->ptrace & PT_DTRACE) {
			regs->eflags &= ~TF_MASK;
			tsk->ptrace &= ~PT_DTRACE;
		}
	}

	/* Ok, finally something we can handle */
	tsk->thread.trap_no = 1;
	tsk->thread.error_code = error_code;
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_BRKPT;
	info.si_addr = user_mode(regs) ? (void __user *)regs->rip : NULL;
	force_sig_info(SIGTRAP, &info, tsk);

clear_dr7:
	set_debugreg(0UL, 7);
	preempt_conditional_cli(regs);
	return;

clear_TF_reenable:
	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
	regs->eflags &= ~TF_MASK;
	preempt_conditional_cli(regs);
}
static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
{
	const struct exception_table_entry *fixup;
	fixup = search_exception_tables(regs->rip);
	if (fixup) {
		regs->rip = fixup->fixup;
		return 1;
	}
	notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
	/* Illegal floating point operation in the kernel */
	current->thread.trap_no = trapnr;
	die(str, regs, 0);
	return 0;
}
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
asmlinkage void do_coprocessor_error(struct pt_regs *regs)
{
	void __user *rip = (void __user *)(regs->rip);
	struct task_struct * task;
	siginfo_t info;
	unsigned short cwd, swd;

	conditional_sti(regs);
	if (!user_mode(regs) &&
	    kernel_math_error(regs, "kernel x87 math error", 16))
		return;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 16;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = rip;
	/*
	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
	 * status.  0x3f is the exception bits in these regs, 0x200 is the
	 * C1 reg you need in case of a stack fault, 0x040 is the stack
	 * fault bit.  We should only be taking one exception at a time,
	 * so if this combination doesn't produce any single exception,
	 * then we have a bad program that isn't synchronizing its FPU usage
	 * and it will suffer the consequences since we won't be able to
	 * fully reproduce the context of the exception
	 */
	cwd = get_fpu_cwd(task);
	swd = get_fpu_swd(task);
	switch (swd & ~cwd & 0x3f) {
		case 0x000:
		default:
			break;
		case 0x001: /* Invalid Op */
			/*
			 * swd & 0x240 == 0x040: Stack Underflow
			 * swd & 0x240 == 0x240: Stack Overflow
			 * User must clear the SF bit (0x40) if set
			 */
			info.si_code = FPE_FLTINV;
			break;
		case 0x002: /* Denormalize */
		case 0x010: /* Underflow */
			info.si_code = FPE_FLTUND;
			break;
		case 0x004: /* Zero Divide */
			info.si_code = FPE_FLTDIV;
			break;
		case 0x008: /* Overflow */
			info.si_code = FPE_FLTOVF;
			break;
		case 0x020: /* Precision */
			info.si_code = FPE_FLTRES;
			break;
	}
	force_sig_info(SIGFPE, &info, task);
}
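/*
 * Worked example for the decode above: the power-on x87 control word is
 * 0x037f, i.e. all six exception mask bits (0x3f) set, so swd & ~cwd & 0x3f
 * is 0 and no si_code is refined. If a program unmasks divide-by-zero
 * (clears bit 2 of cwd) and then divides by zero (swd bit 2 set), the
 * expression yields 0x004 and we report FPE_FLTDIV.
 */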
asmlinkage void bad_intr(void)
{
	printk("bad interrupt");
}
asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
{
	void __user *rip = (void __user *)(regs->rip);
	struct task_struct * task;
	siginfo_t info;
	unsigned short mxcsr;

	conditional_sti(regs);
	if (!user_mode(regs) &&
	    kernel_math_error(regs, "kernel simd math error", 19))
		return;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 19;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = rip;
	/*
	 * The SIMD FPU exceptions are handled a little differently, as there
	 * is only a single status/control register. Thus, to determine which
	 * unmasked exception was caught we must mask the exception mask bits
	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
	 */
	mxcsr = get_fpu_mxcsr(task);
	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
		case 0x000:
		default:
			break;
		case 0x001: /* Invalid Op */
			info.si_code = FPE_FLTINV;
			break;
		case 0x002: /* Denormalize */
		case 0x010: /* Underflow */
			info.si_code = FPE_FLTUND;
			break;
		case 0x004: /* Zero Divide */
			info.si_code = FPE_FLTDIV;
			break;
		case 0x008: /* Overflow */
			info.si_code = FPE_FLTOVF;
			break;
		case 0x020: /* Precision */
			info.si_code = FPE_FLTRES;
			break;
	}
	force_sig_info(SIGFPE, &info, task);
}
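/*
 * Worked example, analogous to the x87 case: MXCSR resets to 0x1f80 (all
 * mask bits set), so the switch expression is 0. With the divide-by-zero
 * mask (bit 9) cleared and the ZE flag (bit 2) set, mxcsr is 0x1d84:
 * (mxcsr & 0x1f80) >> 7 is 0x3b, and ~0x3b & 0x04 gives 0x004, FPE_FLTDIV.
 */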
asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs)
{
}

asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
{
}
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 */
asmlinkage void math_state_restore(void)
{
	struct task_struct *me = current;
	clts();			/* Allow maths ops (or we recurse) */

	if (!used_math())
		init_fpu(me);
	restore_fpu_checking(&me->thread.i387.fxsave);
	task_thread_info(me)->status |= TS_USEDFPU;
}
void __init trap_init(void)
{
	set_intr_gate(0, &divide_error);
	set_intr_gate_ist(1, &debug, DEBUG_STACK);
	set_intr_gate_ist(2, &nmi, NMI_STACK);
	set_system_gate_ist(3, &int3, DEBUG_STACK); /* int3 can be called from all */
	set_system_gate(4, &overflow);	/* int4 can be called from all */
	set_intr_gate(5, &bounds);
	set_intr_gate(6, &invalid_op);
	set_intr_gate(7, &device_not_available);
	set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
	set_intr_gate(9, &coprocessor_segment_overrun);
	set_intr_gate(10, &invalid_TSS);
	set_intr_gate(11, &segment_not_present);
	set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
	set_intr_gate(13, &general_protection);
	set_intr_gate(14, &page_fault);
	set_intr_gate(15, &spurious_interrupt_bug);
	set_intr_gate(16, &coprocessor_error);
	set_intr_gate(17, &alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(18, &machine_check, MCE_STACK);
#endif
	set_intr_gate(19, &simd_coprocessor_error);

#ifdef CONFIG_IA32_EMULATION
	set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
#endif

	/*
	 * Should be a barrier for any external CPU state.
	 */
	cpu_init();
}
/* Actual parsing is done early in setup.c. */
static int __init oops_dummy(char *s)
{
	panic_on_oops = 1;
	return 1;
}
__setup("oops=", oops_dummy);
static int __init kstack_setup(char *s)
{
	kstack_depth_to_print = simple_strtoul(s, NULL, 0);
	return 1;
}
__setup("kstack=", kstack_setup);
static int __init call_trace_setup(char *s)
{
	if (strcmp(s, "old") == 0)
		call_trace = -1;
	else if (strcmp(s, "both") == 0)
		call_trace = 0;
	else if (strcmp(s, "new") == 0)
		call_trace = 1;
	return 1;
}
__setup("call_trace=", call_trace_setup);
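/*
 * Boot-parameter summary: "oops=panic" makes an oops panic the machine,
 * "kstack=N" limits the raw stack dump to N words, and
 * "call_trace=old|both|new" selects the legacy stack scanner (-1), both
 * printouts (0), or the newer unwinder alone (1), matching the call_trace
 * values assigned above.
 */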