/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/context_tracking.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/smp.h>
#include <linux/ioport.h>
#include <linux/eisa.h>

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/kmemcheck.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <linux/atomic.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/fpu-internal.h>
#include <asm/fixmap.h>
#include <asm/mach_traps.h>
#include <asm/alternative.h>
#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>

/* No need to be aligned, but done to keep all IDTs defined the same way. */
gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>

asmlinkage int system_call(void);
#endif
/* Must be page-aligned because the real IDT is used in a fixmap. */
gate_desc idt_table[NR_VECTORS] __page_aligned_bss;

DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);
static inline void conditional_sti(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}
static inline void preempt_conditional_sti(struct pt_regs *regs)
{
	preempt_count_inc();
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}
static inline void conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
}
static inline void preempt_conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
	preempt_count_dec();
}
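
/*
 * Note: the preempt_conditional_* helpers are used in matched pairs (see
 * do_int3() and do_debug() below), so the raised preempt count keeps
 * scheduling disabled across the window in which interrupts may be
 * re-enabled on the debug stack.
 */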
enum ctx_state ist_enter(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	if (user_mode_vm(regs)) {
		/* Other than that, we're just an exception. */
		prev_state = exception_enter();
	} else {
		/*
		 * We might have interrupted pretty much anything.  In
		 * fact, if we're a machine check, we can even interrupt
		 * NMI processing.  We don't want in_nmi() to return true,
		 * but we need to notify RCU.
		 */
		rcu_nmi_enter();
		prev_state = IN_KERNEL;  /* the value is irrelevant. */
	}

	/*
	 * We are atomic because we're on the IST stack (or we're on x86_32,
	 * in which case we still shouldn't schedule).
	 *
	 * This must be after exception_enter(), because exception_enter()
	 * won't do anything if in_interrupt() returns true.
	 */
	preempt_count_add(HARDIRQ_OFFSET);

	/* This code is a bit fragile.  Test it. */
	rcu_lockdep_assert(rcu_is_watching(), "ist_enter didn't work");

	return prev_state;
}
void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
{
	/* Must be before exception_exit. */
	preempt_count_sub(HARDIRQ_OFFSET);

	if (user_mode_vm(regs))
		return exception_exit(prev_state);
	else
		rcu_nmi_exit();
}
/**
 * ist_begin_non_atomic() - begin a non-atomic section in an IST exception
 * @regs:	regs passed to the IST exception handler
 *
 * IST exception handlers normally cannot schedule.  As a special
 * exception, if the exception interrupted userspace code (i.e.
 * user_mode_vm(regs) would return true) and the exception was not
 * a double fault, it can be safe to schedule.  ist_begin_non_atomic()
 * begins a non-atomic section within an ist_enter()/ist_exit() region.
 * Callers are responsible for enabling interrupts themselves inside
 * the non-atomic section, and callers must call ist_end_non_atomic()
 * before ist_exit().
 */
void ist_begin_non_atomic(struct pt_regs *regs)
{
	BUG_ON(!user_mode_vm(regs));

	/*
	 * Sanity check: we need to be on the normal thread stack.  This
	 * will catch asm bugs and any attempt to use ist_preempt_enable
	 * from double_fault.
	 */
	BUG_ON(((current_stack_pointer() ^ this_cpu_read_stable(kernel_stack))
		& ~(THREAD_SIZE - 1)) != 0);

	preempt_count_sub(HARDIRQ_OFFSET);
}
/**
 * ist_end_non_atomic() - end a non-atomic section in an IST exception
 *
 * Ends a non-atomic section started with ist_begin_non_atomic().
 */
void ist_end_non_atomic(void)
{
	preempt_count_add(HARDIRQ_OFFSET);
}
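
/*
 * Illustrative calling pattern (a sketch, not lifted from any in-tree
 * handler): an IST handler that wants to sleep after trapping from user
 * space brackets the sleepable region like so:
 *
 *	prev_state = ist_enter(regs);
 *	...atomic work...
 *	if (user_mode_vm(regs)) {
 *		ist_begin_non_atomic(regs);
 *		local_irq_enable();
 *		...may schedule here...
 *		local_irq_disable();
 *		ist_end_non_atomic();
 *	}
 *	ist_exit(regs, prev_state);
 */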
static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
		  struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_X86_32
	if (regs->flags & X86_VM_MASK) {
		/*
		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < X86_TRAP_UD) {
			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
					error_code, trapnr))
				return 0;
		}
		return -1;
	}
#endif
	if (!user_mode(regs)) {
		if (!fixup_exception(regs)) {
			tsk->thread.error_code = error_code;
			tsk->thread.trap_nr = trapnr;
			die(str, regs, error_code);
		}
		return 0;
	}

	return -1;
}
static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
				siginfo_t *info)
{
	unsigned long siaddr;
	int sicode;

	switch (trapnr) {
	default:
		return SEND_SIG_PRIV;

	case X86_TRAP_DE:
		sicode = FPE_INTDIV;
		siaddr = uprobe_get_trap_addr(regs);
		break;
	case X86_TRAP_UD:
		sicode = ILL_ILLOPN;
		siaddr = uprobe_get_trap_addr(regs);
		break;
	case X86_TRAP_AC:
		sicode = BUS_ADRALN;
		siaddr = 0;
		break;
	}

	info->si_signo = signr;
	info->si_errno = 0;
	info->si_code = sicode;
	info->si_addr = (void __user *)siaddr;
	return info;
}
static void
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, siginfo_t *info)
{
	struct task_struct *tsk = current;

	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
		return;
	/*
	 * We want error_code and trap_nr set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up.  die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults.  See also do_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = trapnr;

#ifdef CONFIG_X86_64
	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
			tsk->comm, tsk->pid, str,
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		pr_cont("\n");
	}
#endif

	force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk);
}
NOKPROBE_SYMBOL(do_trap);
static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
			  unsigned long trapnr, int signr)
{
	enum ctx_state prev_state = exception_enter();
	siginfo_t info;

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
			NOTIFY_STOP) {
		conditional_sti(regs);
		do_trap(trapnr, signr, str, regs, error_code,
			fill_trap_info(regs, signr, trapnr, &info));
	}

	exception_exit(prev_state);
}
#define DO_ERROR(trapnr, signr, str, name)				\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	do_error_trap(regs, error_code, str, trapnr, signr);		\
}

DO_ERROR(X86_TRAP_DE,     SIGFPE,  "divide error",		divide_error)
DO_ERROR(X86_TRAP_OF,     SIGSEGV, "overflow",			overflow)
DO_ERROR(X86_TRAP_UD,     SIGILL,  "invalid opcode",		invalid_op)
DO_ERROR(X86_TRAP_OLD_MF, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(X86_TRAP_TS,     SIGSEGV, "invalid TSS",		invalid_TSS)
DO_ERROR(X86_TRAP_NP,     SIGBUS,  "segment not present",	segment_not_present)
DO_ERROR(X86_TRAP_SS,     SIGBUS,  "stack segment",		stack_segment)
DO_ERROR(X86_TRAP_AC,     SIGBUS,  "alignment check",		alignment_check)
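
/*
 * For illustration, DO_ERROR(X86_TRAP_DE, SIGFPE, "divide error",
 * divide_error) expands to:
 *
 *	dotraplinkage void do_divide_error(struct pt_regs *regs,
 *					   long error_code)
 *	{
 *		do_error_trap(regs, error_code, "divide error",
 *			      X86_TRAP_DE, SIGFPE);
 *	}
 */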
#ifdef CONFIG_X86_64
/* Runs on IST stack */
dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

#ifdef CONFIG_X86_ESPFIX64
	extern unsigned char native_irq_return_iret[];

	/*
	 * If IRET takes a non-IST fault on the espfix64 stack, then we
	 * end up promoting it to a doublefault.  In that case, modify
	 * the stack to make it look like we just entered the #GP
	 * handler from user space, similar to bad_iret.
	 *
	 * No need for ist_enter here because we don't use RCU.
	 */
	if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY &&
		regs->cs == __KERNEL_CS &&
		regs->ip == (unsigned long)native_irq_return_iret)
	{
		struct pt_regs *normal_regs = task_pt_regs(current);

		/* Fake a #GP(0) from userspace. */
		memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
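		/*
		 * The 5*8 bytes copied here are the hardware iret frame
		 * the failed IRET was consuming: RIP, CS, RFLAGS, RSP
		 * and SS (five 8-byte words on x86_64).
		 */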
		normal_regs->orig_ax = 0;  /* Missing (lost) #GP error code */
		regs->ip = (unsigned long)general_protection;
		regs->sp = (unsigned long)&normal_regs->orig_ax;
		return;
	}
#endif

	ist_enter(regs);  /* Discard prev_state because we won't return. */
	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_DF;

#ifdef CONFIG_DOUBLEFAULT
	df_debug(regs, error_code);
#endif
	/*
	 * This is always a kernel trap and never fixable (and thus must
	 * never return).
	 */
	for (;;)
		die(str, regs, error_code);
}
#endif
dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	struct xsave_struct *xsave_buf;
	enum ctx_state prev_state;
	struct bndcsr *bndcsr;
	siginfo_t *info;

	prev_state = exception_enter();
	if (notify_die(DIE_TRAP, "bounds", regs, error_code,
			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
		goto exit;
	conditional_sti(regs);

	if (!user_mode(regs))
		die("bounds", regs, error_code);

	if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
		/* The exception is not from Intel MPX */
		goto exit_trap;
	}

	/*
	 * We need to look at BNDSTATUS to resolve this exception.
	 * It is not directly accessible, though, so we need to
	 * do an xsave and then pull it out of the xsave buffer.
	 */
	fpu_save_init(&tsk->thread.fpu);
	xsave_buf = &(tsk->thread.fpu.state->xsave);
	bndcsr = get_xsave_addr(xsave_buf, XSTATE_BNDCSR);
	if (!bndcsr)
		goto exit_trap;

	/*
	 * The error code field of the BNDSTATUS register communicates status
	 * information of a bound range exception #BR or operation involving
	 * bound directory.
	 */
	switch (bndcsr->bndstatus & MPX_BNDSTA_ERROR_CODE) {
	case 2:	/* Bound directory has invalid entry. */
		if (mpx_handle_bd_fault(xsave_buf))
			goto exit_trap;
		break; /* Success, it was handled */
	case 1: /* Bound violation. */
		info = mpx_generate_siginfo(regs, xsave_buf);
		if (IS_ERR(info)) {
			/*
			 * We failed to decode the MPX instruction.  Act as if
			 * the exception was not caused by MPX.
			 */
			goto exit_trap;
		}
		/*
		 * Success, we decoded the instruction and retrieved
		 * an 'info' containing the address being accessed
		 * which caused the exception.  This information
		 * allows an application to possibly handle the
		 * #BR exception itself.
		 */
		do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, info);
		break;
	case 0: /* No exception caused by Intel MPX operations. */
		goto exit_trap;
	default:
		die("bounds", regs, error_code);
	}

exit:
	exception_exit(prev_state);
	return;
exit_trap:
	/*
	 * This path out is for all the cases where we could not
	 * handle the exception in some way (like allocating a
	 * table or telling userspace about it).  We will also end
	 * up here if the kernel has MPX turned off at compile
	 * time.
	 */
	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, NULL);
	exception_exit(prev_state);
}
dotraplinkage void
do_general_protection(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	conditional_sti(regs);

#ifdef CONFIG_X86_32
	if (regs->flags & X86_VM_MASK) {
		local_irq_enable();
		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
		goto exit;
	}
#endif

	tsk = current;
	if (!user_mode(regs)) {
		if (fixup_exception(regs))
			goto exit;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = X86_TRAP_GP;
		if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
			       X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
			die("general protection fault", regs, error_code);
		goto exit;
	}

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_GP;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
			printk_ratelimit()) {
		pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk),
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		pr_cont("\n");
	}

	force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
exit:
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_general_protection);
/* May run on IST stack. */
dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

#ifdef CONFIG_DYNAMIC_FTRACE
	/*
	 * ftrace must be first, everything else may cause a recursive crash.
	 * See note by declaration of modifying_ftrace_code in ftrace.c
	 */
	if (unlikely(atomic_read(&modifying_ftrace_code)) &&
	    ftrace_int3_handler(regs))
		return;
#endif
	if (poke_int3_handler(regs))
		return;

	prev_state = ist_enter(regs);
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
				SIGTRAP) == NOTIFY_STOP)
		goto exit;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

#ifdef CONFIG_KPROBES
	if (kprobe_int3_handler(regs))
		goto exit;
#endif

	if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
			SIGTRAP) == NOTIFY_STOP)
		goto exit;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();
	preempt_conditional_sti(regs);
	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
	preempt_conditional_cli(regs);
	debug_stack_usage_dec();
exit:
	ist_exit(regs, prev_state);
}
NOKPROBE_SYMBOL(do_int3);
#ifdef CONFIG_X86_64
/*
 * Help handler running on IST stack to switch off the IST stack if the
 * interrupted code was in user mode. The actual stack switch is done in
 * entry_64.S
 */
asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = task_pt_regs(current);
	*regs = *eregs;
	return regs;
}
NOKPROBE_SYMBOL(sync_regs);
struct bad_iret_stack {
	void *error_entry_ret;
	struct pt_regs regs;
};

asmlinkage __visible notrace
struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
{
	/*
	 * This is called from entry_64.S early in handling a fault
	 * caused by a bad iret to user mode.  To handle the fault
	 * correctly, we want to move our stack frame to task_pt_regs
	 * and we want to pretend that the exception came from the
	 * iret target.
	 */
	struct bad_iret_stack *new_stack =
		container_of(task_pt_regs(current),
			     struct bad_iret_stack, regs);

	/* Copy the IRET target to the new stack. */
	memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);

	/* Copy the remainder of the stack from the current stack. */
	memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));

	BUG_ON(!user_mode_vm(&new_stack->regs));
	return new_stack;
}
NOKPROBE_SYMBOL(fixup_bad_iret);
#endif
/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */
dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	enum ctx_state prev_state;
	int user_icebp = 0;
	unsigned long dr6;
	int si_code;

	prev_state = ist_enter(regs);

	get_debugreg(dr6, 6);

	/* Filter out all the reserved bits which are preset to 1 */
	dr6 &= ~DR6_RESERVED;

	/*
	 * If dr6 has no reason to give us about the origin of this trap,
	 * then it's very likely the result of an icebp/int01 trap.
	 * User wants a sigtrap for that.
	 */
	if (!dr6 && user_mode(regs))
		user_icebp = 1;

	/* Catch kmemcheck conditions first of all! */
	if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
		goto exit;

	/* DR6 may or may not be cleared by the CPU */
	set_debugreg(0, 6);

	/*
	 * The processor cleared BTF, so don't mark that we need it set.
	 */
	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);

	/* Store the virtualized DR6 value */
	tsk->thread.debugreg6 = dr6;

#ifdef CONFIG_KPROBES
	if (kprobe_debug_handler(regs))
		goto exit;
#endif

	if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code,
							SIGTRAP) == NOTIFY_STOP)
		goto exit;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();

	/* It's safe to allow irq's after DR6 has been saved */
	preempt_conditional_sti(regs);

	if (regs->flags & X86_VM_MASK) {
		handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
					X86_TRAP_DB);
		preempt_conditional_cli(regs);
		debug_stack_usage_dec();
		goto exit;
	}

	/*
	 * Single-stepping through system calls: ignore any exceptions in
	 * kernel space, but re-enable TF when returning to user mode.
	 *
	 * We already checked v86 mode above, so we can check for kernel mode
	 * by just checking the CPL of CS.
	 */
	if ((dr6 & DR_STEP) && !user_mode(regs)) {
		tsk->thread.debugreg6 &= ~DR_STEP;
		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
		regs->flags &= ~X86_EFLAGS_TF;
	}
	si_code = get_si_code(tsk->thread.debugreg6);
	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
		send_sigtrap(tsk, regs, error_code, si_code);
	preempt_conditional_cli(regs);
	debug_stack_usage_dec();

exit:
	ist_exit(regs, prev_state);
}
NOKPROBE_SYMBOL(do_debug);
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
static void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
	struct task_struct *task = current;
	siginfo_t info;
	unsigned short err;
	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
						"simd exception";

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
		return;
	conditional_sti(regs);

	if (!user_mode_vm(regs)) {
		if (!fixup_exception(regs)) {
			task->thread.error_code = error_code;
			task->thread.trap_nr = trapnr;
			die(str, regs, error_code);
		}
		return;
	}

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	save_init_fpu(task);
	task->thread.trap_nr = trapnr;
	task->thread.error_code = error_code;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *)uprobe_get_trap_addr(regs);
	if (trapnr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status.  0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit.  We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception.
		 */
		cwd = get_fpu_cwd(task);
		swd = get_fpu_swd(task);

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register.  Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = get_fpu_mxcsr(task);
		err = ~(mxcsr >> 7) & mxcsr;
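		/*
		 * Worked example (illustrative values): with only the
		 * divide-by-zero exception unmasked (mask bit 9 clear)
		 * and its flag set (bit 2), mxcsr == 0x1d84.  Shifting
		 * right by 7 aligns the mask bits with the flag bits,
		 * so ~(mxcsr >> 7) & mxcsr keeps exactly the flags
		 * whose exceptions are unmasked: err & 0x004 is set
		 * and we report FPE_FLTDIV below.
		 */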
	}

	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		info.si_code = FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		info.si_code = FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		info.si_code = FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		info.si_code = FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		info.si_code = FPE_FLTRES;
	} else {
		/*
		 * If we're using IRQ 13, or supposedly even some trap
		 * X86_TRAP_MF implementations, it's possible
		 * we get a spurious trap, which is not an error.
		 */
		return;
	}
	force_sig_info(SIGFPE, &info, task);
}
dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	math_error(regs, error_code, X86_TRAP_MF);
	exception_exit(prev_state);
}
dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	math_error(regs, error_code, X86_TRAP_XF);
	exception_exit(prev_state);
}
dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
	conditional_sti(regs);
#if 0
	/* No need to warn about this any longer. */
	pr_info("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}
asmlinkage __visible void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage __visible void __attribute__((weak)) smp_threshold_interrupt(void)
{
}
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (e.g. with local
 * interrupts disabled, as in the case of do_device_not_available).
 */
void math_state_restore(void)
{
	struct task_struct *tsk = current;

	if (!tsk_used_math(tsk)) {
		local_irq_enable();
		/*
		 * does a slab alloc which can sleep
		 */
		if (init_fpu(tsk)) {
			/*
			 * ran out of memory!
			 */
			do_group_exit(SIGKILL);
			return;
		}
		local_irq_disable();
	}

	/* Avoid __kernel_fpu_begin() right after __thread_fpu_begin() */
	kernel_fpu_disable();
	__thread_fpu_begin(tsk);
	if (unlikely(restore_fpu_checking(tsk))) {
		drop_init_fpu(tsk);
		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
	} else {
		tsk->thread.fpu_counter++;
	}
	kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(math_state_restore);
dotraplinkage void
do_device_not_available(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	BUG_ON(use_eager_fpu());

#ifdef CONFIG_MATH_EMULATION
	if (read_cr0() & X86_CR0_EM) {
		struct math_emu_info info = { };

		conditional_sti(regs);

		info.regs = regs;
		math_emulate(&info);
		exception_exit(prev_state);
		return;
	}
#endif
	math_state_restore(); /* interrupts still off */
#ifdef CONFIG_X86_32
	conditional_sti(regs);
#endif
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_device_not_available);
#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
	siginfo_t info;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	local_irq_enable();

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_BADSTK;
	info.si_addr = NULL;
	if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
			&info);
	}
	exception_exit(prev_state);
}
#endif
/* Set of traps needed for early debugging. */
void __init early_trap_init(void)
{
	/*
	 * Don't use IST to set DEBUG_STACK as it doesn't work until TSS
	 * is ready in cpu_init() <-- trap_init(). Before trap_init(),
	 * CPU runs at ring 0 so it is impossible to hit an invalid
	 * stack.  Using the original stack works well enough at this
	 * early stage.  DEBUG_STACK will be equipped after cpu_init() in
	 * trap_init().
	 *
	 * We don't need to set trace_idt_table like set_intr_gate(),
	 * since we don't have trace_debug and it will be reset to
	 * 'debug' in trap_init() by set_intr_gate_ist().
	 */
	set_intr_gate_notrace(X86_TRAP_DB, debug);
	/* int3 can be called from all */
	set_system_intr_gate(X86_TRAP_BP, &int3);
#ifdef CONFIG_X86_32
	set_intr_gate(X86_TRAP_PF, page_fault);
#endif
	load_idt(&idt_descr);
}
void __init early_trap_pf_init(void)
{
#ifdef CONFIG_X86_64
	set_intr_gate(X86_TRAP_PF, page_fault);
#endif
}
void __init trap_init(void)
{
	int i;

#ifdef CONFIG_EISA
	void __iomem *p = early_ioremap(0x0FFFD9, 4);

	/* Look for the "EISA" signature string in the BIOS ROM. */
	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
		EISA_bus = 1;
	early_iounmap(p, 4);
#endif

	set_intr_gate(X86_TRAP_DE, divide_error);
	set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
	/* int4 can be called from all */
	set_system_intr_gate(X86_TRAP_OF, &overflow);
	set_intr_gate(X86_TRAP_BR, bounds);
	set_intr_gate(X86_TRAP_UD, invalid_op);
	set_intr_gate(X86_TRAP_NM, device_not_available);
#ifdef CONFIG_X86_32
	set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
#else
	set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
#endif
	set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun);
	set_intr_gate(X86_TRAP_TS, invalid_TSS);
	set_intr_gate(X86_TRAP_NP, segment_not_present);
	set_intr_gate(X86_TRAP_SS, stack_segment);
	set_intr_gate(X86_TRAP_GP, general_protection);
	set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug);
	set_intr_gate(X86_TRAP_MF, coprocessor_error);
	set_intr_gate(X86_TRAP_AC, alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
#endif
	set_intr_gate(X86_TRAP_XF, simd_coprocessor_error);

	/* Reserve all the builtin and the syscall vector: */
	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
		set_bit(i, used_vectors);

#ifdef CONFIG_IA32_EMULATION
	set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif

#ifdef CONFIG_X86_32
	set_system_trap_gate(SYSCALL_VECTOR, &system_call);
	set_bit(SYSCALL_VECTOR, used_vectors);
#endif

	/*
	 * Set the IDT descriptor to a fixed read-only location, so that the
	 * "sidt" instruction will not leak the location of the kernel, and
	 * to defend the IDT against arbitrary memory write vulnerabilities.
	 * It will be reloaded in cpu_init().
	 */
	__set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO);
	idt_descr.address = fix_to_virt(FIX_RO_IDT);

	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();

	/*
	 * X86_TRAP_DB and X86_TRAP_BP have been set
	 * in early_trap_init(). However, IST works only after
	 * cpu_init() loads TSS. See comments in early_trap_init().
	 */
	set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
	/* int3 can be called from all */
	set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);

	x86_init.irqs.trap_init();

#ifdef CONFIG_X86_64
	memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16);
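	/*
	 * Each 64-bit IDT gate descriptor is 16 bytes, so IDT_ENTRIES * 16
	 * copies the entire IDT into the dedicated debug IDT.
	 */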
	set_nmi_gate(X86_TRAP_DB, &debug);
	set_nmi_gate(X86_TRAP_BP, &int3);
#endif
}