 * Copyright (C) 1991,1992 Linus Torvalds
 * entry_32.S contains the system-call and low-level fault and trap handling routines.
 * Stack layout while running C code:
 *	ptrace needs to have all registers on the stack.
 *	If the order here is changed, it needs to be
 *	updated in fork.c:copy_process(), signal.c:do_signal(),
 *	ptrace.c and ptrace.h
 *	28(%esp) - %gs saved iff !CONFIG_X86_32_LAZY_GS
#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/export.h>
	.section .entry.text, "ax"
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization. The following will never clobber any registers:
 *	INTERRUPT_RETURN (aka. "iret")
 *	GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *	ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
# define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
# define preempt_stop(clobbers)
# define resume_kernel		restore_all
.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)		# interrupts off?
 * User gs save/restore
 *
 * %gs is used for userland TLS, and the kernel only uses it for the
 * stack canary, which gcc requires to live at %gs:20. Read the comment
 * at the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
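/*
 * For reference (a sketch of typical compiler output, not code from
 * this file): with -fstack-protector, gcc guards an i386 function
 * roughly as
 *
 *	movl	%gs:20, %eax		# load the canary
 *	movl	%eax, -4(%ebp)		# stash it in the frame
 *	...
 *	movl	-4(%ebp), %eax
 *	xorl	%gs:20, %eax		# canary still intact?
 *	jne	<call __stack_chk_fail>
 *
 * which is why a valid canary must always be readable at %gs:20.
 */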
#ifdef CONFIG_X86_32_LAZY_GS

	/* unfortunately push/pop can't be no-op */
	addl	$(4 + \pop), %esp

	/* all the rest are no-op */
.macro REG_TO_PTGS reg
.macro SET_KERNEL_GS reg
#else	/* CONFIG_X86_32_LAZY_GS */
.pushsection .fixup, "ax"
	_ASM_EXTABLE(98b, 99b)
98:	mov	PT_GS(%esp), %gs
.pushsection .fixup, "ax"
99:	movl	$0, PT_GS(%esp)
	_ASM_EXTABLE(98b, 99b)
.macro REG_TO_PTGS reg
	movl	\reg, PT_GS(%esp)
.macro SET_KERNEL_GS reg
	movl	$(__KERNEL_STACK_CANARY), \reg
#endif	/* CONFIG_X86_32_LAZY_GS */
.macro SAVE_ALL pt_regs_ax=%eax
	movl	$(__USER_DS), %edx		# selector for %ds/%es (user data segment)
	movl	$(__KERNEL_PERCPU), %edx	# selector for %fs (per-CPU data)
.macro RESTORE_INT_REGS
.macro RESTORE_REGS pop=0
.pushsection .fixup, "ax"
	/* When we fork, we trace the syscall return in the child, too. */
	call	syscall_return_slowpath
ENTRY(ret_from_kernel_thread)
	movl	PT_EBP(%esp), %eax
	movl	$0, PT_EAX(%esp)
	/*
	 * Kernel threads return to userspace as if returning from a syscall.
	 * We should check whether anything actually uses this path and, if so,
	 * consider switching it over to ret_from_fork.
	 */
	call	syscall_return_slowpath
ENDPROC(ret_from_kernel_thread)
/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	preempt_stop(CLBR_ANY)

	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax

	/*
	 * We can be coming here from a child spawned by kernel_thread().
	 */
	movl	PT_CS(%esp), %eax
	andl	$SEGMENT_RPL_MASK, %eax

	jb	resume_kernel			# not returning to v8086 or userspace
ENTRY(resume_userspace)
	DISABLE_INTERRUPTS(CLBR_ANY)
	call	prepare_exit_to_usermode
END(ret_from_exception)
#ifdef CONFIG_PREEMPT
	DISABLE_INTERRUPTS(CLBR_ANY)
	cmpl	$0, PER_CPU_VAR(__preempt_count)
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	call	preempt_schedule_irq
GLOBAL(__begin_SYSENTER_singlestep_region)
/*
 * All code from here through __end_SYSENTER_singlestep_region is subject
 * to being single-stepped if a user program sets TF and executes SYSENTER.
 * There is absolutely nothing that we can do to prevent this from happening
 * (thanks Intel!). To keep our handling of this situation as simple as
 * possible, we handle TF just like AC and NT, except that our #DB handler
 * will ignore all of the single-step traps generated in this range.
 */
/*
 * Xen doesn't set %esp to be precisely what the normal SYSENTER
 * entry point expects, so fix it up before using the normal path.
 */
ENTRY(xen_sysenter_target)
	addl	$5*4, %esp			/* remove xen-provided frame */
	jmp	sysenter_past_esp
 * 32-bit SYSENTER entry.
 *
 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
 * if X86_FEATURE_SEP is available. This is the preferred system call
 * entry on 32-bit systems.
 *
 * The SYSENTER instruction, in principle, should *only* occur in the
 * vDSO. In practice, a small number of Android devices were shipped
 * with a copy of Bionic that inlined a SYSENTER instruction. This
 * never happened in any of Google's Bionic versions -- it only happened
 * in a narrow range of Intel-provided versions.
 *
 * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs.
 * IF and VM in RFLAGS are cleared (IOW: interrupts are off).
 * SYSENTER does not save anything on the stack,
 * and does not save old EIP (!!!), ESP, or EFLAGS.
 *
 * To avoid losing track of EFLAGS.VM (and thus potentially corrupting
 * user and/or vm86 state), we explicitly disable the SYSENTER
 * instruction in vm86 mode by reprogramming the MSRs.
 *
 * eax	system call number
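/*
 * For reference (a sketch, not code from this file): the MSRs that
 * SYSENTER consumes are programmed by the kernel at CPU setup, along
 * the lines of
 *
 *	wrmsr(MSR_IA32_SYSENTER_CS,  __KERNEL_CS, 0);
 *	wrmsr(MSR_IA32_SYSENTER_ESP, <per-CPU SYSENTER stack>, 0);
 *	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
 *
 * "Disabling SYSENTER in vm86 mode" amounts to writing 0 into
 * MSR_IA32_SYSENTER_CS, which makes the instruction raise #GP.
 */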
ENTRY(entry_SYSENTER_32)
	movl	TSS_sysenter_sp0(%esp), %esp
	pushl	$__USER_DS		/* pt_regs->ss */
	pushl	%ebp			/* pt_regs->sp (stashed in bp) */
	pushfl				/* pt_regs->flags (except IF = 0) */
	orl	$X86_EFLAGS_IF, (%esp)	/* Fix IF */
	pushl	$__USER_CS		/* pt_regs->cs */
	pushl	$0			/* pt_regs->ip = 0 (placeholder) */
	pushl	%eax			/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest */
	/*
	 * SYSENTER doesn't filter flags, so we need to clear NT, AC
	 * and TF ourselves. To save a few cycles, we can check whether
	 * either was set instead of doing an unconditional popfl.
	 * This needs to happen before enabling interrupts so that
	 * we don't get preempted with NT set.
	 *
	 * If TF is set, we will single-step all the way to here -- do_debug
	 * will ignore all the traps. (Yes, this is slow, but so is
	 * single-stepping in general. This allows us to avoid having
	 * more complicated code to handle the case where a user program
	 * forces us to single-step through the SYSENTER entry code.)
	 *
	 * NB.: .Lsysenter_fix_flags is a label with the code under it moved
	 * out-of-line as an optimization: NT is unlikely to be set in the
	 * majority of the cases and instead of polluting the I$ unnecessarily,
	 * we're keeping that code behind a branch which will predict as
	 * not-taken and therefore its instructions won't be fetched.
	 */
	testl	$X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp)
	jnz	.Lsysenter_fix_flags
.Lsysenter_flags_fixed:
	/*
	 * User mode is traced as though IRQs are on, and SYSENTER
	 * turned them off.
	 */

	call	do_fast_syscall_32
	/* XEN PV guests always use IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV
/* Opportunistic SYSEXIT */
	TRACE_IRQS_ON			/* User mode traces as IRQs on. */
	movl	PT_EIP(%esp), %edx	/* pt_regs->ip */
	movl	PT_OLDESP(%esp), %ecx	/* pt_regs->sp */
1:	mov	PT_FS(%esp), %fs
	popl	%ebx			/* pt_regs->bx */
	addl	$2*4, %esp		/* skip pt_regs->cx and pt_regs->dx */
	popl	%esi			/* pt_regs->si */
	popl	%edi			/* pt_regs->di */
	popl	%ebp			/* pt_regs->bp */
	popl	%eax			/* pt_regs->ax */
	/*
	 * Restore all flags except IF. (We restore IF separately because
	 * STI gives a one-instruction window in which we won't be interrupted,
	 * whereas POPF does not.)
	 */
	addl	$PT_EFLAGS-PT_DS, %esp	/* point esp at pt_regs->flags */
	btr	$X86_EFLAGS_IF_BIT, (%esp)
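	/*
	 * (The IF-cleared flags image is then restored with popfl, and
	 * sti is issued immediately before sysexit: the one-instruction
	 * STI shadow covers the sysexit itself, so no interrupt can
	 * arrive while we are still on the kernel side.)
	 */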
	/*
	 * Return to the vDSO, which will pop ecx and edx.
	 * Don't bother with DS and ES (they already contain __USER_DS).
	 */
.pushsection .fixup, "ax"
2:	movl	$0, PT_FS(%esp)
.Lsysenter_fix_flags:
	pushl	$X86_EFLAGS_FIXED
	jmp	.Lsysenter_flags_fixed
GLOBAL(__end_SYSENTER_singlestep_region)
ENDPROC(entry_SYSENTER_32)
 * 32-bit legacy system call entry.
 *
 * 32-bit x86 Linux system calls traditionally used the INT $0x80
 * instruction. INT $0x80 lands here.
 *
 * This entry point can be used by any 32-bit program to perform system calls.
 * Instances of INT $0x80 can be found inline in various programs and
 * libraries. It is also used by the vDSO's __kernel_vsyscall
 * fallback for hardware that doesn't support a faster entry method.
 * Restarted 32-bit system calls also fall back to INT $0x80
 * regardless of what instruction was originally used to do the system
 * call. (64-bit programs can use INT $0x80 as well, but they can
 * only run on 64-bit kernels and therefore land in
 * entry_INT80_compat.)
 *
 * This is considered a slow path. It is not used by most libc
 * implementations on modern hardware except during process startup.
 *
 * eax	system call number
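/*
 * For illustration (a userspace sketch, not part of this file): the
 * classic inlined form of this system call, here write(2) on i386:
 *
 *	movl	$4, %eax		# __NR_write
 *	movl	$1, %ebx		# fd = stdout
 *	movl	$msg, %ecx		# buffer (hypothetical label)
 *	movl	$len, %edx		# length (hypothetical constant)
 *	int	$0x80			# lands at entry_INT80_32 below
 */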
ENTRY(entry_INT80_32)
	pushl	%eax			/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest */

	/*
	 * User mode is traced as though IRQs are on, and the interrupt gate
	 * turned them off.
	 */

	call	do_int80_syscall_32
#ifdef CONFIG_X86_ESPFIX32
	ALTERNATIVE	"jmp restore_nocheck", "", X86_BUG_ESPFIX

	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	/*
	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	 * are returning to the kernel.
	 * See comments in process.c:copy_thread() for details.
	 */
	movb	PT_OLDSS(%esp), %ah
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
	je	ldt_ss				# returning to user-space with LDT SS

	RESTORE_REGS 4				# skip orig_eax/error_code
.section .fixup, "ax"
	pushl	$0				# no error code
	_ASM_EXTABLE(irq_return, iret_exc)
#ifdef CONFIG_X86_ESPFIX32
/*
 * Set up and switch to the ESPFIX stack.
 *
 * We're returning to userspace with a 16 bit stack. The CPU will not
 * restore the high word of ESP for us on executing iret... This is an
 * "official" bug of all the x86-compatible CPUs, which we can work
 * around to make dosemu and wine happy. We do this by preloading the
 * high word of ESP with the high word of the userspace ESP while
 * compensating for the offset by changing to the ESPFIX segment with
 * a base address that makes up for the difference.
 */
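/*
 * Worked example (made-up addresses): with a userspace ESP of
 * 0xbfff1234 and a kernel ESP of 0xc157fe30, the code below builds
 *
 *	%eax = 0xbffffe30	(user high word | kernel low word)
 *	%edx = 0xc157fe30 - 0xbffffe30 = 0x01580000
 *
 * and writes %edx into the ESPFIX GDT entry as the segment base, so
 * base + %eax still addresses the real kernel stack while the value
 * architecturally in %esp carries the user's high word.
 */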
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
	mov	%esp, %edx			/* load kernel esp */
	mov	PT_OLDESP(%esp), %eax		/* load userspace esp */
	mov	%dx, %ax			/* eax: new kernel esp */
	sub	%eax, %edx			/* offset (low word is 0) */
	mov	%dl, GDT_ESPFIX_SS + 4		/* bits 16..23 */
	mov	%dh, GDT_ESPFIX_SS + 7		/* bits 24..31 */
	pushl	%eax				/* new kernel esp */
	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the IRET:
	 */
	DISABLE_INTERRUPTS(CLBR_EAX)
	lss	(%esp), %esp			/* switch to espfix segment */
ENDPROC(entry_INT80_32)
.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack.
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack and adjusts ESP with the matching offset.
 */
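/*
 * (This is the inverse of the setup in ldt_ss above: the base written
 * into the GDT entry is read back, and base + %esp yields the flat,
 * zero-based stack pointer again.)
 */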
#ifdef CONFIG_X86_ESPFIX32
	/* fixup the stack */
	mov	GDT_ESPFIX_SS + 4, %al		/* bits 16..23 */
	mov	GDT_ESPFIX_SS + 7, %ah		/* bits 24..31 */
	addl	%esp, %eax			/* the adjusted stack pointer */
	lss	(%esp), %esp			/* switch to the normal stack segment */
.macro UNWIND_ESPFIX_STACK
#ifdef CONFIG_X86_ESPFIX32
	/* see if on espfix stack */
	cmpw	$__ESPFIX_SS, %ax
	movl	$__KERNEL_DS, %eax
	/* switch to normal stack */
/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushl	$(~vector+0x80)			/* Note: always in signed byte range */
END(irq_entries_start)
/*
 * The CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
	addl	$-0x80, (%esp)			/* Adjust vector into the [-256, -1] range */
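	/*
	 * Worked example: for vector 0x20 (FIRST_EXTERNAL_VECTOR) the
	 * stub pushed $(~0x20 + 0x80) = $0x5f, which fits in a signed
	 * byte and keeps each stub within its 8-byte slot. The addl
	 * above turns that into 0x5f - 0x80 = -0x21 = ~0x20, i.e.
	 * -(vector + 1), always within [-256, -1].
	 */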
ENDPROC(common_interrupt)
#define BUILD_INTERRUPT3(name, nr, fn)	\
#ifdef CONFIG_TRACING
# define TRACE_BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
# define TRACE_BUILD_INTERRUPT(name, nr)
#define BUILD_INTERRUPT(name, nr)		\
	BUILD_INTERRUPT3(name, nr, smp_##name);	\
	TRACE_BUILD_INTERRUPT(name, nr)

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>
ENTRY(coprocessor_error)
	pushl	$do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
	ALTERNATIVE "pushl	$do_general_protection",	\
		    "pushl	$do_simd_coprocessor_error",	\
	pushl	$do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
	pushl	$-1				# mark this as an int
	pushl	$do_device_not_available
END(device_not_available)
#ifdef CONFIG_PARAVIRT
	_ASM_EXTABLE(native_iret, iret_exc)
ENTRY(coprocessor_segment_overrun)
	pushl	$do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

	pushl	$do_invalid_TSS

ENTRY(segment_not_present)
	pushl	$do_segment_not_present
END(segment_not_present)

	pushl	$do_stack_segment

ENTRY(alignment_check)
	pushl	$do_alignment_check

	pushl	$0				# no error code
	pushl	$do_divide_error

#ifdef CONFIG_X86_MCE
	pushl	machine_check_vector

ENTRY(spurious_interrupt_bug)
	pushl	$do_spurious_interrupt_bug
END(spurious_interrupt_bug)
ENTRY(xen_hypervisor_callback)
	pushl	$-1				/* orig_ax = -1 => not a system call */

	/*
	 * Check to see if we got the event in the critical
	 * region in xen_iret_direct, after we've reenabled
	 * events and checked for pending events. This simulates
	 * the iret instruction's behaviour where it delivers a
	 * pending interrupt when enabling interrupts:
	 */
	movl	PT_EIP(%esp), %eax
	cmpl	$xen_iret_start_crit, %eax
	cmpl	$xen_iret_end_crit, %eax
	jmp	xen_iret_crit_fixup

	call	xen_evtchn_do_upcall
#ifndef CONFIG_PREEMPT
	call	xen_maybe_preempt_hcall
ENDPROC(xen_hypervisor_callback)
/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we fix up by reattempting the load, and zeroing the segment
 * register if the load fails.
 * Category 2 we fix up by jumping to do_iret_error. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by maintaining a status value in EAX.
 */
ENTRY(xen_failsafe_callback)
	/* EAX == 0 => Category 1 (Bad segment)
	   EAX != 0 => Category 2 (Bad IRET) */
5:	pushl	$-1				/* orig_ax = -1 => not a system call */
	jmp	ret_from_exception
.section .fixup, "ax"
ENDPROC(xen_failsafe_callback)

BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 xen_evtchn_do_upcall)

#endif /* CONFIG_XEN */
#if IS_ENABLED(CONFIG_HYPERV)
BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 hyperv_vector_handler)
#endif /* CONFIG_HYPERV */
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
	pushl	$0				/* Pass NULL as regs pointer */
	movl	function_trace_op, %ecx
	subl	$MCOUNT_INSN_SIZE, %eax
	addl	$4, %esp			/* skip NULL pointer */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ENTRY(ftrace_regs_caller)
	pushf	/* push flags before compare (in cs location) */

	/*
	 * i386 does not save SS and ESP when coming from kernel.
	 * Instead, to get sp, &regs->sp is used (see ptrace.h).
	 * Unfortunately, that means eflags must be at the same location
	 * as the current return ip is. We move the return ip into the
	 * ip location, and move flags into the return ip location.
	 */
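	/*
	 * Sketch of the resulting layout (higher lines = higher
	 * addresses):
	 *
	 *	[ ret-ip ]	original return address; its slot later
	 *			becomes regs->flags
	 *	[ flags  ]	pushf result, sitting in the regs->cs
	 *			slot until moved up below
	 *	[ ret-ip ]	copy pushed next, serving as regs->ip
	 */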
	pushl	4(%esp)				/* save return ip into ip slot */

	pushl	$0				/* Load 0 into orig_ax */

	movl	13*4(%esp), %eax		/* Get the saved flags */
	movl	%eax, 14*4(%esp)		/* Move saved flags into regs->flags location */
						/* clobbering return ip */
	movl	$__KERNEL_CS, 13*4(%esp)

	movl	12*4(%esp), %eax		/* Load ip (1st parameter) */
	subl	$MCOUNT_INSN_SIZE, %eax		/* Adjust ip */
	movl	0x4(%ebp), %edx			/* Load parent ip (2nd parameter) */
	movl	function_trace_op, %ecx		/* Save ftrace_pos in 3rd parameter */
	pushl	%esp				/* Save pt_regs as 4th parameter */
GLOBAL(ftrace_regs_call)
	addl	$4, %esp			/* Skip pt_regs */
	movl	14*4(%esp), %eax		/* Move flags back into cs */
	movl	%eax, 13*4(%esp)		/* Needed to keep addl from modifying flags */
	movl	12*4(%esp), %eax		/* Get return ip from regs->ip */
	movl	%eax, 14*4(%esp)		/* Put return ip back for ret */
	addl	$8, %esp			/* Skip orig_ax and ip */
	popf					/* Pop flags at end (no addl to corrupt flags) */
#else /* ! CONFIG_DYNAMIC_FTRACE */
	cmpl	$__PAGE_OFFSET, %esp
	jb	ftrace_stub			/* Paging not enabled yet? */

	cmpl	$ftrace_stub, ftrace_trace_function
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl	$ftrace_stub, ftrace_graph_return
	jnz	ftrace_graph_caller

	cmpl	$ftrace_graph_entry_stub, ftrace_graph_entry
	jnz	ftrace_graph_caller
	/* taken from glibc */
	subl	$MCOUNT_INSN_SIZE, %eax
	call	*ftrace_trace_function
#endif /* CONFIG_DYNAMIC_FTRACE */
EXPORT_SYMBOL(mcount)
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	subl	$MCOUNT_INSN_SIZE, %eax
	call	prepare_ftrace_return
END(ftrace_graph_caller)

.globl return_to_handler
	call	ftrace_return_to_handler
#ifdef CONFIG_TRACING
ENTRY(trace_page_fault)
	pushl	$trace_do_page_fault
END(trace_page_fault)

	pushl	$do_page_fault
	/* the function address is in %gs's slot on the stack */
	movl	$(__KERNEL_PERCPU), %ecx
	movl	PT_GS(%esp), %edi		# get the function address
	movl	PT_ORIG_EAX(%esp), %edx		# get the error code
	movl	$-1, PT_ORIG_EAX(%esp)		# no syscall to restart
	movl	$(__USER_DS), %ecx
	movl	%esp, %eax			# pt_regs pointer
	jmp	ret_from_exception
/*
 * #DB can happen at the first instruction of
 * entry_SYSENTER_32 or in Xen's SYSENTER prologue. If this
 * happens, then we will be running on a very small stack. We
 * need to detect this condition and switch to the thread
 * stack before calling any C code at all.
 *
 * If you edit this code, keep in mind that NMIs can happen in here.
 */
	pushl	$-1				# mark this as an int
	xorl	%edx, %edx			# error code 0
	movl	%esp, %eax			# pt_regs pointer
	/* Are we currently on the SYSENTER stack? */
	PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx)
	subl	%eax, %ecx			/* ecx = (end of SYSENTER_stack) - esp */
	cmpl	$SIZEOF_SYSENTER_stack, %ecx
	jb	.Ldebug_from_sysenter_stack
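	/*
	 * (Unsigned containment check: if %esp lies within the SYSENTER
	 * stack, the distance from its end is below
	 * SIZEOF_SYSENTER_stack; any other %esp wraps around to a huge
	 * unsigned value and the jb above falls through.)
	 */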
	jmp	ret_from_exception

.Ldebug_from_sysenter_stack:
	/* We're on the SYSENTER stack. Switch off. */
	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
	jmp	ret_from_exception
/*
 * NMI is doubly nasty. It can happen on the first instruction of
 * entry_SYSENTER_32 (just like #DB), but it can also interrupt the beginning
 * of the #DB handler even if that #DB in turn hit before entry_SYSENTER_32
 * switched stacks. We handle both conditions by simply checking whether we
 * interrupted kernel code running on the SYSENTER stack.
 */
#ifdef CONFIG_X86_ESPFIX32
	cmpw	$__ESPFIX_SS, %ax
	pushl	%eax				# pt_regs->orig_ax
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	/* Are we currently on the SYSENTER stack? */
	PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx)
	subl	%eax, %ecx			/* ecx = (end of SYSENTER_stack) - esp */
	cmpl	$SIZEOF_SYSENTER_stack, %ecx
	jb	.Lnmi_from_sysenter_stack

	/* Not on SYSENTER stack. */
	jmp	restore_all_notrace
.Lnmi_from_sysenter_stack:
	/*
	 * We're on the SYSENTER stack. Switch off. No one (not even debug)
	 * is using the thread stack right now, so it's safe for us to use it.
	 */
	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
	jmp	restore_all_notrace
#ifdef CONFIG_X86_ESPFIX32
	/*
	 * Build the %ss:%esp pointer that the lss below will use to get
	 * back to the espfix stack.
	 */
	/* copy the iret frame of 12 bytes */
	FIXUP_ESPFIX_STACK			# %eax == %esp
	xorl	%edx, %edx			# zero error code
	lss	12+4(%esp), %esp		# back to espfix stack
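	/*
	 * (The lss above reloads the %ss:%esp pair that was saved at the
	 * top of this path, just past the 12-byte copy of the iret
	 * frame, putting us back on the espfix stack for the final
	 * iret.)
	 */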
	pushl	$-1				# mark this as an int
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	jmp	ret_from_exception
ENTRY(general_protection)
	pushl	$do_general_protection
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
	pushl	$do_async_page_fault
END(async_page_fault)
ENTRY(rewind_stack_do_exit)
	/* Prevent any naive code from trying to unwind to our caller. */
	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esi
	leal	-TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp
END(rewind_stack_do_exit)