x86/vm86: Use the normal pt_regs area for vm86
arch/x86/entry/entry_32.S
/*
 * Copyright (C) 1991,1992 Linus Torvalds
 *
 * entry_32.S contains the system-call and low-level fault and trap handling routines.
 *
 * Stack layout in 'syscall_exit':
 *	ptrace needs to have all registers on the stack.
 *	If the order here is changed, it needs to be
 *	updated in fork.c:copy_process(), signal.c:do_signal(),
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
 *	38(%esp) - %eflags
 *	3C(%esp) - %oldesp
 *	40(%esp) - %oldss
 */
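/*
 * This layout mirrors 'struct pt_regs'; the PT_* offsets used throughout
 * this file (PT_EBX, PT_EIP, ...) are generated from that struct by
 * asm-offsets.c.
 */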

#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_I386		(EM_386|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_LE		0x40000000

#ifndef CONFIG_AUDITSYSCALL
# define sysenter_audit		syscall_trace_entry
# define sysexit_audit		syscall_exit_work
#endif

	.section .entry.text, "ax"

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization.  The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

#ifdef CONFIG_PREEMPT
# define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
# define preempt_stop(clobbers)
# define resume_kernel		restore_all
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off?
	jz	1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * User gs save/restore
 *
 * %gs is used for userland TLS and kernel only uses it for stack
 * canary which is required to be at %gs:20 by gcc.  Read the comment
 * at the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately push/pop can't be no-op */
.macro PUSH_GS
	pushl	$0
.endm
.macro POP_GS pop=0
	addl	$(4 + \pop), %esp
.endm
.macro POP_GS_EX
.endm

 /* all the rest are no-op */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else	/* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
	pushl	%gs
.endm

.macro POP_GS pop=0
98:	popl	%gs
  .if \pop <> 0
	add	$\pop, %esp
  .endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, (%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro PTGS_TO_GS
98:	mov	PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, PT_GS(%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro GS_TO_REG reg
	movl	%gs, \reg
.endm
.macro REG_TO_PTGS reg
	movl	\reg, PT_GS(%esp)
.endm
.macro SET_KERNEL_GS reg
	movl	$(__KERNEL_STACK_CANARY), \reg
	movl	\reg, %gs
.endm

#endif	/* CONFIG_X86_32_LAZY_GS */

.macro SAVE_ALL
	cld
	PUSH_GS
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	%eax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
	movl	$(__USER_DS), %edx
	movl	%edx, %ds
	movl	%edx, %es
	movl	$(__KERNEL_PERCPU), %edx
	movl	%edx, %fs
	SET_KERNEL_GS %edx
.endm
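/*
 * After SAVE_ALL, %esp points at the saved %ebx slot, i.e. at the base of
 * the 'struct pt_regs' frame; that is why a bare "movl %esp, %eax" suffices
 * to pass a pt_regs pointer to the C handlers called below.
 */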

.macro RESTORE_INT_REGS
	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%eax
.endm

.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl	%ds
2:	popl	%es
3:	popl	%fs
	POP_GS \pop
.pushsection .fixup, "ax"
4:	movl	$0, (%esp)
	jmp	1b
5:	movl	$0, (%esp)
	jmp	2b
6:	movl	$0, (%esp)
	jmp	3b
.popsection
	_ASM_EXTABLE(1b, 4b)
	_ASM_EXTABLE(2b, 5b)
	_ASM_EXTABLE(3b, 6b)
	POP_GS_EX
.endm
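/*
 * Reloading a user segment register can fault if the saved selector is no
 * longer valid; the exception table entries above route such a fault to
 * the .fixup code, which replaces the saved selector on the stack with 0
 * (the null selector) and retries the pop.
 */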

ENTRY(ret_from_fork)
	pushl	%eax
	call	schedule_tail
	GET_THREAD_INFO(%ebp)
	popl	%eax
	pushl	$0x0202				# Reset kernel eflags
	popfl
	jmp	syscall_exit
END(ret_from_fork)

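/*
 * For a kernel thread, copy_thread() leaves the function to call in
 * pt_regs->bx and its argument in pt_regs->bp; the movl/call pair below
 * consumes them, and a zero is stored as the "syscall" return value.
 */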
ENTRY(ret_from_kernel_thread)
	pushl	%eax
	call	schedule_tail
	GET_THREAD_INFO(%ebp)
	popl	%eax
	pushl	$0x0202				# Reset kernel eflags
	popfl
	movl	PT_EBP(%esp), %eax
	call	*PT_EBX(%esp)
	movl	$0, PT_EAX(%esp)
	jmp	syscall_exit
ENDPROC(ret_from_kernel_thread)

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible, which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
	GET_THREAD_INFO(%ebp)
#ifdef CONFIG_VM86
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
	/*
	 * We can be coming here from child spawned by kernel_thread().
	 */
	movl	PT_CS(%esp), %eax
	andl	$SEGMENT_RPL_MASK, %eax
#endif
	cmpl	$USER_RPL, %eax
	jb	resume_kernel			# not returning to v8086 or userspace

ENTRY(resume_userspace)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)		# make sure we don't miss an interrupt
						# setting need_resched or sigpending
						# between sampling and the iret
	TRACE_IRQS_OFF
	movl	TI_flags(%ebp), %ecx
	andl	$_TIF_WORK_MASK, %ecx		# is there any work to be done on
						# int/exception return?
	jne	work_pending
	jmp	restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
need_resched:
	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	restore_all
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz	restore_all
	call	preempt_schedule_irq
	jmp	need_resched
END(resume_kernel)
#endif

/*
 * SYSENTER_RETURN points to after the SYSENTER instruction
 * in the vsyscall page.  See vsyscall-sysenter.S, which defines
 * the symbol.
 */

	# SYSENTER call handler stub
ENTRY(entry_SYSENTER_32)
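	/*
	 * On SYSENTER the CPU loaded %esp from MSR_IA32_SYSENTER_ESP, which
	 * the kernel points into the per-cpu TSS; TSS_sysenter_sp0 is the
	 * offset from there to tss.sp0, the top of the real kernel stack
	 * that we switch to here.
	 */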
	movl	TSS_sysenter_sp0(%esp), %esp
sysenter_past_esp:
	/*
	 * Interrupts are disabled here, but we can't trace that until
	 * enough kernel state has been set up for TRACE_IRQS_OFF to be
	 * callable - and we immediately enable interrupts at that point
	 * anyway.
	 */
	pushl	$__USER_DS
	pushl	%ebp
	pushfl
	orl	$X86_EFLAGS_IF, (%esp)
	pushl	$__USER_CS
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary: TI_sysenter_return
	 * is relative to thread_info, which is at the bottom of the
	 * kernel stack page.  4*4 means the 4 words pushed above;
	 * TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack;
	 * and THREAD_SIZE takes us to the bottom.
	 */
	pushl	((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)

	pushl	%eax
	SAVE_ALL
	ENABLE_INTERRUPTS(CLBR_NONE)

	/*
	 * Load the potential sixth argument from user stack.
	 * Careful about security.
	 */
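	/*
	 * %ebp holds the user stack pointer here.  The '-3' below ensures
	 * that the 4-byte load at (%ebp) lies entirely below __PAGE_OFFSET,
	 * so it cannot read into kernel space even when %ebp is just under
	 * the boundary.
	 */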
	cmpl	$__PAGE_OFFSET-3, %ebp
	jae	syscall_fault
	ASM_STAC
1:	movl	(%ebp), %ebp
	ASM_CLAC
	movl	%ebp, PT_EBP(%esp)
	_ASM_EXTABLE(1b, syscall_fault)

	GET_THREAD_INFO(%ebp)

	testl	$_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
	jnz	sysenter_audit
sysenter_do_call:
	cmpl	$(NR_syscalls), %eax
	jae	sysenter_badsys
	call	*sys_call_table(, %eax, 4)
sysenter_after_call:
	movl	%eax, PT_EAX(%esp)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl	TI_flags(%ebp), %ecx
	testl	$_TIF_ALLWORK_MASK, %ecx
	jnz	sysexit_audit
sysenter_exit:
	/* if something modifies registers it must also disable sysexit */
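	/*
	 * SYSEXIT hard-codes its register usage: it loads the user %eip
	 * from %edx and the user %esp from %ecx, which is why those two
	 * registers are set up here.
	 */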
	movl	PT_EIP(%esp), %edx
	movl	PT_OLDESP(%esp), %ecx
	xorl	%ebp, %ebp
	TRACE_IRQS_ON
1:	mov	PT_FS(%esp), %fs
	PTGS_TO_GS
	ENABLE_INTERRUPTS_SYSEXIT

#ifdef CONFIG_AUDITSYSCALL
sysenter_audit:
	testl	$(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), TI_flags(%ebp)
	jnz	syscall_trace_entry
	/* movl PT_EAX(%esp), %eax already set, syscall number: 1st arg to audit */
	movl	PT_EBX(%esp), %edx		/* ebx/a0: 2nd arg to audit */
	/* movl PT_ECX(%esp), %ecx already set, a1: 3rd arg to audit */
	pushl	PT_ESI(%esp)			/* a3: 5th arg */
	pushl	PT_EDX+4(%esp)			/* a2: 4th arg */
	call	__audit_syscall_entry
	popl	%ecx				/* get that remapped edx off the stack */
	popl	%ecx				/* get that remapped esi off the stack */
	movl	PT_EAX(%esp), %eax		/* reload syscall number */
	jmp	sysenter_do_call

sysexit_audit:
	testl	$(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jnz	syscall_exit_work
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)
	movl	%eax, %edx			/* second arg, syscall return value */
	cmpl	$-MAX_ERRNO, %eax		/* is it an error ? */
	setbe	%al				/* 1 if so, 0 if not */
	movzbl	%al, %eax			/* zero-extend that */
	call	__audit_syscall_exit
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl	TI_flags(%ebp), %ecx
	testl	$(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jnz	syscall_exit_work
	movl	PT_EAX(%esp), %eax		/* reload syscall return value */
	jmp	sysenter_exit
#endif

.pushsection .fixup, "ax"
2:	movl	$0, PT_FS(%esp)
	jmp	1b
.popsection
	_ASM_EXTABLE(1b, 2b)
	PTGS_TO_GS_EX
ENDPROC(entry_SYSENTER_32)

	# system call handler stub
ENTRY(entry_INT80_32)
	ASM_CLAC
	pushl	%eax				# save orig_eax
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
						# system call tracing in operation / emulation
	testl	$_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
	jnz	syscall_trace_entry
	cmpl	$(NR_syscalls), %eax
	jae	syscall_badsys
syscall_call:
	call	*sys_call_table(, %eax, 4)
syscall_after_call:
	movl	%eax, PT_EAX(%esp)		# store the return value
syscall_exit:
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)		# make sure we don't miss an interrupt
						# setting need_resched or sigpending
						# between sampling and the iret
	TRACE_IRQS_OFF
	movl	TI_flags(%ebp), %ecx
	testl	$_TIF_ALLWORK_MASK, %ecx	# current->work
	jnz	syscall_exit_work

restore_all:
	TRACE_IRQS_IRET
restore_all_notrace:
#ifdef CONFIG_X86_ESPFIX32
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	/*
	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	 * are returning to the kernel.
	 * See comments in process.c:copy_thread() for details.
	 */
	movb	PT_OLDSS(%esp), %ah
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
	je	ldt_ss				# returning to user-space with LDT SS
#endif
restore_nocheck:
	RESTORE_REGS 4				# skip orig_eax/error_code
irq_return:
	INTERRUPT_RETURN
.section .fixup, "ax"
ENTRY(iret_exc)
	pushl	$0				# no error code
	pushl	$do_iret_error
	jmp	error_code
.previous
	_ASM_EXTABLE(irq_return, iret_exc)

#ifdef CONFIG_X86_ESPFIX32
ldt_ss:
#ifdef CONFIG_PARAVIRT
	/*
	 * The kernel can't run on a non-flat stack if paravirt mode
	 * is active.  Rather than try to fixup the high bits of
	 * ESP, bypass this code entirely.  This may break DOSemu
	 * and/or Wine support in a paravirt VM, although the option
	 * is still available to implement the setting of the high
	 * 16-bits in the INTERRUPT_RETURN paravirt-op.
	 */
	cmpl	$0, pv_info+PARAVIRT_enabled
	jne	restore_nocheck
#endif

	/*
	 * Setup and switch to ESPFIX stack
	 *
	 * We're returning to userspace with a 16 bit stack. The CPU will not
	 * restore the high word of ESP for us on executing iret... This is an
	 * "official" bug of all the x86-compatible CPUs, which we can work
	 * around to make dosemu and wine happy. We do this by preloading the
	 * high word of ESP with the high word of the userspace ESP while
	 * compensating for the offset by changing to the ESPFIX segment with
	 * a base address that matches for the difference.
	 */
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
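	/*
	 * A GDT descriptor stores base[15:0] in bytes 2-3, base[23:16] in
	 * byte 4 and base[31:24] in byte 7; the two byte stores below patch
	 * just the high 16 bits of the espfix segment base.
	 */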
	mov	%esp, %edx			/* load kernel esp */
	mov	PT_OLDESP(%esp), %eax		/* load userspace esp */
	mov	%dx, %ax			/* eax: new kernel esp */
	sub	%eax, %edx			/* offset (low word is 0) */
	shr	$16, %edx
	mov	%dl, GDT_ESPFIX_SS + 4		/* bits 16..23 */
	mov	%dh, GDT_ESPFIX_SS + 7		/* bits 24..31 */
	pushl	$__ESPFIX_SS
	pushl	%eax				/* new kernel esp */
	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the IRET:
	 */
	DISABLE_INTERRUPTS(CLBR_EAX)
	lss	(%esp), %esp			/* switch to espfix segment */
	jmp	restore_nocheck
#endif
ENDPROC(entry_INT80_32)

	# perform work that needs to be done immediately before resumption
	ALIGN
work_pending:
	testb	$_TIF_NEED_RESCHED, %cl
	jz	work_notifysig
work_resched:
	call	schedule
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)		# make sure we don't miss an interrupt
						# setting need_resched or sigpending
						# between sampling and the iret
	TRACE_IRQS_OFF
	movl	TI_flags(%ebp), %ecx
	andl	$_TIF_WORK_MASK, %ecx		# is there any work to be done other
						# than syscall tracing?
	jz	restore_all
	testb	$_TIF_NEED_RESCHED, %cl
	jnz	work_resched

work_notifysig:					# deal with pending signals and
						# notify-resume requests
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
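	/*
	 * do_notify_resume(regs, unused, flags) uses the 32-bit regparm
	 * convention: %eax carries the pt_regs pointer, %edx the unused
	 * second argument, and %ecx still holds the work flags computed
	 * above.
	 */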
	movl	%esp, %eax
	xorl	%edx, %edx
	call	do_notify_resume
	jmp	resume_userspace
END(work_pending)

	# perform syscall exit tracing
	ALIGN
syscall_trace_entry:
	movl	$-ENOSYS, PT_EAX(%esp)
	movl	%esp, %eax
	call	syscall_trace_enter
	/* What it returned is what we'll actually use.  */
	cmpl	$(NR_syscalls), %eax
	jnae	syscall_call
	jmp	syscall_exit
END(syscall_trace_entry)

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testl	$_TIF_WORK_SYSCALL_EXIT, %ecx
	jz	work_pending
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)		# could let syscall_trace_leave() call
						# schedule() instead
	movl	%esp, %eax
	call	syscall_trace_leave
	jmp	resume_userspace
END(syscall_exit_work)

syscall_fault:
	ASM_CLAC
	GET_THREAD_INFO(%ebp)
	movl	$-EFAULT, PT_EAX(%esp)
	jmp	resume_userspace
END(syscall_fault)

syscall_badsys:
	movl	$-ENOSYS, %eax
	jmp	syscall_after_call
END(syscall_badsys)

sysenter_badsys:
	movl	$-ENOSYS, %eax
	jmp	sysenter_after_call
END(sysenter_badsys)

.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack
 *
 * We can't call C functions using the ESPFIX stack.  This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack, adjusting ESP with the matching offset.
 */
#ifdef CONFIG_X86_ESPFIX32
	/* fixup the stack */
	mov	GDT_ESPFIX_SS + 4, %al		/* bits 16..23 */
	mov	GDT_ESPFIX_SS + 7, %ah		/* bits 24..31 */
	shl	$16, %eax
	addl	%esp, %eax			/* the adjusted stack pointer */
	pushl	$__KERNEL_DS
	pushl	%eax
	lss	(%esp), %esp			/* switch to the normal stack segment */
#endif
.endm
.macro UNWIND_ESPFIX_STACK
#ifdef CONFIG_X86_ESPFIX32
	movl	%ss, %eax
	/* see if on espfix stack */
	cmpw	$__ESPFIX_SS, %ax
	jne	27f
	movl	$__KERNEL_DS, %eax
	movl	%eax, %ds
	movl	%eax, %es
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
27:
#endif
.endm

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
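/*
 * Each stub pushes (~vector + 0x80): biasing by 0x80 keeps the immediate
 * in signed-byte range, so the pushl encodes in two bytes and the stub
 * fits its 8-byte slot.  common_interrupt subtracts the 0x80 again to
 * recover ~vector in the orig_eax slot.
 */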
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushl	$(~vector+0x80)			/* Note: always in signed byte range */
    vector=vector+1
	jmp	common_interrupt
	.align	8
    .endr
END(irq_entries_start)

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addl	$-0x80, (%esp)			/* Adjust vector into the [-256, -1] range */
	SAVE_ALL
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	do_IRQ
	jmp	ret_from_intr
ENDPROC(common_interrupt)

#define BUILD_INTERRUPT3(name, nr, fn)	\
ENTRY(name)				\
	ASM_CLAC;			\
	pushl	$~(nr);			\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl	%esp, %eax;		\
	call	fn;			\
	jmp	ret_from_intr;		\
ENDPROC(name)


#ifdef CONFIG_TRACING
# define TRACE_BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
#else
# define TRACE_BUILD_INTERRUPT(name, nr)
#endif

#define BUILD_INTERRUPT(name, nr)		\
	BUILD_INTERRUPT3(name, nr, smp_##name);	\
	TRACE_BUILD_INTERRUPT(name, nr)

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

ENTRY(coprocessor_error)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_error
	jmp	error_code
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	ASM_CLAC
	pushl	$0
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
	ALTERNATIVE "pushl	$do_general_protection",	\
		    "pushl	$do_simd_coprocessor_error",	\
		    X86_FEATURE_XMM
#else
	pushl	$do_simd_coprocessor_error
#endif
	jmp	error_code
END(simd_coprocessor_error)

ENTRY(device_not_available)
	ASM_CLAC
	pushl	$-1				# mark this as an int
	pushl	$do_device_not_available
	jmp	error_code
END(device_not_available)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iret
	_ASM_EXTABLE(native_iret, iret_exc)
END(native_iret)

ENTRY(native_irq_enable_sysexit)
	sti
	sysexit
END(native_irq_enable_sysexit)
#endif

ENTRY(overflow)
	ASM_CLAC
	pushl	$0
	pushl	$do_overflow
	jmp	error_code
END(overflow)

ENTRY(bounds)
	ASM_CLAC
	pushl	$0
	pushl	$do_bounds
	jmp	error_code
END(bounds)

ENTRY(invalid_op)
	ASM_CLAC
	pushl	$0
	pushl	$do_invalid_op
	jmp	error_code
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_segment_overrun
	jmp	error_code
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	ASM_CLAC
	pushl	$do_invalid_TSS
	jmp	error_code
END(invalid_TSS)

ENTRY(segment_not_present)
	ASM_CLAC
	pushl	$do_segment_not_present
	jmp	error_code
END(segment_not_present)

ENTRY(stack_segment)
	ASM_CLAC
	pushl	$do_stack_segment
	jmp	error_code
END(stack_segment)

ENTRY(alignment_check)
	ASM_CLAC
	pushl	$do_alignment_check
	jmp	error_code
END(alignment_check)

ENTRY(divide_error)
	ASM_CLAC
	pushl	$0				# no error code
	pushl	$do_divide_error
	jmp	error_code
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	ASM_CLAC
	pushl	$0
	pushl	machine_check_vector
	jmp	error_code
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	ASM_CLAC
	pushl	$0
	pushl	$do_spurious_interrupt_bug
	jmp	error_code
END(spurious_interrupt_bug)

#ifdef CONFIG_XEN
/*
 * Xen doesn't set %esp to be precisely what the normal SYSENTER
 * entry point expects, so fix it up before using the normal path.
 */
ENTRY(xen_sysenter_target)
	addl	$5*4, %esp			/* remove xen-provided frame */
	jmp	sysenter_past_esp

ENTRY(xen_hypervisor_callback)
	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	TRACE_IRQS_OFF

	/*
	 * Check to see if we got the event in the critical
	 * region in xen_iret_direct, after we've reenabled
	 * events and checked for pending events.  This simulates
	 * iret instruction's behaviour where it delivers a
	 * pending interrupt when enabling interrupts:
	 */
	movl	PT_EIP(%esp), %eax
	cmpl	$xen_iret_start_crit, %eax
	jb	1f
	cmpl	$xen_iret_end_crit, %eax
	jae	1f

	jmp	xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:	mov	%esp, %eax
	call	xen_evtchn_do_upcall
#ifndef CONFIG_PREEMPT
	call	xen_maybe_preempt_hcall
#endif
	jmp	ret_from_intr
ENDPROC(xen_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we fix up by reattempting the load, and zeroing the segment
 * register if the load fails.
 * Category 2 we fix up by jumping to do_iret_error.  We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by maintaining a status value in EAX.
 */
ENTRY(xen_failsafe_callback)
	pushl	%eax
	movl	$1, %eax
1:	mov	4(%esp), %ds
2:	mov	8(%esp), %es
3:	mov	12(%esp), %fs
4:	mov	16(%esp), %gs
	/* EAX == 0 => Category 1 (Bad segment)
	   EAX != 0 => Category 2 (Bad IRET) */
	testl	%eax, %eax
	popl	%eax
	lea	16(%esp), %esp
	jz	5f
	jmp	iret_exc
5:	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	jmp	ret_from_exception

.section .fixup, "ax"
6:	xorl	%eax, %eax
	movl	%eax, 4(%esp)
	jmp	1b
7:	xorl	%eax, %eax
	movl	%eax, 8(%esp)
	jmp	2b
8:	xorl	%eax, %eax
	movl	%eax, 12(%esp)
	jmp	3b
9:	xorl	%eax, %eax
	movl	%eax, 16(%esp)
	jmp	4b
.previous
	_ASM_EXTABLE(1b, 6b)
	_ASM_EXTABLE(2b, 7b)
	_ASM_EXTABLE(3b, 8b)
	_ASM_EXTABLE(4b, 9b)
ENDPROC(xen_failsafe_callback)

BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 xen_evtchn_do_upcall)

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)

BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 hyperv_vector_handler)

#endif /* CONFIG_HYPERV */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(mcount)
	ret
END(mcount)

ENTRY(ftrace_caller)
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	$0				/* Pass NULL as regs pointer */
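	/*
	 * After the four pushes above, 4*4(%esp) is mcount's own return
	 * address, i.e. an address inside the traced function, while
	 * 0x4(%ebp) is the traced function's return address (its parent),
	 * found through the saved frame pointer.
	 */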
	movl	4*4(%esp), %eax
	movl	0x4(%ebp), %edx
	movl	function_trace_op, %ecx
	subl	$MCOUNT_INSN_SIZE, %eax

.globl ftrace_call
ftrace_call:
	call	ftrace_stub

	addl	$4, %esp			/* skip NULL pointer */
	popl	%edx
	popl	%ecx
	popl	%eax
ftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	jmp	ftrace_stub
#endif

.globl ftrace_stub
ftrace_stub:
	ret
END(ftrace_caller)

ENTRY(ftrace_regs_caller)
	pushf					/* push flags before compare (in cs location) */

	/*
	 * i386 does not save SS and ESP when coming from kernel.
	 * Instead, to get sp, &regs->sp is used (see ptrace.h).
	 * Unfortunately, that means eflags must be at the same location
	 * as the current return ip is.  We move the return ip into the
	 * ip location, and move flags into the return ip location.
	 */
	pushl	4(%esp)				/* save return ip into ip slot */

	pushl	$0				/* Load 0 into orig_ax */
	pushl	%gs
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	%eax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx

	movl	13*4(%esp), %eax		/* Get the saved flags */
	movl	%eax, 14*4(%esp)		/* Move saved flags into regs->flags location */
						/* clobbering return ip */
	movl	$__KERNEL_CS, 13*4(%esp)

	movl	12*4(%esp), %eax		/* Load ip (1st parameter) */
	subl	$MCOUNT_INSN_SIZE, %eax		/* Adjust ip */
	movl	0x4(%ebp), %edx			/* Load parent ip (2nd parameter) */
	movl	function_trace_op, %ecx		/* Save ftrace_pos in 3rd parameter */
	pushl	%esp				/* Save pt_regs as 4th parameter */

GLOBAL(ftrace_regs_call)
	call	ftrace_stub

	addl	$4, %esp			/* Skip pt_regs */
	movl	14*4(%esp), %eax		/* Move flags back into cs */
	movl	%eax, 13*4(%esp)		/* Needed to keep addl from modifying flags */
	movl	12*4(%esp), %eax		/* Get return ip from regs->ip */
	movl	%eax, 14*4(%esp)		/* Put return ip back for ret */

	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%eax
	popl	%ds
	popl	%es
	popl	%fs
	popl	%gs
	addl	$8, %esp			/* Skip orig_ax and ip */
	popf					/* Pop flags at end (no addl to corrupt flags) */
	jmp	ftrace_ret

	popf
	jmp	ftrace_stub
#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(mcount)
	cmpl	$__PAGE_OFFSET, %esp
	jb	ftrace_stub			/* Paging not enabled yet? */

	cmpl	$ftrace_stub, ftrace_trace_function
	jnz	trace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl	$ftrace_stub, ftrace_graph_return
	jnz	ftrace_graph_caller

	cmpl	$ftrace_graph_entry_stub, ftrace_graph_entry
	jnz	ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
	ret

	/* taken from glibc */
trace:
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	movl	0xc(%esp), %eax
	movl	0x4(%ebp), %edx
	subl	$MCOUNT_INSN_SIZE, %eax

	call	*ftrace_trace_function

	popl	%edx
	popl	%ecx
	popl	%eax
	jmp	ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	movl	0xc(%esp), %eax
	lea	0x4(%ebp), %edx
	movl	(%ebp), %ecx
	subl	$MCOUNT_INSN_SIZE, %eax
	call	prepare_ftrace_return
	popl	%edx
	popl	%ecx
	popl	%eax
	ret
END(ftrace_graph_caller)

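/*
 * The function-graph tracer redirects a traced function's return address
 * to this trampoline; ftrace_return_to_handler() hands back the original
 * return address, which we then jump to.
 */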
.globl return_to_handler
return_to_handler:
	pushl	%eax
	pushl	%edx
	movl	%ebp, %eax
	call	ftrace_return_to_handler
	movl	%eax, %ecx
	popl	%edx
	popl	%eax
	jmp	*%ecx
#endif

#ifdef CONFIG_TRACING
ENTRY(trace_page_fault)
	ASM_CLAC
	pushl	$trace_do_page_fault
	jmp	error_code
END(trace_page_fault)
#endif

ENTRY(page_fault)
	ASM_CLAC
	pushl	$do_page_fault
	ALIGN
error_code:
	/* the function address is in %gs's slot on the stack */
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	%eax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
	cld
	movl	$(__KERNEL_PERCPU), %ecx
	movl	%ecx, %fs
	UNWIND_ESPFIX_STACK
	GS_TO_REG %ecx
	movl	PT_GS(%esp), %edi		# get the function address
	movl	PT_ORIG_EAX(%esp), %edx		# get the error code
	movl	$-1, PT_ORIG_EAX(%esp)		# no syscall to restart
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx
	movl	$(__USER_DS), %ecx
	movl	%ecx, %ds
	movl	%ecx, %es
	TRACE_IRQS_OFF
	movl	%esp, %eax			# pt_regs pointer
	call	*%edi
	jmp	ret_from_exception
END(page_fault)

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack.  Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
.macro FIX_STACK offset ok label
	cmpw	$__KERNEL_CS, 4(%esp)
	jne	\ok
\label:
	movl	TSS_sysenter_sp0 + \offset(%esp), %esp
	pushfl
	pushl	$__KERNEL_CS
	pushl	$sysenter_past_esp
.endm

ENTRY(debug)
	ASM_CLAC
	cmpl	$entry_SYSENTER_32, (%esp)
	jne	debug_stack_correct
	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
	pushl	$-1				# mark this as an int
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl	%edx, %edx			# error code 0
	movl	%esp, %eax			# pt_regs pointer
	call	do_debug
	jmp	ret_from_exception
END(debug)

/*
 * NMI is doubly nasty.  It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack.  So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
ENTRY(nmi)
	ASM_CLAC
#ifdef CONFIG_X86_ESPFIX32
	pushl	%eax
	movl	%ss, %eax
	cmpw	$__ESPFIX_SS, %ax
	popl	%eax
	je	nmi_espfix_stack
#endif
	cmpl	$entry_SYSENTER_32, (%esp)
	je	nmi_stack_fixup
	pushl	%eax
	movl	%esp, %eax
	/*
	 * Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl	$(THREAD_SIZE-1), %eax
	cmpl	$(THREAD_SIZE-20), %eax
	popl	%eax
	jae	nmi_stack_correct
	cmpl	$entry_SYSENTER_32, 12(%esp)
	je	nmi_debug_stack_check
nmi_stack_correct:
	pushl	%eax
	SAVE_ALL
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	call	do_nmi
	jmp	restore_all_notrace

nmi_stack_fixup:
	FIX_STACK 12, nmi_stack_correct, 1
	jmp	nmi_stack_correct

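/*
 * The NMI arrived while the debug handler was still on the sysenter
 * stack: 12(%esp)/16(%esp) look past the NMI's own three-word frame at
 * the interrupted debug frame, and FIX_STACK gets an offset of 24
 * because two three-word exception frames now sit on the stack.
 */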
nmi_debug_stack_check:
	cmpw	$__KERNEL_CS, 16(%esp)
	jne	nmi_stack_correct
	cmpl	$debug, (%esp)
	jb	nmi_stack_correct
	cmpl	$debug_esp_fix_insn, (%esp)
	ja	nmi_stack_correct
	FIX_STACK 24, nmi_stack_correct, 1
	jmp	nmi_stack_correct

#ifdef CONFIG_X86_ESPFIX32
nmi_espfix_stack:
	/*
	 * Build a far pointer (SS:ESP) on the stack so that we can lss
	 * back to the espfix stack when the NMI handler is done.
	 */
	pushl	%ss
	pushl	%esp
	addl	$4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl	16(%esp)
	.endr
	pushl	%eax
	SAVE_ALL
	FIXUP_ESPFIX_STACK			# %eax == %esp
	xorl	%edx, %edx			# zero error code
	call	do_nmi
	RESTORE_REGS
	lss	12+4(%esp), %esp		# back to espfix stack
	jmp	irq_return
#endif
END(nmi)

ENTRY(int3)
	ASM_CLAC
	pushl	$-1				# mark this as an int
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	call	do_int3
	jmp	ret_from_exception
END(int3)

ENTRY(general_protection)
	pushl	$do_general_protection
	jmp	error_code
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
	ASM_CLAC
	pushl	$do_async_page_fault
	jmp	error_code
END(async_page_fault)
#endif