arch/x86/kernel/entry_32.S
1da177e4 1/*
2 *
3 * Copyright (C) 1991, 1992 Linus Torvalds
4 */
5
6/*
7 * entry.S contains the system-call and fault low-level handling routines.
8 * This also contains the timer-interrupt handler, as well as all interrupts
9 * and faults that can result in a task-switch.
10 *
11 * NOTE: This code handles signal-recognition, which happens every time
12 * after a timer-interrupt and after each system call.
13 *
14 * I changed all the .align's to 4 (16 byte alignment), as that's faster
15 * on a 486.
16 *
889f21ce 17 * Stack layout in 'syscall_exit':
18 * ptrace needs to have all regs on the stack.
19 * if the order here is changed, it needs to be
20 * updated in fork.c:copy_process, signal.c:do_signal,
21 * ptrace.c and ptrace.h
22 *
23 * 0(%esp) - %ebx
24 * 4(%esp) - %ecx
25 * 8(%esp) - %edx
26 * C(%esp) - %esi
27 * 10(%esp) - %edi
28 * 14(%esp) - %ebp
29 * 18(%esp) - %eax
30 * 1C(%esp) - %ds
31 * 20(%esp) - %es
464d1a78 32 * 24(%esp) - %fs
33 * 28(%esp) - %gs saved iff !CONFIG_X86_32_LAZY_GS
34 * 2C(%esp) - orig_eax
35 * 30(%esp) - %eip
36 * 34(%esp) - %cs
37 * 38(%esp) - %eflags
38 * 3C(%esp) - %oldesp
39 * 40(%esp) - %oldss
40 *
41 * "current" is in register %ebx during any slow entries.
42 */
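/*
 * Editor's note (illustrative, not part of the original source): the
 * offsets in the table above correspond to the PT_* constants used
 * throughout this file (generated from struct pt_regs by asm-offsets),
 * e.g. PT_EBX == 0x0, PT_EAX == 0x18 and PT_ORIG_EAX == 0x2C, so
 * "movl PT_EAX(%esp),%eax" reloads the value saved at 18(%esp).
 */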
43
1da177e4 44#include <linux/linkage.h>
d7e7528b 45#include <linux/err.h>
1da177e4 46#include <asm/thread_info.h>
55f327fa 47#include <asm/irqflags.h>
48#include <asm/errno.h>
49#include <asm/segment.h>
50#include <asm/smp.h>
0341c14d 51#include <asm/page_types.h>
be44d2aa 52#include <asm/percpu.h>
fe7cacc1 53#include <asm/dwarf2.h>
ab68ed98 54#include <asm/processor-flags.h>
395a59d0 55#include <asm/ftrace.h>
9b7dc567 56#include <asm/irq_vectors.h>
40d2e763 57#include <asm/cpufeature.h>
b4ca46e4 58#include <asm/alternative-asm.h>
6837a54d 59#include <asm/asm.h>
e59d1b0a 60#include <asm/smap.h>
1da177e4 61
62/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
63#include <linux/elf-em.h>
64#define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE)
65#define __AUDIT_ARCH_LE 0x40000000
66
67#ifndef CONFIG_AUDITSYSCALL
68#define sysenter_audit syscall_trace_entry
69#define sysexit_audit syscall_exit_work
70#endif
71
72 .section .entry.text, "ax"
73
74/*
75 * We use macros for low-level operations which need to be overridden
76 * for paravirtualization. The following will never clobber any registers:
77 * INTERRUPT_RETURN (aka. "iret")
78 * GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
d75cd22f 79 * ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
80 *
81 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
82 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
83 * Allowing a register to be clobbered can shrink the paravirt replacement
84 * enough to patch inline, increasing performance.
85 */
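/*
 * Editor's note (illustrative): on a native, non-paravirt build these
 * macros collapse to the plain instructions named above - see
 * native_iret and native_irq_enable_sysexit near the end of this file,
 * which are simply "iret" and "sti; sysexit".
 */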
86
1da177e4 87#ifdef CONFIG_PREEMPT
139ec7c4 88#define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
1da177e4 89#else
139ec7c4 90#define preempt_stop(clobbers)
2e04bc76 91#define resume_kernel restore_all
92#endif
93
94.macro TRACE_IRQS_IRET
95#ifdef CONFIG_TRACE_IRQFLAGS
ab68ed98 96 testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off?
97 jz 1f
98 TRACE_IRQS_ON
991:
100#endif
101.endm
102
103/*
104 * User gs save/restore
105 *
106 * %gs is used for userland TLS and the kernel only uses it for the stack
107 * canary, which gcc requires to be at %gs:20. Read the comment
108 * at the top of stackprotector.h for more info.
109 *
110 * Local labels 98 and 99 are used.
111 */
112#ifdef CONFIG_X86_32_LAZY_GS
113
114 /* unfortunately push/pop can't be no-op */
115.macro PUSH_GS
df5d1874 116 pushl_cfi $0
117.endm
118.macro POP_GS pop=0
119 addl $(4 + \pop), %esp
120 CFI_ADJUST_CFA_OFFSET -(4 + \pop)
121.endm
122.macro POP_GS_EX
123.endm
124
125 /* all the rest are no-op */
126.macro PTGS_TO_GS
127.endm
128.macro PTGS_TO_GS_EX
129.endm
130.macro GS_TO_REG reg
131.endm
132.macro REG_TO_PTGS reg
133.endm
134.macro SET_KERNEL_GS reg
135.endm
136
137#else /* CONFIG_X86_32_LAZY_GS */
138
139.macro PUSH_GS
df5d1874 140 pushl_cfi %gs
141 /*CFI_REL_OFFSET gs, 0*/
142.endm
143
144.macro POP_GS pop=0
df5d1874 14598: popl_cfi %gs
146 /*CFI_RESTORE gs*/
147 .if \pop <> 0
148 add $\pop, %esp
149 CFI_ADJUST_CFA_OFFSET -\pop
150 .endif
151.endm
152.macro POP_GS_EX
153.pushsection .fixup, "ax"
15499: movl $0, (%esp)
155 jmp 98b
ccbeed3a 156.popsection
6837a54d 157 _ASM_EXTABLE(98b,99b)
158.endm
159
160.macro PTGS_TO_GS
16198: mov PT_GS(%esp), %gs
162.endm
163.macro PTGS_TO_GS_EX
164.pushsection .fixup, "ax"
16599: movl $0, PT_GS(%esp)
166 jmp 98b
ccbeed3a 167.popsection
6837a54d 168 _ASM_EXTABLE(98b,99b)
169.endm
170
171.macro GS_TO_REG reg
172 movl %gs, \reg
173 /*CFI_REGISTER gs, \reg*/
174.endm
175.macro REG_TO_PTGS reg
176 movl \reg, PT_GS(%esp)
177 /*CFI_REL_OFFSET gs, PT_GS*/
178.endm
179.macro SET_KERNEL_GS reg
60a5317f 180 movl $(__KERNEL_STACK_CANARY), \reg
181 movl \reg, %gs
182.endm
183
184#endif /* CONFIG_X86_32_LAZY_GS */
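/*
 * Editor's summary (illustrative): with CONFIG_X86_32_LAZY_GS the PT_GS
 * slot is only reserved (pushl $0) and %gs itself is left alone; without
 * it, PUSH_GS/POP_GS really save and restore %gs and SET_KERNEL_GS
 * reloads the stack-canary segment on kernel entry.
 */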
185
186.macro SAVE_ALL
187 cld
ccbeed3a 188 PUSH_GS
df5d1874 189 pushl_cfi %fs
f0d96110 190 /*CFI_REL_OFFSET fs, 0;*/
df5d1874 191 pushl_cfi %es
f0d96110 192 /*CFI_REL_OFFSET es, 0;*/
df5d1874 193 pushl_cfi %ds
f0d96110 194 /*CFI_REL_OFFSET ds, 0;*/
df5d1874 195 pushl_cfi %eax
f0d96110 196 CFI_REL_OFFSET eax, 0
df5d1874 197 pushl_cfi %ebp
f0d96110 198 CFI_REL_OFFSET ebp, 0
df5d1874 199 pushl_cfi %edi
f0d96110 200 CFI_REL_OFFSET edi, 0
df5d1874 201 pushl_cfi %esi
f0d96110 202 CFI_REL_OFFSET esi, 0
df5d1874 203 pushl_cfi %edx
f0d96110 204 CFI_REL_OFFSET edx, 0
df5d1874 205 pushl_cfi %ecx
f0d96110 206 CFI_REL_OFFSET ecx, 0
df5d1874 207 pushl_cfi %ebx
208 CFI_REL_OFFSET ebx, 0
209 movl $(__USER_DS), %edx
210 movl %edx, %ds
211 movl %edx, %es
212 movl $(__KERNEL_PERCPU), %edx
464d1a78 213 movl %edx, %fs
ccbeed3a 214 SET_KERNEL_GS %edx
f0d96110 215.endm
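/*
 * Editor's note (illustrative): after SAVE_ALL the stack matches the
 * "Stack layout in 'syscall_exit'" picture at the top of this file,
 * with %ebx at 0(%esp) up through the segment registers, the data
 * segments reloaded with __USER_DS and %fs with __KERNEL_PERCPU.
 */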
1da177e4 216
f0d96110 217.macro RESTORE_INT_REGS
df5d1874 218 popl_cfi %ebx
f0d96110 219 CFI_RESTORE ebx
df5d1874 220 popl_cfi %ecx
f0d96110 221 CFI_RESTORE ecx
df5d1874 222 popl_cfi %edx
f0d96110 223 CFI_RESTORE edx
df5d1874 224 popl_cfi %esi
f0d96110 225 CFI_RESTORE esi
df5d1874 226 popl_cfi %edi
f0d96110 227 CFI_RESTORE edi
df5d1874 228 popl_cfi %ebp
f0d96110 229 CFI_RESTORE ebp
df5d1874 230 popl_cfi %eax
fe7cacc1 231 CFI_RESTORE eax
f0d96110 232.endm
1da177e4 233
ccbeed3a 234.macro RESTORE_REGS pop=0
f0d96110 235 RESTORE_INT_REGS
df5d1874 2361: popl_cfi %ds
f0d96110 237 /*CFI_RESTORE ds;*/
df5d1874 2382: popl_cfi %es
f0d96110 239 /*CFI_RESTORE es;*/
df5d1874 2403: popl_cfi %fs
f0d96110 241 /*CFI_RESTORE fs;*/
ccbeed3a 242 POP_GS \pop
243.pushsection .fixup, "ax"
2444: movl $0, (%esp)
245 jmp 1b
2465: movl $0, (%esp)
247 jmp 2b
2486: movl $0, (%esp)
249 jmp 3b
f95d47ca 250.popsection
251 _ASM_EXTABLE(1b,4b)
252 _ASM_EXTABLE(2b,5b)
253 _ASM_EXTABLE(3b,6b)
ccbeed3a 254 POP_GS_EX
f0d96110 255.endm
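/*
 * Editor's note (illustrative): the .fixup/_ASM_EXTABLE entries above
 * handle a fault while popping %ds/%es/%fs (e.g. a stale selector left
 * behind by user space): the handler stores 0 into the stack slot and
 * retries the pop, so the return to user space proceeds with a null
 * selector instead of an unhandled fault in the kernel.
 */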
1da177e4 256
257.macro RING0_INT_FRAME
258 CFI_STARTPROC simple
259 CFI_SIGNAL_FRAME
260 CFI_DEF_CFA esp, 3*4
261 /*CFI_OFFSET cs, -2*4;*/
fe7cacc1 262 CFI_OFFSET eip, -3*4
f0d96110 263.endm
fe7cacc1 264
265.macro RING0_EC_FRAME
266 CFI_STARTPROC simple
267 CFI_SIGNAL_FRAME
268 CFI_DEF_CFA esp, 4*4
269 /*CFI_OFFSET cs, -2*4;*/
fe7cacc1 270 CFI_OFFSET eip, -3*4
f0d96110 271.endm
fe7cacc1 272
273.macro RING0_PTREGS_FRAME
274 CFI_STARTPROC simple
275 CFI_SIGNAL_FRAME
276 CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
277 /*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
278 CFI_OFFSET eip, PT_EIP-PT_OLDESP
279 /*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
280 /*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
281 CFI_OFFSET eax, PT_EAX-PT_OLDESP
282 CFI_OFFSET ebp, PT_EBP-PT_OLDESP
283 CFI_OFFSET edi, PT_EDI-PT_OLDESP
284 CFI_OFFSET esi, PT_ESI-PT_OLDESP
285 CFI_OFFSET edx, PT_EDX-PT_OLDESP
286 CFI_OFFSET ecx, PT_ECX-PT_OLDESP
eb5b7b9d 287 CFI_OFFSET ebx, PT_EBX-PT_OLDESP
f0d96110 288.endm
289
290ENTRY(ret_from_fork)
fe7cacc1 291 CFI_STARTPROC
df5d1874 292 pushl_cfi %eax
293 call schedule_tail
294 GET_THREAD_INFO(%ebp)
295 popl_cfi %eax
296 pushl_cfi $0x0202 # Reset kernel eflags
297 popfl_cfi
1da177e4 298 jmp syscall_exit
fe7cacc1 299 CFI_ENDPROC
47a55cd7 300END(ret_from_fork)
1da177e4 301
302/*
303 * Interrupt exit functions should be protected against kprobes
304 */
305 .pushsection .kprobes.text, "ax"
306/*
307 * Return to user mode is not as complex as all this looks,
308 * but we want the default path for a system call return to
309 * go as quickly as possible, which is why some of this is
310 * less clear than it otherwise should be.
311 */
312
313 # userspace resumption stub bypassing syscall exit tracing
314 ALIGN
fe7cacc1 315 RING0_PTREGS_FRAME
1da177e4 316ret_from_exception:
139ec7c4 317 preempt_stop(CLBR_ANY)
318ret_from_intr:
319 GET_THREAD_INFO(%ebp)
29a2e283 320#ifdef CONFIG_VM86
321 movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
322 movb PT_CS(%esp), %al
ab68ed98 323 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
324#else
325 /*
326 * We can be coming here from a syscall done in the kernel space,
327 * e.g. a failed kernel_execve().
328 */
329 movl PT_CS(%esp), %eax
330 andl $SEGMENT_RPL_MASK, %eax
331#endif
332 cmpl $USER_RPL, %eax
333 jb resume_kernel # not returning to v8086 or userspace
f95d47ca 334
1da177e4 335ENTRY(resume_userspace)
c7e872e7 336 LOCKDEP_SYS_EXIT
139ec7c4 337 DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
338 # setting need_resched or sigpending
339 # between sampling and the iret
e32e58a9 340 TRACE_IRQS_OFF
341 movl TI_flags(%ebp), %ecx
342 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
343 # int/exception return?
344 jne work_pending
345 jmp restore_all
47a55cd7 346END(ret_from_exception)
347
348#ifdef CONFIG_PREEMPT
349ENTRY(resume_kernel)
139ec7c4 350 DISABLE_INTERRUPTS(CLBR_ANY)
1da177e4 351 cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
2e04bc76 352 jnz restore_all
353need_resched:
354 movl TI_flags(%ebp), %ecx # need_resched set ?
355 testb $_TIF_NEED_RESCHED, %cl
356 jz restore_all
ab68ed98 357 testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ?
358 jz restore_all
359 call preempt_schedule_irq
360 jmp need_resched
47a55cd7 361END(resume_kernel)
1da177e4 362#endif
fe7cacc1 363 CFI_ENDPROC
364/*
365 * End of kprobes section
366 */
367 .popsection
368
369/* SYSENTER_RETURN points to after the "sysenter" instruction in
370   the vsyscall page. See vsyscall-sysenter.S, which defines the symbol. */
371
372 # sysenter call handler stub
0aa97fb2 373ENTRY(ia32_sysenter_target)
fe7cacc1 374 CFI_STARTPROC simple
adf14236 375 CFI_SIGNAL_FRAME
376 CFI_DEF_CFA esp, 0
377 CFI_REGISTER esp, ebp
faca6227 378 movl TSS_sysenter_sp0(%esp),%esp
1da177e4 379sysenter_past_esp:
55f327fa 380 /*
381	 * Interrupts are disabled here, but we can't trace that until
382	 * we have enough kernel state set up to call TRACE_IRQS_OFF - and
383	 * we immediately enable interrupts at that point anyway.
55f327fa 384 */
3234282f 385 pushl_cfi $__USER_DS
fe7cacc1 386 /*CFI_REL_OFFSET ss, 0*/
df5d1874 387 pushl_cfi %ebp
fe7cacc1 388 CFI_REL_OFFSET esp, 0
df5d1874 389 pushfl_cfi
d93c870b 390 orl $X86_EFLAGS_IF, (%esp)
3234282f 391 pushl_cfi $__USER_CS
fe7cacc1 392 /*CFI_REL_OFFSET cs, 0*/
393 /*
394 * Push current_thread_info()->sysenter_return to the stack.
395 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
396 * pushed above; +8 corresponds to copy_thread's esp0 setting.
397 */
7bf04be8 398 pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
fe7cacc1 399 CFI_REL_OFFSET eip, 0
1da177e4 400
df5d1874 401 pushl_cfi %eax
402 SAVE_ALL
403 ENABLE_INTERRUPTS(CLBR_NONE)
404
405/*
406 * Load the potential sixth argument from user stack.
407 * Careful about security.
408 */
409 cmpl $__PAGE_OFFSET-3,%ebp
410 jae syscall_fault
e59d1b0a 411 ASM_STAC
1da177e4 4121: movl (%ebp),%ebp
e59d1b0a 413 ASM_CLAC
d93c870b 414 movl %ebp,PT_EBP(%esp)
6837a54d 415 _ASM_EXTABLE(1b,syscall_fault)
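/*
 * Editor's note (illustrative): the "cmpl $__PAGE_OFFSET-3,%ebp" check
 * above guarantees that the whole 4-byte load from (%ebp) lies below
 * __PAGE_OFFSET (the highest accepted %ebp is __PAGE_OFFSET-4), and the
 * extable entry catches the case where the address is in range but not
 * mapped, bouncing the syscall to syscall_fault.
 */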
1da177e4 416
417 GET_THREAD_INFO(%ebp)
418
88200bc2 419 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
420 jnz sysenter_audit
421sysenter_do_call:
303395ac 422 cmpl $(NR_syscalls), %eax
423 jae syscall_badsys
424 call *sys_call_table(,%eax,4)
eb5b7b9d 425 movl %eax,PT_EAX(%esp)
c7e872e7 426 LOCKDEP_SYS_EXIT
42c24fa2 427 DISABLE_INTERRUPTS(CLBR_ANY)
55f327fa 428 TRACE_IRQS_OFF
1da177e4 429 movl TI_flags(%ebp), %ecx
88200bc2 430 testl $_TIF_ALLWORK_MASK, %ecx
431 jne sysexit_audit
432sysenter_exit:
1da177e4 433/* if something modifies registers it must also disable sysexit */
434 movl PT_EIP(%esp), %edx
435 movl PT_OLDESP(%esp), %ecx
1da177e4 436 xorl %ebp,%ebp
55f327fa 437 TRACE_IRQS_ON
464d1a78 4381: mov PT_FS(%esp), %fs
ccbeed3a 439 PTGS_TO_GS
d75cd22f 440 ENABLE_INTERRUPTS_SYSEXIT
441
442#ifdef CONFIG_AUDITSYSCALL
443sysenter_audit:
88200bc2 444 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
445 jnz syscall_trace_entry
446 addl $4,%esp
447 CFI_ADJUST_CFA_OFFSET -4
448 /* %esi already in 8(%esp) 6th arg: 4th syscall arg */
449 /* %edx already in 4(%esp) 5th arg: 3rd syscall arg */
450 /* %ecx already in 0(%esp) 4th arg: 2nd syscall arg */
451 movl %ebx,%ecx /* 3rd arg: 1st syscall arg */
452 movl %eax,%edx /* 2nd arg: syscall number */
453 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
b05d8447 454 call __audit_syscall_entry
df5d1874 455 pushl_cfi %ebx
456 movl PT_EAX(%esp),%eax /* reload syscall number */
457 jmp sysenter_do_call
458
459sysexit_audit:
88200bc2 460 testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
461 jne syscall_exit_work
462 TRACE_IRQS_ON
463 ENABLE_INTERRUPTS(CLBR_ANY)
464 movl %eax,%edx /* second arg, syscall return value */
465 cmpl $-MAX_ERRNO,%eax /* is it an error ? */
466 setbe %al /* 1 if so, 0 if not */
af0575bb 467 movzbl %al,%eax /* zero-extend that */
d7e7528b 468 call __audit_syscall_exit
469 DISABLE_INTERRUPTS(CLBR_ANY)
470 TRACE_IRQS_OFF
471 movl TI_flags(%ebp), %ecx
88200bc2 472 testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
473 jne syscall_exit_work
474 movl PT_EAX(%esp),%eax /* reload syscall return value */
475 jmp sysenter_exit
476#endif
477
fe7cacc1 478 CFI_ENDPROC
f95d47ca 479.pushsection .fixup,"ax"
464d1a78 4802: movl $0,PT_FS(%esp)
f95d47ca 481 jmp 1b
f95d47ca 482.popsection
6837a54d 483 _ASM_EXTABLE(1b,2b)
ccbeed3a 484 PTGS_TO_GS_EX
0aa97fb2 485ENDPROC(ia32_sysenter_target)
1da177e4 486
487/*
488 * syscall stub including irq exit should be protected against kprobes
489 */
490 .pushsection .kprobes.text, "ax"
491 # system call handler stub
492ENTRY(system_call)
fe7cacc1 493 RING0_INT_FRAME # can't unwind into user space anyway
e59d1b0a 494 ASM_CLAC
df5d1874 495 pushl_cfi %eax # save orig_eax
496 SAVE_ALL
497 GET_THREAD_INFO(%ebp)
ed75e8d5 498 # system call tracing in operation / emulation
88200bc2 499 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
1da177e4 500 jnz syscall_trace_entry
303395ac 501 cmpl $(NR_syscalls), %eax
502 jae syscall_badsys
503syscall_call:
504 call *sys_call_table(,%eax,4)
eb5b7b9d 505 movl %eax,PT_EAX(%esp) # store the return value
1da177e4 506syscall_exit:
c7e872e7 507 LOCKDEP_SYS_EXIT
139ec7c4 508 DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
509 # setting need_resched or sigpending
510 # between sampling and the iret
55f327fa 511 TRACE_IRQS_OFF
1da177e4 512 movl TI_flags(%ebp), %ecx
88200bc2 513 testl $_TIF_ALLWORK_MASK, %ecx # current->work
514 jne syscall_exit_work
515
516restore_all:
517 TRACE_IRQS_IRET
518restore_all_notrace:
519 movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
520 # Warning: PT_OLDSS(%esp) contains the wrong/random values if we
521 # are returning to the kernel.
522 # See comments in process.c:copy_thread() for details.
523 movb PT_OLDSS(%esp), %ah
524 movb PT_CS(%esp), %al
ab68ed98 525 andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
78be3706 526 cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
fe7cacc1 527 CFI_REMEMBER_STATE
528 je ldt_ss # returning to user-space with LDT SS
529restore_nocheck:
ccbeed3a 530 RESTORE_REGS 4 # skip orig_eax/error_code
f7f3d791 531irq_return:
3701d863 532 INTERRUPT_RETURN
1da177e4 533.section .fixup,"ax"
90e9f536 534ENTRY(iret_exc)
535 pushl $0 # no error code
536 pushl $do_iret_error
537 jmp error_code
1da177e4 538.previous
6837a54d 539 _ASM_EXTABLE(irq_return,iret_exc)
1da177e4 540
fe7cacc1 541 CFI_RESTORE_STATE
1da177e4 542ldt_ss:
eb5b7b9d 543 larl PT_OLDSS(%esp), %eax
544 jnz restore_nocheck
545 testl $0x00400000, %eax # returning to 32bit stack?
546	jnz restore_nocheck	# all right, normal return
547
548#ifdef CONFIG_PARAVIRT
549 /*
550 * The kernel can't run on a non-flat stack if paravirt mode
551 * is active. Rather than try to fixup the high bits of
552 * ESP, bypass this code entirely. This may break DOSemu
553 * and/or Wine support in a paravirt VM, although the option
554 * is still available to implement the setting of the high
555 * 16-bits in the INTERRUPT_RETURN paravirt-op.
556 */
93b1eab3 557 cmpl $0, pv_info+PARAVIRT_enabled
558 jne restore_nocheck
559#endif
560
561/*
562 * Setup and switch to ESPFIX stack
563 *
564 * We're returning to userspace with a 16 bit stack. The CPU will not
565 * restore the high word of ESP for us on executing iret... This is an
566 * "official" bug of all the x86-compatible CPUs, which we can work
567 * around to make dosemu and wine happy. We do this by preloading the
568 * high word of ESP with the high word of the userspace ESP while
569 * compensating for the offset by changing to the ESPFIX segment with
570 * a base address that makes up for the difference.
571 */
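/*
 * Editor's worked example (illustrative values, not from the original
 * source): with a kernel %esp of 0xc1234a80 and a saved user %esp of
 * 0xbfff7ffc, %eax below becomes 0xbfff4a80 (user high word, kernel low
 * word) and %edx becomes 0x0124 after the sub/shr, so the ESPFIX
 * descriptor gets base 0x01240000 (assuming its low base bits are 0)
 * and base + %eax still addresses the original kernel stack word.
 */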
72c511dd 572#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
573 mov %esp, %edx /* load kernel esp */
574 mov PT_OLDESP(%esp), %eax /* load userspace esp */
575 mov %dx, %ax /* eax: new kernel esp */
576 sub %eax, %edx /* offset (low word is 0) */
dc4c2a0a 577 shr $16, %edx
578 mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
579 mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
580 pushl_cfi $__ESPFIX_SS
581 pushl_cfi %eax /* new kernel esp */
582 /* Disable interrupts, but do not irqtrace this section: we
583 * will soon execute iret and the tracer was already set to
584 * the irqstate after the iret */
139ec7c4 585 DISABLE_INTERRUPTS(CLBR_EAX)
dc4c2a0a 586 lss (%esp), %esp /* switch to espfix segment */
587 CFI_ADJUST_CFA_OFFSET -8
588 jmp restore_nocheck
fe7cacc1 589 CFI_ENDPROC
47a55cd7 590ENDPROC(system_call)
591
592 # perform work that needs to be done immediately before resumption
593 ALIGN
fe7cacc1 594 RING0_PTREGS_FRAME # can't unwind into user space anyway
595work_pending:
596 testb $_TIF_NEED_RESCHED, %cl
597 jz work_notifysig
598work_resched:
599 call schedule
c7e872e7 600 LOCKDEP_SYS_EXIT
139ec7c4 601 DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
602 # setting need_resched or sigpending
603 # between sampling and the iret
55f327fa 604 TRACE_IRQS_OFF
605 movl TI_flags(%ebp), %ecx
606 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
607 # than syscall tracing?
608 jz restore_all
609 testb $_TIF_NEED_RESCHED, %cl
610 jnz work_resched
611
612work_notifysig: # deal with pending signals and
613 # notify-resume requests
74b47a78 614#ifdef CONFIG_VM86
ab68ed98 615 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
616 movl %esp, %eax
617 jne work_notifysig_v86 # returning to kernel-space or
618 # vm86-space
619 TRACE_IRQS_ON
620 ENABLE_INTERRUPTS(CLBR_NONE)
621 movb PT_CS(%esp), %bl
622 andb $SEGMENT_RPL_MASK, %bl
623 cmpb $USER_RPL, %bl
624 jb resume_kernel
625 xorl %edx, %edx
626 call do_notify_resume
44fbbb3d 627 jmp resume_userspace
628
629 ALIGN
630work_notifysig_v86:
df5d1874 631 pushl_cfi %ecx # save ti_flags for do_notify_resume
1da177e4 632 call save_v86_state # %eax contains pt_regs pointer
df5d1874 633 popl_cfi %ecx
1da177e4 634 movl %eax, %esp
635#else
636 movl %esp, %eax
637#endif
638 TRACE_IRQS_ON
639 ENABLE_INTERRUPTS(CLBR_NONE)
640 movb PT_CS(%esp), %bl
641 andb $SEGMENT_RPL_MASK, %bl
642 cmpb $USER_RPL, %bl
643 jb resume_kernel
644 xorl %edx, %edx
645 call do_notify_resume
44fbbb3d 646 jmp resume_userspace
47a55cd7 647END(work_pending)
648
649 # perform syscall exit tracing
650 ALIGN
651syscall_trace_entry:
eb5b7b9d 652 movl $-ENOSYS,PT_EAX(%esp)
1da177e4 653 movl %esp, %eax
654 call syscall_trace_enter
655 /* What it returned is what we'll actually use. */
303395ac 656 cmpl $(NR_syscalls), %eax
657 jnae syscall_call
658 jmp syscall_exit
47a55cd7 659END(syscall_trace_entry)
660
661 # perform syscall exit tracing
662 ALIGN
663syscall_exit_work:
88200bc2 664 testl $_TIF_WORK_SYSCALL_EXIT, %ecx
1da177e4 665 jz work_pending
55f327fa 666 TRACE_IRQS_ON
d4d67150 667 ENABLE_INTERRUPTS(CLBR_ANY) # could let syscall_trace_leave() call
668 # schedule() instead
669 movl %esp, %eax
d4d67150 670 call syscall_trace_leave
1da177e4 671 jmp resume_userspace
47a55cd7 672END(syscall_exit_work)
fe7cacc1 673 CFI_ENDPROC
1da177e4 674
fe7cacc1 675 RING0_INT_FRAME # can't unwind into user space anyway
1da177e4 676syscall_fault:
e59d1b0a 677 ASM_CLAC
1da177e4 678 GET_THREAD_INFO(%ebp)
eb5b7b9d 679 movl $-EFAULT,PT_EAX(%esp)
1da177e4 680 jmp resume_userspace
47a55cd7 681END(syscall_fault)
1da177e4 682
1da177e4 683syscall_badsys:
eb5b7b9d 684 movl $-ENOSYS,PT_EAX(%esp)
1da177e4 685 jmp resume_userspace
47a55cd7 686END(syscall_badsys)
fe7cacc1 687 CFI_ENDPROC
688/*
689 * End of kprobes section
690 */
691 .popsection
1da177e4 692
693/*
694 * System calls that need a pt_regs pointer.
695 */
e258e4e0 696#define PTREGSCALL0(name) \
303395ac 697ENTRY(ptregs_##name) ; \
253f29a4 698 leal 4(%esp),%eax; \
699 jmp sys_##name; \
700ENDPROC(ptregs_##name)
253f29a4 701
e258e4e0 702#define PTREGSCALL1(name) \
303395ac 703ENTRY(ptregs_##name) ; \
e258e4e0 704 leal 4(%esp),%edx; \
ce9119ad 705 movl (PT_EBX+4)(%esp),%eax; \
706 jmp sys_##name; \
707ENDPROC(ptregs_##name)
708
709#define PTREGSCALL2(name) \
303395ac 710ENTRY(ptregs_##name) ; \
e258e4e0 711 leal 4(%esp),%ecx; \
712 movl (PT_ECX+4)(%esp),%edx; \
713 movl (PT_EBX+4)(%esp),%eax; \
714 jmp sys_##name; \
715ENDPROC(ptregs_##name)
716
717#define PTREGSCALL3(name) \
303395ac 718ENTRY(ptregs_##name) ; \
a34107b5 719 CFI_STARTPROC; \
e258e4e0 720 leal 4(%esp),%eax; \
a34107b5 721 pushl_cfi %eax; \
722 movl PT_EDX(%eax),%ecx; \
723 movl PT_ECX(%eax),%edx; \
724 movl PT_EBX(%eax),%eax; \
725 call sys_##name; \
726 addl $4,%esp; \
727 CFI_ADJUST_CFA_OFFSET -4; \
728 ret; \
729 CFI_ENDPROC; \
730ENDPROC(ptregs_##name)
e258e4e0 731
27f59559 732PTREGSCALL1(iopl)
e258e4e0 733PTREGSCALL0(fork)
e258e4e0 734PTREGSCALL0(vfork)
11cf88bd 735PTREGSCALL3(execve)
052acad4 736PTREGSCALL2(sigaltstack)
737PTREGSCALL0(sigreturn)
738PTREGSCALL0(rt_sigreturn)
739PTREGSCALL2(vm86)
740PTREGSCALL1(vm86old)
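/*
 * Editor's note (illustrative): PTREGSCALL1(iopl) above expands to
 * roughly the following (shown as a comment so it is not assembled
 * twice):
 *
 *	ENTRY(ptregs_iopl)
 *		leal 4(%esp),%edx
 *		movl (PT_EBX+4)(%esp),%eax
 *		jmp sys_iopl
 *	ENDPROC(ptregs_iopl)
 *
 * i.e. %eax gets the first syscall argument from the saved %ebx and
 * %edx a pointer to the saved register frame (skipping the return
 * address pushed by the call into the stub).
 */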
253f29a4 741
f839bbc5 742/* Clone is an oddball. The 4th arg is in %edi */
303395ac 743ENTRY(ptregs_clone)
a34107b5 744 CFI_STARTPROC
f839bbc5 745 leal 4(%esp),%eax
746 pushl_cfi %eax
747 pushl_cfi PT_EDI(%eax)
748 movl PT_EDX(%eax),%ecx
749 movl PT_ECX(%eax),%edx
750 movl PT_EBX(%eax),%eax
751 call sys_clone
752 addl $8,%esp
a34107b5 753 CFI_ADJUST_CFA_OFFSET -8
f839bbc5 754 ret
755 CFI_ENDPROC
756ENDPROC(ptregs_clone)
f839bbc5 757
f0d96110 758.macro FIXUP_ESPFIX_STACK
759/*
760 * Switch back from the ESPFIX stack to the normal zero-based stack
761 *
762 * We can't call C functions using the ESPFIX stack. This code reads
763 * the high word of the segment base from the GDT and switches to the
764 * normal stack and adjusts ESP with the matching offset.
765 */
766 /* fixup the stack */
767 mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
768 mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
769 shl $16, %eax
770 addl %esp, %eax /* the adjusted stack pointer */
771 pushl_cfi $__KERNEL_DS
772 pushl_cfi %eax
dc4c2a0a 773 lss (%esp), %esp /* switch to the normal stack segment */
774 CFI_ADJUST_CFA_OFFSET -8
775.endm
776.macro UNWIND_ESPFIX_STACK
777 movl %ss, %eax
778 /* see if on espfix stack */
779 cmpw $__ESPFIX_SS, %ax
780 jne 27f
781 movl $__KERNEL_DS, %eax
782 movl %eax, %ds
783 movl %eax, %es
784 /* switch to normal stack */
785 FIXUP_ESPFIX_STACK
78627:
787.endm
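/*
 * Editor's note (illustrative): UNWIND_ESPFIX_STACK is used by the
 * error_code path below, and the NMI path uses FIXUP_ESPFIX_STACK
 * directly, to get off the tiny ESPFIX stack before any C code runs.
 */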
788
789/*
790 * Build the entry stubs and pointer table with some assembler magic.
791 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
792 * single cache line on all modern x86 implementations.
1da177e4 793 */
4687518c 794.section .init.rodata,"a"
1da177e4 795ENTRY(interrupt)
ea714547 796.section .entry.text, "ax"
797 .p2align 5
798 .p2align CONFIG_X86_L1_CACHE_SHIFT
1da177e4 799ENTRY(irq_entries_start)
fe7cacc1 800 RING0_INT_FRAME
4687518c 801vector=FIRST_EXTERNAL_VECTOR
802.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
803 .balign 32
804 .rept 7
805 .if vector < NR_VECTORS
8665596e 806 .if vector <> FIRST_EXTERNAL_VECTOR
fe7cacc1 807 CFI_ADJUST_CFA_OFFSET -4
b7c6244f 808 .endif
df5d1874 8091: pushl_cfi $(~vector+0x80) /* Note: always in signed byte range */
8665596e 810 .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
811 jmp 2f
812 .endif
813 .previous
1da177e4 814 .long 1b
ea714547 815 .section .entry.text, "ax"
1da177e4 816vector=vector+1
817 .endif
818 .endr
8192: jmp common_interrupt
1da177e4 820.endr
821END(irq_entries_start)
822
823.previous
824END(interrupt)
825.previous
1da177e4 826
827/*
828 * the CPU automatically disables interrupts when executing an IRQ vector,
829 * so IRQ-flags tracing has to follow that:
830 */
b7c6244f 831 .p2align CONFIG_X86_L1_CACHE_SHIFT
1da177e4 832common_interrupt:
e59d1b0a 833 ASM_CLAC
b7c6244f 834 addl $-0x80,(%esp) /* Adjust vector into the [-256,-1] range */
1da177e4 835 SAVE_ALL
55f327fa 836 TRACE_IRQS_OFF
837 movl %esp,%eax
838 call do_IRQ
839 jmp ret_from_intr
47a55cd7 840ENDPROC(common_interrupt)
fe7cacc1 841 CFI_ENDPROC
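/*
 * Editor's worked example (illustrative): for vector 0x31 the stub
 * pushes $(~0x31+0x80) = $0x4e, which fits in a signed byte so the
 * push is only two bytes; the addl $-0x80 above turns it back into
 * ~0x31 in orig_eax, and do_IRQ is expected to undo the complement to
 * recover the vector number.
 */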
1da177e4 842
843/*
844 * Irq entries should be protected against kprobes
845 */
846 .pushsection .kprobes.text, "ax"
02cf94c3 847#define BUILD_INTERRUPT3(name, nr, fn) \
1da177e4 848ENTRY(name) \
fe7cacc1 849 RING0_INT_FRAME; \
e59d1b0a 850 ASM_CLAC; \
df5d1874 851 pushl_cfi $~(nr); \
fe7cacc1 852 SAVE_ALL; \
55f327fa 853 TRACE_IRQS_OFF \
1da177e4 854 movl %esp,%eax; \
02cf94c3 855 call fn; \
55f327fa 856 jmp ret_from_intr; \
857 CFI_ENDPROC; \
858ENDPROC(name)
1da177e4 859
860#define BUILD_INTERRUPT(name, nr) BUILD_INTERRUPT3(name, nr, smp_##name)
861
1da177e4 862/* The include is where all of the SMP etc. interrupts come from */
1164dd00 863#include <asm/entry_arch.h>
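/*
 * Editor's note (illustrative, assumed usage): entry_arch.h instantiates
 * these stubs, e.g. BUILD_INTERRUPT(reschedule_interrupt, RESCHEDULE_VECTOR)
 * would produce an entry that pushes $~(RESCHEDULE_VECTOR), runs SAVE_ALL
 * and TRACE_IRQS_OFF, and calls smp_reschedule_interrupt with the pt_regs
 * pointer in %eax before jumping to ret_from_intr.
 */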
1da177e4 864
1da177e4 865ENTRY(coprocessor_error)
fe7cacc1 866 RING0_INT_FRAME
e59d1b0a 867 ASM_CLAC
868 pushl_cfi $0
869 pushl_cfi $do_coprocessor_error
1da177e4 870 jmp error_code
fe7cacc1 871 CFI_ENDPROC
47a55cd7 872END(coprocessor_error)
873
874ENTRY(simd_coprocessor_error)
fe7cacc1 875 RING0_INT_FRAME
e59d1b0a 876 ASM_CLAC
df5d1874 877 pushl_cfi $0
878#ifdef CONFIG_X86_INVD_BUG
879 /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
df5d1874 880661: pushl_cfi $do_general_protection
881662:
882.section .altinstructions,"a"
b4ca46e4 883 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
884.previous
885.section .altinstr_replacement,"ax"
886663: pushl $do_simd_coprocessor_error
887664:
888.previous
889#else
df5d1874 890 pushl_cfi $do_simd_coprocessor_error
40d2e763 891#endif
1da177e4 892 jmp error_code
fe7cacc1 893 CFI_ENDPROC
47a55cd7 894END(simd_coprocessor_error)
895
896ENTRY(device_not_available)
fe7cacc1 897 RING0_INT_FRAME
e59d1b0a 898 ASM_CLAC
899 pushl_cfi $-1 # mark this as an int
900 pushl_cfi $do_device_not_available
7643e9b9 901 jmp error_code
fe7cacc1 902 CFI_ENDPROC
47a55cd7 903END(device_not_available)
1da177e4 904
905#ifdef CONFIG_PARAVIRT
906ENTRY(native_iret)
3701d863 907 iret
6837a54d 908 _ASM_EXTABLE(native_iret, iret_exc)
47a55cd7 909END(native_iret)
d3561b7f 910
d75cd22f 911ENTRY(native_irq_enable_sysexit)
912 sti
913 sysexit
d75cd22f 914END(native_irq_enable_sysexit)
915#endif
916
1da177e4 917ENTRY(overflow)
fe7cacc1 918 RING0_INT_FRAME
e59d1b0a 919 ASM_CLAC
920 pushl_cfi $0
921 pushl_cfi $do_overflow
1da177e4 922 jmp error_code
fe7cacc1 923 CFI_ENDPROC
47a55cd7 924END(overflow)
925
926ENTRY(bounds)
fe7cacc1 927 RING0_INT_FRAME
e59d1b0a 928 ASM_CLAC
929 pushl_cfi $0
930 pushl_cfi $do_bounds
1da177e4 931 jmp error_code
fe7cacc1 932 CFI_ENDPROC
47a55cd7 933END(bounds)
934
935ENTRY(invalid_op)
fe7cacc1 936 RING0_INT_FRAME
e59d1b0a 937 ASM_CLAC
938 pushl_cfi $0
939 pushl_cfi $do_invalid_op
1da177e4 940 jmp error_code
fe7cacc1 941 CFI_ENDPROC
47a55cd7 942END(invalid_op)
943
944ENTRY(coprocessor_segment_overrun)
fe7cacc1 945 RING0_INT_FRAME
e59d1b0a 946 ASM_CLAC
947 pushl_cfi $0
948 pushl_cfi $do_coprocessor_segment_overrun
1da177e4 949 jmp error_code
fe7cacc1 950 CFI_ENDPROC
47a55cd7 951END(coprocessor_segment_overrun)
952
953ENTRY(invalid_TSS)
fe7cacc1 954 RING0_EC_FRAME
e59d1b0a 955 ASM_CLAC
df5d1874 956 pushl_cfi $do_invalid_TSS
1da177e4 957 jmp error_code
fe7cacc1 958 CFI_ENDPROC
47a55cd7 959END(invalid_TSS)
960
961ENTRY(segment_not_present)
fe7cacc1 962 RING0_EC_FRAME
e59d1b0a 963 ASM_CLAC
df5d1874 964 pushl_cfi $do_segment_not_present
1da177e4 965 jmp error_code
fe7cacc1 966 CFI_ENDPROC
47a55cd7 967END(segment_not_present)
968
969ENTRY(stack_segment)
fe7cacc1 970 RING0_EC_FRAME
e59d1b0a 971 ASM_CLAC
df5d1874 972 pushl_cfi $do_stack_segment
1da177e4 973 jmp error_code
fe7cacc1 974 CFI_ENDPROC
47a55cd7 975END(stack_segment)
1da177e4 976
1da177e4 977ENTRY(alignment_check)
fe7cacc1 978 RING0_EC_FRAME
e59d1b0a 979 ASM_CLAC
df5d1874 980 pushl_cfi $do_alignment_check
1da177e4 981 jmp error_code
fe7cacc1 982 CFI_ENDPROC
47a55cd7 983END(alignment_check)
1da177e4 984
985ENTRY(divide_error)
986 RING0_INT_FRAME
e59d1b0a 987 ASM_CLAC
988 pushl_cfi $0 # no error code
989 pushl_cfi $do_divide_error
1da177e4 990 jmp error_code
fe7cacc1 991 CFI_ENDPROC
47a55cd7 992END(divide_error)
993
994#ifdef CONFIG_X86_MCE
995ENTRY(machine_check)
fe7cacc1 996 RING0_INT_FRAME
e59d1b0a 997 ASM_CLAC
998 pushl_cfi $0
999 pushl_cfi machine_check_vector
1da177e4 1000 jmp error_code
fe7cacc1 1001 CFI_ENDPROC
47a55cd7 1002END(machine_check)
1003#endif
1004
1005ENTRY(spurious_interrupt_bug)
fe7cacc1 1006 RING0_INT_FRAME
e59d1b0a 1007 ASM_CLAC
1008 pushl_cfi $0
1009 pushl_cfi $do_spurious_interrupt_bug
1da177e4 1010 jmp error_code
fe7cacc1 1011 CFI_ENDPROC
47a55cd7 1012END(spurious_interrupt_bug)
1013/*
1014 * End of kprobes section
1015 */
1016 .popsection
1da177e4 1017
1018ENTRY(kernel_thread_helper)
1019 pushl $0 # fake return address for unwinder
1020 CFI_STARTPROC
1021 movl %edi,%eax
1022 call *%esi
02ba1a32 1023 call do_exit
5f5db591 1024 ud2 # padding for call trace
1025 CFI_ENDPROC
1026ENDPROC(kernel_thread_helper)
1027
5ead97c8 1028#ifdef CONFIG_XEN
1029/* Xen doesn't set %esp to be precisely what the normal sysenter
1030 entrypoint expects, so fix it up before using the normal path. */
1031ENTRY(xen_sysenter_target)
1032 RING0_INT_FRAME
1033 addl $5*4, %esp /* remove xen-provided frame */
2ddf9b7b 1034 CFI_ADJUST_CFA_OFFSET -5*4
e2a81baf 1035 jmp sysenter_past_esp
557d7d4e 1036 CFI_ENDPROC
e2a81baf 1037
1038ENTRY(xen_hypervisor_callback)
1039 CFI_STARTPROC
df5d1874 1040 pushl_cfi $0
1041 SAVE_ALL
1042 TRACE_IRQS_OFF
1043
1044 /* Check to see if we got the event in the critical
1045 region in xen_iret_direct, after we've reenabled
1046 events and checked for pending events. This simulates
1047	   the iret instruction's behaviour where it delivers a
1048 pending interrupt when enabling interrupts. */
1049 movl PT_EIP(%esp),%eax
1050 cmpl $xen_iret_start_crit,%eax
1051 jb 1f
1052 cmpl $xen_iret_end_crit,%eax
1053 jae 1f
1054
0f2c8769 1055 jmp xen_iret_crit_fixup
e2a81baf 1056
e2a81baf 1057ENTRY(xen_do_upcall)
b77797fb 10581: mov %esp, %eax
1059 call xen_evtchn_do_upcall
1060 jmp ret_from_intr
1061 CFI_ENDPROC
1062ENDPROC(xen_hypervisor_callback)
1063
1064# Hypervisor uses this for application faults while it executes.
1065# We get here for two reasons:
1066# 1. Fault while reloading DS, ES, FS or GS
1067# 2. Fault while executing IRET
1068# Category 1 we fix up by reattempting the load, and zeroing the segment
1069# register if the load fails.
1070# Category 2 we fix up by jumping to do_iret_error. We cannot use the
1071# normal Linux return path in this case because if we use the IRET hypercall
1072# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
1073# We distinguish between categories by maintaining a status value in EAX.
1074ENTRY(xen_failsafe_callback)
1075 CFI_STARTPROC
df5d1874 1076 pushl_cfi %eax
1077 movl $1,%eax
10781: mov 4(%esp),%ds
10792: mov 8(%esp),%es
10803: mov 12(%esp),%fs
10814: mov 16(%esp),%gs
1082 testl %eax,%eax
df5d1874 1083 popl_cfi %eax
1084 lea 16(%esp),%esp
1085 CFI_ADJUST_CFA_OFFSET -16
1086 jz 5f
1087 addl $16,%esp
1088 jmp iret_exc # EAX != 0 => Category 2 (Bad IRET)
df5d1874 10895: pushl_cfi $0 # EAX == 0 => Category 1 (Bad segment)
5ead97c8
JF
1090 SAVE_ALL
1091 jmp ret_from_exception
1092 CFI_ENDPROC
1093
1094.section .fixup,"ax"
10956: xorl %eax,%eax
1096 movl %eax,4(%esp)
1097 jmp 1b
10987: xorl %eax,%eax
1099 movl %eax,8(%esp)
1100 jmp 2b
11018: xorl %eax,%eax
1102 movl %eax,12(%esp)
1103 jmp 3b
11049: xorl %eax,%eax
1105 movl %eax,16(%esp)
1106 jmp 4b
1107.previous
1108 _ASM_EXTABLE(1b,6b)
1109 _ASM_EXTABLE(2b,7b)
1110 _ASM_EXTABLE(3b,8b)
1111 _ASM_EXTABLE(4b,9b)
1112ENDPROC(xen_failsafe_callback)
1113
1114BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
1115 xen_evtchn_do_upcall)
1116
1117#endif /* CONFIG_XEN */
1118
606576ce 1119#ifdef CONFIG_FUNCTION_TRACER
1120#ifdef CONFIG_DYNAMIC_FTRACE
1121
1122ENTRY(mcount)
1123 ret
1124END(mcount)
1125
1126ENTRY(ftrace_caller)
1127 cmpl $0, function_trace_stop
1128 jne ftrace_stub
1129
1130 pushl %eax
1131 pushl %ecx
1132 pushl %edx
1133 pushl $0 /* Pass NULL as regs pointer */
1134 movl 4*4(%esp), %eax
d61f82d0 1135 movl 0x4(%ebp), %edx
28fb5dfa 1136 leal function_trace_op, %ecx
395a59d0 1137 subl $MCOUNT_INSN_SIZE, %eax
1138
1139.globl ftrace_call
1140ftrace_call:
1141 call ftrace_stub
1142
08f6fba5 1143 addl $4,%esp /* skip NULL pointer */
1144 popl %edx
1145 popl %ecx
1146 popl %eax
4de72395 1147ftrace_ret:
1148#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1149.globl ftrace_graph_call
1150ftrace_graph_call:
1151 jmp ftrace_stub
1152#endif
1153
1154.globl ftrace_stub
1155ftrace_stub:
1156 ret
1157END(ftrace_caller)
1158
1159ENTRY(ftrace_regs_caller)
1160 pushf /* push flags before compare (in cs location) */
1161 cmpl $0, function_trace_stop
1162 jne ftrace_restore_flags
1163
1164 /*
1165 * i386 does not save SS and ESP when coming from kernel.
1166 * Instead, to get sp, &regs->sp is used (see ptrace.h).
1167 * Unfortunately, that means eflags must be at the same location
1168 * as the current return ip is. We move the return ip into the
1169 * ip location, and move flags into the return ip location.
1170 */
1171 pushl 4(%esp) /* save return ip into ip slot */
1172
1173 pushl $0 /* Load 0 into orig_ax */
1174 pushl %gs
1175 pushl %fs
1176 pushl %es
1177 pushl %ds
1178 pushl %eax
1179 pushl %ebp
1180 pushl %edi
1181 pushl %esi
1182 pushl %edx
1183 pushl %ecx
1184 pushl %ebx
1185
1186 movl 13*4(%esp), %eax /* Get the saved flags */
1187 movl %eax, 14*4(%esp) /* Move saved flags into regs->flags location */
1188 /* clobbering return ip */
1189 movl $__KERNEL_CS,13*4(%esp)
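	/*
	 * Editor's note (illustrative): at this point the stack mirrors
	 * struct pt_regs: 0(%esp)=bx, 4=cx, 8=dx, 12=si, 16=di, 20=bp,
	 * 24=ax, then ds/es/fs/gs, 11*4=orig_ax, 12*4=ip, 13*4=cs (just
	 * set to __KERNEL_CS) and 14*4=flags; sp/ss are absent, which is
	 * why &regs->sp is used as described in the comment above.
	 */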
1190
1191 movl 12*4(%esp), %eax /* Load ip (1st parameter) */
a5e37863 1192 subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */
e4ea3f6b 1193 movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */
4de72395 1194 leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
e4ea3f6b 1195 pushl %esp /* Save pt_regs as 4th parameter */
1196
1197GLOBAL(ftrace_regs_call)
1198 call ftrace_stub
1199
1200 addl $4, %esp /* Skip pt_regs */
1201 movl 14*4(%esp), %eax /* Move flags back into cs */
1202 movl %eax, 13*4(%esp) /* Needed to keep addl from modifying flags */
1203 movl 12*4(%esp), %eax /* Get return ip from regs->ip */
1204 movl %eax, 14*4(%esp) /* Put return ip back for ret */
1205
1206 popl %ebx
1207 popl %ecx
1208 popl %edx
1209 popl %esi
1210 popl %edi
1211 popl %ebp
1212 popl %eax
1213 popl %ds
1214 popl %es
1215 popl %fs
1216 popl %gs
1217 addl $8, %esp /* Skip orig_ax and ip */
1218 popf /* Pop flags at end (no addl to corrupt flags) */
1219 jmp ftrace_ret
1220
1221ftrace_restore_flags:
1222 popf
1223 jmp ftrace_stub
1224#else /* ! CONFIG_DYNAMIC_FTRACE */
1225
16444a8a 1226ENTRY(mcount)
1227 cmpl $0, function_trace_stop
1228 jne ftrace_stub
1229
1230 cmpl $ftrace_stub, ftrace_trace_function
1231 jnz trace
fb52607a 1232#ifdef CONFIG_FUNCTION_GRAPH_TRACER
c2324b69 1233 cmpl $ftrace_stub, ftrace_graph_return
fb52607a 1234 jnz ftrace_graph_caller
1235
1236 cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
1237 jnz ftrace_graph_caller
caf4b323 1238#endif
1239.globl ftrace_stub
1240ftrace_stub:
1241 ret
1242
1243 /* taken from glibc */
1244trace:
1245 pushl %eax
1246 pushl %ecx
1247 pushl %edx
1248 movl 0xc(%esp), %eax
1249 movl 0x4(%ebp), %edx
395a59d0 1250 subl $MCOUNT_INSN_SIZE, %eax
16444a8a 1251
d61f82d0 1252 call *ftrace_trace_function
1253
1254 popl %edx
1255 popl %ecx
1256 popl %eax
1257 jmp ftrace_stub
1258END(mcount)
d61f82d0 1259#endif /* CONFIG_DYNAMIC_FTRACE */
606576ce 1260#endif /* CONFIG_FUNCTION_TRACER */
16444a8a 1261
1262#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1263ENTRY(ftrace_graph_caller)
1264 pushl %eax
1265 pushl %ecx
1266 pushl %edx
1dc1c6ad 1267 movl 0xc(%esp), %edx
caf4b323 1268 lea 0x4(%ebp), %eax
71e308a2 1269 movl (%ebp), %ecx
bb4304c7 1270 subl $MCOUNT_INSN_SIZE, %edx
caf4b323 1271 call prepare_ftrace_return
1272 popl %edx
1273 popl %ecx
1274 popl %eax
e7d3737e 1275 ret
fb52607a 1276END(ftrace_graph_caller)
1277
1278.globl return_to_handler
1279return_to_handler:
caf4b323 1280 pushl %eax
caf4b323 1281 pushl %edx
71e308a2 1282 movl %ebp, %eax
caf4b323 1283 call ftrace_return_to_handler
194ec341 1284 movl %eax, %ecx
caf4b323 1285 popl %edx
caf4b323 1286 popl %eax
194ec341 1287 jmp *%ecx
e7d3737e 1288#endif
16444a8a 1289
1290/*
1291 * Some functions should be protected against kprobes
1292 */
1293 .pushsection .kprobes.text, "ax"
1294
1295ENTRY(page_fault)
1296 RING0_EC_FRAME
e59d1b0a 1297 ASM_CLAC
df5d1874 1298 pushl_cfi $do_page_fault
1299 ALIGN
1300error_code:
ccbeed3a 1301 /* the function address is in %gs's slot on the stack */
df5d1874 1302 pushl_cfi %fs
ccbeed3a 1303 /*CFI_REL_OFFSET fs, 0*/
df5d1874 1304 pushl_cfi %es
d211af05 1305 /*CFI_REL_OFFSET es, 0*/
df5d1874 1306 pushl_cfi %ds
d211af05 1307 /*CFI_REL_OFFSET ds, 0*/
df5d1874 1308 pushl_cfi %eax
d211af05 1309 CFI_REL_OFFSET eax, 0
df5d1874 1310 pushl_cfi %ebp
d211af05 1311 CFI_REL_OFFSET ebp, 0
df5d1874 1312 pushl_cfi %edi
d211af05 1313 CFI_REL_OFFSET edi, 0
df5d1874 1314 pushl_cfi %esi
d211af05 1315 CFI_REL_OFFSET esi, 0
df5d1874 1316 pushl_cfi %edx
d211af05 1317 CFI_REL_OFFSET edx, 0
df5d1874 1318 pushl_cfi %ecx
d211af05 1319 CFI_REL_OFFSET ecx, 0
df5d1874 1320 pushl_cfi %ebx
1321 CFI_REL_OFFSET ebx, 0
1322 cld
1323 movl $(__KERNEL_PERCPU), %ecx
1324 movl %ecx, %fs
1325 UNWIND_ESPFIX_STACK
1326 GS_TO_REG %ecx
1327 movl PT_GS(%esp), %edi # get the function address
1328 movl PT_ORIG_EAX(%esp), %edx # get the error code
1329 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
1330 REG_TO_PTGS %ecx
1331 SET_KERNEL_GS %ecx
1332 movl $(__USER_DS), %ecx
1333 movl %ecx, %ds
1334 movl %ecx, %es
1335 TRACE_IRQS_OFF
1336 movl %esp,%eax # pt_regs pointer
1337 call *%edi
1338 jmp ret_from_exception
1339 CFI_ENDPROC
1340END(page_fault)
1341
1342/*
1343 * Debug traps and NMI can happen at the one SYSENTER instruction
1344 * that sets up the real kernel stack. Check here, since we can't
1345 * allow the wrong stack to be used.
1346 *
1347 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
1348 * already pushed 3 words if it hits on the sysenter instruction:
1349 * eflags, cs and eip.
1350 *
1351 * We just load the right stack, and push the three (known) values
1352 * by hand onto the new stack - while updating the return eip past
1353 * the instruction that would have done it for sysenter.
1354 */
1355.macro FIX_STACK offset ok label
1356 cmpw $__KERNEL_CS, 4(%esp)
1357 jne \ok
1358\label:
1359 movl TSS_sysenter_sp0 + \offset(%esp), %esp
1360 CFI_DEF_CFA esp, 0
1361 CFI_UNDEFINED eip
1362 pushfl_cfi
1363 pushl_cfi $__KERNEL_CS
1364 pushl_cfi $sysenter_past_esp
d211af05 1365 CFI_REL_OFFSET eip, 0
f0d96110 1366.endm
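/*
 * Editor's note (illustrative): the callers below pass offset 12 when
 * the trap hit directly on the sysenter instruction (the handler itself
 * pushed the 3-word eflags/cs/eip frame), and offset 24 from
 * nmi_debug_stack_check, where the debug trap's own 3-word frame sits
 * underneath the NMI's.
 */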
1367
1368ENTRY(debug)
1369 RING0_INT_FRAME
e59d1b0a 1370 ASM_CLAC
1371 cmpl $ia32_sysenter_target,(%esp)
1372 jne debug_stack_correct
f0d96110 1373 FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
d211af05 1374debug_stack_correct:
df5d1874 1375 pushl_cfi $-1 # mark this as an int
1376 SAVE_ALL
1377 TRACE_IRQS_OFF
1378 xorl %edx,%edx # error code 0
1379 movl %esp,%eax # pt_regs pointer
1380 call do_debug
1381 jmp ret_from_exception
1382 CFI_ENDPROC
1383END(debug)
1384
1385/*
1386 * NMI is doubly nasty. It can happen _while_ we're handling
1387 * a debug fault, and the debug fault hasn't yet been able to
1388 * clear up the stack. So we first check whether we got an
1389 * NMI on the sysenter entry path, but after that we need to
1390 * check whether we got an NMI on the debug path where the debug
1391 * fault happened on the sysenter path.
1392 */
1393ENTRY(nmi)
1394 RING0_INT_FRAME
e59d1b0a 1395 ASM_CLAC
df5d1874 1396 pushl_cfi %eax
1397 movl %ss, %eax
1398 cmpw $__ESPFIX_SS, %ax
df5d1874 1399 popl_cfi %eax
1400 je nmi_espfix_stack
1401 cmpl $ia32_sysenter_target,(%esp)
1402 je nmi_stack_fixup
df5d1874 1403 pushl_cfi %eax
1404 movl %esp,%eax
1405 /* Do not access memory above the end of our stack page,
1406 * it might not exist.
1407 */
1408 andl $(THREAD_SIZE-1),%eax
1409 cmpl $(THREAD_SIZE-20),%eax
df5d1874 1410 popl_cfi %eax
1411 jae nmi_stack_correct
1412 cmpl $ia32_sysenter_target,12(%esp)
1413 je nmi_debug_stack_check
1414nmi_stack_correct:
1415 /* We have a RING0_INT_FRAME here */
df5d1874 1416 pushl_cfi %eax
d211af05 1417 SAVE_ALL
1418 xorl %edx,%edx # zero error code
1419 movl %esp,%eax # pt_regs pointer
1420 call do_nmi
2e04bc76 1421 jmp restore_all_notrace
d211af05
AH
1422 CFI_ENDPROC
1423
1424nmi_stack_fixup:
1425 RING0_INT_FRAME
f0d96110 1426 FIX_STACK 12, nmi_stack_correct, 1
1427 jmp nmi_stack_correct
1428
1429nmi_debug_stack_check:
1430 /* We have a RING0_INT_FRAME here */
1431 cmpw $__KERNEL_CS,16(%esp)
1432 jne nmi_stack_correct
1433 cmpl $debug,(%esp)
1434 jb nmi_stack_correct
1435 cmpl $debug_esp_fix_insn,(%esp)
1436 ja nmi_stack_correct
f0d96110 1437 FIX_STACK 24, nmi_stack_correct, 1
1438 jmp nmi_stack_correct
1439
1440nmi_espfix_stack:
1441 /* We have a RING0_INT_FRAME here.
1442 *
1443 * create the pointer to lss back
1444 */
1445 pushl_cfi %ss
1446 pushl_cfi %esp
bda3a897 1447 addl $4, (%esp)
1448 /* copy the iret frame of 12 bytes */
1449 .rept 3
df5d1874 1450 pushl_cfi 16(%esp)
d211af05 1451 .endr
df5d1874 1452 pushl_cfi %eax
d211af05 1453 SAVE_ALL
1454 FIXUP_ESPFIX_STACK # %eax == %esp
1455 xorl %edx,%edx # zero error code
1456 call do_nmi
1457 RESTORE_REGS
1458 lss 12+4(%esp), %esp # back to espfix stack
1459 CFI_ADJUST_CFA_OFFSET -24
1460 jmp irq_return
1461 CFI_ENDPROC
1462END(nmi)
1463
1464ENTRY(int3)
1465 RING0_INT_FRAME
e59d1b0a 1466 ASM_CLAC
df5d1874 1467 pushl_cfi $-1 # mark this as an int
1468 SAVE_ALL
1469 TRACE_IRQS_OFF
1470 xorl %edx,%edx # zero error code
1471 movl %esp,%eax # pt_regs pointer
1472 call do_int3
1473 jmp ret_from_exception
1474 CFI_ENDPROC
1475END(int3)
1476
1477ENTRY(general_protection)
1478 RING0_EC_FRAME
df5d1874 1479 pushl_cfi $do_general_protection
1480 jmp error_code
1481 CFI_ENDPROC
1482END(general_protection)
1483
1484#ifdef CONFIG_KVM_GUEST
1485ENTRY(async_page_fault)
1486 RING0_EC_FRAME
e59d1b0a 1487 ASM_CLAC
60cf637a 1488 pushl_cfi $do_async_page_fault
1489 jmp error_code
1490 CFI_ENDPROC
2ae9d293 1491END(async_page_fault)
1492#endif
1493
1494/*
1495 * End of kprobes section
1496 */
1497 .popsection