/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'syscall_exit':
 *	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
 *	38(%esp) - %eflags
 *	3C(%esp) - %oldesp
 *	40(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */
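
[Editor's note] For orientation, the frame laid out above is what the C side sees as `struct pt_regs`. A minimal sketch of the matching layout (the real definition lives in `<asm/ptrace.h>`; every slot here is one 32-bit word, so the offsets line up with the table above):

```c
/* Sketch of the frame described above; offsets match when each field
 * occupies one 32-bit word. */
struct pt_regs_sketch {
	unsigned long bx, cx, dx, si, di, bp, ax;	/* 0x00 .. 0x18 */
	unsigned long ds, es, fs, gs;			/* 0x1c .. 0x28 */
	unsigned long orig_ax;				/* 0x2c */
	unsigned long ip, cs, flags, sp, ss;		/* 0x30 .. 0x40 */
};
```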
43 | ||
1da177e4 | 44 | #include <linux/linkage.h> |
d7e7528b | 45 | #include <linux/err.h> |
1da177e4 | 46 | #include <asm/thread_info.h> |
55f327fa | 47 | #include <asm/irqflags.h> |
1da177e4 LT |
48 | #include <asm/errno.h> |
49 | #include <asm/segment.h> | |
50 | #include <asm/smp.h> | |
0341c14d | 51 | #include <asm/page_types.h> |
be44d2aa | 52 | #include <asm/percpu.h> |
fe7cacc1 | 53 | #include <asm/dwarf2.h> |
ab68ed98 | 54 | #include <asm/processor-flags.h> |
395a59d0 | 55 | #include <asm/ftrace.h> |
9b7dc567 | 56 | #include <asm/irq_vectors.h> |
40d2e763 | 57 | #include <asm/cpufeature.h> |
b4ca46e4 | 58 | #include <asm/alternative-asm.h> |
6837a54d | 59 | #include <asm/asm.h> |
1da177e4 | 60 | |
af0575bb RM |
61 | /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ |
62 | #include <linux/elf-em.h> | |
63 | #define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE) | |
64 | #define __AUDIT_ARCH_LE 0x40000000 | |
65 | ||
66 | #ifndef CONFIG_AUDITSYSCALL | |
67 | #define sysenter_audit syscall_trace_entry | |
68 | #define sysexit_audit syscall_exit_work | |
69 | #endif | |
70 | ||
ea714547 JO |
71 | .section .entry.text, "ax" |
72 | ||
139ec7c4 RR |
73 | /* |
74 | * We use macros for low-level operations which need to be overridden | |
75 | * for paravirtualization. The following will never clobber any registers: | |
76 | * INTERRUPT_RETURN (aka. "iret") | |
77 | * GET_CR0_INTO_EAX (aka. "movl %cr0, %eax") | |
d75cd22f | 78 | * ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit"). |
139ec7c4 RR |
79 | * |
80 | * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must | |
81 | * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY). | |
82 | * Allowing a register to be clobbered can shrink the paravirt replacement | |
83 | * enough to patch inline, increasing performance. | |
84 | */ | |
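
[Editor's note] Conceptually, each of those macros expands either to the native instruction or to an indirect call through a per-hypervisor ops table; a hedged sketch of the idea (the real tables are `pv_cpu_ops`/`pv_irq_ops` in `<asm/paravirt.h>`, and this struct is illustrative, not the kernel's definition):

```c
/* Illustrative only: under CONFIG_PARAVIRT the macros above become
 * indirect calls through an ops table; replacements short enough
 * (e.g. "iret" or "sti; sysexit" on bare metal) can be patched inline
 * at the call site, which is why the clobber annotations matter. */
struct pv_ops_sketch {
	void (*iret)(void);			/* INTERRUPT_RETURN */
	void (*irq_enable_sysexit)(void);	/* ENABLE_INTERRUPTS_SYSEXIT */
	void (*irq_disable)(void);		/* DISABLE_INTERRUPTS */
	void (*irq_enable)(void);		/* ENABLE_INTERRUPTS */
};
```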
85 | ||
1da177e4 | 86 | #ifdef CONFIG_PREEMPT |
139ec7c4 | 87 | #define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF |
1da177e4 | 88 | #else |
139ec7c4 | 89 | #define preempt_stop(clobbers) |
2e04bc76 | 90 | #define resume_kernel restore_all |
1da177e4 LT |
91 | #endif |
92 | ||
55f327fa IM |
93 | .macro TRACE_IRQS_IRET |
94 | #ifdef CONFIG_TRACE_IRQFLAGS | |
ab68ed98 | 95 | testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off? |
55f327fa IM |
96 | jz 1f |
97 | TRACE_IRQS_ON | |
98 | 1: | |
99 | #endif | |
100 | .endm | |
101 | ||
ccbeed3a TH |
102 | /* |
103 | * User gs save/restore | |
104 | * | |
105 | * %gs is used for userland TLS and kernel only uses it for stack | |
106 | * canary which is required to be at %gs:20 by gcc. Read the comment | |
107 | * at the top of stackprotector.h for more info. | |
108 | * | |
109 | * Local labels 98 and 99 are used. | |
110 | */ | |
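
[Editor's note] As a reminder of why offset 20 matters: gcc's i386 `-fstack-protector` code reads the canary as `%gs:20`, so whatever per-cpu object `%gs` points at must put it exactly there. A sketch of that arrangement (cf. `<asm/stackprotector.h>`; illustrative name):

```c
/* Sketch: the object %gs points at must place the canary at offset 20,
 * because stack-protector prologues/epilogues hardcode %gs:20. */
struct stack_canary_sketch {
	char		__pad[20];	/* force the canary to offset 20 */
	unsigned long	canary;		/* what gcc checks on function exit */
};
```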
#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately push/pop can't be no-op */
.macro PUSH_GS
	pushl_cfi $0
.endm
.macro POP_GS pop=0
	addl $(4 + \pop), %esp
	CFI_ADJUST_CFA_OFFSET -(4 + \pop)
.endm
.macro POP_GS_EX
.endm

 /* all the rest are no-op */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else	/* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
	pushl_cfi %gs
	/*CFI_REL_OFFSET gs, 0*/
.endm

.macro POP_GS pop=0
98:	popl_cfi %gs
	/*CFI_RESTORE gs*/
  .if \pop <> 0
	add $\pop, %esp
	CFI_ADJUST_CFA_OFFSET -\pop
  .endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl $0, (%esp)
	jmp 98b
.popsection
	_ASM_EXTABLE(98b,99b)
.endm

.macro PTGS_TO_GS
98:	mov PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl $0, PT_GS(%esp)
	jmp 98b
.popsection
	_ASM_EXTABLE(98b,99b)
.endm

.macro GS_TO_REG reg
	movl %gs, \reg
	/*CFI_REGISTER gs, \reg*/
.endm
.macro REG_TO_PTGS reg
	movl \reg, PT_GS(%esp)
	/*CFI_REL_OFFSET gs, PT_GS*/
.endm
.macro SET_KERNEL_GS reg
	movl $(__KERNEL_STACK_CANARY), \reg
	movl \reg, %gs
.endm

#endif	/* CONFIG_X86_32_LAZY_GS */

.macro SAVE_ALL
	cld
	PUSH_GS
	pushl_cfi %fs
	/*CFI_REL_OFFSET fs, 0;*/
	pushl_cfi %es
	/*CFI_REL_OFFSET es, 0;*/
	pushl_cfi %ds
	/*CFI_REL_OFFSET ds, 0;*/
	pushl_cfi %eax
	CFI_REL_OFFSET eax, 0
	pushl_cfi %ebp
	CFI_REL_OFFSET ebp, 0
	pushl_cfi %edi
	CFI_REL_OFFSET edi, 0
	pushl_cfi %esi
	CFI_REL_OFFSET esi, 0
	pushl_cfi %edx
	CFI_REL_OFFSET edx, 0
	pushl_cfi %ecx
	CFI_REL_OFFSET ecx, 0
	pushl_cfi %ebx
	CFI_REL_OFFSET ebx, 0
	movl $(__USER_DS), %edx
	movl %edx, %ds
	movl %edx, %es
	movl $(__KERNEL_PERCPU), %edx
	movl %edx, %fs
	SET_KERNEL_GS %edx
.endm

.macro RESTORE_INT_REGS
	popl_cfi %ebx
	CFI_RESTORE ebx
	popl_cfi %ecx
	CFI_RESTORE ecx
	popl_cfi %edx
	CFI_RESTORE edx
	popl_cfi %esi
	CFI_RESTORE esi
	popl_cfi %edi
	CFI_RESTORE edi
	popl_cfi %ebp
	CFI_RESTORE ebp
	popl_cfi %eax
	CFI_RESTORE eax
.endm

.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl_cfi %ds
	/*CFI_RESTORE ds;*/
2:	popl_cfi %es
	/*CFI_RESTORE es;*/
3:	popl_cfi %fs
	/*CFI_RESTORE fs;*/
	POP_GS \pop
.pushsection .fixup, "ax"
4:	movl $0, (%esp)
	jmp 1b
5:	movl $0, (%esp)
	jmp 2b
6:	movl $0, (%esp)
	jmp 3b
.popsection
	_ASM_EXTABLE(1b,4b)
	_ASM_EXTABLE(2b,5b)
	_ASM_EXTABLE(3b,6b)
	POP_GS_EX
.endm

.macro RING0_INT_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 3*4
	/*CFI_OFFSET cs, -2*4;*/
	CFI_OFFSET eip, -3*4
.endm

.macro RING0_EC_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 4*4
	/*CFI_OFFSET cs, -2*4;*/
	CFI_OFFSET eip, -3*4
.endm

.macro RING0_PTREGS_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
	CFI_OFFSET eip, PT_EIP-PT_OLDESP
	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
	CFI_OFFSET eax, PT_EAX-PT_OLDESP
	CFI_OFFSET ebp, PT_EBP-PT_OLDESP
	CFI_OFFSET edi, PT_EDI-PT_OLDESP
	CFI_OFFSET esi, PT_ESI-PT_OLDESP
	CFI_OFFSET edx, PT_EDX-PT_OLDESP
	CFI_OFFSET ecx, PT_ECX-PT_OLDESP
	CFI_OFFSET ebx, PT_EBX-PT_OLDESP
.endm

ENTRY(ret_from_fork)
	CFI_STARTPROC
	pushl_cfi %eax
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl_cfi %eax
	pushl_cfi $0x0202		# Reset kernel eflags
	popfl_cfi
	jmp syscall_exit
	CFI_ENDPROC
END(ret_from_fork)

/*
 * Interrupt exit functions should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"
/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
	RING0_PTREGS_FRAME
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
	GET_THREAD_INFO(%ebp)
resume_userspace_sig:
#ifdef CONFIG_VM86
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
	/*
	 * We can be coming here from a syscall done in the kernel space,
	 * e.g. a failed kernel_execve().
	 */
	movl PT_CS(%esp), %eax
	andl $SEGMENT_RPL_MASK, %eax
#endif
	cmpl $USER_RPL, %eax
	jb resume_kernel		# not returning to v8086 or userspace

ENTRY(resume_userspace)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	jnz restore_all
need_resched:
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	jz restore_all
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp need_resched
END(resume_kernel)
#endif
	CFI_ENDPROC
/*
 * End of kprobes section
 */
	.popsection

/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page.  See vsyscall-sysenter.S, which defines the symbol.  */

	# sysenter call handler stub
ENTRY(ia32_sysenter_target)
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 0
	CFI_REGISTER esp, ebp
	movl TSS_sysenter_sp0(%esp),%esp
sysenter_past_esp:
	/*
	 * Interrupts are disabled here, but we can't trace it until
	 * enough kernel state to call TRACE_IRQS_OFF can be called - but
	 * we immediately enable interrupts at that point anyway.
	 */
	pushl_cfi $__USER_DS
	/*CFI_REL_OFFSET ss, 0*/
	pushl_cfi %ebp
	CFI_REL_OFFSET esp, 0
	pushfl_cfi
	orl $X86_EFLAGS_IF, (%esp)
	pushl_cfi $__USER_CS
	/*CFI_REL_OFFSET cs, 0*/
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
	 */
	pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
	CFI_REL_OFFSET eip, 0

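[Editor's note] That offset expression is easier to check in C. A sketch of the arithmetic, using the same asm-offsets constants and assuming (per the comment above) that copy_thread sets sp0 eight bytes below the top of the THREAD_SIZE-aligned stack, with thread_info at the stack's base:

```c
/* Sketch of the address computed by the pushl above. */
unsigned long sysenter_return_slot(unsigned long esp /* after the 4 pushes */)
{
	unsigned long sp0       = esp + 4*4;	/* undo the four pushes      */
	unsigned long stack_top = sp0 + 8;	/* copy_thread's "esp0 - 8"  */
	unsigned long ti        = stack_top - THREAD_SIZE;  /* thread_info  */
	return ti + TI_sysenter_return;
	/* == esp + TI_sysenter_return - THREAD_SIZE + 8 + 4*4, as coded */
}
```
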
	pushl_cfi %eax
	SAVE_ALL
	ENABLE_INTERRUPTS(CLBR_NONE)

/*
 * Load the potential sixth argument from user stack.
 * Careful about security.
 */
	cmpl $__PAGE_OFFSET-3,%ebp
	jae syscall_fault
1:	movl (%ebp),%ebp
	movl %ebp,PT_EBP(%esp)
	_ASM_EXTABLE(1b,syscall_fault)
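
[Editor's note] The bound guards the 4-byte read at %ebp: the whole word must lie below PAGE_OFFSET (the start of kernel addresses), hence the "-3"; a fault on the load itself is recovered through the exception-table entry above. In C terms, a sketch:

```c
/* Sketch of the range check: user_ebp .. user_ebp+3 must all be
 * userspace addresses, i.e. strictly below PAGE_OFFSET. */
int sixth_arg_ok(unsigned long user_ebp)
{
	return user_ebp < PAGE_OFFSET - 3;	/* else: jae syscall_fault */
}
```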

	GET_THREAD_INFO(%ebp)

	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	jnz sysenter_audit
sysenter_do_call:
	cmpl $(NR_syscalls), %eax
	jae syscall_badsys
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testl $_TIF_ALLWORK_MASK, %ecx
	jne sysexit_audit
sysenter_exit:
/* if something modifies registers it must also disable sysexit */
	movl PT_EIP(%esp), %edx
	movl PT_OLDESP(%esp), %ecx
	xorl %ebp,%ebp
	TRACE_IRQS_ON
1:	mov  PT_FS(%esp), %fs
	PTGS_TO_GS
	ENABLE_INTERRUPTS_SYSEXIT

#ifdef CONFIG_AUDITSYSCALL
sysenter_audit:
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	addl $4,%esp
	CFI_ADJUST_CFA_OFFSET -4
	/* %esi already in 8(%esp)	   6th arg: 4th syscall arg */
	/* %edx already in 4(%esp)	   5th arg: 3rd syscall arg */
	/* %ecx already in 0(%esp)	   4th arg: 2nd syscall arg */
	movl %ebx,%ecx			/* 3rd arg: 1st syscall arg */
	movl %eax,%edx			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_I386,%eax	/* 1st arg: audit arch */
	call __audit_syscall_entry
	pushl_cfi %ebx
	movl PT_EAX(%esp),%eax		/* reload syscall number */
	jmp sysenter_do_call

sysexit_audit:
	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jne syscall_exit_work
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)
	movl %eax,%edx		/* second arg, syscall return value */
	cmpl $-MAX_ERRNO,%eax	/* is it an error ? */
	setbe %al		/* 1 if so, 0 if not */
	movzbl %al,%eax		/* zero-extend that */
	call __audit_syscall_exit
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jne syscall_exit_work
	movl PT_EAX(%esp),%eax	/* reload syscall return value */
	jmp sysenter_exit
#endif
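
[Editor's note] The cmpl/setbe pair in sysexit_audit condenses "did the syscall fail?" into a single unsigned comparison against -MAX_ERRNO: error returns occupy the top MAX_ERRNO values of the 32-bit range, the same convention behind the kernel's IS_ERR_VALUE(). A sketch of that classification:

```c
#define MAX_ERRNO	4095

/* Sketch: return values -1 .. -MAX_ERRNO, viewed as unsigned, form the
 * error band at the very top of the 32-bit range; everything below it
 * is a legitimate success value.  The setbe above derives the flag
 * passed to __audit_syscall_exit() from exactly this boundary. */
static inline int is_error_value(unsigned long x)
{
	return x >= (unsigned long)-MAX_ERRNO;
}
```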
475 | ||
fe7cacc1 | 476 | CFI_ENDPROC |
f95d47ca | 477 | .pushsection .fixup,"ax" |
464d1a78 | 478 | 2: movl $0,PT_FS(%esp) |
f95d47ca | 479 | jmp 1b |
f95d47ca | 480 | .popsection |
6837a54d | 481 | _ASM_EXTABLE(1b,2b) |
ccbeed3a | 482 | PTGS_TO_GS_EX |
0aa97fb2 | 483 | ENDPROC(ia32_sysenter_target) |
1da177e4 | 484 | |
a00e817f MH |
485 | /* |
486 | * syscall stub including irq exit should be protected against kprobes | |
487 | */ | |
488 | .pushsection .kprobes.text, "ax" | |
1da177e4 LT |
489 | # system call handler stub |
490 | ENTRY(system_call) | |
fe7cacc1 | 491 | RING0_INT_FRAME # can't unwind into user space anyway |
df5d1874 | 492 | pushl_cfi %eax # save orig_eax |
1da177e4 LT |
493 | SAVE_ALL |
494 | GET_THREAD_INFO(%ebp) | |
ed75e8d5 | 495 | # system call tracing in operation / emulation |
88200bc2 | 496 | testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp) |
1da177e4 | 497 | jnz syscall_trace_entry |
303395ac | 498 | cmpl $(NR_syscalls), %eax |
1da177e4 LT |
499 | jae syscall_badsys |
500 | syscall_call: | |
501 | call *sys_call_table(,%eax,4) | |
eb5b7b9d | 502 | movl %eax,PT_EAX(%esp) # store the return value |
1da177e4 | 503 | syscall_exit: |
c7e872e7 | 504 | LOCKDEP_SYS_EXIT |
139ec7c4 | 505 | DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt |
1da177e4 LT |
506 | # setting need_resched or sigpending |
507 | # between sampling and the iret | |
55f327fa | 508 | TRACE_IRQS_OFF |
1da177e4 | 509 | movl TI_flags(%ebp), %ecx |
88200bc2 | 510 | testl $_TIF_ALLWORK_MASK, %ecx # current->work |
1da177e4 LT |
511 | jne syscall_exit_work |
512 | ||
513 | restore_all: | |
2e04bc76 AH |
514 | TRACE_IRQS_IRET |
515 | restore_all_notrace: | |
eb5b7b9d JF |
516 | movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS |
517 | # Warning: PT_OLDSS(%esp) contains the wrong/random values if we | |
5df24082 SS |
518 | # are returning to the kernel. |
519 | # See comments in process.c:copy_thread() for details. | |
eb5b7b9d JF |
520 | movb PT_OLDSS(%esp), %ah |
521 | movb PT_CS(%esp), %al | |
ab68ed98 | 522 | andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax |
78be3706 | 523 | cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax |
fe7cacc1 | 524 | CFI_REMEMBER_STATE |
1da177e4 LT |
525 | je ldt_ss # returning to user-space with LDT SS |
526 | restore_nocheck: | |
ccbeed3a | 527 | RESTORE_REGS 4 # skip orig_eax/error_code |
f7f3d791 | 528 | irq_return: |
3701d863 | 529 | INTERRUPT_RETURN |
1da177e4 | 530 | .section .fixup,"ax" |
90e9f536 | 531 | ENTRY(iret_exc) |
a879cbbb LT |
532 | pushl $0 # no error code |
533 | pushl $do_iret_error | |
534 | jmp error_code | |
1da177e4 | 535 | .previous |
6837a54d | 536 | _ASM_EXTABLE(irq_return,iret_exc) |
1da177e4 | 537 | |
fe7cacc1 | 538 | CFI_RESTORE_STATE |
1da177e4 | 539 | ldt_ss: |
eb5b7b9d | 540 | larl PT_OLDSS(%esp), %eax |
1da177e4 LT |
541 | jnz restore_nocheck |
542 | testl $0x00400000, %eax # returning to 32bit stack? | |
543 | jnz restore_nocheck # allright, normal return | |
d3561b7f RR |
544 | |
545 | #ifdef CONFIG_PARAVIRT | |
546 | /* | |
547 | * The kernel can't run on a non-flat stack if paravirt mode | |
548 | * is active. Rather than try to fixup the high bits of | |
549 | * ESP, bypass this code entirely. This may break DOSemu | |
550 | * and/or Wine support in a paravirt VM, although the option | |
551 | * is still available to implement the setting of the high | |
552 | * 16-bits in the INTERRUPT_RETURN paravirt-op. | |
553 | */ | |
93b1eab3 | 554 | cmpl $0, pv_info+PARAVIRT_enabled |
d3561b7f RR |
555 | jne restore_nocheck |
556 | #endif | |
557 | ||
dc4c2a0a AH |
558 | /* |
559 | * Setup and switch to ESPFIX stack | |
560 | * | |
561 | * We're returning to userspace with a 16 bit stack. The CPU will not | |
562 | * restore the high word of ESP for us on executing iret... This is an | |
563 | * "official" bug of all the x86-compatible CPUs, which we can work | |
564 | * around to make dosemu and wine happy. We do this by preloading the | |
565 | * high word of ESP with the high word of the userspace ESP while | |
566 | * compensating for the offset by changing to the ESPFIX segment with | |
567 | * a base address that matches for the difference. | |
568 | */ | |
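
[Editor's note] The base computation done just below is terse; the same arithmetic in C (kernel_esp is %esp, user_esp is PT_OLDESP(%esp)):

```c
/* Sketch of the ESPFIX fixup below: build an %esp whose high word is
 * the user's, then give the stack segment a base that maps it back
 * onto the real kernel stack. */
void espfix_sketch(unsigned long kernel_esp, unsigned long user_esp)
{
	/* new %esp: user's high word, kernel's low word */
	unsigned long new_esp = (user_esp & 0xffff0000UL) |
				(kernel_esp & 0xffffUL);
	/* segment base chosen so base + new_esp == kernel_esp; the low
	 * words cancel, so only base bits 16..31 need writing into the
	 * GDT_ESPFIX_SS descriptor (bytes 4 and 7 below) */
	unsigned long base = kernel_esp - new_esp;
	(void)base;
}
```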
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
	mov %esp, %edx			/* load kernel esp */
	mov PT_OLDESP(%esp), %eax	/* load userspace esp */
	mov %dx, %ax			/* eax: new kernel esp */
	sub %eax, %edx			/* offset (low word is 0) */
	shr $16, %edx
	mov %dl, GDT_ESPFIX_SS + 4	/* bits 16..23 */
	mov %dh, GDT_ESPFIX_SS + 7	/* bits 24..31 */
	pushl_cfi $__ESPFIX_SS
	pushl_cfi %eax			/* new kernel esp */
	/* Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the iret */
	DISABLE_INTERRUPTS(CLBR_EAX)
	lss (%esp), %esp		/* switch to espfix segment */
	CFI_ADJUST_CFA_OFFSET -8
	jmp restore_nocheck
	CFI_ENDPROC
ENDPROC(system_call)

	# perform work that needs to be done immediately before resumption
	ALIGN
	RING0_PTREGS_FRAME		# can't unwind into user space anyway
work_pending:
	testb $_TIF_NEED_RESCHED, %cl
	jz work_notifysig
work_resched:
	call schedule
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
#ifdef CONFIG_VM86
	testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
	movl %esp, %eax
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig

	ALIGN
work_notifysig_v86:
	pushl_cfi %ecx			# save ti_flags for do_notify_resume
	call save_v86_state		# %eax contains pt_regs pointer
	popl_cfi %ecx
	movl %eax, %esp
#else
	movl %esp, %eax
#endif
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig
END(work_pending)

	# perform syscall entry tracing
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS,PT_EAX(%esp)
	movl %esp, %eax
	call syscall_trace_enter
	/* What it returned is what we'll actually use.  */
	cmpl $(NR_syscalls), %eax
	jnae syscall_call
	jmp syscall_exit
END(syscall_trace_entry)

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testl $_TIF_WORK_SYSCALL_EXIT, %ecx
	jz work_pending
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)	# could let syscall_trace_leave() call
					# schedule() instead
	movl %esp, %eax
	call syscall_trace_leave
	jmp resume_userspace
END(syscall_exit_work)
	CFI_ENDPROC

	RING0_INT_FRAME			# can't unwind into user space anyway
syscall_fault:
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_fault)

syscall_badsys:
	movl $-ENOSYS,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_badsys)
	CFI_ENDPROC
/*
 * End of kprobes section
 */
	.popsection

/*
 * System calls that need a pt_regs pointer.
 */
#define PTREGSCALL0(name) \
ENTRY(ptregs_##name) ; \
	leal 4(%esp),%eax; \
	jmp sys_##name; \
ENDPROC(ptregs_##name)

#define PTREGSCALL1(name) \
ENTRY(ptregs_##name) ; \
	leal 4(%esp),%edx; \
	movl (PT_EBX+4)(%esp),%eax; \
	jmp sys_##name; \
ENDPROC(ptregs_##name)

#define PTREGSCALL2(name) \
ENTRY(ptregs_##name) ; \
	leal 4(%esp),%ecx; \
	movl (PT_ECX+4)(%esp),%edx; \
	movl (PT_EBX+4)(%esp),%eax; \
	jmp sys_##name; \
ENDPROC(ptregs_##name)

#define PTREGSCALL3(name) \
ENTRY(ptregs_##name) ; \
	CFI_STARTPROC; \
	leal 4(%esp),%eax; \
	pushl_cfi %eax; \
	movl PT_EDX(%eax),%ecx; \
	movl PT_ECX(%eax),%edx; \
	movl PT_EBX(%eax),%eax; \
	call sys_##name; \
	addl $4,%esp; \
	CFI_ADJUST_CFA_OFFSET -4; \
	ret; \
	CFI_ENDPROC; \
ENDPROC(ptregs_##name)

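[Editor's note] These stubs rely on the 32-bit kernel being built with -mregparm=3, so the first three C arguments arrive in %eax, %edx and %ecx; the stubs slot the pt_regs pointer in as the last register argument. Illustrative prototypes (a sketch of the convention, not copied from the tree):

```c
/* Sketch: C-side handlers matching the register setup in the
 * PTREGSCALL* stubs above, under the regparm(3) ABI. */
long sys_vfork(struct pt_regs *regs);		/* PTREGSCALL0: regs in %eax */
long sys_iopl(unsigned int level,		/* PTREGSCALL1: arg in %eax,  */
	      struct pt_regs *regs);		/*              regs in %edx  */
long sys_vm86(unsigned long cmd,		/* PTREGSCALL2: args in %eax, */
	      unsigned long arg,		/*              %edx, regs in */
	      struct pt_regs *regs);		/*              %ecx          */
```

PTREGSCALL3 differs in that it must `call` rather than `jmp`, because the fourth argument (the pt_regs pointer) no longer fits in a register and is pushed on the stack.
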
PTREGSCALL1(iopl)
PTREGSCALL0(fork)
PTREGSCALL0(vfork)
PTREGSCALL3(execve)
PTREGSCALL2(sigaltstack)
PTREGSCALL0(sigreturn)
PTREGSCALL0(rt_sigreturn)
PTREGSCALL2(vm86)
PTREGSCALL1(vm86old)

/* Clone is an oddball.  The 4th arg is in %edi */
ENTRY(ptregs_clone)
	CFI_STARTPROC
	leal 4(%esp),%eax
	pushl_cfi %eax
	pushl_cfi PT_EDI(%eax)
	movl PT_EDX(%eax),%ecx
	movl PT_ECX(%eax),%edx
	movl PT_EBX(%eax),%eax
	call sys_clone
	addl $8,%esp
	CFI_ADJUST_CFA_OFFSET -8
	ret
	CFI_ENDPROC
ENDPROC(ptregs_clone)

.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack and adjusts ESP with the matching offset.
 */
	/* fixup the stack */
	mov GDT_ESPFIX_SS + 4, %al	/* bits 16..23 */
	mov GDT_ESPFIX_SS + 7, %ah	/* bits 24..31 */
	shl $16, %eax
	addl %esp, %eax			/* the adjusted stack pointer */
	pushl_cfi $__KERNEL_DS
	pushl_cfi %eax
	lss (%esp), %esp		/* switch to the normal stack segment */
	CFI_ADJUST_CFA_OFFSET -8
.endm
.macro UNWIND_ESPFIX_STACK
	movl %ss, %eax
	/* see if on espfix stack */
	cmpw $__ESPFIX_SS, %ax
	jne 27f
	movl $__KERNEL_DS, %eax
	movl %eax, %ds
	movl %eax, %es
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
27:
.endm

/*
 * Build the entry stubs and pointer table with some assembler magic.
 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
 * single cache line on all modern x86 implementations.
 */
.section .init.rodata,"a"
ENTRY(interrupt)
.section .entry.text, "ax"
	.p2align 5
	.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
	RING0_INT_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
	.balign 32
  .rept	7
    .if vector < NR_VECTORS
      .if vector <> FIRST_EXTERNAL_VECTOR
	CFI_ADJUST_CFA_OFFSET -4
      .endif
1:	pushl_cfi $(~vector+0x80)	/* Note: always in signed byte range */
      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
	jmp 2f
      .endif
      .previous
	.long 1b
      .section .entry.text, "ax"
vector=vector+1
    .endif
  .endr
2:	jmp common_interrupt
.endr
END(irq_entries_start)

.previous
END(interrupt)
.previous
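
[Editor's note] Two details of these stubs are worth spelling out. Each stub is four bytes (a two-byte `pushl $imm8` plus a two-byte short `jmp`); the seventh stub in a chunk drops its jmp and falls through to the shared five-byte `jmp common_interrupt`, so 6*4 + 2 + 5 = 31 bytes fit the 32-byte chunk. And the pushed value `~vector+0x80` always fits a signed byte while remaining invertible; a quick self-contained check of that round trip:

```c
#include <assert.h>

int main(void)
{
	for (int vector = 0; vector < 256; vector++) {
		int imm = ~vector + 0x80;		/* what the stub pushes */
		assert(imm >= -128 && imm <= 127);	/* fits "pushl $imm8"   */
		int orig_eax = imm - 0x80;		/* common_interrupt's   */
						/* addl $-0x80,(%esp)   */
		assert(orig_eax >= -256 && orig_eax <= -1);
		assert(~orig_eax == vector);		/* do_IRQ's recovery    */
	}
	return 0;
}
```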

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	addl $-0x80,(%esp)	/* Adjust vector into the [-256,-1] range */
	SAVE_ALL
	TRACE_IRQS_OFF
	movl %esp,%eax
	call do_IRQ
	jmp ret_from_intr
ENDPROC(common_interrupt)
	CFI_ENDPROC

/*
 * Irq entries should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"
#define BUILD_INTERRUPT3(name, nr, fn)	\
ENTRY(name)				\
	RING0_INT_FRAME;		\
	pushl_cfi $~(nr);		\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl %esp,%eax;			\
	call fn;			\
	jmp ret_from_intr;		\
	CFI_ENDPROC;			\
ENDPROC(name)

#define BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(name, nr, smp_##name)

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

ENTRY(coprocessor_error)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_coprocessor_error
	jmp error_code
	CFI_ENDPROC
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	RING0_INT_FRAME
	pushl_cfi $0
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
661:	pushl_cfi $do_general_protection
662:
.section .altinstructions,"a"
	altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
.previous
.section .altinstr_replacement,"ax"
663:	pushl $do_simd_coprocessor_error
664:
.previous
#else
	pushl_cfi $do_simd_coprocessor_error
#endif
	jmp error_code
	CFI_ENDPROC
END(simd_coprocessor_error)

ENTRY(device_not_available)
	RING0_INT_FRAME
	pushl_cfi $-1			# mark this as an int
	pushl_cfi $do_device_not_available
	jmp error_code
	CFI_ENDPROC
END(device_not_available)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iret
	_ASM_EXTABLE(native_iret, iret_exc)
END(native_iret)

ENTRY(native_irq_enable_sysexit)
	sti
	sysexit
END(native_irq_enable_sysexit)
#endif

ENTRY(overflow)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_overflow
	jmp error_code
	CFI_ENDPROC
END(overflow)

ENTRY(bounds)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_bounds
	jmp error_code
	CFI_ENDPROC
END(bounds)

ENTRY(invalid_op)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_invalid_op
	jmp error_code
	CFI_ENDPROC
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_coprocessor_segment_overrun
	jmp error_code
	CFI_ENDPROC
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	RING0_EC_FRAME
	pushl_cfi $do_invalid_TSS
	jmp error_code
	CFI_ENDPROC
END(invalid_TSS)

ENTRY(segment_not_present)
	RING0_EC_FRAME
	pushl_cfi $do_segment_not_present
	jmp error_code
	CFI_ENDPROC
END(segment_not_present)

ENTRY(stack_segment)
	RING0_EC_FRAME
	pushl_cfi $do_stack_segment
	jmp error_code
	CFI_ENDPROC
END(stack_segment)

ENTRY(alignment_check)
	RING0_EC_FRAME
	pushl_cfi $do_alignment_check
	jmp error_code
	CFI_ENDPROC
END(alignment_check)

ENTRY(divide_error)
	RING0_INT_FRAME
	pushl_cfi $0			# no error code
	pushl_cfi $do_divide_error
	jmp error_code
	CFI_ENDPROC
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi machine_check_vector
	jmp error_code
	CFI_ENDPROC
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_spurious_interrupt_bug
	jmp error_code
	CFI_ENDPROC
END(spurious_interrupt_bug)
/*
 * End of kprobes section
 */
	.popsection

ENTRY(kernel_thread_helper)
	pushl $0		# fake return address for unwinder
	CFI_STARTPROC
	movl %edi,%eax
	call *%esi
	call do_exit
	ud2			# padding for call trace
	CFI_ENDPROC
ENDPROC(kernel_thread_helper)

#ifdef CONFIG_XEN
/* Xen doesn't set %esp to be precisely what the normal sysenter
   entry point expects, so fix it up before using the normal path. */
ENTRY(xen_sysenter_target)
	RING0_INT_FRAME
	addl $5*4, %esp		/* remove xen-provided frame */
	CFI_ADJUST_CFA_OFFSET -5*4
	jmp sysenter_past_esp
	CFI_ENDPROC

ENTRY(xen_hypervisor_callback)
	CFI_STARTPROC
	pushl_cfi $0
	SAVE_ALL
	TRACE_IRQS_OFF

	/* Check to see if we got the event in the critical
	   region in xen_iret_direct, after we've reenabled
	   events and checked for pending events.  This simulates
	   iret instruction's behaviour where it delivers a
	   pending interrupt when enabling interrupts. */
	movl PT_EIP(%esp),%eax
	cmpl $xen_iret_start_crit,%eax
	jb   1f
	cmpl $xen_iret_end_crit,%eax
	jae  1f

	jmp  xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:	mov %esp, %eax
	call xen_evtchn_do_upcall
	jmp  ret_from_intr
	CFI_ENDPROC
ENDPROC(xen_hypervisor_callback)

# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we fix up by reattempting the load, and zeroing the segment
# register if the load fails.
# Category 2 we fix up by jumping to do_iret_error. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by maintaining a status value in EAX.
ENTRY(xen_failsafe_callback)
	CFI_STARTPROC
	pushl_cfi %eax
	movl $1,%eax
1:	mov 4(%esp),%ds
2:	mov 8(%esp),%es
3:	mov 12(%esp),%fs
4:	mov 16(%esp),%gs
	testl %eax,%eax
	popl_cfi %eax
	lea 16(%esp),%esp
	CFI_ADJUST_CFA_OFFSET -16
	jz 5f
	addl $16,%esp
	jmp iret_exc		# EAX != 0 => Category 2 (Bad IRET)
5:	pushl_cfi $0		# EAX == 0 => Category 1 (Bad segment)
	SAVE_ALL
	jmp ret_from_exception
	CFI_ENDPROC

.section .fixup,"ax"
6:	xorl %eax,%eax
	movl %eax,4(%esp)
	jmp 1b
7:	xorl %eax,%eax
	movl %eax,8(%esp)
	jmp 2b
8:	xorl %eax,%eax
	movl %eax,12(%esp)
	jmp 3b
9:	xorl %eax,%eax
	movl %eax,16(%esp)
	jmp 4b
.previous
	_ASM_EXTABLE(1b,6b)
	_ASM_EXTABLE(2b,7b)
	_ASM_EXTABLE(3b,8b)
	_ASM_EXTABLE(4b,9b)
ENDPROC(xen_failsafe_callback)

BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
		xen_evtchn_do_upcall)

#endif	/* CONFIG_XEN */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(mcount)
	ret
END(mcount)

ENTRY(ftrace_caller)
	cmpl $0, function_trace_stop
	jne  ftrace_stub

	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax

.globl ftrace_call
ftrace_call:
	call ftrace_stub

	popl %edx
	popl %ecx
	popl %eax
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	jmp ftrace_stub
#endif

.globl ftrace_stub
ftrace_stub:
	ret
END(ftrace_caller)

#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(mcount)
	cmpl $0, function_trace_stop
	jne  ftrace_stub

	cmpl $ftrace_stub, ftrace_trace_function
	jnz trace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl $ftrace_stub, ftrace_graph_return
	jnz ftrace_graph_caller

	cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
	jnz ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
	ret

	/* taken from glibc */
trace:
	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax

	call *ftrace_trace_function

	popl %edx
	popl %ecx
	popl %eax
	jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %edx
	lea 0x4(%ebp), %eax
	movl (%ebp), %ecx
	subl $MCOUNT_INSN_SIZE, %edx
	call prepare_ftrace_return
	popl %edx
	popl %ecx
	popl %eax
	ret
END(ftrace_graph_caller)

.globl return_to_handler
return_to_handler:
	pushl %eax
	pushl %edx
	movl %ebp, %eax
	call ftrace_return_to_handler
	movl %eax, %ecx
	popl %edx
	popl %eax
	jmp *%ecx
#endif

/*
 * Some functions should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"

ENTRY(page_fault)
	RING0_EC_FRAME
	pushl_cfi $do_page_fault
	ALIGN
error_code:
	/* the function address is in %gs's slot on the stack */
	pushl_cfi %fs
	/*CFI_REL_OFFSET fs, 0*/
	pushl_cfi %es
	/*CFI_REL_OFFSET es, 0*/
	pushl_cfi %ds
	/*CFI_REL_OFFSET ds, 0*/
	pushl_cfi %eax
	CFI_REL_OFFSET eax, 0
	pushl_cfi %ebp
	CFI_REL_OFFSET ebp, 0
	pushl_cfi %edi
	CFI_REL_OFFSET edi, 0
	pushl_cfi %esi
	CFI_REL_OFFSET esi, 0
	pushl_cfi %edx
	CFI_REL_OFFSET edx, 0
	pushl_cfi %ecx
	CFI_REL_OFFSET ecx, 0
	pushl_cfi %ebx
	CFI_REL_OFFSET ebx, 0
	cld
	movl $(__KERNEL_PERCPU), %ecx
	movl %ecx, %fs
	UNWIND_ESPFIX_STACK
	GS_TO_REG %ecx
	movl PT_GS(%esp), %edi		# get the function address
	movl PT_ORIG_EAX(%esp), %edx	# get the error code
	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	TRACE_IRQS_OFF
	movl %esp,%eax			# pt_regs pointer
	call *%edi
	jmp ret_from_exception
	CFI_ENDPROC
END(page_fault)

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
.macro FIX_STACK offset ok label
	cmpw $__KERNEL_CS, 4(%esp)
	jne \ok
\label:
	movl TSS_sysenter_sp0 + \offset(%esp), %esp
	CFI_DEF_CFA esp, 0
	CFI_UNDEFINED eip
	pushfl_cfi
	pushl_cfi $__KERNEL_CS
	pushl_cfi $sysenter_past_esp
	CFI_REL_OFFSET eip, 0
.endm

ENTRY(debug)
	RING0_INT_FRAME
	cmpl $ia32_sysenter_target,(%esp)
	jne debug_stack_correct
	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
	pushl_cfi $-1			# mark this as an int
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	call do_debug
	jmp ret_from_exception
	CFI_ENDPROC
END(debug)

/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
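
[Editor's note] In outline, the entry below runs this decision sequence; a C-flavoured sketch only, with the probed values passed in as parameters and placeholder constants standing in for __ESPFIX_SS, ia32_sysenter_target and THREAD_SIZE:

```c
enum nmi_path { ESPFIX_STACK, STACK_FIXUP, DEBUG_STACK_CHECK, STACK_CORRECT };

/* Sketch of the stack-sanity checks in ENTRY(nmi) below. */
enum nmi_path classify_nmi(unsigned ss, unsigned long eip,
			   unsigned long esp, unsigned long eip_below)
{
	if (ss == ESPFIX_SS_SELECTOR)		/* NMI arrived on espfix stack */
		return ESPFIX_STACK;
	if (eip == SYSENTER_TARGET)		/* NMI hit the sysenter setup  */
		return STACK_FIXUP;
	if ((esp & (THREAD_SIZE - 1)) >= THREAD_SIZE - 20)
		return STACK_CORRECT;		/* near page end: can't probe  */
						/* 12(%esp) safely             */
	if (eip_below == SYSENTER_TARGET)	/* debug trap hit sysenter,    */
		return DEBUG_STACK_CHECK;	/* then NMI hit the debug path */
	return STACK_CORRECT;
}
```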
ENTRY(nmi)
	RING0_INT_FRAME
	pushl_cfi %eax
	movl %ss, %eax
	cmpw $__ESPFIX_SS, %ax
	popl_cfi %eax
	je nmi_espfix_stack
	cmpl $ia32_sysenter_target,(%esp)
	je nmi_stack_fixup
	pushl_cfi %eax
	movl %esp,%eax
	/* Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1),%eax
	cmpl $(THREAD_SIZE-20),%eax
	popl_cfi %eax
	jae nmi_stack_correct
	cmpl $ia32_sysenter_target,12(%esp)
	je nmi_debug_stack_check
nmi_stack_correct:
	/* We have a RING0_INT_FRAME here */
	pushl_cfi %eax
	SAVE_ALL
	xorl %edx,%edx		# zero error code
	movl %esp,%eax		# pt_regs pointer
	call do_nmi
	jmp restore_all_notrace
	CFI_ENDPROC

nmi_stack_fixup:
	RING0_INT_FRAME
	FIX_STACK 12, nmi_stack_correct, 1
	jmp nmi_stack_correct

nmi_debug_stack_check:
	/* We have a RING0_INT_FRAME here */
	cmpw $__KERNEL_CS,16(%esp)
	jne nmi_stack_correct
	cmpl $debug,(%esp)
	jb nmi_stack_correct
	cmpl $debug_esp_fix_insn,(%esp)
	ja nmi_stack_correct
	FIX_STACK 24, nmi_stack_correct, 1
	jmp nmi_stack_correct

nmi_espfix_stack:
	/* We have a RING0_INT_FRAME here.
	 *
	 * create the pointer to lss back
	 */
	pushl_cfi %ss
	pushl_cfi %esp
	addl $4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl_cfi 16(%esp)
	.endr
	pushl_cfi %eax
	SAVE_ALL
	FIXUP_ESPFIX_STACK		# %eax == %esp
	xorl %edx,%edx			# zero error code
	call do_nmi
	RESTORE_REGS
	lss 12+4(%esp), %esp		# back to espfix stack
	CFI_ADJUST_CFA_OFFSET -24
	jmp irq_return
	CFI_ENDPROC
END(nmi)

ENTRY(int3)
	RING0_INT_FRAME
	pushl_cfi $-1			# mark this as an int
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_int3
	jmp ret_from_exception
	CFI_ENDPROC
END(int3)

ENTRY(general_protection)
	RING0_EC_FRAME
	pushl_cfi $do_general_protection
	jmp error_code
	CFI_ENDPROC
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
	RING0_EC_FRAME
	pushl_cfi $do_async_page_fault
	jmp error_code
	CFI_ENDPROC
END(async_page_fault)
#endif

/*
 * End of kprobes section
 */
	.popsection