Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/arch/x86_64/entry.S | |
3 | * | |
4 | * Copyright (C) 1991, 1992 Linus Torvalds | |
5 | * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs | |
6 | * Copyright (C) 2000 Pavel Machek <pavel@suse.cz> | |
1da177e4 LT |
7 | */ |
8 | ||
9 | /* | |
10 | * entry.S contains the system-call and fault low-level handling routines. | |
11 | * | |
8b4777a4 AL |
12 | * Some of this is documented in Documentation/x86/entry_64.txt |
13 | * | |
1da177e4 LT |
14 | * NOTE: This code handles signal recognition, which happens after every |
15 | * interrupt and after each system call. | |
0bd7b798 | 16 | * |
0bd7b798 | 17 | * A note on terminology: |
7fcb3bc3 | 18 | * - iret frame: Architecture defined interrupt frame from SS to RIP |
0bd7b798 | 19 | * at the top of the kernel process stack. |
2e91a17b AK |
20 | * |
21 | * Some macro usage: | |
22 | * - CFI macros are used to generate dwarf2 unwind information for better | |
23 | * backtraces. They don't change any code. | |
2e91a17b | 24 | * - ENTRY/END Define functions in the symbol table. |
2e91a17b | 25 | * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging. |
cb5dd2c5 | 26 | * - idtentry - Define exception entry points. |
1da177e4 LT |
27 | */ |
28 | ||
1da177e4 LT |
29 | #include <linux/linkage.h> |
30 | #include <asm/segment.h> | |
1da177e4 LT |
31 | #include <asm/cache.h> |
32 | #include <asm/errno.h> | |
33 | #include <asm/dwarf2.h> | |
34 | #include <asm/calling.h> | |
e2d5df93 | 35 | #include <asm/asm-offsets.h> |
1da177e4 LT |
36 | #include <asm/msr.h> |
37 | #include <asm/unistd.h> | |
38 | #include <asm/thread_info.h> | |
39 | #include <asm/hw_irq.h> | |
0341c14d | 40 | #include <asm/page_types.h> |
2601e64d | 41 | #include <asm/irqflags.h> |
72fe4858 | 42 | #include <asm/paravirt.h> |
9939ddaf | 43 | #include <asm/percpu.h> |
d7abc0fa | 44 | #include <asm/asm.h> |
91d1aa43 | 45 | #include <asm/context_tracking.h> |
63bcff2a | 46 | #include <asm/smap.h> |
3891a04a | 47 | #include <asm/pgtable_types.h> |
d7e7528b | 48 | #include <linux/err.h> |
1da177e4 | 49 | |
86a1c34a RM |
50 | /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ |
51 | #include <linux/elf-em.h> | |
52 | #define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) | |
53 | #define __AUDIT_ARCH_64BIT 0x80000000 | |
54 | #define __AUDIT_ARCH_LE 0x40000000 | |
55 | ||
1da177e4 | 56 | .code64 |
ea714547 JO |
57 | .section .entry.text, "ax" |
58 | ||
16444a8a | 59 | |
72fe4858 | 60 | #ifdef CONFIG_PARAVIRT |
2be29982 | 61 | ENTRY(native_usergs_sysret64) |
72fe4858 GOC |
62 | swapgs |
63 | sysretq | |
b3baaa13 | 64 | ENDPROC(native_usergs_sysret64) |
72fe4858 GOC |
65 | #endif /* CONFIG_PARAVIRT */ |
66 | ||
2601e64d | 67 | |
f2db9382 | 68 | .macro TRACE_IRQS_IRETQ |
2601e64d | 69 | #ifdef CONFIG_TRACE_IRQFLAGS |
f2db9382 | 70 | bt $9,EFLAGS(%rsp) /* interrupts off? */ |
2601e64d IM |
71 | jnc 1f |
72 | TRACE_IRQS_ON | |
73 | 1: | |
74 | #endif | |
75 | .endm | |
76 | ||
5963e317 SR |
77 | /* |
78 | * When dynamic function tracer is enabled it will add a breakpoint | |
79 | * to all locations that it is about to modify, sync CPUs, update | |
80 | * all the code, sync CPUs, then remove the breakpoints. During this | |
81 | * window, if lockdep is enabled, it might jump back into the debug handler | |
82 | * (via TRACE_IRQS_ON/OFF) outside the updating of the IST protection. | |
83 | * | |
84 | * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to | |
85 | * make sure the stack pointer does not get reset back to the top | |
86 | * of the debug stack, and that we instead just reuse the current stack. | |
87 | */ | |
88 | #if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS) | |
89 | ||
90 | .macro TRACE_IRQS_OFF_DEBUG | |
91 | call debug_stack_set_zero | |
92 | TRACE_IRQS_OFF | |
93 | call debug_stack_reset | |
94 | .endm | |
95 | ||
96 | .macro TRACE_IRQS_ON_DEBUG | |
97 | call debug_stack_set_zero | |
98 | TRACE_IRQS_ON | |
99 | call debug_stack_reset | |
100 | .endm | |
101 | ||
f2db9382 DV |
102 | .macro TRACE_IRQS_IRETQ_DEBUG |
103 | bt $9,EFLAGS(%rsp) /* interrupts off? */ | |
5963e317 SR |
104 | jnc 1f |
105 | TRACE_IRQS_ON_DEBUG | |
106 | 1: | |
107 | .endm | |
108 | ||
109 | #else | |
110 | # define TRACE_IRQS_OFF_DEBUG TRACE_IRQS_OFF | |
111 | # define TRACE_IRQS_ON_DEBUG TRACE_IRQS_ON | |
112 | # define TRACE_IRQS_IRETQ_DEBUG TRACE_IRQS_IRETQ | |
113 | #endif | |
114 | ||
dcd072e2 | 115 | /* |
e90e147c | 116 | * empty frame |
dcd072e2 AH |
117 | */ |
118 | .macro EMPTY_FRAME start=1 offset=0 | |
7effaa88 | 119 | .if \start |
dcd072e2 | 120 | CFI_STARTPROC simple |
adf14236 | 121 | CFI_SIGNAL_FRAME |
dcd072e2 | 122 | CFI_DEF_CFA rsp,8+\offset |
7effaa88 | 123 | .else |
dcd072e2 | 124 | CFI_DEF_CFA_OFFSET 8+\offset |
7effaa88 | 125 | .endif |
1da177e4 | 126 | .endm |
d99015b1 AH |
127 | |
128 | /* | |
dcd072e2 | 129 | * initial frame state for interrupts (and exceptions without error code) |
d99015b1 | 130 | */ |
dcd072e2 | 131 | .macro INTR_FRAME start=1 offset=0 |
911d2bb5 DV |
132 | EMPTY_FRAME \start, 5*8+\offset |
133 | /*CFI_REL_OFFSET ss, 4*8+\offset*/ | |
134 | CFI_REL_OFFSET rsp, 3*8+\offset | |
135 | /*CFI_REL_OFFSET rflags, 2*8+\offset*/ | |
136 | /*CFI_REL_OFFSET cs, 1*8+\offset*/ | |
137 | CFI_REL_OFFSET rip, 0*8+\offset | |
d99015b1 AH |
138 | .endm |
139 | ||
d99015b1 AH |
140 | /* |
141 | * initial frame state for exceptions with error code (and interrupts | |
142 | * with vector already pushed) | |
143 | */ | |
dcd072e2 | 144 | .macro XCPT_FRAME start=1 offset=0 |
911d2bb5 | 145 | INTR_FRAME \start, 1*8+\offset |
dcd072e2 AH |
146 | .endm |
147 | ||
148 | /* | |
76f5df43 | 149 | * frame that enables passing a complete pt_regs to a C function. |
dcd072e2 | 150 | */ |
76f5df43 | 151 | .macro DEFAULT_FRAME start=1 offset=0 |
f2db9382 DV |
152 | XCPT_FRAME \start, ORIG_RAX+\offset |
153 | CFI_REL_OFFSET rdi, RDI+\offset | |
154 | CFI_REL_OFFSET rsi, RSI+\offset | |
155 | CFI_REL_OFFSET rdx, RDX+\offset | |
156 | CFI_REL_OFFSET rcx, RCX+\offset | |
157 | CFI_REL_OFFSET rax, RAX+\offset | |
158 | CFI_REL_OFFSET r8, R8+\offset | |
159 | CFI_REL_OFFSET r9, R9+\offset | |
160 | CFI_REL_OFFSET r10, R10+\offset | |
161 | CFI_REL_OFFSET r11, R11+\offset | |
dcd072e2 AH |
162 | CFI_REL_OFFSET rbx, RBX+\offset |
163 | CFI_REL_OFFSET rbp, RBP+\offset | |
164 | CFI_REL_OFFSET r12, R12+\offset | |
165 | CFI_REL_OFFSET r13, R13+\offset | |
166 | CFI_REL_OFFSET r14, R14+\offset | |
167 | CFI_REL_OFFSET r15, R15+\offset | |
168 | .endm | |
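
For reference (an editorial sketch, not part of the file): the RDI...R15 and RIP/CS/EFLAGS/RSP/SS offsets used by these CFI macros index into the kernel's x86-64 struct pt_regs. A compilable C sketch of that layout, lowest stack address first, assuming this kernel generation's field order:

    #include <stdint.h>

    /* sketch of the x86-64 struct pt_regs layout behind the offsets above */
    struct pt_regs_sketch {
            uint64_t r15, r14, r13, r12, bp, bx;            /* "extra" regs */
            uint64_t r11, r10, r9, r8, ax, cx, dx, si, di;  /* "C" regs */
            uint64_t orig_ax;             /* syscall nr, error code, or -1 */
            uint64_t ip, cs, flags, sp, ss;       /* hardware iret frame */
    };
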
d99015b1 | 169 | |
1da177e4 | 170 | /* |
b87cf63e | 171 | * 64bit SYSCALL instruction entry. Up to 6 arguments in registers. |
1da177e4 | 172 | * |
b87cf63e DV |
173 | * 64bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11, |
174 | * then loads new ss, cs, and rip from previously programmed MSRs. | |
175 | * rflags gets masked by a value from another MSR (so CLD and CLAC | |
176 | * are not needed). SYSCALL does not save anything on the stack | |
177 | * and does not change rsp. | |
178 | * | |
179 | * Registers on entry: | |
1da177e4 | 180 | * rax system call number |
b87cf63e DV |
181 | * rcx return address |
182 | * r11 saved rflags (note: r11 is a callee-clobbered register in the C ABI) | |
1da177e4 | 183 | * rdi arg0 |
1da177e4 | 184 | * rsi arg1 |
0bd7b798 | 185 | * rdx arg2 |
b87cf63e | 186 | * r10 arg3 (needs to be moved to rcx to conform to C ABI) |
1da177e4 LT |
187 | * r8 arg4 |
188 | * r9 arg5 | |
b87cf63e | 189 | * (note: r12-r15,rbp,rbx are callee-preserved in C ABI) |
0bd7b798 | 190 | * |
1da177e4 LT |
191 | * Only called from user space. |
192 | * | |
7fcb3bc3 | 193 | * When the user can change pt_regs->foo, always force IRET. That is because |
7bf36bbc AK |
194 | * IRET deals with non-canonical addresses better. SYSRET has trouble | |
195 | * with them due to bugs in both AMD and Intel CPUs. | |
0bd7b798 | 196 | */ |
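
The register convention documented above can be exercised from user space. A minimal C sketch (editorial, not part of the file) issuing a raw 64-bit SYSCALL; rcx and r11 are listed as clobbers precisely because the instruction overwrites them as described:

    #include <stdio.h>

    static long raw_syscall3(long nr, long a0, long a1, long a2)
    {
            long ret;
            /* rax = nr; rdi/rsi/rdx = args; SYSCALL itself clobbers rcx/r11 */
            asm volatile ("syscall"
                          : "=a" (ret)
                          : "a" (nr), "D" (a0), "S" (a1), "d" (a2)
                          : "rcx", "r11", "memory");
            return ret;
    }

    int main(void)
    {
            long n = raw_syscall3(1 /* __NR_write */, 1, (long) "hi\n", 3);
            printf("write returned %ld\n", n);
            return 0;
    }
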
1da177e4 LT |
197 | |
198 | ENTRY(system_call) | |
7effaa88 | 199 | CFI_STARTPROC simple |
adf14236 | 200 | CFI_SIGNAL_FRAME |
ef593260 | 201 | CFI_DEF_CFA rsp,0 |
7effaa88 JB |
202 | CFI_REGISTER rip,rcx |
203 | /*CFI_REGISTER rflags,r11*/ | |
9ed8e7d8 DV |
204 | |
205 | /* | |
206 | * Interrupts are off on entry. | |
207 | * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON, | |
208 | * it is too small to ever cause noticeable irq latency. | |
209 | */ | |
72fe4858 GOC |
210 | SWAPGS_UNSAFE_STACK |
211 | /* | |
212 | * A hypervisor implementation might want to use a label | |
213 | * after the swapgs, so that it can do the swapgs | |
214 | * for the guest and jump here on syscall. | |
215 | */ | |
f6b2bc84 | 216 | GLOBAL(system_call_after_swapgs) |
72fe4858 | 217 | |
c38e5038 | 218 | movq %rsp,PER_CPU_VAR(rsp_scratch) |
9af45651 | 219 | movq PER_CPU_VAR(kernel_stack),%rsp |
9ed8e7d8 DV |
220 | |
221 | /* Construct struct pt_regs on stack */ | |
222 | pushq_cfi $__USER_DS /* pt_regs->ss */ | |
223 | pushq_cfi PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */ | |
33db1fd4 | 224 | /* |
9ed8e7d8 DV |
225 | * Re-enable interrupts. |
226 | * We use 'rsp_scratch' as a scratch space, hence the irq-off block above | |
227 | * must execute atomically in the face of possible interrupt-driven | |
228 | * task preemption. We must enable interrupts only after we're done | |
229 | * with using rsp_scratch: | |
33db1fd4 DV |
230 | */ |
231 | ENABLE_INTERRUPTS(CLBR_NONE) | |
9ed8e7d8 DV |
232 | pushq_cfi %r11 /* pt_regs->flags */ |
233 | pushq_cfi $__USER_CS /* pt_regs->cs */ | |
234 | pushq_cfi %rcx /* pt_regs->ip */ | |
235 | CFI_REL_OFFSET rip,0 | |
236 | pushq_cfi_reg rax /* pt_regs->orig_ax */ | |
237 | pushq_cfi_reg rdi /* pt_regs->di */ | |
238 | pushq_cfi_reg rsi /* pt_regs->si */ | |
239 | pushq_cfi_reg rdx /* pt_regs->dx */ | |
240 | pushq_cfi_reg rcx /* pt_regs->cx */ | |
241 | pushq_cfi $-ENOSYS /* pt_regs->ax */ | |
242 | pushq_cfi_reg r8 /* pt_regs->r8 */ | |
243 | pushq_cfi_reg r9 /* pt_regs->r9 */ | |
244 | pushq_cfi_reg r10 /* pt_regs->r10 */ | |
a71ffdd7 DV |
245 | pushq_cfi_reg r11 /* pt_regs->r11 */ |
246 | sub $(6*8),%rsp /* pt_regs->bp,bx,r12-15 not saved */ | |
27be87c5 | 247 | CFI_ADJUST_CFA_OFFSET 6*8 |
9ed8e7d8 | 248 | |
dca5b52a | 249 | testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) |
1da177e4 | 250 | jnz tracesys |
86a1c34a | 251 | system_call_fastpath: |
fca460f9 | 252 | #if __SYSCALL_MASK == ~0 |
1da177e4 | 253 | cmpq $__NR_syscall_max,%rax |
fca460f9 PA |
254 | #else |
255 | andl $__SYSCALL_MASK,%eax | |
256 | cmpl $__NR_syscall_max,%eax | |
257 | #endif | |
146b2b09 | 258 | ja 1f /* return -ENOSYS (already in pt_regs->ax) */ |
1da177e4 | 259 | movq %r10,%rcx |
146b2b09 | 260 | call *sys_call_table(,%rax,8) |
f2db9382 | 261 | movq %rax,RAX(%rsp) |
146b2b09 | 262 | 1: |
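
What the fast path above does, rendered as self-contained C (a model with a stand-in table and types, not the kernel's real symbols); note argument 4 moving from r10 into the rcx slot of the C calling convention:

    #include <errno.h>
    #include <stdio.h>

    typedef long (*sys_call_ptr_t)(long, long, long, long, long, long);

    static long sys_demo(long a, long b, long c, long d, long e, long f)
    { (void)b; (void)c; (void)d; (void)e; (void)f; return a + 1; }

    static sys_call_ptr_t sys_call_table[] = { sys_demo };
    #define NR_SYSCALL_MAX 0            /* highest valid index */

    static long dispatch(unsigned long nr, long di, long si, long dx,
                         long r10, long r8, long r9)
    {
            if (nr > NR_SYSCALL_MAX)
                    return -ENOSYS;     /* the value pushed at entry stays */
            return sys_call_table[nr](di, si, dx, r10, r8, r9);
    }

    int main(void)
    {
            printf("%ld %ld\n", dispatch(0, 41, 0, 0, 0, 0),
                                dispatch(7, 0, 0, 0, 0, 0));
            return 0;
    }
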
1da177e4 | 263 | /* |
146b2b09 DV |
264 | * Syscall return path ending with SYSRET (fast path). |
265 | * Has incompletely filled pt_regs. | |
0bd7b798 | 266 | */ |
10cd706d | 267 | LOCKDEP_SYS_EXIT |
4416c5a6 DV |
268 | /* |
269 | * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON, | |
270 | * it is too small to ever cause noticeable irq latency. | |
271 | */ | |
72fe4858 | 272 | DISABLE_INTERRUPTS(CLBR_NONE) |
b3494a4a AL |
273 | |
274 | /* | |
275 | * We must check ti flags with interrupts (or at least preemption) | |
276 | * off because we must *never* return to userspace without | |
277 | * processing exit work that is enqueued if we're preempted here. | |
278 | * In particular, returning to userspace with any of the one-shot | |
279 | * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is | |
280 | * very bad. | |
281 | */ | |
06ab9c1b IM |
282 | testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) |
283 | jnz int_ret_from_sys_call_irqs_off /* Go to the slow path */ | |
b3494a4a | 284 | |
bcddc015 | 285 | CFI_REMEMBER_STATE |
4416c5a6 | 286 | |
29722cd4 DV |
287 | RESTORE_C_REGS_EXCEPT_RCX_R11 |
288 | movq RIP(%rsp),%rcx | |
7effaa88 | 289 | CFI_REGISTER rip,rcx |
29722cd4 | 290 | movq EFLAGS(%rsp),%r11 |
7effaa88 | 291 | /*CFI_REGISTER rflags,r11*/ |
263042e4 | 292 | movq RSP(%rsp),%rsp |
b87cf63e DV |
293 | /* |
294 | * 64bit SYSRET restores rip from rcx, | |
295 | * rflags from r11 (but RF and VM bits are forced to 0), | |
296 | * cs and ss are loaded from MSRs. | |
4416c5a6 | 297 | * Restoration of rflags re-enables interrupts. |
b87cf63e | 298 | */ |
2be29982 | 299 | USERGS_SYSRET64 |
1da177e4 | 300 | |
bcddc015 | 301 | CFI_RESTORE_STATE |
1da177e4 | 302 | |
7fcb3bc3 | 303 | /* Do syscall entry tracing */ |
0bd7b798 | 304 | tracesys: |
76f5df43 | 305 | movq %rsp, %rdi |
47eb582e | 306 | movl $AUDIT_ARCH_X86_64, %esi |
1dcf74f6 AL |
307 | call syscall_trace_enter_phase1 |
308 | test %rax, %rax | |
309 | jnz tracesys_phase2 /* if needed, run the slow path */ | |
76f5df43 | 310 | RESTORE_C_REGS_EXCEPT_RAX /* else restore clobbered regs */ |
f2db9382 | 311 | movq ORIG_RAX(%rsp), %rax |
1dcf74f6 AL |
312 | jmp system_call_fastpath /* and return to the fast path */ |
313 | ||
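
The phase1/phase2 split above follows a simple contract: phase1 returns 0 when no entry work is needed, and any other value is handed to phase2, whose return value becomes the syscall number used for the table lookup. A self-contained C model of that contract (editorial stubs, not the real kernel functions):

    #include <stdio.h>

    static long phase1_model(long nr) { (void)nr; return 0; }
    static long phase2_model(long nr, long p1) { (void)p1; return nr; }

    static long trace_enter_model(long nr)
    {
            long p1 = phase1_model(nr);
            if (p1 == 0)
                    return nr;                /* back to the fast path */
            return phase2_model(nr, p1);      /* may rewrite nr, or return -1 */
    }

    int main(void) { printf("%ld\n", trace_enter_model(1)); return 0; }
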
314 | tracesys_phase2: | |
76f5df43 | 315 | SAVE_EXTRA_REGS |
1dcf74f6 | 316 | movq %rsp, %rdi |
47eb582e | 317 | movl $AUDIT_ARCH_X86_64, %esi |
1dcf74f6 AL |
318 | movq %rax,%rdx |
319 | call syscall_trace_enter_phase2 | |
320 | ||
d4d67150 | 321 | /* |
e90e147c | 322 | * Reload registers from stack in case ptrace changed them. |
1dcf74f6 | 323 | * We don't reload %rax because syscall_trace_enter_phase2() returned |
d4d67150 RM |
324 | * the value it wants us to use in the table lookup. |
325 | */ | |
76f5df43 DV |
326 | RESTORE_C_REGS_EXCEPT_RAX |
327 | RESTORE_EXTRA_REGS | |
fca460f9 | 328 | #if __SYSCALL_MASK == ~0 |
1da177e4 | 329 | cmpq $__NR_syscall_max,%rax |
fca460f9 PA |
330 | #else |
331 | andl $__SYSCALL_MASK,%eax | |
332 | cmpl $__NR_syscall_max,%eax | |
333 | #endif | |
a6de5a21 | 334 | ja 1f /* return -ENOSYS (already in pt_regs->ax) */ |
1da177e4 LT |
335 | movq %r10,%rcx /* fixup for C */ |
336 | call *sys_call_table(,%rax,8) | |
f2db9382 | 337 | movq %rax,RAX(%rsp) |
a6de5a21 | 338 | 1: |
7fcb3bc3 | 339 | /* Use IRET because user could have changed pt_regs->foo */ |
0bd7b798 AH |
340 | |
341 | /* | |
1da177e4 | 342 | * Syscall return path ending with IRET. |
7fcb3bc3 | 343 | * Has correct iret frame. |
bcddc015 | 344 | */ |
bc8b2b92 | 345 | GLOBAL(int_ret_from_sys_call) |
72fe4858 | 346 | DISABLE_INTERRUPTS(CLBR_NONE) |
4416c5a6 | 347 | int_ret_from_sys_call_irqs_off: /* jumps come here from the irqs-off SYSRET path */ |
2601e64d | 348 | TRACE_IRQS_OFF |
1da177e4 LT |
349 | movl $_TIF_ALLWORK_MASK,%edi |
350 | /* edi: mask to check */ | |
bc8b2b92 | 351 | GLOBAL(int_with_check) |
10cd706d | 352 | LOCKDEP_SYS_EXIT_IRQ |
1da177e4 | 353 | GET_THREAD_INFO(%rcx) |
26ccb8a7 | 354 | movl TI_flags(%rcx),%edx |
1da177e4 LT |
355 | andl %edi,%edx |
356 | jnz int_careful | |
fffbb5dc DV |
357 | andl $~TS_COMPAT,TI_status(%rcx) |
358 | jmp syscall_return | |
1da177e4 LT |
359 | |
360 | /* Either reschedule or signal or syscall exit tracking needed. */ | |
361 | /* First do a reschedule test. */ | |
362 | /* edx: work, edi: workmask */ | |
363 | int_careful: | |
364 | bt $TIF_NEED_RESCHED,%edx | |
365 | jnc int_very_careful | |
2601e64d | 366 | TRACE_IRQS_ON |
72fe4858 | 367 | ENABLE_INTERRUPTS(CLBR_NONE) |
df5d1874 | 368 | pushq_cfi %rdi |
0430499c | 369 | SCHEDULE_USER |
df5d1874 | 370 | popq_cfi %rdi |
72fe4858 | 371 | DISABLE_INTERRUPTS(CLBR_NONE) |
2601e64d | 372 | TRACE_IRQS_OFF |
1da177e4 LT |
373 | jmp int_with_check |
374 | ||
7fcb3bc3 | 375 | /* handle signals and tracing -- both require a full pt_regs */ |
1da177e4 | 376 | int_very_careful: |
2601e64d | 377 | TRACE_IRQS_ON |
72fe4858 | 378 | ENABLE_INTERRUPTS(CLBR_NONE) |
76f5df43 | 379 | SAVE_EXTRA_REGS |
0bd7b798 | 380 | /* Check for syscall exit trace */ |
d4d67150 | 381 | testl $_TIF_WORK_SYSCALL_EXIT,%edx |
1da177e4 | 382 | jz int_signal |
df5d1874 | 383 | pushq_cfi %rdi |
0bd7b798 | 384 | leaq 8(%rsp),%rdi # &ptregs -> arg1 |
1da177e4 | 385 | call syscall_trace_leave |
df5d1874 | 386 | popq_cfi %rdi |
d4d67150 | 387 | andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi |
1da177e4 | 388 | jmp int_restore_rest |
0bd7b798 | 389 | |
1da177e4 | 390 | int_signal: |
8f4d37ec | 391 | testl $_TIF_DO_NOTIFY_MASK,%edx |
1da177e4 LT |
392 | jz 1f |
393 | movq %rsp,%rdi # &ptregs -> arg1 | |
394 | xorl %esi,%esi # oldset -> arg2 | |
395 | call do_notify_resume | |
eca91e78 | 396 | 1: movl $_TIF_WORK_MASK,%edi |
1da177e4 | 397 | int_restore_rest: |
76f5df43 | 398 | RESTORE_EXTRA_REGS |
72fe4858 | 399 | DISABLE_INTERRUPTS(CLBR_NONE) |
2601e64d | 400 | TRACE_IRQS_OFF |
1da177e4 | 401 | jmp int_with_check |
fffbb5dc DV |
402 | |
403 | syscall_return: | |
404 | /* The IRETQ could re-enable interrupts: */ | |
405 | DISABLE_INTERRUPTS(CLBR_ANY) | |
406 | TRACE_IRQS_IRETQ | |
407 | ||
408 | /* | |
409 | * Try to use SYSRET instead of IRET if we're returning to | |
410 | * a completely clean 64-bit userspace context. | |
411 | */ | |
412 | movq RCX(%rsp),%rcx | |
17be0aec DV |
413 | movq RIP(%rsp),%r11 |
414 | cmpq %rcx,%r11 /* RCX == RIP */ | |
fffbb5dc DV |
415 | jne opportunistic_sysret_failed |
416 | ||
417 | /* | |
418 | * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP | |
419 | * in kernel space. This essentially lets the user take over | |
17be0aec | 420 | * the kernel, since userspace controls RSP. |
fffbb5dc | 421 | * |
17be0aec | 422 | * If the width of the "canonical tail" ever becomes variable, this will need |
fffbb5dc DV |
423 | * to be updated to remain correct on both old and new CPUs. |
424 | */ | |
425 | .ifne __VIRTUAL_MASK_SHIFT - 47 | |
426 | .error "virtual address width changed -- SYSRET checks need update" | |
427 | .endif | |
17be0aec DV |
428 | /* Change the top 16 bits to be the sign-extension of the 47th bit */ |
429 | shl $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx | |
430 | sar $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx | |
431 | /* If this changed %rcx, it was not canonical */ | |
432 | cmpq %rcx, %r11 | |
433 | jne opportunistic_sysret_failed | |
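
The shl/sar pair above is the standard sign-extension trick. A self-contained C version of the same test (editorial; assumes the 48-bit virtual addresses this code already asserts via __VIRTUAL_MASK_SHIFT == 47):

    #include <assert.h>
    #include <stdint.h>

    #define VIRTUAL_MASK_SHIFT 47      /* mirrors __VIRTUAL_MASK_SHIFT */

    static int is_canonical(uint64_t addr)
    {
            int shift = 64 - (VIRTUAL_MASK_SHIFT + 1);   /* 16 */
            /* shl then sar: force bits 63..48 to copies of bit 47 */
            uint64_t sext = (uint64_t)(((int64_t)(addr << shift)) >> shift);
            return sext == addr;
    }

    int main(void)
    {
            assert(is_canonical(0x00007fffffffffffULL));  /* top of user space */
            assert(is_canonical(0xffff800000000000ULL));  /* start of kernel space */
            assert(!is_canonical(0x0000800000000000ULL)); /* hole: SYSRET would #GP */
            return 0;
    }
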
fffbb5dc DV |
434 | |
435 | cmpq $__USER_CS,CS(%rsp) /* CS must match SYSRET */ | |
436 | jne opportunistic_sysret_failed | |
437 | ||
438 | movq R11(%rsp),%r11 | |
439 | cmpq %r11,EFLAGS(%rsp) /* R11 == RFLAGS */ | |
440 | jne opportunistic_sysret_failed | |
441 | ||
442 | /* | |
443 | * SYSRET can't restore RF. SYSRET can restore TF, but unlike IRET, | |
444 | * restoring TF results in a trap from userspace immediately after | |
445 | * SYSRET. This would cause an infinite loop whenever #DB happens | |
446 | * with register state that satisfies the opportunistic SYSRET | |
447 | * conditions. For example, single-stepping this user code: | |
448 | * | |
449 | * movq $stuck_here,%rcx | |
450 | * pushfq | |
451 | * popq %r11 | |
452 | * stuck_here: | |
453 | * | |
454 | * would never get past 'stuck_here'. | |
455 | */ | |
456 | testq $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11 | |
457 | jnz opportunistic_sysret_failed | |
458 | ||
459 | /* nothing to check for RSP */ | |
460 | ||
461 | cmpq $__USER_DS,SS(%rsp) /* SS must match SYSRET */ | |
462 | jne opportunistic_sysret_failed | |
463 | ||
464 | /* | |
465 | * We win! This label is here just for ease of understanding | |
466 | * perf profiles. Nothing jumps here. | |
467 | */ | |
468 | syscall_return_via_sysret: | |
469 | CFI_REMEMBER_STATE | |
17be0aec DV |
470 | /* rcx and r11 are already restored (see code above) */ |
471 | RESTORE_C_REGS_EXCEPT_RCX_R11 | |
fffbb5dc DV |
472 | movq RSP(%rsp),%rsp |
473 | USERGS_SYSRET64 | |
474 | CFI_RESTORE_STATE | |
475 | ||
476 | opportunistic_sysret_failed: | |
477 | SWAPGS | |
478 | jmp restore_c_regs_and_iret | |
1da177e4 | 479 | CFI_ENDPROC |
bcddc015 | 480 | END(system_call) |
0bd7b798 | 481 | |
fffbb5dc | 482 | |
1d4b4b29 AV |
483 | .macro FORK_LIKE func |
484 | ENTRY(stub_\func) | |
485 | CFI_STARTPROC | |
76f5df43 DV |
486 | DEFAULT_FRAME 0, 8 /* offset 8: return address */ |
487 | SAVE_EXTRA_REGS 8 | |
772951c4 | 488 | jmp sys_\func |
1d4b4b29 AV |
489 | CFI_ENDPROC |
490 | END(stub_\func) | |
491 | .endm | |
492 | ||
493 | FORK_LIKE clone | |
494 | FORK_LIKE fork | |
495 | FORK_LIKE vfork | |
1da177e4 | 496 | |
1da177e4 LT |
497 | ENTRY(stub_execve) |
498 | CFI_STARTPROC | |
fc3e958a DV |
499 | DEFAULT_FRAME 0, 8 |
500 | call sys_execve | |
501 | return_from_execve: | |
502 | testl %eax, %eax | |
503 | jz 1f | |
504 | /* exec failed, can use fast SYSRET code path in this case */ | |
505 | ret | |
506 | 1: | |
507 | /* must use IRET code path (pt_regs->cs may have changed) */ | |
508 | addq $8, %rsp | |
8b3607b5 | 509 | CFI_ADJUST_CFA_OFFSET -8 |
fc3e958a DV |
510 | ZERO_EXTRA_REGS |
511 | movq %rax,RAX(%rsp) | |
512 | jmp int_ret_from_sys_call | |
1da177e4 | 513 | CFI_ENDPROC |
4b787e0b | 514 | END(stub_execve) |
a37f34a3 DV |
515 | /* |
516 | * Remaining execve stubs are only 7 bytes long. | |
517 | * ENTRY() often aligns to 16 bytes, which in this case has no benefit. | |
518 | */ | |
519 | .align 8 | |
520 | GLOBAL(stub_execveat) | |
27d6ec7a | 521 | CFI_STARTPROC |
fc3e958a DV |
522 | DEFAULT_FRAME 0, 8 |
523 | call sys_execveat | |
524 | jmp return_from_execve | |
27d6ec7a DD |
525 | CFI_ENDPROC |
526 | END(stub_execveat) | |
527 | ||
ac7f5dfb | 528 | #if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION) |
a37f34a3 DV |
529 | .align 8 |
530 | GLOBAL(stub_x32_execve) | |
ac7f5dfb | 531 | GLOBAL(stub32_execve) |
05f1752d DV |
532 | CFI_STARTPROC |
533 | DEFAULT_FRAME 0, 8 | |
534 | call compat_sys_execve | |
535 | jmp return_from_execve | |
536 | CFI_ENDPROC | |
ac7f5dfb | 537 | END(stub32_execve) |
05f1752d | 538 | END(stub_x32_execve) |
a37f34a3 DV |
539 | .align 8 |
540 | GLOBAL(stub_x32_execveat) | |
a37f34a3 | 541 | GLOBAL(stub32_execveat) |
0f90fb97 | 542 | CFI_STARTPROC |
ac7f5dfb | 543 | DEFAULT_FRAME 0, 8 |
0f90fb97 DV |
544 | call compat_sys_execveat |
545 | jmp return_from_execve | |
546 | CFI_ENDPROC | |
547 | END(stub32_execveat) | |
ac7f5dfb | 548 | END(stub_x32_execveat) |
0f90fb97 DV |
549 | #endif |
550 | ||
1da177e4 LT |
551 | /* |
552 | * sigreturn is special because it needs to restore all registers on return. | |
553 | * This cannot be done with SYSRET, so use the IRET return path instead. | |
0bd7b798 | 554 | */ |
1da177e4 LT |
555 | ENTRY(stub_rt_sigreturn) |
556 | CFI_STARTPROC | |
31f0119b DV |
557 | DEFAULT_FRAME 0, 8 |
558 | /* | |
559 | * SAVE_EXTRA_REGS result is not normally needed: | |
560 | * sigreturn overwrites all pt_regs->GPREGS. | |
561 | * But sigreturn can fail (!), and there is no easy way to detect that. | |
562 | * To make sure RESTORE_EXTRA_REGS doesn't restore garbage on error, | |
563 | * we SAVE_EXTRA_REGS here. | |
564 | */ | |
565 | SAVE_EXTRA_REGS 8 | |
1da177e4 | 566 | call sys_rt_sigreturn |
31f0119b DV |
567 | return_from_stub: |
568 | addq $8, %rsp | |
569 | CFI_ADJUST_CFA_OFFSET -8 | |
76f5df43 | 570 | RESTORE_EXTRA_REGS |
31f0119b | 571 | movq %rax,RAX(%rsp) |
1da177e4 LT |
572 | jmp int_ret_from_sys_call |
573 | CFI_ENDPROC | |
4b787e0b | 574 | END(stub_rt_sigreturn) |
1da177e4 | 575 | |
c5a37394 | 576 | #ifdef CONFIG_X86_X32_ABI |
c5a37394 PA |
577 | ENTRY(stub_x32_rt_sigreturn) |
578 | CFI_STARTPROC | |
31f0119b DV |
579 | DEFAULT_FRAME 0, 8 |
580 | SAVE_EXTRA_REGS 8 | |
c5a37394 | 581 | call sys32_x32_rt_sigreturn |
31f0119b | 582 | jmp return_from_stub |
c5a37394 PA |
583 | CFI_ENDPROC |
584 | END(stub_x32_rt_sigreturn) | |
c5a37394 PA |
585 | #endif |
586 | ||
1eeb207f DV |
587 | /* |
588 | * A newly forked process directly context switches into this address. | |
589 | * | |
590 | * rdi: prev task we switched from | |
591 | */ | |
592 | ENTRY(ret_from_fork) | |
593 | DEFAULT_FRAME | |
594 | ||
595 | LOCK ; btr $TIF_FORK,TI_flags(%r8) | |
596 | ||
597 | pushq_cfi $0x0002 | |
598 | popfq_cfi # reset kernel eflags | |
599 | ||
600 | call schedule_tail # rdi: 'prev' task parameter | |
601 | ||
1eeb207f DV |
602 | RESTORE_EXTRA_REGS |
603 | ||
604 | testl $3,CS(%rsp) # from kernel_thread? | |
1eeb207f | 605 | |
1e3fbb8a AL |
606 | /* |
607 | * By the time we get here, we have no idea whether our pt_regs, | |
608 | * ti flags, and ti status came from the 64-bit SYSCALL fast path, | |
609 | * the slow path, or one of the ia32entry paths. | |
66ad4efa | 610 | * Use IRET code path to return, since it can safely handle |
1e3fbb8a AL |
611 | * all of the above. |
612 | */ | |
66ad4efa | 613 | jnz int_ret_from_sys_call |
1eeb207f | 614 | |
66ad4efa DV |
615 | /* We came from kernel_thread */ |
616 | /* nb: we depend on RESTORE_EXTRA_REGS above */ | |
1eeb207f DV |
617 | movq %rbp, %rdi |
618 | call *%rbx | |
619 | movl $0, RAX(%rsp) | |
620 | RESTORE_EXTRA_REGS | |
621 | jmp int_ret_from_sys_call | |
622 | CFI_ENDPROC | |
623 | END(ret_from_fork) | |
624 | ||
939b7871 | 625 | /* |
3304c9c3 DV |
626 | * Build the entry stubs with some assembler magic. |
627 | * We pack 1 stub into every 8-byte block. | |
939b7871 | 628 | */ |
3304c9c3 | 629 | .align 8 |
939b7871 PA |
630 | ENTRY(irq_entries_start) |
631 | INTR_FRAME | |
3304c9c3 DV |
632 | vector=FIRST_EXTERNAL_VECTOR |
633 | .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR) | |
634 | pushq_cfi $(~vector+0x80) /* Note: always in signed byte range */ | |
635 | vector=vector+1 | |
636 | jmp common_interrupt | |
939b7871 | 637 | CFI_ADJUST_CFA_OFFSET -8 |
3304c9c3 DV |
638 | .align 8 |
639 | .endr | |
939b7871 PA |
640 | CFI_ENDPROC |
641 | END(irq_entries_start) | |
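
The "always in signed byte range" note above is easy to verify: for any vector in 0..255, ~vector+0x80 lands in [-128, 127], so each stub's push fits the two-byte pushq imm8 encoding. A self-contained check (editorial):

    #include <assert.h>

    int main(void)
    {
            for (int vector = 0; vector < 256; vector++) {
                    int pushed = ~vector + 0x80;  /* what each stub pushes */
                    assert(pushed >= -128 && pushed <= 127);
                    /* common_interrupt's addq $-0x80 recovers ~vector */
                    assert(pushed - 0x80 == ~vector);
            }
            return 0;
    }
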
642 | ||
d99015b1 | 643 | /* |
1da177e4 LT |
644 | * Interrupt entry/exit. |
645 | * | |
646 | * Interrupt entry points save only callee-clobbered registers in the fast path. |
d99015b1 AH |
647 | * |
648 | * Entry runs with interrupts off. | |
649 | */ | |
1da177e4 | 650 | |
722024db | 651 | /* 0(%rsp): ~(interrupt number) */ |
1da177e4 | 652 | .macro interrupt func |
f6f64681 | 653 | cld |
e90e147c DV |
654 | /* |
655 | * Since nothing in interrupt handling code touches r12...r15 members | |
656 | * of "struct pt_regs", and since interrupts can nest, we can save | |
657 | * four stack slots and simultaneously provide | |
658 | * an unwind-friendly stack layout by saving "truncated" pt_regs | |
659 | * exactly up to rbp slot, without these members. | |
660 | */ | |
76f5df43 DV |
661 | ALLOC_PT_GPREGS_ON_STACK -RBP |
662 | SAVE_C_REGS -RBP | |
663 | /* this goes to 0(%rsp) for unwinder, not for saving the value: */ | |
664 | SAVE_EXTRA_REGS_RBP -RBP | |
665 | ||
666 | leaq -RBP(%rsp),%rdi /* arg1 for \func (pointer to pt_regs) */ | |
f6f64681 | 667 | |
76f5df43 | 668 | testl $3, CS-RBP(%rsp) |
dde74f2e | 669 | jz 1f |
f6f64681 | 670 | SWAPGS |
76f5df43 | 671 | 1: |
f6f64681 | 672 | /* |
e90e147c | 673 | * Save previous stack pointer, optionally switch to interrupt stack. |
f6f64681 DV |
674 | * irq_count is used to check if a CPU is already on an interrupt stack |
675 | * or not. While this is essentially redundant with preempt_count it is | |
676 | * a little cheaper to use a separate counter in the PDA (short of | |
677 | * moving irq_enter into assembly, which would be too much work) | |
678 | */ | |
76f5df43 DV |
679 | movq %rsp, %rsi |
680 | incl PER_CPU_VAR(irq_count) | |
f6f64681 DV |
681 | cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp |
682 | CFI_DEF_CFA_REGISTER rsi | |
f6f64681 | 683 | pushq %rsi |
911d2bb5 DV |
684 | /* |
685 | * For debugger: | |
686 | * "CFA (Current Frame Address) is the value on stack + offset" | |
687 | */ | |
f6f64681 | 688 | CFI_ESCAPE 0x0f /* DW_CFA_def_cfa_expression */, 6, \ |
911d2bb5 | 689 | 0x77 /* DW_OP_breg7 (rsp) */, 0, \ |
f6f64681 | 690 | 0x06 /* DW_OP_deref */, \ |
911d2bb5 | 691 | 0x08 /* DW_OP_const1u */, SIZEOF_PTREGS-RBP, \ |
f6f64681 DV |
692 | 0x22 /* DW_OP_plus */ |
693 | /* We entered an interrupt context - irqs are off: */ | |
694 | TRACE_IRQS_OFF | |
695 | ||
1da177e4 LT |
696 | call \func |
697 | .endm | |
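
The irq_count bookkeeping above relies on the per-CPU counter being initialized to -1, so the incl yields zero (and cmovzq switches stacks) only for the outermost interrupt. A self-contained C model of that nesting logic (editorial):

    #include <assert.h>

    static int irq_count = -1;     /* stands in for PER_CPU_VAR(irq_count) */

    static int enter_irq(void)     /* returns 1 iff we switch to the irq stack */
    {
            return ++irq_count == 0;
    }

    static void exit_irq(void)     /* the decl in ret_from_intr */
    {
            --irq_count;
    }

    int main(void)
    {
            assert(enter_irq());   /* outermost: switch stacks */
            assert(!enter_irq());  /* nested: stay put */
            exit_irq(); exit_irq();
            assert(irq_count == -1);
            return 0;
    }
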
698 | ||
722024db AH |
699 | /* |
700 | * The interrupt stubs push (~vector+0x80) onto the stack and | |
701 | * then jump to common_interrupt. | |
702 | */ | |
939b7871 PA |
703 | .p2align CONFIG_X86_L1_CACHE_SHIFT |
704 | common_interrupt: | |
7effaa88 | 705 | XCPT_FRAME |
ee4eb87b | 706 | ASM_CLAC |
722024db | 707 | addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */ |
1da177e4 | 708 | interrupt do_IRQ |
34061f13 | 709 | /* 0(%rsp): old RSP */ |
7effaa88 | 710 | ret_from_intr: |
72fe4858 | 711 | DISABLE_INTERRUPTS(CLBR_NONE) |
2601e64d | 712 | TRACE_IRQS_OFF |
56895530 | 713 | decl PER_CPU_VAR(irq_count) |
625dbc3b | 714 | |
a2bbe750 FW |
715 | /* Restore saved previous stack */ |
716 | popq %rsi | |
911d2bb5 | 717 | CFI_DEF_CFA rsi,SIZEOF_PTREGS-RBP /* reg/off reset after def_cfa_expr */ |
e90e147c | 718 | /* return code expects complete pt_regs - adjust rsp accordingly: */ |
f2db9382 | 719 | leaq -RBP(%rsi),%rsp |
7effaa88 | 720 | CFI_DEF_CFA_REGISTER rsp |
f2db9382 | 721 | CFI_ADJUST_CFA_OFFSET RBP |
625dbc3b | 722 | |
f2db9382 | 723 | testl $3,CS(%rsp) |
dde74f2e | 724 | jz retint_kernel |
1da177e4 | 725 | /* Interrupt came from user space */ |
a3675b32 DV |
726 | |
727 | GET_THREAD_INFO(%rcx) | |
1da177e4 | 728 | /* |
1da177e4 | 729 | * %rcx: thread info. Interrupts off. |
0bd7b798 | 730 | */ |
1da177e4 LT |
731 | retint_with_reschedule: |
732 | movl $_TIF_WORK_MASK,%edi | |
7effaa88 | 733 | retint_check: |
10cd706d | 734 | LOCKDEP_SYS_EXIT_IRQ |
26ccb8a7 | 735 | movl TI_flags(%rcx),%edx |
1da177e4 | 736 | andl %edi,%edx |
7effaa88 | 737 | CFI_REMEMBER_STATE |
1da177e4 | 738 | jnz retint_careful |
10cd706d PZ |
739 | |
740 | retint_swapgs: /* return to user-space */ | |
2601e64d IM |
741 | /* |
742 | * The iretq could re-enable interrupts: | |
743 | */ | |
72fe4858 | 744 | DISABLE_INTERRUPTS(CLBR_ANY) |
2601e64d | 745 | TRACE_IRQS_IRETQ |
2a23c6b8 | 746 | |
72fe4858 | 747 | SWAPGS |
fffbb5dc | 748 | jmp restore_c_regs_and_iret |
2601e64d | 749 | |
627276cb | 750 | /* Returning to kernel space */ |
6ba71b76 | 751 | retint_kernel: |
627276cb DV |
752 | #ifdef CONFIG_PREEMPT |
753 | /* Interrupts are off */ | |
754 | /* Check if we need preemption */ | |
627276cb | 755 | bt $9,EFLAGS(%rsp) /* interrupts were off? */ |
6ba71b76 | 756 | jnc 1f |
36acef25 DV |
757 | 0: cmpl $0,PER_CPU_VAR(__preempt_count) |
758 | jnz 1f | |
627276cb | 759 | call preempt_schedule_irq |
36acef25 | 760 | jmp 0b |
6ba71b76 | 761 | 1: |
627276cb | 762 | #endif |
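
Rendered as C, the preemption block above (labels 0:/1:) only runs if the interrupted kernel context had interrupts enabled, and keeps scheduling while __preempt_count reads zero, which in the real kernel encodes "preemptible and reschedule needed". A self-contained model (editorial; the real check is on the per-CPU __preempt_count):

    #include <stdio.h>

    static int preempt_count_model;     /* stand-in for __preempt_count */

    static void preempt_schedule_irq_model(void)
    {
            printf("preempt_schedule_irq()\n");
            preempt_count_model = 1;    /* pretend the resched need is gone */
    }

    static void retint_kernel_model(int interrupted_irqs_on)
    {
            if (!interrupted_irqs_on)             /* bt $9,EFLAGS / jnc 1f */
                    return;
            while (preempt_count_model == 0)      /* 0: cmpl $0,... / jnz 1f */
                    preempt_schedule_irq_model(); /* call; jmp 0b */
    }

    int main(void) { retint_kernel_model(1); return 0; }
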
2601e64d IM |
763 | /* |
764 | * The iretq could re-enable interrupts: | |
765 | */ | |
766 | TRACE_IRQS_IRETQ | |
fffbb5dc DV |
767 | |
768 | /* | |
769 | * At this label, code paths which return to kernel and to user, | |
770 | * which come from interrupts/exception and from syscalls, merge. | |
771 | */ | |
772 | restore_c_regs_and_iret: | |
76f5df43 DV |
773 | RESTORE_C_REGS |
774 | REMOVE_PT_GPREGS_FROM_STACK 8 | |
3701d863 | 775 | |
f7f3d791 | 776 | irq_return: |
7209a75d AL |
777 | INTERRUPT_RETURN |
778 | ||
779 | ENTRY(native_iret) | |
3891a04a PA |
780 | /* |
781 | * Are we returning to a stack segment from the LDT? Note: in | |
782 | * 64-bit mode SS:RSP on the exception stack is always valid. | |
783 | */ | |
34273f41 | 784 | #ifdef CONFIG_X86_ESPFIX64 |
3891a04a | 785 | testb $4,(SS-RIP)(%rsp) |
7209a75d | 786 | jnz native_irq_return_ldt |
34273f41 | 787 | #endif |
3891a04a | 788 | |
af726f21 | 789 | .global native_irq_return_iret |
7209a75d | 790 | native_irq_return_iret: |
b645af2d AL |
791 | /* |
792 | * This may fault. Non-paranoid faults on return to userspace are | |
793 | * handled by fixup_bad_iret. These include #SS, #GP, and #NP. | |
794 | * Double-faults due to espfix64 are handled in do_double_fault. | |
795 | * Other faults here are fatal. | |
796 | */ | |
1da177e4 | 797 | iretq |
3701d863 | 798 | |
34273f41 | 799 | #ifdef CONFIG_X86_ESPFIX64 |
7209a75d | 800 | native_irq_return_ldt: |
3891a04a PA |
801 | pushq_cfi %rax |
802 | pushq_cfi %rdi | |
803 | SWAPGS | |
804 | movq PER_CPU_VAR(espfix_waddr),%rdi | |
805 | movq %rax,(0*8)(%rdi) /* RAX */ | |
806 | movq (2*8)(%rsp),%rax /* RIP */ | |
807 | movq %rax,(1*8)(%rdi) | |
808 | movq (3*8)(%rsp),%rax /* CS */ | |
809 | movq %rax,(2*8)(%rdi) | |
810 | movq (4*8)(%rsp),%rax /* RFLAGS */ | |
811 | movq %rax,(3*8)(%rdi) | |
812 | movq (6*8)(%rsp),%rax /* SS */ | |
813 | movq %rax,(5*8)(%rdi) | |
814 | movq (5*8)(%rsp),%rax /* RSP */ | |
815 | movq %rax,(4*8)(%rdi) | |
816 | andl $0xffff0000,%eax | |
817 | popq_cfi %rdi | |
818 | orq PER_CPU_VAR(espfix_stack),%rax | |
819 | SWAPGS | |
820 | movq %rax,%rsp | |
821 | popq_cfi %rax | |
7209a75d | 822 | jmp native_irq_return_iret |
34273f41 | 823 | #endif |
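
The arithmetic in native_irq_return_ldt merges two sources: bits 16..31 of the final RSP come from the user's own RSP (so the bits a 16-bit IRET leaves unchanged leak nothing from the kernel), and everything else comes from the per-CPU espfix_stack value. A small C illustration (editorial; the espfix_stack value shown is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t espfix_stack = 0xffffff0000002000ULL;  /* hypothetical */
            uint64_t user_rsp     = 0x00007ffdcafe1234ULL;
            /* andl $0xffff0000,%eax keeps bits 16..31 and clears the rest;
             * orq then merges in the espfix base: */
            uint64_t new_rsp = espfix_stack | (user_rsp & 0xffff0000ULL);
            printf("iretq runs with rsp = %#llx\n", (unsigned long long)new_rsp);
            return 0;
    }
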
3891a04a | 824 | |
7effaa88 | 825 | /* edi: workmask, edx: work */ |
1da177e4 | 826 | retint_careful: |
7effaa88 | 827 | CFI_RESTORE_STATE |
1da177e4 LT |
828 | bt $TIF_NEED_RESCHED,%edx |
829 | jnc retint_signal | |
2601e64d | 830 | TRACE_IRQS_ON |
72fe4858 | 831 | ENABLE_INTERRUPTS(CLBR_NONE) |
df5d1874 | 832 | pushq_cfi %rdi |
0430499c | 833 | SCHEDULE_USER |
df5d1874 | 834 | popq_cfi %rdi |
1da177e4 | 835 | GET_THREAD_INFO(%rcx) |
72fe4858 | 836 | DISABLE_INTERRUPTS(CLBR_NONE) |
2601e64d | 837 | TRACE_IRQS_OFF |
1da177e4 | 838 | jmp retint_check |
0bd7b798 | 839 | |
1da177e4 | 840 | retint_signal: |
8f4d37ec | 841 | testl $_TIF_DO_NOTIFY_MASK,%edx |
10ffdbb8 | 842 | jz retint_swapgs |
2601e64d | 843 | TRACE_IRQS_ON |
72fe4858 | 844 | ENABLE_INTERRUPTS(CLBR_NONE) |
76f5df43 | 845 | SAVE_EXTRA_REGS |
0bd7b798 | 846 | movq $-1,ORIG_RAX(%rsp) |
3829ee6b | 847 | xorl %esi,%esi # oldset |
1da177e4 LT |
848 | movq %rsp,%rdi # &pt_regs |
849 | call do_notify_resume | |
76f5df43 | 850 | RESTORE_EXTRA_REGS |
72fe4858 | 851 | DISABLE_INTERRUPTS(CLBR_NONE) |
2601e64d | 852 | TRACE_IRQS_OFF |
be9e6870 | 853 | GET_THREAD_INFO(%rcx) |
eca91e78 | 854 | jmp retint_with_reschedule |
1da177e4 | 855 | |
1da177e4 | 856 | CFI_ENDPROC |
4b787e0b | 857 | END(common_interrupt) |
3891a04a | 858 | |
1da177e4 LT |
859 | /* |
860 | * APIC interrupts. | |
0bd7b798 | 861 | */ |
cf910e83 | 862 | .macro apicinterrupt3 num sym do_sym |
322648d1 | 863 | ENTRY(\sym) |
7effaa88 | 864 | INTR_FRAME |
ee4eb87b | 865 | ASM_CLAC |
df5d1874 | 866 | pushq_cfi $~(\num) |
39e95433 | 867 | .Lcommon_\sym: |
322648d1 | 868 | interrupt \do_sym |
1da177e4 LT |
869 | jmp ret_from_intr |
870 | CFI_ENDPROC | |
322648d1 AH |
871 | END(\sym) |
872 | .endm | |
1da177e4 | 873 | |
cf910e83 SA |
874 | #ifdef CONFIG_TRACING |
875 | #define trace(sym) trace_##sym | |
876 | #define smp_trace(sym) smp_trace_##sym | |
877 | ||
878 | .macro trace_apicinterrupt num sym | |
879 | apicinterrupt3 \num trace(\sym) smp_trace(\sym) | |
880 | .endm | |
881 | #else | |
882 | .macro trace_apicinterrupt num sym do_sym | |
883 | .endm | |
884 | #endif | |
885 | ||
886 | .macro apicinterrupt num sym do_sym | |
887 | apicinterrupt3 \num \sym \do_sym | |
888 | trace_apicinterrupt \num \sym | |
889 | .endm | |
890 | ||
322648d1 | 891 | #ifdef CONFIG_SMP |
cf910e83 | 892 | apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR \ |
322648d1 | 893 | irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt |
cf910e83 | 894 | apicinterrupt3 REBOOT_VECTOR \ |
4ef702c1 | 895 | reboot_interrupt smp_reboot_interrupt |
322648d1 | 896 | #endif |
1da177e4 | 897 | |
03b48632 | 898 | #ifdef CONFIG_X86_UV |
cf910e83 | 899 | apicinterrupt3 UV_BAU_MESSAGE \ |
322648d1 | 900 | uv_bau_message_intr1 uv_bau_message_interrupt |
03b48632 | 901 | #endif |
322648d1 AH |
902 | apicinterrupt LOCAL_TIMER_VECTOR \ |
903 | apic_timer_interrupt smp_apic_timer_interrupt | |
4a4de9c7 DS |
904 | apicinterrupt X86_PLATFORM_IPI_VECTOR \ |
905 | x86_platform_ipi smp_x86_platform_ipi | |
89b831ef | 906 | |
d78f2664 | 907 | #ifdef CONFIG_HAVE_KVM |
cf910e83 | 908 | apicinterrupt3 POSTED_INTR_VECTOR \ |
d78f2664 YZ |
909 | kvm_posted_intr_ipi smp_kvm_posted_intr_ipi |
910 | #endif | |
911 | ||
33e5ff63 | 912 | #ifdef CONFIG_X86_MCE_THRESHOLD |
322648d1 | 913 | apicinterrupt THRESHOLD_APIC_VECTOR \ |
7856f6cc | 914 | threshold_interrupt smp_threshold_interrupt |
33e5ff63 SA |
915 | #endif |
916 | ||
917 | #ifdef CONFIG_X86_THERMAL_VECTOR | |
322648d1 AH |
918 | apicinterrupt THERMAL_APIC_VECTOR \ |
919 | thermal_interrupt smp_thermal_interrupt | |
33e5ff63 | 920 | #endif |
1812924b | 921 | |
322648d1 AH |
922 | #ifdef CONFIG_SMP |
923 | apicinterrupt CALL_FUNCTION_SINGLE_VECTOR \ | |
924 | call_function_single_interrupt smp_call_function_single_interrupt | |
925 | apicinterrupt CALL_FUNCTION_VECTOR \ | |
926 | call_function_interrupt smp_call_function_interrupt | |
927 | apicinterrupt RESCHEDULE_VECTOR \ | |
928 | reschedule_interrupt smp_reschedule_interrupt | |
929 | #endif | |
1da177e4 | 930 | |
322648d1 AH |
931 | apicinterrupt ERROR_APIC_VECTOR \ |
932 | error_interrupt smp_error_interrupt | |
933 | apicinterrupt SPURIOUS_APIC_VECTOR \ | |
934 | spurious_interrupt smp_spurious_interrupt | |
0bd7b798 | 935 | |
e360adbe PZ |
936 | #ifdef CONFIG_IRQ_WORK |
937 | apicinterrupt IRQ_WORK_VECTOR \ | |
938 | irq_work_interrupt smp_irq_work_interrupt | |
241771ef IM |
939 | #endif |
940 | ||
1da177e4 LT |
941 | /* |
942 | * Exception entry points. | |
0bd7b798 | 943 | */ |
9b476688 | 944 | #define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8) |
577ed45e AL |
945 | |
946 | .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 | |
322648d1 | 947 | ENTRY(\sym) |
577ed45e AL |
948 | /* Sanity check */ |
949 | .if \shift_ist != -1 && \paranoid == 0 | |
950 | .error "using shift_ist requires paranoid=1" | |
951 | .endif | |
952 | ||
cb5dd2c5 AL |
953 | .if \has_error_code |
954 | XCPT_FRAME | |
955 | .else | |
7effaa88 | 956 | INTR_FRAME |
cb5dd2c5 | 957 | .endif |
1da177e4 | 958 | |
ee4eb87b | 959 | ASM_CLAC |
b8b1d08b | 960 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
cb5dd2c5 AL |
961 | |
962 | .ifeq \has_error_code | |
963 | pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ | |
964 | .endif | |
965 | ||
76f5df43 | 966 | ALLOC_PT_GPREGS_ON_STACK |
cb5dd2c5 AL |
967 | |
968 | .if \paranoid | |
48e08d0f AL |
969 | .if \paranoid == 1 |
970 | CFI_REMEMBER_STATE | |
971 | testl $3, CS(%rsp) /* If coming from userspace, switch */ | |
972 | jnz 1f /* stacks. */ | |
973 | .endif | |
ebfc453e | 974 | call paranoid_entry |
cb5dd2c5 AL |
975 | .else |
976 | call error_entry | |
977 | .endif | |
ebfc453e | 978 | /* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */ |
cb5dd2c5 | 979 | |
1bd24efc | 980 | DEFAULT_FRAME 0 |
cb5dd2c5 AL |
981 | |
982 | .if \paranoid | |
577ed45e AL |
983 | .if \shift_ist != -1 |
984 | TRACE_IRQS_OFF_DEBUG /* reload IDT in case of recursion */ | |
985 | .else | |
b8b1d08b | 986 | TRACE_IRQS_OFF |
cb5dd2c5 | 987 | .endif |
577ed45e | 988 | .endif |
cb5dd2c5 AL |
989 | |
990 | movq %rsp,%rdi /* pt_regs pointer */ | |
991 | ||
992 | .if \has_error_code | |
993 | movq ORIG_RAX(%rsp),%rsi /* get error code */ | |
994 | movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */ | |
995 | .else | |
996 | xorl %esi,%esi /* no error code */ | |
997 | .endif | |
998 | ||
577ed45e | 999 | .if \shift_ist != -1 |
9b476688 | 1000 | subq $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist) |
577ed45e AL |
1001 | .endif |
1002 | ||
322648d1 | 1003 | call \do_sym |
cb5dd2c5 | 1004 | |
577ed45e | 1005 | .if \shift_ist != -1 |
9b476688 | 1006 | addq $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist) |
577ed45e AL |
1007 | .endif |
1008 | ||
ebfc453e | 1009 | /* these procedures expect "no swapgs" flag in ebx */ |
cb5dd2c5 | 1010 | .if \paranoid |
ebfc453e | 1011 | jmp paranoid_exit |
cb5dd2c5 | 1012 | .else |
ebfc453e | 1013 | jmp error_exit |
cb5dd2c5 AL |
1014 | .endif |
1015 | ||
48e08d0f AL |
1016 | .if \paranoid == 1 |
1017 | CFI_RESTORE_STATE | |
1018 | /* | |
1019 | * Paranoid entry from userspace. Switch stacks and treat it | |
1020 | * as a normal entry. This means that paranoid handlers | |
1021 | * run in real process context if user_mode(regs). | |
1022 | */ | |
1023 | 1: | |
1024 | call error_entry | |
1025 | ||
1026 | DEFAULT_FRAME 0 | |
1027 | ||
1028 | movq %rsp,%rdi /* pt_regs pointer */ | |
1029 | call sync_regs | |
1030 | movq %rax,%rsp /* switch stack */ | |
1031 | ||
1032 | movq %rsp,%rdi /* pt_regs pointer */ | |
1033 | ||
1034 | .if \has_error_code | |
1035 | movq ORIG_RAX(%rsp),%rsi /* get error code */ | |
1036 | movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */ | |
1037 | .else | |
1038 | xorl %esi,%esi /* no error code */ | |
1039 | .endif | |
1040 | ||
1041 | call \do_sym | |
1042 | ||
1043 | jmp error_exit /* %ebx: no swapgs flag */ | |
1044 | .endif | |
1045 | ||
b8b1d08b | 1046 | CFI_ENDPROC |
ddeb8f21 | 1047 | END(\sym) |
322648d1 | 1048 | .endm |
b8b1d08b | 1049 | |
25c74b10 | 1050 | #ifdef CONFIG_TRACING |
cb5dd2c5 AL |
1051 | .macro trace_idtentry sym do_sym has_error_code:req |
1052 | idtentry trace(\sym) trace(\do_sym) has_error_code=\has_error_code | |
1053 | idtentry \sym \do_sym has_error_code=\has_error_code | |
25c74b10 SA |
1054 | .endm |
1055 | #else | |
cb5dd2c5 AL |
1056 | .macro trace_idtentry sym do_sym has_error_code:req |
1057 | idtentry \sym \do_sym has_error_code=\has_error_code | |
25c74b10 SA |
1058 | .endm |
1059 | #endif | |
1060 | ||
cb5dd2c5 AL |
1061 | idtentry divide_error do_divide_error has_error_code=0 |
1062 | idtentry overflow do_overflow has_error_code=0 | |
1063 | idtentry bounds do_bounds has_error_code=0 | |
1064 | idtentry invalid_op do_invalid_op has_error_code=0 | |
1065 | idtentry device_not_available do_device_not_available has_error_code=0 | |
48e08d0f | 1066 | idtentry double_fault do_double_fault has_error_code=1 paranoid=2 |
cb5dd2c5 AL |
1067 | idtentry coprocessor_segment_overrun do_coprocessor_segment_overrun has_error_code=0 |
1068 | idtentry invalid_TSS do_invalid_TSS has_error_code=1 | |
1069 | idtentry segment_not_present do_segment_not_present has_error_code=1 | |
1070 | idtentry spurious_interrupt_bug do_spurious_interrupt_bug has_error_code=0 | |
1071 | idtentry coprocessor_error do_coprocessor_error has_error_code=0 | |
1072 | idtentry alignment_check do_alignment_check has_error_code=1 | |
1073 | idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0 | |
5cec93c2 | 1074 | |
2601e64d | 1075 | |
9f1e87ea CG |
1076 | /* Reload gs selector with exception handling */ |
1077 | /* edi: new selector */ | |
9f9d489a | 1078 | ENTRY(native_load_gs_index) |
7effaa88 | 1079 | CFI_STARTPROC |
df5d1874 | 1080 | pushfq_cfi |
b8aa287f | 1081 | DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI) |
9f1e87ea | 1082 | SWAPGS |
0bd7b798 | 1083 | gs_change: |
9f1e87ea | 1084 | movl %edi,%gs |
1da177e4 | 1085 | 2: mfence /* workaround */ |
72fe4858 | 1086 | SWAPGS |
df5d1874 | 1087 | popfq_cfi |
9f1e87ea | 1088 | ret |
7effaa88 | 1089 | CFI_ENDPROC |
6efdcfaf | 1090 | END(native_load_gs_index) |
0bd7b798 | 1091 | |
d7abc0fa | 1092 | _ASM_EXTABLE(gs_change,bad_gs) |
9f1e87ea | 1093 | .section .fixup,"ax" |
1da177e4 | 1094 | /* running with kernelgs */ |
0bd7b798 | 1095 | bad_gs: |
72fe4858 | 1096 | SWAPGS /* switch back to user gs */ |
1da177e4 | 1097 | xorl %eax,%eax |
9f1e87ea CG |
1098 | movl %eax,%gs |
1099 | jmp 2b | |
1100 | .previous | |
0bd7b798 | 1101 | |
2699500b | 1102 | /* Call softirq on interrupt stack. Interrupts are off. */ |
7d65f4a6 | 1103 | ENTRY(do_softirq_own_stack) |
7effaa88 | 1104 | CFI_STARTPROC |
df5d1874 | 1105 | pushq_cfi %rbp |
2699500b AK |
1106 | CFI_REL_OFFSET rbp,0 |
1107 | mov %rsp,%rbp | |
1108 | CFI_DEF_CFA_REGISTER rbp | |
56895530 | 1109 | incl PER_CPU_VAR(irq_count) |
26f80bd6 | 1110 | cmove PER_CPU_VAR(irq_stack_ptr),%rsp |
2699500b | 1111 | push %rbp # backlink for old unwinder |
ed6b676c | 1112 | call __do_softirq |
2699500b | 1113 | leaveq |
df5d1874 | 1114 | CFI_RESTORE rbp |
7effaa88 | 1115 | CFI_DEF_CFA_REGISTER rsp |
2699500b | 1116 | CFI_ADJUST_CFA_OFFSET -8 |
56895530 | 1117 | decl PER_CPU_VAR(irq_count) |
ed6b676c | 1118 | ret |
7effaa88 | 1119 | CFI_ENDPROC |
7d65f4a6 | 1120 | END(do_softirq_own_stack) |
75154f40 | 1121 | |
3d75e1b8 | 1122 | #ifdef CONFIG_XEN |
cb5dd2c5 | 1123 | idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0 |
3d75e1b8 JF |
1124 | |
1125 | /* | |
9f1e87ea CG |
1126 | * A note on the "critical region" in our callback handler. |
1127 | * We want to avoid stacking callback handlers due to events occurring | |
1128 | * during handling of the last event. To do this, we keep events disabled | |
1129 | * until we've done all processing. HOWEVER, we must enable events before | |
1130 | * popping the stack frame (can't be done atomically) and so it would still | |
1131 | * be possible to get enough handler activations to overflow the stack. | |
1132 | * Although unlikely, bugs of that kind are hard to track down, so we'd | |
1133 | * like to avoid the possibility. | |
1134 | * So, on entry to the handler we detect whether we interrupted an | |
1135 | * existing activation in its critical region -- if so, we pop the current | |
1136 | * activation and restart the handler using the previous one. | |
1137 | */ | |
3d75e1b8 JF |
1138 | ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct pt_regs *) |
1139 | CFI_STARTPROC | |
9f1e87ea CG |
1140 | /* |
1141 | * Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *) will |
1142 | * see the correct pointer to the pt_regs | |
1143 | */ | |
3d75e1b8 JF |
1144 | movq %rdi, %rsp # we don't return, adjust the stack frame |
1145 | CFI_ENDPROC | |
dcd072e2 | 1146 | DEFAULT_FRAME |
56895530 | 1147 | 11: incl PER_CPU_VAR(irq_count) |
3d75e1b8 JF |
1148 | movq %rsp,%rbp |
1149 | CFI_DEF_CFA_REGISTER rbp | |
26f80bd6 | 1150 | cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp |
3d75e1b8 JF |
1151 | pushq %rbp # backlink for old unwinder |
1152 | call xen_evtchn_do_upcall | |
1153 | popq %rsp | |
1154 | CFI_DEF_CFA_REGISTER rsp | |
56895530 | 1155 | decl PER_CPU_VAR(irq_count) |
fdfd811d DV |
1156 | #ifndef CONFIG_PREEMPT |
1157 | call xen_maybe_preempt_hcall | |
1158 | #endif | |
3d75e1b8 JF |
1159 | jmp error_exit |
1160 | CFI_ENDPROC | |
371c394a | 1161 | END(xen_do_hypervisor_callback) |
3d75e1b8 JF |
1162 | |
1163 | /* | |
9f1e87ea CG |
1164 | * Hypervisor uses this for application faults while it executes. |
1165 | * We get here for two reasons: | |
1166 | * 1. Fault while reloading DS, ES, FS or GS | |
1167 | * 2. Fault while executing IRET | |
1168 | * Category 1 we do not need to fix up as Xen has already reloaded all segment | |
1169 | * registers that could be reloaded and zeroed the others. | |
1170 | * Category 2 we fix up by killing the current process. We cannot use the | |
1171 | * normal Linux return path in this case because if we use the IRET hypercall | |
1172 | * to pop the stack frame we end up in an infinite loop of failsafe callbacks. | |
1173 | * We distinguish between categories by comparing each saved segment register | |
1174 | * with its current contents: any discrepancy means we are in category 1. | |
1175 | */ | |
3d75e1b8 | 1176 | ENTRY(xen_failsafe_callback) |
dcd072e2 AH |
1177 | INTR_FRAME 1 (6*8) |
1178 | /*CFI_REL_OFFSET gs,GS*/ | |
1179 | /*CFI_REL_OFFSET fs,FS*/ | |
1180 | /*CFI_REL_OFFSET es,ES*/ | |
1181 | /*CFI_REL_OFFSET ds,DS*/ | |
1182 | CFI_REL_OFFSET r11,8 | |
1183 | CFI_REL_OFFSET rcx,0 | |
3d75e1b8 JF |
1184 | movw %ds,%cx |
1185 | cmpw %cx,0x10(%rsp) | |
1186 | CFI_REMEMBER_STATE | |
1187 | jne 1f | |
1188 | movw %es,%cx | |
1189 | cmpw %cx,0x18(%rsp) | |
1190 | jne 1f | |
1191 | movw %fs,%cx | |
1192 | cmpw %cx,0x20(%rsp) | |
1193 | jne 1f | |
1194 | movw %gs,%cx | |
1195 | cmpw %cx,0x28(%rsp) | |
1196 | jne 1f | |
1197 | /* All segments match their saved values => Category 2 (Bad IRET). */ | |
1198 | movq (%rsp),%rcx | |
1199 | CFI_RESTORE rcx | |
1200 | movq 8(%rsp),%r11 | |
1201 | CFI_RESTORE r11 | |
1202 | addq $0x30,%rsp | |
1203 | CFI_ADJUST_CFA_OFFSET -0x30 | |
14ae22ba IM |
1204 | pushq_cfi $0 /* RIP */ |
1205 | pushq_cfi %r11 | |
1206 | pushq_cfi %rcx | |
4a5c3e77 | 1207 | jmp general_protection |
3d75e1b8 JF |
1208 | CFI_RESTORE_STATE |
1209 | 1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */ | |
1210 | movq (%rsp),%rcx | |
1211 | CFI_RESTORE rcx | |
1212 | movq 8(%rsp),%r11 | |
1213 | CFI_RESTORE r11 | |
1214 | addq $0x30,%rsp | |
1215 | CFI_ADJUST_CFA_OFFSET -0x30 | |
a349e23d | 1216 | pushq_cfi $-1 /* orig_ax = -1 => not a system call */ |
76f5df43 DV |
1217 | ALLOC_PT_GPREGS_ON_STACK |
1218 | SAVE_C_REGS | |
1219 | SAVE_EXTRA_REGS | |
3d75e1b8 JF |
1220 | jmp error_exit |
1221 | CFI_ENDPROC | |
3d75e1b8 JF |
1222 | END(xen_failsafe_callback) |
1223 | ||
cf910e83 | 1224 | apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ |
38e20b07 SY |
1225 | xen_hvm_callback_vector xen_evtchn_do_upcall |
1226 | ||
3d75e1b8 | 1227 | #endif /* CONFIG_XEN */ |
ddeb8f21 | 1228 | |
bc2b0331 | 1229 | #if IS_ENABLED(CONFIG_HYPERV) |
cf910e83 | 1230 | apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ |
bc2b0331 S |
1231 | hyperv_callback_vector hyperv_vector_handler |
1232 | #endif /* CONFIG_HYPERV */ | |
1233 | ||
577ed45e AL |
1234 | idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK |
1235 | idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK | |
6f442be2 | 1236 | idtentry stack_segment do_stack_segment has_error_code=1 |
6cac5a92 | 1237 | #ifdef CONFIG_XEN |
cb5dd2c5 AL |
1238 | idtentry xen_debug do_debug has_error_code=0 |
1239 | idtentry xen_int3 do_int3 has_error_code=0 | |
1240 | idtentry xen_stack_segment do_stack_segment has_error_code=1 | |
6cac5a92 | 1241 | #endif |
cb5dd2c5 AL |
1242 | idtentry general_protection do_general_protection has_error_code=1 |
1243 | trace_idtentry page_fault do_page_fault has_error_code=1 | |
631bc487 | 1244 | #ifdef CONFIG_KVM_GUEST |
cb5dd2c5 | 1245 | idtentry async_page_fault do_async_page_fault has_error_code=1 |
631bc487 | 1246 | #endif |
ddeb8f21 | 1247 | #ifdef CONFIG_X86_MCE |
cb5dd2c5 | 1248 | idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(%rip) |
ddeb8f21 AH |
1249 | #endif |
1250 | ||
ebfc453e DV |
1251 | /* |
1252 | * Save all registers in pt_regs, and switch gs if needed. | |
1253 | * Use slow, but surefire "are we in kernel?" check. | |
1254 | * Return: ebx=0: need swapgs on exit, ebx=1: otherwise | |
1255 | */ | |
1256 | ENTRY(paranoid_entry) | |
1257 | XCPT_FRAME 1 15*8 | |
1eeb207f DV |
1258 | cld |
1259 | SAVE_C_REGS 8 | |
1260 | SAVE_EXTRA_REGS 8 | |
1261 | movl $1,%ebx | |
1262 | movl $MSR_GS_BASE,%ecx | |
1263 | rdmsr | |
1264 | testl %edx,%edx | |
1265 | js 1f /* negative -> in kernel */ | |
1266 | SWAPGS | |
1267 | xorl %ebx,%ebx | |
1268 | 1: ret | |
1269 | CFI_ENDPROC | |
ebfc453e | 1270 | END(paranoid_entry) |
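
The rdmsr/js pair in paranoid_entry works because x86-64 kernel addresses have bit 63 set: if MSR_GS_BASE is negative, the kernel gs base is already live and no SWAPGS is needed. A C sketch of the test (editorial; rdmsr is privileged, so this only models the logic and cannot run in user mode):

    #include <stdint.h>

    #define MSR_GS_BASE 0xc0000101

    static inline uint64_t rdmsr64(uint32_t msr)
    {
            uint32_t lo, hi;
            asm volatile ("rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr));
            return ((uint64_t)hi << 32) | lo;
    }

    static int need_swapgs(void)
    {
            /* testl %edx,%edx / js: a negative base is a kernel address */
            return (int64_t)rdmsr64(MSR_GS_BASE) >= 0;  /* user base still loaded */
    }
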
ddeb8f21 | 1271 | |
ebfc453e DV |
1272 | /* |
1273 | * "Paranoid" exit path from exception stack. This is invoked | |
1274 | * only on return from non-NMI IST interrupts that came | |
1275 | * from kernel space. | |
1276 | * | |
1277 | * We may be returning to very strange contexts (e.g. very early | |
1278 | * in syscall entry), so checking for preemption here would | |
1279 | * be complicated. Fortunately, there's no good reason | |
1280 | * to try to handle preemption here. | |
1281 | */ | |
1282 | /* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */ | |
ddeb8f21 | 1283 | ENTRY(paranoid_exit) |
1f130a78 | 1284 | DEFAULT_FRAME |
ddeb8f21 | 1285 | DISABLE_INTERRUPTS(CLBR_NONE) |
5963e317 | 1286 | TRACE_IRQS_OFF_DEBUG |
ddeb8f21 | 1287 | testl %ebx,%ebx /* swapgs needed? */ |
0d550836 | 1288 | jnz paranoid_exit_no_swapgs |
f2db9382 | 1289 | TRACE_IRQS_IRETQ |
ddeb8f21 | 1290 | SWAPGS_UNSAFE_STACK |
0d550836 DV |
1291 | jmp paranoid_exit_restore |
1292 | paranoid_exit_no_swapgs: | |
f2db9382 | 1293 | TRACE_IRQS_IRETQ_DEBUG |
0d550836 | 1294 | paranoid_exit_restore: |
76f5df43 DV |
1295 | RESTORE_EXTRA_REGS |
1296 | RESTORE_C_REGS | |
1297 | REMOVE_PT_GPREGS_FROM_STACK 8 | |
48e08d0f | 1298 | INTERRUPT_RETURN |
ddeb8f21 AH |
1299 | CFI_ENDPROC |
1300 | END(paranoid_exit) | |
1301 | ||
1302 | /* | |
ebfc453e DV |
1303 | * Save all registers in pt_regs, and switch gs if needed. |
1304 | * Return: ebx=0: need swapgs on exit, ebx=1: otherwise | |
ddeb8f21 AH |
1305 | */ |
1306 | ENTRY(error_entry) | |
ebfc453e | 1307 | XCPT_FRAME 1 15*8 |
ddeb8f21 | 1308 | cld |
76f5df43 DV |
1309 | SAVE_C_REGS 8 |
1310 | SAVE_EXTRA_REGS 8 | |
ddeb8f21 AH |
1311 | xorl %ebx,%ebx |
1312 | testl $3,CS+8(%rsp) | |
dde74f2e | 1313 | jz error_kernelspace |
ddeb8f21 AH |
1314 | error_swapgs: |
1315 | SWAPGS | |
1316 | error_sti: | |
1317 | TRACE_IRQS_OFF | |
1318 | ret | |
ddeb8f21 | 1319 | |
ebfc453e DV |
1320 | /* |
1321 | * There are two places in the kernel that can potentially fault with | |
1322 | * usergs. Handle them here. B stepping K8s sometimes report a | |
1323 | * truncated RIP for IRET exceptions returning to compat mode. Check | |
1324 | * for these here too. | |
1325 | */ | |
ddeb8f21 | 1326 | error_kernelspace: |
3bab13b0 | 1327 | CFI_REL_OFFSET rcx, RCX+8 |
ddeb8f21 | 1328 | incl %ebx |
7209a75d | 1329 | leaq native_irq_return_iret(%rip),%rcx |
ddeb8f21 | 1330 | cmpq %rcx,RIP+8(%rsp) |
b645af2d | 1331 | je error_bad_iret |
ae24ffe5 BG |
1332 | movl %ecx,%eax /* zero extend */ |
1333 | cmpq %rax,RIP+8(%rsp) | |
1334 | je bstep_iret | |
ddeb8f21 | 1335 | cmpq $gs_change,RIP+8(%rsp) |
9f1e87ea | 1336 | je error_swapgs |
ddeb8f21 | 1337 | jmp error_sti |
ae24ffe5 BG |
1338 | |
1339 | bstep_iret: | |
1340 | /* Fix truncated RIP */ | |
1341 | movq %rcx,RIP+8(%rsp) | |
b645af2d AL |
1342 | /* fall through */ |
1343 | ||
1344 | error_bad_iret: | |
1345 | SWAPGS | |
1346 | mov %rsp,%rdi | |
1347 | call fixup_bad_iret | |
1348 | mov %rax,%rsp | |
1349 | decl %ebx /* Return to usergs */ | |
1350 | jmp error_sti | |
e6b04b6b | 1351 | CFI_ENDPROC |
ddeb8f21 AH |
1352 | END(error_entry) |
1353 | ||
1354 | ||
ebfc453e | 1355 | /* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */ |
ddeb8f21 AH |
1356 | ENTRY(error_exit) |
1357 | DEFAULT_FRAME | |
1358 | movl %ebx,%eax | |
76f5df43 | 1359 | RESTORE_EXTRA_REGS |
ddeb8f21 AH |
1360 | DISABLE_INTERRUPTS(CLBR_NONE) |
1361 | TRACE_IRQS_OFF | |
1362 | GET_THREAD_INFO(%rcx) | |
1363 | testl %eax,%eax | |
dde74f2e | 1364 | jnz retint_kernel |
ddeb8f21 AH |
1365 | LOCKDEP_SYS_EXIT_IRQ |
1366 | movl TI_flags(%rcx),%edx | |
1367 | movl $_TIF_WORK_MASK,%edi | |
1368 | andl %edi,%edx | |
1369 | jnz retint_careful | |
1370 | jmp retint_swapgs | |
1371 | CFI_ENDPROC | |
1372 | END(error_exit) | |
1373 | ||
0784b364 | 1374 | /* Runs on exception stack */ |
ddeb8f21 AH |
1375 | ENTRY(nmi) |
1376 | INTR_FRAME | |
1377 | PARAVIRT_ADJUST_EXCEPTION_FRAME | |
3f3c8b8c SR |
1378 | /* |
1379 | * We allow breakpoints in NMIs. If a breakpoint occurs, then | |
1380 | * the iretq it performs will take us out of NMI context. | |
1381 | * This means that we can have nested NMIs where the next | |
1382 | * NMI is using the top of the stack of the previous NMI. We | |
1383 | * can't let it execute because the nested NMI will corrupt the | |
1384 | * stack of the previous NMI. NMI handlers are not re-entrant | |
1385 | * anyway. | |
1386 | * | |
1387 | * To handle this case we do the following: | |
1388 | * Check a special location on the stack that contains | |
1389 | * a variable that is set when NMIs are executing. | |
1390 | * The interrupted task's stack is also checked to see if it | |
1391 | * is an NMI stack. | |
1392 | * If the variable is not set and the stack is not the NMI | |
1393 | * stack then: | |
1394 | * o Set the special variable on the stack | |
1395 | * o Copy the interrupt frame into a "saved" location on the stack | |
1396 | * o Copy the interrupt frame into a "copy" location on the stack | |
1397 | * o Continue processing the NMI | |
1398 | * If the variable is set or the previous stack is the NMI stack: | |
1399 | * o Modify the "copy" location to jump to repeat_nmi | |
1400 | * o return back to the first NMI | |
1401 | * | |
1402 | * Now on exit of the first NMI, we first clear the stack variable | |
1403 | * The NMI stack will tell any nested NMIs at that point that it is | |
1404 | * nested. Then we pop the stack normally with iret, and if there was | |
1405 | * a nested NMI that updated the copied interrupt stack frame, a | |
1406 | * jump will be made to the repeat_nmi code that will handle the second | |
1407 | * NMI. | |
1408 | */ | |
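
Condensed to C, the nesting decision described above is: an incoming NMI is treated as nested if the "NMI executing" variable is set, or if the interrupted stack pointer already lies within this CPU's NMI stack. A self-contained model (editorial; EXCEPTION_STKSZ is assumed to be the NMI stack size):

    #include <assert.h>
    #include <stdint.h>

    #define EXCEPTION_STKSZ 4096   /* assumed NMI stack size for the model */

    static int nmi_is_nested(uint64_t sp, uint64_t stack_top, int nmi_executing)
    {
            return nmi_executing ||
                   (sp <= stack_top && sp >= stack_top - EXCEPTION_STKSZ);
    }

    int main(void)
    {
            uint64_t top = 0x10000;
            assert(!nmi_is_nested(0x20000, top, 0)); /* other stack, flag clear */
            assert(nmi_is_nested(0x0ff00, top, 0));  /* within the NMI stack */
            assert(nmi_is_nested(0x20000, top, 1));  /* flag already set */
            return 0;
    }
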
1409 | ||
146b2b09 | 1410 | /* Use %rdx as our temp variable throughout */ |
3f3c8b8c | 1411 | pushq_cfi %rdx |
62610913 | 1412 | CFI_REL_OFFSET rdx, 0 |
3f3c8b8c | 1413 | |
45d5a168 SR |
1414 | /* |
1415 | * If %cs was not the kernel segment, then the NMI triggered in user | |
1416 | * space, which means it is definitely not nested. | |
1417 | */ | |
a38449ef | 1418 | cmpl $__KERNEL_CS, 16(%rsp) |
45d5a168 SR |
1419 | jne first_nmi |
1420 | ||
3f3c8b8c SR |
1421 | /* |
1422 | * Check the special variable on the stack to see if NMIs are | |
1423 | * executing. | |
1424 | */ | |
a38449ef | 1425 | cmpl $1, -8(%rsp) |
3f3c8b8c SR |
1426 | je nested_nmi |
1427 | ||
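/*
 * (Editor's note: -8(%rsp) is the "NMI executing" slot. NMIs enter via
 * an IST stack, so %rsp here is always the same fixed location, and the
 * variable lives one slot below the saved %rdx; see the frame layout
 * drawn at first_nmi.)
 */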
1428 | /* | |
1429 | * Now test if the previous stack was an NMI stack. | |
1430 | * We need the double check: we check the NMI stack to cover the |
1431 | * race where the first NMI clears the variable just before returning. |
1432 | * We check the variable because the first NMI could be in a | |
1433 | * breakpoint routine using a breakpoint stack. | |
1434 | */ | |
0784b364 DV |
1435 | lea 6*8(%rsp), %rdx |
1436 | /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */ | |
1437 | cmpq %rdx, 4*8(%rsp) | |
1438 | /* If the stack pointer is above the NMI stack, this is a normal NMI */ | |
1439 | ja first_nmi | |
1440 | subq $EXCEPTION_STKSZ, %rdx | |
1441 | cmpq %rdx, 4*8(%rsp) | |
1442 | /* If it is below the NMI stack, it is a normal NMI */ | |
1443 | jb first_nmi | |
1444 | /* Ah, it is within the NMI stack, treat it as nested */ | |
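/*
 * Editor's sketch of the range check above (C-like, illustration only;
 * nmi_stack_top stands for the value the lea computed into %rdx):
 *
 *	if (prev_rsp > nmi_stack_top ||
 *	    prev_rsp < nmi_stack_top - EXCEPTION_STKSZ)
 *		goto first_nmi;		// outside the NMI stack
 *	goto nested_nmi;		// inside it: treat as nested
 */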
0784b364 | 1445 | |
62610913 | 1446 | CFI_REMEMBER_STATE |
3f3c8b8c SR |
1447 | |
1448 | nested_nmi: | |
1449 | /* | |
1450 | * Do nothing if we interrupted the fixup in repeat_nmi. | |
1451 | * It's about to repeat the NMI handler, so we are fine | |
1452 | * with ignoring this one. | |
1453 | */ | |
1454 | movq $repeat_nmi, %rdx | |
1455 | cmpq 8(%rsp), %rdx | |
1456 | ja 1f | |
1457 | movq $end_repeat_nmi, %rdx | |
1458 | cmpq 8(%rsp), %rdx | |
1459 | ja nested_nmi_out | |
1460 | ||
1461 | 1: | |
1462 | /* Rewrite the interrupted NMI's iret frame so it jumps to repeat_nmi */ |
28696f43 | 1463 | leaq -1*8(%rsp), %rdx |
3f3c8b8c | 1464 | movq %rdx, %rsp |
28696f43 SQ |
1465 | CFI_ADJUST_CFA_OFFSET 1*8 |
1466 | leaq -10*8(%rsp), %rdx | |
3f3c8b8c SR |
1467 | pushq_cfi $__KERNEL_DS |
1468 | pushq_cfi %rdx | |
1469 | pushfq_cfi | |
1470 | pushq_cfi $__KERNEL_CS | |
1471 | pushq_cfi $repeat_nmi | |
1472 | ||
1473 | /* Put stack back */ | |
28696f43 SQ |
1474 | addq $(6*8), %rsp |
1475 | CFI_ADJUST_CFA_OFFSET -6*8 | |
3f3c8b8c SR |
1476 | |
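/*
 * (Editor's note: at this point the first NMI's "copy" iret frame has
 * been rewritten to SS=__KERNEL_DS, CS=__KERNEL_CS, RIP=repeat_nmi, so
 * when the first NMI eventually executes iret it lands in repeat_nmi
 * instead of returning to the originally interrupted context. The
 * nested NMI itself returns below through its own untouched hardware
 * frame.)
 */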
1477 | nested_nmi_out: | |
1478 | popq_cfi %rdx | |
62610913 | 1479 | CFI_RESTORE rdx |
3f3c8b8c SR |
1480 | |
1481 | /* No need to check faults here: this iret returns to the first NMI (kernel mode) */ |
1482 | INTERRUPT_RETURN | |
1483 | ||
62610913 | 1484 | CFI_RESTORE_STATE |
3f3c8b8c SR |
1485 | first_nmi: |
1486 | /* | |
1487 | * Because a nested NMI will reuse the stack slot where we |
1488 | * saved %rdx, we must keep that space available. |
1489 | * Here's what our stack frame will look like: | |
1490 | * +-------------------------+ | |
1491 | * | original SS | | |
1492 | * | original Return RSP | | |
1493 | * | original RFLAGS | | |
1494 | * | original CS | | |
1495 | * | original RIP | | |
1496 | * +-------------------------+ | |
1497 | * | temp storage for rdx | | |
1498 | * +-------------------------+ | |
1499 | * | NMI executing variable | | |
1500 | * +-------------------------+ | |
3f3c8b8c SR |
1501 | * | copied SS | |
1502 | * | copied Return RSP | | |
1503 | * | copied RFLAGS | | |
1504 | * | copied CS | | |
1505 | * | copied RIP | | |
1506 | * +-------------------------+ | |
28696f43 SQ |
1507 | * | Saved SS | |
1508 | * | Saved Return RSP | | |
1509 | * | Saved RFLAGS | | |
1510 | * | Saved CS | | |
1511 | * | Saved RIP | | |
1512 | * +-------------------------+ | |
3f3c8b8c SR |
1513 | * | pt_regs | |
1514 | * +-------------------------+ | |
1515 | * | |
79fb4ad6 SR |
1516 | * The saved stack frame is used to fix up the copied stack frame |
1517 | * that a nested NMI may change to make the interrupted NMI iret jump | |
1518 | * to repeat_nmi. The original stack frame and the temp storage |
3f3c8b8c SR |
1519 | * are also used by nested NMIs and cannot be trusted on exit. |
1520 | */ | |
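/*
 * (Editor's note, in slot terms of 8 bytes each, measured from %rsp at
 * the final irq_return: copied frame at 0..4*8, "NMI executing"
 * variable at 5*8, %rdx temp storage at 6*8, original frame at
 * 7*8..11*8. This is derived from the "movq $0, 5*8(%rsp)" clear at
 * nmi_restore, not stated explicitly in the original comment.)
 */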
79fb4ad6 | 1521 | /* Do not pop rdx, nested NMIs will corrupt that part of the stack */ |
62610913 JB |
1522 | movq (%rsp), %rdx |
1523 | CFI_RESTORE rdx | |
1524 | ||
3f3c8b8c SR |
1525 | /* Set the NMI executing variable on the stack. */ |
1526 | pushq_cfi $1 | |
1527 | ||
28696f43 SQ |
1528 | /* |
1529 | * Leave room for the "copied" frame | |
1530 | */ | |
1531 | subq $(5*8), %rsp | |
444723dc | 1532 | CFI_ADJUST_CFA_OFFSET 5*8 |
28696f43 | 1533 | |
3f3c8b8c SR |
1534 | /* Copy the stack frame to the Saved frame */ |
1535 | .rept 5 | |
28696f43 | 1536 | pushq_cfi 11*8(%rsp) |
3f3c8b8c | 1537 | .endr |
911d2bb5 | 1538 | CFI_DEF_CFA_OFFSET 5*8 |
62610913 | 1539 | |
79fb4ad6 SR |
1540 | /* Everything up to here is safe from nested NMIs */ |
1541 | ||
62610913 JB |
1542 | /* |
1543 | * If there was a nested NMI, the first NMI's iret will return | |
1544 | * here. But NMIs are still enabled and we can take another | |
1545 | * nested NMI. The nested NMI checks the interrupted RIP to see | |
1546 | * if it is between repeat_nmi and end_repeat_nmi, and if so | |
1547 | * it will just return, as we are about to repeat an NMI anyway. | |
1548 | * This makes it safe to copy to the stack frame that a nested | |
1549 | * NMI will update. | |
1550 | */ | |
1551 | repeat_nmi: | |
1552 | /* | |
1553 | * Update the stack variable to say we are still in NMI (the update | |
1554 | * is benign in the non-repeat case, where first_nmi pushed a 1 |
1555 | * into this very slot just above). |
1556 | */ | |
28696f43 | 1557 | movq $1, 10*8(%rsp) |
3f3c8b8c SR |
1558 | |
1559 | /* Make another copy, this one may be modified by nested NMIs */ | |
28696f43 SQ |
1560 | addq $(10*8), %rsp |
1561 | CFI_ADJUST_CFA_OFFSET -10*8 | |
3f3c8b8c | 1562 | .rept 5 |
28696f43 | 1563 | pushq_cfi -6*8(%rsp) |
3f3c8b8c | 1564 | .endr |
28696f43 | 1565 | subq $(5*8), %rsp |
911d2bb5 | 1566 | CFI_DEF_CFA_OFFSET 5*8 |
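/*
 * Editor's sketch of what repeat_nmi just did, in C-like pseudocode
 * (illustration only; names refer to the frame layout at first_nmi):
 *
 *	nmi_executing = 1;			// re-arm nesting detection
 *	memcpy(copy_frame, saved_frame, 5 * 8);	// fresh iret frame
 */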
62610913 | 1567 | end_repeat_nmi: |
3f3c8b8c SR |
1568 | |
1569 | /* | |
1570 | * Everything below this point can be preempted by a nested | |
79fb4ad6 SR |
1571 | * NMI if the first NMI took an exception and reset our iret stack |
1572 | * so that we repeat another NMI. | |
3f3c8b8c | 1573 | */ |
1fd466ef | 1574 | pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ |
76f5df43 DV |
1575 | ALLOC_PT_GPREGS_ON_STACK |
1576 | ||
1fd466ef | 1577 | /* |
ebfc453e | 1578 | * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit |
1fd466ef SR |
1579 | * as we should not be calling schedule in NMI context, |
1580 | * even with normal interrupts enabled. An NMI should not be |
1581 | * setting NEED_RESCHED or doing anything else that normal |
1582 | * interrupts and exceptions might do. |
1583 | */ | |
ebfc453e | 1584 | call paranoid_entry |
ddeb8f21 | 1585 | DEFAULT_FRAME 0 |
7fbb98c5 SR |
1586 | |
1587 | /* | |
1588 | * Save off the CR2 register. If we take a page fault in the NMI then | |
1589 | * it could corrupt the CR2 value. If the NMI preempts a page fault | |
1590 | * handler before it was able to read the CR2 register, and then the | |
1591 | * NMI itself takes a page fault, the page fault that was preempted | |
1592 | * will read the information from the NMI page fault and not the | |
1593 | * original fault. Save it off and restore it if it changes. |
1594 | * Use the r12 callee-saved register. | |
1595 | */ | |
1596 | movq %cr2, %r12 | |
1597 | ||
ddeb8f21 AH |
1598 | /* Open-coded "paranoidentry do_nmi, 0", without TRACE_IRQS_OFF */ |
1599 | movq %rsp,%rdi | |
1600 | movq $-1,%rsi | |
1601 | call do_nmi | |
7fbb98c5 SR |
1602 | |
1603 | /* Did the NMI take a page fault? Restore cr2 if it did */ | |
1604 | movq %cr2, %rcx | |
1605 | cmpq %rcx, %r12 | |
1606 | je 1f | |
1607 | movq %r12, %cr2 | |
1608 | 1: | |
1609 | ||
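/*
 * Editor's C sketch of the CR2 save/restore above (illustration only;
 * read_cr2/write_cr2 stand in for the mov to/from %cr2):
 *
 *	unsigned long saved_cr2 = read_cr2();
 *	do_nmi(regs, -1);
 *	if (read_cr2() != saved_cr2)
 *		write_cr2(saved_cr2);
 */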
ddeb8f21 AH |
1610 | testl %ebx,%ebx /* swapgs needed? */ |
1611 | jnz nmi_restore | |
ddeb8f21 AH |
1612 | nmi_swapgs: |
1613 | SWAPGS_UNSAFE_STACK | |
1614 | nmi_restore: | |
76f5df43 DV |
1615 | RESTORE_EXTRA_REGS |
1616 | RESTORE_C_REGS | |
444723dc | 1617 | /* Pop the extra iret frame at once */ |
76f5df43 | 1618 | REMOVE_PT_GPREGS_FROM_STACK 6*8 |
28696f43 | 1619 | |
3f3c8b8c | 1620 | /* Clear the NMI executing stack variable */ |
28696f43 | 1621 | movq $0, 5*8(%rsp) |
ddeb8f21 | 1622 | jmp irq_return |
9f1e87ea | 1623 | CFI_ENDPROC |
ddeb8f21 AH |
1624 | END(nmi) |
1625 | ||
1626 | ENTRY(ignore_sysret) | |
1627 | CFI_STARTPROC | |
1628 | mov $-ENOSYS,%eax | |
1629 | sysret | |
1630 | CFI_ENDPROC | |
1631 | END(ignore_sysret) | |
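/*
 * (Editor's note: ignore_sysret is installed as the MSR_CSTAR target
 * when IA32 emulation is disabled, so a 32-bit SYSCALL simply returns
 * -ENOSYS; see the CPU setup code in cpu/common.c.)
 */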
1632 |