x86/entry: Split and inline prepare_exit_to_usermode()
arch/x86/entry/common.c
/*
 * common.c - C code for kernel entry and exit
 * Copyright (c) 2015 Andrew Lutomirski
 * GPL v2
 *
 * Based on asm and ptrace code by many authors. The code here originated
 * in ptrace.c and signal.c.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/user-return-notifier.h>
#include <linux/uprobes.h>

#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/vdso.h>
#include <asm/uaccess.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

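/*
 * The user-mode pt_regs live at the very top of the kernel stack (just
 * below TOP_OF_KERNEL_STACK_PADDING), and thread_info lives at the
 * bottom of the same THREAD_SIZE-aligned stack. Stepping past the regs,
 * adding the padding, and subtracting THREAD_SIZE therefore lands
 * exactly on thread_info, without touching any per-cpu state.
 */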
static struct thread_info *pt_regs_to_thread_info(struct pt_regs *regs)
{
	unsigned long top_of_stack =
		(unsigned long)(regs + 1) + TOP_OF_KERNEL_STACK_PADDING;
	return (struct thread_info *)(top_of_stack - THREAD_SIZE);
}

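/*
 * user_exit() below is what tells RCU and the context-tracking code that
 * the CPU has left user mode; until it runs, RCU is still in its
 * extended quiescent state and must not be used. This is why
 * syscall_trace_enter_phase1() calls enter_from_user_mode() before doing
 * anything else when _TIF_NOHZ is set.
 */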
#ifdef CONFIG_CONTEXT_TRACKING
/* Called on entry from user mode with IRQs off. */
__visible void enter_from_user_mode(void)
{
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit();
}
#endif

static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
{
#ifdef CONFIG_X86_64
	if (arch == AUDIT_ARCH_X86_64) {
		audit_syscall_entry(regs->orig_ax, regs->di,
				    regs->si, regs->dx, regs->r10);
	} else
#endif
	{
		audit_syscall_entry(regs->orig_ax, regs->bx,
				    regs->cx, regs->dx, regs->si);
	}
}

/*
 * We can return 0 to resume the syscall or anything else to go to phase
 * 2. If we resume the syscall, we need to put something appropriate in
 * regs->orig_ax.
 *
 * NB: We don't have full pt_regs here, but regs->orig_ax and regs->ax
 * are fully functional.
 *
 * For phase 2's benefit, our return value is:
 *	0:		resume the syscall
 *	1:		go to phase 2; no seccomp phase 2 needed
 *	anything else:	go to phase 2; pass return value to seccomp
 */
unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch)
{
	struct thread_info *ti = pt_regs_to_thread_info(regs);
	unsigned long ret = 0;
	u32 work;

	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
		BUG_ON(regs != task_pt_regs(current));

	work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;

#ifdef CONFIG_CONTEXT_TRACKING
	/*
	 * If TIF_NOHZ is set, we are required to call user_exit() before
	 * doing anything that could touch RCU.
	 */
	if (work & _TIF_NOHZ) {
		enter_from_user_mode();
		work &= ~_TIF_NOHZ;
	}
#endif

#ifdef CONFIG_SECCOMP
	/*
	 * Do seccomp first -- it should minimize exposure of other
	 * code, and keeping seccomp fast is probably more valuable
	 * than the rest of this.
	 */
	if (work & _TIF_SECCOMP) {
		struct seccomp_data sd;

		sd.arch = arch;
		sd.nr = regs->orig_ax;
		sd.instruction_pointer = regs->ip;
#ifdef CONFIG_X86_64
		if (arch == AUDIT_ARCH_X86_64) {
			sd.args[0] = regs->di;
			sd.args[1] = regs->si;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->r10;
			sd.args[4] = regs->r8;
			sd.args[5] = regs->r9;
		} else
#endif
		{
			sd.args[0] = regs->bx;
			sd.args[1] = regs->cx;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->si;
			sd.args[4] = regs->di;
			sd.args[5] = regs->bp;
		}

		BUILD_BUG_ON(SECCOMP_PHASE1_OK != 0);
		BUILD_BUG_ON(SECCOMP_PHASE1_SKIP != 1);

		ret = seccomp_phase1(&sd);
		if (ret == SECCOMP_PHASE1_SKIP) {
			regs->orig_ax = -1;
			ret = 0;
		} else if (ret != SECCOMP_PHASE1_OK) {
			return ret;	/* Go directly to phase 2 */
		}

		work &= ~_TIF_SECCOMP;
	}
#endif

	/* Do our best to finish without phase 2. */
	if (work == 0)
		return ret;	/* seccomp and/or nohz only (ret == 0 here) */

#ifdef CONFIG_AUDITSYSCALL
	if (work == _TIF_SYSCALL_AUDIT) {
		/*
		 * If there is no more work to be done except auditing,
		 * then audit in phase 1. Phase 2 always audits, so, if
		 * we audit here, then we can't go on to phase 2.
		 */
		do_audit_syscall_entry(regs, arch);
		return 0;
	}
#endif

	return 1;	/* Something is enabled that we can't handle in phase 1 */
}
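
/*
 * A sketch of the intended calling pattern -- this mirrors
 * syscall_trace_enter() below, and the asm fast path can make the same
 * two calls separately:
 *
 *	ret = syscall_trace_enter_phase1(regs, arch);
 *	if (ret == 0)
 *		nr = regs->orig_ax;	// resume: run the syscall
 *	else
 *		nr = syscall_trace_enter_phase2(regs, arch, ret);
 */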

/* Returns the syscall nr to run (which should match regs->orig_ax). */
long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
				unsigned long phase1_result)
{
	struct thread_info *ti = pt_regs_to_thread_info(regs);
	long ret = 0;
	u32 work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;

	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
		BUG_ON(regs != task_pt_regs(current));

	/*
	 * If we stepped into a sysenter/syscall insn, it trapped in
	 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
	 * If user-mode had set TF itself, then it's still clear from
	 * do_debug() and we need to set it again to restore the user
	 * state. If we entered on the slow path, TF was already set.
	 */
	if (work & _TIF_SINGLESTEP)
		regs->flags |= X86_EFLAGS_TF;

#ifdef CONFIG_SECCOMP
	/*
	 * Call seccomp_phase2 before running the other hooks so that
	 * they can see any changes made by a seccomp tracer.
	 */
	if (phase1_result > 1 && seccomp_phase2(phase1_result)) {
		/* seccomp failures shouldn't expose any additional code. */
		return -1;
	}
#endif

	if (unlikely(work & _TIF_SYSCALL_EMU))
		ret = -1L;

	if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->orig_ax);

	do_audit_syscall_entry(regs, arch);

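	/*
	 * GNU C's "x ?: y" evaluates to x if x is nonzero, else y: return
	 * -1 to skip the syscall if any hook asked for that, otherwise
	 * return the (possibly rewritten) syscall number from orig_ax.
	 */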
	return ret ?: regs->orig_ax;
}

long syscall_trace_enter(struct pt_regs *regs)
{
	u32 arch = is_ia32_task() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;
	unsigned long phase1_result = syscall_trace_enter_phase1(regs, arch);

	if (phase1_result == 0)
		return regs->orig_ax;
	else
		return syscall_trace_enter_phase2(regs, arch, phase1_result);
}

#define EXIT_TO_USERMODE_LOOP_FLAGS				\
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |	\
	 _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY)

static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
	/*
	 * In order to return to user mode, we need to have IRQs off with
	 * none of _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_USER_RETURN_NOTIFY,
	 * _TIF_UPROBE, or _TIF_NEED_RESCHED set. Several of these flags
	 * can be set at any time on preemptable kernels if we have IRQs on,
	 * so we need to loop. Disabling preemption wouldn't help: doing the
	 * work to clear some of the flags can sleep.
	 */
	while (true) {
		/* We have work to do. */
		local_irq_enable();

		if (cached_flags & _TIF_NEED_RESCHED)
			schedule();

		if (cached_flags & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		/* deal with pending signal delivery */
		if (cached_flags & _TIF_SIGPENDING)
			do_signal(regs);

		if (cached_flags & _TIF_NOTIFY_RESUME) {
			clear_thread_flag(TIF_NOTIFY_RESUME);
			tracehook_notify_resume(regs);
		}

		if (cached_flags & _TIF_USER_RETURN_NOTIFY)
			fire_user_return_notifiers();

		/* Disable IRQs and retry */
		local_irq_disable();

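		/*
		 * Re-read the flags with IRQs off: with interrupts
		 * disabled, no new work can sneak in between this read
		 * and the return to user mode. An interrupt that arrives
		 * later will do its own exit-to-usermode pass.
		 */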
		cached_flags = READ_ONCE(pt_regs_to_thread_info(regs)->flags);

		if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
			break;
	}
}

/* Called with IRQs disabled. */
__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
{
	u32 cached_flags;

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
		local_irq_disable();

	lockdep_sys_exit();

	cached_flags = READ_ONCE(pt_regs_to_thread_info(regs)->flags);

	if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
		exit_to_usermode_loop(regs, cached_flags);

	user_enter();
}
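
/*
 * Note that prepare_exit_to_usermode() is both __visible (the asm entry
 * code calls it directly) and inline (so its one C caller,
 * syscall_return_slowpath() below, can absorb the call). That split --
 * rare slow-path work pushed out to exit_to_usermode_loop(), the common
 * checks inlined -- is what the commit subject above refers to.
 */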

/*
 * Called with IRQs on and fully valid regs. Returns with IRQs off in a
 * state such that we can immediately switch to user mode.
 */
__visible void syscall_return_slowpath(struct pt_regs *regs)
{
	struct thread_info *ti = pt_regs_to_thread_info(regs);
	u32 cached_flags = READ_ONCE(ti->flags);
	bool step;

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
	    WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
		local_irq_enable();

	/*
	 * First do one-time work. If these work items are enabled, we
	 * want to run them exactly once per syscall exit with IRQs on.
	 */
	if (cached_flags & (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |
			    _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)) {
		audit_syscall_exit(regs);

		if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
			trace_sys_exit(regs, regs->ax);

		/*
		 * If TIF_SYSCALL_EMU is set, we only get here because of
		 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
		 * We already reported this syscall instruction in
		 * syscall_trace_enter().
		 */
		step = unlikely(
			(cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
			== _TIF_SINGLESTEP);
		if (step || cached_flags & _TIF_SYSCALL_TRACE)
			tracehook_report_syscall_exit(regs, step);
	}

#ifdef CONFIG_COMPAT
	/*
	 * Compat syscalls set TS_COMPAT. Make sure we clear it before
	 * returning to user mode.
	 */
	ti->status &= ~TS_COMPAT;
#endif

	local_irq_disable();
	prepare_exit_to_usermode(regs);
}

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
/*
 * Does a 32-bit syscall. Called with IRQs on and does all entry and
 * exit work and returns with IRQs off. This function is extremely hot
 * in workloads that use it, and it's usually called from
 * do_fast_syscall_32, so forcibly inline it to improve performance.
 */
static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
{
	struct thread_info *ti = pt_regs_to_thread_info(regs);
	unsigned int nr = (unsigned int)regs->orig_ax;

#ifdef CONFIG_IA32_EMULATION
	ti->status |= TS_COMPAT;
#endif

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
		/*
		 * Subtlety here: if ptrace pokes something larger than
		 * 2^32-1 into orig_ax, this truncates it. This may or
		 * may not be necessary, but it matches the old asm
		 * behavior.
		 */
		nr = syscall_trace_enter(regs);
	}

	if (likely(nr < IA32_NR_syscalls)) {
		/*
		 * It's possible that a 32-bit syscall implementation
		 * takes a 64-bit parameter but nonetheless assumes that
		 * the high bits are zero. Make sure we zero-extend all
		 * of the args.
		 */
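		/*
		 * E.g. if ptrace poked 0xdead0000cafef00d into regs->bx,
		 * the (unsigned int) cast below passes 0xcafef00d: the
		 * low 32 bits, zero-extended to the parameter width.
		 */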
		regs->ax = ia32_sys_call_table[nr](
			(unsigned int)regs->bx, (unsigned int)regs->cx,
			(unsigned int)regs->dx, (unsigned int)regs->si,
			(unsigned int)regs->di, (unsigned int)regs->bp);
	}

	syscall_return_slowpath(regs);
}

/* Handles int $0x80 */
__visible void do_int80_syscall_32(struct pt_regs *regs)
{
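	/*
	 * The asm entry code leaves IRQs off here; the entry and exit
	 * work done by do_syscall_32_irqs_on() can sleep, so enable
	 * them first.
	 */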
	local_irq_enable();
	do_syscall_32_irqs_on(regs);
}

/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible long do_fast_syscall_32(struct pt_regs *regs)
{
	/*
	 * Called using the internal vDSO SYSENTER/SYSCALL32 calling
	 * convention. Adjust regs so it looks like we entered using int80.
	 */

	unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
		vdso_image_32.sym_int80_landing_pad;

	/*
	 * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
	 * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
	 * Fix it up.
	 */
	regs->ip = landing_pad;

	/*
	 * Fetch ECX from where the vDSO stashed it.
	 *
	 * WARNING: We are in CONTEXT_USER and RCU isn't paying attention!
	 */
	local_irq_enable();
	if (
#ifdef CONFIG_X86_64
		/*
		 * Micro-optimization: the pointer we're following is explicitly
		 * 32 bits, so it can't be out of range.
		 */
		__get_user(*(u32 *)&regs->cx,
			   (u32 __user __force *)(unsigned long)(u32)regs->sp)
#else
		get_user(*(u32 *)&regs->cx,
			 (u32 __user __force *)(unsigned long)(u32)regs->sp)
#endif
		) {

		/* User code screwed up. */
		local_irq_disable();
		regs->ax = -EFAULT;
#ifdef CONFIG_CONTEXT_TRACKING
		enter_from_user_mode();
#endif
		prepare_exit_to_usermode(regs);
		return 0;	/* Keep it simple: use IRET. */
	}

	/* Now this is just like a normal syscall. */
	do_syscall_32_irqs_on(regs);

#ifdef CONFIG_X86_64
	/*
	 * Opportunistic SYSRETL: if possible, try to return using SYSRETL.
	 * SYSRETL is available on all 64-bit CPUs, so we don't need to
	 * bother with SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 */
	return regs->cs == __USER32_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)) == 0;
#else
	/*
	 * Opportunistic SYSEXIT: if possible, try to return using SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 *
	 * We don't allow syscalls at all from VM86 mode, but we still
	 * need to check VM, because we might be returning from sys_vm86.
	 */
	return static_cpu_has(X86_FEATURE_SEP) &&
		regs->cs == __USER_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
#endif
}
#endif