arch/x86/entry/common.c
/*
 * common.c - C code for kernel entry and exit
 * Copyright (c) 2015 Andrew Lutomirski
 * GPL v2
 *
 * Based on asm and ptrace code by many authors.  The code here originated
 * in ptrace.c and signal.c.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/user-return-notifier.h>
#include <linux/uprobes.h>

#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/vdso.h>
#include <asm/uaccess.h>
#include <asm/cpufeature.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

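/*
 * pt_regs lives at the top of the kernel stack, just below
 * TOP_OF_KERNEL_STACK_PADDING, so "regs + 1" plus the padding is the
 * top of the stack; thread_info sits THREAD_SIZE bytes below that, at
 * the bottom of the stack allocation.
 */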
static struct thread_info *pt_regs_to_thread_info(struct pt_regs *regs)
{
        unsigned long top_of_stack =
                (unsigned long)(regs + 1) + TOP_OF_KERNEL_STACK_PADDING;
        return (struct thread_info *)(top_of_stack - THREAD_SIZE);
}

#ifdef CONFIG_CONTEXT_TRACKING
/* Called on entry from user mode with IRQs off. */
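/*
 * user_exit_irqoff() tells context tracking (and thus RCU) that we are
 * now running kernel code; CT_WARN_ON() catches entry paths that were
 * not actually in CONTEXT_USER when they got here.
 */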
__visible inline void enter_from_user_mode(void)
{
        CT_WARN_ON(ct_state() != CONTEXT_USER);
        user_exit_irqoff();
}
#else
static inline void enter_from_user_mode(void) {}
#endif
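/*
 * The audit record captures the syscall number and its first four
 * arguments, read from the registers each ABI uses for argument
 * passing: rdi/rsi/rdx/r10 for 64-bit syscalls, ebx/ecx/edx/esi for
 * 32-bit ones.
 */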
static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
{
#ifdef CONFIG_X86_64
        if (arch == AUDIT_ARCH_X86_64) {
                audit_syscall_entry(regs->orig_ax, regs->di,
                                    regs->si, regs->dx, regs->r10);
        } else
#endif
        {
                audit_syscall_entry(regs->orig_ax, regs->bx,
                                    regs->cx, regs->dx, regs->si);
        }
}

/*
 * Returns the syscall nr to run (which should match regs->orig_ax) or -1
 * to skip the syscall.
 */
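/*
 * Note the ordering below: ptrace/emulation runs first so a tracer can
 * rewrite the syscall before seccomp filters it, and the tracepoint
 * and audit hooks run last, on the final syscall number.
 */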
static long syscall_trace_enter(struct pt_regs *regs)
{
        u32 arch = in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;

        struct thread_info *ti = pt_regs_to_thread_info(regs);
        unsigned long ret = 0;
        bool emulated = false;
        u32 work;

        if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
                BUG_ON(regs != task_pt_regs(current));

        work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;

        if (unlikely(work & _TIF_SYSCALL_EMU))
                emulated = true;

        if ((emulated || (work & _TIF_SYSCALL_TRACE)) &&
            tracehook_report_syscall_entry(regs))
                return -1L;

        if (emulated)
                return -1L;

#ifdef CONFIG_SECCOMP
        /*
         * Do seccomp after ptrace, to catch any tracer changes.
         */
        if (work & _TIF_SECCOMP) {
                struct seccomp_data sd;

                sd.arch = arch;
                sd.nr = regs->orig_ax;
                sd.instruction_pointer = regs->ip;
#ifdef CONFIG_X86_64
                if (arch == AUDIT_ARCH_X86_64) {
                        sd.args[0] = regs->di;
                        sd.args[1] = regs->si;
                        sd.args[2] = regs->dx;
                        sd.args[3] = regs->r10;
                        sd.args[4] = regs->r8;
                        sd.args[5] = regs->r9;
                } else
#endif
                {
                        sd.args[0] = regs->bx;
                        sd.args[1] = regs->cx;
                        sd.args[2] = regs->dx;
                        sd.args[3] = regs->si;
                        sd.args[4] = regs->di;
                        sd.args[5] = regs->bp;
                }

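                /*
                 * A -1 return means the filter wants the syscall
                 * skipped; it may already have set regs->ax to the
                 * errno the filter chose.  0 lets the syscall run.
                 */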
                ret = __secure_computing(&sd);
                if (ret == -1)
                        return ret;
        }
#endif

        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->orig_ax);

        do_audit_syscall_entry(regs, arch);

        return ret ?: regs->orig_ax;
}

#define EXIT_TO_USERMODE_LOOP_FLAGS                             \
        (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |  \
         _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY)

static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
        /*
         * In order to return to user mode, we need to have IRQs off with
         * none of _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_USER_RETURN_NOTIFY,
         * _TIF_UPROBE, or _TIF_NEED_RESCHED set.  Several of these flags
         * can be set at any time on preemptable kernels if we have IRQs on,
         * so we need to loop.  Disabling preemption wouldn't help: doing the
         * work to clear some of the flags can sleep.
         */
        while (true) {
                /* We have work to do. */
                local_irq_enable();

                if (cached_flags & _TIF_NEED_RESCHED)
                        schedule();

                if (cached_flags & _TIF_UPROBE)
                        uprobe_notify_resume(regs);

                /* deal with pending signal delivery */
                if (cached_flags & _TIF_SIGPENDING)
                        do_signal(regs);

                if (cached_flags & _TIF_NOTIFY_RESUME) {
                        clear_thread_flag(TIF_NOTIFY_RESUME);
                        tracehook_notify_resume(regs);
                }

                if (cached_flags & _TIF_USER_RETURN_NOTIFY)
                        fire_user_return_notifiers();

                /* Disable IRQs and retry */
                local_irq_disable();

                cached_flags = READ_ONCE(pt_regs_to_thread_info(regs)->flags);

                if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
                        break;
        }
}

/* Called with IRQs disabled. */
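/*
 * This is the common final step for every return to user mode: the
 * syscall exit path below and the interrupt/exception return paths in
 * the entry asm all funnel through here.
 */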
__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
{
        struct thread_info *ti = pt_regs_to_thread_info(regs);
        u32 cached_flags;

        if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
                local_irq_disable();

        lockdep_sys_exit();

        cached_flags = READ_ONCE(ti->flags);

        if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
                exit_to_usermode_loop(regs, cached_flags);

#ifdef CONFIG_COMPAT
        /*
         * Compat syscalls set TS_COMPAT.  Make sure we clear it before
         * returning to user mode.  We need to clear it *after* signal
         * handling, because syscall restart has a fixup for compat
         * syscalls.  The fixup is exercised by the ptrace_syscall_32
         * selftest.
         *
         * We also need to clear TS_I386_REGS_POKED: the 32-bit tracer
         * special case only applies after poking regs and before the
         * very next return to user mode.
         */
        ti->status &= ~(TS_COMPAT|TS_I386_REGS_POKED);
#endif

        user_enter_irqoff();
}

#define SYSCALL_EXIT_WORK_FLAGS                         \
        (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |      \
         _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)
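/*
 * Unlike the EXIT_TO_USERMODE_LOOP_FLAGS work, these items run exactly
 * once per syscall exit, with IRQs still on.
 */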

static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
{
        bool step;

        audit_syscall_exit(regs);

        if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
                trace_sys_exit(regs, regs->ax);

        /*
         * If TIF_SYSCALL_EMU is set, we only get here because of
         * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
         * We already reported this syscall instruction in
         * syscall_trace_enter().
         */
        step = unlikely(
                (cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
                == _TIF_SINGLESTEP);
        if (step || cached_flags & _TIF_SYSCALL_TRACE)
                tracehook_report_syscall_exit(regs, step);
}

/*
 * Called with IRQs on and fully valid regs.  Returns with IRQs off in a
 * state such that we can immediately switch to user mode.
 */
__visible inline void syscall_return_slowpath(struct pt_regs *regs)
{
        struct thread_info *ti = pt_regs_to_thread_info(regs);
        u32 cached_flags = READ_ONCE(ti->flags);

        CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

        if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
            WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
                local_irq_enable();

        /*
         * First do one-time work.  If these work items are enabled, we
         * want to run them exactly once per syscall exit with IRQs on.
         */
        if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
                syscall_slow_exit_work(regs, cached_flags);

        local_irq_disable();
        prepare_exit_to_usermode(regs);
}

#ifdef CONFIG_X86_64
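/*
 * 64-bit syscall entry, reached from the entry_SYSCALL_64 asm stub
 * once the user registers have been saved into pt_regs.
 */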
__visible void do_syscall_64(struct pt_regs *regs)
{
        struct thread_info *ti = pt_regs_to_thread_info(regs);
        unsigned long nr = regs->orig_ax;

        enter_from_user_mode();
        local_irq_enable();

        if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
                nr = syscall_trace_enter(regs);

        /*
         * NB: Native and x32 syscalls are dispatched from the same
         * table.  The only functional difference is the x32 bit in
         * regs->orig_ax, which changes the behavior of some syscalls.
         */
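        /*
         * With CONFIG_X86_X32_ABI, __SYSCALL_MASK clears __X32_SYSCALL_BIT
         * (bit 30) so that x32 syscall numbers index the shared table;
         * otherwise the mask is ~0 and this is a plain range check.
         */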
        if (likely((nr & __SYSCALL_MASK) < NR_syscalls)) {
                regs->ax = sys_call_table[nr & __SYSCALL_MASK](
                        regs->di, regs->si, regs->dx,
                        regs->r10, regs->r8, regs->r9);
        }

        syscall_return_slowpath(regs);
}
#endif

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
/*
 * Does a 32-bit syscall.  Called with IRQs on in CONTEXT_KERNEL.  Does
 * all entry and exit work and returns with IRQs off.  This function is
 * extremely hot in workloads that use it, and it's usually called from
 * do_fast_syscall_32, so forcibly inline it to improve performance.
 */
static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
{
        struct thread_info *ti = pt_regs_to_thread_info(regs);
        unsigned int nr = (unsigned int)regs->orig_ax;

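        /*
         * TS_COMPAT marks this task as being inside a 32-bit syscall so
         * that in_ia32_syscall() (used by syscall_trace_enter()) reports
         * the right arch; prepare_exit_to_usermode() clears it again.
         */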
#ifdef CONFIG_IA32_EMULATION
        ti->status |= TS_COMPAT;
#endif

        if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
                /*
                 * Subtlety here: if ptrace pokes something larger than
                 * 2^32-1 into orig_ax, this truncates it.  This may or
                 * may not be necessary, but it matches the old asm
                 * behavior.
                 */
                nr = syscall_trace_enter(regs);
        }

        if (likely(nr < IA32_NR_syscalls)) {
                /*
                 * It's possible that a 32-bit syscall implementation
                 * takes a 64-bit parameter but nonetheless assumes that
                 * the high bits are zero.  Make sure we zero-extend all
                 * of the args.
                 */
                regs->ax = ia32_sys_call_table[nr](
                        (unsigned int)regs->bx, (unsigned int)regs->cx,
                        (unsigned int)regs->dx, (unsigned int)regs->si,
                        (unsigned int)regs->di, (unsigned int)regs->bp);
        }

        syscall_return_slowpath(regs);
}

/* Handles int $0x80 */
__visible void do_int80_syscall_32(struct pt_regs *regs)
{
        enter_from_user_mode();
        local_irq_enable();
        do_syscall_32_irqs_on(regs);
}

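/*
 * A rough sketch of the vDSO side of this handshake, for orientation
 * only (the real code, including the alternatives patching, lives in
 * arch/x86/entry/vdso/vdso32/system_call.S):
 *
 *	__kernel_vsyscall:
 *		push	%ecx
 *		push	%edx
 *		push	%ebp
 *		movl	%esp, %ebp	# or "movl %ecx, %ebp; syscall"
 *		sysenter
 *		int	$0x80		# fallback when neither is usable
 *	int80_landing_pad:
 *		pop	%ebp
 *		pop	%edx
 *		pop	%ecx
 *		ret
 */
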
/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible long do_fast_syscall_32(struct pt_regs *regs)
{
        /*
         * Called using the internal vDSO SYSENTER/SYSCALL32 calling
         * convention.  Adjust regs so it looks like we entered using int80.
         */

        unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
                vdso_image_32.sym_int80_landing_pad;

        /*
         * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
         * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
         * Fix it up.
         */
        regs->ip = landing_pad;

        enter_from_user_mode();

        local_irq_enable();

        /* Fetch EBP from where the vDSO stashed it. */
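        /*
         * The sixth syscall argument lives in EBP, but the vDSO has to
         * repurpose that register for its SYSENTER/SYSCALL sequence, so
         * it pushes the real value onto the user stack first; that saved
         * word is what regs->sp points at now.
         */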
        if (
#ifdef CONFIG_X86_64
                /*
                 * Micro-optimization: the pointer we're following is explicitly
                 * 32 bits, so it can't be out of range.
                 */
                __get_user(*(u32 *)&regs->bp,
                           (u32 __user __force *)(unsigned long)(u32)regs->sp)
#else
                get_user(*(u32 *)&regs->bp,
                         (u32 __user __force *)(unsigned long)(u32)regs->sp)
#endif
        ) {

                /* User code screwed up. */
                local_irq_disable();
                regs->ax = -EFAULT;
                prepare_exit_to_usermode(regs);
                return 0;       /* Keep it simple: use IRET. */
        }

        /* Now this is just like a normal syscall. */
        do_syscall_32_irqs_on(regs);

#ifdef CONFIG_X86_64
        /*
         * Opportunistic SYSRETL: if possible, try to return using SYSRETL.
         * SYSRETL is available on all 64-bit CPUs, so we don't need to
         * bother with SYSEXIT.
         *
         * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
         * because the ECX fixup above will ensure that this is essentially
         * never the case.
         */
        return regs->cs == __USER32_CS && regs->ss == __USER_DS &&
                regs->ip == landing_pad &&
                (regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)) == 0;
#else
        /*
         * Opportunistic SYSEXIT: if possible, try to return using SYSEXIT.
         *
         * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
         * because the ECX fixup above will ensure that this is essentially
         * never the case.
         *
         * We don't allow syscalls at all from VM86 mode, but we still
         * need to check VM, because we might be returning from sys_vm86.
         */
        return static_cpu_has(X86_FEATURE_SEP) &&
                regs->cs == __USER_CS && regs->ss == __USER_DS &&
                regs->ip == landing_pad &&
                (regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
#endif
}
#endif