x86/entry/64/compat: Migrate the body of the syscall entry to C
[deliverable/linux.git] / arch / x86 / entry / common.c
CommitLineData
1f484aa6
AL
1/*
2 * common.c - C code for kernel entry and exit
3 * Copyright (c) 2015 Andrew Lutomirski
4 * GPL v2
5 *
6 * Based on asm and ptrace code by many authors. The code here originated
7 * in ptrace.c and signal.c.
8 */
9
10#include <linux/kernel.h>
11#include <linux/sched.h>
12#include <linux/mm.h>
13#include <linux/smp.h>
14#include <linux/errno.h>
15#include <linux/ptrace.h>
16#include <linux/tracehook.h>
17#include <linux/audit.h>
18#include <linux/seccomp.h>
19#include <linux/signal.h>
20#include <linux/export.h>
21#include <linux/context_tracking.h>
22#include <linux/user-return-notifier.h>
23#include <linux/uprobes.h>
24
25#include <asm/desc.h>
26#include <asm/traps.h>
27
28#define CREATE_TRACE_POINTS
29#include <trace/events/syscalls.h>
30
feed36cd
AL
#ifdef CONFIG_CONTEXT_TRACKING
/*
 * Called on entry from user mode with IRQs off.
 *
 * Tells the context-tracking subsystem that we are leaving user mode
 * (CONTEXT_USER) and entering the kernel, so RCU may be used again.
 * Must run before any code that could touch RCU.
 */
__visible void enter_from_user_mode(void)
{
	/* We should only be called while still in CONTEXT_USER. */
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit();
}
#endif
39
1f484aa6
AL
40static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
41{
42#ifdef CONFIG_X86_64
43 if (arch == AUDIT_ARCH_X86_64) {
44 audit_syscall_entry(regs->orig_ax, regs->di,
45 regs->si, regs->dx, regs->r10);
46 } else
47#endif
48 {
49 audit_syscall_entry(regs->orig_ax, regs->bx,
50 regs->cx, regs->dx, regs->si);
51 }
52}
53
54/*
55 * We can return 0 to resume the syscall or anything else to go to phase
56 * 2. If we resume the syscall, we need to put something appropriate in
57 * regs->orig_ax.
58 *
59 * NB: We don't have full pt_regs here, but regs->orig_ax and regs->ax
60 * are fully functional.
61 *
62 * For phase 2's benefit, our return value is:
63 * 0: resume the syscall
64 * 1: go to phase 2; no seccomp phase 2 needed
65 * anything else: go to phase 2; pass return value to seccomp
66 */
67unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch)
68{
69 unsigned long ret = 0;
70 u32 work;
71
72 BUG_ON(regs != task_pt_regs(current));
73
74 work = ACCESS_ONCE(current_thread_info()->flags) &
75 _TIF_WORK_SYSCALL_ENTRY;
76
feed36cd 77#ifdef CONFIG_CONTEXT_TRACKING
1f484aa6
AL
78 /*
79 * If TIF_NOHZ is set, we are required to call user_exit() before
80 * doing anything that could touch RCU.
81 */
82 if (work & _TIF_NOHZ) {
feed36cd 83 enter_from_user_mode();
1f484aa6
AL
84 work &= ~_TIF_NOHZ;
85 }
feed36cd 86#endif
1f484aa6
AL
87
88#ifdef CONFIG_SECCOMP
89 /*
90 * Do seccomp first -- it should minimize exposure of other
91 * code, and keeping seccomp fast is probably more valuable
92 * than the rest of this.
93 */
94 if (work & _TIF_SECCOMP) {
95 struct seccomp_data sd;
96
97 sd.arch = arch;
98 sd.nr = regs->orig_ax;
99 sd.instruction_pointer = regs->ip;
100#ifdef CONFIG_X86_64
101 if (arch == AUDIT_ARCH_X86_64) {
102 sd.args[0] = regs->di;
103 sd.args[1] = regs->si;
104 sd.args[2] = regs->dx;
105 sd.args[3] = regs->r10;
106 sd.args[4] = regs->r8;
107 sd.args[5] = regs->r9;
108 } else
109#endif
110 {
111 sd.args[0] = regs->bx;
112 sd.args[1] = regs->cx;
113 sd.args[2] = regs->dx;
114 sd.args[3] = regs->si;
115 sd.args[4] = regs->di;
116 sd.args[5] = regs->bp;
117 }
118
119 BUILD_BUG_ON(SECCOMP_PHASE1_OK != 0);
120 BUILD_BUG_ON(SECCOMP_PHASE1_SKIP != 1);
121
122 ret = seccomp_phase1(&sd);
123 if (ret == SECCOMP_PHASE1_SKIP) {
124 regs->orig_ax = -1;
125 ret = 0;
126 } else if (ret != SECCOMP_PHASE1_OK) {
127 return ret; /* Go directly to phase 2 */
128 }
129
130 work &= ~_TIF_SECCOMP;
131 }
132#endif
133
134 /* Do our best to finish without phase 2. */
135 if (work == 0)
136 return ret; /* seccomp and/or nohz only (ret == 0 here) */
137
138#ifdef CONFIG_AUDITSYSCALL
139 if (work == _TIF_SYSCALL_AUDIT) {
140 /*
141 * If there is no more work to be done except auditing,
142 * then audit in phase 1. Phase 2 always audits, so, if
143 * we audit here, then we can't go on to phase 2.
144 */
145 do_audit_syscall_entry(regs, arch);
146 return 0;
147 }
148#endif
149
150 return 1; /* Something is enabled that we can't handle in phase 1 */
151}
152
153/* Returns the syscall nr to run (which should match regs->orig_ax). */
154long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
155 unsigned long phase1_result)
156{
157 long ret = 0;
158 u32 work = ACCESS_ONCE(current_thread_info()->flags) &
159 _TIF_WORK_SYSCALL_ENTRY;
160
161 BUG_ON(regs != task_pt_regs(current));
162
163 /*
164 * If we stepped into a sysenter/syscall insn, it trapped in
165 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
166 * If user-mode had set TF itself, then it's still clear from
167 * do_debug() and we need to set it again to restore the user
168 * state. If we entered on the slow path, TF was already set.
169 */
170 if (work & _TIF_SINGLESTEP)
171 regs->flags |= X86_EFLAGS_TF;
172
173#ifdef CONFIG_SECCOMP
174 /*
175 * Call seccomp_phase2 before running the other hooks so that
176 * they can see any changes made by a seccomp tracer.
177 */
178 if (phase1_result > 1 && seccomp_phase2(phase1_result)) {
179 /* seccomp failures shouldn't expose any additional code. */
180 return -1;
181 }
182#endif
183
184 if (unlikely(work & _TIF_SYSCALL_EMU))
185 ret = -1L;
186
187 if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
188 tracehook_report_syscall_entry(regs))
189 ret = -1L;
190
191 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
192 trace_sys_enter(regs, regs->orig_ax);
193
194 do_audit_syscall_entry(regs, arch);
195
196 return ret ?: regs->orig_ax;
197}
198
199long syscall_trace_enter(struct pt_regs *regs)
200{
201 u32 arch = is_ia32_task() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;
202 unsigned long phase1_result = syscall_trace_enter_phase1(regs, arch);
203
204 if (phase1_result == 0)
205 return regs->orig_ax;
206 else
207 return syscall_trace_enter_phase2(regs, arch, phase1_result);
208}
209
c5c46f59
AL
210static struct thread_info *pt_regs_to_thread_info(struct pt_regs *regs)
211{
212 unsigned long top_of_stack =
213 (unsigned long)(regs + 1) + TOP_OF_KERNEL_STACK_PADDING;
214 return (struct thread_info *)(top_of_stack - THREAD_SIZE);
215}
216
217/* Called with IRQs disabled. */
218__visible void prepare_exit_to_usermode(struct pt_regs *regs)
219{
220 if (WARN_ON(!irqs_disabled()))
221 local_irq_disable();
222
72f92478
AL
223 lockdep_sys_exit();
224
c5c46f59
AL
225 /*
226 * In order to return to user mode, we need to have IRQs off with
227 * none of _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_USER_RETURN_NOTIFY,
228 * _TIF_UPROBE, or _TIF_NEED_RESCHED set. Several of these flags
229 * can be set at any time on preemptable kernels if we have IRQs on,
230 * so we need to loop. Disabling preemption wouldn't help: doing the
231 * work to clear some of the flags can sleep.
232 */
233 while (true) {
234 u32 cached_flags =
235 READ_ONCE(pt_regs_to_thread_info(regs)->flags);
236
237 if (!(cached_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME |
d132803e
AL
238 _TIF_UPROBE | _TIF_NEED_RESCHED |
239 _TIF_USER_RETURN_NOTIFY)))
c5c46f59
AL
240 break;
241
242 /* We have work to do. */
243 local_irq_enable();
244
245 if (cached_flags & _TIF_NEED_RESCHED)
246 schedule();
247
248 if (cached_flags & _TIF_UPROBE)
249 uprobe_notify_resume(regs);
250
251 /* deal with pending signal delivery */
252 if (cached_flags & _TIF_SIGPENDING)
253 do_signal(regs);
254
255 if (cached_flags & _TIF_NOTIFY_RESUME) {
256 clear_thread_flag(TIF_NOTIFY_RESUME);
257 tracehook_notify_resume(regs);
258 }
259
260 if (cached_flags & _TIF_USER_RETURN_NOTIFY)
261 fire_user_return_notifiers();
262
263 /* Disable IRQs and retry */
264 local_irq_disable();
265 }
266
267 user_enter();
268}
269
270/*
271 * Called with IRQs on and fully valid regs. Returns with IRQs off in a
272 * state such that we can immediately switch to user mode.
273 */
274__visible void syscall_return_slowpath(struct pt_regs *regs)
275{
276 struct thread_info *ti = pt_regs_to_thread_info(regs);
277 u32 cached_flags = READ_ONCE(ti->flags);
278 bool step;
279
280 CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
281
282 if (WARN(irqs_disabled(), "syscall %ld left IRQs disabled",
283 regs->orig_ax))
284 local_irq_enable();
285
286 /*
287 * First do one-time work. If these work items are enabled, we
288 * want to run them exactly once per syscall exit with IRQs on.
289 */
290 if (cached_flags & (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |
291 _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)) {
292 audit_syscall_exit(regs);
293
294 if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
295 trace_sys_exit(regs, regs->ax);
296
297 /*
298 * If TIF_SYSCALL_EMU is set, we only get here because of
299 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
300 * We already reported this syscall instruction in
301 * syscall_trace_enter().
302 */
303 step = unlikely(
304 (cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
305 == _TIF_SINGLESTEP);
306 if (step || cached_flags & _TIF_SYSCALL_TRACE)
307 tracehook_report_syscall_exit(regs, step);
308 }
309
310#ifdef CONFIG_COMPAT
311 /*
312 * Compat syscalls set TS_COMPAT. Make sure we clear it before
313 * returning to user mode.
314 */
315 ti->status &= ~TS_COMPAT;
316#endif
317
318 local_irq_disable();
319 prepare_exit_to_usermode(regs);
320}
bd2d3a3b
AL
321
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
/*
 * Does a 32-bit syscall.  Called with IRQs off and does all entry and
 * exit work.
 */
__visible void do_int80_syscall_32(struct pt_regs *regs)
{
	struct thread_info *ti = pt_regs_to_thread_info(regs);
	unsigned int nr = (unsigned int)regs->orig_ax;

#ifdef CONFIG_IA32_EMULATION
	/* Mark this task as running a compat syscall. */
	ti->status |= TS_COMPAT;
#endif

	local_irq_enable();

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
		/*
		 * Subtlety here: if ptrace pokes something larger than
		 * 2^32-1 into orig_ax, this truncates it.  This may or
		 * may not be necessary, but it matches the old asm
		 * behavior.
		 */
		nr = syscall_trace_enter(regs);
	}

	if (nr < IA32_NR_syscalls) {
		/*
		 * It's possible that a 32-bit syscall implementation
		 * takes a 64-bit parameter but nonetheless assumes that
		 * the high bits are zero.  Make sure we zero-extend all
		 * of the args.
		 */
		regs->ax = ia32_sys_call_table[nr](
			(unsigned int)regs->bx, (unsigned int)regs->cx,
			(unsigned int)regs->dx, (unsigned int)regs->si,
			(unsigned int)regs->di, (unsigned int)regs->bp);
	}

	syscall_return_slowpath(regs);
}
#endif
This page took 0.044938 seconds and 5 git commands to generate.