Restartable sequences system call (v8)
kernel/rseq.c
/*
 * Restartable sequences system call
 *
 * Restartable sequences are a lightweight interface that allows
 * user-level code to be executed atomically relative to scheduler
 * preemption and signal delivery. Typically used for implementing
 * per-cpu operations.
 *
 * It allows user-space to perform update operations on per-cpu data
 * without requiring heavy-weight atomic operations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2015, Google, Inc.,
 * Paul Turner <pjt@google.com> and Andrew Hunter <ahh@google.com>
 * Copyright (C) 2015-2016, EfficiOS Inc.,
 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/rseq.h>
#include <linux/types.h>
#include <asm/ptrace.h>

/*
 * The restartable sequences mechanism is the overlap of two distinct
 * restart mechanisms: a sequence counter tracking preemption and signal
 * delivery for high-level code, and an ip-fixup-based mechanism for the
 * final assembly instruction sequence.
 *
 * A high-level summary of the algorithm to use rseq from user-space is
 * as follows:
 *
 * The high-level code between rseq_start() and rseq_finish() loads the
 * current value of the sequence counter in rseq_start(), and then
 * compares it against the current value within the rseq_finish()
 * restartable instruction sequence. Between rseq_start() and
 * rseq_finish(), the high-level code can perform operations that do not
 * have side-effects, such as getting the current CPU number and
 * loading from variables.
 *
 * Stores are performed at the very end of the restartable sequence
 * assembly block. Each assembly block within rseq_finish() defines a
 * "struct rseq_cs" descriptor which holds the start_ip and
 * post_commit_ip addresses, as well as the abort_ip address where the
 * kernel should move the thread instruction pointer if a rseq critical
 * section assembly block is preempted or if a signal is delivered on
 * top of a rseq critical section assembly block.
 *
 * Detailed algorithm of rseq use:
 *
 * rseq_start()
 *
 *   0.  Userspace loads the current event counter value from the
 *       event_counter field of the registered struct rseq TLS area.
 *
 * rseq_finish()
 *
 *   Steps [1]-[3] (inclusive) need to be a sequence of instructions in
 *   userspace that can handle being moved to the abort_ip between any
 *   of those instructions.
 *
 *   The abort_ip address needs to be less than start_ip, or greater
 *   than or equal to post_commit_ip. Step [4] and the failure code
 *   step [F1] need to be at addresses less than start_ip, or greater
 *   than or equal to post_commit_ip.
 *
 *       [start_ip]
 *   1.  Userspace stores the address of the struct rseq_cs assembly
 *       block descriptor into the rseq_cs field of the registered
 *       struct rseq TLS area. This update is performed through a single
 *       store, followed by a compiler barrier which prevents the
 *       compiler from moving following loads or stores before this
 *       store.
 *
 *   2.  Userspace tests whether the current event counter value
 *       matches the value loaded at [0], manually jumping to [F1] in
 *       case of a mismatch.
 *
 *       Note that if we are preempted or interrupted by a signal
 *       after [1] and before post_commit_ip, then the kernel also
 *       performs the comparison performed in [2], and conditionally
 *       clears the rseq_cs field of struct rseq, then jumps us to
 *       abort_ip.
 *
 *   3.  The final userspace critical section instruction before
 *       post_commit_ip is the commit. The critical section is
 *       self-terminating.
 *       [post_commit_ip]
 *
 *   4.  Userspace clears the rseq_cs field of the struct rseq
 *       TLS area.
 *
 *   5.  Return true.
 *
 *   On failure at [2]:
 *
 *   F1. Userspace clears the rseq_cs field of the struct rseq
 *       TLS area, then proceeds to step [F2].
 *
 *       [abort_ip]
 *   F2. Return false.
 */
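
/*
 * For reference, a minimal sketch of the user-visible structures whose
 * fields this file dereferences (rseq_cs, u.v, u.e.cpu_id,
 * u.e.event_counter). The authoritative layout lives in the uapi header
 * of this patch set; the exact field widths below are an assumption,
 * for illustration only.
 *
 *	struct rseq_cs {
 *		uint64_t start_ip;		first instruction of the asm block
 *		uint64_t post_commit_ip;	address just after the commit store
 *		uint64_t abort_ip;		where the kernel moves the thread ip
 *	};
 *
 *	struct rseq {
 *		union {
 *			struct {
 *				uint32_t cpu_id;
 *				uint32_t event_counter;
 *			} e;
 *			uint64_t v;		permits a single 64-bit update
 *		} u;
 *		uint64_t rseq_cs;		address of the active rseq_cs, or 0
 *	};
 */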

/*
 * The rseq_event_counter allows user-space to detect preemption and
 * signal delivery. It increments at least once before returning to
 * user-space if a thread is preempted or has a signal delivered. It is
 * not meant to be an exact counter of such events.
 *
 * Overflow of the event counter is not a problem in practice. It
 * increments at most once between each user-space thread instruction
 * executed, so we would need a thread to execute 2^32 instructions or
 * more between rseq_start() and rseq_finish(), while single-stepping,
 * for this to be an issue.
 *
 * On 64-bit architectures, both cpu_id and event_counter can be updated
 * with a single 64-bit store. On 32-bit architectures, __put_user() is
 * expected to perform two 32-bit single-copy stores to guarantee
 * single-copy atomicity semantics for other threads.
 */
static bool rseq_update_cpu_id_event_counter(struct task_struct *t)
{
	union rseq_cpu_event u;

	u.e.cpu_id = raw_smp_processor_id();
	u.e.event_counter = ++t->rseq_event_counter;
	if (__put_user(u.v, &t->rseq->u.v))
		return false;
	return true;
}
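
/*
 * User-space counterpart (an illustrative sketch only; the __rseq_abi
 * TLS symbol and helper names are assumptions, not part of this file):
 *
 *	static __thread volatile struct rseq __rseq_abi;
 *
 *	static inline uint32_t rseq_start(void)
 *	{
 *		uint32_t count = __rseq_abi.u.e.event_counter;
 *
 *		__asm__ __volatile__ ("" : : : "memory");
 *		return count;
 *	}
 *
 *	static inline int32_t rseq_current_cpu(void)
 *	{
 *		return (int32_t)__rseq_abi.u.e.cpu_id;
 *	}
 *
 * The compiler barrier after the event_counter load keeps the critical
 * section's loads and stores from being reordered before it; the saved
 * counter value is then re-checked against event_counter within the
 * rseq_finish() assembly block (step [2] above).
 */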

static bool rseq_get_rseq_cs(struct task_struct *t,
		void __user **start_ip,
		void __user **post_commit_ip,
		void __user **abort_ip)
{
	unsigned long ptr;
	struct rseq_cs __user *urseq_cs;
	struct rseq_cs rseq_cs;

	if (__get_user(ptr, &t->rseq->rseq_cs))
		return false;
	if (!ptr)
		return true;
	urseq_cs = (struct rseq_cs __user *)ptr;
	if (copy_from_user(&rseq_cs, urseq_cs, sizeof(rseq_cs)))
		return false;
	*start_ip = (void __user *)rseq_cs.start_ip;
	*post_commit_ip = (void __user *)rseq_cs.post_commit_ip;
	*abort_ip = (void __user *)rseq_cs.abort_ip;
	return true;
}

static bool rseq_ip_fixup(struct pt_regs *regs)
{
	struct task_struct *t = current;
	void __user *start_ip = NULL;
	void __user *post_commit_ip = NULL;
	void __user *abort_ip = NULL;

	if (!rseq_get_rseq_cs(t, &start_ip, &post_commit_ip, &abort_ip))
		return false;

	/* Handle potentially not being within a critical section. */
	if ((void __user *)instruction_pointer(regs) >= post_commit_ip ||
	    (void __user *)instruction_pointer(regs) < start_ip)
		return true;

	/*
	 * We need to clear rseq_cs upon entry into a signal handler
	 * nested on top of a rseq assembly block, so the signal handler
	 * will not be fixed up if it is itself interrupted by a nested
	 * signal handler or preempted.
	 */
	if (clear_user(&t->rseq->rseq_cs, sizeof(t->rseq->rseq_cs)))
		return false;

	/*
	 * We set the instruction pointer only after clear_user() has had
	 * a chance to fail, so that a resulting signal arrives at the
	 * faulting rip.
	 */
	instruction_pointer_set(regs, (unsigned long)abort_ip);
	return true;
}

/*
 * This resume handler should always be executed between any of:
 * - preemption,
 * - signal delivery,
 * and the return to user-space.
 *
 * This is how we can ensure that the entire rseq critical section,
 * consisting of both the C part and the assembly instruction sequence,
 * will issue the commit instruction only if executed atomically with
 * respect to other threads scheduled on the same CPU, and with respect
 * to signal handlers.
 */
void __rseq_handle_notify_resume(struct pt_regs *regs)
{
	struct task_struct *t = current;

	if (unlikely(t->flags & PF_EXITING))
		return;
	if (!access_ok(VERIFY_WRITE, t->rseq, sizeof(*t->rseq)))
		goto error;
	if (!rseq_update_cpu_id_event_counter(t))
		goto error;
	if (!rseq_ip_fixup(regs))
		goto error;
	return;

error:
	force_sig(SIGSEGV, t);
}
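
/*
 * Call-site note (an assumption about this patch set's plumbing, not
 * shown in this file): architectures are expected to invoke this
 * handler from their return-to-user-space path when notify-resume work
 * is pending, presumably through a thin wrapper along the lines of:
 *
 *	static inline void rseq_handle_notify_resume(struct pt_regs *regs)
 *	{
 *		if (current->rseq)
 *			__rseq_handle_notify_resume(regs);
 *	}
 */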

/*
 * sys_rseq - setup restartable sequences for caller thread.
 */
SYSCALL_DEFINE2(rseq, struct rseq __user *, rseq, int, flags)
{
	if (!rseq) {
		/* Unregister rseq for current thread. */
		if (unlikely(flags & ~RSEQ_FORCE_UNREGISTER))
			return -EINVAL;
		if (flags & RSEQ_FORCE_UNREGISTER) {
			current->rseq = NULL;
			current->rseq_refcount = 0;
			return 0;
		}
		if (!current->rseq_refcount)
			return -ENOENT;
		if (!--current->rseq_refcount)
			current->rseq = NULL;
		return 0;
	}

	if (unlikely(flags))
		return -EINVAL;

	if (current->rseq) {
		/*
		 * If rseq is already registered, check whether
		 * the provided address differs from the prior
		 * one.
		 */
		BUG_ON(!current->rseq_refcount);
		if (current->rseq != rseq)
			return -EBUSY;
		if (current->rseq_refcount == UINT_MAX)
			return -EOVERFLOW;
		current->rseq_refcount++;
	} else {
		/*
		 * If there was no rseq previously registered,
		 * we need to ensure the provided rseq is
		 * properly aligned and valid.
		 */
		BUG_ON(current->rseq_refcount);
		if (!IS_ALIGNED((unsigned long)rseq, __alignof__(*rseq)))
			return -EINVAL;
		if (!access_ok(VERIFY_WRITE, rseq, sizeof(*rseq)))
			return -EFAULT;
		current->rseq = rseq;
		current->rseq_refcount = 1;
		/*
		 * If rseq was previously inactive, and has just
		 * been registered, ensure the cpu_id and
		 * event_counter fields are updated before
		 * returning to user-space.
		 */
		rseq_set_notify_resume(current);
	}

	return 0;
}
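
/*
 * Example user-space registration (an illustrative sketch; __NR_rseq
 * and the exact header providing struct rseq are assumptions outside
 * the scope of this file):
 *
 *	#include <linux/rseq.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	static __thread volatile struct rseq __rseq_abi;
 *
 *	int main(void)
 *	{
 *		if (syscall(__NR_rseq, &__rseq_abi, 0)) {
 *			perror("rseq register");
 *			return 1;
 *		}
 *		printf("running on cpu %u\n", __rseq_abi.u.e.cpu_id);
 *		if (syscall(__NR_rseq, NULL, 0)) {
 *			perror("rseq unregister");
 *			return 1;
 *		}
 *		return 0;
 *	}
 *
 * Registration is refcounted per thread: each successful call with the
 * same address should be paired with a NULL call, and passing NULL with
 * RSEQ_FORCE_UNREGISTER drops the registration regardless of the
 * refcount.
 */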