Restartable sequences: clear rseq_cs even if non-nested
kernel/rseq.c
/*
 * Restartable sequences system call
 *
 * Restartable sequences are a lightweight interface that allows
 * user-level code to be executed atomically relative to scheduler
 * preemption and signal delivery. Typically used for implementing
 * per-cpu operations.
 *
 * It allows user-space to perform update operations on per-cpu data
 * without requiring heavy-weight atomic operations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2015, Google, Inc.,
 * Paul Turner <pjt@google.com> and Andrew Hunter <ahh@google.com>
 * Copyright (C) 2015-2016, EfficiOS Inc.,
 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/rseq.h>
#include <linux/types.h>
#include <asm/ptrace.h>

#define CREATE_TRACE_POINTS
#include <trace/events/rseq.h>

/*
 * The restartable sequences mechanism is the overlap of two distinct
 * restart mechanisms: a sequence counter tracking preemption and signal
 * delivery for high-level code, and an ip-fixup-based mechanism for the
 * final assembly instruction sequence.
 *
 * A high-level summary of the algorithm to use rseq from user-space is
 * as follows:
 *
 * The high-level code between rseq_start() and rseq_finish() loads the
 * current value of the sequence counter in rseq_start(), and then it
 * gets compared with the new current value within the rseq_finish()
 * restartable instruction sequence. Between rseq_start() and
 * rseq_finish(), the high-level code can perform operations that do not
 * have side-effects, such as getting the current CPU number, and
 * loading from variables.
 *
 * Stores are performed at the very end of the restartable sequence
 * assembly block. Each assembly block within rseq_finish() defines a
 * "struct rseq_cs" structure which describes the start_ip and
 * post_commit_ip addresses, as well as the abort_ip address where the
 * kernel should move the thread instruction pointer if a rseq critical
 * section assembly block is preempted or if a signal is delivered on
 * top of a rseq critical section assembly block.
 *
 * Detailed algorithm of rseq use:
 *
 * rseq_start()
 *
 *   0.  Userspace loads the current event counter value from the
 *       event_counter field of the registered struct rseq TLS area.
 *
 * rseq_finish()
 *
 *   Steps [1]-[3] (inclusive) need to be a sequence of instructions in
 *   userspace that can handle being moved to the abort_ip between any
 *   of those instructions.
 *
 *   The abort_ip address needs to be less than start_ip, or greater
 *   than or equal to post_commit_ip. Step [4] and the failure code
 *   step [F1] need to be at addresses less than start_ip, or greater
 *   than or equal to post_commit_ip.
 *
 *       [start_ip]
 *   1.  Userspace stores the address of the struct rseq_cs assembly
 *       block descriptor into the rseq_cs field of the registered
 *       struct rseq TLS area. This update is performed through a single
 *       store, followed by a compiler barrier which prevents the
 *       compiler from moving following loads or stores before this
 *       store.
 *
 *   2.  Userspace tests whether the current event counter value
 *       matches the value loaded at [0], manually jumping to [F1] in
 *       case of a mismatch.
 *
 *       Note that if we are preempted or interrupted by a signal
 *       after [1] and before post_commit_ip, then the kernel clears
 *       the rseq_cs field of struct rseq and, if the instruction
 *       pointer is within the critical section, moves it to abort_ip.
 *
 *   3.  The userspace critical section's final instruction before
 *       post_commit_ip is the commit. The critical section is
 *       self-terminating.
 *       [post_commit_ip]
 *
 *   4.  Userspace clears the rseq_cs field of the struct rseq
 *       TLS area.
 *
 *   5.  Return true.
 *
 *   On failure at [2]:
 *
 *   F1. Userspace clears the rseq_cs field of the struct rseq
 *       TLS area. Followed by step [F2].
 *
 *       [abort_ip]
 *   F2. Return false.
 */
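
/*
 * Illustrative user-space sketch of the steps above (not part of this
 * file; the helper name percpu_increment(), the __rseq_abi TLS variable
 * and the descriptor initialization are assumptions, shown only to
 * mirror the numbered steps). A real rseq_finish() must emit steps
 * [1]-[3] as one assembly block so that start_ip, post_commit_ip and
 * abort_ip can be taken from labels inside it; plain C statements are
 * used below purely for readability. __rseq_abi is assumed to have
 * been registered through the rseq system call.
 *
 *	static __thread volatile struct rseq __rseq_abi;
 *
 *	bool percpu_increment(intptr_t *percpu_counts)
 *	{
 *		static struct rseq_cs cs = {
 *			.start_ip = ...,			<- label at [1]
 *			.post_commit_ip = ...,			<- label after [3]
 *			.abort_ip = ...,			<- label at [F2]
 *		};
 *		uint32_t start_event, cpu;
 *
 *		start_event = __rseq_abi.u.e.event_counter;	<- [0]
 *		cpu = __rseq_abi.u.e.cpu_id;
 *
 *		__rseq_abi.rseq_cs = (uintptr_t)&cs;		<- [1]
 *		__asm__ __volatile__ ("" : : : "memory");	<- compiler barrier
 *		if (__rseq_abi.u.e.event_counter != start_event)
 *			goto fail;				<- [2]
 *		percpu_counts[cpu]++;				<- [3] commit
 *		__rseq_abi.rseq_cs = 0;				<- [4]
 *		return true;					<- [5]
 *	fail:
 *		__rseq_abi.rseq_cs = 0;				<- [F1]
 *		return false;					<- [F2]
 *	}
 */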

/*
 * The rseq_event_counter allows user-space to detect preemption and
 * signal delivery. It increments at least once before returning to
 * user-space if a thread is preempted or has a signal delivered. It is
 * not meant to be an exact counter of such events.
 *
 * Overflow of the event counter is not a problem in practice. It
 * increments at most once between each user-space thread instruction
 * executed, so we would need a thread to execute 2^32 instructions or
 * more between rseq_start() and rseq_finish(), while single-stepping,
 * for this to be an issue.
 *
 * On 64-bit architectures, both cpu_id and event_counter can be updated
 * with a single 64-bit store. On 32-bit architectures, __put_user() is
 * expected to perform two 32-bit single-copy stores to guarantee
 * single-copy atomicity semantics for other threads.
 */
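
/*
 * For reference, a sketch of the layout that the store below assumes
 * (the authoritative definition lives in the uapi rseq header of this
 * patch series; the field order shown is an assumption, only the
 * accessors u.e.cpu_id, u.e.event_counter and u.v are taken from this
 * file). Updating through the union lets 64-bit architectures commit
 * both fields with one single-copy store:
 *
 *	union rseq_cpu_event {
 *		struct {
 *			__u32 cpu_id;
 *			__u32 event_counter;
 *		} e;
 *		__u64 v;
 *	};
 */
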
static bool rseq_update_cpu_id_event_counter(struct task_struct *t)
{
	union rseq_cpu_event u;

	u.e.cpu_id = raw_smp_processor_id();
	u.e.event_counter = ++t->rseq_event_counter;
	if (__put_user(u.v, &t->rseq->u.v))
		return false;
	trace_rseq_update(t);
	return true;
}

static bool rseq_get_rseq_cs(struct task_struct *t,
		void __user **start_ip,
		void __user **post_commit_ip,
		void __user **abort_ip)
{
	unsigned long ptr;
	struct rseq_cs __user *urseq_cs;
	struct rseq_cs rseq_cs;

	if (__get_user(ptr, &t->rseq->rseq_cs))
		return false;
	if (!ptr)
		return true;
	urseq_cs = (struct rseq_cs __user *)ptr;
	if (copy_from_user(&rseq_cs, urseq_cs, sizeof(rseq_cs)))
		return false;
	/*
	 * We need to clear rseq_cs upon entry into a signal handler
	 * nested on top of a rseq assembly block, so the signal handler
	 * will not be fixed up if itself interrupted by a nested signal
	 * handler or preempted. We also need to clear rseq_cs if we
	 * preempt or deliver a signal on top of code outside of the
	 * rseq assembly block, to ensure that a following preemption or
	 * signal delivery will not try to perform a fixup needlessly.
	 */
	if (clear_user(&t->rseq->rseq_cs, sizeof(t->rseq->rseq_cs)))
		return false;
	*start_ip = (void __user *)rseq_cs.start_ip;
	*post_commit_ip = (void __user *)rseq_cs.post_commit_ip;
	*abort_ip = (void __user *)rseq_cs.abort_ip;
	return true;
}
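
/*
 * For reference, a sketch of the critical section descriptor copied in
 * above (the authoritative definition lives in the uapi rseq header of
 * this patch series; the 64-bit field types are an assumption, the
 * field names come from their use in this file):
 *
 *	struct rseq_cs {
 *		__u64 start_ip;
 *		__u64 post_commit_ip;
 *		__u64 abort_ip;
 *	};
 */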

static bool rseq_ip_fixup(struct pt_regs *regs)
{
	struct task_struct *t = current;
	void __user *start_ip = NULL;
	void __user *post_commit_ip = NULL;
	void __user *abort_ip = NULL;
	bool ret;

	ret = rseq_get_rseq_cs(t, &start_ip, &post_commit_ip, &abort_ip);
	trace_rseq_ip_fixup((void __user *)instruction_pointer(regs),
			start_ip, post_commit_ip, abort_ip,
			t->rseq_event_counter, ret);
	if (!ret)
		return false;

	/* Handle potentially not being within a critical section. */
	if ((void __user *)instruction_pointer(regs) >= post_commit_ip ||
			(void __user *)instruction_pointer(regs) < start_ip)
		return true;

	/*
	 * We set this after potentially failing in clear_user so that
	 * the signal arrives at the faulting rip.
	 */
	instruction_pointer_set(regs, (unsigned long)abort_ip);
	return true;
}

/*
 * This resume handler should always be executed between any of:
 * - preemption,
 * - signal delivery,
 * and return to user-space.
 *
 * This is how we can ensure that the entire rseq critical section,
 * consisting of both the C part and the assembly instruction sequence,
 * will issue the commit instruction only if executed atomically with
 * respect to other threads scheduled on the same CPU, and with respect
 * to signal handlers.
 */
void __rseq_handle_notify_resume(struct pt_regs *regs)
{
	struct task_struct *t = current;

	if (unlikely(t->flags & PF_EXITING))
		return;
	if (!access_ok(VERIFY_WRITE, t->rseq, sizeof(*t->rseq)))
		goto error;
	if (!rseq_update_cpu_id_event_counter(t))
		goto error;
	if (!rseq_ip_fixup(regs))
		goto error;
	return;

error:
	force_sig(SIGSEGV, t);
}
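
/*
 * Sketch of how this handler is reached (illustrative; the actual
 * wrapper and architecture glue live outside this file). The scheduler
 * and signal-delivery hooks, as well as the registration path below via
 * rseq_set_notify_resume(), request notify-resume work, and the
 * return-to-userspace path is then expected to call something along
 * these lines:
 *
 *	static inline void rseq_handle_notify_resume(struct pt_regs *regs)
 *	{
 *		if (current->rseq)
 *			__rseq_handle_notify_resume(regs);
 *	}
 */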

/*
 * sys_rseq - setup restartable sequences for caller thread.
 */
SYSCALL_DEFINE2(rseq, struct rseq __user *, rseq, int, flags)
{
	if (!rseq) {
		/* Unregister rseq for current thread. */
		if (unlikely(flags & ~RSEQ_FORCE_UNREGISTER))
			return -EINVAL;
		if (flags & RSEQ_FORCE_UNREGISTER) {
			current->rseq = NULL;
			current->rseq_refcount = 0;
			return 0;
		}
		if (!current->rseq_refcount)
			return -ENOENT;
		if (!--current->rseq_refcount)
			current->rseq = NULL;
		return 0;
	}

	if (unlikely(flags))
		return -EINVAL;

	if (current->rseq) {
		/*
		 * If rseq is already registered, check whether
		 * the provided address differs from the prior
		 * one.
		 */
		BUG_ON(!current->rseq_refcount);
		if (current->rseq != rseq)
			return -EBUSY;
		if (current->rseq_refcount == UINT_MAX)
			return -EOVERFLOW;
		current->rseq_refcount++;
	} else {
		/*
		 * If there was no rseq previously registered,
		 * we need to ensure the provided rseq is
		 * properly aligned and valid.
		 */
		BUG_ON(current->rseq_refcount);
		if (!IS_ALIGNED((unsigned long)rseq, __alignof__(*rseq)))
			return -EINVAL;
		if (!access_ok(VERIFY_WRITE, rseq, sizeof(*rseq)))
			return -EFAULT;
		current->rseq = rseq;
		current->rseq_refcount = 1;
		/*
		 * If rseq was previously inactive, and has just
		 * been registered, ensure the cpu_id and
		 * event_counter fields are updated before
		 * returning to user-space.
		 */
		rseq_set_notify_resume(current);
	}

	return 0;
}
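
/*
 * Minimal user-space registration sketch (illustrative only, not part
 * of this file). __NR_rseq, struct rseq and RSEQ_FORCE_UNREGISTER are
 * assumed to come from the uapi headers added by this patch series;
 * the TLS variable name __rseq_abi is an assumption. Registration is
 * refcounted, so nested users (application and libraries) may each
 * register; passing a NULL rseq pointer decrements the refcount, and
 * NULL together with RSEQ_FORCE_UNREGISTER drops the registration
 * unconditionally.
 *
 *	#define _GNU_SOURCE
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/rseq.h>
 *
 *	static __thread volatile struct rseq __rseq_abi;
 *
 *	static int rseq_register_current_thread(void)
 *	{
 *		return syscall(__NR_rseq, &__rseq_abi, 0);
 *	}
 *
 *	static int rseq_unregister_current_thread(void)
 *	{
 *		return syscall(__NR_rseq, NULL, 0);
 *	}
 */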