Fix: rseq: arm branch to failure
kernel/rseq.c (deliverable/linux.git)
/*
 * Restartable sequences system call
 *
 * Restartable sequences are a lightweight interface that allows
 * user-level code to be executed atomically relative to scheduler
 * preemption and signal delivery. Typically used for implementing
 * per-cpu operations.
 *
 * It allows user-space to perform update operations on per-cpu data
 * without requiring heavy-weight atomic operations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2015, Google, Inc.,
 * Paul Turner <pjt@google.com> and Andrew Hunter <ahh@google.com>
 * Copyright (C) 2015-2016, EfficiOS Inc.,
 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/rseq.h>
#include <linux/types.h>
#include <asm/ptrace.h>

#define CREATE_TRACE_POINTS
#include <trace/events/rseq.h>

/*
 * The restartable sequences mechanism is the overlap of two distinct
 * restart mechanisms: a sequence counter tracking preemption and signal
 * delivery for high-level code, and an ip-fixup-based mechanism for the
 * final assembly instruction sequence.
 *
 * A high-level summary of the algorithm to use rseq from user-space is
 * as follows:
 *
 * The high-level code between rseq_start() and rseq_finish() loads the
 * current value of the sequence counter in rseq_start(), and then it
 * gets compared with the new current value within the rseq_finish()
 * restartable instruction sequence. Between rseq_start() and
 * rseq_finish(), the high-level code can perform operations that do not
 * have side-effects, such as getting the current CPU number, and
 * loading from variables.
 *
 * Stores are performed at the very end of the restartable sequence
 * assembly block. Each assembly block within rseq_finish() defines a
 * "struct rseq_cs" structure which describes the start_ip and
 * post_commit_ip addresses, as well as the abort_ip address where the
 * kernel should move the thread instruction pointer if a rseq critical
 * section assembly block is preempted or if a signal is delivered on
 * top of a rseq critical section assembly block.
 *
 * Detailed algorithm of rseq use (see also the illustrative sketch
 * following this comment):
 *
 * rseq_start()
 *
 *   0. Userspace loads the current event counter value from the
 *      event_counter field of the registered struct rseq TLS area.
 *
 * rseq_finish()
 *
 *   Steps [1]-[3] (inclusive) need to be a sequence of instructions in
 *   userspace that can handle being moved to the abort_ip between any
 *   of those instructions.
 *
 *   The abort_ip address needs to be less than start_ip, or greater
 *   than or equal to post_commit_ip. Step [4] and the failure code
 *   step [F1] need to be at addresses less than start_ip, or greater
 *   than or equal to post_commit_ip.
 *
 *      [start_ip]
 *   1. Userspace stores the address of the struct rseq_cs assembly
 *      block descriptor into the rseq_cs field of the registered
 *      struct rseq TLS area. This update is performed through a single
 *      store, followed by a compiler barrier which prevents the
 *      compiler from moving following loads or stores before this
 *      store.
 *
 *   2. Userspace tests whether the current event counter value matches
 *      the value loaded at [0], manually jumping to [F1] in case of a
 *      mismatch.
 *
 *      Note that if we are preempted or interrupted by a signal after
 *      [1] and before post_commit_ip, then the kernel also performs
 *      the comparison performed in [2], and conditionally clears the
 *      rseq_cs field of struct rseq, then jumps us to abort_ip.
 *
 *   3. The final userspace critical section instruction before
 *      post_commit_ip is the commit. The critical section is
 *      self-terminating.
 *      [post_commit_ip]
 *
 *   4. Userspace clears the rseq_cs field of the struct rseq TLS area.
 *
 *   5. Return true.
 *
 *   On failure at [2]:
 *
 *   F1. Userspace clears the rseq_cs field of the struct rseq TLS
 *       area, followed by step [F2].
 *
 *       [abort_ip]
 *   F2. Return false.
 */

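/*
 * Illustrative user-space sketch of the algorithm above (comment only,
 * not kernel code). rseq_start() and rseq_finish() are the user-space
 * helpers described in this file; the TLS variable name __rseq_abi,
 * the percpu_push() function, and the per-cpu free-list used here are
 * assumed names invented for the example:
 *
 *	extern __thread volatile struct rseq __rseq_abi;
 *
 *	( Push a node onto the current CPU's list, retrying on abort. )
 *	void percpu_push(struct node **list, struct node *n)
 *	{
 *		uint32_t cnt;
 *		int cpu;
 *
 *		do {
 *			cnt = rseq_start();		( step [0] )
 *			cpu = __rseq_abi.u.e.cpu_id;	( no side-effects )
 *			n->next = list[cpu];		( loads only )
 *		} while (!rseq_finish((intptr_t *)&list[cpu],
 *				      (intptr_t)n, cnt)); ( steps [1]-[F2] )
 *	}
 */
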
/*
 * The rseq_event_counter allows user-space to detect preemption and
 * signal delivery. It increments at least once before returning to
 * user-space if a thread is preempted or has a signal delivered. It is
 * not meant to be an exact counter of such events.
 *
 * Overflow of the event counter is not a problem in practice. It
 * increments at most once between each user-space thread instruction
 * executed, so we would need a thread to execute 2^32 instructions or
 * more between rseq_start() and rseq_finish(), while single-stepping,
 * for this to be an issue.
 *
 * On 64-bit architectures, both cpu_id and event_counter can be updated
 * with a single 64-bit store. On 32-bit architectures, __put_user() is
 * expected to perform two 32-bit single-copy stores to guarantee
 * single-copy atomicity semantics for other threads.
 */
static bool rseq_update_cpu_id_event_counter(struct task_struct *t)
{
	union rseq_cpu_event u;

	u.e.cpu_id = raw_smp_processor_id();
	u.e.event_counter = ++t->rseq_event_counter;
	if (__put_user(u.v, &t->rseq->u.v))
		return false;
	trace_rseq_update(t);
	return true;
}
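
/*
 * The single-store update above assumes a uapi layout roughly like the
 * following sketch (field order and exact types may differ in the real
 * uapi header):
 *
 *	union rseq_cpu_event {
 *		struct {
 *			__u32 cpu_id;
 *			__u32 event_counter;
 *		} e;
 *		__u64 v;	( covers both fields: one 64-bit store )
 *	};
 */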

static bool rseq_get_rseq_cs(struct task_struct *t,
		void __user **start_ip,
		void __user **post_commit_ip,
		void __user **abort_ip)
{
	unsigned long ptr;
	struct rseq_cs __user *urseq_cs;
	struct rseq_cs rseq_cs;

	if (__get_user(ptr, &t->rseq->rseq_cs))
		return false;
	if (!ptr)
		return true;
	urseq_cs = (struct rseq_cs __user *)ptr;
	if (copy_from_user(&rseq_cs, urseq_cs, sizeof(rseq_cs)))
		return false;
	*start_ip = (void __user *)rseq_cs.start_ip;
	*post_commit_ip = (void __user *)rseq_cs.post_commit_ip;
	*abort_ip = (void __user *)rseq_cs.abort_ip;
	return true;
}
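
/*
 * The user-space critical section descriptor read above is expected to
 * look roughly like this sketch (field names match the accesses above;
 * exact uapi types may differ):
 *
 *	struct rseq_cs {
 *		__u64 start_ip;
 *		__u64 post_commit_ip;
 *		__u64 abort_ip;
 *	};
 */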

static bool rseq_ip_fixup(struct pt_regs *regs)
{
	struct task_struct *t = current;
	void __user *start_ip = NULL;
	void __user *post_commit_ip = NULL;
	void __user *abort_ip = NULL;
	bool ret;

	ret = rseq_get_rseq_cs(t, &start_ip, &post_commit_ip, &abort_ip);
	trace_rseq_ip_fixup((void __user *)instruction_pointer(regs),
		start_ip, post_commit_ip, abort_ip, t->rseq_event_counter,
		ret);
	if (!ret)
		return false;

	/* Handle potentially not being within a critical section. */
	if ((void __user *)instruction_pointer(regs) >= post_commit_ip ||
	    (void __user *)instruction_pointer(regs) < start_ip)
		return true;

	/*
	 * We need to clear rseq_cs upon entry into a signal handler
	 * nested on top of a rseq assembly block, so that the signal
	 * handler will not itself be fixed up if it is interrupted by
	 * a nested signal handler or preempted.
	 */
	if (clear_user(&t->rseq->rseq_cs, sizeof(t->rseq->rseq_cs)))
		return false;

	/*
	 * We set this after potentially failing in clear_user so that
	 * the signal arrives at the faulting rip.
	 */
	instruction_pointer_set(regs, (unsigned long)abort_ip);
	return true;
}

/*
 * This resume handler must always be executed between any of
 * preemption or signal delivery and the return to user-space.
 *
 * This is how we can ensure that the entire rseq critical section,
 * consisting of both the C part and the assembly instruction sequence,
 * will issue the commit instruction only if executed atomically with
 * respect to other threads scheduled on the same CPU, and with respect
 * to signal handlers.
 */
void __rseq_handle_notify_resume(struct pt_regs *regs)
{
	struct task_struct *t = current;

	if (unlikely(t->flags & PF_EXITING))
		return;
	if (!access_ok(VERIFY_WRITE, t->rseq, sizeof(*t->rseq)))
		goto error;
	if (!rseq_update_cpu_id_event_counter(t))
		goto error;
	if (!rseq_ip_fixup(regs))
		goto error;
	return;

error:
	force_sig(SIGSEGV, t);
}
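
/*
 * Architectures are expected to call into this from their
 * TIF_NOTIFY_RESUME handling on the return-to-user-space path. A
 * sketch, assuming a rseq_handle_notify_resume() wrapper in
 * linux/rseq.h that checks t->rseq before calling here (the exact
 * wrapper and call-site names vary per architecture and tree):
 *
 *	if (thread_flags & _TIF_NOTIFY_RESUME) {
 *		clear_thread_flag(TIF_NOTIFY_RESUME);
 *		tracehook_notify_resume(regs);
 *		rseq_handle_notify_resume(regs);
 *	}
 */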

/*
 * sys_rseq - setup restartable sequences for caller thread.
 */
SYSCALL_DEFINE2(rseq, struct rseq __user *, rseq, int, flags)
{
	if (!rseq) {
		/* Unregister rseq for current thread. */
		if (unlikely(flags & ~RSEQ_FORCE_UNREGISTER))
			return -EINVAL;
		if (flags & RSEQ_FORCE_UNREGISTER) {
			current->rseq = NULL;
			current->rseq_refcount = 0;
			return 0;
		}
		if (!current->rseq_refcount)
			return -ENOENT;
		if (!--current->rseq_refcount)
			current->rseq = NULL;
		return 0;
	}

	if (unlikely(flags))
		return -EINVAL;

	if (current->rseq) {
		/*
		 * If rseq is already registered, check whether
		 * the provided address differs from the prior
		 * one.
		 */
		BUG_ON(!current->rseq_refcount);
		if (current->rseq != rseq)
			return -EBUSY;
		if (current->rseq_refcount == UINT_MAX)
			return -EOVERFLOW;
		current->rseq_refcount++;
	} else {
		/*
		 * If there was no rseq previously registered,
		 * we need to ensure the provided rseq is
		 * properly aligned and valid.
		 */
		BUG_ON(current->rseq_refcount);
		if (!IS_ALIGNED((unsigned long)rseq, __alignof__(*rseq)))
			return -EINVAL;
		if (!access_ok(VERIFY_WRITE, rseq, sizeof(*rseq)))
			return -EFAULT;
		current->rseq = rseq;
		current->rseq_refcount = 1;
		/*
		 * If rseq was previously inactive, and has just
		 * been registered, ensure the cpu_id and
		 * event_counter fields are updated before
		 * returning to user-space.
		 */
		rseq_set_notify_resume(current);
	}

	return 0;
}
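
/*
 * Illustrative user-space registration sketch (comment only). The
 * flags value 0 and the NULL-unregister semantics follow the syscall
 * implementation above; the TLS variable name __rseq_abi is an assumed
 * name for the example:
 *
 *	static __thread volatile struct rseq __rseq_abi;
 *
 *	( Register: pass the thread's TLS area, no flags. )
 *	syscall(__NR_rseq, &__rseq_abi, 0);
 *
 *	( Unregister: pass NULL. Registration is refcounted, and )
 *	( -ENOENT is returned if the thread was not registered.  )
 *	syscall(__NR_rseq, NULL, 0);
 */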