/*
 * Restartable sequences system call
 *
 * Restartable sequences are a lightweight interface that allows
 * user-level code to be executed atomically relative to scheduler
 * preemption and signal delivery. Typically used for implementing
 * per-cpu operations.
 *
 * It allows user-space to perform update operations on per-cpu data
 * without requiring heavy-weight atomic operations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2015, Google, Inc.,
 * Paul Turner <pjt@google.com> and Andrew Hunter <ahh@google.com>
 * Copyright (C) 2015-2016, EfficiOS Inc.,
 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/rseq.h>
#include <linux/types.h>
#include <asm/ptrace.h>

#define CREATE_TRACE_POINTS
#include <trace/events/rseq.h>

/*
 * The restartable sequences mechanism is the overlap of two distinct
 * restart mechanisms: a sequence counter tracking preemption and signal
 * delivery for high-level code, and an ip-fixup-based mechanism for the
 * final assembly instruction sequence.
 *
 * A high-level summary of the algorithm to use rseq from user-space is
 * as follows (an illustrative sketch appears after this comment):
 *
 * The high-level code between rseq_start() and rseq_finish() loads the
 * current value of the sequence counter in rseq_start(), and then it
 * gets compared with the new current value within the rseq_finish()
 * restartable instruction sequence. Between rseq_start() and
 * rseq_finish(), the high-level code can perform operations that do not
 * have side-effects, such as getting the current CPU number, and
 * loading from variables.
 *
 * Stores are performed at the very end of the restartable sequence
 * assembly block. Each assembly block within rseq_finish() defines a
 * "struct rseq_cs" structure which describes the start_ip and
 * post_commit_ip addresses, as well as the abort_ip address where the
 * kernel should move the thread instruction pointer if a rseq critical
 * section assembly block is preempted or if a signal is delivered on
 * top of a rseq critical section assembly block.
 *
 * Detailed algorithm of rseq use:
 *
 * rseq_start()
 *
 *   0. Userspace loads the current event counter value from the
 *      event_counter field of the registered struct rseq TLS area.
 *
 * rseq_finish()
 *
 *   Steps [1]-[3] (inclusive) need to be a sequence of instructions in
 *   userspace that can handle being moved to the abort_ip between any
 *   of those instructions.
 *
 *   The abort_ip address needs to be less than start_ip, or
 *   greater than or equal to post_commit_ip. Step [4] and the failure
 *   code step [F1] need to be at addresses less than start_ip, or
 *   greater than or equal to post_commit_ip.
 *
 *       [start_ip]
 *   1.  Userspace stores the address of the struct rseq_cs assembly
 *       block descriptor into the rseq_cs field of the registered
 *       struct rseq TLS area. This update is performed through a single
 *       store, followed by a compiler barrier which prevents the
 *       compiler from moving following loads or stores before this
 *       store.
 *
 *   2.  Userspace tests whether the current event counter value
 *       matches the value loaded at [0], manually jumping to [F1] in
 *       case of a mismatch.
 *
 *       Note that if we are preempted or interrupted by a signal
 *       after [1] and before post_commit_ip, then the kernel also
 *       performs the comparison performed in [2], and conditionally
 *       clears the rseq_cs field of struct rseq, then jumps us to
 *       abort_ip.
 *
 *   3.  The userspace critical section's final instruction before
 *       post_commit_ip is the commit. The critical section is
 *       self-terminating.
 *       [post_commit_ip]
 *
 *   4.  Userspace clears the rseq_cs field of the struct rseq
 *       TLS area.
 *
 * On failure at [2]:
 *
 *   F1. Userspace clears the rseq_cs field of the struct rseq
 *       TLS area. Followed by step [F2].
 *
 *       [abort_ip]
 *   F2. Userspace failure code, which typically retries from step [0].
 */
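
/*
 * Illustrative user-space sketch (not part of this file): a per-cpu
 * counter increment following the algorithm above. The helpers
 * rseq_start() and rseq_finish() are assumed to come from a user-space
 * rseq library; rseq_finish() stands in for the assembly block
 * implementing steps [1]-[4], with the single final store acting as
 * the commit. All names below are placeholders.
 *
 *	static __thread struct rseq rseq_tls;	// registered TLS area
 *	static uint64_t percpu_count[CPU_SETSIZE];
 *
 *	static void percpu_inc(void)
 *	{
 *		uint64_t newval;
 *		int32_t cpu;
 *
 *		do {
 *			// [0]: snapshot event_counter (and read cpu_id).
 *			cpu = rseq_start(&rseq_tls);
 *			// Side-effect-free section: loads and computation only.
 *			newval = percpu_count[cpu] + 1;
 *			// [1]-[4]: publish rseq_cs, re-check the counter,
 *			// then commit newval with one store, else abort.
 *		} while (!rseq_finish(&rseq_tls, &percpu_count[cpu], newval));
 *	}
 */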

/*
 * The rseq_event_counter allows user-space to detect preemption and
 * signal delivery. It increments at least once before returning to
 * user-space if a thread is preempted or has a signal delivered. It is
 * not meant to be an exact counter of such events.
 *
 * Overflow of the event counter is not a problem in practice. It
 * increments at most once between each user-space thread instruction
 * executed, so we would need a thread to execute 2^32 instructions or
 * more between rseq_start() and rseq_finish(), while single-stepping,
 * for this to be an issue.
 *
 * On 64-bit architectures, both cpu_id and event_counter can be updated
 * with a single 64-bit store. On 32-bit architectures, __put_user() is
 * expected to perform two 32-bit single-copy stores to guarantee
 * single-copy atomicity semantics for other threads.
 */
static bool rseq_update_cpu_id_event_counter(struct task_struct *t)
{
	union rseq_cpu_event u;

	u.e.cpu_id = raw_smp_processor_id();
	u.e.event_counter = ++t->rseq_event_counter;
	if (__put_user(u.v, &t->rseq->u.v))
		return false;
	trace_rseq_update(t);
	return true;
}
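
/*
 * For reference, a sketch of the struct rseq layout assumed by the
 * update above (the authoritative uapi definition lives in
 * linux/rseq.h): cpu_id and event_counter overlay a single 64-bit
 * word, so one __put_user() of u.v publishes both fields with a
 * single store on 64-bit kernels.
 *
 *	struct rseq {
 *		union {
 *			struct {
 *				int32_t cpu_id;
 *				uint32_t event_counter;
 *			} e;
 *			uint64_t v;
 *		} u;
 *		struct rseq_cs *rseq_cs;	// see rseq_get_rseq_cs()
 *	};
 */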

static bool rseq_get_rseq_cs(struct task_struct *t,
		void __user **start_ip,
		void __user **post_commit_ip,
		void __user **abort_ip)
{
	unsigned long ptr;
	struct rseq_cs __user *urseq_cs;
	struct rseq_cs rseq_cs;

	if (__get_user(ptr, &t->rseq->rseq_cs))
		return false;
	/* A NULL rseq_cs means we are not within a critical section. */
	if (!ptr)
		return true;
	urseq_cs = (struct rseq_cs __user *)ptr;
	if (copy_from_user(&rseq_cs, urseq_cs, sizeof(rseq_cs)))
		return false;
	/*
	 * We need to clear rseq_cs upon entry into a signal handler
	 * nested on top of a rseq assembly block, so the signal handler
	 * will not be fixed up if itself interrupted by a nested signal
	 * handler or preempted. We also need to clear rseq_cs if we
	 * preempt or deliver a signal on top of code outside of the
	 * rseq assembly block, to ensure that a following preemption or
	 * signal delivery will not try to perform a fixup needlessly.
	 */
	if (clear_user(&t->rseq->rseq_cs, sizeof(t->rseq->rseq_cs)))
		return false;
	*start_ip = (void __user *)rseq_cs.start_ip;
	*post_commit_ip = (void __user *)rseq_cs.post_commit_ip;
	*abort_ip = (void __user *)rseq_cs.abort_ip;
	return true;
}
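
/*
 * Sketch of the critical section descriptor read above (a simplified
 * assumed layout; see linux/rseq.h for the authoritative uapi
 * definition). Userspace typically defines one statically-allocated
 * descriptor per assembly block:
 *
 *	struct rseq_cs {
 *		uint64_t start_ip;		// first instruction of the block
 *		uint64_t post_commit_ip;	// address after the commit store
 *		uint64_t abort_ip;		// where the kernel moves the ip
 *	};
 */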

static bool rseq_ip_fixup(struct pt_regs *regs)
{
	struct task_struct *t = current;
	void __user *start_ip = NULL;
	void __user *post_commit_ip = NULL;
	void __user *abort_ip = NULL;
	bool ret;

	ret = rseq_get_rseq_cs(t, &start_ip, &post_commit_ip, &abort_ip);
	trace_rseq_ip_fixup((void __user *)instruction_pointer(regs),
			start_ip, post_commit_ip, abort_ip,
			t->rseq_event_counter, ret);
	if (!ret)
		return false;

	/* Handle potentially not being within a critical section. */
	if ((void __user *)instruction_pointer(regs) >= post_commit_ip ||
	    (void __user *)instruction_pointer(regs) < start_ip)
		return true;

	/*
	 * We set this after potentially failing in
	 * clear_user so that the signal arrives at the
	 * faulting instruction pointer.
	 */
	instruction_pointer_set(regs, (unsigned long)abort_ip);
	return true;
}

/*
 * This resume handler should always be executed between any of:
 * - preemption,
 * - signal delivery,
 * and return to user-space.
 *
 * This is how we can ensure that the entire rseq critical section,
 * consisting of both the C part and the assembly instruction sequence,
 * will issue the commit instruction only if executed atomically with
 * respect to other threads scheduled on the same CPU, and with respect
 * to signal handlers.
 */
void __rseq_handle_notify_resume(struct pt_regs *regs)
{
	struct task_struct *t = current;

	if (unlikely(t->flags & PF_EXITING))
		return;
	if (!access_ok(VERIFY_WRITE, t->rseq, sizeof(*t->rseq)))
		goto error;
	if (!rseq_update_cpu_id_event_counter(t))
		goto error;
	if (!rseq_ip_fixup(regs))
		goto error;
	return;

error:
	force_sig(SIGSEGV, t);
}
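
/*
 * How an architecture is expected to wire this up (a sketch under
 * assumed TIF_NOTIFY_RESUME semantics; the actual call site lives in
 * each architecture's exit-to-user/signal code, not in this file):
 *
 *	if (thread_flags & _TIF_NOTIFY_RESUME) {
 *		clear_thread_flag(TIF_NOTIFY_RESUME);
 *		tracehook_notify_resume(regs);
 *		rseq_handle_notify_resume(regs);	// wrapper checking t->rseq
 *	}
 */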

/*
 * sys_rseq - setup restartable sequences for caller thread.
 */
SYSCALL_DEFINE2(rseq, struct rseq __user *, rseq, int, flags)
{
	if (!rseq) {
		/* Unregister rseq for current thread. */
		if (unlikely(flags & ~RSEQ_FORCE_UNREGISTER))
			return -EINVAL;
		if (flags & RSEQ_FORCE_UNREGISTER) {
			current->rseq = NULL;
			current->rseq_refcount = 0;
			return 0;
		}
		if (!current->rseq_refcount)
			return -ENOENT;
		if (!--current->rseq_refcount)
			current->rseq = NULL;
		return 0;
	}

	if (unlikely(flags))
		return -EINVAL;

	if (current->rseq) {
		/*
		 * If rseq is already registered, check whether
		 * the provided address differs from the prior
		 * one.
		 */
		BUG_ON(!current->rseq_refcount);
		if (current->rseq != rseq)
			return -EBUSY;
		if (current->rseq_refcount == UINT_MAX)
			return -EOVERFLOW;
		current->rseq_refcount++;
	} else {
		/*
		 * If there was no rseq previously registered,
		 * we need to ensure the provided rseq is
		 * properly aligned and valid.
		 */
		BUG_ON(current->rseq_refcount);
		if (!IS_ALIGNED((unsigned long)rseq, __alignof__(*rseq)))
			return -EINVAL;
		if (!access_ok(VERIFY_WRITE, rseq, sizeof(*rseq)))
			return -EFAULT;
		current->rseq = rseq;
		current->rseq_refcount = 1;
		/*
		 * If rseq was previously inactive, and has just
		 * been registered, ensure the cpu_id and
		 * event_counter fields are updated before
		 * returning to user-space.
		 */
		rseq_set_notify_resume(current);
	}

	return 0;
}
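
/*
 * Illustrative registration from user-space (not part of this file;
 * __NR_rseq and the TLS area name are placeholders). Each rseq user
 * within a thread registers the same struct rseq; the refcount above
 * lets independent libraries share the registration:
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static __thread struct rseq rseq_tls
 *		__attribute__((aligned(__alignof__(struct rseq))));
 *
 *	static void thread_rseq_register(void)
 *	{
 *		// First call registers; subsequent calls with the same
 *		// address increment the kernel-side refcount.
 *		if (syscall(__NR_rseq, &rseq_tls, 0))
 *			abort();
 *	}
 *
 *	static void thread_rseq_unregister(void)
 *	{
 *		// NULL decrements the refcount; the last put clears the
 *		// registration (RSEQ_FORCE_UNREGISTER forces it at once).
 *		if (syscall(__NR_rseq, NULL, 0))
 *			abort();
 *	}
 */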