Restartable sequences: only keep rseq lib parts needed by ust
[lttng-ust.git] / libringbuffer / rseq.h
/*
 * rseq.h
 *
 * (C) Copyright 2016 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef RSEQ_H
#define RSEQ_H

#include <stdint.h>
#include <stdbool.h>
#include <pthread.h>
#include <signal.h>
#include <sched.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <urcu/compiler.h>
#include <urcu/system.h>
#include <urcu/arch.h>
#include "linux-rseq-abi.h"

/*
 * Empty code injection macros, override when testing.
 * It is important to consider that the ASM injection macros need to be
 * fully reentrant (e.g. do not modify the stack).
 */
#ifndef RSEQ_INJECT_ASM
#define RSEQ_INJECT_ASM(n)
#endif

#ifndef RSEQ_INJECT_C
#define RSEQ_INJECT_C(n)
#endif

#ifndef RSEQ_INJECT_INPUT
#define RSEQ_INJECT_INPUT
#endif

#ifndef RSEQ_INJECT_CLOBBER
#define RSEQ_INJECT_CLOBBER
#endif

#ifndef RSEQ_INJECT_FAILED
#define RSEQ_INJECT_FAILED
#endif

#ifndef RSEQ_FALLBACK_CNT
#define RSEQ_FALLBACK_CNT 3
#endif

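/*
 * Override sketch (illustrative): a test build can define the
 * injection hooks before including this header in order to exercise
 * the failure and retry paths; test_inject() is a hypothetical
 * function that delays, yields, or signals the current thread at
 * injection point "n".
 *
 *	#define RSEQ_INJECT_C(n)	test_inject(n);
 *	#include "rseq.h"
 */
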
extern __thread volatile struct rseq __rseq_abi;

#if defined(__x86_64__) || defined(__i386__)
#include <rseq-x86.h>
#elif defined(__ARMEL__)
#include <rseq-arm.h>
#elif defined(__PPC__)
#include <rseq-ppc.h>
#else
#error unsupported target
#endif

/* State returned by rseq_start, passed as argument to rseq_finish. */
struct rseq_state {
	volatile struct rseq *rseqp;
	int32_t cpu_id;			/* cpu_id at start. */
	uint32_t event_counter;		/* event_counter at start. */
};

/*
 * Register rseq for the current thread. This needs to be called once
 * by any thread which uses restartable sequences, before it starts
 * using them.
 */
int rseq_register_current_thread(void);

/*
 * Unregister rseq for the current thread.
 */
int rseq_unregister_current_thread(void);

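/*
 * Usage sketch (illustrative; "worker" is a hypothetical thread entry
 * point): each thread registers before its first restartable sequence
 * and unregisters before exiting. Both calls are assumed to return 0
 * on success and a negative value on error.
 *
 *	void *worker(void *arg)
 *	{
 *		if (rseq_register_current_thread())
 *			abort();
 *		... rseq_start()/rseq_finish() critical sections ...
 *		if (rseq_unregister_current_thread())
 *			abort();
 *		return NULL;
 *	}
 */
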
/*
 * Restartable sequence fallback for reading the current CPU number.
 */
int rseq_fallback_current_cpu(void);

static inline int32_t rseq_cpu_at_start(struct rseq_state start_value)
{
	return start_value.cpu_id;
}

static inline int32_t rseq_current_cpu_raw(void)
{
	return CMM_LOAD_SHARED(__rseq_abi.u.e.cpu_id);
}

static inline int32_t rseq_current_cpu(void)
{
	int32_t cpu;

	cpu = rseq_current_cpu_raw();
	if (caa_unlikely(cpu < 0))
		cpu = rseq_fallback_current_cpu();
	return cpu;
}

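/*
 * Usage sketch: index hypothetical per-cpu data outside of a
 * restartable sequence; the result may be stale by the time it is
 * used, since the thread can migrate at any point.
 *
 *	stats[rseq_current_cpu()].sample_count++;
 *
 * rseq_current_cpu_raw() returns a negative value when rseq is not
 * registered for the current thread, in which case rseq_current_cpu()
 * falls back to rseq_fallback_current_cpu().
 */
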
static inline __attribute__((always_inline))
struct rseq_state rseq_start(void)
{
	struct rseq_state result;

	result.rseqp = &__rseq_abi;
	if (has_single_copy_load_64()) {
		union rseq_cpu_event u;

		u.v = CMM_LOAD_SHARED(result.rseqp->u.v);
		result.event_counter = u.e.event_counter;
		result.cpu_id = u.e.cpu_id;
	} else {
		result.event_counter =
			CMM_LOAD_SHARED(result.rseqp->u.e.event_counter);
		/* load event_counter before cpu_id. */
		RSEQ_INJECT_C(6)
		result.cpu_id = CMM_LOAD_SHARED(result.rseqp->u.e.cpu_id);
	}
	/*
	 * Read event counter before lock state and cpu_id. This ensures
	 * that when the state changes from RESTART to LOCK, if we have
	 * some threads that have already seen the RESTART still in
	 * flight, they will necessarily be preempted/signalled before a
	 * thread can see the LOCK state for that same CPU. That
	 * preemption/signalling will cause them to restart, so they
	 * don't interfere with the lock.
	 */
	RSEQ_INJECT_C(7)

	/*
	 * Ensure the compiler does not re-order loads of protected
	 * values before we load the event counter.
	 */
	cmm_barrier();
	return result;
}

enum rseq_finish_type {
	RSEQ_FINISH_SINGLE,
	RSEQ_FINISH_TWO,
	RSEQ_FINISH_MEMCPY,
};

/*
 * p_spec and to_write_spec are used for a speculative write attempted
 * near the end of the restartable sequence. A call to rseq_finish2 may
 * fail even after this write takes place.
 *
 * p_final and to_write_final are used for the final write. If this
 * write takes place, the call is guaranteed to succeed.
 */
static inline __attribute__((always_inline))
bool __rseq_finish(intptr_t *p_spec, intptr_t to_write_spec,
		void *p_memcpy, void *to_write_memcpy, size_t len_memcpy,
		intptr_t *p_final, intptr_t to_write_final,
		struct rseq_state start_value,
		enum rseq_finish_type type, bool release)
{
	RSEQ_INJECT_C(9)

	switch (type) {
	case RSEQ_FINISH_SINGLE:
		RSEQ_FINISH_ASM(p_final, to_write_final, start_value, failure,
			/* no speculative write */, /* no speculative write */,
			RSEQ_FINISH_FINAL_STORE_ASM(),
			RSEQ_FINISH_FINAL_STORE_INPUT(p_final, to_write_final),
			/* no extra clobber */, /* no arg */, /* no arg */,
			/* no arg */
		);
		break;
	case RSEQ_FINISH_TWO:
		if (release) {
			RSEQ_FINISH_ASM(p_final, to_write_final, start_value, failure,
				RSEQ_FINISH_SPECULATIVE_STORE_ASM(),
				RSEQ_FINISH_SPECULATIVE_STORE_INPUT(p_spec, to_write_spec),
				RSEQ_FINISH_FINAL_STORE_RELEASE_ASM(),
				RSEQ_FINISH_FINAL_STORE_INPUT(p_final, to_write_final),
				/* no extra clobber */, /* no arg */, /* no arg */,
				/* no arg */
			);
		} else {
			RSEQ_FINISH_ASM(p_final, to_write_final, start_value, failure,
				RSEQ_FINISH_SPECULATIVE_STORE_ASM(),
				RSEQ_FINISH_SPECULATIVE_STORE_INPUT(p_spec, to_write_spec),
				RSEQ_FINISH_FINAL_STORE_ASM(),
				RSEQ_FINISH_FINAL_STORE_INPUT(p_final, to_write_final),
				/* no extra clobber */, /* no arg */, /* no arg */,
				/* no arg */
			);
		}
		break;
	case RSEQ_FINISH_MEMCPY:
		if (release) {
			RSEQ_FINISH_ASM(p_final, to_write_final, start_value, failure,
				RSEQ_FINISH_MEMCPY_STORE_ASM(),
				RSEQ_FINISH_MEMCPY_STORE_INPUT(p_memcpy, to_write_memcpy, len_memcpy),
				RSEQ_FINISH_FINAL_STORE_RELEASE_ASM(),
				RSEQ_FINISH_FINAL_STORE_INPUT(p_final, to_write_final),
				RSEQ_FINISH_MEMCPY_CLOBBER(),
				RSEQ_FINISH_MEMCPY_SETUP(),
				RSEQ_FINISH_MEMCPY_TEARDOWN(),
				RSEQ_FINISH_MEMCPY_SCRATCH()
			);
		} else {
			RSEQ_FINISH_ASM(p_final, to_write_final, start_value, failure,
				RSEQ_FINISH_MEMCPY_STORE_ASM(),
				RSEQ_FINISH_MEMCPY_STORE_INPUT(p_memcpy, to_write_memcpy, len_memcpy),
				RSEQ_FINISH_FINAL_STORE_ASM(),
				RSEQ_FINISH_FINAL_STORE_INPUT(p_final, to_write_final),
				RSEQ_FINISH_MEMCPY_CLOBBER(),
				RSEQ_FINISH_MEMCPY_SETUP(),
				RSEQ_FINISH_MEMCPY_TEARDOWN(),
				RSEQ_FINISH_MEMCPY_SCRATCH()
			);
		}
		break;
	}
	return true;
failure:
	RSEQ_INJECT_FAILED
	return false;
}

static inline __attribute__((always_inline))
bool rseq_finish(intptr_t *p, intptr_t to_write,
		struct rseq_state start_value)
{
	return __rseq_finish(NULL, 0,
			NULL, NULL, 0,
			p, to_write, start_value,
			RSEQ_FINISH_SINGLE, false);
}

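/*
 * Usage sketch: lock-free increment of a hypothetical per-cpu counter
 * array (NR_CPUS and counters are illustrative only). rseq_finish()
 * fails whenever the thread was preempted, signalled, or migrated
 * between rseq_start() and the final store, so the whole sequence is
 * simply retried.
 *
 *	struct { intptr_t count; } counters[NR_CPUS];
 *
 *	static void percpu_inc(void)
 *	{
 *		struct rseq_state start;
 *		intptr_t *p, newval;
 *
 *		do {
 *			start = rseq_start();
 *			p = &counters[rseq_cpu_at_start(start)].count;
 *			newval = *p + 1;
 *		} while (!rseq_finish(p, newval, start));
 *	}
 */
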
static inline __attribute__((always_inline))
bool rseq_finish2(intptr_t *p_spec, intptr_t to_write_spec,
		intptr_t *p_final, intptr_t to_write_final,
		struct rseq_state start_value)
{
	return __rseq_finish(p_spec, to_write_spec,
			NULL, NULL, 0,
			p_final, to_write_final, start_value,
			RSEQ_FINISH_TWO, false);
}

static inline __attribute__((always_inline))
bool rseq_finish2_release(intptr_t *p_spec, intptr_t to_write_spec,
		intptr_t *p_final, intptr_t to_write_final,
		struct rseq_state start_value)
{
	return __rseq_finish(p_spec, to_write_spec,
			NULL, NULL, 0,
			p_final, to_write_final, start_value,
			RSEQ_FINISH_TWO, true);
}

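/*
 * Usage sketch: push onto a hypothetical per-cpu LIFO list with
 * rseq_finish2(). The speculative store links the new node to the
 * current head; the final store publishes the node as the new head.
 * struct node, heads, and NR_CPUS are illustrative only.
 *
 *	struct node { struct node *next; };
 *	struct node *heads[NR_CPUS];
 *
 *	static void percpu_push(struct node *node)
 *	{
 *		struct rseq_state start;
 *		int32_t cpu;
 *
 *		do {
 *			start = rseq_start();
 *			cpu = rseq_cpu_at_start(start);
 *		} while (!rseq_finish2((intptr_t *) &node->next,
 *				(intptr_t) heads[cpu],
 *				(intptr_t *) &heads[cpu],
 *				(intptr_t) node, start));
 *	}
 */
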
static inline __attribute__((always_inline))
bool rseq_finish_memcpy(void *p_memcpy, void *to_write_memcpy,
		size_t len_memcpy, intptr_t *p_final, intptr_t to_write_final,
		struct rseq_state start_value)
{
	return __rseq_finish(NULL, 0,
			p_memcpy, to_write_memcpy, len_memcpy,
			p_final, to_write_final, start_value,
			RSEQ_FINISH_MEMCPY, false);
}

static inline __attribute__((always_inline))
bool rseq_finish_memcpy_release(void *p_memcpy, void *to_write_memcpy,
		size_t len_memcpy, intptr_t *p_final, intptr_t to_write_final,
		struct rseq_state start_value)
{
	return __rseq_finish(NULL, 0,
			p_memcpy, to_write_memcpy, len_memcpy,
			p_final, to_write_final, start_value,
			RSEQ_FINISH_MEMCPY, true);
}

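/*
 * Usage sketch: append a record to a hypothetical per-cpu buffer. The
 * payload copy is speculative; the final store of the updated offset
 * commits the record. BUF_SIZE, struct percpu_buf, and bufs are
 * illustrative only.
 *
 *	struct percpu_buf { intptr_t offset; char data[BUF_SIZE]; };
 *	struct percpu_buf bufs[NR_CPUS];
 *
 *	static bool buf_append(void *src, size_t len)
 *	{
 *		struct rseq_state start;
 *		struct percpu_buf *buf;
 *		intptr_t off;
 *
 *		do {
 *			start = rseq_start();
 *			buf = &bufs[rseq_cpu_at_start(start)];
 *			off = buf->offset;
 *			if (off + (intptr_t) len > BUF_SIZE)
 *				return false;
 *		} while (!rseq_finish_memcpy(&buf->data[off], src, len,
 *				&buf->offset, off + len, start));
 *		return true;
 *	}
 */
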
#endif /* RSEQ_H */