rseq: output whether configure finds rseq syscall
[lttng-ust.git] / libringbuffer / rseq.h
/*
 * rseq.h
 *
 * (C) Copyright 2016 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef RSEQ_H
#define RSEQ_H

#include <stdint.h>
#include <stdbool.h>
#include <pthread.h>
#include <signal.h>
#include <sched.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <urcu/compiler.h>
#include <urcu/system.h>
#include <urcu/arch.h>
#include <lttng/ringbuffer-config.h> /* for struct lttng_rseq_state */
#include "linux-rseq-abi.h"

/*
 * Empty code injection macros, override when testing.
 * It is important to consider that the ASM injection macros need to be
 * fully reentrant (e.g. do not modify the stack).
 * An illustrative override is sketched after the defaults below.
 */
#ifndef RSEQ_INJECT_ASM
#define RSEQ_INJECT_ASM(n)
#endif

#ifndef RSEQ_INJECT_C
#define RSEQ_INJECT_C(n)
#endif

#ifndef RSEQ_INJECT_INPUT
#define RSEQ_INJECT_INPUT
#endif

#ifndef RSEQ_INJECT_CLOBBER
#define RSEQ_INJECT_CLOBBER
#endif

#ifndef RSEQ_INJECT_FAILED
#define RSEQ_INJECT_FAILED
#endif

#ifndef RSEQ_FALLBACK_CNT
#define RSEQ_FALLBACK_CNT 3
#endif

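/*
 * Illustrative override (assumption, not part of this header): a test
 * build could define the injection hooks before including rseq.h to
 * widen preemption windows and count aborted sequences. The
 * rseq_test_delay() and rseq_test_note_abort() helpers named here are
 * hypothetical test-harness functions.
 *
 *	#define RSEQ_INJECT_C(n)	rseq_test_delay(n);
 *	#define RSEQ_INJECT_FAILED	rseq_test_note_abort();
 *	#include "rseq.h"
 */
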
extern __thread volatile struct rseq __rseq_abi;

#if defined(__x86_64__) || defined(__i386__)
#include "rseq-x86.h"
#ifdef __NR_rseq
#define ARCH_HAS_RSEQ 1
#endif
#elif defined(__ARMEL__)
#include "rseq-arm.h"
#ifdef __NR_rseq
#define ARCH_HAS_RSEQ 1
#endif
#elif defined(__PPC__)
#include "rseq-ppc.h"
#ifdef __NR_rseq
#define ARCH_HAS_RSEQ 1
#endif
#else
#error unsupported target
#endif

/*
 * Register rseq for the current thread. This needs to be called once
 * by any thread which uses restartable sequences, before it starts
 * using them.
 */
int rseq_register_current_thread(void);

/*
 * Unregister rseq for the current thread.
 */
int rseq_unregister_current_thread(void);

void rseq_init(void);
void rseq_destroy(void);

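/*
 * Illustrative usage sketch (assumption, not part of this header):
 * rseq_init()/rseq_destroy() appear to be process-wide setup and
 * teardown, while each thread registers itself before its first
 * restartable sequence. Error handling and the worker body are elided.
 *
 *	static void *worker(void *arg)
 *	{
 *		if (rseq_register_current_thread())
 *			return NULL;	// fall back to a non-rseq path
 *		// ... rseq_start()/rseq_finish() critical sections ...
 *		(void) rseq_unregister_current_thread();
 *		return NULL;
 *	}
 */
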
static inline int32_t rseq_cpu_at_start(struct lttng_rseq_state start_value)
{
	return start_value.cpu_id;
}

static inline int32_t rseq_current_cpu_raw(void)
{
	return CMM_LOAD_SHARED(__rseq_abi.u.e.cpu_id);
}

#ifdef ARCH_HAS_RSEQ
static inline __attribute__((always_inline))
struct lttng_rseq_state rseq_start(void)
{
	struct lttng_rseq_state result;

	result.rseqp = &__rseq_abi;
	if (has_single_copy_load_64()) {
		union rseq_cpu_event u;

		u.v = CMM_LOAD_SHARED(result.rseqp->u.v);
		result.event_counter = u.e.event_counter;
		result.cpu_id = u.e.cpu_id;
	} else {
		result.event_counter =
			CMM_LOAD_SHARED(result.rseqp->u.e.event_counter);
		/* load event_counter before cpu_id. */
		RSEQ_INJECT_C(6)
		result.cpu_id = CMM_LOAD_SHARED(result.rseqp->u.e.cpu_id);
	}
	/*
	 * Read event counter before lock state and cpu_id. This ensures
	 * that when the state changes from RESTART to LOCK, if we have
	 * some threads that have already seen the RESTART still in
	 * flight, they will necessarily be preempted/signalled before a
	 * thread can see the LOCK state for that same CPU. That
	 * preemption/signalling will cause them to restart, so they
	 * don't interfere with the lock.
	 */
	RSEQ_INJECT_C(7)

	/*
	 * Ensure the compiler does not re-order loads of protected
	 * values before we load the event counter.
	 */
	cmm_barrier();
	return result;
}
#else
static inline __attribute__((always_inline))
struct lttng_rseq_state rseq_start(void)
{
	struct lttng_rseq_state result = {
		.cpu_id = -2,	/* rseq not available: negative cpu_id signals fallback. */
	};
	return result;
}
#endif

enum rseq_finish_type {
	RSEQ_FINISH_SINGLE,
	RSEQ_FINISH_TWO,
	RSEQ_FINISH_MEMCPY,
};

/*
 * p_spec and to_write_spec are used for a speculative write attempted
 * near the end of the restartable sequence. A rseq_finish2 may fail
 * even after this write takes place.
 *
 * p_final and to_write_final are used for the final write. If this
 * write takes place, the rseq_finish2 is guaranteed to succeed.
 * An illustrative sketch follows the rseq_finish2() wrapper below.
 */
#ifdef ARCH_HAS_RSEQ
static inline __attribute__((always_inline))
bool __rseq_finish(intptr_t *p_spec, intptr_t to_write_spec,
		void *p_memcpy, void *to_write_memcpy, size_t len_memcpy,
		intptr_t *p_final, intptr_t to_write_final,
		struct lttng_rseq_state start_value,
		enum rseq_finish_type type, bool release)
{
	RSEQ_INJECT_C(9)

	switch (type) {
	case RSEQ_FINISH_SINGLE:
		RSEQ_FINISH_ASM(p_final, to_write_final, start_value, failure,
			/* no speculative write */, /* no speculative write */,
			RSEQ_FINISH_FINAL_STORE_ASM(),
			RSEQ_FINISH_FINAL_STORE_INPUT(p_final, to_write_final),
			/* no extra clobber */, /* no arg */, /* no arg */,
			/* no arg */
		);
		break;
	case RSEQ_FINISH_TWO:
		if (release) {
			RSEQ_FINISH_ASM(p_final, to_write_final, start_value, failure,
				RSEQ_FINISH_SPECULATIVE_STORE_ASM(),
				RSEQ_FINISH_SPECULATIVE_STORE_INPUT(p_spec, to_write_spec),
				RSEQ_FINISH_FINAL_STORE_RELEASE_ASM(),
				RSEQ_FINISH_FINAL_STORE_INPUT(p_final, to_write_final),
				/* no extra clobber */, /* no arg */, /* no arg */,
				/* no arg */
			);
		} else {
			RSEQ_FINISH_ASM(p_final, to_write_final, start_value, failure,
				RSEQ_FINISH_SPECULATIVE_STORE_ASM(),
				RSEQ_FINISH_SPECULATIVE_STORE_INPUT(p_spec, to_write_spec),
				RSEQ_FINISH_FINAL_STORE_ASM(),
				RSEQ_FINISH_FINAL_STORE_INPUT(p_final, to_write_final),
				/* no extra clobber */, /* no arg */, /* no arg */,
				/* no arg */
			);
		}
		break;
	case RSEQ_FINISH_MEMCPY:
		if (release) {
			RSEQ_FINISH_ASM(p_final, to_write_final, start_value, failure,
				RSEQ_FINISH_MEMCPY_STORE_ASM(),
				RSEQ_FINISH_MEMCPY_STORE_INPUT(p_memcpy, to_write_memcpy, len_memcpy),
				RSEQ_FINISH_FINAL_STORE_RELEASE_ASM(),
				RSEQ_FINISH_FINAL_STORE_INPUT(p_final, to_write_final),
				RSEQ_FINISH_MEMCPY_CLOBBER(),
				RSEQ_FINISH_MEMCPY_SETUP(),
				RSEQ_FINISH_MEMCPY_TEARDOWN(),
				RSEQ_FINISH_MEMCPY_SCRATCH()
			);
		} else {
			RSEQ_FINISH_ASM(p_final, to_write_final, start_value, failure,
				RSEQ_FINISH_MEMCPY_STORE_ASM(),
				RSEQ_FINISH_MEMCPY_STORE_INPUT(p_memcpy, to_write_memcpy, len_memcpy),
				RSEQ_FINISH_FINAL_STORE_ASM(),
				RSEQ_FINISH_FINAL_STORE_INPUT(p_final, to_write_final),
				RSEQ_FINISH_MEMCPY_CLOBBER(),
				RSEQ_FINISH_MEMCPY_SETUP(),
				RSEQ_FINISH_MEMCPY_TEARDOWN(),
				RSEQ_FINISH_MEMCPY_SCRATCH()
			);
		}
		break;
	}
	return true;
failure:
	RSEQ_INJECT_FAILED
	return false;
}
#else
static inline __attribute__((always_inline))
bool __rseq_finish(intptr_t *p_spec, intptr_t to_write_spec,
		void *p_memcpy, void *to_write_memcpy, size_t len_memcpy,
		intptr_t *p_final, intptr_t to_write_final,
		struct lttng_rseq_state start_value,
		enum rseq_finish_type type, bool release)
{
	/* rseq not available: always report failure so callers fall back. */
	return false;
}
#endif

static inline __attribute__((always_inline))
bool rseq_finish(intptr_t *p, intptr_t to_write,
		struct lttng_rseq_state start_value)
{
	return __rseq_finish(NULL, 0,
			NULL, NULL, 0,
			p, to_write, start_value,
			RSEQ_FINISH_SINGLE, false);
}

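/*
 * Illustrative usage sketch (assumption, not part of this header): the
 * typical pattern snapshots the rseq state with rseq_start(), computes
 * a new value for the current CPU, then attempts to commit it with
 * rseq_finish(), retrying if the thread was preempted, migrated or
 * signalled in between. The per-cpu counter layout is hypothetical.
 *
 *	struct percpu_counter { intptr_t count; };
 *	extern struct percpu_counter counters[];	// one slot per possible CPU
 *
 *	static void percpu_inc(void)
 *	{
 *		struct lttng_rseq_state state;
 *		intptr_t old;
 *		int32_t cpu;
 *
 *		do {
 *			state = rseq_start();
 *			cpu = rseq_cpu_at_start(state);
 *			if (cpu < 0)
 *				return;	// unregistered or unsupported: use a fallback path
 *			old = CMM_LOAD_SHARED(counters[cpu].count);
 *		} while (!rseq_finish(&counters[cpu].count, old + 1, state));
 *	}
 */
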
static inline __attribute__((always_inline))
bool rseq_finish2(intptr_t *p_spec, intptr_t to_write_spec,
		intptr_t *p_final, intptr_t to_write_final,
		struct lttng_rseq_state start_value)
{
	return __rseq_finish(p_spec, to_write_spec,
			NULL, NULL, 0,
			p_final, to_write_final, start_value,
			RSEQ_FINISH_TWO, false);
}

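/*
 * Illustrative sketch (assumption, not part of this header):
 * rseq_finish2() suits updates where a speculative store prepares a
 * link and the final store publishes it, e.g. pushing onto a per-cpu
 * list. The list layout is hypothetical.
 *
 *	struct node { struct node *next; };
 *	struct percpu_list { struct node *head; };
 *	extern struct percpu_list lists[];	// one slot per possible CPU
 *
 *	static bool list_push(struct node *n)
 *	{
 *		struct lttng_rseq_state state;
 *		struct node *head;
 *		int32_t cpu;
 *
 *		do {
 *			state = rseq_start();
 *			cpu = rseq_cpu_at_start(state);
 *			if (cpu < 0)
 *				return false;	// caller falls back to a locked path
 *			head = CMM_LOAD_SHARED(lists[cpu].head);
 *		} while (!rseq_finish2((intptr_t *) &n->next, (intptr_t) head,
 *				(intptr_t *) &lists[cpu].head, (intptr_t) n,
 *				state));
 *		return true;
 *	}
 */
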
static inline __attribute__((always_inline))
bool rseq_finish2_release(intptr_t *p_spec, intptr_t to_write_spec,
		intptr_t *p_final, intptr_t to_write_final,
		struct lttng_rseq_state start_value)
{
	return __rseq_finish(p_spec, to_write_spec,
			NULL, NULL, 0,
			p_final, to_write_final, start_value,
			RSEQ_FINISH_TWO, true);
}

static inline __attribute__((always_inline))
bool rseq_finish_memcpy(void *p_memcpy, void *to_write_memcpy,
		size_t len_memcpy, intptr_t *p_final, intptr_t to_write_final,
		struct lttng_rseq_state start_value)
{
	return __rseq_finish(NULL, 0,
			p_memcpy, to_write_memcpy, len_memcpy,
			p_final, to_write_final, start_value,
			RSEQ_FINISH_MEMCPY, false);
}

static inline __attribute__((always_inline))
bool rseq_finish_memcpy_release(void *p_memcpy, void *to_write_memcpy,
		size_t len_memcpy, intptr_t *p_final, intptr_t to_write_final,
		struct lttng_rseq_state start_value)
{
	return __rseq_finish(NULL, 0,
			p_memcpy, to_write_memcpy, len_memcpy,
			p_final, to_write_final, start_value,
			RSEQ_FINISH_MEMCPY, true);
}

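/*
 * Illustrative sketch (assumption, not part of this header): the memcpy
 * variants speculatively copy a payload into per-cpu storage, then
 * commit by storing the new offset as the final write. If the sequence
 * aborts, the partially copied bytes are simply overwritten by the next
 * attempt. The buffer layout is hypothetical.
 *
 *	struct percpu_buf { intptr_t offset; char data[4096]; };
 *	extern struct percpu_buf bufs[];	// one slot per possible CPU
 *
 *	static bool buf_append(void *src, size_t len)
 *	{
 *		struct lttng_rseq_state state;
 *		intptr_t off;
 *		int32_t cpu;
 *
 *		do {
 *			state = rseq_start();
 *			cpu = rseq_cpu_at_start(state);
 *			if (cpu < 0)
 *				return false;	// caller falls back to a locked path
 *			off = CMM_LOAD_SHARED(bufs[cpu].offset);
 *			if ((size_t) off + len > sizeof(bufs[cpu].data))
 *				return false;	// buffer full
 *		} while (!rseq_finish_memcpy(&bufs[cpu].data[off], src, len,
 *				&bufs[cpu].offset, off + len, state));
 *		return true;
 *	}
 */
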
#endif /* RSEQ_H */