/*
 * (C) Copyright 2016 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <urcu/compiler.h>
#include <urcu/system.h>
#include <urcu/arch.h>
#include <lttng/ringbuffer-config.h> /* for struct lttng_rseq_state */
#include "linux-rseq-abi.h"
/*
 * Empty code injection macros, override when testing.
 * It is important to consider that the ASM injection macros need to be
 * fully reentrant (e.g. do not modify the stack).
 */
/* Injected into the rseq inline assembly when testing. */
#ifndef RSEQ_INJECT_ASM
#define RSEQ_INJECT_ASM(n)
#endif

/* Injected into the C fast paths when testing. */
#ifndef RSEQ_INJECT_C
#define RSEQ_INJECT_C(n)
#endif

/* Extra asm input operands for test injection. */
#ifndef RSEQ_INJECT_INPUT
#define RSEQ_INJECT_INPUT
#endif

/* Extra asm clobbers for test injection. */
#ifndef RSEQ_INJECT_CLOBBER
#define RSEQ_INJECT_CLOBBER
#endif

/* Hook invoked on the rseq commit failure path when testing. */
#ifndef RSEQ_INJECT_FAILED
#define RSEQ_INJECT_FAILED
#endif

/*
 * Number of rseq attempts before the caller takes its fallback
 * (non-rseq) path — presumably locking; confirm against callers.
 */
#ifndef RSEQ_FALLBACK_CNT
#define RSEQ_FALLBACK_CNT 3
#endif
/*
 * Per-thread rseq area shared with the kernel rseq ABI.
 * NOTE(review): declared volatile because its fields (cpu_id,
 * event_counter) change asynchronously between accesses — updated by
 * the kernel on preemption/signal delivery; confirm against rseq.c.
 */
extern __thread volatile struct rseq __rseq_abi;
/*
 * Per-architecture rseq support detection: supported targets define
 * ARCH_HAS_RSEQ, anything else is rejected at compile time.
 * NOTE(review): the original likely pulls an arch-specific helper
 * header (RSEQ_FINISH_ASM etc.) in each branch — confirm upstream.
 */
#if defined(__x86_64__) || defined(__i386__)
#define ARCH_HAS_RSEQ 1
#elif defined(__ARMEL__)
#define ARCH_HAS_RSEQ 1
#elif defined(__PPC__)
#define ARCH_HAS_RSEQ 1
#else
#error unsupported target
#endif
/*
 * Register rseq for the current thread. This needs to be called once
 * by any thread which uses restartable sequences, before they start
 * using restartable sequences.
 */
int rseq_register_current_thread(void);

/*
 * Unregister rseq for current thread.
 */
int rseq_unregister_current_thread(void);

/*
 * NOTE(review): presumably one-time library-wide setup/teardown of
 * rseq support — confirm against the definitions in rseq.c.
 */
void rseq_init(void);
void rseq_destroy(void);
109 static inline int32_t rseq_cpu_at_start(struct lttng_rseq_state start_value
)
111 return start_value
.cpu_id
;
114 static inline int32_t rseq_current_cpu_raw(void)
116 return CMM_LOAD_SHARED(__rseq_abi
.u
.e
.cpu_id
);
120 static inline __attribute__((always_inline
))
121 struct lttng_rseq_state
rseq_start(void)
123 struct lttng_rseq_state result
;
125 result
.rseqp
= &__rseq_abi
;
126 if (has_single_copy_load_64()) {
127 union rseq_cpu_event u
;
129 u
.v
= CMM_LOAD_SHARED(result
.rseqp
->u
.v
);
130 result
.event_counter
= u
.e
.event_counter
;
131 result
.cpu_id
= u
.e
.cpu_id
;
133 result
.event_counter
=
134 CMM_LOAD_SHARED(result
.rseqp
->u
.e
.event_counter
);
135 /* load event_counter before cpu_id. */
137 result
.cpu_id
= CMM_LOAD_SHARED(result
.rseqp
->u
.e
.cpu_id
);
140 * Read event counter before lock state and cpu_id. This ensures
141 * that when the state changes from RESTART to LOCK, if we have
142 * some threads that have already seen the RESTART still in
143 * flight, they will necessarily be preempted/signalled before a
144 * thread can see the LOCK state for that same CPU. That
145 * preemption/signalling will cause them to restart, so they
146 * don't interfere with the lock.
151 * Ensure the compiler does not re-order loads of protected
152 * values before we load the event counter.
158 static inline __attribute__((always_inline
))
159 struct lttng_rseq_state
rseq_start(void)
161 struct lttng_rseq_state result
= {
/*
 * Kind of commit performed by __rseq_finish().
 */
enum rseq_finish_type {
	RSEQ_FINISH_SINGLE,	/* single word-sized final store */
	RSEQ_FINISH_TWO,	/* speculative store, then final store */
	RSEQ_FINISH_MEMCPY,	/* speculative memcpy, then final store */
};
/*
 * p_spec and to_write_spec are used for a speculative write attempted
 * near the end of the restartable sequence. A rseq_finish2 may fail
 * even after this write takes place.
 *
 * p_final and to_write_final are used for the final write. If this
 * write takes place, the rseq_finish2 is guaranteed to succeed.
 */
183 static inline __attribute__((always_inline
))
184 bool __rseq_finish(intptr_t *p_spec
, intptr_t to_write_spec
,
185 void *p_memcpy
, void *to_write_memcpy
, size_t len_memcpy
,
186 intptr_t *p_final
, intptr_t to_write_final
,
187 struct lttng_rseq_state start_value
,
188 enum rseq_finish_type type
, bool release
)
193 case RSEQ_FINISH_SINGLE
:
194 RSEQ_FINISH_ASM(p_final
, to_write_final
, start_value
, failure
,
195 /* no speculative write */, /* no speculative write */,
196 RSEQ_FINISH_FINAL_STORE_ASM(),
197 RSEQ_FINISH_FINAL_STORE_INPUT(p_final
, to_write_final
),
198 /* no extra clobber */, /* no arg */, /* no arg */,
202 case RSEQ_FINISH_TWO
:
204 RSEQ_FINISH_ASM(p_final
, to_write_final
, start_value
, failure
,
205 RSEQ_FINISH_SPECULATIVE_STORE_ASM(),
206 RSEQ_FINISH_SPECULATIVE_STORE_INPUT(p_spec
, to_write_spec
),
207 RSEQ_FINISH_FINAL_STORE_RELEASE_ASM(),
208 RSEQ_FINISH_FINAL_STORE_INPUT(p_final
, to_write_final
),
209 /* no extra clobber */, /* no arg */, /* no arg */,
213 RSEQ_FINISH_ASM(p_final
, to_write_final
, start_value
, failure
,
214 RSEQ_FINISH_SPECULATIVE_STORE_ASM(),
215 RSEQ_FINISH_SPECULATIVE_STORE_INPUT(p_spec
, to_write_spec
),
216 RSEQ_FINISH_FINAL_STORE_ASM(),
217 RSEQ_FINISH_FINAL_STORE_INPUT(p_final
, to_write_final
),
218 /* no extra clobber */, /* no arg */, /* no arg */,
223 case RSEQ_FINISH_MEMCPY
:
225 RSEQ_FINISH_ASM(p_final
, to_write_final
, start_value
, failure
,
226 RSEQ_FINISH_MEMCPY_STORE_ASM(),
227 RSEQ_FINISH_MEMCPY_STORE_INPUT(p_memcpy
, to_write_memcpy
, len_memcpy
),
228 RSEQ_FINISH_FINAL_STORE_RELEASE_ASM(),
229 RSEQ_FINISH_FINAL_STORE_INPUT(p_final
, to_write_final
),
230 RSEQ_FINISH_MEMCPY_CLOBBER(),
231 RSEQ_FINISH_MEMCPY_SETUP(),
232 RSEQ_FINISH_MEMCPY_TEARDOWN(),
233 RSEQ_FINISH_MEMCPY_SCRATCH()
236 RSEQ_FINISH_ASM(p_final
, to_write_final
, start_value
, failure
,
237 RSEQ_FINISH_MEMCPY_STORE_ASM(),
238 RSEQ_FINISH_MEMCPY_STORE_INPUT(p_memcpy
, to_write_memcpy
, len_memcpy
),
239 RSEQ_FINISH_FINAL_STORE_ASM(),
240 RSEQ_FINISH_FINAL_STORE_INPUT(p_final
, to_write_final
),
241 RSEQ_FINISH_MEMCPY_CLOBBER(),
242 RSEQ_FINISH_MEMCPY_SETUP(),
243 RSEQ_FINISH_MEMCPY_TEARDOWN(),
244 RSEQ_FINISH_MEMCPY_SCRATCH()
255 static inline __attribute__((always_inline
))
256 bool __rseq_finish(intptr_t *p_spec
, intptr_t to_write_spec
,
257 void *p_memcpy
, void *to_write_memcpy
, size_t len_memcpy
,
258 intptr_t *p_final
, intptr_t to_write_final
,
259 struct lttng_rseq_state start_value
,
260 enum rseq_finish_type type
, bool release
)
266 static inline __attribute__((always_inline
))
267 bool rseq_finish(intptr_t *p
, intptr_t to_write
,
268 struct lttng_rseq_state start_value
)
270 return __rseq_finish(NULL
, 0,
272 p
, to_write
, start_value
,
273 RSEQ_FINISH_SINGLE
, false);
276 static inline __attribute__((always_inline
))
277 bool rseq_finish2(intptr_t *p_spec
, intptr_t to_write_spec
,
278 intptr_t *p_final
, intptr_t to_write_final
,
279 struct lttng_rseq_state start_value
)
281 return __rseq_finish(p_spec
, to_write_spec
,
283 p_final
, to_write_final
, start_value
,
284 RSEQ_FINISH_TWO
, false);
287 static inline __attribute__((always_inline
))
288 bool rseq_finish2_release(intptr_t *p_spec
, intptr_t to_write_spec
,
289 intptr_t *p_final
, intptr_t to_write_final
,
290 struct lttng_rseq_state start_value
)
292 return __rseq_finish(p_spec
, to_write_spec
,
294 p_final
, to_write_final
, start_value
,
295 RSEQ_FINISH_TWO
, true);
298 static inline __attribute__((always_inline
))
299 bool rseq_finish_memcpy(void *p_memcpy
, void *to_write_memcpy
,
300 size_t len_memcpy
, intptr_t *p_final
, intptr_t to_write_final
,
301 struct lttng_rseq_state start_value
)
303 return __rseq_finish(NULL
, 0,
304 p_memcpy
, to_write_memcpy
, len_memcpy
,
305 p_final
, to_write_final
, start_value
,
306 RSEQ_FINISH_MEMCPY
, false);
309 static inline __attribute__((always_inline
))
310 bool rseq_finish_memcpy_release(void *p_memcpy
, void *to_write_memcpy
,
311 size_t len_memcpy
, intptr_t *p_final
, intptr_t to_write_final
,
312 struct lttng_rseq_state start_value
)
314 return __rseq_finish(NULL
, 0,
315 p_memcpy
, to_write_memcpy
, len_memcpy
,
316 p_final
, to_write_final
, start_value
,
317 RSEQ_FINISH_MEMCPY
, true);