/*
 * rseq.h
 *
 * (C) Copyright 2016 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

37 #include "linux-rseq-abi.h"
/*
 * Empty code injection macros, override when testing.
 * It is important to consider that the ASM injection macros need to be
 * fully reentrant (e.g. do not modify the stack).
 */
#ifndef RSEQ_INJECT_ASM
#define RSEQ_INJECT_ASM(n)
#endif

#ifndef RSEQ_INJECT_C
#define RSEQ_INJECT_C(n)
#endif

#ifndef RSEQ_INJECT_INPUT
#define RSEQ_INJECT_INPUT
#endif

#ifndef RSEQ_INJECT_CLOBBER
#define RSEQ_INJECT_CLOBBER
#endif

#ifndef RSEQ_INJECT_FAILED
#define RSEQ_INJECT_FAILED
#endif

#ifndef RSEQ_FALLBACK_CNT
#define RSEQ_FALLBACK_CNT	3
#endif

uint32_t rseq_get_fallback_wait_cnt(void);
uint32_t rseq_get_fallback_cnt(void);

extern __thread volatile struct rseq __rseq_abi;
extern int rseq_has_sys_membarrier;

#define likely(x)		__builtin_expect(!!(x), 1)
#define unlikely(x)		__builtin_expect(!!(x), 0)
#define barrier()		__asm__ __volatile__("" : : : "memory")

#define ACCESS_ONCE(x)		(*(__volatile__ __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v)	__extension__ ({ ACCESS_ONCE(x) = (v); })
#define READ_ONCE(x)		ACCESS_ONCE(x)

#if defined(__x86_64__) || defined(__i386__)
#include <rseq-x86.h>
#elif defined(__ARMEL__)
#include <rseq-arm.h>
#elif defined(__PPC__)
#include <rseq-ppc.h>
#else
#error unsupported target
#endif

enum rseq_lock_state {
	RSEQ_LOCK_STATE_RESTART = 0,
	RSEQ_LOCK_STATE_LOCK = 1,
	RSEQ_LOCK_STATE_FAIL = 2,
};

struct rseq_lock {
	pthread_mutex_t lock;
	int32_t state;			/* enum rseq_lock_state */
};

/* State returned by rseq_start, passed as argument to rseq_finish. */
struct rseq_state {
	volatile struct rseq *rseqp;
	int32_t cpu_id;			/* cpu_id at start. */
	uint32_t event_counter;		/* event_counter at start. */
	int32_t lock_state;		/* Lock state at start. */
};

/*
 * Register rseq for the current thread. This needs to be called once
 * by any thread which uses restartable sequences, before it starts
 * using them. If initialization is not invoked, or if it fails, the
 * restartable critical sections will fall back on locking (rseq_lock).
 */
int rseq_register_current_thread(void);

/*
 * Unregister rseq for the current thread.
 */
int rseq_unregister_current_thread(void);
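
/*
 * Usage sketch (illustrative only, not part of the original header): a
 * worker thread registers once on entry and unregisters on exit.
 * Registration failure is tolerated by design: critical sections then
 * fall back on the rseq_lock.
 *
 *	static void *worker_thread(void *arg)
 *	{
 *		if (rseq_register_current_thread())
 *			fprintf(stderr, "rseq unavailable, using fallback lock\n");
 *		// ... restartable critical sections ...
 *		rseq_unregister_current_thread();
 *		return NULL;
 *	}
 */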

/*
 * The fallback lock should be initialized before being used by any
 * thread, and destroyed after all threads are done using it. This lock
 * should be used by all rseq calls associated with shared data, either
 * between threads, or between processes in shared memory.
 *
 * There may be many rseq_lock per process, e.g. one per protected data
 * structure.
 */
int rseq_init_lock(struct rseq_lock *rlock);
int rseq_destroy_lock(struct rseq_lock *rlock);
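
/*
 * Usage sketch (illustrative only): one lock per protected data
 * structure, set up before threads run and torn down after they join.
 * The "counters_lock" name is hypothetical.
 *
 *	static struct rseq_lock counters_lock;
 *
 *	int main(void)
 *	{
 *		if (rseq_init_lock(&counters_lock))
 *			abort();
 *		// ... spawn and join threads using the protected data ...
 *		if (rseq_destroy_lock(&counters_lock))
 *			abort();
 *		return 0;
 *	}
 */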

/*
 * Restartable sequence fallback prototypes. Fall back on locking when
 * rseq is not initialized, not available on the system, or during
 * single-stepping, to ensure forward progress.
 */
int rseq_fallback_begin(struct rseq_lock *rlock);
void rseq_fallback_end(struct rseq_lock *rlock, int cpu);
void rseq_fallback_wait(struct rseq_lock *rlock);
void rseq_fallback_noinit(struct rseq_state *rseq_state);

/*
 * Restartable sequence fallback for reading the current CPU number.
 */
int rseq_fallback_current_cpu(void);

static inline int32_t rseq_cpu_at_start(struct rseq_state start_value)
{
	return start_value.cpu_id;
}

static inline int32_t rseq_current_cpu_raw(void)
{
	return ACCESS_ONCE(__rseq_abi.u.e.cpu_id);
}

static inline int32_t rseq_current_cpu(void)
{
	int32_t cpu;

	cpu = rseq_current_cpu_raw();
	if (unlikely(cpu < 0))
		cpu = rseq_fallback_current_cpu();
	return cpu;
}

static inline __attribute__((always_inline))
struct rseq_state rseq_start(struct rseq_lock *rlock)
{
	struct rseq_state result;

	result.rseqp = &__rseq_abi;
	if (has_single_copy_load_64()) {
		union rseq_cpu_event u;

		u.v = ACCESS_ONCE(result.rseqp->u.v);
		result.event_counter = u.e.event_counter;
		result.cpu_id = u.e.cpu_id;
	} else {
		result.event_counter =
			ACCESS_ONCE(result.rseqp->u.e.event_counter);
		/* load event_counter before cpu_id. */
		barrier();
		result.cpu_id = ACCESS_ONCE(result.rseqp->u.e.cpu_id);
	}
	/*
	 * Read event counter before lock state and cpu_id. This ensures
	 * that when the state changes from RESTART to LOCK, if we have
	 * some threads that have already seen the RESTART still in
	 * flight, they will necessarily be preempted/signalled before a
	 * thread can see the LOCK state for that same CPU. That
	 * preemption/signalling will cause them to restart, so they
	 * don't interfere with the lock.
	 */
	if (!has_fast_acquire_release() && likely(rseq_has_sys_membarrier)) {
		result.lock_state = ACCESS_ONCE(rlock->state);
		barrier();
	} else {
		/*
		 * Load lock state with acquire semantic. Matches
		 * smp_store_release() in rseq_fallback_end().
		 */
		result.lock_state = smp_load_acquire(&rlock->state);
	}
	if (unlikely(result.cpu_id < 0))
		rseq_fallback_noinit(&result);
	/*
	 * Ensure the compiler does not re-order loads of protected
	 * values before we load the event counter.
	 */
	barrier();
	return result;
}

enum rseq_finish_type {
	RSEQ_FINISH_SINGLE,
	RSEQ_FINISH_TWO,
	RSEQ_FINISH_MEMCPY,
};

/*
 * p_spec and to_write_spec are used for a speculative write attempted
 * near the end of the restartable sequence. A rseq_finish2 may fail
 * even after this write takes place.
 *
 * p_final and to_write_final are used for the final write. If this
 * write takes place, the rseq_finish2 is guaranteed to succeed.
 */
static inline __attribute__((always_inline))
bool __rseq_finish(struct rseq_lock *rlock,
		intptr_t *p_spec, intptr_t to_write_spec,
		void *p_memcpy, void *to_write_memcpy, size_t len_memcpy,
		intptr_t *p_final, intptr_t to_write_final,
		struct rseq_state start_value,
		enum rseq_finish_type type, bool release)
{
	if (unlikely(start_value.lock_state != RSEQ_LOCK_STATE_RESTART)) {
		if (start_value.lock_state == RSEQ_LOCK_STATE_LOCK)
			rseq_fallback_wait(rlock);
		return false;
	}
	switch (type) {
	case RSEQ_FINISH_SINGLE:
		RSEQ_FINISH_ASM(p_final, to_write_final, start_value, failure,
			/* no speculative write */, /* no speculative write */,
			RSEQ_FINISH_FINAL_STORE_ASM(),
			RSEQ_FINISH_FINAL_STORE_INPUT(p_final, to_write_final),
			/* no extra clobber */, /* no arg */, /* no arg */,
			/* no arg */
		);
		break;
	case RSEQ_FINISH_TWO:
		if (release) {
			RSEQ_FINISH_ASM(p_final, to_write_final, start_value, failure,
				RSEQ_FINISH_SPECULATIVE_STORE_ASM(),
				RSEQ_FINISH_SPECULATIVE_STORE_INPUT(p_spec, to_write_spec),
				RSEQ_FINISH_FINAL_STORE_RELEASE_ASM(),
				RSEQ_FINISH_FINAL_STORE_INPUT(p_final, to_write_final),
				/* no extra clobber */, /* no arg */, /* no arg */,
				/* no arg */
			);
		} else {
			RSEQ_FINISH_ASM(p_final, to_write_final, start_value, failure,
				RSEQ_FINISH_SPECULATIVE_STORE_ASM(),
				RSEQ_FINISH_SPECULATIVE_STORE_INPUT(p_spec, to_write_spec),
				RSEQ_FINISH_FINAL_STORE_ASM(),
				RSEQ_FINISH_FINAL_STORE_INPUT(p_final, to_write_final),
				/* no extra clobber */, /* no arg */, /* no arg */,
				/* no arg */
			);
		}
		break;
	case RSEQ_FINISH_MEMCPY:
		if (release) {
			RSEQ_FINISH_ASM(p_final, to_write_final, start_value, failure,
				RSEQ_FINISH_MEMCPY_STORE_ASM(),
				RSEQ_FINISH_MEMCPY_STORE_INPUT(p_memcpy, to_write_memcpy, len_memcpy),
				RSEQ_FINISH_FINAL_STORE_RELEASE_ASM(),
				RSEQ_FINISH_FINAL_STORE_INPUT(p_final, to_write_final),
				RSEQ_FINISH_MEMCPY_CLOBBER(),
				RSEQ_FINISH_MEMCPY_SETUP(),
				RSEQ_FINISH_MEMCPY_TEARDOWN(),
				RSEQ_FINISH_MEMCPY_SCRATCH()
			);
		} else {
			RSEQ_FINISH_ASM(p_final, to_write_final, start_value, failure,
				RSEQ_FINISH_MEMCPY_STORE_ASM(),
				RSEQ_FINISH_MEMCPY_STORE_INPUT(p_memcpy, to_write_memcpy, len_memcpy),
				RSEQ_FINISH_FINAL_STORE_ASM(),
				RSEQ_FINISH_FINAL_STORE_INPUT(p_final, to_write_final),
				RSEQ_FINISH_MEMCPY_CLOBBER(),
				RSEQ_FINISH_MEMCPY_SETUP(),
				RSEQ_FINISH_MEMCPY_TEARDOWN(),
				RSEQ_FINISH_MEMCPY_SCRATCH()
			);
		}
		break;
	}
	return true;
failure:
	RSEQ_INJECT_FAILED
	return false;
}

static inline __attribute__((always_inline))
bool rseq_finish(struct rseq_lock *rlock,
		intptr_t *p, intptr_t to_write,
		struct rseq_state start_value)
{
	return __rseq_finish(rlock, NULL, 0,
			NULL, NULL, 0,
			p, to_write, start_value,
			RSEQ_FINISH_SINGLE, false);
}
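
/*
 * Usage sketch (illustrative only): open-coded retry loop around
 * rseq_start()/rseq_finish() incrementing a per-cpu counter. The
 * "percpu_count" array and "rlock" are hypothetical names. Unlike
 * do_rseq() below, this loop has no bounded-retry locking fallback, so
 * it does not guarantee forward progress under single-stepping.
 *
 *	struct rseq_state state;
 *	intptr_t *targetptr, newval;
 *	bool done;
 *
 *	do {
 *		state = rseq_start(&rlock);
 *		targetptr = &percpu_count[rseq_cpu_at_start(state)];
 *		newval = *targetptr + 1;
 *		done = rseq_finish(&rlock, targetptr, newval, state);
 *	} while (!done);
 */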

static inline __attribute__((always_inline))
bool rseq_finish2(struct rseq_lock *rlock,
		intptr_t *p_spec, intptr_t to_write_spec,
		intptr_t *p_final, intptr_t to_write_final,
		struct rseq_state start_value)
{
	return __rseq_finish(rlock, p_spec, to_write_spec,
			NULL, NULL, 0,
			p_final, to_write_final, start_value,
			RSEQ_FINISH_TWO, false);
}
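
/*
 * Usage sketch (illustrative only): single-attempt push onto a
 * hypothetical per-cpu linked list with rseq_finish2(). The speculative
 * store links the new node to the current head; the final store
 * publishes it as the new head. In practice this sits in a retry loop
 * such as do_rseq2() below.
 *
 *	struct rseq_state state = rseq_start(&rlock);
 *	int cpu = rseq_cpu_at_start(state);
 *	bool ok = rseq_finish2(&rlock,
 *			(intptr_t *)&node->next,
 *			(intptr_t)list[cpu].head,
 *			(intptr_t *)&list[cpu].head,
 *			(intptr_t)node,
 *			state);
 */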

static inline __attribute__((always_inline))
bool rseq_finish2_release(struct rseq_lock *rlock,
		intptr_t *p_spec, intptr_t to_write_spec,
		intptr_t *p_final, intptr_t to_write_final,
		struct rseq_state start_value)
{
	return __rseq_finish(rlock, p_spec, to_write_spec,
			NULL, NULL, 0,
			p_final, to_write_final, start_value,
			RSEQ_FINISH_TWO, true);
}

static inline __attribute__((always_inline))
bool rseq_finish_memcpy(struct rseq_lock *rlock,
		void *p_memcpy, void *to_write_memcpy, size_t len_memcpy,
		intptr_t *p_final, intptr_t to_write_final,
		struct rseq_state start_value)
{
	return __rseq_finish(rlock, NULL, 0,
			p_memcpy, to_write_memcpy, len_memcpy,
			p_final, to_write_final, start_value,
			RSEQ_FINISH_MEMCPY, false);
}

static inline __attribute__((always_inline))
bool rseq_finish_memcpy_release(struct rseq_lock *rlock,
		void *p_memcpy, void *to_write_memcpy, size_t len_memcpy,
		intptr_t *p_final, intptr_t to_write_final,
		struct rseq_state start_value)
{
	return __rseq_finish(rlock, NULL, 0,
			p_memcpy, to_write_memcpy, len_memcpy,
			p_final, to_write_final, start_value,
			RSEQ_FINISH_MEMCPY, true);
}

#define __rseq_store_RSEQ_FINISH_SINGLE(_targetptr_spec, _newval_spec,	\
		_dest_memcpy, _src_memcpy, _len_memcpy,			\
		_targetptr_final, _newval_final)			\
	do {								\
		*(_targetptr_final) = (_newval_final);			\
	} while (0)

#define __rseq_store_RSEQ_FINISH_TWO(_targetptr_spec, _newval_spec,	\
		_dest_memcpy, _src_memcpy, _len_memcpy,			\
		_targetptr_final, _newval_final)			\
	do {								\
		*(_targetptr_spec) = (_newval_spec);			\
		*(_targetptr_final) = (_newval_final);			\
	} while (0)

#define __rseq_store_RSEQ_FINISH_MEMCPY(_targetptr_spec,		\
		_newval_spec, _dest_memcpy, _src_memcpy, _len_memcpy,	\
		_targetptr_final, _newval_final)			\
	do {								\
		memcpy(_dest_memcpy, _src_memcpy, _len_memcpy);		\
		*(_targetptr_final) = (_newval_final);			\
	} while (0)

/*
 * Helper macro doing two restartable critical section attempts, and if
 * they fail, fall back on locking.
 */
#define __do_rseq(_type, _lock, _rseq_state, _cpu, _result,		\
		_targetptr_spec, _newval_spec,				\
		_dest_memcpy, _src_memcpy, _len_memcpy,			\
		_targetptr_final, _newval_final, _code, _release)	\
	do {								\
		_rseq_state = rseq_start(_lock);			\
		_cpu = rseq_cpu_at_start(_rseq_state);			\
		_result = true;						\
		_code							\
		if (unlikely(!_result))					\
			break;						\
		if (likely(__rseq_finish(_lock,				\
				_targetptr_spec, _newval_spec,		\
				_dest_memcpy, _src_memcpy, _len_memcpy,	\
				_targetptr_final, _newval_final,	\
				_rseq_state, _type, _release)))		\
			break;						\
		_rseq_state = rseq_start(_lock);			\
		_cpu = rseq_cpu_at_start(_rseq_state);			\
		_result = true;						\
		_code							\
		if (unlikely(!_result))					\
			break;						\
		if (likely(__rseq_finish(_lock,				\
				_targetptr_spec, _newval_spec,		\
				_dest_memcpy, _src_memcpy, _len_memcpy,	\
				_targetptr_final, _newval_final,	\
				_rseq_state, _type, _release)))		\
			break;						\
		_cpu = rseq_fallback_begin(_lock);			\
		_result = true;						\
		_code							\
		if (likely(_result))					\
			__rseq_store_##_type(_targetptr_spec,		\
				_newval_spec, _dest_memcpy,		\
				_src_memcpy, _len_memcpy,		\
				_targetptr_final, _newval_final);	\
		rseq_fallback_end(_lock, _cpu);				\
	} while (0)

#define do_rseq(_lock, _rseq_state, _cpu, _result, _targetptr, _newval,	\
		_code)							\
	__do_rseq(RSEQ_FINISH_SINGLE, _lock, _rseq_state, _cpu, _result,\
		NULL, 0, NULL, NULL, 0, _targetptr, _newval, _code, false)

#define do_rseq2(_lock, _rseq_state, _cpu, _result,			\
		_targetptr_spec, _newval_spec,				\
		_targetptr_final, _newval_final, _code)			\
	__do_rseq(RSEQ_FINISH_TWO, _lock, _rseq_state, _cpu, _result,	\
		_targetptr_spec, _newval_spec,				\
		NULL, NULL, 0,						\
		_targetptr_final, _newval_final, _code, false)

#define do_rseq2_release(_lock, _rseq_state, _cpu, _result,		\
		_targetptr_spec, _newval_spec,				\
		_targetptr_final, _newval_final, _code)			\
	__do_rseq(RSEQ_FINISH_TWO, _lock, _rseq_state, _cpu, _result,	\
		_targetptr_spec, _newval_spec,				\
		NULL, NULL, 0,						\
		_targetptr_final, _newval_final, _code, true)

#define do_rseq_memcpy(_lock, _rseq_state, _cpu, _result,		\
		_dest_memcpy, _src_memcpy, _len_memcpy,			\
		_targetptr_final, _newval_final, _code)			\
	__do_rseq(RSEQ_FINISH_MEMCPY, _lock, _rseq_state, _cpu, _result,\
		NULL, 0,						\
		_dest_memcpy, _src_memcpy, _len_memcpy,			\
		_targetptr_final, _newval_final, _code, false)

#define do_rseq_memcpy_release(_lock, _rseq_state, _cpu, _result,	\
		_dest_memcpy, _src_memcpy, _len_memcpy,			\
		_targetptr_final, _newval_final, _code)			\
	__do_rseq(RSEQ_FINISH_MEMCPY, _lock, _rseq_state, _cpu, _result,\
		NULL, 0,						\
		_dest_memcpy, _src_memcpy, _len_memcpy,			\
		_targetptr_final, _newval_final, _code, true)
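
/*
 * Usage sketch (illustrative only): per-cpu counter increment with
 * do_rseq(). The _code block runs within the critical section, reads
 * the per-cpu state, and prepares the final store; it may set _result
 * to false to abort the attempt. "percpu_count" and "rlock" are
 * hypothetical names; CPU_SETSIZE comes from <sched.h>.
 *
 *	static struct rseq_lock rlock;
 *	static intptr_t percpu_count[CPU_SETSIZE];
 *
 *	static void inc_percpu_count(void)
 *	{
 *		struct rseq_state rseq_state;
 *		intptr_t *targetptr, newval;
 *		int cpu;
 *		bool result;
 *
 *		do_rseq(&rlock, rseq_state, cpu, result,
 *			targetptr, newval,
 *			{
 *				newval = percpu_count[cpu] + 1;
 *				targetptr = &percpu_count[cpu];
 *			});
 *	}
 */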

#endif /* RSEQ_H */