/*
 * (C) Copyright 2016 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

#include <urcu/compiler.h>
#include <urcu/system.h>
#include <urcu/arch.h>

#include "linux-rseq-abi.h"
/*
 * Empty code injection macros, override when testing.
 * It is important to consider that the ASM injection macros need to be
 * fully reentrant (e.g. do not modify the stack).
 */
#ifndef RSEQ_INJECT_ASM
#define RSEQ_INJECT_ASM(n)
#endif

#ifndef RSEQ_INJECT_C
#define RSEQ_INJECT_C(n)
#endif

#ifndef RSEQ_INJECT_INPUT
#define RSEQ_INJECT_INPUT
#endif

#ifndef RSEQ_INJECT_CLOBBER
#define RSEQ_INJECT_CLOBBER
#endif

#ifndef RSEQ_INJECT_FAILED
#define RSEQ_INJECT_FAILED
#endif

/* Number of rseq retries before falling back (used by the fallback path). */
#ifndef RSEQ_FALLBACK_CNT
#define RSEQ_FALLBACK_CNT 3
#endif
/*
 * Per-thread rseq area shared with the kernel; sampled by rseq_start()
 * and rseq_current_cpu_raw(). Defined in the corresponding .c file.
 */
extern __thread volatile struct rseq __rseq_abi;
73 #if defined(__x86_64__) || defined(__i386__)
75 #elif defined(__ARMEL__)
77 #elif defined(__PPC__)
80 #error unsupported target
/* State returned by rseq_start, passed as argument to rseq_finish. */
struct rseq_state {
	volatile struct rseq *rseqp;	/* this thread's rseq area */
	int32_t cpu_id;			/* cpu_id at start. */
	uint32_t event_counter;		/* event_counter at start. */
};
/*
 * Register rseq for the current thread. This needs to be called once
 * by any thread which uses restartable sequences, before they start
 * using restartable sequences.
 */
int rseq_register_current_thread(void);

/*
 * Unregister rseq for current thread.
 */
int rseq_unregister_current_thread(void);

/*
 * Restartable sequence fallback for reading the current CPU number.
 */
int rseq_fallback_current_cpu(void);
107 static inline int32_t rseq_cpu_at_start(struct rseq_state start_value
)
109 return start_value
.cpu_id
;
112 static inline int32_t rseq_current_cpu_raw(void)
114 return CMM_LOAD_SHARED(__rseq_abi
.u
.e
.cpu_id
);
/*
 * Return the current CPU number, using rseq_fallback_current_cpu()
 * whenever the raw per-thread value is negative (thread not registered).
 */
static inline int32_t rseq_current_cpu(void)
{
	int32_t cpu;

	cpu = rseq_current_cpu_raw();
	if (caa_unlikely(cpu < 0))
		cpu = rseq_fallback_current_cpu();
	return cpu;
}
127 static inline __attribute__((always_inline
))
128 struct rseq_state
rseq_start(void)
130 struct rseq_state result
;
132 result
.rseqp
= &__rseq_abi
;
133 if (has_single_copy_load_64()) {
134 union rseq_cpu_event u
;
136 u
.v
= CMM_LOAD_SHARED(result
.rseqp
->u
.v
);
137 result
.event_counter
= u
.e
.event_counter
;
138 result
.cpu_id
= u
.e
.cpu_id
;
140 result
.event_counter
=
141 CMM_LOAD_SHARED(result
.rseqp
->u
.e
.event_counter
);
142 /* load event_counter before cpu_id. */
144 result
.cpu_id
= CMM_LOAD_SHARED(result
.rseqp
->u
.e
.cpu_id
);
147 * Read event counter before lock state and cpu_id. This ensures
148 * that when the state changes from RESTART to LOCK, if we have
149 * some threads that have already seen the RESTART still in
150 * flight, they will necessarily be preempted/signalled before a
151 * thread can see the LOCK state for that same CPU. That
152 * preemption/signalling will cause them to restart, so they
153 * don't interfere with the lock.
158 * Ensure the compiler does not re-order loads of protected
159 * values before we load the event counter.
/*
 * Kind of commit performed by __rseq_finish(). Members reconstructed
 * from the switch cases in __rseq_finish() — confirm against upstream.
 */
enum rseq_finish_type {
	RSEQ_FINISH_SINGLE,	/* single final store */
	RSEQ_FINISH_TWO,	/* speculative store + final store */
	RSEQ_FINISH_MEMCPY,	/* speculative memcpy + final store */
};
172 * p_spec and to_write_spec are used for a speculative write attempted
173 * near the end of the restartable sequence. A rseq_finish2 may fail
174 * even after this write takes place.
176 * p_final and to_write_final are used for the final write. If this
177 * write takes place, the rseq_finish2 is guaranteed to succeed.
179 static inline __attribute__((always_inline
))
180 bool __rseq_finish(intptr_t *p_spec
, intptr_t to_write_spec
,
181 void *p_memcpy
, void *to_write_memcpy
, size_t len_memcpy
,
182 intptr_t *p_final
, intptr_t to_write_final
,
183 struct rseq_state start_value
,
184 enum rseq_finish_type type
, bool release
)
189 case RSEQ_FINISH_SINGLE
:
190 RSEQ_FINISH_ASM(p_final
, to_write_final
, start_value
, failure
,
191 /* no speculative write */, /* no speculative write */,
192 RSEQ_FINISH_FINAL_STORE_ASM(),
193 RSEQ_FINISH_FINAL_STORE_INPUT(p_final
, to_write_final
),
194 /* no extra clobber */, /* no arg */, /* no arg */,
198 case RSEQ_FINISH_TWO
:
200 RSEQ_FINISH_ASM(p_final
, to_write_final
, start_value
, failure
,
201 RSEQ_FINISH_SPECULATIVE_STORE_ASM(),
202 RSEQ_FINISH_SPECULATIVE_STORE_INPUT(p_spec
, to_write_spec
),
203 RSEQ_FINISH_FINAL_STORE_RELEASE_ASM(),
204 RSEQ_FINISH_FINAL_STORE_INPUT(p_final
, to_write_final
),
205 /* no extra clobber */, /* no arg */, /* no arg */,
209 RSEQ_FINISH_ASM(p_final
, to_write_final
, start_value
, failure
,
210 RSEQ_FINISH_SPECULATIVE_STORE_ASM(),
211 RSEQ_FINISH_SPECULATIVE_STORE_INPUT(p_spec
, to_write_spec
),
212 RSEQ_FINISH_FINAL_STORE_ASM(),
213 RSEQ_FINISH_FINAL_STORE_INPUT(p_final
, to_write_final
),
214 /* no extra clobber */, /* no arg */, /* no arg */,
219 case RSEQ_FINISH_MEMCPY
:
221 RSEQ_FINISH_ASM(p_final
, to_write_final
, start_value
, failure
,
222 RSEQ_FINISH_MEMCPY_STORE_ASM(),
223 RSEQ_FINISH_MEMCPY_STORE_INPUT(p_memcpy
, to_write_memcpy
, len_memcpy
),
224 RSEQ_FINISH_FINAL_STORE_RELEASE_ASM(),
225 RSEQ_FINISH_FINAL_STORE_INPUT(p_final
, to_write_final
),
226 RSEQ_FINISH_MEMCPY_CLOBBER(),
227 RSEQ_FINISH_MEMCPY_SETUP(),
228 RSEQ_FINISH_MEMCPY_TEARDOWN(),
229 RSEQ_FINISH_MEMCPY_SCRATCH()
232 RSEQ_FINISH_ASM(p_final
, to_write_final
, start_value
, failure
,
233 RSEQ_FINISH_MEMCPY_STORE_ASM(),
234 RSEQ_FINISH_MEMCPY_STORE_INPUT(p_memcpy
, to_write_memcpy
, len_memcpy
),
235 RSEQ_FINISH_FINAL_STORE_ASM(),
236 RSEQ_FINISH_FINAL_STORE_INPUT(p_final
, to_write_final
),
237 RSEQ_FINISH_MEMCPY_CLOBBER(),
238 RSEQ_FINISH_MEMCPY_SETUP(),
239 RSEQ_FINISH_MEMCPY_TEARDOWN(),
240 RSEQ_FINISH_MEMCPY_SCRATCH()
251 static inline __attribute__((always_inline
))
252 bool rseq_finish(intptr_t *p
, intptr_t to_write
,
253 struct rseq_state start_value
)
255 return __rseq_finish(NULL
, 0,
257 p
, to_write
, start_value
,
258 RSEQ_FINISH_SINGLE
, false);
261 static inline __attribute__((always_inline
))
262 bool rseq_finish2(intptr_t *p_spec
, intptr_t to_write_spec
,
263 intptr_t *p_final
, intptr_t to_write_final
,
264 struct rseq_state start_value
)
266 return __rseq_finish(p_spec
, to_write_spec
,
268 p_final
, to_write_final
, start_value
,
269 RSEQ_FINISH_TWO
, false);
272 static inline __attribute__((always_inline
))
273 bool rseq_finish2_release(intptr_t *p_spec
, intptr_t to_write_spec
,
274 intptr_t *p_final
, intptr_t to_write_final
,
275 struct rseq_state start_value
)
277 return __rseq_finish(p_spec
, to_write_spec
,
279 p_final
, to_write_final
, start_value
,
280 RSEQ_FINISH_TWO
, true);
283 static inline __attribute__((always_inline
))
284 bool rseq_finish_memcpy(void *p_memcpy
, void *to_write_memcpy
,
285 size_t len_memcpy
, intptr_t *p_final
, intptr_t to_write_final
,
286 struct rseq_state start_value
)
288 return __rseq_finish(NULL
, 0,
289 p_memcpy
, to_write_memcpy
, len_memcpy
,
290 p_final
, to_write_final
, start_value
,
291 RSEQ_FINISH_MEMCPY
, false);
294 static inline __attribute__((always_inline
))
295 bool rseq_finish_memcpy_release(void *p_memcpy
, void *to_write_memcpy
,
296 size_t len_memcpy
, intptr_t *p_final
, intptr_t to_write_final
,
297 struct rseq_state start_value
)
299 return __rseq_finish(NULL
, 0,
300 p_memcpy
, to_write_memcpy
, len_memcpy
,
301 p_final
, to_write_final
, start_value
,
302 RSEQ_FINISH_MEMCPY
, true);