Use rseq for cpu_id in libringbuffer
lttng-ust.git: libringbuffer/rseq.h
/*
 * rseq.h
 *
 * (C) Copyright 2016 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef RSEQ_H
#define RSEQ_H

#include <stdint.h>
#include <stdbool.h>
#include <pthread.h>
#include <signal.h>
#include <sched.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <urcu/compiler.h>
#include <urcu/system.h>
#include <urcu/arch.h>
#include <lttng/ringbuffer-config.h>	/* for struct lttng_rseq_state */
#include "linux-rseq-abi.h"

/*
 * Empty code injection macros; override them when testing.
 * Note that the ASM injection macros must be fully reentrant
 * (e.g. they must not modify the stack).
 */
#ifndef RSEQ_INJECT_ASM
#define RSEQ_INJECT_ASM(n)
#endif

#ifndef RSEQ_INJECT_C
#define RSEQ_INJECT_C(n)
#endif

#ifndef RSEQ_INJECT_INPUT
#define RSEQ_INJECT_INPUT
#endif

#ifndef RSEQ_INJECT_CLOBBER
#define RSEQ_INJECT_CLOBBER
#endif

#ifndef RSEQ_INJECT_FAILED
#define RSEQ_INJECT_FAILED
#endif

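/*
 * For example, a test build could override the C injection point to
 * exercise preemption at a given site (a sketch; "rseq_test_yield" is
 * a hypothetical test hook, not provided by this header):
 *
 *	#define RSEQ_INJECT_C(n)	rseq_test_yield(n);
 *
 * ASM overrides must additionally be fully reentrant, as noted above.
 */
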
#ifndef RSEQ_FALLBACK_CNT
#define RSEQ_FALLBACK_CNT 3
#endif

extern __thread volatile struct rseq __rseq_abi;

#if defined(__x86_64__) || defined(__i386__)
#include "rseq-x86.h"
#ifdef __NR_rseq
#define ARCH_HAS_RSEQ 1
#endif
#elif defined(__ARMEL__)
#include "rseq-arm.h"
#ifdef __NR_rseq
#define ARCH_HAS_RSEQ 1
#endif
#elif defined(__PPC__)
#include "rseq-ppc.h"
#ifdef __NR_rseq
#define ARCH_HAS_RSEQ 1
#endif
#else
#error unsupported target
#endif

/*
 * Register rseq for the current thread. This must be called once by
 * each thread which uses restartable sequences, before it starts using
 * them.
 */
int rseq_register_current_thread(void);

/*
 * Unregister rseq for the current thread.
 */
int rseq_unregister_current_thread(void);

void rseq_init(void);
void rseq_destroy(void);

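/*
 * Typical per-thread usage (an illustrative sketch, not part of this
 * API; "worker" is a hypothetical thread function):
 *
 *	void *worker(void *arg)
 *	{
 *		if (rseq_register_current_thread())
 *			return NULL;	// fall back to a non-rseq path
 *		// ... rseq_start()/rseq_finish() critical sections ...
 *		(void) rseq_unregister_current_thread();
 *		return NULL;
 *	}
 */
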
static inline int32_t rseq_cpu_at_start(struct lttng_rseq_state start_value)
{
	return start_value.cpu_id;
}

static inline int32_t rseq_current_cpu_raw(void)
{
	return CMM_LOAD_SHARED(__rseq_abi.u.e.cpu_id);
}

#ifdef ARCH_HAS_RSEQ
static inline __attribute__((always_inline))
struct lttng_rseq_state rseq_start(void)
{
	struct lttng_rseq_state result;

	result.rseqp = &__rseq_abi;
	if (has_single_copy_load_64()) {
		union rseq_cpu_event u;

		u.v = CMM_LOAD_SHARED(result.rseqp->u.v);
		result.event_counter = u.e.event_counter;
		result.cpu_id = u.e.cpu_id;
	} else {
		result.event_counter =
			CMM_LOAD_SHARED(result.rseqp->u.e.event_counter);
		/* load event_counter before cpu_id. */
		RSEQ_INJECT_C(6)
		result.cpu_id = CMM_LOAD_SHARED(result.rseqp->u.e.cpu_id);
	}
	/*
	 * Read event counter before lock state and cpu_id. This ensures
	 * that when the state changes from RESTART to LOCK, if we have
	 * some threads that have already seen the RESTART still in
	 * flight, they will necessarily be preempted/signalled before a
	 * thread can see the LOCK state for that same CPU. That
	 * preemption/signalling will cause them to restart, so they
	 * don't interfere with the lock.
	 */
	RSEQ_INJECT_C(7)

	/*
	 * Ensure the compiler does not re-order loads of protected
	 * values before we load the event counter.
	 */
	cmm_barrier();
	return result;
}
#else
static inline __attribute__((always_inline))
struct lttng_rseq_state rseq_start(void)
{
	struct lttng_rseq_state result = {
		.cpu_id = -2,	/* rseq unavailable: callers take the fallback path. */
	};
	return result;
}
#endif

enum rseq_finish_type {
	RSEQ_FINISH_SINGLE,
	RSEQ_FINISH_TWO,
	RSEQ_FINISH_MEMCPY,
};

/*
 * p_spec and to_write_spec are used for a speculative write attempted
 * near the end of the restartable sequence. An rseq_finish2 may fail
 * even after this write takes place.
 *
 * p_final and to_write_final are used for the final write. If this
 * write takes place, the rseq_finish2 is guaranteed to succeed. See
 * the sketch after rseq_finish2 below for an illustration.
 */
#ifdef ARCH_HAS_RSEQ
static inline __attribute__((always_inline))
bool __rseq_finish(intptr_t *p_spec, intptr_t to_write_spec,
		void *p_memcpy, void *to_write_memcpy, size_t len_memcpy,
		intptr_t *p_final, intptr_t to_write_final,
		struct lttng_rseq_state start_value,
		enum rseq_finish_type type, bool release)
{
	RSEQ_INJECT_C(9)

	switch (type) {
	case RSEQ_FINISH_SINGLE:
		RSEQ_FINISH_ASM(p_final, to_write_final, start_value, failure,
			/* no speculative write */, /* no speculative write */,
			RSEQ_FINISH_FINAL_STORE_ASM(),
			RSEQ_FINISH_FINAL_STORE_INPUT(p_final, to_write_final),
			/* no extra clobber */, /* no arg */, /* no arg */,
			/* no arg */
		);
		break;
	case RSEQ_FINISH_TWO:
		if (release) {
			RSEQ_FINISH_ASM(p_final, to_write_final, start_value, failure,
				RSEQ_FINISH_SPECULATIVE_STORE_ASM(),
				RSEQ_FINISH_SPECULATIVE_STORE_INPUT(p_spec, to_write_spec),
				RSEQ_FINISH_FINAL_STORE_RELEASE_ASM(),
				RSEQ_FINISH_FINAL_STORE_INPUT(p_final, to_write_final),
				/* no extra clobber */, /* no arg */, /* no arg */,
				/* no arg */
			);
		} else {
			RSEQ_FINISH_ASM(p_final, to_write_final, start_value, failure,
				RSEQ_FINISH_SPECULATIVE_STORE_ASM(),
				RSEQ_FINISH_SPECULATIVE_STORE_INPUT(p_spec, to_write_spec),
				RSEQ_FINISH_FINAL_STORE_ASM(),
				RSEQ_FINISH_FINAL_STORE_INPUT(p_final, to_write_final),
				/* no extra clobber */, /* no arg */, /* no arg */,
				/* no arg */
			);
		}
		break;
	case RSEQ_FINISH_MEMCPY:
		if (release) {
			RSEQ_FINISH_ASM(p_final, to_write_final, start_value, failure,
				RSEQ_FINISH_MEMCPY_STORE_ASM(),
				RSEQ_FINISH_MEMCPY_STORE_INPUT(p_memcpy, to_write_memcpy, len_memcpy),
				RSEQ_FINISH_FINAL_STORE_RELEASE_ASM(),
				RSEQ_FINISH_FINAL_STORE_INPUT(p_final, to_write_final),
				RSEQ_FINISH_MEMCPY_CLOBBER(),
				RSEQ_FINISH_MEMCPY_SETUP(),
				RSEQ_FINISH_MEMCPY_TEARDOWN(),
				RSEQ_FINISH_MEMCPY_SCRATCH()
			);
		} else {
			RSEQ_FINISH_ASM(p_final, to_write_final, start_value, failure,
				RSEQ_FINISH_MEMCPY_STORE_ASM(),
				RSEQ_FINISH_MEMCPY_STORE_INPUT(p_memcpy, to_write_memcpy, len_memcpy),
				RSEQ_FINISH_FINAL_STORE_ASM(),
				RSEQ_FINISH_FINAL_STORE_INPUT(p_final, to_write_final),
				RSEQ_FINISH_MEMCPY_CLOBBER(),
				RSEQ_FINISH_MEMCPY_SETUP(),
				RSEQ_FINISH_MEMCPY_TEARDOWN(),
				RSEQ_FINISH_MEMCPY_SCRATCH()
			);
		}
		break;
	}
	return true;
failure:
	RSEQ_INJECT_FAILED
	return false;
}
#else
static inline __attribute__((always_inline))
bool __rseq_finish(intptr_t *p_spec, intptr_t to_write_spec,
		void *p_memcpy, void *to_write_memcpy, size_t len_memcpy,
		intptr_t *p_final, intptr_t to_write_final,
		struct lttng_rseq_state start_value,
		enum rseq_finish_type type, bool release)
{
	/* rseq unavailable: always fail so callers use their fallback. */
	return false;
}
#endif

static inline __attribute__((always_inline))
bool rseq_finish(intptr_t *p, intptr_t to_write,
		struct lttng_rseq_state start_value)
{
	return __rseq_finish(NULL, 0,
			NULL, NULL, 0,
			p, to_write, start_value,
			RSEQ_FINISH_SINGLE, false);
}

static inline __attribute__((always_inline))
bool rseq_finish2(intptr_t *p_spec, intptr_t to_write_spec,
		intptr_t *p_final, intptr_t to_write_final,
		struct lttng_rseq_state start_value)
{
	return __rseq_finish(p_spec, to_write_spec,
			NULL, NULL, 0,
			p_final, to_write_final, start_value,
			RSEQ_FINISH_TWO, false);
}

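/*
 * Illustrative sketch of the speculative/final pair: speculatively
 * publish a payload pointer, then commit the offset ("slot" and
 * "offset" are hypothetical per-cpu arrays, not part of this header):
 *
 *	do {
 *		rs = rseq_start();
 *		cpu = rseq_cpu_at_start(rs);
 *		if (cpu < 0)
 *			break;	// rseq unusable, take a fallback path
 *	} while (!rseq_finish2(&slot[cpu], (intptr_t) payload,
 *			&offset[cpu], offset[cpu] + 1, rs));
 */
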
static inline __attribute__((always_inline))
bool rseq_finish2_release(intptr_t *p_spec, intptr_t to_write_spec,
		intptr_t *p_final, intptr_t to_write_final,
		struct lttng_rseq_state start_value)
{
	return __rseq_finish(p_spec, to_write_spec,
			NULL, NULL, 0,
			p_final, to_write_final, start_value,
			RSEQ_FINISH_TWO, true);
}

static inline __attribute__((always_inline))
bool rseq_finish_memcpy(void *p_memcpy, void *to_write_memcpy,
		size_t len_memcpy, intptr_t *p_final, intptr_t to_write_final,
		struct lttng_rseq_state start_value)
{
	return __rseq_finish(NULL, 0,
			p_memcpy, to_write_memcpy, len_memcpy,
			p_final, to_write_final, start_value,
			RSEQ_FINISH_MEMCPY, false);
}

static inline __attribute__((always_inline))
bool rseq_finish_memcpy_release(void *p_memcpy, void *to_write_memcpy,
		size_t len_memcpy, intptr_t *p_final, intptr_t to_write_final,
		struct lttng_rseq_state start_value)
{
	return __rseq_finish(NULL, 0,
			p_memcpy, to_write_memcpy, len_memcpy,
			p_final, to_write_final, start_value,
			RSEQ_FINISH_MEMCPY, true);
}

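/*
 * End-to-end sketch: incrementing a hypothetical per-cpu counter with
 * the start/finish pair ("percpu_count" and MAX_NR_CPUS are
 * illustrative, not part of this header). The loop retries until the
 * final store commits without the thread having been preempted,
 * signalled, or migrated in between.
 *
 *	static intptr_t percpu_count[MAX_NR_CPUS];	// hypothetical
 *
 *	static void inc_percpu_count(void)
 *	{
 *		struct lttng_rseq_state rs;
 *		int32_t cpu;
 *
 *		do {
 *			rs = rseq_start();
 *			cpu = rseq_cpu_at_start(rs);
 *			if (caa_unlikely(cpu < 0))
 *				return;	// rseq unusable, take a fallback path
 *		} while (!rseq_finish(&percpu_count[cpu],
 *				percpu_count[cpu] + 1, rs));
 *	}
 */
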
#endif /* RSEQ_H */