/* SPDX-License-Identifier: MIT */
/* SPDX-FileCopyrightText: 2016-2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> */

/*
 * rseq/rseq.h
 */

#ifndef _RSEQ_RSEQ_H
#define _RSEQ_RSEQ_H

#include <stdint.h>
#include <stdbool.h>
#include <pthread.h>
#include <signal.h>
#include <sched.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <assert.h>

#include <rseq/abi.h>
#include <rseq/compiler.h>
#include <rseq/inject.h>
#include <rseq/thread-pointer.h>
#include <rseq/utils.h>

enum rseq_mo {
        RSEQ_MO_RELAXED = 0,
        RSEQ_MO_CONSUME = 1,    /* Unused */
        RSEQ_MO_ACQUIRE = 2,    /* Unused */
        RSEQ_MO_RELEASE = 3,
        RSEQ_MO_ACQ_REL = 4,    /* Unused */
        RSEQ_MO_SEQ_CST = 5,    /* Unused */
};

enum rseq_percpu_mode {
        RSEQ_PERCPU_CPU_ID = 0,
        RSEQ_PERCPU_MM_CID = 1,
};

enum rseq_available_query {
        RSEQ_AVAILABLE_QUERY_KERNEL = 0,
        RSEQ_AVAILABLE_QUERY_LIBC = 1,
};

/*
 * User code can define RSEQ_GET_ABI_OVERRIDE to override the
 * rseq_get_abi() implementation, for instance to use glibc's symbols
 * directly.
 */
#ifndef RSEQ_GET_ABI_OVERRIDE

# ifdef __cplusplus
extern "C" {
# endif

/* Offset from the thread pointer to the rseq area. */
extern ptrdiff_t rseq_offset;

/*
 * Size of the registered rseq area. 0 if the registration was
 * unsuccessful.
 */
extern unsigned int rseq_size;

/* Flags used during rseq registration. */
extern unsigned int rseq_flags;

/*
 * rseq feature size supported by the kernel. 0 if the registration was
 * unsuccessful.
 */
extern unsigned int rseq_feature_size;

/*
 * Returns a pointer to the rseq area.
 */
static inline __attribute__((always_inline))
struct rseq_abi *rseq_get_abi(void)
{
        return (struct rseq_abi *) ((uintptr_t) rseq_thread_pointer() + rseq_offset);
}

# ifdef __cplusplus
}
# endif

#endif /* RSEQ_GET_ABI_OVERRIDE */


/*
 * Architecture specific.
 */
#include <rseq/arch.h>


#ifdef __cplusplus
extern "C" {
#endif

/*
 * Register rseq for the current thread. Each thread which uses
 * restartable sequences must call this once before it starts using
 * them, to ensure restartable sequences succeed. A restartable
 * sequence executed from a non-registered thread will always fail.
 */
int rseq_register_current_thread(void);

/*
 * Unregister rseq for the current thread.
 */
int rseq_unregister_current_thread(void);
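
/*
 * Example (illustrative sketch, not part of the API): a thread entry
 * point registering rseq before using restartable sequences and
 * unregistering before it exits. Error handling is kept minimal on
 * purpose.
 *
 *      static void *worker(void *arg)
 *      {
 *              if (rseq_register_current_thread())
 *                      abort();        // Registration failed.
 *              // ... use restartable sequences ...
 *              if (rseq_unregister_current_thread())
 *                      abort();
 *              return NULL;
 *      }
 */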

/*
 * Restartable sequence fallback for reading the current CPU number.
 */
int32_t rseq_fallback_current_cpu(void);

/*
 * Restartable sequence fallback for reading the current node number.
 */
int32_t rseq_fallback_current_node(void);

/*
 * Returns true if rseq is supported for the given query
 * (RSEQ_AVAILABLE_QUERY_KERNEL or RSEQ_AVAILABLE_QUERY_LIBC).
 */
bool rseq_available(unsigned int query);
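
/*
 * Example (illustrative): query whether the kernel supports rseq before
 * relying on registration.
 *
 *      if (!rseq_available(RSEQ_AVAILABLE_QUERY_KERNEL)) {
 *              // Fall back to a non-rseq code path.
 *      }
 */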

/*
 * Values returned can be either the current CPU number, -1 (rseq is
 * uninitialized), or -2 (rseq initialization has failed).
 */
static inline __attribute__((always_inline))
int32_t rseq_current_cpu_raw(void)
{
        return RSEQ_READ_ONCE(rseq_get_abi()->cpu_id);
}

/*
 * Returns a possible CPU number, which is typically the current CPU.
 * The returned CPU number can be used to prepare for an rseq critical
 * section, which will confirm whether the cpu number is indeed the
 * current one, and whether rseq is initialized.
 *
 * The CPU number returned by rseq_cpu_start should always be validated
 * by passing it to a rseq asm sequence, or by comparing it to the
 * return value of rseq_current_cpu_raw() if the rseq asm sequence
 * does not need to be invoked.
 */
static inline __attribute__((always_inline))
uint32_t rseq_cpu_start(void)
{
        return RSEQ_READ_ONCE(rseq_get_abi()->cpu_id_start);
}

static inline __attribute__((always_inline))
uint32_t rseq_current_cpu(void)
{
        int32_t cpu;

        cpu = rseq_current_cpu_raw();
        if (rseq_unlikely(cpu < 0))
                cpu = rseq_fallback_current_cpu();
        return cpu;
}
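
/*
 * Example (illustrative): rseq_cpu_start() provides a cheap CPU hint
 * which must still be validated, either by the rseq critical section
 * itself or against rseq_current_cpu_raw(). rseq_current_cpu() always
 * returns a usable CPU number, falling back to
 * rseq_fallback_current_cpu() when rseq is unavailable.
 *
 *      int cpu = rseq_cpu_start();     // Hint, possibly stale.
 *      // ... index per-CPU data with "cpu"; the rseq critical section
 *      // using it will fail if "cpu" is not the current CPU ...
 */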

static inline __attribute__((always_inline))
bool rseq_node_id_available(void)
{
        return (int) rseq_feature_size >= (int) rseq_offsetofend(struct rseq_abi, node_id);
}

/*
 * Current NUMA node number.
 */
static inline __attribute__((always_inline))
uint32_t rseq_current_node_id(void)
{
        assert(rseq_node_id_available());
        return RSEQ_READ_ONCE(rseq_get_abi()->node_id);
}

static inline __attribute__((always_inline))
bool rseq_mm_cid_available(void)
{
        return (int) rseq_feature_size >= (int) rseq_offsetofend(struct rseq_abi, mm_cid);
}

static inline __attribute__((always_inline))
uint32_t rseq_current_mm_cid(void)
{
        return RSEQ_READ_ONCE(rseq_get_abi()->mm_cid);
}
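
/*
 * Example (illustrative): choose the per-cpu indexing mode at runtime,
 * preferring memory-map concurrency ids when the kernel exposes them.
 *
 *      enum rseq_percpu_mode mode = rseq_mm_cid_available() ?
 *                      RSEQ_PERCPU_MM_CID : RSEQ_PERCPU_CPU_ID;
 */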

static inline __attribute__((always_inline))
void rseq_clear_rseq_cs(void)
{
        RSEQ_WRITE_ONCE(rseq_get_abi()->rseq_cs.arch.ptr, 0);
}

/*
 * rseq_prepare_unload() should be invoked by each thread executing a
 * rseq critical section at least once between its last critical
 * section and the unloading of the library which defines the rseq
 * critical section (struct rseq_cs) or the code referred to by the
 * struct rseq_cs start_ip and post_commit_offset fields. The same
 * applies to rseq use in JIT-generated code: each thread executing a
 * rseq critical section should invoke rseq_prepare_unload() at least
 * once before the memory holding the struct rseq_cs, or the code its
 * start_ip and post_commit_offset fields point to, is reclaimed.
 */
static inline __attribute__((always_inline))
void rseq_prepare_unload(void)
{
        rseq_clear_rseq_cs();
}
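
/*
 * Example (illustrative): before dlclose() of a shared object whose
 * code contains rseq critical sections, each thread which executed
 * those critical sections clears its rseq_cs pointer. "handle" is a
 * hypothetical dlopen() handle for the library being unloaded.
 *
 *      rseq_prepare_unload();
 *      dlclose(handle);
 */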

/*
 * Refer to rseq/pseudocode.h for documentation and pseudo-code of the
 * rseq critical section helpers.
 */
#include "rseq/pseudocode.h"

static inline __attribute__((always_inline))
int rseq_load_cbne_store__ptr(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
                              intptr_t *v, intptr_t expect,
                              intptr_t newv, int cpu)
{
        if (rseq_mo != RSEQ_MO_RELAXED)
                return -1;
        switch (percpu_mode) {
        case RSEQ_PERCPU_CPU_ID:
                return rseq_load_cbne_store__ptr_relaxed_cpu_id(v, expect, newv, cpu);
        case RSEQ_PERCPU_MM_CID:
                return rseq_load_cbne_store__ptr_relaxed_mm_cid(v, expect, newv, cpu);
        default:
                return -1;
        }
}
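
/*
 * Example (illustrative sketch): acquire a per-CPU lock word by storing
 * 1 only if it currently reads 0, retrying when the comparison fails or
 * the sequence is aborted (non-zero return). "lock" is a hypothetical
 * array of per-CPU intptr_t lock words; the thread is assumed to be
 * registered with rseq.
 *
 *      int ret;
 *
 *      do {
 *              int cpu = rseq_cpu_start();
 *
 *              ret = rseq_load_cbne_store__ptr(RSEQ_MO_RELAXED,
 *                              RSEQ_PERCPU_CPU_ID,
 *                              &lock[cpu], 0, 1, cpu);
 *      } while (ret);
 */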

static inline __attribute__((always_inline))
int rseq_load_cbeq_store_add_load_store__ptr(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
                                             intptr_t *v, intptr_t expectnot, long voffp, intptr_t *load,
                                             int cpu)
{
        if (rseq_mo != RSEQ_MO_RELAXED)
                return -1;
        switch (percpu_mode) {
        case RSEQ_PERCPU_CPU_ID:
                return rseq_load_cbeq_store_add_load_store__ptr_relaxed_cpu_id(v, expectnot, voffp, load, cpu);
        case RSEQ_PERCPU_MM_CID:
                return rseq_load_cbeq_store_add_load_store__ptr_relaxed_mm_cid(v, expectnot, voffp, load, cpu);
        default:
                return -1;
        }
}
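
/*
 * Example (illustrative sketch): pop the first node of a per-CPU LIFO
 * list. The helper fails when the head equals "expectnot" (NULL here);
 * otherwise it loads the current head into "head" and replaces the head
 * with the pointer found at offset "voffp" within it. "list_head" and
 * "struct node" are hypothetical.
 *
 *      struct node *head;
 *      int cpu = rseq_cpu_start();
 *      int ret;
 *
 *      ret = rseq_load_cbeq_store_add_load_store__ptr(RSEQ_MO_RELAXED,
 *                      RSEQ_PERCPU_CPU_ID,
 *                      (intptr_t *) &list_head[cpu], (intptr_t) NULL,
 *                      offsetof(struct node, next), (intptr_t *) &head,
 *                      cpu);
 *      // ret == 0: "head" holds the popped node. Non-zero: empty list
 *      // or aborted sequence; the caller typically retries on abort.
 */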

static inline __attribute__((always_inline))
int rseq_load_add_store__ptr(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
                             intptr_t *v, intptr_t count, int cpu)
{
        if (rseq_mo != RSEQ_MO_RELAXED)
                return -1;
        switch (percpu_mode) {
        case RSEQ_PERCPU_CPU_ID:
                return rseq_load_add_store__ptr_relaxed_cpu_id(v, count, cpu);
        case RSEQ_PERCPU_MM_CID:
                return rseq_load_add_store__ptr_relaxed_mm_cid(v, count, cpu);
        default:
                return -1;
        }
}
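
/*
 * Example (illustrative sketch): increment a per-CPU counter without
 * atomic instructions, retrying on abort. "counters" is a hypothetical
 * per-CPU array of intptr_t.
 *
 *      int ret;
 *
 *      do {
 *              int cpu = rseq_cpu_start();
 *
 *              ret = rseq_load_add_store__ptr(RSEQ_MO_RELAXED,
 *                              RSEQ_PERCPU_CPU_ID,
 *                              &counters[cpu], 1, cpu);
 *      } while (rseq_unlikely(ret));
 */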

#ifdef rseq_arch_has_load_add_load_load_add_store
static inline __attribute__((always_inline))
int rseq_load_add_load_load_add_store__ptr(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
                                           intptr_t *ptr, long off, intptr_t inc, int cpu)
{
        if (rseq_mo != RSEQ_MO_RELAXED)
                return -1;
        switch (percpu_mode) {
        case RSEQ_PERCPU_CPU_ID:
                return rseq_load_add_load_load_add_store__ptr_relaxed_cpu_id(ptr, off, inc, cpu);
        case RSEQ_PERCPU_MM_CID:
                return rseq_load_add_load_load_add_store__ptr_relaxed_mm_cid(ptr, off, inc, cpu);
        default:
                return -1;
        }
}
#endif
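
/*
 * Example (illustrative, heavily simplified): on architectures which
 * provide this helper, increment a counter reached through a pointer
 * indirection: the base pointer is loaded from "base_ptr", "off" bytes
 * are added to it, and the intptr_t at the resulting address is
 * incremented by "inc", all within one restartable sequence.
 * "base_ptr" is a hypothetical intptr_t holding the address of a
 * per-CPU counter array.
 *
 *      int cpu = rseq_cpu_start();
 *      int ret;
 *
 *      ret = rseq_load_add_load_load_add_store__ptr(RSEQ_MO_RELAXED,
 *                      RSEQ_PERCPU_CPU_ID, &base_ptr,
 *                      cpu * sizeof(intptr_t), 1, cpu);
 */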

static inline __attribute__((always_inline))
int rseq_load_cbne_store_store__ptr(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
                                    intptr_t *v, intptr_t expect,
                                    intptr_t *v2, intptr_t newv2,
                                    intptr_t newv, int cpu)
{
        switch (rseq_mo) {
        case RSEQ_MO_RELAXED:
                switch (percpu_mode) {
                case RSEQ_PERCPU_CPU_ID:
                        return rseq_load_cbne_store_store__ptr_relaxed_cpu_id(v, expect, v2, newv2, newv, cpu);
                case RSEQ_PERCPU_MM_CID:
                        return rseq_load_cbne_store_store__ptr_relaxed_mm_cid(v, expect, v2, newv2, newv, cpu);
                default:
                        return -1;
                }
        case RSEQ_MO_RELEASE:
                switch (percpu_mode) {
                case RSEQ_PERCPU_CPU_ID:
                        return rseq_load_cbne_store_store__ptr_release_cpu_id(v, expect, v2, newv2, newv, cpu);
                case RSEQ_PERCPU_MM_CID:
                        return rseq_load_cbne_store_store__ptr_release_mm_cid(v, expect, v2, newv2, newv, cpu);
                default:
                        return -1;
                }
        case RSEQ_MO_ACQUIRE:   /* Fallthrough */
        case RSEQ_MO_ACQ_REL:   /* Fallthrough */
        case RSEQ_MO_CONSUME:   /* Fallthrough */
        case RSEQ_MO_SEQ_CST:   /* Fallthrough */
        default:
                return -1;
        }
}
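
/*
 * Example (illustrative sketch): push an item into a per-CPU array
 * buffer. The item pointer is stored speculatively into the free slot
 * (second store target), then the offset is updated as the commit
 * store, with release ordering so that readers observing the new
 * offset also observe the stored item. "buf" (a hypothetical per-CPU
 * structure with "offset" and "slots[]" fields) and "item" are
 * assumptions; retry by re-reading the offset and cpu when the return
 * value is non-zero.
 *
 *      int cpu = rseq_cpu_start();
 *      intptr_t offset = RSEQ_READ_ONCE(buf[cpu].offset);
 *      int ret;
 *
 *      ret = rseq_load_cbne_store_store__ptr(RSEQ_MO_RELEASE,
 *                      RSEQ_PERCPU_CPU_ID,
 *                      &buf[cpu].offset, offset,
 *                      (intptr_t *) &buf[cpu].slots[offset],
 *                      (intptr_t) item,
 *                      offset + 1, cpu);
 */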

static inline __attribute__((always_inline))
int rseq_load_cbne_load_cbne_store__ptr(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
                                        intptr_t *v, intptr_t expect,
                                        intptr_t *v2, intptr_t expect2,
                                        intptr_t newv, int cpu)
{
        if (rseq_mo != RSEQ_MO_RELAXED)
                return -1;
        switch (percpu_mode) {
        case RSEQ_PERCPU_CPU_ID:
                return rseq_load_cbne_load_cbne_store__ptr_relaxed_cpu_id(v, expect, v2, expect2, newv, cpu);
        case RSEQ_PERCPU_MM_CID:
                return rseq_load_cbne_load_cbne_store__ptr_relaxed_mm_cid(v, expect, v2, expect2, newv, cpu);
        default:
                return -1;
        }
}
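
/*
 * Example (illustrative sketch): store "newval" into a per-CPU slot
 * only if both the slot and a separate guard word (e.g. a generation
 * counter) still hold their expected values. "slot", "gen", "expected",
 * "gen_snapshot" and "newval" are hypothetical.
 *
 *      ret = rseq_load_cbne_load_cbne_store__ptr(RSEQ_MO_RELAXED,
 *                      RSEQ_PERCPU_CPU_ID,
 *                      &slot[cpu], expected,
 *                      &gen[cpu], gen_snapshot,
 *                      newval, cpu);
 */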

static inline __attribute__((always_inline))
int rseq_load_cbne_memcpy_store__ptr(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
                                     intptr_t *v, intptr_t expect,
                                     void *dst, void *src, size_t len,
                                     intptr_t newv, int cpu)
{
        switch (rseq_mo) {
        case RSEQ_MO_RELAXED:
                switch (percpu_mode) {
                case RSEQ_PERCPU_CPU_ID:
                        return rseq_load_cbne_memcpy_store__ptr_relaxed_cpu_id(v, expect, dst, src, len, newv, cpu);
                case RSEQ_PERCPU_MM_CID:
                        return rseq_load_cbne_memcpy_store__ptr_relaxed_mm_cid(v, expect, dst, src, len, newv, cpu);
                default:
                        return -1;
                }
        case RSEQ_MO_RELEASE:
                switch (percpu_mode) {
                case RSEQ_PERCPU_CPU_ID:
                        return rseq_load_cbne_memcpy_store__ptr_release_cpu_id(v, expect, dst, src, len, newv, cpu);
                case RSEQ_PERCPU_MM_CID:
                        return rseq_load_cbne_memcpy_store__ptr_release_mm_cid(v, expect, dst, src, len, newv, cpu);
                default:
                        return -1;
                }
        case RSEQ_MO_ACQUIRE:   /* Fallthrough */
        case RSEQ_MO_ACQ_REL:   /* Fallthrough */
        case RSEQ_MO_CONSUME:   /* Fallthrough */
        case RSEQ_MO_SEQ_CST:   /* Fallthrough */
        default:
                return -1;
        }
}
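
/*
 * Example (illustrative sketch): copy an element by value into a
 * per-CPU ring, then commit the new offset, similarly to the two-store
 * helper above but for arbitrary-size elements. "ring" (a hypothetical
 * per-CPU structure with "offset" and "data[]" fields) and "elem" are
 * assumptions; retry when the return value is non-zero.
 *
 *      int cpu = rseq_cpu_start();
 *      intptr_t offset = RSEQ_READ_ONCE(ring[cpu].offset);
 *      int ret;
 *
 *      ret = rseq_load_cbne_memcpy_store__ptr(RSEQ_MO_RELEASE,
 *                      RSEQ_PERCPU_CPU_ID,
 *                      &ring[cpu].offset, offset,
 *                      &ring[cpu].data[offset * sizeof(elem)], &elem,
 *                      sizeof(elem), offset + 1, cpu);
 */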

#ifdef __cplusplus
}
#endif

#endif /* _RSEQ_RSEQ_H */