[libside.git] / src / rcu.h
// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _SIDE_RCU_H
#define _SIDE_RCU_H

#include <sched.h>
#include <stdint.h>
#include <pthread.h>
#include <stdbool.h>
#include <poll.h>

#define SIDE_CACHE_LINE_SIZE 256

struct side_rcu_percpu_count {
	uintptr_t begin;
	uintptr_t end;
} __attribute__((__aligned__(SIDE_CACHE_LINE_SIZE)));

struct side_rcu_cpu_gp_state {
	struct side_rcu_percpu_count count[2];
};

struct side_rcu_gp_state {
	struct side_rcu_cpu_gp_state *percpu_state;
	int nr_cpus;
	unsigned int period;
	pthread_mutex_t gp_lock;
};

//TODO: replace atomics by rseq (when available)
//TODO: replace acquire/release by membarrier+compiler barrier (when available)
//TODO: implement wait/wakeup for grace period using sys_futex
static inline
unsigned int side_rcu_read_begin(struct side_rcu_gp_state *gp_state)
{
	int cpu = sched_getcpu();
	unsigned int period = __atomic_load_n(&gp_state->period, __ATOMIC_RELAXED);

	if (cpu < 0)
		cpu = 0;
	/*
	 * This memory barrier (A) ensures that the contents of the
	 * read-side critical section do not leak before the "begin"
	 * counter increment. It pairs with memory barriers (D) and (E).
	 *
	 * This memory barrier (A) also ensures that the "begin"
	 * increment is before the "end" increment. It pairs with memory
	 * barrier (C). It is redundant with memory barrier (B) for that
	 * purpose.
	 */
	(void) __atomic_add_fetch(&gp_state->percpu_state[cpu].count[period].begin, 1, __ATOMIC_SEQ_CST);
	return period;
}

static inline
void side_rcu_read_end(struct side_rcu_gp_state *gp_state, unsigned int period)
{
	int cpu = sched_getcpu();

	if (cpu < 0)
		cpu = 0;
	/*
	 * This memory barrier (B) ensures that the contents of the
	 * read-side critical section do not leak after the "end"
	 * counter increment. It pairs with memory barriers (D) and (E).
	 *
	 * This memory barrier (B) also ensures that the "begin"
	 * increment is before the "end" increment. It pairs with memory
	 * barrier (C). It is redundant with memory barrier (A) for that
	 * purpose.
	 */
	(void) __atomic_add_fetch(&gp_state->percpu_state[cpu].count[period].end, 1, __ATOMIC_SEQ_CST);
}

#define side_rcu_dereference(p) \
	__extension__ \
	({ \
		__typeof__(p) _____side_v = __atomic_load_n(&(p), __ATOMIC_CONSUME); \
		(_____side_v); \
	})
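
/*
 * Read-side usage sketch (illustrative only, not part of this header):
 * a reader brackets its accesses with side_rcu_read_begin() and
 * side_rcu_read_end(), passing the period token returned by begin back
 * to end unchanged, and loads the protected pointer through
 * side_rcu_dereference(). The names my_gp_state, my_shared_data,
 * struct my_data and use_data() below are hypothetical:
 *
 *	static struct side_rcu_gp_state my_gp_state;
 *	static struct my_data *my_shared_data;
 *
 *	static void reader(void)
 *	{
 *		unsigned int period;
 *		struct my_data *p;
 *
 *		period = side_rcu_read_begin(&my_gp_state);
 *		p = side_rcu_dereference(my_shared_data);
 *		if (p)
 *			use_data(p);	// p stays valid until read_end.
 *		side_rcu_read_end(&my_gp_state, period);
 *	}
 */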

#define side_rcu_assign_pointer(p, v) __atomic_store_n(&(p), v, __ATOMIC_RELEASE)

void side_rcu_wait_grace_period(struct side_rcu_gp_state *gp_state) __attribute__((visibility("hidden")));
void side_rcu_gp_init(struct side_rcu_gp_state *rcu_gp) __attribute__((visibility("hidden")));
void side_rcu_gp_exit(struct side_rcu_gp_state *rcu_gp) __attribute__((visibility("hidden")));
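
/*
 * Update-side sketch (illustrative only; these functions have hidden
 * visibility, so this applies within the library): an updater publishes
 * a new version with side_rcu_assign_pointer(), then calls
 * side_rcu_wait_grace_period() before reclaiming the old version, so
 * that no reader can still hold a reference to it. The names
 * my_gp_state, my_shared_data and struct my_data are hypothetical;
 * concurrent updaters are assumed to be serialized by the caller, and
 * side_rcu_gp_init(&my_gp_state) is assumed to have run at startup:
 *
 *	static void update(struct my_data *new_data)
 *	{
 *		struct my_data *old_data = my_shared_data;
 *
 *		side_rcu_assign_pointer(my_shared_data, new_data);
 *		side_rcu_wait_grace_period(&my_gp_state);
 *		free(old_data);		// no reader still references old_data.
 *	}
 */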

#endif /* _SIDE_RCU_H */