rcu: introduce read state
src/rcu.h (libside.git)
// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _SIDE_RCU_H
#define _SIDE_RCU_H

#include <sched.h>
#include <stdint.h>
#include <pthread.h>
#include <stdbool.h>
#include <poll.h>
#include <rseq/rseq.h>
#include <linux/futex.h>
#include <sys/time.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <side/macros.h>

#define SIDE_CACHE_LINE_SIZE	256

/*
 * Per-CPU reader counters for one grace-period parity: begin/end are
 * incremented by the atomic fallback path, rseq_begin/rseq_end by the
 * rseq fast path.
 */
struct side_rcu_percpu_count {
	uintptr_t begin;
	uintptr_t rseq_begin;
	uintptr_t end;
	uintptr_t rseq_end;
};

/* Cache-line-aligned per-CPU state: one counter set per period. */
struct side_rcu_cpu_gp_state {
	struct side_rcu_percpu_count count[2];
} __attribute__((__aligned__(SIDE_CACHE_LINE_SIZE)));

struct side_rcu_gp_state {
	struct side_rcu_cpu_gp_state *percpu_state;
	int nr_cpus;
	int32_t futex;		/* Wakes up the grace-period waiter. */
	unsigned int period;	/* Current period, used as count[] index. */
	pthread_mutex_t gp_lock;
};

/*
 * Snapshot filled by side_rcu_read_begin() and consumed by the matching
 * side_rcu_read_end(): which counter set and CPU the reader used.
 */
struct side_rcu_read_state {
	struct side_rcu_percpu_count *percpu_count;
	int cpu;
};
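
/*
 * Editor's note, not part of the original header: one
 * struct side_rcu_gp_state is typically defined per RCU domain, e.g.
 * a hypothetical global
 *
 *	static struct side_rcu_gp_state my_rcu_gp;
 *
 * initialized with side_rcu_gp_init() before any reader or updater
 * uses it, and released with side_rcu_gp_exit() (both declared at the
 * end of this header).
 */
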
extern unsigned int side_rcu_rseq_membarrier_available __attribute__((visibility("hidden")));

static inline
int futex(int32_t *uaddr, int op, int32_t val,
		const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
{
	return syscall(__NR_futex, uaddr, op, val, timeout, uaddr2, val3);
}

/*
 * Wake-up side_rcu_wait_grace_period. Called concurrently from many
 * threads.
 */
static inline
void side_rcu_wake_up_gp(struct side_rcu_gp_state *gp_state)
{
	if (side_unlikely(__atomic_load_n(&gp_state->futex, __ATOMIC_RELAXED) == -1)) {
		__atomic_store_n(&gp_state->futex, 0, __ATOMIC_RELAXED);
		/* TODO: handle futex return values. */
		(void) futex(&gp_state->futex, FUTEX_WAKE, 1, NULL, NULL, 0);
	}
}

static inline
void side_rcu_read_begin(struct side_rcu_gp_state *gp_state, struct side_rcu_read_state *read_state)
{
	unsigned int period = __atomic_load_n(&gp_state->period, __ATOMIC_RELAXED);
	struct side_rcu_percpu_count *begin_cpu_count;
	struct side_rcu_cpu_gp_state *cpu_gp_state;
	int cpu;

	if (side_likely(side_rcu_rseq_membarrier_available)) {
		cpu = rseq_cpu_start();
		cpu_gp_state = &gp_state->percpu_state[cpu];
		read_state->percpu_count = begin_cpu_count = &cpu_gp_state->count[period];
		read_state->cpu = cpu;
		if (side_likely(!rseq_addv((intptr_t *)&begin_cpu_count->rseq_begin, 1, cpu))) {
			/*
			 * This compiler barrier (A) is paired with membarrier() at (C),
			 * (D), (E). It effectively upgrades this compiler barrier to a
			 * SEQ_CST fence with respect to the paired barriers.
			 *
			 * This barrier (A) ensures that the contents of the read-side
			 * critical section do not leak before the "begin" counter
			 * increment. It pairs with memory barriers (D) and (E).
			 *
			 * This barrier (A) also ensures that the "begin" increment is
			 * before the "end" increment. It pairs with memory barrier (C).
			 * It is redundant with barrier (B) for that purpose.
			 */
			rseq_barrier();
			return;
		}
	}
	/* Fallback to atomic increment and SEQ_CST. */
	cpu = sched_getcpu();
	if (side_unlikely(cpu < 0))
		cpu = 0;
	read_state->cpu = cpu;
	cpu_gp_state = &gp_state->percpu_state[cpu];
	read_state->percpu_count = begin_cpu_count = &cpu_gp_state->count[period];
	(void) __atomic_add_fetch(&begin_cpu_count->begin, 1, __ATOMIC_SEQ_CST);
}

static inline
void side_rcu_read_end(struct side_rcu_gp_state *gp_state, struct side_rcu_read_state *read_state)
{
	struct side_rcu_percpu_count *begin_cpu_count = read_state->percpu_count;
	int cpu = read_state->cpu;

	if (side_likely(side_rcu_rseq_membarrier_available)) {
		/*
		 * This compiler barrier (B) is paired with membarrier() at (C),
		 * (D), (E). It effectively upgrades this compiler barrier to a
		 * SEQ_CST fence with respect to the paired barriers.
		 *
		 * This barrier (B) ensures that the contents of the read-side
		 * critical section do not leak after the "end" counter
		 * increment. It pairs with memory barriers (D) and (E).
		 *
		 * This barrier (B) also ensures that the "begin" increment is
		 * before the "end" increment. It pairs with memory barrier (C).
		 * It is redundant with barrier (A) for that purpose.
		 */
		rseq_barrier();
		if (side_likely(!rseq_addv((intptr_t *)&begin_cpu_count->rseq_end, 1, cpu))) {
			/*
			 * This barrier (F) is paired with membarrier()
			 * at (G). It orders increment of the begin/end
			 * counters before load/store to the futex.
			 */
			rseq_barrier();
			goto end;
		}
	}
	/* Fallback to atomic increment and SEQ_CST. */
	(void) __atomic_add_fetch(&begin_cpu_count->end, 1, __ATOMIC_SEQ_CST);
	/*
	 * This barrier (F) is paired with SEQ_CST barrier or
	 * membarrier() at (G). It orders increment of the begin/end
	 * counters before load/store to the futex.
	 */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
end:
	side_rcu_wake_up_gp(gp_state);
}
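
/*
 * Editor's illustrative sketch, not part of the original header: a
 * typical read-side critical section. The names "my_rcu_gp",
 * "my_data_ptr" and "struct my_data" are hypothetical. The
 * RCU-protected pointer is loaded with side_rcu_dereference()
 * (defined below), and the pointed-to data must only be accessed
 * between read_begin and read_end, passing the same read_state to
 * both:
 *
 *	struct side_rcu_read_state read_state;
 *	struct my_data *data;
 *
 *	side_rcu_read_begin(&my_rcu_gp, &read_state);
 *	data = side_rcu_dereference(my_data_ptr);
 *	if (data)
 *		use(data);
 *	side_rcu_read_end(&my_rcu_gp, &read_state);
 */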

#define side_rcu_dereference(p) \
	__extension__ \
	({ \
		__typeof__(p) _____side_v = __atomic_load_n(&(p), __ATOMIC_CONSUME); \
		(_____side_v); \
	})

#define side_rcu_assign_pointer(p, v)	__atomic_store_n(&(p), v, __ATOMIC_RELEASE)

void side_rcu_wait_grace_period(struct side_rcu_gp_state *gp_state) __attribute__((visibility("hidden")));
void side_rcu_gp_init(struct side_rcu_gp_state *rcu_gp) __attribute__((visibility("hidden")));
void side_rcu_gp_exit(struct side_rcu_gp_state *rcu_gp) __attribute__((visibility("hidden")));
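
/*
 * Editor's illustrative sketch, not part of the original header: a
 * minimal updater built on the primitives above. The type
 * "struct side_example_data" and the function name are hypothetical,
 * and updates are assumed to be serialized by the caller. The updater
 * publishes the new version with side_rcu_assign_pointer(), waits for
 * pre-existing readers with side_rcu_wait_grace_period(), and only
 * then may the caller reclaim the previous version.
 */
struct side_example_data {
	uintptr_t value;
};

static inline
struct side_example_data *side_example_replace(struct side_rcu_gp_state *gp_state,
		struct side_example_data **data_p,
		struct side_example_data *new_data)
{
	struct side_example_data *old_data = *data_p;

	/* Publish the new version with release semantics. */
	side_rcu_assign_pointer(*data_p, new_data);
	/* Wait until all pre-existing readers have completed. */
	side_rcu_wait_grace_period(gp_state);
	/* old_data can now be freed by the caller. */
	return old_data;
}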

#endif /* _SIDE_RCU_H */