Update barrier comments for membarrier/compiler barrier
[libside.git] / src / rcu.h
// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _SIDE_RCU_H
#define _SIDE_RCU_H

#include <sched.h>
#include <stdint.h>
#include <pthread.h>
#include <stdbool.h>
#include <poll.h>
#include <side/trace.h>
#include <rseq/rseq.h>

#define SIDE_CACHE_LINE_SIZE	256

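/* Per-CPU counters are cache-line aligned to avoid false sharing between CPUs. */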
struct side_rcu_percpu_count {
	uintptr_t begin;
	uintptr_t rseq_begin;
	uintptr_t end;
	uintptr_t rseq_end;
} __attribute__((__aligned__(SIDE_CACHE_LINE_SIZE)));

struct side_rcu_cpu_gp_state {
	struct side_rcu_percpu_count count[2];
};

struct side_rcu_gp_state {
	struct side_rcu_cpu_gp_state *percpu_state;
	int nr_cpus;
	unsigned int period;
	pthread_mutex_t gp_lock;
};
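
/*
 * Overview of the counting scheme: each CPU keeps two generations of
 * counters (count[0] and count[1]); "period" selects the generation
 * that new readers increment. Readers use the rseq_* counters on the
 * rseq fast path and fall back to atomic increments of begin/end
 * otherwise. The grace period implementation (in rcu.c, not shown in
 * this header) is expected to flip "period" under gp_lock and wait
 * until, for the previous period, the sum of begin + rseq_begin
 * matches the sum of end + rseq_end across all CPUs, meaning every
 * reader which started in that period has completed.
 */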
// TODO: implement wait/wakeup for grace period using sys_futex
static inline
unsigned int side_rcu_read_begin(struct side_rcu_gp_state *gp_state)
{
	unsigned int period = __atomic_load_n(&gp_state->period, __ATOMIC_RELAXED);
	struct side_rcu_cpu_gp_state *cpu_gp_state;
	int cpu;

	if (side_likely(rseq_offset > 0)) {
		cpu = rseq_cpu_start();
		cpu_gp_state = &gp_state->percpu_state[cpu];
		if (!rseq_addv((intptr_t *)&cpu_gp_state->count[period].rseq_begin, 1, cpu))
			goto fence;
	}
	cpu = sched_getcpu();
	if (side_unlikely(cpu < 0))
		cpu = 0;
	cpu_gp_state = &gp_state->percpu_state[cpu];
	(void) __atomic_add_fetch(&cpu_gp_state->count[period].begin, 1, __ATOMIC_RELAXED);
fence:
	/*
	 * This compiler barrier (A) is paired with membarrier() at (C),
	 * (D), (E). It effectively upgrades this compiler barrier to a
	 * SEQ_CST fence with respect to the paired barriers.
	 *
	 * This barrier (A) ensures that the contents of the read-side
	 * critical section do not leak before the "begin" counter
	 * increment. It pairs with memory barriers (D) and (E).
	 *
	 * This barrier (A) also ensures that the "begin" increment is
	 * ordered before the "end" increment. It pairs with memory
	 * barrier (C). It is redundant with barrier (B) for that
	 * purpose.
	 */
	rseq_barrier();
	return period;
}

static inline
void side_rcu_read_end(struct side_rcu_gp_state *gp_state, unsigned int period)
{
	struct side_rcu_cpu_gp_state *cpu_gp_state;
	int cpu;

	/*
	 * This compiler barrier (B) is paired with membarrier() at (C),
	 * (D), (E). It effectively upgrades this compiler barrier to a
	 * SEQ_CST fence with respect to the paired barriers.
	 *
	 * This barrier (B) ensures that the contents of the read-side
	 * critical section do not leak after the "end" counter
	 * increment. It pairs with memory barriers (D) and (E).
	 *
	 * This barrier (B) also ensures that the "begin" increment is
	 * ordered before the "end" increment. It pairs with memory
	 * barrier (C). It is redundant with barrier (A) for that
	 * purpose.
	 */
	rseq_barrier();

	if (side_likely(rseq_offset > 0)) {
		cpu = rseq_cpu_start();
		cpu_gp_state = &gp_state->percpu_state[cpu];
		if (!rseq_addv((intptr_t *)&cpu_gp_state->count[period].rseq_end, 1, cpu))
			return;
	}
	cpu = sched_getcpu();
	if (side_unlikely(cpu < 0))
		cpu = 0;
	cpu_gp_state = &gp_state->percpu_state[cpu];
	(void) __atomic_add_fetch(&cpu_gp_state->count[period].end, 1, __ATOMIC_RELAXED);
}
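
/*
 * Barriers (C), (D), (E) live in the grace period implementation
 * (rcu.c, not shown in this header). On the grace period side, the
 * matching SEQ_CST fence is typically obtained with the membarrier(2)
 * system call; an illustrative sketch (not the verbatim rcu.c code):
 *
 *	#include <linux/membarrier.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	// Once at initialization:
 *	syscall(__NR_membarrier, MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0);
 *	// At each of (C), (D), (E):
 *	syscall(__NR_membarrier, MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0);
 *
 * MEMBARRIER_CMD_PRIVATE_EXPEDITED issues a memory barrier on every
 * running thread of the calling process, which pairs with the
 * rseq_barrier() compiler barriers (A) and (B) above.
 */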

#define side_rcu_dereference(p) \
	__extension__ \
	({ \
		__typeof__(p) _____side_v = __atomic_load_n(&(p), __ATOMIC_CONSUME); \
		(_____side_v); \
	})

#define side_rcu_assign_pointer(p, v)	__atomic_store_n(&(p), v, __ATOMIC_RELEASE)

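/*
 * Example read-side usage (illustrative sketch; "rcu_gp", "shared_ptr"
 * and "struct mydata" are hypothetical names, not part of this API):
 *
 *	struct mydata *p;
 *	unsigned int period;
 *
 *	period = side_rcu_read_begin(&rcu_gp);
 *	p = side_rcu_dereference(shared_ptr);
 *	if (p)
 *		use(p);
 *	side_rcu_read_end(&rcu_gp, period);
 */
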
void side_rcu_wait_grace_period(struct side_rcu_gp_state *gp_state) __attribute__((visibility("hidden")));
void side_rcu_gp_init(struct side_rcu_gp_state *rcu_gp) __attribute__((visibility("hidden")));
void side_rcu_gp_exit(struct side_rcu_gp_state *rcu_gp) __attribute__((visibility("hidden")));
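
/*
 * Example update-side usage (illustrative sketch; names other than the
 * declarations above are hypothetical, and updaters are expected to
 * provide their own mutual exclusion):
 *
 *	struct side_rcu_gp_state rcu_gp;
 *
 *	side_rcu_gp_init(&rcu_gp);
 *	...
 *	struct mydata *old = shared_ptr;
 *	side_rcu_assign_pointer(shared_ptr, new_data);
 *	side_rcu_wait_grace_period(&rcu_gp);	// wait for pre-existing readers
 *	free(old);
 *	...
 *	side_rcu_gp_exit(&rcu_gp);
 */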

#endif /* _SIDE_RCU_H */