Fix typos
[libside.git] / src / rcu.h
CommitLineData
85b765b8
MD
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 */
5
67337c4a
MD
6#ifndef _SIDE_RCU_H
7#define _SIDE_RCU_H
0a65ebfb 8
85b765b8
MD
9#include <sched.h>
10#include <stdint.h>
11#include <pthread.h>
bcdea8ac 12#include <stdbool.h>
85b765b8 13#include <poll.h>
7fb53c62 14#include <rseq/rseq.h>
6635dd92
MD
15#include <linux/futex.h>
16#include <sys/time.h>
17#include <unistd.h>
18#include <sys/syscall.h>
67337c4a 19#include <side/macros.h>
85b765b8 20
/*
 * Alignment granule for per-CPU state, to avoid false sharing between
 * CPUs. 256 presumably chosen as a conservative upper bound on
 * cache-line/prefetch granularity across supported architectures —
 * TODO confirm rationale.
 */
#define SIDE_CACHE_LINE_SIZE 256
85b765b8 22
/*
 * Per-CPU reader counts for one grace-period parity.
 *
 * "begin"/"end" are incremented with SEQ_CST atomics on the fallback
 * path of side_rcu_read_begin()/side_rcu_read_end(); "rseq_begin"/
 * "rseq_end" are incremented with rseq per-CPU operations on the fast
 * path. The grace-period waiter (in rcu.c, not visible here)
 * presumably sums both flavors when checking for quiescence.
 */
struct side_rcu_percpu_count {
	uintptr_t begin;	/* read_begin count, atomic fallback path. */
	uintptr_t rseq_begin;	/* read_begin count, rseq fast path. */
	uintptr_t end;		/* read_end count, atomic fallback path. */
	uintptr_t rseq_end;	/* read_end count, rseq fast path. */
};
85b765b8 29
67337c4a
MD
/*
 * Reader counters for one CPU: one cell per grace-period parity,
 * selected by the "period" index (0 or 1, flipped by the grace-period
 * writer — see rcu.c). Cache-line aligned to avoid false sharing
 * between CPUs.
 */
struct side_rcu_cpu_gp_state {
	struct side_rcu_percpu_count count[2];
} __attribute__((__aligned__(SIDE_CACHE_LINE_SIZE)));
85b765b8 33
67337c4a
MD
/*
 * Global RCU grace-period state shared between readers and the
 * grace-period waiter.
 */
struct side_rcu_gp_state {
	/* Array of per-CPU counters; presumably nr_cpus entries, allocated in rcu.c — not visible here. */
	struct side_rcu_cpu_gp_state *percpu_state;
	int nr_cpus;
	/* Wait/wake word: -1 presumably set by a parked waiter in side_rcu_wait_grace_period(); cleared by side_rcu_wake_up_gp(). */
	int32_t futex;
	/* Current count[] parity (0 or 1); read by readers with a relaxed atomic load. */
	unsigned int period;
	/* Presumably serializes grace-period operations — see rcu.c. */
	pthread_mutex_t gp_lock;
};
41
67337c4a
MD
/*
 * Cookie filled by side_rcu_read_begin() and consumed by
 * side_rcu_read_end(): remembers which counter cell was incremented,
 * and on which CPU, so the matching "end" count is incremented.
 */
struct side_rcu_read_state {
	struct side_rcu_percpu_count *percpu_count;
	int cpu;
};
46
67337c4a 47extern unsigned int side_rcu_rseq_membarrier_available __attribute__((visibility("hidden")));
bddcdc92 48
6635dd92
MD
/*
 * futex: thin wrapper around the futex(2) system call.
 *
 * Returns the raw syscall result: operation-specific value on success
 * (e.g. number of woken waiters for FUTEX_WAKE), -1 with errno set on
 * error.
 */
static inline
int futex(int32_t *uaddr, int op, int32_t val,
		const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
{
	long ret;

	ret = syscall(__NR_futex, uaddr, op, val, timeout, uaddr2, val3);
	return (int) ret;
}
55
56/*
67337c4a 57 * Wake-up side_rcu_wait_grace_period. Called concurrently from many
6635dd92
MD
58 * threads.
59 */
60static inline
67337c4a 61void side_rcu_wake_up_gp(struct side_rcu_gp_state *gp_state)
6635dd92 62{
67337c4a 63 if (side_unlikely(__atomic_load_n(&gp_state->futex, __ATOMIC_RELAXED) == -1)) {
6635dd92
MD
64 __atomic_store_n(&gp_state->futex, 0, __ATOMIC_RELAXED);
65 /* TODO: handle futex return values. */
66 (void) futex(&gp_state->futex, FUTEX_WAKE, 1, NULL, NULL, 0);
67 }
68}
69
/*
 * Enter an RCU read-side critical section.
 *
 * Fills @read_state with the per-CPU counter cell that was incremented
 * and the CPU it belongs to, so side_rcu_read_end() can increment the
 * matching "end" counter.
 *
 * Fast path: increment the current period's rseq_begin counter with an
 * rseq per-CPU operation. If membarrier/rseq support is unavailable or
 * the rseq operation fails, fall back to a SEQ_CST atomic increment of
 * the "begin" counter.
 */
static inline
void side_rcu_read_begin(struct side_rcu_gp_state *gp_state, struct side_rcu_read_state *read_state)
{
	struct side_rcu_percpu_count *begin_cpu_count;
	struct side_rcu_cpu_gp_state *cpu_gp_state;
	unsigned int period;
	int cpu;

	cpu = rseq_cpu_start();
	period = __atomic_load_n(&gp_state->period, __ATOMIC_RELAXED);
	cpu_gp_state = &gp_state->percpu_state[cpu];
	read_state->percpu_count = begin_cpu_count = &cpu_gp_state->count[period];
	read_state->cpu = cpu;
	if (side_likely(side_rcu_rseq_membarrier_available &&
			!rseq_addv(RSEQ_MO_RELAXED, RSEQ_PERCPU_CPU_ID,
				(intptr_t *)&begin_cpu_count->rseq_begin, 1, cpu))) {
		/*
		 * This compiler barrier (A) is paired with membarrier() at (C),
		 * (D), (E). It effectively upgrades this compiler barrier to a
		 * SEQ_CST fence with respect to the paired barriers.
		 *
		 * This barrier (A) ensures that the contents of the read-side
		 * critical section does not leak before the "begin" counter
		 * increment. It pairs with memory barriers (D) and (E).
		 *
		 * This barrier (A) also ensures that the "begin" increment is
		 * before the "end" increment. It pairs with memory barrier (C).
		 * It is redundant with barrier (B) for that purpose.
		 */
		rseq_barrier();
		return;
	}
	/* Fallback to atomic increment and SEQ_CST. */
	cpu = sched_getcpu();
	if (side_unlikely(cpu < 0))
		cpu = 0;	/* sched_getcpu() failed: account on CPU 0's counters. */
	/* Re-publish cpu/cell: the rseq fast path's CPU may be stale here. */
	read_state->cpu = cpu;
	cpu_gp_state = &gp_state->percpu_state[cpu];
	read_state->percpu_count = begin_cpu_count = &cpu_gp_state->count[period];
	(void) __atomic_add_fetch(&begin_cpu_count->begin, 1, __ATOMIC_SEQ_CST);
}
111
/*
 * Exit an RCU read-side critical section begun by side_rcu_read_begin().
 *
 * Increments the "end" counter matching the "begin" cell recorded in
 * @read_state (rseq fast path, or SEQ_CST atomic fallback), then wakes
 * up a potential grace-period waiter.
 */
static inline
void side_rcu_read_end(struct side_rcu_gp_state *gp_state, struct side_rcu_read_state *read_state)
{
	struct side_rcu_percpu_count *begin_cpu_count = read_state->percpu_count;
	int cpu = read_state->cpu;

	/*
	 * This compiler barrier (B) is paired with membarrier() at (C),
	 * (D), (E). It effectively upgrades this compiler barrier to a
	 * SEQ_CST fence with respect to the paired barriers.
	 *
	 * This barrier (B) ensures that the contents of the read-side
	 * critical section does not leak after the "end" counter
	 * increment. It pairs with memory barriers (D) and (E).
	 *
	 * This barrier (B) also ensures that the "begin" increment is
	 * before the "end" increment. It pairs with memory barrier (C).
	 * It is redundant with barrier (A) for that purpose.
	 */
	rseq_barrier();
	if (side_likely(side_rcu_rseq_membarrier_available &&
			!rseq_addv(RSEQ_MO_RELAXED, RSEQ_PERCPU_CPU_ID,
				(intptr_t *)&begin_cpu_count->rseq_end, 1, cpu))) {
		/*
		 * This barrier (F) is paired with membarrier()
		 * at (G). It orders increment of the begin/end
		 * counters before load/store to the futex.
		 */
		rseq_barrier();
		goto end;
	}
	/* Fallback to atomic increment and SEQ_CST. */
	(void) __atomic_add_fetch(&begin_cpu_count->end, 1, __ATOMIC_SEQ_CST);
	/*
	 * This barrier (F) implied by SEQ_CST is paired with SEQ_CST
	 * barrier or membarrier() at (G). It orders increment of the
	 * begin/end counters before load/store to the futex.
	 */
end:
	side_rcu_wake_up_gp(gp_state);
}
153
/*
 * side_rcu_dereference: load an RCU-protected pointer for reading.
 *
 * The __ATOMIC_CONSUME load orders the pointer load before dereferences
 * of the pointed-to data. Pairs with the release store performed by
 * side_rcu_assign_pointer().
 */
#define side_rcu_dereference(p) \
	__extension__ \
	({ \
		__typeof__(p) _____side_v = __atomic_load_n(&(p), __ATOMIC_CONSUME); \
		(_____side_v); \
	})
160
/*
 * side_rcu_assign_pointer: publish @v into RCU-protected pointer @p.
 *
 * Release store: orders prior initialization of the pointed-to data
 * before the pointer becomes visible to readers using
 * side_rcu_dereference() (consume load).
 *
 * Note: no trailing semicolon in the expansion, so callers terminate it
 * themselves and it composes correctly with unbraced if/else. @v is
 * parenthesized for macro-argument hygiene.
 */
#define side_rcu_assign_pointer(p, v)	__atomic_store_n(&(p), (v), __ATOMIC_RELEASE)
11f8a2ce 162
67337c4a
MD
/* Wait until pre-existing read-side critical sections complete (implemented in rcu.c). */
void side_rcu_wait_grace_period(struct side_rcu_gp_state *gp_state) __attribute__((visibility("hidden")));
/* Initialize grace-period state; presumably allocates percpu_state — see rcu.c. */
void side_rcu_gp_init(struct side_rcu_gp_state *rcu_gp) __attribute__((visibility("hidden")));
/* Tear down grace-period state initialized by side_rcu_gp_init(). */
void side_rcu_gp_exit(struct side_rcu_gp_state *rcu_gp) __attribute__((visibility("hidden")));
0a65ebfb 166
67337c4a 167#endif /* _SIDE_RCU_H */
This page took 0.031284 seconds and 4 git commands to generate.