// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <sched.h>
#include <stdint.h>
#include <stdbool.h>
#include <pthread.h>
#include <poll.h>

#define SIDE_CACHE_LINE_SIZE	256

struct side_rcu_percpu_count {
	uintptr_t begin;
	uintptr_t end;
} __attribute__((__aligned__(SIDE_CACHE_LINE_SIZE)));

struct side_rcu_cpu_gp_state {
	struct side_rcu_percpu_count count[2];
};

struct side_rcu_gp_state {
	struct side_rcu_cpu_gp_state *percpu_state;
	int nr_cpus;
	unsigned int period;
	pthread_mutex_t gp_lock;
};

//TODO: replace atomics by rseq (when available)
//TODO: replace acquire/release by membarrier+compiler barrier (when available)
//TODO: implement wait/wakeup for grace period using sys_futex
unsigned int side_rcu_read_begin(struct side_rcu_gp_state *gp_state)
{
	int cpu = sched_getcpu();
	unsigned int period = __atomic_load_n(&gp_state->period, __ATOMIC_RELAXED);

	/* sched_getcpu() may fail; fall back to CPU 0. */
	if (cpu < 0)
		cpu = 0;
	/*
	 * This acquire MO pairs with the release fence at the end of
	 * side_rcu_wait_grace_period().
	 */
	(void) __atomic_add_fetch(&gp_state->percpu_state[cpu].count[period].begin, 1, __ATOMIC_SEQ_CST);
	return period;
}
void side_rcu_read_end(struct side_rcu_gp_state *gp_state, unsigned int period)
{
	int cpu = sched_getcpu();

	/* sched_getcpu() may fail; fall back to CPU 0. */
	if (cpu < 0)
		cpu = 0;
	/*
	 * This release MO pairs with the acquire fence at the beginning
	 * of side_rcu_wait_grace_period().
	 */
	(void) __atomic_add_fetch(&gp_state->percpu_state[cpu].count[period].end, 1, __ATOMIC_SEQ_CST);
}
#define side_rcu_dereference(p) \
	__extension__ \
	({ \
		__typeof__(p) _____side_v = __atomic_load_n(&(p), __ATOMIC_CONSUME); \
		(_____side_v); \
	})

#define side_rcu_assign_pointer(p, v)	__atomic_store_n(&(p), v, __ATOMIC_RELEASE)
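
/*
 * A minimal reader-side sketch (example only, not part of this file): how
 * side_rcu_read_begin(), side_rcu_dereference() and side_rcu_read_end() are
 * meant to be combined. The struct my_config type, the config_rcu_pointer
 * global and the gp state object are hypothetical names introduced purely
 * for illustration; the gp state is assumed to be initialized elsewhere.
 */
#if 0	/* illustrative sketch, kept out of the build */
struct my_config {
	int value;
};

static struct side_rcu_gp_state gp;		/* assumed initialized elsewhere */
static struct my_config *config_rcu_pointer;	/* RCU-protected pointer (hypothetical) */

static int read_config_value(void)
{
	unsigned int period;
	struct my_config *cfg;
	int v;

	period = side_rcu_read_begin(&gp);	/* enter read-side critical section */
	cfg = side_rcu_dereference(config_rcu_pointer);
	v = cfg ? cfg->value : -1;		/* dereference only inside the critical section */
	side_rcu_read_end(&gp, period);		/* exit, passing back the observed period */
	return v;
}
#endif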
void check_active_readers(struct side_rcu_gp_state *gp_state, bool *active_readers)
{
	uintptr_t sum[2] = { 0, 0 };	/* begin - end */
	int i;

	for (i = 0; i < gp_state->nr_cpus; i++) {
		struct side_rcu_cpu_gp_state *cpu_state = &gp_state->percpu_state[i];

		sum[0] -= __atomic_load_n(&cpu_state->count[0].end, __ATOMIC_RELAXED);
		sum[1] -= __atomic_load_n(&cpu_state->count[1].end, __ATOMIC_RELAXED);
	}

	/*
	 * Read end counts before begin counts. Reading end before begin
	 * counts ensures we never see an end without having seen its
	 * associated begin, in case of a thread migration during the
	 * traversal over each cpu.
	 */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);

	for (i = 0; i < gp_state->nr_cpus; i++) {
		struct side_rcu_cpu_gp_state *cpu_state = &gp_state->percpu_state[i];

		sum[0] += __atomic_load_n(&cpu_state->count[0].begin, __ATOMIC_RELAXED);
		sum[1] += __atomic_load_n(&cpu_state->count[1].begin, __ATOMIC_RELAXED);
	}
	active_readers[0] = sum[0];
	active_readers[1] = sum[1];
}
void wait_for_prev_period_readers(struct side_rcu_gp_state *gp_state)
{
	unsigned int prev_period = gp_state->period ^ 1;
	bool active_readers[2];

	/*
	 * Wait for the sum of CPU begin/end counts to match for the
	 * previous period.
	 */
	for (;;) {
		check_active_readers(gp_state, active_readers);
		if (!active_readers[prev_period])
			break;
		/* Retry after 10ms. */
		poll(NULL, 0, 10);
	}
}
void side_rcu_wait_grace_period(struct side_rcu_gp_state *gp_state)
{
	bool active_readers[2];

	/*
	 * This fence pairs with the __atomic_add_fetch __ATOMIC_SEQ_CST in
	 * side_rcu_read_end().
	 */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);

	/*
	 * First scan through all cpus, for both periods. If no readers
	 * are accounted for, we have observed quiescence and can
	 * complete the grace period immediately.
	 */
	check_active_readers(gp_state, active_readers);
	if (!active_readers[0] && !active_readers[1])
		goto end;

	pthread_mutex_lock(&gp_state->gp_lock);

	wait_for_prev_period_readers(gp_state);

	/* Flip period: 0 -> 1, 1 -> 0. */
	(void) __atomic_xor_fetch(&gp_state->period, 1, __ATOMIC_RELAXED);

	wait_for_prev_period_readers(gp_state);

	pthread_mutex_unlock(&gp_state->gp_lock);

end:
	/*
	 * This fence pairs with the __atomic_add_fetch __ATOMIC_SEQ_CST in
	 * side_rcu_read_begin().
	 */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
}
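
/*
 * A minimal updater-side sketch (example only, not part of this file): a
 * writer publishes a new version with side_rcu_assign_pointer(), waits for a
 * grace period, then frees the old version. It reuses the hypothetical
 * my_config/config_rcu_pointer/gp names from the reader sketch above, needs
 * <stdlib.h> for malloc()/free(), and assumes concurrent updaters are
 * serialized by the caller.
 */
#if 0	/* illustrative sketch, kept out of the build */
static void update_config_value(int new_value)
{
	struct my_config *new_cfg, *old_cfg;

	new_cfg = malloc(sizeof(*new_cfg));
	if (!new_cfg)
		return;
	new_cfg->value = new_value;

	old_cfg = config_rcu_pointer;				/* updaters serialized by caller */
	side_rcu_assign_pointer(config_rcu_pointer, new_cfg);	/* publish with release MO */

	/* Wait until pre-existing readers can no longer hold a reference to old_cfg. */
	side_rcu_wait_grace_period(&gp);
	free(old_cfg);
}
#endif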