RCU: Move implementation to rcu.c
[libside.git] / src / rcu.h
// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _SIDE_RCU_H
#define _SIDE_RCU_H

#include <sched.h>
#include <stdint.h>
#include <pthread.h>
#include <stdbool.h>
#include <poll.h>

#define SIDE_CACHE_LINE_SIZE 256

struct side_rcu_percpu_count {
	uintptr_t begin;
	uintptr_t end;
} __attribute__((__aligned__(SIDE_CACHE_LINE_SIZE)));

struct side_rcu_cpu_gp_state {
	struct side_rcu_percpu_count count[2];
};

struct side_rcu_gp_state {
	struct side_rcu_cpu_gp_state *percpu_state;
	int nr_cpus;
	unsigned int period;
	pthread_mutex_t gp_lock;
};

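/*
 * Illustrative sketch (not part of this header's API): one possible
 * way to set up the grace period state before use. The actual
 * initialization lives in rcu.c; the helper name and the use of
 * sysconf(3), aligned_alloc(3) and memset(3) here are assumptions
 * made for the sake of the example, hence the #if 0 guard.
 */
#if 0	/* example only */
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static int example_gp_init(struct side_rcu_gp_state *gp_state)
{
	long nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
	size_t len;

	if (nr_cpus < 1)
		return -1;
	gp_state->nr_cpus = (int) nr_cpus;
	gp_state->period = 0;
	/* Per-cpu counters are cache-line aligned; allocate accordingly. */
	len = (size_t) nr_cpus * sizeof(*gp_state->percpu_state);
	gp_state->percpu_state = aligned_alloc(SIDE_CACHE_LINE_SIZE, len);
	if (!gp_state->percpu_state)
		return -1;
	memset(gp_state->percpu_state, 0, len);
	return pthread_mutex_init(&gp_state->gp_lock, NULL);
}
#endif
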
//TODO: replace atomics by rseq (when available)
//TODO: replace acquire/release by membarrier+compiler barrier (when available)
//TODO: implement wait/wakeup for grace period using sys_futex
static inline
unsigned int side_rcu_read_begin(struct side_rcu_gp_state *gp_state)
{
	int cpu = sched_getcpu();
	unsigned int period = __atomic_load_n(&gp_state->period, __ATOMIC_RELAXED);

	if (cpu < 0)
		cpu = 0;
	/*
	 * This memory barrier (A) ensures that the contents of the
	 * read-side critical section do not leak before the "begin"
	 * counter increment. It pairs with memory barriers (D) and (E).
	 *
	 * This memory barrier (A) also ensures that the "begin"
	 * increment is before the "end" increment. It pairs with memory
	 * barrier (C). It is redundant with memory barrier (B) for that
	 * purpose.
	 */
	(void) __atomic_add_fetch(&gp_state->percpu_state[cpu].count[period].begin, 1, __ATOMIC_SEQ_CST);
	return period;
}

static inline
void side_rcu_read_end(struct side_rcu_gp_state *gp_state, unsigned int period)
{
	int cpu = sched_getcpu();

	if (cpu < 0)
		cpu = 0;
	/*
	 * This memory barrier (B) ensures that the contents of the
	 * read-side critical section do not leak after the "end"
	 * counter increment. It pairs with memory barriers (D) and (E).
	 *
	 * This memory barrier (B) also ensures that the "begin"
	 * increment is before the "end" increment. It pairs with memory
	 * barrier (C). It is redundant with memory barrier (A) for that
	 * purpose.
	 */
	(void) __atomic_add_fetch(&gp_state->percpu_state[cpu].count[period].end, 1, __ATOMIC_SEQ_CST);
}

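/*
 * Usage sketch (illustrative only, not part of this header): a
 * read-side critical section bracketing access to RCU-protected
 * data. The example struct and globals are assumptions; the
 * __ATOMIC_CONSUME load is what the side_rcu_dereference() helper
 * defined below expands to.
 */
#if 0	/* example only */
struct example_data {
	int value;
};

static struct side_rcu_gp_state example_gp_state;	/* Assumed initialized elsewhere. */
static struct example_data *example_ptr;

static int example_reader(void)
{
	struct example_data *p;
	unsigned int period;
	int value = 0;

	period = side_rcu_read_begin(&example_gp_state);
	p = __atomic_load_n(&example_ptr, __ATOMIC_CONSUME);
	if (p)
		value = p->value;	/* Safe until the matching read_end. */
	side_rcu_read_end(&example_gp_state, period);
	return value;
}
#endif
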
#define side_rcu_dereference(p) \
	__extension__ \
	({ \
		__typeof__(p) _____side_v = __atomic_load_n(&(p), __ATOMIC_CONSUME); \
		(_____side_v); \
	})

#define side_rcu_assign_pointer(p, v) __atomic_store_n(&(p), v, __ATOMIC_RELEASE)

void side_rcu_wait_grace_period(struct side_rcu_gp_state *gp_state) __attribute__((visibility("hidden")));
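
/*
 * Usage sketch (illustrative only, not part of this header): an
 * updater publishing a new version of RCU-protected data, then
 * waiting for a grace period before reclaiming the old version.
 * It reuses the example declarations from the reader sketch above,
 * and assumes updates are serialized by the caller (e.g. under a
 * mutex) and that objects are reclaimed with malloc(3)/free(3).
 */
#if 0	/* example only */
#include <stdlib.h>

static int example_update(int new_value)
{
	struct example_data *new_data, *old_data;

	new_data = malloc(sizeof(*new_data));
	if (!new_data)
		return -1;
	new_data->value = new_value;
	old_data = example_ptr;
	/* Publish: release store pairs with the readers' consume load. */
	side_rcu_assign_pointer(example_ptr, new_data);
	/* Wait until no reader can still hold a reference to old_data. */
	side_rcu_wait_grace_period(&example_gp_state);
	free(old_data);
	return 0;
}
#endif
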
#endif /* _SIDE_RCU_H */