Fix: order of side exit
[libside.git] / src / rcu.h
CommitLineData
85b765b8
MD
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 */
5
0a65ebfb
MD
6#ifndef _SIDE_RCU_H
7#define _SIDE_RCU_H
8
85b765b8
MD
9#include <sched.h>
10#include <stdint.h>
11#include <pthread.h>
bcdea8ac 12#include <stdbool.h>
85b765b8
MD
13#include <poll.h>
14
/* Alignment unit used to keep per-CPU data on distinct cache lines. */
#define SIDE_CACHE_LINE_SIZE 256

/*
 * Per-CPU reader counters for one grace period parity. A reader
 * increments "begin" when entering the critical section and "end" when
 * leaving it (see side_rcu_read_begin()/side_rcu_read_end()).
 * Cache-line aligned so concurrent updates from different CPUs do not
 * false-share.
 */
struct side_rcu_percpu_count {
	uintptr_t begin;
	uintptr_t end;
} __attribute__((__aligned__(SIDE_CACHE_LINE_SIZE)));
21
22struct side_rcu_cpu_gp_state {
bcdea8ac 23 struct side_rcu_percpu_count count[2];
85b765b8
MD
24};
25
/*
 * Global RCU grace period state shared by readers and the grace period
 * waiter.
 */
struct side_rcu_gp_state {
	struct side_rcu_cpu_gp_state *percpu_state;	/* per-CPU counters; presumably nr_cpus entries — see rcu.c */
	int nr_cpus;					/* number of per-CPU slots allocated */
	unsigned int period;				/* current parity; sampled relaxed by side_rcu_read_begin() */
	pthread_mutex_t gp_lock;			/* NOTE(review): presumably serializes grace periods — confirm in rcu.c */
};
32
33//TODO: replace atomics by rseq (when available)
34//TODO: replace acquire/release by membarrier+compiler barrier (when available)
35//TODO: implement wait/wakeup for grace period using sys_futex
36static inline
37unsigned int side_rcu_read_begin(struct side_rcu_gp_state *gp_state)
38{
39 int cpu = sched_getcpu();
40 unsigned int period = __atomic_load_n(&gp_state->period, __ATOMIC_RELAXED);
41
42 if (cpu < 0)
43 cpu = 0;
117e507d 44 /*
05a2e061
MD
45 * This memory barrier (A) ensures that the contents of the
46 * read-side critical section does not leak before the "begin"
47 * counter increment. It pairs with memory barriers (D) and (E).
48 *
49 * This memory barrier (A) also ensures that the "begin"
50 * increment is before the "end" increment. It pairs with memory
51 * barrier (C). It is redundant with memory barrier (B) for that
52 * purpose.
117e507d 53 */
4722edf9 54 (void) __atomic_add_fetch(&gp_state->percpu_state[cpu].count[period].begin, 1, __ATOMIC_SEQ_CST);
85b765b8
MD
55 return period;
56}
57
58static inline
59void side_rcu_read_end(struct side_rcu_gp_state *gp_state, unsigned int period)
60{
61 int cpu = sched_getcpu();
62
63 if (cpu < 0)
64 cpu = 0;
117e507d 65 /*
05a2e061
MD
66 * This memory barrier (B) ensures that the contents of the
67 * read-side critical section does not leak after the "end"
68 * counter increment. It pairs with memory barriers (D) and (E).
69 *
70 * This memory barrier (B) also ensures that the "begin"
71 * increment is before the "end" increment. It pairs with memory
72 * barrier (C). It is redundant with memory barrier (A) for that
73 * purpose.
117e507d 74 */
4722edf9 75 (void) __atomic_add_fetch(&gp_state->percpu_state[cpu].count[period].end, 1, __ATOMIC_SEQ_CST);
85b765b8
MD
76}
77
11f8a2ce
MD
/*
 * side_rcu_dereference: load an RCU-protected pointer.
 *
 * Performs an atomic CONSUME load of @p, so later dereferences through
 * the returned value are ordered after the load on architectures that
 * honor address dependencies. Pairs with the RELEASE store of
 * side_rcu_assign_pointer(). The temporary keeps its heavily
 * underscored name to avoid shadowing caller identifiers.
 */
#define side_rcu_dereference(p) \
	__extension__ \
	({ \
		__typeof__(p) _____side_v = __atomic_load_n(&(p), __ATOMIC_CONSUME); \
		(_____side_v); \
	})
84
/*
 * side_rcu_assign_pointer: publish value @v into RCU-protected pointer @p.
 *
 * The RELEASE store orders prior initialization of the pointed-to data
 * before the pointer becomes visible, pairing with the CONSUME load in
 * side_rcu_dereference().
 *
 * Wrapped in do { } while (0) so the macro behaves as a single
 * statement: the previous definition ended with a trailing ';' and a
 * stray line continuation, which broke uses such as
 * "if (cond) side_rcu_assign_pointer(p, v); else ..." and silently
 * extended the definition onto the following line.
 */
#define side_rcu_assign_pointer(p, v) \
	do { \
		__atomic_store_n(&(p), (v), __ATOMIC_RELEASE); \
	} while (0)
48363c84 87void side_rcu_wait_grace_period(struct side_rcu_gp_state *gp_state) __attribute__((visibility("hidden")));
054b7b5c 88void side_rcu_gp_init(struct side_rcu_gp_state *rcu_gp) __attribute__((visibility("hidden")));
6e46f5e6 89void side_rcu_gp_exit(struct side_rcu_gp_state *rcu_gp) __attribute__((visibility("hidden")));
0a65ebfb
MD
90
91#endif /* _SIDE_RCU_H */
This page took 0.039768 seconds and 4 git commands to generate.