// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _SIDE_RCU_H
#define _SIDE_RCU_H

#include <sched.h>
#include <stdint.h>
#include <pthread.h>
#include <stdbool.h>
#include <poll.h>
#include <side/trace.h>
#include <rseq/rseq.h>

#define SIDE_CACHE_LINE_SIZE 256

struct side_rcu_percpu_count {
	uintptr_t begin;
	uintptr_t rseq_begin;
	uintptr_t end;
	uintptr_t rseq_end;
} __attribute__((__aligned__(SIDE_CACHE_LINE_SIZE)));

struct side_rcu_cpu_gp_state {
	struct side_rcu_percpu_count count[2];
};

struct side_rcu_gp_state {
	struct side_rcu_cpu_gp_state *percpu_state;
	int nr_cpus;
	unsigned int period;
	pthread_mutex_t gp_lock;
};

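/*
 * Editor's note (explanatory sketch, not part of the original header):
 * "period" selects which of the two count[] slots new readers
 * increment, and is presumably flipped between 0 and 1 by the grace
 * period machinery. Each slot keeps separate counters for the rseq
 * fast path (rseq_begin/rseq_end) and the atomic fallback (begin/end);
 * a grace period can complete once the summed begin and end counts of
 * the previous period match, meaning no reader from that period remains.
 */
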
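/*
 * Editor's note (inferred from the read paths below, not part of the
 * original header): nonzero when both rseq and membarrier support are
 * available, enabling the rseq fast path in side_rcu_read_begin() and
 * side_rcu_read_end(); otherwise readers fall back to SEQ_CST atomics.
 */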
extern unsigned int side_rcu_rseq_membarrier_available __attribute__((visibility("hidden")));

//TODO: implement wait/wakeup for grace period using sys_futex
static inline
unsigned int side_rcu_read_begin(struct side_rcu_gp_state *gp_state)
{
	unsigned int period = __atomic_load_n(&gp_state->period, __ATOMIC_RELAXED);
	struct side_rcu_cpu_gp_state *cpu_gp_state;
	int cpu;

	if (side_likely(side_rcu_rseq_membarrier_available)) {
		cpu = rseq_cpu_start();
		cpu_gp_state = &gp_state->percpu_state[cpu];
		if (side_likely(!rseq_addv((intptr_t *)&cpu_gp_state->count[period].rseq_begin, 1, cpu))) {
			/*
			 * This compiler barrier (A) is paired with membarrier() at (C),
			 * (D), (E). It effectively upgrades this compiler barrier to a
			 * SEQ_CST fence with respect to the paired barriers.
			 *
			 * This barrier (A) ensures that the contents of the read-side
			 * critical section do not leak before the "begin" counter
			 * increment. It pairs with memory barriers (D) and (E).
			 *
			 * This barrier (A) also ensures that the "begin" increment is
			 * ordered before the "end" increment. It pairs with memory
			 * barrier (C). It is redundant with barrier (B) for that purpose.
			 */
			rseq_barrier();
			return period;
		}
	}
	/* Fall back to an atomic increment with SEQ_CST ordering. */
	cpu = sched_getcpu();
	if (side_unlikely(cpu < 0))
		cpu = 0;
	cpu_gp_state = &gp_state->percpu_state[cpu];
	(void) __atomic_add_fetch(&cpu_gp_state->count[period].begin, 1, __ATOMIC_SEQ_CST);
	return period;
}

static inline
void side_rcu_read_end(struct side_rcu_gp_state *gp_state, unsigned int period)
{
	struct side_rcu_cpu_gp_state *cpu_gp_state;
	int cpu;

	if (side_likely(side_rcu_rseq_membarrier_available)) {
		/*
		 * This compiler barrier (B) is paired with membarrier() at (C),
		 * (D), (E). It effectively upgrades this compiler barrier to a
		 * SEQ_CST fence with respect to the paired barriers.
		 *
		 * This barrier (B) ensures that the contents of the read-side
		 * critical section do not leak after the "end" counter
		 * increment. It pairs with memory barriers (D) and (E).
		 *
		 * This barrier (B) also ensures that the "begin" increment is
		 * ordered before the "end" increment. It pairs with memory
		 * barrier (C). It is redundant with barrier (A) for that purpose.
		 */
		rseq_barrier();
		cpu = rseq_cpu_start();
		cpu_gp_state = &gp_state->percpu_state[cpu];
		if (side_likely(!rseq_addv((intptr_t *)&cpu_gp_state->count[period].rseq_end, 1, cpu)))
			return;
	}
	/* Fall back to an atomic increment with SEQ_CST ordering. */
	cpu = sched_getcpu();
	if (side_unlikely(cpu < 0))
		cpu = 0;
	cpu_gp_state = &gp_state->percpu_state[cpu];
	(void) __atomic_add_fetch(&cpu_gp_state->count[period].end, 1, __ATOMIC_SEQ_CST);
}

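/*
 * Example usage (editor's sketch, not from the original source): the
 * period returned by side_rcu_read_begin() identifies the counter slot
 * that was incremented and must be passed back to side_rcu_read_end().
 * "gp_state" here is a hypothetical struct side_rcu_gp_state instance:
 *
 *	unsigned int period;
 *
 *	period = side_rcu_read_begin(&gp_state);
 *	// ... read-side critical section ...
 *	side_rcu_read_end(&gp_state, period);
 */
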
#define side_rcu_dereference(p) \
	__extension__ \
	({ \
		__typeof__(p) _____side_v = __atomic_load_n(&(p), __ATOMIC_CONSUME); \
		(_____side_v); \
	})

#define side_rcu_assign_pointer(p, v)	__atomic_store_n(&(p), v, __ATOMIC_RELEASE)

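/*
 * Example (editor's sketch; "my_config", "create_config" and
 * "use_config" are hypothetical): a writer publishes a new object with
 * side_rcu_assign_pointer(), and readers load it with
 * side_rcu_dereference() inside a read-side critical section:
 *
 *	static struct my_config *my_config;	// RCU-protected pointer
 *
 *	// Writer:
 *	side_rcu_assign_pointer(my_config, create_config());
 *
 *	// Reader:
 *	unsigned int period = side_rcu_read_begin(&gp_state);
 *	struct my_config *conf = side_rcu_dereference(my_config);
 *	if (conf)
 *		use_config(conf);
 *	side_rcu_read_end(&gp_state, period);
 */
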
void side_rcu_wait_grace_period(struct side_rcu_gp_state *gp_state) __attribute__((visibility("hidden")));
void side_rcu_gp_init(struct side_rcu_gp_state *rcu_gp) __attribute__((visibility("hidden")));
void side_rcu_gp_exit(struct side_rcu_gp_state *rcu_gp) __attribute__((visibility("hidden")));

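/*
 * Example (editor's sketch; names other than the side_rcu_* API are
 * hypothetical): an updater replaces an RCU-protected pointer, waits
 * for a grace period so that no reader can still observe the old
 * object, then reclaims it:
 *
 *	struct my_config *old_conf = my_config;
 *
 *	side_rcu_assign_pointer(my_config, new_conf);
 *	side_rcu_wait_grace_period(&gp_state);
 *	free(old_conf);
 */
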
#endif /* _SIDE_RCU_H */