Commit | Line | Data |
---|---|---|
85b765b8 MD |
1 | // SPDX-License-Identifier: MIT |
2 | /* | |
3 | * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> | |
4 | */ | |
5 | ||
0a65ebfb MD |
6 | #ifndef _SIDE_RCU_H |
7 | #define _SIDE_RCU_H | |
8 | ||
85b765b8 MD |
9 | #include <sched.h> |
10 | #include <stdint.h> | |
11 | #include <pthread.h> | |
bcdea8ac | 12 | #include <stdbool.h> |
85b765b8 | 13 | #include <poll.h> |
7fb53c62 MD |
14 | #include <side/trace.h> |
15 | #include <rseq/rseq.h> | |
85b765b8 MD |
16 | |
17 | #define SIDE_CACHE_LINE_SIZE 256 | |
85b765b8 MD |
18 | |
/*
 * Per-CPU reader counters for one grace-period phase.
 *
 * begin/end count read-side critical section entries/exits done via
 * the atomic-add fallback path; rseq_begin/rseq_end count the same
 * events done via the rseq fast path. The pairs are kept separate
 * because they are updated with different progress guarantees; the
 * grace-period code presumably sums both (TODO confirm in the .c).
 * Cache-line aligned to avoid false sharing between CPUs.
 */
struct side_rcu_percpu_count {
	uintptr_t begin;	/* reads entered, atomic-add path */
	uintptr_t rseq_begin;	/* reads entered, rseq path */
	uintptr_t end;		/* reads exited, atomic-add path */
	uintptr_t rseq_end;	/* reads exited, rseq path */
} __attribute__((__aligned__(SIDE_CACHE_LINE_SIZE)));
25 | ||
/*
 * Per-CPU grace-period state: one counter set per grace-period phase.
 * Readers index count[] directly with the period value snapshotted in
 * side_rcu_read_begin(), so period is expected to be 0 or 1.
 */
struct side_rcu_cpu_gp_state {
	struct side_rcu_percpu_count count[2];
};
29 | ||
/*
 * Global RCU state shared by readers and the grace-period machinery.
 */
struct side_rcu_gp_state {
	struct side_rcu_cpu_gp_state *percpu_state;	/* array of nr_cpus entries */
	int nr_cpus;
	unsigned int period;		/* current phase; readers use it as count[] index */
	pthread_mutex_t gp_lock;	/* presumably serializes grace periods — see side_rcu_wait_grace_period() impl */
};
36 | ||
85b765b8 MD |
//TODO: implement wait/wakeup for grace period using sys_futex
/*
 * Enter an RCU read-side critical section.
 *
 * Snapshots the current grace-period phase and increments the "begin"
 * counter of that phase on the local CPU. Returns the snapshot, which
 * the caller must pass unchanged to the matching side_rcu_read_end().
 *
 * Fast path: a per-CPU rseq increment of rseq_begin (no atomic
 * instruction). If rseq is unregistered (rseq_offset <= 0) or the
 * rseq critical section aborts (e.g. migration), fall back to
 * sched_getcpu() plus a relaxed atomic add on "begin".
 */
static inline
unsigned int side_rcu_read_begin(struct side_rcu_gp_state *gp_state)
{
	unsigned int period = __atomic_load_n(&gp_state->period, __ATOMIC_RELAXED);
	struct side_rcu_cpu_gp_state *cpu_gp_state;
	int cpu;

	if (side_likely(rseq_offset > 0)) {
		cpu = rseq_cpu_start();
		cpu_gp_state = &gp_state->percpu_state[cpu];
		if (!rseq_addv((intptr_t *)&cpu_gp_state->count[period].rseq_begin, 1, cpu))
			goto fence;	/* rseq increment committed. */
	}
	cpu = sched_getcpu();
	if (side_unlikely(cpu < 0))
		cpu = 0;	/* sched_getcpu() failed: account against CPU 0. */
	cpu_gp_state = &gp_state->percpu_state[cpu];
	(void) __atomic_add_fetch(&cpu_gp_state->count[period].begin, 1, __ATOMIC_RELAXED);
fence:
	/*
	 * This compiler barrier (A) is paired with membarrier() at (C),
	 * (D), (E). It effectively upgrades this compiler barrier to a
	 * SEQ_CST fence with respect to the paired barriers.
	 *
	 * This barrier (A) ensures that the contents of the read-side
	 * critical section does not leak before the "begin" counter
	 * increment. It pairs with memory barriers (D) and (E).
	 *
	 * This barrier (A) also ensures that the "begin" increment is
	 * before the "end" increment. It pairs with memory barrier (C).
	 * It is redundant with barrier (B) for that purpose.
	 */
	rseq_barrier();
	return period;
}
73 | ||
/*
 * Exit an RCU read-side critical section.
 *
 * @period must be the value returned by the matching
 * side_rcu_read_begin(); the "end" counter of that same phase is
 * incremented so the grace-period machinery can balance begin/end.
 *
 * Same fast-path/fallback structure as side_rcu_read_begin(): rseq
 * per-CPU increment of rseq_end, falling back to sched_getcpu() plus
 * a relaxed atomic add on "end".
 */
static inline
void side_rcu_read_end(struct side_rcu_gp_state *gp_state, unsigned int period)
{
	struct side_rcu_cpu_gp_state *cpu_gp_state;
	int cpu;

	/*
	 * This compiler barrier (B) is paired with membarrier() at (C),
	 * (D), (E). It effectively upgrades this compiler barrier to a
	 * SEQ_CST fence with respect to the paired barriers.
	 *
	 * This barrier (B) ensures that the contents of the read-side
	 * critical section does not leak after the "end" counter
	 * increment. It pairs with memory barriers (D) and (E).
	 *
	 * This barrier (B) also ensures that the "begin" increment is
	 * before the "end" increment. It pairs with memory barrier (C).
	 * It is redundant with barrier (A) for that purpose.
	 */
	rseq_barrier();

	if (side_likely(rseq_offset > 0)) {
		cpu = rseq_cpu_start();
		cpu_gp_state = &gp_state->percpu_state[cpu];
		if (!rseq_addv((intptr_t *)&cpu_gp_state->count[period].rseq_end, 1, cpu))
			return;	/* rseq increment committed. */
	}
	cpu = sched_getcpu();
	if (side_unlikely(cpu < 0))
		cpu = 0;	/* sched_getcpu() failed: account against CPU 0. */
	cpu_gp_state = &gp_state->percpu_state[cpu];
	(void) __atomic_add_fetch(&cpu_gp_state->count[period].end, 1, __ATOMIC_RELAXED);
}
107 | ||
11f8a2ce MD |
/*
 * Load an RCU-protected pointer with CONSUME ordering, pairing with
 * the RELEASE store in side_rcu_assign_pointer(): dependent accesses
 * through the returned pointer observe the fully published object.
 * Statement expression so the macro can be used as an rvalue.
 */
#define side_rcu_dereference(p) \
	__extension__ \
	({ \
		__typeof__(p) _____side_v = __atomic_load_n(&(p), __ATOMIC_CONSUME); \
		(_____side_v); \
	})
114 | ||
/*
 * Publish pointer @v into @p with RELEASE ordering, pairing with the
 * CONSUME load in side_rcu_dereference().
 *
 * The expansion deliberately carries no trailing semicolon (and no
 * stray line continuation): callers supply their own ';', and a
 * semicolon baked into the macro would expand to two statements,
 * breaking uses such as `if (c) side_rcu_assign_pointer(p, v); else`.
 */
#define side_rcu_assign_pointer(p, v)	__atomic_store_n(&(p), v, __ATOMIC_RELEASE)
/*
 * Grace-period API, implemented in the side RCU compilation unit.
 * Hidden visibility: internal to the shared object, not exported.
 */
/* Wait for a grace period — presumably until all pre-existing readers
 * of both phases have finished; confirm against the implementation. */
void side_rcu_wait_grace_period(struct side_rcu_gp_state *gp_state) __attribute__((visibility("hidden")));
/* Initialize @rcu_gp (likely allocates percpu_state — see the .c). */
void side_rcu_gp_init(struct side_rcu_gp_state *rcu_gp) __attribute__((visibility("hidden")));
/* Tear down @rcu_gp, releasing resources acquired by side_rcu_gp_init(). */
void side_rcu_gp_exit(struct side_rcu_gp_state *rcu_gp) __attribute__((visibility("hidden")));
0a65ebfb MD |
120 | |
121 | #endif /* _SIDE_RCU_H */ |