Fix: remove bogus parenthesis
[libside.git] / src / rcu.c
// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <sched.h>
#include <stdint.h>
#include <pthread.h>
#include <stdbool.h>
#include <poll.h>

#include "rcu.h"

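/*
 * Overview of the counting scheme used below. The authoritative
 * declarations live in rcu.h; the layout shown here is only an informal
 * sketch of what this file relies on:
 *
 *	struct side_rcu_cpu_gp_state {
 *		struct { uintptr_t begin, end; } count[2];	// one slot per period
 *	};
 *
 * A reader is expected to increment count[period].begin on entry to its
 * read-side critical section and count[period].end on exit, so the sum
 * of (begin - end) across all CPUs is zero exactly when no reader is
 * active in that period.
 */
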
/* active_readers is an input/output parameter. */
static
void check_active_readers(struct side_rcu_gp_state *gp_state, bool *active_readers)
{
	uintptr_t sum[2] = { 0, 0 };	/* begin - end */
	int i;

	for (i = 0; i < gp_state->nr_cpus; i++) {
		struct side_rcu_cpu_gp_state *cpu_state = &gp_state->percpu_state[i];

		if (active_readers[0])
			sum[0] -= __atomic_load_n(&cpu_state->count[0].end, __ATOMIC_RELAXED);
		if (active_readers[1])
			sum[1] -= __atomic_load_n(&cpu_state->count[1].end, __ATOMIC_RELAXED);
	}

	/*
	 * This memory barrier (C) pairs with either of memory barriers
	 * (A) or (B) (one is sufficient).
	 *
	 * Read end counts before begin counts. Reading "end" before
	 * "begin" counts ensures we never see an "end" without having
	 * seen its associated "begin", because "begin" is always
	 * incremented before "end", as guaranteed by memory barriers
	 * (A) or (B).
	 */
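	/*
	 * Concretely: if "begin" counts were read first, a reader could
	 * run an entire read-side critical section (incrementing both
	 * its "begin" and "end" counts) between the two scans. Its "end"
	 * increment would then be counted without the matching "begin",
	 * lowering the sum and potentially masking another reader which
	 * is still active.
	 */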
	__atomic_thread_fence(__ATOMIC_SEQ_CST);

	for (i = 0; i < gp_state->nr_cpus; i++) {
		struct side_rcu_cpu_gp_state *cpu_state = &gp_state->percpu_state[i];

		if (active_readers[0])
			sum[0] += __atomic_load_n(&cpu_state->count[0].begin, __ATOMIC_RELAXED);
		if (active_readers[1])
			sum[1] += __atomic_load_n(&cpu_state->count[1].begin, __ATOMIC_RELAXED);
	}
	if (active_readers[0])
		active_readers[0] = sum[0];
	if (active_readers[1])
		active_readers[1] = sum[1];
}
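
/*
 * For reference, the read-side counterpart (declared in rcu.h) is
 * expected to look roughly like the sketch below. This is an assumption
 * about that header, not code from this file:
 *
 *	unsigned int side_rcu_read_begin(struct side_rcu_gp_state *gp_state)
 *	{
 *		unsigned int period = __atomic_load_n(&gp_state->period, __ATOMIC_RELAXED);
 *
 *		(void) __atomic_add_fetch(&percpu_count(gp_state, period)->begin, 1, __ATOMIC_RELAXED);
 *		__atomic_thread_fence(__ATOMIC_SEQ_CST);	// memory barrier (A)
 *		return period;
 *	}
 *
 *	void side_rcu_read_end(struct side_rcu_gp_state *gp_state, unsigned int period)
 *	{
 *		__atomic_thread_fence(__ATOMIC_SEQ_CST);	// memory barrier (B)
 *		(void) __atomic_add_fetch(&percpu_count(gp_state, period)->end, 1, __ATOMIC_RELAXED);
 *	}
 *
 * where percpu_count() stands for looking up the current CPU's
 * count[period] entry.
 */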

/*
 * Wait for previous period to have no active readers.
 *
 * active_readers is an input/output parameter.
 */
static
void wait_for_prev_period_readers(struct side_rcu_gp_state *gp_state, bool *active_readers)
{
	unsigned int prev_period = gp_state->period ^ 1;

	/*
	 * If a prior active readers scan already observed that no
	 * readers are present for the previous period, there is no need
	 * to scan again.
	 */
	if (!active_readers[prev_period])
		return;
	/*
	 * Wait for the sum of CPU begin/end counts to match for the
	 * previous period.
	 */
	for (;;) {
		check_active_readers(gp_state, active_readers);
		if (!active_readers[prev_period])
			break;
		/* Retry after 10ms. */
		poll(NULL, 0, 10);
	}
}
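
/*
 * Why two waits are needed: pre-existing readers may have registered in
 * either period slot. The grace period first drains the previous
 * period, then flips the current period so that the remaining
 * pre-existing readers are drained by the second wait, while new
 * readers accumulate in the freshly flipped slot.
 */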

/*
 * The grace period completes when it observes that there are no active
 * readers within each of the periods.
 *
 * The active_readers state is initially true for each period, until the
 * grace period observes that no readers are present for each given
 * period, at which point the active_readers state becomes false.
 */
void side_rcu_wait_grace_period(struct side_rcu_gp_state *gp_state)
{
	bool active_readers[2] = { true, true };

	/*
	 * This memory barrier (D) pairs with memory barriers (A) and
	 * (B) on the read-side.
	 *
	 * It orders prior loads and stores before the "end"/"begin"
	 * reader state loads. In other words, it orders prior loads and
	 * stores before observation of active readers quiescence,
	 * effectively ensuring that read-side critical sections which
	 * exist after the grace period completes are ordered after
	 * loads and stores performed before the grace period.
	 */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);

	/*
	 * First scan through all cpus, for both periods. If no readers
	 * are accounted for, we have observed quiescence and can
	 * complete the grace period immediately.
	 */
	check_active_readers(gp_state, active_readers);
	if (!active_readers[0] && !active_readers[1])
		goto end;

	pthread_mutex_lock(&gp_state->gp_lock);

	wait_for_prev_period_readers(gp_state, active_readers);
	/*
	 * If the reader scan detected that there are no readers in the
	 * current period as well, we can complete the grace period
	 * immediately.
	 */
	if (!active_readers[gp_state->period])
		goto unlock;

	/* Flip period: 0 -> 1, 1 -> 0. */
	(void) __atomic_xor_fetch(&gp_state->period, 1, __ATOMIC_RELAXED);

	wait_for_prev_period_readers(gp_state, active_readers);
unlock:
	pthread_mutex_unlock(&gp_state->gp_lock);
end:
	/*
	 * This memory barrier (E) pairs with memory barriers (A) and
	 * (B) on the read-side.
	 *
	 * It orders the "end"/"begin" reader state loads before
	 * following loads and stores. In other words, it orders
	 * observation of active readers quiescence before following
	 * loads and stores, effectively ensuring that read-side
	 * critical sections which existed prior to the grace period
	 * are ordered before loads and stores performed after the grace
	 * period.
	 */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
}
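
/*
 * Typical update-side usage (illustrative sketch only; gp_state,
 * rcu_protected_ptr, new_value and struct data are hypothetical names,
 * not part of this library):
 *
 *	struct data *old = rcu_protected_ptr;
 *
 *	__atomic_store_n(&rcu_protected_ptr, new_value, __ATOMIC_RELEASE);
 *	side_rcu_wait_grace_period(&gp_state);	// wait out pre-existing readers
 *	free(old);				// no reader can still hold "old"
 *
 * Readers bracket their accesses with the read-side helpers from rcu.h
 * and observe either the old or the new pointer, never freed memory.
 */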