/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*/
-#ifndef _TGIF_RCU_H
-#define _TGIF_RCU_H
+#ifndef _SIDE_RCU_H
+#define _SIDE_RCU_H
#include <sched.h>
#include <stdint.h>
#include <pthread.h>
#include <rseq/rseq.h>
#include <linux/futex.h>
#include <sys/time.h>
#include <unistd.h>
#include <sys/syscall.h>
-#include <tgif/macros.h>
+#include <side/macros.h>
-#define TGIF_CACHE_LINE_SIZE 256
+#define SIDE_CACHE_LINE_SIZE 256
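/*
 * 256 bytes is a conservative upper bound on cache line size (larger
 * than the common 64/128 bytes), so per-CPU counters in adjacent array
 * slots never false-share a cache line.
 */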
-struct tgif_rcu_percpu_count {
+struct side_rcu_percpu_count {
uintptr_t begin;
uintptr_t rseq_begin;
uintptr_t end;
uintptr_t rseq_end;
};
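/*
 * The rseq_* counters are incremented on the rseq fast path, begin/end
 * on the atomic fallback path; a grace period presumably considers the
 * sum of both (the summation lives in rcu.c, not shown here).
 */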
-struct tgif_rcu_cpu_gp_state {
- struct tgif_rcu_percpu_count count[2];
-} __attribute__((__aligned__(TGIF_CACHE_LINE_SIZE)));
+struct side_rcu_cpu_gp_state {
+ struct side_rcu_percpu_count count[2];
+} __attribute__((__aligned__(SIDE_CACHE_LINE_SIZE)));
-struct tgif_rcu_gp_state {
- struct tgif_rcu_cpu_gp_state *percpu_state;
+struct side_rcu_gp_state {
+ struct side_rcu_cpu_gp_state *percpu_state;
int nr_cpus;
int32_t futex;
unsigned int period;
pthread_mutex_t gp_lock;
};
-struct tgif_rcu_read_state {
- struct tgif_rcu_percpu_count *percpu_count;
+struct side_rcu_read_state {
+ struct side_rcu_percpu_count *percpu_count;
int cpu;
};
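/*
 * Presumably set at library init when rseq registration and the
 * membarrier(2) commands are both usable (the exact logic lives in
 * rcu.c, not shown here); when zero, readers always take the atomic
 * fallback path below.
 */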
-extern unsigned int tgif_rcu_rseq_membarrier_available __attribute__((visibility("hidden")));
+extern unsigned int side_rcu_rseq_membarrier_available __attribute__((visibility("hidden")));
static inline
int futex(int32_t *uaddr, int op, int32_t val,
	const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
{
	return syscall(__NR_futex, uaddr, op, val, timeout, uaddr2, val3);
}
/*
- * Wake-up tgif_rcu_wait_grace_period. Called concurrently from many
+ * Wake-up side_rcu_wait_grace_period. Called concurrently from many
* threads.
*/
static inline
-void tgif_rcu_wake_up_gp(struct tgif_rcu_gp_state *gp_state)
+void side_rcu_wake_up_gp(struct side_rcu_gp_state *gp_state)
{
- if (tgif_unlikely(__atomic_load_n(&gp_state->futex, __ATOMIC_RELAXED) == -1)) {
+ if (side_unlikely(__atomic_load_n(&gp_state->futex, __ATOMIC_RELAXED) == -1)) {
__atomic_store_n(&gp_state->futex, 0, __ATOMIC_RELAXED);
/* TODO: handle futex return values. */
(void) futex(&gp_state->futex, FUTEX_WAKE, 1, NULL, NULL, 0);
	}
}
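/*
 * Example: the waiter side this wake-up pairs with (a sketch; the
 * actual implementation lives in rcu.c and may differ). The
 * grace-period thread publishes -1 in the futex before sleeping, so
 * readers only issue FUTEX_WAKE when a waiter is actually blocked:
 *
 *	while (!side_rcu_readers_drained(gp_state)) {
 *		__atomic_store_n(&gp_state->futex, -1, __ATOMIC_RELAXED);
 *		if (side_rcu_readers_drained(gp_state))
 *			break;	// re-check to avoid a missed wake-up
 *		(void) futex(&gp_state->futex, FUTEX_WAIT, -1, NULL, NULL, 0);
 *	}
 *
 * side_rcu_readers_drained() is a hypothetical helper used only for
 * illustration.
 */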
static inline
-void tgif_rcu_read_begin(struct tgif_rcu_gp_state *gp_state, struct tgif_rcu_read_state *read_state)
+void side_rcu_read_begin(struct side_rcu_gp_state *gp_state, struct side_rcu_read_state *read_state)
{
- struct tgif_rcu_percpu_count *begin_cpu_count;
- struct tgif_rcu_cpu_gp_state *cpu_gp_state;
+ struct side_rcu_percpu_count *begin_cpu_count;
+ struct side_rcu_cpu_gp_state *cpu_gp_state;
unsigned int period;
	int cpu;

	/* rseq-reported CPU; rseq_addv() below validates it or fails. */
	cpu = rseq_cpu_start();
	period = __atomic_load_n(&gp_state->period, __ATOMIC_RELAXED);
	cpu_gp_state = &gp_state->percpu_state[cpu];
read_state->percpu_count = begin_cpu_count = &cpu_gp_state->count[period];
read_state->cpu = cpu;
- if (tgif_likely(tgif_rcu_rseq_membarrier_available &&
- !rseq_addv((intptr_t *)&begin_cpu_count->rseq_begin, 1, cpu))) {
+ if (side_likely(side_rcu_rseq_membarrier_available &&
+ !rseq_addv(RSEQ_MO_RELAXED, RSEQ_PERCPU_CPU_ID,
+ (intptr_t *)&begin_cpu_count->rseq_begin, 1, cpu))) {
/*
* This compiler barrier (A) is paired with membarrier() at (C),
* (D), (E). It effectively upgrades this compiler barrier to a
* SEQ_CST fence with respect to the paired barriers.
*
- * This barrier (A) ensures that the contents of the read-tgif
+ * This barrier (A) ensures that the contents of the read-side
* critical section does not leak before the "begin" counter
* increment. It pairs with memory barriers (D) and (E).
		 *
		 * This barrier (A) also ensures that the "begin" increment is
		 * before the "end" increment. It pairs with memory barrier (B).
		 */
		rseq_barrier();
		return;
	}
/* Fallback to atomic increment and SEQ_CST. */
cpu = sched_getcpu();
- if (tgif_unlikely(cpu < 0))
+ if (side_unlikely(cpu < 0))
cpu = 0;
read_state->cpu = cpu;
	cpu_gp_state = &gp_state->percpu_state[cpu];
	read_state->percpu_count = begin_cpu_count = &cpu_gp_state->count[period];
	(void) __atomic_add_fetch(&begin_cpu_count->begin, 1, __ATOMIC_SEQ_CST);
}
static inline
-void tgif_rcu_read_end(struct tgif_rcu_gp_state *gp_state, struct tgif_rcu_read_state *read_state)
+void side_rcu_read_end(struct side_rcu_gp_state *gp_state, struct side_rcu_read_state *read_state)
{
- struct tgif_rcu_percpu_count *begin_cpu_count = read_state->percpu_count;
+ struct side_rcu_percpu_count *begin_cpu_count = read_state->percpu_count;
int cpu = read_state->cpu;
/*
	 * This compiler barrier (B) is paired with membarrier() at (C),
	 * (D), (E). It effectively upgrades this compiler barrier to a
* SEQ_CST fence with respect to the paired barriers.
*
- * This barrier (B) ensures that the contents of the read-tgif
+ * This barrier (B) ensures that the contents of the read-side
* critical section does not leak after the "end" counter
* increment. It pairs with memory barriers (D) and (E).
	 *
	 * This barrier (B) also ensures that the "begin" increment is
	 * before the "end" increment. It pairs with memory barrier (A).
	 * It is redundant with barrier (A) for that purpose.
*/
rseq_barrier();
- if (tgif_likely(tgif_rcu_rseq_membarrier_available &&
- !rseq_addv((intptr_t *)&begin_cpu_count->rseq_end, 1, cpu))) {
+ if (side_likely(side_rcu_rseq_membarrier_available &&
+ !rseq_addv(RSEQ_MO_RELAXED, RSEQ_PERCPU_CPU_ID,
+ (intptr_t *)&begin_cpu_count->rseq_end, 1, cpu))) {
/*
* This barrier (F) is paired with membarrier()
		 * at (G). It orders increment of the begin/end
		 * counters before load/store to the futex.
		 */
		rseq_barrier();
		goto end;
	}
	/* Fallback to atomic increment and SEQ_CST. */
	(void) __atomic_add_fetch(&begin_cpu_count->end, 1, __ATOMIC_SEQ_CST);
	/*
	 * This barrier (F) is paired with membarrier() at (G). It orders
	 * increment of the begin/end counters before load/store to the
	 * futex.
	 */
__atomic_thread_fence(__ATOMIC_SEQ_CST);
end:
- tgif_rcu_wake_up_gp(gp_state);
+ side_rcu_wake_up_gp(gp_state);
}
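/*
 * Example reader (a sketch, assuming a gp_state initialized with
 * side_rcu_gp_init() and a shared pointer rcu_p published with
 * side_rcu_assign_pointer(); rcu_p and use() are illustrative names):
 *
 *	struct side_rcu_read_state rs;
 *
 *	side_rcu_read_begin(gp_state, &rs);
 *	use(side_rcu_dereference(rcu_p));	// safe until read_end
 *	side_rcu_read_end(gp_state, &rs);
 */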
-#define tgif_rcu_dereference(p) \
+#define side_rcu_dereference(p) \
__extension__ \
({ \
- __typeof__(p) _____tgif_v = __atomic_load_n(&(p), __ATOMIC_CONSUME); \
- (_____tgif_v); \
+ __typeof__(p) _____side_v = __atomic_load_n(&(p), __ATOMIC_CONSUME); \
+ (_____side_v); \
})
-#define tgif_rcu_assign_pointer(p, v) __atomic_store_n(&(p), v, __ATOMIC_RELEASE); \
+#define side_rcu_assign_pointer(p, v) __atomic_store_n(&(p), v, __ATOMIC_RELEASE)
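/*
 * The RELEASE store in side_rcu_assign_pointer() pairs with the
 * CONSUME load in side_rcu_dereference(): a reader that observes the
 * new pointer also observes the stores that initialized the object it
 * points to.
 */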
-void tgif_rcu_wait_grace_period(struct tgif_rcu_gp_state *gp_state) __attribute__((visibility("hidden")));
-void tgif_rcu_gp_init(struct tgif_rcu_gp_state *rcu_gp) __attribute__((visibility("hidden")));
-void tgif_rcu_gp_exit(struct tgif_rcu_gp_state *rcu_gp) __attribute__((visibility("hidden")));
+void side_rcu_wait_grace_period(struct side_rcu_gp_state *gp_state) __attribute__((visibility("hidden")));
+void side_rcu_gp_init(struct side_rcu_gp_state *rcu_gp) __attribute__((visibility("hidden")));
+void side_rcu_gp_exit(struct side_rcu_gp_state *rcu_gp) __attribute__((visibility("hidden")));
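/*
 * Example updater (a sketch; old, new, create_object() and the freeing
 * policy are illustrative, not part of this API):
 *
 *	new = create_object();
 *	old = side_rcu_dereference(rcu_p);
 *	side_rcu_assign_pointer(rcu_p, new);
 *	side_rcu_wait_grace_period(gp_state);	// pre-existing readers done
 *	free(old);
 */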
-#endif /* _TGIF_RCU_H */
+#endif /* _SIDE_RCU_H */