Implement side per-cpu RCU
author     Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
           Tue, 25 Oct 2022 16:16:11 +0000 (12:16 -0400)
committer  Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
           Tue, 25 Oct 2022 16:16:11 +0000 (12:16 -0400)
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
src/Makefile
src/rcu.h [new file with mode: 0644]
src/side.c

diff --git a/src/Makefile b/src/Makefile
index 6332a4faa5affde0270df3bd9cd876022dd1059e..d04919d123f6c193bb50517ce07e13b165b54d9b 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -1,9 +1,9 @@
 all: test
 
-HEADERS = ../include/side/trace.h ../include/side/macros.h
+HEADERS = ../include/side/trace.h ../include/side/macros.h rcu.h
 
 CFLAGS = -g -O2 -Wall
-CPPFLAGS = -I../include/
+CPPFLAGS = -I../include/ -D_GNU_SOURCE
 
 side.o: side.c $(HEADERS)
        gcc $(CFLAGS) $(CPPFLAGS) -c -o $@ $<
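
The -D_GNU_SOURCE addition is presumably needed because rcu.h (added below) calls sched_getcpu(), a GNU extension that glibc only declares when _GNU_SOURCE is defined before <sched.h> is included. A minimal sketch of that requirement, illustrative and not part of the commit:

/* Illustrative only: sched_getcpu() is a GNU extension, so _GNU_SOURCE
 * must be defined before <sched.h> is included. In this tree it comes
 * from CPPFLAGS rather than from each source file. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
        int cpu = sched_getcpu();       /* current CPU number, or -1 on error */

        printf("running on cpu %d\n", cpu);
        return 0;
}
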
diff --git a/src/rcu.h b/src/rcu.h
new file mode 100644 (file)
index 0000000..3b5d44c
--- /dev/null
+++ b/src/rcu.h
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <sched.h>
+#include <stdint.h>
+#include <pthread.h>
+#include <poll.h>
+
+#define SIDE_CACHE_LINE_SIZE           256
+#define SIDE_RCU_PERCPU_ARRAY_SIZE     2
+
+struct side_rcu_percpu_count {
+       uintptr_t begin;
+       uintptr_t end;
+}  __attribute__((__aligned__(SIDE_CACHE_LINE_SIZE)));
+
+struct side_rcu_cpu_gp_state {
+       struct side_rcu_percpu_count count[SIDE_RCU_PERCPU_ARRAY_SIZE];
+};
+
+struct side_rcu_gp_state {
+       struct side_rcu_cpu_gp_state *percpu_state;
+       int nr_cpus;
+       unsigned int period;
+       pthread_mutex_t gp_lock;
+};
+
+//TODO: replace atomics by rseq (when available)
+//TODO: replace acquire/release by membarrier+compiler barrier (when available)
+//TODO: implement wait/wakeup for grace period using sys_futex
+static inline
+unsigned int side_rcu_read_begin(struct side_rcu_gp_state *gp_state)
+{
+       int cpu = sched_getcpu();
+       unsigned int period = __atomic_load_n(&gp_state->period, __ATOMIC_RELAXED);
+
+       if (cpu < 0)
+               cpu = 0;
+       (void) __atomic_add_fetch(&gp_state->percpu_state[cpu].count[period].begin, 1, __ATOMIC_ACQUIRE);
+       return period;
+}
+
+static inline
+void side_rcu_read_end(struct side_rcu_gp_state *gp_state, unsigned int period)
+{
+       int cpu = sched_getcpu();
+
+       if (cpu < 0)
+               cpu = 0;
+       (void) __atomic_add_fetch(&gp_state->percpu_state[cpu].count[period].end, 1, __ATOMIC_RELEASE);
+}
+
+static inline
+void wait_for_cpus(struct side_rcu_gp_state *gp_state)
+{
+       unsigned int prev_period = 1 - gp_state->period;
+
+       /*
+        * Wait for the sum of CPU begin/end counts to match for the
+        * previous period.
+        */
+       for (;;) {
+               uintptr_t sum = 0;      /* begin - end */
+               int i;
+
+               for (i = 0; i < gp_state->nr_cpus; i++) {
+                       struct side_rcu_cpu_gp_state *cpu_state = &gp_state->percpu_state[i];
+
+                       sum -= __atomic_load_n(&cpu_state->count[prev_period].end, __ATOMIC_RELAXED);
+               }
+
+               /*
+                * Read end counts before begin counts. Reading end
+                * before begin count ensures we never see an end
+                * without having seen its associated begin, in case of
+                * a thread migration during the traversal over each
+                * cpu.
+                */
+               __atomic_thread_fence(__ATOMIC_ACQ_REL);
+
+               for (i = 0; i < gp_state->nr_cpus; i++) {
+                       struct side_rcu_cpu_gp_state *cpu_state = &gp_state->percpu_state[i];
+
+                       sum += __atomic_load_n(&cpu_state->count[prev_period].begin, __ATOMIC_RELAXED);
+               }
+               if (!sum) {
+                       break;
+               } else {
+                       /* Retry after 10ms. */
+                       poll(NULL, 0, 10);
+               }
+       }
+}
+
+static inline
+void side_rcu_wait_grace_period(struct side_rcu_gp_state *gp_state)
+{
+       /*
+        * This release fence pairs with the acquire MO __atomic_add_fetch
+        * in side_rcu_read_begin().
+        */
+       __atomic_thread_fence(__ATOMIC_RELEASE);
+
+       pthread_mutex_lock(&gp_state->gp_lock);
+
+       wait_for_cpus(gp_state);
+
+       /* Flip period: 0 -> 1, 1 -> 0. */
+       (void) __atomic_xor_fetch(&gp_state->period, 1, __ATOMIC_RELAXED);
+
+       wait_for_cpus(gp_state);
+
+       pthread_mutex_unlock(&gp_state->gp_lock);
+
+       /*
+        * This acquire fence pairs with the release MO __atomic_add_fetch
+        * in side_rcu_read_end().
+        */
+       __atomic_thread_fence(__ATOMIC_ACQUIRE);
+}
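
For context, here is a minimal usage sketch of the API above; it is not part of this commit, and the names my_data, rcu_gp, current_data, reader and updater are made up for illustration. A reader brackets access to an RCU-protected pointer with side_rcu_read_begin()/side_rcu_read_end(); an updater publishes a new version, calls side_rcu_wait_grace_period() so that every reader which could still hold the old version has exited its critical section, and only then reclaims it. Initialization of the side_rcu_gp_state (setting nr_cpus, allocating the cache-line-aligned percpu_state array zeroed, initializing gp_lock) is assumed to have happened elsewhere.

/* Illustrative usage sketch, not part of this commit.
 * Assumes rcu_gp has been initialized elsewhere. */
#include <stdlib.h>
#include "rcu.h"

struct my_data {
        int value;
};

static struct side_rcu_gp_state rcu_gp;
static struct my_data *current_data;

static void reader(void)
{
        unsigned int period;
        struct my_data *p;

        period = side_rcu_read_begin(&rcu_gp);
        p = __atomic_load_n(&current_data, __ATOMIC_CONSUME);
        if (p)
                (void) p->value;        /* safe to dereference until read_end */
        side_rcu_read_end(&rcu_gp, period);
}

static void updater(struct my_data *new_data)
{
        struct my_data *old_data;

        old_data = __atomic_exchange_n(&current_data, new_data, __ATOMIC_RELEASE);
        /* Wait until all readers that may still hold old_data have exited. */
        side_rcu_wait_grace_period(&rcu_gp);
        free(old_data);
}

The grace period follows from the per-CPU counters: each read-side critical section increments begin and end for the period it observed, so once the per-CPU sums of begin and end match for a period, no reader that started in that period is still running. side_rcu_wait_grace_period() drains the inactive period, flips which period new readers use, then drains the period that was previously active, so any reader that began before the call must have finished by the time it returns.
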
diff --git a/src/side.c b/src/side.c
index 8f76b3f947a99c4824fd3d872ec9e737d7b1ee43..016f55b1f7b7a23aff103460464ac7ad3c8ae12a 100644
--- a/src/side.c
+++ b/src/side.c
@@ -5,6 +5,7 @@
 
 #include <side/trace.h>
 #include "tracer.h"
+#include "rcu.h"
 
 #define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK      0x80000000
 