static inline
void side_rcu_read_begin(struct side_rcu_gp_state *gp_state, struct side_rcu_read_state *read_state)
{
- unsigned int period = __atomic_load_n(&gp_state->period, __ATOMIC_RELAXED);
struct side_rcu_percpu_count *begin_cpu_count;
struct side_rcu_cpu_gp_state *cpu_gp_state;
+ unsigned int period;
int cpu;
- if (side_likely(side_rcu_rseq_membarrier_available)) {
- cpu = rseq_cpu_start();
- cpu_gp_state = &gp_state->percpu_state[cpu];
- read_state->percpu_count = begin_cpu_count = &cpu_gp_state->count[period];
- read_state->cpu = cpu;
- if (side_likely(!rseq_addv((intptr_t *)&begin_cpu_count->rseq_begin, 1, cpu))) {
- /*
- * This compiler barrier (A) is paired with membarrier() at (C),
- * (D), (E). It effectively upgrades this compiler barrier to a
- * SEQ_CST fence with respect to the paired barriers.
- *
- * This barrier (A) ensures that the contents of the read-side
- * critical section do not leak before the "begin" counter
- * increment. It pairs with memory barriers (D) and (E).
- *
- * This barrier (A) also ensures that the "begin" increment is
- * before the "end" increment. It pairs with memory barrier (C).
- * It is redundant with barrier (B) for that purpose.
- */
- rseq_barrier();
- return;
- }
+ cpu = rseq_cpu_start();
+ period = __atomic_load_n(&gp_state->period, __ATOMIC_RELAXED);
+ cpu_gp_state = &gp_state->percpu_state[cpu];
+ read_state->percpu_count = begin_cpu_count = &cpu_gp_state->count[period];
+ read_state->cpu = cpu;
+ if (side_likely(side_rcu_rseq_membarrier_available &&
+ !rseq_addv((intptr_t *)&begin_cpu_count->rseq_begin, 1, cpu))) {
+ /*
+ * This compiler barrier (A) is paired with membarrier() at (C),
+ * (D), (E). It effectively upgrades this compiler barrier to a
+ * SEQ_CST fence with respect to the paired barriers.
+ *
+ * This barrier (A) ensures that the contents of the read-side
+ * critical section do not leak before the "begin" counter
+ * increment. It pairs with memory barriers (D) and (E).
+ *
+ * This barrier (A) also ensures that the "begin" increment is
+ * before the "end" increment. It pairs with memory barrier (C).
+ * It is redundant with barrier (B) for that purpose.
+ */
+ rseq_barrier();
+ return;
}
/* Fallback to atomic increment and SEQ_CST. */
	cpu = sched_getcpu();
	if (side_unlikely(cpu < 0))
		cpu = 0;
	read_state->cpu = cpu;
	cpu_gp_state = &gp_state->percpu_state[cpu];
	read_state->percpu_count = begin_cpu_count = &cpu_gp_state->count[period];
	(void) __atomic_add_fetch(&begin_cpu_count->begin, 1, __ATOMIC_SEQ_CST);
}
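
/*
 * Usage sketch (illustrative, not part of the patch): a reader brackets
 * its critical section with the begin/end pair, keeping the read_state
 * on its own stack so that side_rcu_read_end() below increments the
 * "end" counter of the very same per-CPU slot (same cpu, same period)
 * whose "begin" counter was incremented, even if the thread migrates in
 * between. The gp_state instance my_gp, the protected pointer data_p,
 * struct my_data and consume() are hypothetical names:
 *
 *	struct side_rcu_read_state rs;
 *	struct my_data *p;
 *
 *	side_rcu_read_begin(&my_gp, &rs);
 *	p = __atomic_load_n(&data_p, __ATOMIC_CONSUME);
 *	if (p)
 *		consume(p);
 *	side_rcu_read_end(&my_gp, &rs);
 */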

static inline
void side_rcu_read_end(struct side_rcu_gp_state *gp_state, struct side_rcu_read_state *read_state)
{
	struct side_rcu_percpu_count *begin_cpu_count = read_state->percpu_count;
	int cpu = read_state->cpu;

- if (side_likely(side_rcu_rseq_membarrier_available)) {
+ /*
+ * This compiler barrier (B) is paired with membarrier() at (C),
+ * (D), (E). It effectively upgrades this compiler barrier to a
+ * SEQ_CST fence with respect to the paired barriers.
+ *
+ * This barrier (B) ensures that the contents of the read-side
+ * critical section do not leak after the "end" counter
+ * increment. It pairs with memory barriers (D) and (E).
+ *
+ * This barrier (B) also ensures that the "begin" increment is
+ * before the "end" increment. It pairs with memory barrier (C).
+ * It is redundant with barrier (A) for that purpose.
+ */
+ rseq_barrier();
+ if (side_likely(side_rcu_rseq_membarrier_available &&
+ !rseq_addv((intptr_t *)&begin_cpu_count->rseq_end, 1, cpu))) {
/*
- * This compiler barrier (B) is paired with membarrier() at (C),
- * (D), (E). It effectively upgrades this compiler barrier to a
- * SEQ_CST fence with respect to the paired barriers.
- *
- * This barrier (B) ensures that the contents of the read-side
- * critical section do not leak after the "end" counter
- * increment. It pairs with memory barriers (D) and (E).
- *
- * This barrier (B) also ensures that the "begin" increment is
- * before the "end" increment. It pairs with memory barrier (C).
- * It is redundant with barrier (A) for that purpose.
+ * This barrier (F) is paired with membarrier()
+ * at (G). It orders increment of the begin/end
+ * counters before load/store to the futex.
*/
rseq_barrier();
- if (side_likely(!rseq_addv((intptr_t *)&begin_cpu_count->rseq_end, 1, cpu))) {
- /*
- * This barrier (F) is paired with membarrier()
- * at (G). It orders increment of the begin/end
- * counters before load/store to the futex.
- */
- rseq_barrier();
- goto end;
- }
+ goto end;
}
/* Fallback to atomic increment and SEQ_CST. */
(void) __atomic_add_fetch(&begin_cpu_count->end, 1, __ATOMIC_SEQ_CST);
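
/*
 * Pairing sketch (illustrative, not part of the patch): the barriers
 * (C), (D), (E) and (G) referenced above live on the grace-period
 * (updater) side. Pairing each read-side compiler barrier with an
 * updater-side expedited membarrier() system call is what upgrades
 * rseq_barrier() to a SEQ_CST fence: the call forces a full memory
 * barrier on every running thread of the process. Assuming the process
 * registered with MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED at startup,
 * each updater-side fence boils down to something like:
 *
 *	#include <linux/membarrier.h>
 *	#include <stdlib.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static void gp_membarrier(void)	// hypothetical helper name
 *	{
 *		if (syscall(__NR_membarrier,
 *				MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0))
 *			abort();	// real code would handle failure
 *	}
 */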