Restartable sequences: only keep rseq lib parts needed by ust
author Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Sun, 18 Sep 2016 00:06:17 +0000 (20:06 -0400)
committer Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Tue, 22 Nov 2016 00:11:32 +0000 (19:11 -0500)
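
The LTTng-UST ring buffer only needs the rseq fast path. Remove the
fallback-lock machinery (struct rseq_lock, the do_rseq()/__do_rseq()
helper macros and the fallback counters) together with the
sys_membarrier detection, and drop the per-architecture barrier and
load/store macros in favour of liburcu's portable primitives
(CMM_LOAD_SHARED(), cmm_barrier(), caa_unlikely()). As a result,
rseq_start() and the rseq_finish() family no longer take a
struct rseq_lock parameter.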
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
liblttng-ust/lttng-ring-buffer-client.h
libringbuffer/rseq-arm.h
libringbuffer/rseq-ppc.h
libringbuffer/rseq-x86.h
libringbuffer/rseq.c
libringbuffer/rseq.h

diff --git a/liblttng-ust/lttng-ring-buffer-client.h b/liblttng-ust/lttng-ring-buffer-client.h
index 6ed067105d8e7fd0f8a280e3f09caa0375186836..5e95244f724b36dbcf23bedc6435c2407d401156 100644
--- a/liblttng-ust/lttng-ring-buffer-client.h
+++ b/liblttng-ust/lttng-ring-buffer-client.h
@@ -26,6 +26,7 @@
 #include "clock.h"
 #include "lttng-tracer.h"
 #include "../libringbuffer/frontend_types.h"
+#include "../libringbuffer/rseq.h"
 
 #define LTTNG_COMPACT_EVENT_BITS       5
 #define LTTNG_COMPACT_TSC_BITS         27
@@ -696,6 +697,7 @@ int lttng_event_reserve(struct lttng_ust_lib_ring_buffer_ctx *ctx,
        struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
        int ret, cpu;
 
+       /* TODO: lazily register rseq for the current thread. */
        cpu = lib_ring_buffer_get_cpu(&client_config);
        if (cpu < 0)
                return -EPERM;
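
The TODO above hints at lazy per-thread registration. As a purely
illustrative sketch (the helper name and placement are assumptions,
not part of this commit), lazy registration called from
lttng_event_reserve() could look like:

	/* Hypothetical helper: register rseq on first use by this thread. */
	static __thread int lttng_rseq_registered;

	static inline void lttng_rseq_register_lazy(void)
	{
		if (caa_unlikely(!lttng_rseq_registered)) {
			if (!rseq_register_current_thread())
				lttng_rseq_registered = 1;
		}
	}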
diff --git a/libringbuffer/rseq-arm.h b/libringbuffer/rseq-arm.h
index 289abd498f98652b11529811e517635c068d7016..c0b172c21b995255345b41e1ee864b7e591f989a 100644
--- a/libringbuffer/rseq-arm.h
+++ b/libringbuffer/rseq-arm.h
  * SOFTWARE.
  */
 
-#define smp_mb()       __asm__ __volatile__ ("dmb" : : : "memory")
-#define smp_rmb()      __asm__ __volatile__ ("dmb" : : : "memory")
-#define smp_wmb()      __asm__ __volatile__ ("dmb" : : : "memory")
-
-#define smp_load_acquire(p)                                            \
-__extension__ ({                                                       \
-       __typeof(*p) ____p1 = READ_ONCE(*p);                            \
-       smp_mb();                                                       \
-       ____p1;                                                         \
-})
-
-#define smp_acquire__after_ctrl_dep()  smp_rmb()
-
-#define smp_store_release(p, v)                                                \
-do {                                                                   \
-       smp_mb();                                                       \
-       WRITE_ONCE(*p, v);                                              \
-} while (0)
-
 #define has_fast_acquire_release()     0
 #define has_single_copy_load_64()      1
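
The barrier and load/store macros removed from this header (and from
the ppc and x86 headers below) lose their last users once the fallback
lock is gone; the shared accesses that remain in rseq.h go through
liburcu primitives instead. As a rough illustration of the mapping
(liburcu names, pulled in via urcu/compiler.h, urcu/system.h and
urcu/arch.h):

	cmm_smp_mb();              /* was smp_mb()  */
	cmm_smp_rmb();             /* was smp_rmb() */
	cmm_smp_wmb();             /* was smp_wmb() */
	v = CMM_LOAD_SHARED(x);    /* was READ_ONCE(x) / ACCESS_ONCE(x) */
	CMM_STORE_SHARED(x, v);    /* was WRITE_ONCE(x, v) */
	cmm_barrier();             /* was barrier() */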
 
diff --git a/libringbuffer/rseq-ppc.h b/libringbuffer/rseq-ppc.h
index 8a76d07fcb5408b4eb757615d720d69220398e72..6b1b13a38526629d148ea860e7c98b97452cf54f 100644
--- a/libringbuffer/rseq-ppc.h
+++ b/libringbuffer/rseq-ppc.h
  * SOFTWARE.
  */
 
-#define smp_mb()       __asm__ __volatile__ ("sync" : : : "memory")
-#define smp_lwsync()   __asm__ __volatile__ ("lwsync" : : : "memory")
-#define smp_rmb()      smp_lwsync()
-#define smp_wmb()      smp_lwsync()
-
-#define smp_load_acquire(p)                                            \
-__extension__ ({                                                       \
-       __typeof(*p) ____p1 = READ_ONCE(*p);                            \
-       smp_lwsync();                                                   \
-       ____p1;                                                         \
-})
-
-#define smp_acquire__after_ctrl_dep()  smp_lwsync()
-
-#define smp_store_release(p, v)                                                \
-do {                                                                   \
-       smp_lwsync();                                                   \
-       WRITE_ONCE(*p, v);                                              \
-} while (0)
-
 #define has_fast_acquire_release()     0
 
 #ifdef __PPC64__
diff --git a/libringbuffer/rseq-x86.h b/libringbuffer/rseq-x86.h
index 7154bfa49697f987ded459926670b072647d1e76..29e5647d6963c5ed412772ed729b834f7a56a295 100644
--- a/libringbuffer/rseq-x86.h
+++ b/libringbuffer/rseq-x86.h
 
 #ifdef __x86_64__
 
-#define smp_mb()       __asm__ __volatile__ ("mfence" : : : "memory")
-#define smp_rmb()      barrier()
-#define smp_wmb()      barrier()
-
-#define smp_load_acquire(p)                                            \
-__extension__ ({                                                       \
-       __typeof(*p) ____p1 = READ_ONCE(*p);                            \
-       barrier();                                                      \
-       ____p1;                                                         \
-})
-
-#define smp_acquire__after_ctrl_dep()  smp_rmb()
-
-#define smp_store_release(p, v)                                                \
-do {                                                                   \
-       barrier();                                                      \
-       WRITE_ONCE(*p, v);                                              \
-} while (0)
-
 #define has_fast_acquire_release()     1
 #define has_single_copy_load_64()      1
 
@@ -159,32 +140,6 @@ do { \
 
 #elif __i386__
 
-/*
- * Support older 32-bit architectures that do not implement fence
- * instructions.
- */
-#define smp_mb()       \
-       __asm__ __volatile__ ("lock; addl $0,0(%%esp)" : : : "memory")
-#define smp_rmb()      \
-       __asm__ __volatile__ ("lock; addl $0,0(%%esp)" : : : "memory")
-#define smp_wmb()      \
-       __asm__ __volatile__ ("lock; addl $0,0(%%esp)" : : : "memory")
-
-#define smp_load_acquire(p)                                            \
-__extension__ ({                                                       \
-       __typeof(*p) ____p1 = READ_ONCE(*p);                            \
-       smp_mb();                                                       \
-       ____p1;                                                         \
-})
-
-#define smp_acquire__after_ctrl_dep()  smp_rmb()
-
-#define smp_store_release(p, v)                                                \
-do {                                                                   \
-       smp_mb();                                                       \
-       WRITE_ONCE(*p, v);                                              \
-} while (0)
-
 #define has_fast_acquire_release()     0
 #define has_single_copy_load_64()      0
 
diff --git a/libringbuffer/rseq.c b/libringbuffer/rseq.c
index c8193a37cd8c41ba916733d462250801232038cd..219f416eabf3c5c14f7f1aab0ec0dcf3a2d2be22 100644
--- a/libringbuffer/rseq.c
+++ b/libringbuffer/rseq.c
 #include <syscall.h>
 #include <assert.h>
 #include <signal.h>
-#include <linux/membarrier.h>
 
 #include <rseq.h>
 
-#ifdef __NR_membarrier
-# define membarrier(...)               syscall(__NR_membarrier, __VA_ARGS__)
-#else
-# define membarrier(...)               -ENOSYS
-#endif
-
-struct rseq_thread_state {
-       uint32_t fallback_wait_cnt;
-       uint32_t fallback_cnt;
-       sigset_t sigmask_saved;
-};
-
 __attribute__((weak)) __thread volatile struct rseq __rseq_abi = {
        .u.e.cpu_id = -1,
 };
 
-static __thread volatile struct rseq_thread_state rseq_thread_state;
-
-int rseq_has_sys_membarrier;
-
 static int sys_rseq(volatile struct rseq *rseq_abi, int flags)
 {
        return syscall(__NR_rseq, rseq_abi, flags);
 }
 
-int rseq_register_current_thread(void)
-{
-       int rc;
-
-       rc = sys_rseq(&__rseq_abi, 0);
-       if (rc) {
-               fprintf(stderr, "Error: sys_rseq(...) failed(%d): %s\n",
-                       errno, strerror(errno));
-               return -1;
-       }
-       assert(rseq_current_cpu() >= 0);
-       return 0;
-}
-
-int rseq_unregister_current_thread(void)
-{
-       int rc;
-
-       rc = sys_rseq(NULL, 0);
-       if (rc) {
-               fprintf(stderr, "Error: sys_rseq(...) failed(%d): %s\n",
-                       errno, strerror(errno));
-               return -1;
-       }
-       return 0;
-}
-
-int rseq_init_lock(struct rseq_lock *rlock)
-{
-       int ret;
-
-       ret = pthread_mutex_init(&rlock->lock, NULL);
-       if (ret) {
-               errno = ret;
-               return -1;
-       }
-       rlock->state = RSEQ_LOCK_STATE_RESTART;
-       return 0;
-}
-
-int rseq_destroy_lock(struct rseq_lock *rlock)
-{
-       int ret;
-
-       ret = pthread_mutex_destroy(&rlock->lock);
-       if (ret) {
-               errno = ret;
-               return -1;
-       }
-       return 0;
-}
-
 static void signal_off_save(sigset_t *oldset)
 {
        sigset_t set;
@@ -125,123 +56,29 @@ static void signal_restore(sigset_t oldset)
                abort();
 }
 
-static void rseq_fallback_lock(struct rseq_lock *rlock)
-{
-       signal_off_save((sigset_t *)&rseq_thread_state.sigmask_saved);
-       pthread_mutex_lock(&rlock->lock);
-       rseq_thread_state.fallback_cnt++;
-       /*
-        * For concurrent threads arriving before we set LOCK:
-        * reading cpu_id after setting the state to LOCK
-        * ensures they restart.
-        */
-       ACCESS_ONCE(rlock->state) = RSEQ_LOCK_STATE_LOCK;
-       /*
-        * For concurrent threads arriving after we set LOCK:
-        * those will grab the lock, so we are protected by
-        * mutual exclusion.
-        */
-}
-
-void rseq_fallback_wait(struct rseq_lock *rlock)
+int rseq_register_current_thread(void)
 {
-       signal_off_save((sigset_t *)&rseq_thread_state.sigmask_saved);
-       pthread_mutex_lock(&rlock->lock);
-       rseq_thread_state.fallback_wait_cnt++;
-       pthread_mutex_unlock(&rlock->lock);
-       signal_restore(rseq_thread_state.sigmask_saved);
-}
+       int rc;
 
-static void rseq_fallback_unlock(struct rseq_lock *rlock, int cpu_at_start)
-{
-       /*
-        * Concurrent rseq arriving before we set state back to RESTART
-        * grab the lock. Those arriving after we set state back to
-        * RESTART will perform restartable critical sections. The next
-        * owner of the lock will take care of making sure it prevents
-        * concurrent restartable sequences from completing.  We may be
-        * writing from another CPU, so update the state with a store
-        * release semantic to ensure restartable sections will see our
-        * side effect (writing to *p) before they enter their
-        * restartable critical section.
-        *
-        * In cases where we observe that we are on the right CPU after the
-        * critical section, program order ensures that following restartable
-        * critical sections will see our stores, so we don't have to use
-        * store-release or membarrier.
-        *
-        * Use sys_membarrier when available to remove the memory barrier
-        * implied by smp_load_acquire().
-        */
-       barrier();
-       if (likely(rseq_current_cpu() == cpu_at_start)) {
-               ACCESS_ONCE(rlock->state) = RSEQ_LOCK_STATE_RESTART;
-       } else {
-               if (!has_fast_acquire_release() && rseq_has_sys_membarrier) {
-                       if (membarrier(MEMBARRIER_CMD_SHARED, 0))
-                               abort();
-                       ACCESS_ONCE(rlock->state) = RSEQ_LOCK_STATE_RESTART;
-               } else {
-                       /*
-                        * Store with release semantic to ensure
-                        * restartable sections will see our side effect
-                        * (writing to *p) before they enter their
-                        * restartable critical section. Matches
-                        * smp_load_acquire() in rseq_start().
-                        */
-                       smp_store_release(&rlock->state,
-                               RSEQ_LOCK_STATE_RESTART);
-               }
+       rc = sys_rseq(&__rseq_abi, 0);
+       if (rc) {
+               fprintf(stderr, "Error: sys_rseq(...) failed(%d): %s\n",
+                       errno, strerror(errno));
+               return -1;
        }
-       pthread_mutex_unlock(&rlock->lock);
-       signal_restore(rseq_thread_state.sigmask_saved);
+       assert(rseq_current_cpu() >= 0);
+       return 0;
 }
 
-int rseq_fallback_current_cpu(void)
+int rseq_unregister_current_thread(void)
 {
-       int cpu;
+       int rc;
 
-       cpu = sched_getcpu();
-       if (cpu < 0) {
-               perror("sched_getcpu()");
-               abort();
+       rc = sys_rseq(NULL, 0);
+       if (rc) {
+               fprintf(stderr, "Error: sys_rseq(...) failed(%d): %s\n",
+                       errno, strerror(errno));
+               return -1;
        }
-       return cpu;
-}
-
-int rseq_fallback_begin(struct rseq_lock *rlock)
-{
-       rseq_fallback_lock(rlock);
-       return rseq_fallback_current_cpu();
-}
-
-void rseq_fallback_end(struct rseq_lock *rlock, int cpu)
-{
-       rseq_fallback_unlock(rlock, cpu);
-}
-
-/* Handle non-initialized rseq for this thread. */
-void rseq_fallback_noinit(struct rseq_state *rseq_state)
-{
-       rseq_state->lock_state = RSEQ_LOCK_STATE_FAIL;
-       rseq_state->cpu_id = 0;
-}
-
-uint32_t rseq_get_fallback_wait_cnt(void)
-{
-       return rseq_thread_state.fallback_wait_cnt;
-}
-
-uint32_t rseq_get_fallback_cnt(void)
-{
-       return rseq_thread_state.fallback_cnt;
-}
-
-void __attribute__((constructor)) rseq_init(void)
-{
-       int ret;
-
-       ret = membarrier(MEMBARRIER_CMD_QUERY, 0);
-       if (ret >= 0 && (ret & MEMBARRIER_CMD_SHARED))
-               rseq_has_sys_membarrier = 1;
+       return 0;
 }
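
With the fallback paths gone, per-thread registration is the only
setup left in rseq.c. A hedged usage sketch (the worker function and
its error handling are illustrative, not from this commit):

	static void *worker(void *arg)
	{
		if (rseq_register_current_thread())
			abort();	/* illustrative error handling */
		/* ... fast paths using rseq_start()/rseq_finish() ... */
		if (rseq_unregister_current_thread())
			abort();
		return NULL;
	}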
diff --git a/libringbuffer/rseq.h b/libringbuffer/rseq.h
index e76a9946cd4a7419ba61717761c235981032e497..1f07b1c5814dbcd6e2d1fd556d83336136725c9d 100644
--- a/libringbuffer/rseq.h
+++ b/libringbuffer/rseq.h
@@ -34,6 +34,9 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <sched.h>
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+#include <urcu/arch.h>
 #include "linux-rseq-abi.h"
 
 /*
 #define RSEQ_FALLBACK_CNT      3
 #endif
 
-uint32_t rseq_get_fallback_wait_cnt(void);
-uint32_t rseq_get_fallback_cnt(void);
-
 extern __thread volatile struct rseq __rseq_abi;
-extern int rseq_has_sys_membarrier;
-
-#define likely(x)              __builtin_expect(!!(x), 1)
-#define unlikely(x)            __builtin_expect(!!(x), 0)
-#define barrier()              __asm__ __volatile__("" : : : "memory")
-
-#define ACCESS_ONCE(x)         (*(__volatile__  __typeof__(x) *)&(x))
-#define WRITE_ONCE(x, v)       __extension__ ({ ACCESS_ONCE(x) = (v); })
-#define READ_ONCE(x)           ACCESS_ONCE(x)
 
 #if defined(__x86_64__) || defined(__i386__)
 #include <rseq-x86.h>
@@ -89,31 +80,17 @@ extern int rseq_has_sys_membarrier;
 #error unsupported target
 #endif
 
-enum rseq_lock_state {
-       RSEQ_LOCK_STATE_RESTART = 0,
-       RSEQ_LOCK_STATE_LOCK = 1,
-       RSEQ_LOCK_STATE_FAIL = 2,
-};
-
-struct rseq_lock {
-       pthread_mutex_t lock;
-       int32_t state;          /* enum rseq_lock_state */
-};
-
 /* State returned by rseq_start, passed as argument to rseq_finish. */
 struct rseq_state {
        volatile struct rseq *rseqp;
        int32_t cpu_id;         /* cpu_id at start. */
        uint32_t event_counter; /* event_counter at start. */
-       int32_t lock_state;     /* Lock state at start. */
 };
 
 /*
  * Register rseq for the current thread. This needs to be called once
  * by any thread which uses restartable sequences, before they start
- * using restartable sequences. If initialization is not invoked, or if
- * it fails, the restartable critical sections will fall-back on locking
- * (rseq_lock).
+ * using restartable sequences.
  */
 int rseq_register_current_thread(void);
 
@@ -122,28 +99,6 @@ int rseq_register_current_thread(void);
  */
 int rseq_unregister_current_thread(void);
 
-/*
- * The fallback lock should be initialized before being used by any
- * thread, and destroyed after all threads are done using it. This lock
- * should be used by all rseq calls associated with shared data, either
- * between threads, or between processes in a shared memory.
- *
- * There may be many rseq_lock per process, e.g. one per protected data
- * structure.
- */
-int rseq_init_lock(struct rseq_lock *rlock);
-int rseq_destroy_lock(struct rseq_lock *rlock);
-
-/*
- * Restartable sequence fallback prototypes. Fallback on locking when
- * rseq is not initialized, not available on the system, or during
- * single-stepping to ensure forward progress.
- */
-int rseq_fallback_begin(struct rseq_lock *rlock);
-void rseq_fallback_end(struct rseq_lock *rlock, int cpu);
-void rseq_fallback_wait(struct rseq_lock *rlock);
-void rseq_fallback_noinit(struct rseq_state *rseq_state);
-
 /*
  * Restartable sequence fallback for reading the current CPU number.
  */
@@ -156,7 +111,7 @@ static inline int32_t rseq_cpu_at_start(struct rseq_state start_value)
 
 static inline int32_t rseq_current_cpu_raw(void)
 {
-       return ACCESS_ONCE(__rseq_abi.u.e.cpu_id);
+       return CMM_LOAD_SHARED(__rseq_abi.u.e.cpu_id);
 }
 
 static inline int32_t rseq_current_cpu(void)
@@ -164,13 +119,13 @@ static inline int32_t rseq_current_cpu(void)
        int32_t cpu;
 
        cpu = rseq_current_cpu_raw();
-       if (unlikely(cpu < 0))
+       if (caa_unlikely(cpu < 0))
                cpu = rseq_fallback_current_cpu();
        return cpu;
 }
 
 static inline __attribute__((always_inline))
-struct rseq_state rseq_start(struct rseq_lock *rlock)
+struct rseq_state rseq_start(void)
 {
        struct rseq_state result;
 
@@ -178,15 +133,15 @@ struct rseq_state rseq_start(struct rseq_lock *rlock)
        if (has_single_copy_load_64()) {
                union rseq_cpu_event u;
 
-               u.v = ACCESS_ONCE(result.rseqp->u.v);
+               u.v = CMM_LOAD_SHARED(result.rseqp->u.v);
                result.event_counter = u.e.event_counter;
                result.cpu_id = u.e.cpu_id;
        } else {
                result.event_counter =
-                       ACCESS_ONCE(result.rseqp->u.e.event_counter);
+                       CMM_LOAD_SHARED(result.rseqp->u.e.event_counter);
                /* load event_counter before cpu_id. */
                RSEQ_INJECT_C(6)
-               result.cpu_id = ACCESS_ONCE(result.rseqp->u.e.cpu_id);
+               result.cpu_id = CMM_LOAD_SHARED(result.rseqp->u.e.cpu_id);
        }
        /*
         * Read event counter before lock state and cpu_id. This ensures
@@ -199,23 +154,11 @@ struct rseq_state rseq_start(struct rseq_lock *rlock)
         */
        RSEQ_INJECT_C(7)
 
-       if (!has_fast_acquire_release() && likely(rseq_has_sys_membarrier)) {
-               result.lock_state = ACCESS_ONCE(rlock->state);
-               barrier();
-       } else {
-               /*
-                * Load lock state with acquire semantic. Matches
-                * smp_store_release() in rseq_fallback_end().
-                */
-               result.lock_state = smp_load_acquire(&rlock->state);
-       }
-       if (unlikely(result.cpu_id < 0))
-               rseq_fallback_noinit(&result);
        /*
         * Ensure the compiler does not re-order loads of protected
         * values before we load the event counter.
         */
-       barrier();
+       cmm_barrier();
        return result;
 }
 
@@ -234,8 +177,7 @@ enum rseq_finish_type {
  * write takes place, rseq_finish2 is guaranteed to succeed.
  */
 static inline __attribute__((always_inline))
-bool __rseq_finish(struct rseq_lock *rlock,
-               intptr_t *p_spec, intptr_t to_write_spec,
+bool __rseq_finish(intptr_t *p_spec, intptr_t to_write_spec,
                void *p_memcpy, void *to_write_memcpy, size_t len_memcpy,
                intptr_t *p_final, intptr_t to_write_final,
                struct rseq_state start_value,
@@ -243,11 +185,6 @@ bool __rseq_finish(struct rseq_lock *rlock,
 {
        RSEQ_INJECT_C(9)
 
-       if (unlikely(start_value.lock_state != RSEQ_LOCK_STATE_RESTART)) {
-               if (start_value.lock_state == RSEQ_LOCK_STATE_LOCK)
-                       rseq_fallback_wait(rlock);
-               return false;
-       }
        switch (type) {
        case RSEQ_FINISH_SINGLE:
                RSEQ_FINISH_ASM(p_final, to_write_final, start_value, failure,
@@ -312,166 +249,57 @@ failure:
 }
 
 static inline __attribute__((always_inline))
-bool rseq_finish(struct rseq_lock *rlock,
-               intptr_t *p, intptr_t to_write,
+bool rseq_finish(intptr_t *p, intptr_t to_write,
                struct rseq_state start_value)
 {
-       return __rseq_finish(rlock, NULL, 0,
+       return __rseq_finish(NULL, 0,
                        NULL, NULL, 0,
                        p, to_write, start_value,
                        RSEQ_FINISH_SINGLE, false);
 }
 
 static inline __attribute__((always_inline))
-bool rseq_finish2(struct rseq_lock *rlock,
-               intptr_t *p_spec, intptr_t to_write_spec,
+bool rseq_finish2(intptr_t *p_spec, intptr_t to_write_spec,
                intptr_t *p_final, intptr_t to_write_final,
                struct rseq_state start_value)
 {
-       return __rseq_finish(rlock, p_spec, to_write_spec,
+       return __rseq_finish(p_spec, to_write_spec,
                        NULL, NULL, 0,
                        p_final, to_write_final, start_value,
                        RSEQ_FINISH_TWO, false);
 }
 
 static inline __attribute__((always_inline))
-bool rseq_finish2_release(struct rseq_lock *rlock,
-               intptr_t *p_spec, intptr_t to_write_spec,
+bool rseq_finish2_release(intptr_t *p_spec, intptr_t to_write_spec,
                intptr_t *p_final, intptr_t to_write_final,
                struct rseq_state start_value)
 {
-       return __rseq_finish(rlock, p_spec, to_write_spec,
+       return __rseq_finish(p_spec, to_write_spec,
                        NULL, NULL, 0,
                        p_final, to_write_final, start_value,
                        RSEQ_FINISH_TWO, true);
 }
 
 static inline __attribute__((always_inline))
-bool rseq_finish_memcpy(struct rseq_lock *rlock,
-               void *p_memcpy, void *to_write_memcpy, size_t len_memcpy,
-               intptr_t *p_final, intptr_t to_write_final,
+bool rseq_finish_memcpy(void *p_memcpy, void *to_write_memcpy,
+               size_t len_memcpy, intptr_t *p_final, intptr_t to_write_final,
                struct rseq_state start_value)
 {
-       return __rseq_finish(rlock, NULL, 0,
+       return __rseq_finish(NULL, 0,
                        p_memcpy, to_write_memcpy, len_memcpy,
                        p_final, to_write_final, start_value,
                        RSEQ_FINISH_MEMCPY, false);
 }
 
 static inline __attribute__((always_inline))
-bool rseq_finish_memcpy_release(struct rseq_lock *rlock,
-               void *p_memcpy, void *to_write_memcpy, size_t len_memcpy,
-               intptr_t *p_final, intptr_t to_write_final,
+bool rseq_finish_memcpy_release(void *p_memcpy, void *to_write_memcpy,
+               size_t len_memcpy, intptr_t *p_final, intptr_t to_write_final,
                struct rseq_state start_value)
 {
-       return __rseq_finish(rlock, NULL, 0,
+       return __rseq_finish(NULL, 0,
                        p_memcpy, to_write_memcpy, len_memcpy,
                        p_final, to_write_final, start_value,
                        RSEQ_FINISH_MEMCPY, true);
 }
 
-#define __rseq_store_RSEQ_FINISH_SINGLE(_targetptr_spec, _newval_spec, \
-               _dest_memcpy, _src_memcpy, _len_memcpy,                 \
-               _targetptr_final, _newval_final)                        \
-       do {                                                            \
-               *(_targetptr_final) = (_newval_final);                  \
-       } while (0)
-
-#define __rseq_store_RSEQ_FINISH_TWO(_targetptr_spec, _newval_spec,    \
-               _dest_memcpy, _src_memcpy, _len_memcpy,                 \
-               _targetptr_final, _newval_final)                        \
-       do {                                                            \
-               *(_targetptr_spec) = (_newval_spec);                    \
-               *(_targetptr_final) = (_newval_final);                  \
-       } while (0)
-
-#define __rseq_store_RSEQ_FINISH_MEMCPY(_targetptr_spec,               \
-               _newval_spec, _dest_memcpy, _src_memcpy, _len_memcpy,   \
-               _targetptr_final, _newval_final)                        \
-       do {                                                            \
-               memcpy(_dest_memcpy, _src_memcpy, _len_memcpy);         \
-               *(_targetptr_final) = (_newval_final);                  \
-       } while (0)
-
-/*
- * Helper macro doing two restartable critical section attempts, and if
- * they fail, fallback on locking.
- */
-#define __do_rseq(_type, _lock, _rseq_state, _cpu, _result,            \
-               _targetptr_spec, _newval_spec,                          \
-               _dest_memcpy, _src_memcpy, _len_memcpy,                 \
-               _targetptr_final, _newval_final, _code, _release)       \
-       do {                                                            \
-               _rseq_state = rseq_start(_lock);                        \
-               _cpu = rseq_cpu_at_start(_rseq_state);                  \
-               _result = true;                                         \
-               _code                                                   \
-               if (unlikely(!_result))                                 \
-                       break;                                          \
-               if (likely(__rseq_finish(_lock,                         \
-                               _targetptr_spec, _newval_spec,          \
-                               _dest_memcpy, _src_memcpy, _len_memcpy, \
-                               _targetptr_final, _newval_final,        \
-                               _rseq_state, _type, _release)))         \
-                       break;                                          \
-               _rseq_state = rseq_start(_lock);                        \
-               _cpu = rseq_cpu_at_start(_rseq_state);                  \
-               _result = true;                                         \
-               _code                                                   \
-               if (unlikely(!_result))                                 \
-                       break;                                          \
-               if (likely(__rseq_finish(_lock,                         \
-                               _targetptr_spec, _newval_spec,          \
-                               _dest_memcpy, _src_memcpy, _len_memcpy, \
-                               _targetptr_final, _newval_final,        \
-                               _rseq_state, _type, _release)))         \
-                       break;                                          \
-               _cpu = rseq_fallback_begin(_lock);                      \
-               _result = true;                                         \
-               _code                                                   \
-               if (likely(_result))                                    \
-                       __rseq_store_##_type(_targetptr_spec,           \
-                                _newval_spec, _dest_memcpy,            \
-                               _src_memcpy, _len_memcpy,               \
-                               _targetptr_final, _newval_final);       \
-               rseq_fallback_end(_lock, _cpu);                         \
-       } while (0)
-
-#define do_rseq(_lock, _rseq_state, _cpu, _result, _targetptr, _newval,        \
-               _code)                                                  \
-       __do_rseq(RSEQ_FINISH_SINGLE, _lock, _rseq_state, _cpu, _result,\
-               NULL, 0, NULL, NULL, 0, _targetptr, _newval, _code, false)
-
-#define do_rseq2(_lock, _rseq_state, _cpu, _result,                    \
-               _targetptr_spec, _newval_spec,                          \
-               _targetptr_final, _newval_final, _code)                 \
-       __do_rseq(RSEQ_FINISH_TWO, _lock, _rseq_state, _cpu, _result,   \
-               _targetptr_spec, _newval_spec,                          \
-               NULL, NULL, 0,                                          \
-               _targetptr_final, _newval_final, _code, false)
-
-#define do_rseq2_release(_lock, _rseq_state, _cpu, _result,            \
-               _targetptr_spec, _newval_spec,                          \
-               _targetptr_final, _newval_final, _code)                 \
-       __do_rseq(RSEQ_FINISH_TWO, _lock, _rseq_state, _cpu, _result,   \
-               _targetptr_spec, _newval_spec,                          \
-               NULL, NULL, 0,                                          \
-               _targetptr_final, _newval_final, _code, true)
-
-#define do_rseq_memcpy(_lock, _rseq_state, _cpu, _result,              \
-               _dest_memcpy, _src_memcpy, _len_memcpy,                 \
-               _targetptr_final, _newval_final, _code)                 \
-       __do_rseq(RSEQ_FINISH_MEMCPY, _lock, _rseq_state, _cpu, _result,\
-               NULL, 0,                                                \
-               _dest_memcpy, _src_memcpy, _len_memcpy,                 \
-               _targetptr_final, _newval_final, _code, false)
-
-#define do_rseq_memcpy_release(_lock, _rseq_state, _cpu, _result,      \
-               _dest_memcpy, _src_memcpy, _len_memcpy,                 \
-               _targetptr_final, _newval_final, _code)                 \
-       __do_rseq(RSEQ_FINISH_MEMCPY, _lock, _rseq_state, _cpu, _result,\
-               NULL, 0,                                                \
-               _dest_memcpy, _src_memcpy, _len_memcpy,                 \
-               _targetptr_final, _newval_final, _code, true)
-
 #endif  /* RSEQ_H_ */
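
With the do_rseq()/__do_rseq() helpers removed, callers drive retries
themselves. A minimal caller-side loop, sketched with a hypothetical
compute_next() step and target variable (and note that, per the
removed comment above, a pure retry loop no longer carries the
lock-based forward-progress guarantee under single-stepping):

	struct rseq_state rs;
	intptr_t newval;

	do {
		rs = rseq_start();
		newval = compute_next(rseq_cpu_at_start(rs));	/* hypothetical */
	} while (!rseq_finish(&target, newval, rs));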