Restartable sequences: tests: introduce simple rseq start/finish rseq-v8.3
authorMathieu Desnoyers <mathieu.desnoyers@efficios.com>
Mon, 31 Oct 2016 15:44:30 +0000 (11:44 -0400)
committerMathieu Desnoyers <mathieu.desnoyers@efficios.com>
Mon, 31 Oct 2016 15:44:30 +0000 (11:44 -0400)
Introduce rseq_start/rseq_finish variants that do not require the rseq lock.
The new rseq_start_rlock and rseq_finish_rlock take the lock argument and
handle the lock-state check and fallback.

This is useful for use-cases that do not require locking to handle
fallback. For instance, using split counter techniques for fallback when
possible, or using reference counting to do a lock-free fallback.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
tools/testing/selftests/rseq/basic_test.c
tools/testing/selftests/rseq/rseq.h

index dad78f624444eafae7203d97c15aae377850f900..59b3ec487128b0b83f8cff479b6cafa0e4e540e1 100644 (file)
@@ -45,10 +45,10 @@ void test_critical_section(void)
        struct rseq_state start;
        uint32_t event_counter;
 
-       start = rseq_start(&rseq_lock);
+       start = rseq_start_rlock(&rseq_lock);
        event_counter = start.event_counter;
        do {
-               start = rseq_start(&rseq_lock);
+               start = rseq_start_rlock(&rseq_lock);
        } while (start.event_counter == event_counter);
 }
 
@@ -56,7 +56,7 @@ void test_signal_interrupt_handler(int signo)
 {
        struct rseq_state current;
 
-       current = rseq_start(&rseq_lock);
+       current = rseq_start_rlock(&rseq_lock);
        /*
         * The potential critical section bordered by 'start' must be
         * invalid.
@@ -73,7 +73,7 @@ void test_signal_interrupts(void)
        signal(SIGPROF, test_signal_interrupt_handler);
 
        do {
-               sigtest_start = rseq_start(&rseq_lock);
+               sigtest_start = rseq_start_rlock(&rseq_lock);
        } while (signals_delivered < 10);
        setitimer(ITIMER_PROF, NULL, NULL);
 }
index b0c743454fec39383c3a20f596bb827ec0513726..4bd16476060d3480da2aed865ef04345a0a5a145 100644 (file)
@@ -170,7 +170,7 @@ static inline int32_t rseq_current_cpu(void)
 }
 
 static inline __attribute__((always_inline))
-struct rseq_state rseq_start(struct rseq_lock *rlock)
+struct rseq_state rseq_start(void)
 {
        struct rseq_state result;
 
@@ -188,6 +188,21 @@ struct rseq_state rseq_start(struct rseq_lock *rlock)
                RSEQ_INJECT_C(6)
                result.cpu_id = ACCESS_ONCE(result.rseqp->u.e.cpu_id);
        }
+       RSEQ_INJECT_C(7)
+       /*
+        * Ensure the compiler does not re-order loads of protected
+        * values before we load the event counter.
+        */
+       barrier();
+       return result;
+}
+
+static inline __attribute__((always_inline))
+struct rseq_state rseq_start_rlock(struct rseq_lock *rlock)
+{
+       struct rseq_state result;
+
+       result = rseq_start();
        /*
         * Read event counter before lock state and cpu_id. This ensures
         * that when the state changes from RESTART to LOCK, if we have
@@ -197,7 +212,6 @@ struct rseq_state rseq_start(struct rseq_lock *rlock)
         * preemption/signalling will cause them to restart, so they
         * don't interfere with the lock.
         */
-       RSEQ_INJECT_C(7)
 
        if (!has_fast_acquire_release() && likely(rseq_has_sys_membarrier)) {
                result.lock_state = ACCESS_ONCE(rlock->state);
@@ -211,11 +225,6 @@ struct rseq_state rseq_start(struct rseq_lock *rlock)
        }
        if (unlikely(result.cpu_id < 0))
                rseq_fallback_noinit(&result);
-       /*
-        * Ensure the compiler does not re-order loads of protected
-        * values before we load the event counter.
-        */
-       barrier();
        return result;
 }
 
@@ -234,8 +243,7 @@ enum rseq_finish_type {
  * write takes place, the rseq_finish2 is guaranteed to succeed.
  */
 static inline __attribute__((always_inline))
-bool __rseq_finish(struct rseq_lock *rlock,
-               intptr_t *p_spec, intptr_t to_write_spec,
+bool __rseq_finish(intptr_t *p_spec, intptr_t to_write_spec,
                void *p_memcpy, void *to_write_memcpy, size_t len_memcpy,
                intptr_t *p_final, intptr_t to_write_final,
                struct rseq_state start_value,
@@ -243,11 +251,6 @@ bool __rseq_finish(struct rseq_lock *rlock,
 {
        RSEQ_INJECT_C(9)
 
-       if (unlikely(start_value.lock_state != RSEQ_LOCK_STATE_RESTART)) {
-               if (start_value.lock_state == RSEQ_LOCK_STATE_LOCK)
-                       rseq_fallback_wait(rlock);
-               return false;
-       }
        switch (type) {
        case RSEQ_FINISH_SINGLE:
                RSEQ_FINISH_ASM(p_final, to_write_final, start_value, failure,
@@ -312,59 +315,73 @@ failure:
 }
 
 static inline __attribute__((always_inline))
-bool rseq_finish(struct rseq_lock *rlock,
-               intptr_t *p, intptr_t to_write,
+bool rseq_finish_rlock(struct rseq_lock *rlock,
+               intptr_t *p_spec, intptr_t to_write_spec,
+               void *p_memcpy, void *to_write_memcpy, size_t len_memcpy,
+               intptr_t *p_final, intptr_t to_write_final,
+               struct rseq_state start_value,
+               enum rseq_finish_type type, bool release)
+{
+       if (unlikely(start_value.lock_state != RSEQ_LOCK_STATE_RESTART)) {
+               if (start_value.lock_state == RSEQ_LOCK_STATE_LOCK)
+                       rseq_fallback_wait(rlock);
+               return false;
+       }
+       return __rseq_finish(p_spec, to_write_spec, p_memcpy,
+               to_write_memcpy, len_memcpy,
+               p_final, to_write_final,
+               start_value, type, release);
+}
+
+static inline __attribute__((always_inline))
+bool rseq_finish(intptr_t *p, intptr_t to_write,
                struct rseq_state start_value)
 {
-       return __rseq_finish(rlock, NULL, 0,
+       return __rseq_finish(NULL, 0,
                        NULL, NULL, 0,
                        p, to_write, start_value,
                        RSEQ_FINISH_SINGLE, false);
 }
 
 static inline __attribute__((always_inline))
-bool rseq_finish2(struct rseq_lock *rlock,
-               intptr_t *p_spec, intptr_t to_write_spec,
+bool rseq_finish2(intptr_t *p_spec, intptr_t to_write_spec,
                intptr_t *p_final, intptr_t to_write_final,
                struct rseq_state start_value)
 {
-       return __rseq_finish(rlock, p_spec, to_write_spec,
+       return __rseq_finish(p_spec, to_write_spec,
                        NULL, NULL, 0,
                        p_final, to_write_final, start_value,
                        RSEQ_FINISH_TWO, false);
 }
 
 static inline __attribute__((always_inline))
-bool rseq_finish2_release(struct rseq_lock *rlock,
-               intptr_t *p_spec, intptr_t to_write_spec,
+bool rseq_finish2_release(intptr_t *p_spec, intptr_t to_write_spec,
                intptr_t *p_final, intptr_t to_write_final,
                struct rseq_state start_value)
 {
-       return __rseq_finish(rlock, p_spec, to_write_spec,
+       return __rseq_finish(p_spec, to_write_spec,
                        NULL, NULL, 0,
                        p_final, to_write_final, start_value,
                        RSEQ_FINISH_TWO, true);
 }
 
 static inline __attribute__((always_inline))
-bool rseq_finish_memcpy(struct rseq_lock *rlock,
-               void *p_memcpy, void *to_write_memcpy, size_t len_memcpy,
-               intptr_t *p_final, intptr_t to_write_final,
+bool rseq_finish_memcpy(void *p_memcpy, void *to_write_memcpy,
+               size_t len_memcpy, intptr_t *p_final, intptr_t to_write_final,
                struct rseq_state start_value)
 {
-       return __rseq_finish(rlock, NULL, 0,
+       return __rseq_finish(NULL, 0,
                        p_memcpy, to_write_memcpy, len_memcpy,
                        p_final, to_write_final, start_value,
                        RSEQ_FINISH_MEMCPY, false);
 }
 
 static inline __attribute__((always_inline))
-bool rseq_finish_memcpy_release(struct rseq_lock *rlock,
-               void *p_memcpy, void *to_write_memcpy, size_t len_memcpy,
-               intptr_t *p_final, intptr_t to_write_final,
+bool rseq_finish_memcpy_release(void *p_memcpy, void *to_write_memcpy,
+               size_t len_memcpy, intptr_t *p_final, intptr_t to_write_final,
                struct rseq_state start_value)
 {
-       return __rseq_finish(rlock, NULL, 0,
+       return __rseq_finish(NULL, 0,
                        p_memcpy, to_write_memcpy, len_memcpy,
                        p_final, to_write_final, start_value,
                        RSEQ_FINISH_MEMCPY, true);
@@ -402,25 +419,25 @@ bool rseq_finish_memcpy_release(struct rseq_lock *rlock,
                _dest_memcpy, _src_memcpy, _len_memcpy,                 \
                _targetptr_final, _newval_final, _code, _release)       \
        do {                                                            \
-               _rseq_state = rseq_start(_lock);                        \
+               _rseq_state = rseq_start_rlock(_lock);                  \
                _cpu = rseq_cpu_at_start(_rseq_state);                  \
                _result = true;                                         \
                _code                                                   \
                if (unlikely(!_result))                                 \
                        break;                                          \
-               if (likely(__rseq_finish(_lock,                         \
+               if (likely(rseq_finish_rlock(_lock,                     \
                                _targetptr_spec, _newval_spec,          \
                                _dest_memcpy, _src_memcpy, _len_memcpy, \
                                _targetptr_final, _newval_final,        \
                                _rseq_state, _type, _release)))         \
                        break;                                          \
-               _rseq_state = rseq_start(_lock);                        \
+               _rseq_state = rseq_start_rlock(_lock);                  \
                _cpu = rseq_cpu_at_start(_rseq_state);                  \
                _result = true;                                         \
                _code                                                   \
                if (unlikely(!_result))                                 \
                        break;                                          \
-               if (likely(__rseq_finish(_lock,                         \
+               if (likely(rseq_finish_rlock(_lock,                     \
                                _targetptr_spec, _newval_spec,          \
                                _dest_memcpy, _src_memcpy, _len_memcpy, \
                                _targetptr_final, _newval_final,        \
This page took 0.027591 seconds and 5 git commands to generate.