Update percpu ops test and header
author: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Tue, 9 Oct 2018 18:59:09 +0000 (14:59 -0400)
committer: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Tue, 9 Oct 2018 18:59:09 +0000 (14:59 -0400)
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
include/rseq/percpu-op.h
tests/basic_percpu_ops_test.c

index 92b9b52a2732c76ac3932079951ec411451317cd..6ee8832262b91d06a44f908e5c18256e5ca125db 100644 (file)
 
 int percpu_op_available(void);
 
+static inline uint32_t percpu_current_cpu(void)
+{
+       return rseq_current_cpu();
+}
+
 static inline __attribute__((always_inline))
 int percpu_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv,
                         int cpu)
index ed29d11f6fd094fbd3a75558884adad97dfa774a..4b9c8463cb9c907d9b43d9c120c1b62fcf55b398 100644 (file)
@@ -44,15 +44,12 @@ struct percpu_list {
        struct percpu_list_entry c[CPU_SETSIZE];
 };
 
-/* A simple percpu spinlock.  Returns the cpu lock was acquired on. */
-int rseq_percpu_lock(struct percpu_lock *lock)
+/* A simple percpu spinlock. */
+void rseq_percpu_lock(struct percpu_lock *lock, int cpu)
 {
-       int cpu;
-
        for (;;) {
                int ret;
 
-               cpu = rseq_cpu_start();
                ret = percpu_cmpeqv_storev(&lock->c[cpu].v,
                                           0, 1, cpu);
                if (rseq_likely(!ret))
@@ -68,7 +65,6 @@ int rseq_percpu_lock(struct percpu_lock *lock)
         * Matches rseq_smp_store_release().
         */
        rseq_smp_acquire__after_ctrl_dep();
-       return cpu;
 }
 
 void rseq_percpu_unlock(struct percpu_lock *lock, int cpu)
@@ -84,7 +80,7 @@ void rseq_percpu_unlock(struct percpu_lock *lock, int cpu)
 void *test_percpu_spinlock_thread(void *arg)
 {
        struct spinlock_test_data *data = arg;
-       int i, cpu;
+       int i;
 
        if (rseq_register_current_thread()) {
                fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
@@ -92,7 +88,9 @@ void *test_percpu_spinlock_thread(void *arg)
                abort();
        }
        for (i = 0; i < data->reps; i++) {
-               cpu = rseq_percpu_lock(&data->lock);
+               int cpu = percpu_current_cpu();
+
+               rseq_percpu_lock(&data->lock, cpu);
                data->c[cpu].count++;
                rseq_percpu_unlock(&data->lock, cpu);
        }
@@ -202,10 +200,10 @@ void *test_percpu_list_thread(void *arg)
        for (i = 0; i < 100000; i++) {
                struct percpu_list_node *node;
 
-               node = percpu_list_pop(list, rseq_cpu_start());
+               node = percpu_list_pop(list, percpu_current_cpu());
                sched_yield();  /* encourage shuffling */
                if (node)
-                       percpu_list_push(list, node, rseq_cpu_start());
+                       percpu_list_push(list, node, percpu_current_cpu());
        }
 
        if (rseq_unregister_current_thread()) {
This page took 0.029618 seconds and 4 git commands to generate.