Use rseq for commit counter
diff --git a/libringbuffer/frontend_api.h b/libringbuffer/frontend_api.h
index 140159739feebc954f68d39b33806e3778f4ca01..6ff98da0d1ed22a60800cb2dc7008e4f49e46ff0 100644
--- a/libringbuffer/frontend_api.h
+++ b/libringbuffer/frontend_api.h
 #include "frontend.h"
 #include <urcu-bp.h>
 #include <urcu/compiler.h>
+#include "rseq.h"
+
+/*
+ * With rseq, the CPU number is a plain query: it is no longer coupled
+ * to the nesting guard taken by lib_ring_buffer_begin() below.
+ */
+static inline
+int lib_ring_buffer_get_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
+{
+       return lttng_ust_get_cpu();
+}
 
 /**
- * lib_ring_buffer_get_cpu - Precedes ring buffer reserve/commit.
+ * lib_ring_buffer_begin - Precedes ring buffer reserve/commit.
  *
 * Keeps a ring buffer nesting count as a supplementary safety net to
 * ensure tracer client code will never trigger endless recursion.
  * section.
  */
 static inline
-int lib_ring_buffer_get_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
+int lib_ring_buffer_begin(const struct lttng_ust_lib_ring_buffer_config *config)
 {
-       int cpu, nesting;
+       int nesting;
 
-       cpu = lttng_ust_get_cpu();
        nesting = ++URCU_TLS(lib_ring_buffer_nesting);
        cmm_barrier();
 
@@ -61,15 +67,15 @@ int lib_ring_buffer_get_cpu(const struct lttng_ust_lib_ring_buffer_config *confi
                WARN_ON_ONCE(1);
                URCU_TLS(lib_ring_buffer_nesting)--;
                return -EPERM;
-       } else
-               return cpu;
+       }
+       return 0;
 }
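
As a standalone illustration, the recursion guard that lib_ring_buffer_begin() now reduces to looks like this (identifiers and the nesting limit below are invented for the example; the compiler barriers of the real code are omitted):

        #include <errno.h>

        #define MAX_NESTING 4

        static __thread int nesting;            /* plays the role of URCU_TLS() */

        static inline int guard_begin(void)
        {
                if (++nesting > MAX_NESTING) {  /* probe recursed into tracing */
                        nesting--;
                        return -EPERM;          /* caller must drop the event */
                }
                return 0;
        }

        static inline void guard_end(void)
        {
                nesting--;
        }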
 
 /**
- * lib_ring_buffer_put_cpu - Follows ring buffer reserve/commit.
+ * lib_ring_buffer_end - Follows ring buffer reserve/commit.
  */
 static inline
-void lib_ring_buffer_put_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
+void lib_ring_buffer_end(const struct lttng_ust_lib_ring_buffer_config *config)
 {
        cmm_barrier();
        URCU_TLS(lib_ring_buffer_nesting)--;            /* TLS */
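
Taken together, a probe now brackets reserve/commit roughly as follows. This is only a sketch of the calling convention, with error handling trimmed, lib_ring_buffer_reserve() arguments omitted, and the ctx field names assumed:

        if (lib_ring_buffer_begin(config))
                return;                 /* nesting too deep: drop the event */
        ctx->cpu = lib_ring_buffer_get_cpu(config);     /* plain CPU query */
        /* ... lib_ring_buffer_reserve(), write the event payload ... */
        lib_ring_buffer_commit(config, ctx);
        lib_ring_buffer_end(config);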
@@ -254,6 +260,14 @@ void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *confi
        unsigned long commit_count;
        struct commit_counters_hot *cc_hot = shmp_index(handle,
                                                buf->commit_hot, endidx);
+       struct lttng_rseq_state rseq_state;
+
+       if (caa_likely(ctx->ctx_len
+                       >= sizeof(struct lttng_ust_lib_ring_buffer_ctx))) {
+               rseq_state = ctx->rseq_state;
+       } else {
+               /* Caller predates rseq_state: a negative cpu_id keeps
+                * us off the rseq fast path below. */
+               rseq_state.cpu_id = -2;
+       }
 
        if (caa_unlikely(!cc_hot))
                return;
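
The ctx_len test above is the usual extensible-struct ABI guard: probe providers compiled against an older lttng_ust_lib_ring_buffer_ctx record a shorter length, so fields added later (such as rseq_state) may only be read when the recorded length covers them. The same pattern in isolation, with struct and field names invented for the example:

        #include <stddef.h>
        #include <stdint.h>

        struct evctx_v1 {
                size_t ctx_len;         /* initializer sets sizeof(*ctx) */
                int cpu;
        };

        struct evctx_v2 {
                size_t ctx_len;
                int cpu;
                uint32_t added_field;   /* new in v2 */
        };

        static uint32_t read_added_field(const struct evctx_v1 *ctx)
        {
                /* Dereference the v2 field only when the caller's struct
                 * is known to be large enough. */
                if (ctx->ctx_len >= sizeof(struct evctx_v2))
                        return ((const struct evctx_v2 *)ctx)->added_field;
                return 0;               /* safe default for old callers */
        }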
@@ -269,7 +283,18 @@ void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *confi
         */
        cmm_smp_wmb();
 
+       if (caa_likely(rseq_state.cpu_id >= 0)) {
+               unsigned long newv;
+
+               newv = cc_hot->cc_rseq + ctx->slot_size;
+               /*
+                * Publish newv only if this thread was neither migrated
+                * nor preempted since rseq_state was snapshotted;
+                * otherwise fall through to the atomic v_add() below.
+                */
+               if (caa_likely(__rseq_finish(NULL, 0, NULL, NULL, 0,
+                               (intptr_t *)&cc_hot->cc_rseq,
+                               (intptr_t) newv,
+                               rseq_state, RSEQ_FINISH_SINGLE, false)))
+                       goto add_done;
+       }
        v_add(config, ctx->slot_size, &cc_hot->cc);
+add_done:
 
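The commit counter is effectively split in two: cc_rseq takes restart-protected, cpu-local updates on the fast path, while cc absorbs atomic updates whenever the rseq commit aborts (migration, preemption, signal delivery). A minimal sketch of that split-counter scheme, with the rseq sequence stubbed out behind a hypothetical try_rseq_add():

        #include <stdbool.h>

        struct split_counter {
                unsigned long cc;       /* slow-path half, updated atomically */
                unsigned long cc_rseq;  /* fast-path half, cpu-local via rseq */
        };

        /* Stands in for the __rseq_finish() sequence: store *p + n iff the
         * thread was not migrated/preempted since the rseq snapshot. This
         * stub always fails, forcing the fallback. */
        static bool try_rseq_add(unsigned long *p, unsigned long n)
        {
                (void)p; (void)n;
                return false;
        }

        static void split_counter_add(struct split_counter *c, unsigned long n)
        {
                if (try_rseq_add(&c->cc_rseq, n))
                        return;                         /* fast path */
                __atomic_fetch_add(&c->cc, n, __ATOMIC_RELAXED); /* like v_add() */
        }
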
        /*
         * commit count read can race with concurrent OOO commit count updates.
@@ -290,6 +315,7 @@ void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *confi
         *   which is completely independent of the order.
         */
        commit_count = v_read(config, &cc_hot->cc);
+       /* Fold in the rseq-updated half of the split commit counter. */
+       commit_count += cc_hot->cc_rseq;
 
        lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
                                      commit_count, endidx, handle, ctx->tsc);
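
Because the counter lives in two halves, every consumer of the commit count must sum them, which is what the added commit_count line does before the deliver check. In terms of the split_counter sketch above:

        static unsigned long total_commit_count(const struct split_counter *c)
        {
                /* Either half may be concurrently updated; the deliver
                 * logic tolerates such races, as the comment above notes. */
                return __atomic_load_n(&c->cc, __ATOMIC_RELAXED) + c->cc_rseq;
        }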