rseq fallback: use saturated reference counter
diff --git a/liblttng-ust/lttng-ring-buffer-client.h b/liblttng-ust/lttng-ring-buffer-client.h
index c5867dc1b697f2434a5bf17e74a98de85878661c..8a2155ef3e8501309948c49966fd37b9f41b8b52 100644
--- a/liblttng-ust/lttng-ring-buffer-client.h
+++ b/liblttng-ust/lttng-ring-buffer-client.h
@@ -26,6 +26,7 @@
 #include "clock.h"
 #include "lttng-tracer.h"
 #include "../libringbuffer/frontend_types.h"
+#include "../libringbuffer/rseq.h"
 
 #define LTTNG_COMPACT_EVENT_BITS       5
 #define LTTNG_COMPACT_TSC_BITS         27
@@ -636,7 +637,7 @@ static const struct lttng_ust_lib_ring_buffer_config client_config = {
 
        .tsc_bits = LTTNG_COMPACT_TSC_BITS,
        .alloc = RING_BUFFER_ALLOC_PER_CPU,
-       .sync = RING_BUFFER_SYNC_GLOBAL,
+       .sync = RING_BUFFER_SYNC_PER_CPU,
        .mode = RING_BUFFER_MODE_TEMPLATE,
        .backend = RING_BUFFER_PAGE,
        .output = RING_BUFFER_MMAP,
@@ -689,16 +690,56 @@ void lttng_channel_destroy(struct lttng_channel *chan)
        channel_destroy(chan->chan, chan->handle, 1);
 }
 
+static
+bool refcount_get_saturate(long *ref)
+{
+       long old, _new, res;
+
+       old = uatomic_read(ref);
+       for (;;) {
+               if (old == LONG_MAX) {
+                       return false;   /* Saturated. */
+               }
+               _new = old + 1;
+               res = uatomic_cmpxchg(ref, old, _new);
+               if (res == old) {
+                       if (_new == LONG_MAX) {
+                               return false; /* Saturation. */
+                       }
+                       return true;    /* Success. */
+               }
+               old = res;
+       }
+}
+
 static
 int lttng_event_reserve(struct lttng_ust_lib_ring_buffer_ctx *ctx,
                      uint32_t event_id)
 {
        struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
+       struct lttng_rseq_state rseq_state;
        int ret, cpu;
+       bool put_fallback_ref = false;
 
-       cpu = lib_ring_buffer_get_cpu(&client_config);
-       if (cpu < 0)
+       if (lib_ring_buffer_begin(&client_config))
                return -EPERM;
+retry:
+       rseq_state = rseq_start();
+       if (caa_unlikely(rseq_cpu_at_start(rseq_state) < 0)) {
+               if (caa_unlikely(rseq_cpu_at_start(rseq_state) == -1)) {
+                       if (!rseq_register_current_thread())
+                               goto retry;
+               }
+               /* rseq is unavailable. */
+               cpu = lib_ring_buffer_get_cpu(&client_config);
+               if (caa_unlikely(cpu < 0)) {
+                       ret = -EPERM;
+                       goto end;
+               }
+       } else {
+               cpu = rseq_cpu_at_start(rseq_state);
+       }
+fallback:
        ctx->cpu = cpu;
 
        switch (lttng_chan->header_type) {
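The refcount_get_saturate() helper added in this hunk implements a saturating reference count: once the counter reaches LONG_MAX it sticks there, and a caller that observes saturation never drops its reference, so the count can never wrap around. As a rough illustration only, the standalone sketch below (not part of the patch) shows the same pattern built on C11 <stdatomic.h> instead of liburcu's uatomic_*() helpers; the fallback_ref_get()/fallback_ref_put() names are invented for the example and do not exist in lttng-ust.

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_long fallback_ref;

/* Take a reference; returns true if the caller must later put it. */
static bool fallback_ref_get(atomic_long *ref)
{
	long old = atomic_load(ref);

	for (;;) {
		long new;

		if (old == LONG_MAX)
			return false;	/* Saturated: no reference to put. */
		new = old + 1;
		if (atomic_compare_exchange_weak(ref, &old, new)) {
			/*
			 * Reaching LONG_MAX pins the counter there forever;
			 * report that the caller holds no droppable reference.
			 */
			return new != LONG_MAX;
		}
		/* The failed CAS reloaded 'old'; retry. */
	}
}

static void fallback_ref_put(atomic_long *ref)
{
	atomic_fetch_sub(ref, 1);
}

int main(void)
{
	if (fallback_ref_get(&fallback_ref)) {
		/* ... work covered by the reference ... */
		fallback_ref_put(&fallback_ref);
	}
	printf("fallback_ref = %ld\n", (long) atomic_load(&fallback_ref));
	return 0;
}
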
@@ -714,21 +755,43 @@ int lttng_event_reserve(struct lttng_ust_lib_ring_buffer_ctx *ctx,
                WARN_ON_ONCE(1);
        }
 
+       if (caa_likely(ctx->ctx_len
+                       >= sizeof(struct lttng_ust_lib_ring_buffer_ctx)))
+               ctx->rseq_state = rseq_state;
+
        ret = lib_ring_buffer_reserve(&client_config, ctx);
-       if (ret)
-               goto put;
+       if (caa_unlikely(ret)) {
+               if (ret == -EAGAIN) {
+                       assert(!put_fallback_ref);
+                       put_fallback_ref = refcount_get_saturate(
+                               &lttng_chan->chan->u.reserve_fallback_ref);
+                       cpu = lib_ring_buffer_get_cpu(&client_config);
+                       if (caa_unlikely(cpu < 0)) {
+                               ret = -EPERM;
+                               goto end;
+                       }
+                       goto fallback;
+               }
+               goto end;
+       }
        if (caa_likely(ctx->ctx_len
                        >= sizeof(struct lttng_ust_lib_ring_buffer_ctx))) {
                if (lib_ring_buffer_backend_get_pages(&client_config, ctx,
                                &ctx->backend_pages)) {
                        ret = -EPERM;
-                       goto put;
+                       goto end;
                }
        }
        lttng_write_event_header(&client_config, ctx, event_id);
+
+       if (caa_unlikely(put_fallback_ref))
+               uatomic_dec(&lttng_chan->chan->u.reserve_fallback_ref);
+
        return 0;
-put:
-       lib_ring_buffer_put_cpu(&client_config);
+end:
+       lib_ring_buffer_end(&client_config);
+       if (put_fallback_ref)
+               uatomic_dec(&lttng_chan->chan->u.reserve_fallback_ref);
        return ret;
 }
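For readability, here is the reserve-side control flow of this hunk in isolation, with the ring buffer and rseq machinery stubbed out. try_fast_reserve() and fallback_reserve() are hypothetical stand-ins for the rseq-based and atomic reservation paths, and the refcount helpers mirror the previous sketch; only the error-handling shape follows the patch, so treat this as a sketch under those assumptions rather than the actual lttng-ust code path.

#include <errno.h>
#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_long fallback_ref;

/* Saturating get/put, as in the previous sketch. */
static bool fallback_ref_get(void)
{
	long old = atomic_load(&fallback_ref);

	for (;;) {
		long new;

		if (old == LONG_MAX)
			return false;
		new = old + 1;
		if (atomic_compare_exchange_weak(&fallback_ref, &old, new))
			return new != LONG_MAX;
	}
}

static void fallback_ref_put(void)
{
	atomic_fetch_sub(&fallback_ref, 1);
}

/* Stubs: pretend the fast path is aborted, e.g. by a CPU migration. */
static int try_fast_reserve(void) { return -EAGAIN; }
static int fallback_reserve(void) { return 0; }

static int event_reserve(void)
{
	bool put_fallback_ref = false;
	int ret;

	ret = try_fast_reserve();
	if (ret == -EAGAIN) {
		/*
		 * Fast path aborted: record that a fallback (atomic)
		 * reservation is in flight by taking a saturating
		 * reference, then retry on the fallback path.
		 */
		put_fallback_ref = fallback_ref_get();
		ret = fallback_reserve();
	}
	if (ret)
		goto end;
	/* ... write the event header and payload here ... */
end:
	if (put_fallback_ref)
		fallback_ref_put();
	return ret;
}

int main(void)
{
	printf("event_reserve() = %d\n", event_reserve());
	return 0;
}
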
 
@@ -736,7 +799,7 @@ static
 void lttng_event_commit(struct lttng_ust_lib_ring_buffer_ctx *ctx)
 {
        lib_ring_buffer_commit(&client_config, ctx);
-       lib_ring_buffer_put_cpu(&client_config);
+       lib_ring_buffer_end(&client_config);
 }
 
 static