#include "clock.h"
#include "lttng-tracer.h"
#include "../libringbuffer/frontend_types.h"
+#include "../libringbuffer/rseq.h"
#define LTTNG_COMPACT_EVENT_BITS 5
#define LTTNG_COMPACT_TSC_BITS 27
+#define LTTNG_RSEQ_ATTEMPTS 8
+
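+/*
+ * Application context recording mode: application-provided context
+ * fields are only recorded for the 2.8+ probe ABI (APP_CTX_ENABLED).
+ * For the pre-2.8 probe ABI they are replaced by dummy fields
+ * (APP_CTX_DISABLED).
+ */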
+enum app_ctx_mode {
+ APP_CTX_DISABLED,
+ APP_CTX_ENABLED,
+};
/*
* Keep the natural field alignment for _each field_ within this structure if
}
static inline
-size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx)
+size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx,
+ enum app_ctx_mode mode)
{
int i;
size_t orig_offset = offset;
if (caa_likely(!ctx))
return 0;
offset += lib_ring_buffer_align(offset, ctx->largest_align);
- for (i = 0; i < ctx->nr_fields; i++)
- offset += ctx->fields[i].get_size(&ctx->fields[i], offset);
+ for (i = 0; i < ctx->nr_fields; i++) {
+ if (mode == APP_CTX_ENABLED) {
+ offset += ctx->fields[i].get_size(&ctx->fields[i], offset);
+ } else {
+ if (lttng_context_is_app(ctx->fields[i].event_field.name)) {
+ /*
+ * Before UST 2.8, we cannot use the
+ * application context, because we
+ * cannot trust that the handler used
+ * for get_size is the same as the one used for
+ * ctx_record, which would result in
+ * corrupted traces when tracing
+ * concurrently with application context
+ * register/unregister.
+ */
+ offset += lttng_ust_dummy_get_size(&ctx->fields[i], offset);
+ } else {
+ offset += ctx->fields[i].get_size(&ctx->fields[i], offset);
+ }
+ }
+ }
return offset - orig_offset;
}
static inline
void ctx_record(struct lttng_ust_lib_ring_buffer_ctx *bufctx,
struct lttng_channel *chan,
- struct lttng_ctx *ctx)
+ struct lttng_ctx *ctx,
+ enum app_ctx_mode mode)
{
int i;
if (caa_likely(!ctx))
return;
lib_ring_buffer_align_ctx(bufctx, ctx->largest_align);
- for (i = 0; i < ctx->nr_fields; i++)
- ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
+ for (i = 0; i < ctx->nr_fields; i++) {
+ if (mode == APP_CTX_ENABLED) {
+ ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
+ } else {
+ if (lttng_context_is_app(ctx->fields[i].event_field.name)) {
+ /*
+ * Before UST 2.8, we cannot use the
+ * application context, because we
+ * cannot trust that the handler used
+ * for get_size is the same as the one used for
+ * ctx_record, which would result in
+ * corrupted traces when tracing
+ * concurrently with application context
+ * register/unregister.
+ */
+ lttng_ust_dummy_record(&ctx->fields[i], bufctx, chan);
+ } else {
+ ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
+ }
+ }
+ }
}
/*
{
struct lttng_channel *lttng_chan = channel_get_private(chan);
struct lttng_event *event = ctx->priv;
+ struct lttng_stack_ctx *lttng_ctx = ctx->priv2;
size_t orig_offset = offset;
size_t padding;
padding = 0;
WARN_ON_ONCE(1);
}
- offset += ctx_get_size(offset, event->ctx);
- offset += ctx_get_size(offset, lttng_chan->ctx);
-
+ if (lttng_ctx) {
+ /* 2.8+ probe ABI. */
+ offset += ctx_get_size(offset, lttng_ctx->chan_ctx, APP_CTX_ENABLED);
+ offset += ctx_get_size(offset, lttng_ctx->event_ctx, APP_CTX_ENABLED);
+ } else {
+ /* Pre 2.8 probe ABI. */
+ offset += ctx_get_size(offset, lttng_chan->ctx, APP_CTX_DISABLED);
+ offset += ctx_get_size(offset, event->ctx, APP_CTX_DISABLED);
+ }
*pre_header_padding = padding;
return offset - orig_offset;
}
uint32_t event_id)
{
struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
+ struct lttng_event *event = ctx->priv;
struct lttng_stack_ctx *lttng_ctx = ctx->priv2;
if (caa_unlikely(ctx->rflags))
WARN_ON_ONCE(1);
}
- ctx_record(ctx, lttng_chan, lttng_ctx->chan_ctx);
- ctx_record(ctx, lttng_chan, lttng_ctx->event_ctx);
+ if (lttng_ctx) {
+ /* 2.8+ probe ABI. */
+ ctx_record(ctx, lttng_chan, lttng_ctx->chan_ctx, APP_CTX_ENABLED);
+ ctx_record(ctx, lttng_chan, lttng_ctx->event_ctx, APP_CTX_ENABLED);
+ } else {
+ /* Pre 2.8 probe ABI. */
+ ctx_record(ctx, lttng_chan, lttng_chan->ctx, APP_CTX_DISABLED);
+ ctx_record(ctx, lttng_chan, event->ctx, APP_CTX_DISABLED);
+ }
lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
return;
uint32_t event_id)
{
struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
+ struct lttng_event *event = ctx->priv;
struct lttng_stack_ctx *lttng_ctx = ctx->priv2;
switch (lttng_chan->header_type) {
default:
WARN_ON_ONCE(1);
}
- ctx_record(ctx, lttng_chan, lttng_ctx->chan_ctx);
- ctx_record(ctx, lttng_chan, lttng_ctx->event_ctx);
+ if (lttng_ctx) {
+ /* 2.8+ probe ABI. */
+ ctx_record(ctx, lttng_chan, lttng_ctx->chan_ctx, APP_CTX_ENABLED);
+ ctx_record(ctx, lttng_chan, lttng_ctx->event_ctx, APP_CTX_ENABLED);
+ } else {
+ /* Pre 2.8 probe ABI. */
+ ctx_record(ctx, lttng_chan, lttng_chan->ctx, APP_CTX_DISABLED);
+ ctx_record(ctx, lttng_chan, event->ctx, APP_CTX_DISABLED);
+ }
lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
}
.tsc_bits = LTTNG_COMPACT_TSC_BITS,
.alloc = RING_BUFFER_ALLOC_PER_CPU,
- .sync = RING_BUFFER_SYNC_GLOBAL,
+ .sync = RING_BUFFER_SYNC_PER_CPU,
.mode = RING_BUFFER_MODE_TEMPLATE,
.backend = RING_BUFFER_PAGE,
.output = RING_BUFFER_MMAP,
channel_destroy(chan->chan, chan->handle, 1);
}
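+/*
+ * Take a reference on a saturating reference count. Returns true when
+ * a reference was taken and must eventually be released. Returns false
+ * when the count is (or just became) saturated at LONG_MAX; a saturated
+ * count stays saturated and the caller must not release the reference.
+ */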
+static
+bool refcount_get_saturate(long *ref)
+{
+ long old, _new, res;
+
+ old = uatomic_read(ref);
+ for (;;) {
+ if (old == LONG_MAX) {
+ return false; /* Saturated. */
+ }
+ _new = old + 1;
+ res = uatomic_cmpxchg(ref, old, _new);
+ if (res == old) {
+ if (_new == LONG_MAX) {
+ return false; /* Saturation. */
+ }
+ return true; /* Success. */
+ }
+ old = res;
+ }
+}
+
static
int lttng_event_reserve(struct lttng_ust_lib_ring_buffer_ctx *ctx,
uint32_t event_id)
{
struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
- int ret, cpu;
+ struct lttng_rseq_state rseq_state;
+ int ret, cpu, attempt = 0;
+ bool put_fallback_ref = false;
- cpu = lib_ring_buffer_get_cpu(&client_config);
- if (cpu < 0)
+ if (lib_ring_buffer_begin(&client_config))
return -EPERM;
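+ /*
+ * Fast path: use rseq to query the current CPU and perform the
+ * reservation. Fall back to lib_ring_buffer_get_cpu() when rseq
+ * is unavailable or when the reservation keeps failing with
+ * -EAGAIN.
+ */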
+retry:
+ rseq_state = rseq_start();
+ if (caa_unlikely(rseq_cpu_at_start(rseq_state) < 0)) {
+ if (caa_unlikely(rseq_cpu_at_start(rseq_state) == -1)) {
+ if (!rseq_register_current_thread())
+ goto retry;
+ }
+ /* rseq is unavailable. */
+ cpu = lib_ring_buffer_get_cpu(&client_config);
+ if (caa_unlikely(cpu < 0)) {
+ ret = -EPERM;
+ goto end;
+ }
+ } else {
+ cpu = rseq_cpu_at_start(rseq_state);
+ }
+fallback:
ctx->cpu = cpu;
switch (lttng_chan->header_type) {
WARN_ON_ONCE(1);
}
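+ /*
+ * The ctx_len check keeps compatibility with probe providers
+ * built against an older, smaller struct
+ * lttng_ust_lib_ring_buffer_ctx which lacks the rseq_state and
+ * backend_pages fields.
+ */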
+ if (caa_likely(ctx->ctx_len
+ >= sizeof(struct lttng_ust_lib_ring_buffer_ctx)))
+ ctx->rseq_state = rseq_state;
+
ret = lib_ring_buffer_reserve(&client_config, ctx);
- if (ret)
- goto put;
+ if (caa_unlikely(ret)) {
+ if (ret == -EAGAIN) {
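+ /*
+ * The rseq-based reservation failed transiently. Retry up
+ * to LTTNG_RSEQ_ATTEMPTS times before taking a fallback
+ * reference on the channel and reserving through the
+ * lib_ring_buffer_get_cpu() path.
+ */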
+ assert(!put_fallback_ref);
+ if (++attempt < LTTNG_RSEQ_ATTEMPTS) {
+ caa_cpu_relax();
+ goto retry;
+ }
+ put_fallback_ref = refcount_get_saturate(
+ &lttng_chan->chan->u.reserve_fallback_ref);
+ cpu = lib_ring_buffer_get_cpu(&client_config);
+ if (caa_unlikely(cpu < 0)) {
+ ret = -EPERM;
+ goto end;
+ }
+ goto fallback;
+ }
+ goto end;
+ }
+ if (caa_likely(ctx->ctx_len
+ >= sizeof(struct lttng_ust_lib_ring_buffer_ctx))) {
+ if (lib_ring_buffer_backend_get_pages(&client_config, ctx,
+ &ctx->backend_pages)) {
+ ret = -EPERM;
+ goto end;
+ }
+ }
lttng_write_event_header(&client_config, ctx, event_id);
+
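+ /* Release the fallback reference taken on the -EAGAIN path. */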
+ if (caa_unlikely(put_fallback_ref))
+ uatomic_dec(&lttng_chan->chan->u.reserve_fallback_ref);
+
return 0;
-put:
- lib_ring_buffer_put_cpu(&client_config);
+end:
+ lib_ring_buffer_end(&client_config);
+ if (put_fallback_ref)
+ uatomic_dec(&lttng_chan->chan->u.reserve_fallback_ref);
return ret;
}
void lttng_event_commit(struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
lib_ring_buffer_commit(&client_config, ctx);
- lib_ring_buffer_put_cpu(&client_config);
+ lib_ring_buffer_end(&client_config);
}
static