* following fields may be used.
*/
struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_rseq_state rseq_state;
};
/**
ctx->ip = 0;
ctx->priv2 = priv2;
memset(ctx->padding2, 0, LTTNG_UST_RING_BUFFER_CTX_PADDING);
+ ctx->rseq_state.rseqp = NULL;
+ ctx->rseq_state.cpu_id = -1;
+ ctx->rseq_state.event_counter = 0;
}
/*
}
}
lttng_write_event_header(&client_config, ctx, event_id);
+
+ if (caa_likely(ctx->ctx_len
+ >= sizeof(struct lttng_ust_lib_ring_buffer_ctx)))
+ ctx->rseq_state = rseq_state;
+
return 0;
end:
lib_ring_buffer_end(&client_config);
#include "frontend.h"
#include <urcu-bp.h>
#include <urcu/compiler.h>
+#include "rseq.h"
static inline
int lib_ring_buffer_get_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
unsigned long commit_count;
struct commit_counters_hot *cc_hot = shmp_index(handle,
buf->commit_hot, endidx);
+ struct lttng_rseq_state rseq_state;
+
+ if (caa_likely(ctx->ctx_len
+ >= sizeof(struct lttng_ust_lib_ring_buffer_ctx))) {
+ rseq_state = ctx->rseq_state;
+ } else {
+ rseq_state.cpu_id = -2;
+ }
if (caa_unlikely(!cc_hot))
return;
*/
cmm_smp_wmb();
+ if (caa_likely(rseq_state.cpu_id >= 0)) {
+ unsigned long newv;
+
+ newv = cc_hot->cc_rseq + ctx->slot_size;
+ if (caa_likely(__rseq_finish(NULL, 0, NULL, NULL, 0,
+ (intptr_t *)&cc_hot->cc_rseq,
+ (intptr_t) newv,
+ rseq_state, RSEQ_FINISH_SINGLE, false)))
+ goto add_done;
+ }
v_add(config, ctx->slot_size, &cc_hot->cc);
+add_done:
/*
* commit count read can race with concurrent OOO commit count updates.
* which is completely independent of the order.
*/
commit_count = v_read(config, &cc_hot->cc);
+ commit_count += cc_hot->cc_rseq;
lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
commit_count, endidx, handle, ctx->tsc);
offset = v_read(config, &buf->offset);
idx = subbuf_index(offset, chan);
commit_count = v_read(config, &cc_hot->cc);
+ commit_count += cc_hot->cc_rseq;
} while (offset != v_read(config, &buf->offset));
return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
#define RB_COMMIT_COUNT_HOT_PADDING 16
/*
 * Hot (writer-side) commit counters. Padded and cache-line aligned so
 * each counter set sits in its own cache line.
 *
 * NOTE(review): with the rseq fast path, the logical commit count is the
 * sum cc + cc_rseq — every reader in this patch computes
 * v_read(&cc) + cc_rseq. The two fields are read separately (no single
 * atomic snapshot); presumably the surrounding ordering/retry logic makes
 * that safe — confirm against the full commit-path code.
 */
struct commit_counters_hot {
union v_atomic cc; /* Commit counter (atomic v_add() slow path) */
+ unsigned long cc_rseq; /* Commit counter for rseq: bytes committed via the per-CPU __rseq_finish() fast path; falls back to cc on rseq abort */
union v_atomic seq; /* Consecutive commits */
char padding[RB_COMMIT_COUNT_HOT_PADDING];
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
if (!cc_cold)
return;
v_set(config, &cc_hot->cc, 0);
+ cc_hot->cc_rseq = 0;
v_set(config, &cc_hot->seq, 0);
v_set(config, &cc_cold->cc_sb, 0);
}
if (!cc_cold)
return;
commit_count = v_read(config, &cc_hot->cc);
+ commit_count += cc_hot->cc_rseq;
commit_count_sb = v_read(config, &cc_cold->cc_sb);
if (subbuf_offset(commit_count, chan) != 0)
v_add(config, config->cb.subbuffer_header_size(),
&cc_hot->cc);
commit_count = v_read(config, &cc_hot->cc);
+ commit_count += cc_hot->cc_rseq;
/* Check if the written buffer has to be delivered */
lib_ring_buffer_check_deliver(config, buf, chan, offsets->old,
commit_count, oldidx, handle, tsc);
return;
v_add(config, padding_size, &cc_hot->cc);
commit_count = v_read(config, &cc_hot->cc);
+ commit_count += cc_hot->cc_rseq;
lib_ring_buffer_check_deliver(config, buf, chan, offsets->old - 1,
commit_count, oldidx, handle, tsc);
lib_ring_buffer_write_commit_counter(config, buf, chan,
return;
v_add(config, config->cb.subbuffer_header_size(), &cc_hot->cc);
commit_count = v_read(config, &cc_hot->cc);
+ commit_count += cc_hot->cc_rseq;
/* Check if the written buffer has to be delivered */
lib_ring_buffer_check_deliver(config, buf, chan, offsets->begin,
commit_count, beginidx, handle, tsc);