*
* Return:
* 0 on success.
- * -EAGAIN if channel is disabled.
+ * -EPERM if channel is disabled.
* -ENOSPC if event size is too large for packet.
* -ENOBUFS if there is currently not enough space in buffer for the event.
* -EIO if data cannot be written into the buffer for any other reason.
+ * -EAGAIN if the reserve was aborted; the caller should attempt it again.
*/
static inline
int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
			    struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	struct channel *chan = ctx->chan;
	struct lttng_ust_shm_handle *handle = ctx->handle;
struct lttng_ust_lib_ring_buffer *buf;
unsigned long o_begin, o_end, o_old;
size_t before_hdr_pad = 0;
+ struct lttng_rseq_state rseq_state;
- if (caa_unlikely(uatomic_read(&chan->record_disabled)))
- return -EAGAIN;
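+	/*
+	 * The rseq state was appended to the probe context structure; a
+	 * provider built against the older, shorter context cannot supply
+	 * it, so fall back to an invalid cpu_id (-2), which forces the
+	 * atomic (non-rseq) reserve path below.
+	 */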
+ if (caa_likely(ctx->ctx_len
+ >= sizeof(struct lttng_ust_lib_ring_buffer_ctx))) {
+ rseq_state = ctx->rseq_state;
+ } else {
+ rseq_state.cpu_id = -2;
+ rseq_state.event_counter = 0;
+ rseq_state.rseqp = NULL;
+ }
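+	/*
+	 * A disabled channel is reported as -EPERM rather than -EAGAIN:
+	 * -EAGAIN is now reserved for aborted rseq critical sections,
+	 * which the caller is expected to retry.
+	 */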
+ if (caa_unlikely(uatomic_read(&chan->record_disabled)))
+ return -EPERM;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp);
else
	buf = shmp(handle, chan->backend.buf[0].shmp);
if (caa_unlikely(!buf))
return -EIO;
if (caa_unlikely(uatomic_read(&buf->record_disabled)))
- return -EAGAIN;
+ return -EPERM;
ctx->buf = buf;
/*
	 * Perform retryable operations.
	 */
	if (caa_unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
&o_end, &o_old, &before_hdr_pad)))
goto slow_path;
- if (caa_unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
- != o_old))
- goto slow_path;
-
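+	/*
+	 * Fall back to the atomic cmpxchg update when rseq cannot be used:
+	 * globally synchronized buffers, a thread without usable rseq
+	 * state (cpu_id < 0), or a concurrent user holding the reserve
+	 * fallback reference.
+	 */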
+ if (caa_unlikely(config->sync == RING_BUFFER_SYNC_GLOBAL
+ || rseq_state.cpu_id < 0
+ || uatomic_read(&chan->u.reserve_fallback_ref))) {
+ if (caa_unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old,
+ o_end) != o_old))
+ goto slow_path;
+ } else {
+ /*
+ * Load reserve_fallback_ref before offset. Matches the
+ * implicit memory barrier after v_cmpxchg of offset.
+ */
+ cmm_smp_rmb();
+ if (caa_unlikely(ctx->buf->offset.a != o_old))
+ return -EAGAIN;
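+		/*
+		 * Commit the new offset with a single-word rseq store.
+		 * Preemption, migration or signal delivery aborts the
+		 * critical section; report -EAGAIN so the caller retries
+		 * the reserve.
+		 */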
+ if (caa_unlikely(!__rseq_finish(NULL, 0, NULL, NULL, 0,
+ (intptr_t *) &ctx->buf->offset.a,
+ (intptr_t) o_end,
+ rseq_state, RSEQ_FINISH_SINGLE, false)))
+ return -EAGAIN;
+ }
/*
* Atomically update last_tsc. This update races against concurrent
* atomic updates, but the race will always cause supplementary full TSC
 * record headers, never the opposite (missing a full TSC record header
 * when it would be needed).
 */
rseq_state = ctx->rseq_state;
} else {
rseq_state.cpu_id = -2;
+ rseq_state.event_counter = 0;
+ rseq_state.rseqp = NULL;
}
if (caa_unlikely(!cc_hot))
	return;
*/
cmm_smp_wmb();
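+	/*
+	 * Only per-CPU synchronized buffers with valid rseq state may
+	 * update the hot commit counter through the rseq fast path.
+	 */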
- if (caa_likely(rseq_state.cpu_id >= 0)) {
+ if (caa_likely(config->sync == RING_BUFFER_SYNC_PER_CPU
+ && rseq_state.cpu_id >= 0)) {
unsigned long newv;
newv = cc_hot->cc_rseq + ctx->slot_size;
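
/*
 * Illustrative sketch, not part of the patch: how a caller is expected to
 * handle the revised return codes. The lib_ring_buffer_reserve() name and
 * its two-argument signature are assumed from the surrounding frontend
 * API; only -EAGAIN (aborted rseq critical section) is transient.
 */
static inline
int reserve_with_retry(const struct lttng_ust_lib_ring_buffer_config *config,
		       struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	int ret;

	do {
		ret = lib_ring_buffer_reserve(config, ctx);
	} while (ret == -EAGAIN);	/* reserve aborted: attempt again */
	return ret;	/* 0, or -EPERM / -ENOSPC / -ENOBUFS / -EIO */
}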