-#ifndef _LINUX_RING_BUFFER_FRONTEND_API_H
-#define _LINUX_RING_BUFFER_FRONTEND_API_H
+#ifndef _LTTNG_RING_BUFFER_FRONTEND_API_H
+#define _LTTNG_RING_BUFFER_FRONTEND_API_H
/*
- * linux/ringbuffer/frontend_api.h
+ * libringbuffer/frontend_api.h
*
- * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* Ring Buffer Library Synchronization Header (buffer write API).
*
* Author:
- * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
- * See ring_buffer_frontend.c for more information on wait-free algorithms.
- * See linux/ringbuffer/frontend.h for channel allocation and read-side API.
- *
- * Dual LGPL v2.1/GPL v2 license.
+ * See ring_buffer_frontend.c for more information on wait-free
+ * algorithms.
+ * See frontend.h for channel allocation and read-side API.
*/
#include "frontend.h"
-#include "ust/core.h"
#include <urcu-bp.h>
#include <urcu/compiler.h>
+#include "rseq.h"
+
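+/**
+ * lib_ring_buffer_get_cpu - Return the current CPU number.
+ *
+ * Used to select the per-CPU buffer for per-CPU allocated channels. The
+ * config argument is currently unused.
+ */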
+static inline
+int lib_ring_buffer_get_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
+{
+ return lttng_ust_get_cpu();
+}
/**
- * lib_ring_buffer_get_cpu - Precedes ring buffer reserve/commit.
+ * lib_ring_buffer_begin - Precedes ring buffer reserve/commit.
*
- * Grabs RCU read-side lock and keeps a ring buffer nesting count as
- * supplementary safety net to ensure tracer client code will never
- * trigger an endless recursion. Returns the processor ID on success,
- * -EPERM on failure (nesting count too high).
+ * Keeps a ring buffer nesting count as a supplementary safety net to
+ * ensure tracer client code will never trigger an endless recursion.
+ * Returns 0 on success, -EPERM on failure (nesting count too high).
*
* asm volatile and "memory" clobber prevent the compiler from moving
* instructions out of the ring buffer nesting count. This is required to ensure
* section.
*/
static inline
-int lib_ring_buffer_get_cpu(const struct lib_ring_buffer_config *config)
+int lib_ring_buffer_begin(const struct lttng_ust_lib_ring_buffer_config *config)
{
- int cpu, nesting;
+ int nesting;
- rcu_read_lock();
- cpu = ust_get_cpu();
- nesting = ++lib_ring_buffer_nesting; /* TLS */
+ nesting = ++URCU_TLS(lib_ring_buffer_nesting);
cmm_barrier();
- if (unlikely(nesting > 4)) {
+ if (caa_unlikely(nesting > 4)) {
WARN_ON_ONCE(1);
- lib_ring_buffer_nesting--; /* TLS */
- rcu_read_unlock();
+ URCU_TLS(lib_ring_buffer_nesting)--;
return -EPERM;
- } else
- return cpu;
+ }
+ return 0;
}
/**
- * lib_ring_buffer_put_cpu - Follows ring buffer reserve/commit.
+ * lib_ring_buffer_end - Follows ring buffer reserve/commit.
*/
static inline
-void lib_ring_buffer_put_cpu(const struct lib_ring_buffer_config *config)
+void lib_ring_buffer_end(const struct lttng_ust_lib_ring_buffer_config *config)
{
cmm_barrier();
- lib_ring_buffer_nesting--; /* TLS */
- rcu_read_unlock();
+	URCU_TLS(lib_ring_buffer_nesting)--;
}
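+/*
+ * Sketch of a typical probe-side call sequence (assumes ctx has already
+ * been initialized by the caller):
+ *
+ *	if (lib_ring_buffer_begin(config) < 0)
+ *		return;		(nesting count too high, drop the event)
+ *	if (!lib_ring_buffer_reserve(config, ctx)) {
+ *		(write the payload starting at ctx->buf_offset)
+ *		lib_ring_buffer_commit(config, ctx);
+ *	}
+ *	lib_ring_buffer_end(config);
+ */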
/*
* returns 0 if reserve ok, or 1 if the slow path must be taken.
*/
static inline
-int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_ctx *ctx,
+int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
unsigned long *o_begin, unsigned long *o_end,
unsigned long *o_old, size_t *before_hdr_pad)
{
struct channel *chan = ctx->chan;
- struct lib_ring_buffer *buf = ctx->buf;
+ struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
*o_begin = v_read(config, &buf->offset);
*o_old = *o_begin;
if (last_tsc_overflow(config, buf, ctx->tsc))
ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
- if (unlikely(subbuf_offset(*o_begin, chan) == 0))
+ if (caa_unlikely(subbuf_offset(*o_begin, chan) == 0))
return 1;
ctx->slot_size = record_header_size(config, chan, *o_begin,
ctx->slot_size +=
lib_ring_buffer_align(*o_begin + ctx->slot_size,
ctx->largest_align) + ctx->data_size;
- if (unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
+ if (caa_unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
> chan->backend.subbuf_size))
return 1;
*/
*o_end = *o_begin + ctx->slot_size;
- if (unlikely((subbuf_offset(*o_end, chan)) == 0))
+ if (caa_unlikely((subbuf_offset(*o_end, chan)) == 0))
/*
* The offset_end will fall at the very beginning of the next
* subbuffer.
*
* Return :
* 0 on success.
- * -EAGAIN if channel is disabled.
+ * -EPERM if channel is disabled.
* -ENOSPC if event size is too large for packet.
* -ENOBUFS if there is currently not enough space in buffer for the event.
* -EIO if data cannot be written into the buffer for any other reason.
+ *	-EAGAIN if the reserve was aborted and should be attempted again.
*/
static inline
-int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_ctx *ctx)
+int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
struct channel *chan = ctx->chan;
- struct lib_ring_buffer *buf;
+ struct lttng_ust_shm_handle *handle = ctx->handle;
+ struct lttng_ust_lib_ring_buffer *buf;
unsigned long o_begin, o_end, o_old;
size_t before_hdr_pad = 0;
-
- if (uatomic_read(&chan->record_disabled))
- return -EAGAIN;
-
+ struct lttng_rseq_state rseq_state;
+
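+	/*
+	 * Callers built against an older, smaller
+	 * lttng_ust_lib_ring_buffer_ctx do not provide rseq_state: use a
+	 * negative cpu_id so the cmpxchg fallback path is taken below.
+	 */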
+ if (caa_likely(ctx->ctx_len
+ >= sizeof(struct lttng_ust_lib_ring_buffer_ctx))) {
+ rseq_state = ctx->rseq_state;
+ } else {
+ rseq_state.cpu_id = -2;
+ rseq_state.event_counter = 0;
+ rseq_state.rseqp = NULL;
+ }
+
+ if (caa_unlikely(uatomic_read(&chan->record_disabled)))
+ return -EPERM;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
- buf = &shmp(chan->backend.buf)[ctx->cpu];
+ buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp);
else
- buf = shmp(chan->backend.buf);
- if (uatomic_read(&buf->record_disabled))
- return -EAGAIN;
+ buf = shmp(handle, chan->backend.buf[0].shmp);
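+	/* shmp() returns NULL if the shm reference cannot be resolved. */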
+ if (caa_unlikely(!buf))
+ return -EIO;
+ if (caa_unlikely(uatomic_read(&buf->record_disabled)))
+ return -EPERM;
ctx->buf = buf;
/*
* Perform retryable operations.
*/
- if (unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
+ if (caa_unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
&o_end, &o_old, &before_hdr_pad)))
goto slow_path;
- if (unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
- != o_old))
- goto slow_path;
-
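+	/*
+	 * Publish the new offset either with an atomic cmpxchg (global
+	 * synchronization, no usable rseq for this thread, or a non-zero
+	 * reserve_fallback_ref) or with an rseq commit on the fast path.
+	 */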
+ if (caa_unlikely(config->sync == RING_BUFFER_SYNC_GLOBAL
+ || rseq_state.cpu_id < 0
+ || uatomic_read(&chan->u.reserve_fallback_ref))) {
+ if (caa_unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old,
+ o_end) != o_old))
+ goto slow_path;
+ } else {
+ /*
+ * Load reserve_fallback_ref before offset. Matches the
+ * implicit memory barrier after v_cmpxchg of offset.
+ */
+ cmm_smp_rmb();
+ if (caa_unlikely(ctx->buf->offset.a != o_old))
+ return -EAGAIN;
+ if (caa_unlikely(!__rseq_finish(NULL, 0, NULL, NULL, 0,
+ (intptr_t *) &ctx->buf->offset.a,
+ (intptr_t) o_end,
+ rseq_state, RSEQ_FINISH_SINGLE, false)))
+ return -EAGAIN;
+ }
/*
* Atomically update last_tsc. This update races against concurrent
* atomic updates, but the race will always cause supplementary full TSC
* Clear noref flag for this subbuffer.
*/
lib_ring_buffer_clear_noref(config, &ctx->buf->backend,
- subbuf_index(o_end - 1, chan));
+ subbuf_index(o_end - 1, chan), handle);
ctx->pre_offset = o_begin;
ctx->buf_offset = o_begin + before_hdr_pad;
* disabled, for RING_BUFFER_SYNC_PER_CPU configuration.
*/
static inline
-void lib_ring_buffer_switch(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf, enum switch_mode mode)
+void lib_ring_buffer_switch(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
+ struct lttng_ust_shm_handle *handle)
{
- lib_ring_buffer_switch_slow(buf, mode);
+ lib_ring_buffer_switch_slow(buf, mode, handle);
}
/* See ring_buffer_frontend_api.h for lib_ring_buffer_reserve(). */
* specified sub-buffer, and delivers it if necessary.
*/
static inline
-void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
- const struct lib_ring_buffer_ctx *ctx)
+void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *config,
+ const struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
struct channel *chan = ctx->chan;
- struct lib_ring_buffer *buf = ctx->buf;
+ struct lttng_ust_shm_handle *handle = ctx->handle;
+ struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
unsigned long offset_end = ctx->buf_offset;
unsigned long endidx = subbuf_index(offset_end - 1, chan);
unsigned long commit_count;
+ struct commit_counters_hot *cc_hot = shmp_index(handle,
+ buf->commit_hot, endidx);
+ struct lttng_rseq_state rseq_state;
+
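+	/*
+	 * As in lib_ring_buffer_reserve(), callers with an older, shorter
+	 * ctx provide no rseq_state and fall back to the atomic v_add() path.
+	 */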
+ if (caa_likely(ctx->ctx_len
+ >= sizeof(struct lttng_ust_lib_ring_buffer_ctx))) {
+ rseq_state = ctx->rseq_state;
+ } else {
+ rseq_state.cpu_id = -2;
+ rseq_state.event_counter = 0;
+ rseq_state.rseqp = NULL;
+ }
+
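+	/* Skip the commit if the hot commit counters cannot be mapped. */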
+ if (caa_unlikely(!cc_hot))
+ return;
/*
* Must count record before incrementing the commit count.
*/
- subbuffer_count_record(config, &buf->backend, endidx);
+ subbuffer_count_record(config, ctx, &buf->backend, endidx, handle);
/*
* Order all writes to buffer before the commit count update that will
*/
cmm_smp_wmb();
- v_add(config, ctx->slot_size, &shmp(buf->commit_hot)[endidx].cc);
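+	/*
+	 * The hot commit counter is split in two: cc_rseq, updated with an
+	 * rseq commit on the per-CPU fast path, and cc, updated atomically
+	 * otherwise. The delivery check below sums both parts.
+	 */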
+ if (caa_likely(config->sync == RING_BUFFER_SYNC_PER_CPU
+ && rseq_state.cpu_id >= 0)) {
+ unsigned long newv;
+
+ newv = cc_hot->cc_rseq + ctx->slot_size;
+ if (caa_likely(__rseq_finish(NULL, 0, NULL, NULL, 0,
+ (intptr_t *)&cc_hot->cc_rseq,
+ (intptr_t) newv,
+ rseq_state, RSEQ_FINISH_SINGLE, false)))
+ goto add_done;
+ }
+ v_add(config, ctx->slot_size, &cc_hot->cc);
+add_done:
/*
* commit count read can race with concurrent OOO commit count updates.
* count reaches back the reserve offset for a specific sub-buffer,
* which is completely independent of the order.
*/
- commit_count = v_read(config, &shmp(buf->commit_hot)[endidx].cc);
+ commit_count = v_read(config, &cc_hot->cc);
+ commit_count += cc_hot->cc_rseq;
lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
- commit_count, endidx);
+ commit_count, endidx, handle, ctx->tsc);
/*
* Update used size at each commit. It's needed only for extracting
* ring_buffer buffers from vmcore, after crash.
*/
- lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
- ctx->buf_offset, commit_count,
- ctx->slot_size);
+ lib_ring_buffer_write_commit_counter(config, buf, chan,
+ offset_end, commit_count, handle, cc_hot);
}
/**
* Returns 0 upon success, -EPERM if the record cannot be discarded.
*/
static inline
-int lib_ring_buffer_try_discard_reserve(const struct lib_ring_buffer_config *config,
- const struct lib_ring_buffer_ctx *ctx)
+int lib_ring_buffer_try_discard_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
+ const struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
- struct lib_ring_buffer *buf = ctx->buf;
+ struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
unsigned long end_offset = ctx->pre_offset + ctx->slot_size;
/*
*/
save_last_tsc(config, buf, 0ULL);
- if (likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
+ if (caa_likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
!= end_offset))
return -EPERM;
else
}
static inline
-void channel_record_disable(const struct lib_ring_buffer_config *config,
+void channel_record_disable(const struct lttng_ust_lib_ring_buffer_config *config,
struct channel *chan)
{
uatomic_inc(&chan->record_disabled);
}
static inline
-void channel_record_enable(const struct lib_ring_buffer_config *config,
+void channel_record_enable(const struct lttng_ust_lib_ring_buffer_config *config,
struct channel *chan)
{
uatomic_dec(&chan->record_disabled);
}
static inline
-void lib_ring_buffer_record_disable(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+void lib_ring_buffer_record_disable(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf)
{
uatomic_inc(&buf->record_disabled);
}
static inline
-void lib_ring_buffer_record_enable(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+void lib_ring_buffer_record_enable(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf)
{
uatomic_dec(&buf->record_disabled);
}
-#endif /* _LINUX_RING_BUFFER_FRONTEND_API_H */
+#endif /* _LTTNG_RING_BUFFER_FRONTEND_API_H */