* Dual LGPL v2.1/GPL v2 license.
*/
+#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <urcu/compiler.h>
#include <urcu/ref.h>
+#include <helper.h>
#include "smp.h"
-#include <ust/ringbuffer-config.h>
+#include <lttng/ringbuffer-config.h>
+#include "vatomic.h"
#include "backend.h"
#include "frontend.h"
#include "shm.h"
+#include "tlsfixup.h"
#ifndef max
#define max(a, b) ((a) > (b) ? (a) : (b))
#endif
+/* Print DBG() messages about events lost only every 1048576 hits */
+#define DBG_PRINT_NR_LOST (1UL << 20)
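+/*
+ * Since DBG_PRINT_NR_LOST is a power of two, masking a lost-record count
+ * with (DBG_PRINT_NR_LOST - 1) is a cheap modulo: the rate-limited DBG()
+ * calls below fire for the first lost record, then once every 2^20.
+ */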
+
/*
* Use POSIX SHM: shm_open(3) and shm_unlink(3).
* close(2) to close the fd returned by shm_open.
__thread unsigned int lib_ring_buffer_nesting;
-static
-void lib_ring_buffer_print_errors(struct channel *chan,
- struct lib_ring_buffer *buf, int cpu,
- struct shm_handle *handle);
-
/*
- * Must be called under cpu hotplug protection.
+ * TODO: this is unused. Errors are saved within the ring buffer.
+ * Eventually, allow consumerd to print these errors.
*/
-void lib_ring_buffer_free(struct lib_ring_buffer *buf,
- struct shm_handle *handle)
-{
- struct channel *chan = shmp(handle, buf->backend.chan);
-
- lib_ring_buffer_print_errors(chan, buf, buf->backend.cpu, handle);
- /* buf->commit_hot will be freed by shm teardown */
- /* buf->commit_cold will be freed by shm teardown */
-
- lib_ring_buffer_backend_free(&buf->backend);
-}
+static
+void lib_ring_buffer_print_errors(struct channel *chan,
+ struct lttng_ust_lib_ring_buffer *buf, int cpu,
+ struct lttng_ust_shm_handle *handle)
+ __attribute__((unused));
/**
* lib_ring_buffer_reset - Reset ring buffer to initial values.
* should not be using the iterator concurrently with reset. The previous
* current iterator record is reset.
*/
-void lib_ring_buffer_reset(struct lib_ring_buffer *buf,
- struct shm_handle *handle)
+void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle)
{
struct channel *chan = shmp(handle, buf->backend.chan);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned int i;
/*
/*
* Must be called under cpu hotplug protection.
*/
-int lib_ring_buffer_create(struct lib_ring_buffer *buf,
+int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
struct channel_backend *chanb, int cpu,
- struct shm_handle *handle,
+ struct lttng_ust_shm_handle *handle,
struct shm_object *shmobj)
{
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
struct channel *chan = caa_container_of(chanb, struct channel, backend);
- void *priv = chanb->priv;
- unsigned int num_subbuf;
+ void *priv = channel_get_private(chan);
size_t subbuf_header_size;
- u64 tsc;
+ uint64_t tsc;
int ret;
/* Test for cpu hotplug */
goto free_commit;
}
- num_subbuf = chan->backend.num_subbuf;
- //init_waitqueue_head(&buf->read_wait);
-
/*
* Write the subbuffer header for first subbuffer so we know the total
* duration of data gathering.
free_commit:
/* commit_hot will be freed by shm teardown */
free_chanbuf:
- lib_ring_buffer_backend_free(&buf->backend);
return ret;
}
#if 0
static void switch_buffer_timer(unsigned long data)
{
- struct lib_ring_buffer *buf = (struct lib_ring_buffer *)data;
+ struct lttng_ust_lib_ring_buffer *buf = (struct lttng_ust_lib_ring_buffer *)data;
struct channel *chan = shmp(handle, buf->backend.chan);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
/*
* Only flush buffers periodically if readers are active.
*/
- if (uatomic_read(&buf->active_readers))
+ if (uatomic_read(&buf->active_readers) || uatomic_read(&buf->active_shadow_readers))
lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE, handle);
//TODO timers
}
#endif //0
-static void lib_ring_buffer_start_switch_timer(struct lib_ring_buffer *buf,
- struct shm_handle *handle)
+static void lib_ring_buffer_start_switch_timer(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle)
{
struct channel *chan = shmp(handle, buf->backend.chan);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ //const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
if (!chan->switch_timer_interval || buf->switch_timer_enabled)
return;
buf->switch_timer_enabled = 1;
}
-static void lib_ring_buffer_stop_switch_timer(struct lib_ring_buffer *buf,
- struct shm_handle *handle)
+static void lib_ring_buffer_stop_switch_timer(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle)
{
struct channel *chan = shmp(handle, buf->backend.chan);
*/
static void read_buffer_timer(unsigned long data)
{
- struct lib_ring_buffer *buf = (struct lib_ring_buffer *)data;
+ struct lttng_ust_lib_ring_buffer *buf = (struct lttng_ust_lib_ring_buffer *)data;
struct channel *chan = shmp(handle, buf->backend.chan);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
CHAN_WARN_ON(chan, !buf->backend.allocated);
- if (uatomic_read(&buf->active_readers)
+ if ((uatomic_read(&buf->active_readers) || uatomic_read(&buf->active_shadow_readers))
&& lib_ring_buffer_poll_deliver(config, buf, chan)) {
//TODO
//wake_up_interruptible(&buf->read_wait);
}
#endif //0
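+/*
+ * Note: the periodic switch and read timers are not wired up in userspace
+ * yet (hence the #if 0 callbacks above); for now the start/stop helpers
+ * only track the per-buffer switch_timer_enabled/read_timer_enabled state.
+ */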
-static void lib_ring_buffer_start_read_timer(struct lib_ring_buffer *buf,
- struct shm_handle *handle)
+static void lib_ring_buffer_start_read_timer(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle)
{
struct channel *chan = shmp(handle, buf->backend.chan);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
|| !chan->read_timer_interval
buf->read_timer_enabled = 1;
}
-static void lib_ring_buffer_stop_read_timer(struct lib_ring_buffer *buf,
- struct shm_handle *handle)
+static void lib_ring_buffer_stop_read_timer(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle)
{
struct channel *chan = shmp(handle, buf->backend.chan);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
|| !chan->read_timer_interval
}
static void channel_unregister_notifiers(struct channel *chan,
- struct shm_handle *handle)
+ struct lttng_ust_shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
int cpu;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
for_each_possible_cpu(cpu) {
- struct lib_ring_buffer *buf = shmp(handle, chan->backend.buf[cpu].shmp);
+ struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[cpu].shmp);
lib_ring_buffer_stop_switch_timer(buf, handle);
lib_ring_buffer_stop_read_timer(buf, handle);
}
} else {
- struct lib_ring_buffer *buf = shmp(handle, chan->backend.buf[0].shmp);
+ struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[0].shmp);
lib_ring_buffer_stop_switch_timer(buf, handle);
lib_ring_buffer_stop_read_timer(buf, handle);
//channel_backend_unregister_notifiers(&chan->backend);
}
-static void channel_free(struct channel *chan, struct shm_handle *handle)
+static void channel_free(struct channel *chan, struct lttng_ust_shm_handle *handle,
+ int shadow)
{
- int ret;
-
- channel_backend_free(&chan->backend, handle);
+ if (!shadow)
+ channel_backend_free(&chan->backend, handle);
/* chan is freed by shm teardown */
shm_object_table_destroy(handle->table);
free(handle);
* channel_create - Create channel.
* @config: ring buffer instance configuration
* @name: name of the channel
- * @priv: ring buffer client private data
+ * @priv_data: ring buffer client private data area pointer (output)
+ * @priv_data_align: alignment, in bytes, of the private data area.
+ * @priv_data_size: length, in bytes, of the private data area.
+ * @priv_data_init: initialization data for private data.
* @buf_addr: pointer to the beginning of the preallocated buffer contiguous
* address mapping. It is used only by RING_BUFFER_STATIC
* configuration. It can be set to NULL for other backends.
* Holds cpu hotplug.
* Returns NULL on failure.
*/
-struct shm_handle *channel_create(const struct lib_ring_buffer_config *config,
- const char *name, void *priv, void *buf_addr,
- size_t subbuf_size,
+struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buffer_config *config,
+ const char *name,
+ void **priv_data,
+ size_t priv_data_align,
+ size_t priv_data_size,
+ void *priv_data_init,
+ void *buf_addr, size_t subbuf_size,
size_t num_subbuf, unsigned int switch_timer_interval,
- unsigned int read_timer_interval)
+ unsigned int read_timer_interval,
+ int **shm_fd, int **wait_fd, uint64_t **memory_map_size)
{
int ret, cpu;
- size_t shmsize;
+ size_t shmsize, chansize;
struct channel *chan;
- struct shm_handle *handle;
+ struct lttng_ust_shm_handle *handle;
struct shm_object *shmobj;
+ struct shm_ref *ref;
if (lib_ring_buffer_check_config(config, switch_timer_interval,
read_timer_interval))
return NULL;
- handle = zmalloc(sizeof(struct shm_handle));
+ handle = zmalloc(sizeof(struct lttng_ust_shm_handle));
if (!handle)
return NULL;
/* Calculate the shm allocation layout */
shmsize = sizeof(struct channel);
+ shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_shmp));
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
- shmsize += sizeof(struct lib_ring_buffer_shmp) * num_possible_cpus();
+ shmsize += sizeof(struct lttng_ust_lib_ring_buffer_shmp) * num_possible_cpus();
else
- shmsize += sizeof(struct lib_ring_buffer_shmp);
+ shmsize += sizeof(struct lttng_ust_lib_ring_buffer_shmp);
+ chansize = shmsize;
+ shmsize += offset_align(shmsize, priv_data_align);
+ shmsize += priv_data_size;
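+ /*
+ * Resulting shm layout: struct channel, then the array of buffer shmp
+ * references (per-cpu or single), then alignment padding, then the
+ * client private data area. chansize marks the end of the channel part,
+ * so zalloc_shm() below reserves exactly that much for it.
+ */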
shmobj = shm_object_table_append(handle->table, shmsize);
if (!shmobj)
goto error_append;
- set_shmp(handle->chan, zalloc_shm(shmobj, sizeof(struct channel)));
+ /* struct channel is at object 0, offset 0 (hardcoded) */
+ set_shmp(handle->chan, zalloc_shm(shmobj, chansize));
+ assert(handle->chan._ref.index == 0);
+ assert(handle->chan._ref.offset == 0);
chan = shmp(handle, handle->chan);
if (!chan)
goto error_append;
- ret = channel_backend_init(&chan->backend, name, config, priv,
+ /* space for private data */
+ if (priv_data_size) {
+ DECLARE_SHMP(void, priv_data_alloc);
+
+ align_shm(shmobj, priv_data_align);
+ chan->priv_data_offset = shmobj->allocated_len;
+ set_shmp(priv_data_alloc, zalloc_shm(shmobj, priv_data_size));
+ if (!shmp(handle, priv_data_alloc))
+ goto error_append;
+ *priv_data = channel_get_private(chan);
+ memcpy(*priv_data, priv_data_init, priv_data_size);
+ } else {
+ chan->priv_data_offset = -1;
+ *priv_data = NULL;
+ }
+
+ ret = channel_backend_init(&chan->backend, name, config,
subbuf_size, num_subbuf, handle);
if (ret)
goto error_backend_init;
* In that unlikely case, we need to allocate for all possible cpus.
*/
for_each_possible_cpu(cpu) {
- struct lib_ring_buffer *buf = shmp(handle, chan->backend.buf[cpu].shmp);
+ struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[cpu].shmp);
lib_ring_buffer_start_switch_timer(buf, handle);
lib_ring_buffer_start_read_timer(buf, handle);
}
} else {
- struct lib_ring_buffer *buf = shmp(handle, chan->backend.buf[0].shmp);
+ struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[0].shmp);
lib_ring_buffer_start_switch_timer(buf, handle);
lib_ring_buffer_start_read_timer(buf, handle);
}
-
+ ref = &handle->chan._ref;
+ shm_get_object_data(handle, ref, shm_fd, wait_fd, memory_map_size);
return handle;
error_backend_init:
return NULL;
}
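+/*
+ * Illustrative call (sketch, not part of this patch; client_config,
+ * subbuf_size, num_subbuf and the fd/size variables stand in for
+ * client-supplied values). Client private data now lives inside the shm
+ * area and is handed back through the priv_data output pointer:
+ *
+ *	struct client_priv init = { ... };
+ *	struct client_priv *priv;
+ *	int *shm_fd, *wait_fd;
+ *	uint64_t *memory_map_size;
+ *
+ *	handle = channel_create(&client_config, "chan", (void **) &priv,
+ *			__alignof__(init), sizeof(init), &init,
+ *			NULL, subbuf_size, num_subbuf, 0, 0,
+ *			&shm_fd, &wait_fd, &memory_map_size);
+ *
+ * channel_handle_create() below builds the consumer-side "shadow" handle:
+ * instead of allocating a new shm object, it maps the channel's existing
+ * object from the fds the consumer received.
+ */
+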
+struct lttng_ust_shm_handle *channel_handle_create(int shm_fd, int wait_fd,
+ uint64_t memory_map_size)
+{
+ struct lttng_ust_shm_handle *handle;
+ struct shm_object *object;
+
+ handle = zmalloc(sizeof(struct lttng_ust_shm_handle));
+ if (!handle)
+ return NULL;
+
+ /* Allocate table for channel + per-cpu buffers */
+ handle->table = shm_object_table_create(1 + num_possible_cpus());
+ if (!handle->table)
+ goto error_table_alloc;
+ /* Add channel object */
+ object = shm_object_table_append_shadow(handle->table,
+ shm_fd, wait_fd, memory_map_size);
+ if (!object)
+ goto error_table_object;
+ /* struct channel is at object 0, offset 0 (hardcoded) */
+ handle->chan._ref.index = 0;
+ handle->chan._ref.offset = 0;
+ return handle;
+
+error_table_object:
+ shm_object_table_destroy(handle->table);
+error_table_alloc:
+ free(handle);
+ return NULL;
+}
+
+int channel_handle_add_stream(struct lttng_ust_shm_handle *handle,
+ int shm_fd, int wait_fd, uint64_t memory_map_size)
+{
+ struct shm_object *object;
+
+ /* Add stream object */
+ object = shm_object_table_append_shadow(handle->table,
+ shm_fd, wait_fd, memory_map_size);
+ if (!object)
+ return -1;
+ return 0;
+}
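+/*
+ * Consumer-side sketch (hypothetical fd/size names): create the shadow
+ * handle from the channel object first, then attach one stream object
+ * per received fd pair:
+ *
+ *	handle = channel_handle_create(chan_fd, chan_wait_fd, chan_size);
+ *	for each stream:
+ *		channel_handle_add_stream(handle, fd, wait_fd, size);
+ */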
+
static
-void channel_release(struct channel *chan, struct shm_handle *handle)
+void channel_release(struct channel *chan, struct lttng_ust_shm_handle *handle,
+ int shadow)
{
- channel_free(chan, handle);
+ channel_free(chan, handle, shadow);
}
/**
* Call "destroy" callback, finalize channels, decrement the channel
* reference count. Note that when readers have completed data
* consumption of finalized channels, get_subbuf() will return -ENODATA.
- * They should release their handle at that point. Returns the private
- * data pointer.
+ * They should release their handle at that point.
*/
-void *channel_destroy(struct channel *chan, struct shm_handle *handle)
+void channel_destroy(struct channel *chan, struct lttng_ust_shm_handle *handle,
+ int shadow)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
- void *priv;
- int cpu;
+ if (shadow) {
+ channel_release(chan, handle, shadow);
+ return;
+ }
channel_unregister_notifiers(chan, handle);
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
- for_each_channel_cpu(cpu, chan) {
- struct lib_ring_buffer *buf = shmp(handle, chan->backend.buf[cpu].shmp);
-
- if (config->cb.buffer_finalize)
- config->cb.buffer_finalize(buf,
- chan->backend.priv,
- cpu, handle);
- if (buf->backend.allocated)
- lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH,
- handle);
- /*
- * Perform flush before writing to finalized.
- */
- cmm_smp_wmb();
- CMM_ACCESS_ONCE(buf->finalized) = 1;
- //wake_up_interruptible(&buf->read_wait);
- }
- } else {
- struct lib_ring_buffer *buf = shmp(handle, chan->backend.buf[0].shmp);
+ /*
+ * Note: the consumer takes care of finalizing and switching the
+ * buffers.
+ */
- if (config->cb.buffer_finalize)
- config->cb.buffer_finalize(buf, chan->backend.priv, -1, handle);
- if (buf->backend.allocated)
- lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH,
- handle);
- /*
- * Perform flush before writing to finalized.
- */
- cmm_smp_wmb();
- CMM_ACCESS_ONCE(buf->finalized) = 1;
- //wake_up_interruptible(&buf->read_wait);
- }
- CMM_ACCESS_ONCE(chan->finalized) = 1;
- //wake_up_interruptible(&chan->hp_wait);
- //wake_up_interruptible(&chan->read_wait);
/*
* sessiond/consumer are keeping a reference on the shm file
* descriptor directly. No need to refcount.
*/
- priv = chan->backend.priv;
- channel_release(chan, handle);
- return priv;
+ channel_release(chan, handle, shadow);
+ return;
}
-struct lib_ring_buffer *channel_get_ring_buffer(
- const struct lib_ring_buffer_config *config,
+struct lttng_ust_lib_ring_buffer *channel_get_ring_buffer(
+ const struct lttng_ust_lib_ring_buffer_config *config,
struct channel *chan, int cpu,
- struct shm_handle *handle)
+ struct lttng_ust_shm_handle *handle,
+ int **shm_fd, int **wait_fd,
+ uint64_t **memory_map_size)
{
- if (config->alloc == RING_BUFFER_ALLOC_GLOBAL)
+ struct shm_ref *ref;
+
+ if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
+ ref = &chan->backend.buf[0].shmp._ref;
+ shm_get_object_data(handle, ref, shm_fd, wait_fd,
+ memory_map_size);
return shmp(handle, chan->backend.buf[0].shmp);
- else
+ } else {
+ if (cpu >= num_possible_cpus())
+ return NULL;
+ ref = &chan->backend.buf[cpu].shmp._ref;
+ shm_get_object_data(handle, ref, shm_fd, wait_fd,
+ memory_map_size);
return shmp(handle, chan->backend.buf[cpu].shmp);
+ }
}
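+/*
+ * active_readers/active_shadow_readers act as single-reader try-locks:
+ * the cmpxchg in lib_ring_buffer_open_read() admits at most one reader of
+ * each kind, and the paired cmm_smp_mb() orders the reader's buffer
+ * accesses after acquisition and before release.
+ */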
-int lib_ring_buffer_open_read(struct lib_ring_buffer *buf,
- struct shm_handle *handle)
+int lib_ring_buffer_open_read(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle,
+ int shadow)
{
- struct channel *chan = shmp(handle, buf->backend.chan);
-
+ if (shadow) {
+ if (uatomic_cmpxchg(&buf->active_shadow_readers, 0, 1) != 0)
+ return -EBUSY;
+ cmm_smp_mb();
+ return 0;
+ }
if (uatomic_cmpxchg(&buf->active_readers, 0, 1) != 0)
return -EBUSY;
cmm_smp_mb();
return 0;
}
-void lib_ring_buffer_release_read(struct lib_ring_buffer *buf,
- struct shm_handle *handle)
+void lib_ring_buffer_release_read(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle,
+ int shadow)
{
struct channel *chan = shmp(handle, buf->backend.chan);
+ if (shadow) {
+ CHAN_WARN_ON(chan, uatomic_read(&buf->active_shadow_readers) != 1);
+ cmm_smp_mb();
+ uatomic_dec(&buf->active_shadow_readers);
+ return;
+ }
CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
cmm_smp_mb();
uatomic_dec(&buf->active_readers);
* data to read at consumed position, or 0 if the get operation succeeds.
*/
-int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
+int lib_ring_buffer_snapshot(struct lttng_ust_lib_ring_buffer *buf,
unsigned long *consumed, unsigned long *produced,
- struct shm_handle *handle)
+ struct lttng_ust_shm_handle *handle)
{
struct channel *chan = shmp(handle, buf->backend.chan);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long consumed_cur, write_offset;
int finalized;
* @buf: ring buffer
* @consumed_new: new consumed count value
*/
-void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
+void lib_ring_buffer_move_consumer(struct lttng_ust_lib_ring_buffer *buf,
unsigned long consumed_new,
- struct shm_handle *handle)
+ struct lttng_ust_shm_handle *handle)
{
- struct lib_ring_buffer_backend *bufb = &buf->backend;
+ struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
struct channel *chan = shmp(handle, bufb->chan);
unsigned long consumed;
- CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
+ CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1
+ && uatomic_read(&buf->active_shadow_readers) != 1);
/*
* Only push the consumed value forward.
* Returns -ENODATA if buffer is finalized, -EAGAIN if there is currently no
* data to read at consumed position, or 0 if the get operation succeeds.
*/
-int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
+int lib_ring_buffer_get_subbuf(struct lttng_ust_lib_ring_buffer *buf,
unsigned long consumed,
- struct shm_handle *handle)
+ struct lttng_ust_shm_handle *handle)
{
struct channel *chan = shmp(handle, buf->backend.chan);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long consumed_cur, consumed_idx, commit_count, write_offset;
int ret;
int finalized;
* lib_ring_buffer_put_subbuf - release exclusive subbuffer access
* @buf: ring buffer
*/
-void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf,
- struct shm_handle *handle)
+void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle)
{
- struct lib_ring_buffer_backend *bufb = &buf->backend;
+ struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
struct channel *chan = shmp(handle, bufb->chan);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long read_sb_bindex, consumed_idx, consumed;
- CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
+ CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1
+ && uatomic_read(&buf->active_shadow_readers) != 1);
if (!buf->get_subbuf) {
/*
* position and the writer position. (inclusive)
*/
static
-void lib_ring_buffer_print_subbuffer_errors(struct lib_ring_buffer *buf,
+void lib_ring_buffer_print_subbuffer_errors(struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
unsigned long cons_offset,
int cpu,
- struct shm_handle *handle)
+ struct lttng_ust_shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long cons_idx, commit_count, commit_count_sb;
cons_idx = subbuf_index(cons_offset, chan);
commit_count_sb = v_read(config, &shmp_index(handle, buf->commit_cold, cons_idx)->cc_sb);
if (subbuf_offset(commit_count, chan) != 0)
- ERRMSG("ring buffer %s, cpu %d: "
+ DBG("ring buffer %s, cpu %d: "
"commit count in subbuffer %lu,\n"
"expecting multiples of %lu bytes\n"
" [ %lu bytes committed, %lu bytes reader-visible ]\n",
chan->backend.subbuf_size,
commit_count, commit_count_sb);
- ERRMSG("ring buffer: %s, cpu %d: %lu bytes committed\n",
+ DBG("ring buffer: %s, cpu %d: %lu bytes committed\n",
chan->backend.name, cpu, commit_count);
}
static
-void lib_ring_buffer_print_buffer_errors(struct lib_ring_buffer *buf,
+void lib_ring_buffer_print_buffer_errors(struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
void *priv, int cpu,
- struct shm_handle *handle)
+ struct lttng_ust_shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long write_offset, cons_offset;
- /*
- * Can be called in the error path of allocation when
- * trans_channel_data is not yet set.
- */
- if (!chan)
- return;
/*
* No need to order commit_count, write_offset and cons_offset reads
* because we execute at teardown when no more writer nor reader
write_offset = v_read(config, &buf->offset);
cons_offset = uatomic_read(&buf->consumed);
if (write_offset != cons_offset)
- ERRMSG("ring buffer %s, cpu %d: "
+ DBG("ring buffer %s, cpu %d: "
"non-consumed data\n"
" [ %lu bytes written, %lu bytes read ]\n",
chan->backend.name, cpu, write_offset, cons_offset);
static
void lib_ring_buffer_print_errors(struct channel *chan,
- struct lib_ring_buffer *buf, int cpu,
- struct shm_handle *handle)
+ struct lttng_ust_lib_ring_buffer *buf, int cpu,
+ struct lttng_ust_shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
- void *priv = chan->backend.priv;
-
- ERRMSG("ring buffer %s, cpu %d: %lu records written, "
- "%lu records overrun\n",
- chan->backend.name, cpu,
- v_read(config, &buf->records_count),
- v_read(config, &buf->records_overrun));
-
- if (v_read(config, &buf->records_lost_full)
- || v_read(config, &buf->records_lost_wrap)
- || v_read(config, &buf->records_lost_big))
- ERRMSG("ring buffer %s, cpu %d: records were lost. Caused by:\n"
- " [ %lu buffer full, %lu nest buffer wrap-around, "
- "%lu event too big ]\n",
- chan->backend.name, cpu,
- v_read(config, &buf->records_lost_full),
- v_read(config, &buf->records_lost_wrap),
- v_read(config, &buf->records_lost_big));
-
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ void *priv = channel_get_private(chan);
+
+ if (!strcmp(chan->backend.name, "relay-metadata-mmap")) {
+ DBG("ring buffer %s: %lu records written, "
+ "%lu records overrun\n",
+ chan->backend.name,
+ v_read(config, &buf->records_count),
+ v_read(config, &buf->records_overrun));
+ } else {
+ DBG("ring buffer %s, cpu %d: %lu records written, "
+ "%lu records overrun\n",
+ chan->backend.name, cpu,
+ v_read(config, &buf->records_count),
+ v_read(config, &buf->records_overrun));
+
+ if (v_read(config, &buf->records_lost_full)
+ || v_read(config, &buf->records_lost_wrap)
+ || v_read(config, &buf->records_lost_big))
+ DBG("ring buffer %s, cpu %d: records were lost. Caused by:\n"
+ " [ %lu buffer full, %lu nest buffer wrap-around, "
+ "%lu event too big ]\n",
+ chan->backend.name, cpu,
+ v_read(config, &buf->records_lost_full),
+ v_read(config, &buf->records_lost_wrap),
+ v_read(config, &buf->records_lost_big));
+ }
lib_ring_buffer_print_buffer_errors(buf, chan, priv, cpu, handle);
}
* Only executed when the buffer is finalized, in SWITCH_FLUSH.
*/
static
-void lib_ring_buffer_switch_old_start(struct lib_ring_buffer *buf,
+void lib_ring_buffer_switch_old_start(struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
struct switch_offsets *offsets,
- u64 tsc,
- struct shm_handle *handle)
+ uint64_t tsc,
+ struct lttng_ust_shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long oldidx = subbuf_index(offsets->old, chan);
unsigned long commit_count;
* subbuffer.
*/
static
-void lib_ring_buffer_switch_old_end(struct lib_ring_buffer *buf,
+void lib_ring_buffer_switch_old_end(struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
struct switch_offsets *offsets,
- u64 tsc,
- struct shm_handle *handle)
+ uint64_t tsc,
+ struct lttng_ust_shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long oldidx = subbuf_index(offsets->old - 1, chan);
unsigned long commit_count, padding_size, data_size;
* that this code is executed before the deliver of this sub-buffer.
*/
static
-void lib_ring_buffer_switch_new_start(struct lib_ring_buffer *buf,
+void lib_ring_buffer_switch_new_start(struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
struct switch_offsets *offsets,
- u64 tsc,
- struct shm_handle *handle)
+ uint64_t tsc,
+ struct lttng_ust_shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long beginidx = subbuf_index(offsets->begin, chan);
unsigned long commit_count;
* have to do the deliver themselves.
*/
static
-void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf,
+void lib_ring_buffer_switch_new_end(struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
struct switch_offsets *offsets,
- u64 tsc,
- struct shm_handle *handle)
+ uint64_t tsc,
+ struct lttng_ust_shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long endidx = subbuf_index(offsets->end - 1, chan);
unsigned long commit_count, padding_size, data_size;
*/
static
int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
- struct lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
struct switch_offsets *offsets,
- u64 *tsc)
+ uint64_t *tsc)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long off;
offsets->begin = v_read(config, &buf->offset);
* quiescence guarantees for the fusion merge.
*/
if (mode == SWITCH_FLUSH || off > 0) {
- if (unlikely(off == 0)) {
+ if (caa_unlikely(off == 0)) {
/*
* The client does not save any header information.
* Don't switch empty subbuffer on finalize, because it
* operations, this function must be called from the CPU which owns the buffer
* for an ACTIVE flush.
*/
-void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf, enum switch_mode mode,
- struct shm_handle *handle)
+void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
+ struct lttng_ust_shm_handle *handle)
{
struct channel *chan = shmp(handle, buf->backend.chan);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
struct switch_offsets offsets;
unsigned long oldidx;
- u64 tsc;
+ uint64_t tsc;
offsets.size = 0;
* -EIO if data cannot be written into the buffer for any other reason.
*/
static
-int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
+int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
struct switch_offsets *offsets,
- struct lib_ring_buffer_ctx *ctx)
+ struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
- struct shm_handle *handle = ctx->handle;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_ust_shm_handle *handle = ctx->handle;
unsigned long reserve_commit_diff;
offsets->begin = v_read(config, &buf->offset);
if (last_tsc_overflow(config, buf, ctx->tsc))
ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
- if (unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
+ if (caa_unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
offsets->switch_new_start = 1; /* For offsets->begin */
} else {
offsets->size = config->cb.record_header_size(config, chan,
lib_ring_buffer_align(offsets->begin + offsets->size,
ctx->largest_align)
+ ctx->data_size;
- if (unlikely(subbuf_offset(offsets->begin, chan) +
+ if (caa_unlikely(subbuf_offset(offsets->begin, chan) +
offsets->size > chan->backend.subbuf_size)) {
offsets->switch_old_end = 1; /* For offsets->old */
offsets->switch_new_start = 1; /* For offsets->begin */
}
}
- if (unlikely(offsets->switch_new_start)) {
+ if (caa_unlikely(offsets->switch_new_start)) {
unsigned long sb_index;
/*
* We are typically not filling the previous buffer completely.
*/
- if (likely(offsets->switch_old_end))
+ if (caa_likely(offsets->switch_old_end))
offsets->begin = subbuf_align(offsets->begin, chan);
offsets->begin = offsets->begin
+ config->cb.subbuffer_header_size();
- ((unsigned long) v_read(config,
&shmp_index(handle, buf->commit_cold, sb_index)->cc_sb)
& chan->commit_count_mask);
- if (likely(reserve_commit_diff == 0)) {
+ if (caa_likely(reserve_commit_diff == 0)) {
/* Next subbuffer not being written to. */
- if (unlikely(config->mode != RING_BUFFER_OVERWRITE &&
+ if (caa_unlikely(config->mode != RING_BUFFER_OVERWRITE &&
subbuf_trunc(offsets->begin, chan)
- subbuf_trunc((unsigned long)
uatomic_read(&buf->consumed), chan)
>= chan->backend.buf_size)) {
+ unsigned long nr_lost;
+
/*
* We do not overwrite non-consumed buffers
* and we are full: record is lost.
*/
+ nr_lost = v_read(config, &buf->records_lost_full);
v_inc(config, &buf->records_lost_full);
+ if ((nr_lost & (DBG_PRINT_NR_LOST - 1)) == 0) {
+ DBG("%lu or more records lost in (%s:%d) (buffer full)\n",
+ nr_lost + 1, chan->backend.name,
+ buf->backend.cpu);
+ }
return -ENOBUFS;
} else {
/*
*/
}
} else {
+ unsigned long nr_lost;
+
/*
* Next subbuffer reserve offset does not match the
* commit offset. Drop record in producer-consumer and
* overwrite mode. Caused by either a writer OOPS or too
* many nested writes over a reserve/commit pair.
*/
+ nr_lost = v_read(config, &buf->records_lost_wrap);
v_inc(config, &buf->records_lost_wrap);
+ if ((nr_lost & (DBG_PRINT_NR_LOST - 1)) == 0) {
+ DBG("%lu or more records lost in (%s:%d) (wrap-around)\n",
+ nr_lost + 1, chan->backend.name,
+ buf->backend.cpu);
+ }
return -EIO;
}
offsets->size =
lib_ring_buffer_align(offsets->begin + offsets->size,
ctx->largest_align)
+ ctx->data_size;
- if (unlikely(subbuf_offset(offsets->begin, chan)
+ if (caa_unlikely(subbuf_offset(offsets->begin, chan)
+ offsets->size > chan->backend.subbuf_size)) {
+ unsigned long nr_lost;
+
/*
* Record too big for subbuffers, report error, don't
* complete the sub-buffer switch.
*/
+ nr_lost = v_read(config, &buf->records_lost_big);
v_inc(config, &buf->records_lost_big);
+ if ((nr_lost & (DBG_PRINT_NR_LOST - 1)) == 0) {
+ DBG("%lu or more records lost in (%s:%d) record size "
+ " of %zu bytes is too large for buffer\n",
+ nr_lost + 1, chan->backend.name,
+ buf->backend.cpu, offsets->size);
+ }
return -ENOSPC;
} else {
/*
}
offsets->end = offsets->begin + offsets->size;
- if (unlikely(subbuf_offset(offsets->end, chan) == 0)) {
+ if (caa_unlikely(subbuf_offset(offsets->end, chan) == 0)) {
/*
* The offset_end will fall at the very beginning of the next
* subbuffer.
* -EIO for other errors, else returns 0.
* It will take care of sub-buffer switching.
*/
-int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx)
+int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
struct channel *chan = ctx->chan;
- struct shm_handle *handle = ctx->handle;
- const struct lib_ring_buffer_config *config = chan->backend.config;
- struct lib_ring_buffer *buf;
+ struct lttng_ust_shm_handle *handle = ctx->handle;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_ust_lib_ring_buffer *buf;
struct switch_offsets offsets;
int ret;
do {
ret = lib_ring_buffer_try_reserve_slow(buf, chan, &offsets,
ctx);
- if (unlikely(ret))
+ if (caa_unlikely(ret))
return ret;
- } while (unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
+ } while (caa_unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
offsets.end)
!= offsets.old));
/*
* Switch old subbuffer if needed.
*/
- if (unlikely(offsets.switch_old_end)) {
+ if (caa_unlikely(offsets.switch_old_end)) {
lib_ring_buffer_clear_noref(config, &buf->backend,
subbuf_index(offsets.old - 1, chan),
handle);
/*
* Populate new subbuffer.
*/
- if (unlikely(offsets.switch_new_start))
+ if (caa_unlikely(offsets.switch_new_start))
lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc, handle);
- if (unlikely(offsets.switch_new_end))
+ if (caa_unlikely(offsets.switch_new_end))
lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc, handle);
ctx->slot_size = offsets.size;
ctx->buf_offset = offsets.begin + offsets.pre_header_padding;
return 0;
}
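+/*
+ * Note on the retry loop above: whenever the v_cmpxchg() on buf->offset
+ * loses a race with a concurrent reserver, the whole offset computation
+ * in lib_ring_buffer_try_reserve_slow() is redone; that path performs no
+ * shared writes on success, only bumping the records_lost_* counters on
+ * its terminal error returns.
+ */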
+
+/*
+ * Force a read of the TLS variables (implying the TLS fixup needed for dlopen'd libraries).
+ */
+void lttng_fixup_ringbuffer_tls(void)
+{
+ asm volatile ("" : : "m" (lib_ring_buffer_nesting));
+}
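+/*
+ * Rationale (background, not from this patch): first access to a __thread
+ * variable from a dlopen()ed library may trigger a lazy TLS allocation in
+ * libc. Calling the fixup early forces that allocation up front, keeping
+ * it out of the tracing fast path; the "m" constraint prevents the
+ * compiler from optimizing the read away.
+ */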