*/
#include <linux/module.h>
+#include <linux/types.h>
+#include "lib/bitfield.h"
+#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
+#include "wrapper/trace-clock.h"
+#include "ltt-events.h"
#include "ltt-tracer.h"
+#include "wrapper/ringbuffer/frontend_types.h"
-struct ring_buffer_priv {
- struct dentry *dentry;
-};
+/*
+ * Keep the natural field alignment for _each field_ within this structure if
+ * you ever add/remove a field from this header. Packed attribute is not used
+ * because gcc generates poor code on at least powerpc and mips. Don't ever
+ * let gcc add padding between the structure elements.
+ *
+ * The guarantee we have with timestamps is that all the events in a
+ * packet are included (inclusive) within the begin/end timestamps of
+ * the packet. Another guarantee we have is that the "timestamp begin",
+ * as well as the event timestamps, are monotonically increasing (never
+ * decrease) when moving forward in a stream (physically). But this
+ * guarantee does not apply to "timestamp end", because it is sampled at
+ * commit time, which is not ordered with respect to space reservation.
+ */
-struct channel_priv {
- struct ltt_trace *trace;
- struct ring_buffer_priv *buf;
+struct packet_header {
+ /* Trace packet header */
+ uint32_t magic; /*
+ * Trace magic number.
+ * contains endianness information.
+ */
+ uint8_t uuid[16];
+ uint32_t stream_id;
+
+ struct {
+ /* Stream packet context */
+ uint64_t timestamp_begin; /* Cycle count at subbuffer start */
+ uint64_t timestamp_end; /* Cycle count at subbuffer end */
+ uint32_t events_discarded; /*
+ * Events lost in this subbuffer since
+ * the beginning of the trace.
+ * (may overflow)
+ */
+ uint32_t content_size; /* Size of data in subbuffer */
+ uint32_t packet_size; /* Subbuffer size (include padding) */
+ uint32_t cpu_id; /* CPU id associated with stream */
+ uint8_t header_end; /* End of header */
+ } ctx;
};
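+
+/*
+ * With the padding-free natural layout requested above (assuming the
+ * compiler honors it), the fields land at byte offsets: magic @0,
+ * uuid @4, stream_id @20, timestamp_begin @24, timestamp_end @32,
+ * events_discarded @40, content_size @44, packet_size @48, cpu_id @52
+ * and header_end @56, which is the value offsetof(struct packet_header,
+ * ctx.header_end) yields in client_packet_header_size() below.
+ */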
+
+static inline notrace u64 lib_ring_buffer_clock_read(struct channel *chan)
+{
+ return trace_clock_read64();
+}
+
+static inline
+size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx)
+{
+ int i;
+ size_t orig_offset = offset;
+
+ if (likely(!ctx))
+ return 0;
+ for (i = 0; i < ctx->nr_fields; i++)
+ offset += ctx->fields[i].get_size(offset);
+ return offset - orig_offset;
+}
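+
+/*
+ * Each field's get_size(offset) callback is expected to return its own
+ * padded size, alignment included: e.g. a uint64_t context field queried
+ * at offset 2 would report 6 bytes of alignment padding plus 8 bytes of
+ * payload, i.e. 14.
+ */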
+
+static inline
+void ctx_record(struct lib_ring_buffer_ctx *bufctx,
+ struct ltt_channel *chan,
+ struct lttng_ctx *ctx)
+{
+ int i;
+
+ if (likely(!ctx))
+ return;
+ for (i = 0; i < ctx->nr_fields; i++)
+ ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
+}
+
+/*
+ * record_header_size - Calculate the header size and padding necessary.
+ * @config: ring buffer instance configuration
+ * @chan: channel
+ * @offset: offset in the write buffer
+ * @pre_header_padding: padding to add before the header (output)
+ * @ctx: reservation context
+ *
+ * Returns the event header size (including padding).
+ *
+ * The payload must itself determine its own alignment from the biggest type it
+ * contains.
+ */
+static __inline__
+unsigned char record_header_size(const struct lib_ring_buffer_config *config,
+ struct channel *chan, size_t offset,
+ size_t *pre_header_padding,
+ struct lib_ring_buffer_ctx *ctx)
+{
+ struct ltt_channel *ltt_chan = channel_get_private(chan);
+ struct ltt_event *event = ctx->priv;
+ size_t orig_offset = offset;
+ size_t padding;
+
+ switch (ltt_chan->header_type) {
+ case 1: /* compact */
+ padding = lib_ring_buffer_align(offset, ltt_alignof(uint32_t));
+ offset += padding;
+ if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
+ offset += sizeof(uint32_t); /* id and timestamp */
+ } else {
+ /* Minimum space taken by 5-bit id */
+ offset += sizeof(uint8_t);
+ /* Align extended struct on largest member */
+ offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
+ offset += sizeof(uint32_t); /* id */
+ offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
+ offset += sizeof(uint64_t); /* timestamp */
+ }
+ break;
+ case 2: /* large */
+ padding = lib_ring_buffer_align(offset, ltt_alignof(uint16_t));
+ offset += padding;
+ offset += sizeof(uint16_t);
+ if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
+ offset += lib_ring_buffer_align(offset, ltt_alignof(uint32_t));
+ offset += sizeof(uint32_t); /* timestamp */
+ } else {
+ /* Align extended struct on largest member */
+ offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
+ offset += sizeof(uint32_t); /* id */
+ offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
+ offset += sizeof(uint64_t); /* timestamp */
+ }
+ break;
+ default:
+ padding = 0;
+ WARN_ON_ONCE(1);
+ }
+ offset += ctx_get_size(offset, event->ctx);
+ offset += ctx_get_size(offset, ltt_chan->ctx);
+
+ *pre_header_padding = padding;
+ return offset - orig_offset;
+}
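+
+/*
+ * Worked example, assuming natural alignment and no context fields:
+ * a compact record reserved at offset 0 with no rflags set needs
+ * 4 bytes (id and timestamp share one uint32_t). With
+ * LTT_RFLAG_EXTENDED set it needs 1 (5-bit id) + 7 (pad to 8)
+ * + 4 (32-bit id) + 4 (pad to 8) + 8 (64-bit timestamp) = 24 bytes.
+ */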
+
+#include "wrapper/ringbuffer/api.h"
+
+static
+void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
+ struct lib_ring_buffer_ctx *ctx,
+ uint32_t event_id);
+
+/*
+ * ltt_write_event_header
+ *
+ * Writes the event header to the offset (already aligned on 32 bits).
+ *
+ * @config: ring buffer instance configuration
+ * @ctx: reservation context
+ * @event_id: event ID
+ */
+static __inline__
+void ltt_write_event_header(const struct lib_ring_buffer_config *config,
+ struct lib_ring_buffer_ctx *ctx,
+ uint32_t event_id)
+{
+ struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
+ struct ltt_event *event = ctx->priv;
+
+ if (unlikely(ctx->rflags))
+ goto slow_path;
+
+ switch (ltt_chan->header_type) {
+ case 1: /* compact */
+ {
+ uint32_t id_time = 0;
+
+ bt_bitfield_write(&id_time, uint32_t, 0, 5, event_id);
+ bt_bitfield_write(&id_time, uint32_t, 5, 27, ctx->tsc);
+ lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
+ break;
+ }
+ case 2: /* large */
+ {
+ uint32_t timestamp = (uint32_t) ctx->tsc;
+ uint16_t id = event_id;
+
+ lib_ring_buffer_write(config, ctx, &id, sizeof(id));
+ lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint32_t));
+ lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
+ break;
+ }
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+ ctx_record(ctx, ltt_chan, ltt_chan->ctx);
+ ctx_record(ctx, ltt_chan, event->ctx);
+
+ return;
+
+slow_path:
+ ltt_write_event_header_slow(config, ctx, event_id);
+}
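+
+/*
+ * In the compact fast path above, bt_bitfield_write() packs the event
+ * ID into bits 0-4 and the low 27 bits of the timestamp into bits
+ * 5-31 of a single 32-bit word, so e.g. event_id 3 with tsc 0x1234
+ * stores (0x1234 << 5) | 3 on an LSB-first bitfield layout (the exact
+ * bit order is defined by lib/bitfield.h).
+ */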
+
+static
+void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
+ struct lib_ring_buffer_ctx *ctx,
+ uint32_t event_id)
+{
+ struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
+ struct ltt_event *event = ctx->priv;
+
+ switch (ltt_chan->header_type) {
+ case 1: /* compact */
+ if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
+ uint32_t id_time = 0;
+
+ bt_bitfield_write(&id_time, uint32_t, 0, 5, event_id);
+ bt_bitfield_write(&id_time, uint32_t, 5, 27, ctx->tsc);
+ lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
+ } else {
+ uint8_t id = 0;
+ uint64_t timestamp = ctx->tsc;
+
+ bt_bitfield_write(&id, uint8_t, 0, 5, 31);
+ lib_ring_buffer_write(config, ctx, &id, sizeof(id));
+ /* Align extended struct on largest member */
+ lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
+ lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
+ lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
+ lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
+ }
+ break;
+ case 2: /* large */
+ {
+ if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
+ uint32_t timestamp = (uint32_t) ctx->tsc;
+ uint16_t id = event_id;
+
+ lib_ring_buffer_write(config, ctx, &id, sizeof(id));
+ lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint32_t));
+ lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
+ } else {
+ uint16_t id = 65535;
+ uint64_t timestamp = ctx->tsc;
+
+ lib_ring_buffer_write(config, ctx, &id, sizeof(id));
+ /* Align extended struct on largest member */
+ lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
+ lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
+ lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
+ lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
+ }
+ break;
+ }
+ default:
+ WARN_ON_ONCE(1);
+ }
+ ctx_record(ctx, ltt_chan, ltt_chan->ctx);
+ ctx_record(ctx, ltt_chan, event->ctx);
+}
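+
+/*
+ * The reserved IDs written above (31 for compact, 65535 for large)
+ * act as escape codes telling the reader that the full 32-bit event
+ * ID and 64-bit timestamp follow in the extended fields;
+ * ltt_event_reserve() below sets LTT_RFLAG_EXTENDED for any event ID
+ * that would collide with them.
+ */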
+
static const struct lib_ring_buffer_config client_config;
static u64 client_ring_buffer_clock_read(struct channel *chan)
{
	return lib_ring_buffer_clock_read(chan);
}
+static
size_t client_record_header_size(const struct lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
- size_t data_size,
size_t *pre_header_padding,
- unsigned int rflags,
struct lib_ring_buffer_ctx *ctx)
{
- return record_header_size(config, chan, offset, data_size,
- pre_header_padding, rflags, ctx);
+ return record_header_size(config, chan, offset,
+ pre_header_padding, ctx);
}
/**
- * client_subbuffer_header_size - called on buffer-switch to a new sub-buffer
+ * client_packet_header_size - called on buffer-switch to a new sub-buffer
*
* Return header size without padding after the structure. Don't use packed
* structure because gcc generates inefficient code on some architectures
 * (powerpc, mips, ...).
*/
-static size_t client_subbuffer_header_size(void)
+static size_t client_packet_header_size(void)
{
- return offsetof(struct subbuffer_header, header_end);
+ return offsetof(struct packet_header, ctx.header_end);
}
static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
unsigned int subbuf_idx)
{
struct channel *chan = buf->backend.chan;
- struct subbuffer_header *header =
- (struct subbuffer_header *)
+ struct packet_header *header =
+ (struct packet_header *)
lib_ring_buffer_offset_address(&buf->backend,
subbuf_idx * chan->backend.subbuf_size);
-
- header->cycle_count_begin = tsc;
- header->data_size = 0xFFFFFFFF; /* for debugging */
- write_trace_header(chan->backend.priv, header);
+ struct ltt_channel *ltt_chan = channel_get_private(chan);
+ struct ltt_session *session = ltt_chan->session;
+
+ header->magic = CTF_MAGIC_NUMBER;
+ memcpy(header->uuid, session->uuid.b, sizeof(session->uuid));
+ header->stream_id = ltt_chan->id;
+ header->ctx.timestamp_begin = tsc;
+ header->ctx.timestamp_end = 0;
+ header->ctx.events_discarded = 0;
+ header->ctx.content_size = 0xFFFFFFFF; /* for debugging */
+ header->ctx.packet_size = 0xFFFFFFFF;
+ header->ctx.cpu_id = buf->backend.cpu;
}
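+
+/*
+ * CTF_MAGIC_NUMBER (0xC1FC1FC1) is written in native byte order; a
+ * trace reader compares it against the byte-swapped variant to detect
+ * the producer's endianness, which is the "endianness information"
+ * the packet_header comment refers to.
+ */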
/*
 * offset is assumed to never be 0 here: never deliver a completely empty
 * subbuffer. data_size is between 1 and subbuf_size.
 */
static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
			      unsigned int subbuf_idx, unsigned long data_size)
{
struct channel *chan = buf->backend.chan;
- struct subbuffer_header *header =
- (struct subbuffer_header *)
+ struct packet_header *header =
+ (struct packet_header *)
lib_ring_buffer_offset_address(&buf->backend,
subbuf_idx * chan->backend.subbuf_size);
unsigned long records_lost = 0;
- header->data_size = data_size;
- header->subbuf_size = PAGE_ALIGN(data_size);
- header->cycle_count_end = tsc;
+ header->ctx.timestamp_end = tsc;
+ header->ctx.content_size = data_size * CHAR_BIT; /* in bits */
+ header->ctx.packet_size = PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
- header->events_lost = records_lost;
- header->subbuf_corrupt = 0; /* deprecated */
+ header->ctx.events_discarded = records_lost;
}
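+
+/*
+ * For example, with 4 kB pages a sub-buffer holding 3000 bytes of
+ * event data gets content_size = 3000 * CHAR_BIT = 24000 bits and
+ * packet_size = PAGE_ALIGN(3000) * CHAR_BIT = 32768 bits; the reader
+ * skips the trailing 1096 bytes as padding.
+ */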
static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
int cpu, const char *name)
{
- struct channel_priv *chan_priv = priv;
- struct ring_buffer_priv *buf_priv;
- struct dentry *trace_dentry;
- char *tmpname;
- int ret = 0;
-
- if (client_config.alloc == RING_BUFFER_ALLOC_PER_CPU)
- buf_priv = per_cpu_ptr(chan_priv->buf, cpu);
- else
- buf_priv = chan_priv->buf;
-
- tmpname = kzalloc(NAME_MAX + 1, GFP_KERNEL);
- if (!tmpname) {
- ret = -ENOMEM;
- goto end;
- }
-
- snprintf(tmpname, NAME_MAX, "%s%s_%d",
- (client_config.mode == RING_BUFFER_OVERWRITE) ? "flight-" : "",
- name, cpu);
-
- trace_dentry = chan_priv->trace->dentry.trace_root;
- buf_priv->dentry = debugfs_create_file(tmpname, S_IRUSR, trace_dentry,
- buf,
- &lib_ring_buffer_file_operations);
- if (!buf_priv->dentry) {
- ret = -ENOMEM;
- goto free_name;
- }
-free_name:
- kfree(tmpname);
-end:
- return ret;
+ return 0;
}
static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
{
- struct channel_priv *chan_priv = priv;
- struct lib_ring_buffer_priv *buf_priv;
-
- if (client_config.alloc == RING_BUFFER_ALLOC_PER_CPU)
- buf_priv = per_cpu_ptr(chan_priv->buf, cpu);
- else
- buf_priv = chan_priv->buf;
-
- debugfs_remove(buf_priv->dentry);
}
static const struct lib_ring_buffer_config client_config = {
.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
.cb.record_header_size = client_record_header_size,
- .cb.subbuffer_header_size = client_subbuffer_header_size,
+ .cb.subbuffer_header_size = client_packet_header_size,
.cb.buffer_begin = client_buffer_begin,
.cb.buffer_end = client_buffer_end,
.cb.buffer_create = client_buffer_create,
.alloc = RING_BUFFER_ALLOC_PER_CPU,
.sync = RING_BUFFER_SYNC_PER_CPU,
.mode = RING_BUFFER_MODE_TEMPLATE,
-#ifdef RING_BUFFER_ALIGN
- .align = RING_BUFFER_NATURAL,
-#else
- .align = RING_BUFFER_PACKED,
-#endif
.backend = RING_BUFFER_PAGE,
- .output = RING_BUFFER_SPLICE,
+ .output = RING_BUFFER_OUTPUT_TEMPLATE,
.oops = RING_BUFFER_OOPS_CONSISTENCY,
.ipi = RING_BUFFER_IPI_BARRIER,
.wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
};
-struct channel *ltt_channel_create(const char *name, struct ltt_trace *trace,
- void *buf_addr,
- size_t subbuf_size, size_t num_subbuf,
- unsigned int switch_timer_interval,
- unsigned int read_timer_interval)
+static
+struct channel *_channel_create(const char *name,
+ struct ltt_channel *ltt_chan, void *buf_addr,
+ size_t subbuf_size, size_t num_subbuf,
+ unsigned int switch_timer_interval,
+ unsigned int read_timer_interval)
{
- struct channel *chan;
- struct chan_priv *chan_priv;
-
- chan_priv = kzalloc(sizeof(struct chan_priv), GFP_KERNEL);
- if (!chan_priv)
- return NULL;
- if (client_config.alloc == RING_BUFFER_ALLOC_PER_CPU) {
- chan_priv->buf = alloc_percpu(struct lib_ring_buffer_priv);
- memset(chan_priv->buf, 0, sizeof(*chan_priv->buf));
- } else
- chan_priv->buf = kzalloc(sizeof(*chan_priv->buf), GFP_KERNEL);
- if (!chan_priv->buf)
- goto free_chan_priv;
- chan_priv->trace = trace;
- chan = channel_create(&client_config, name, chan_priv, buf_addr,
+ return channel_create(&client_config, name, ltt_chan, buf_addr,
subbuf_size, num_subbuf, switch_timer_interval,
read_timer_interval);
- if (!chan)
- goto free_buf_priv;
- return chan;
-
-free_buf_priv:
- if (client_config.alloc == RING_BUFFER_ALLOC_PER_CPU)
- free_percpu(chan_priv->buf);
- else
- kfree(chan_priv->buf);
-free_chan_priv:
- kfree(chan_priv);
- return NULL;
}
+static
void ltt_channel_destroy(struct channel *chan)
{
- struct chan_priv *chan_priv = channel_get_private(chan);
-
channel_destroy(chan);
- if (client_config.alloc == RING_BUFFER_ALLOC_PER_CPU)
- free_percpu(chan_priv->buf);
- else
- kfree(chan_priv->buf);
- kfree(chan_priv);
}
-static void ltt_relay_remove_dirs(struct ltt_trace *trace)
+static
+struct lib_ring_buffer *ltt_buffer_read_open(struct channel *chan)
{
- debugfs_remove(trace->dentry.trace_root);
+ struct lib_ring_buffer *buf;
+ int cpu;
+
+ for_each_channel_cpu(cpu, chan) {
+ buf = channel_get_ring_buffer(&client_config, chan, cpu);
+ if (!lib_ring_buffer_open_read(buf))
+ return buf;
+ }
+ return NULL;
}
-static int ltt_relay_create_dirs(struct ltt_trace *new_trace)
+static
+int ltt_buffer_has_read_closed_stream(struct channel *chan)
{
- struct dentry *ltt_root_dentry;
- int ret;
-
- ltt_root_dentry = get_ltt_root();
- if (!ltt_root_dentry)
- return ENOENT;
-
- new_trace->dentry.trace_root = debugfs_create_dir(new_trace->trace_name,
- ltt_root_dentry);
- put_ltt_root();
- if (new_trace->dentry.trace_root == NULL) {
- printk(KERN_ERR "LTT : Trace directory name %s already taken\n",
- new_trace->trace_name);
- return EEXIST;
+ struct lib_ring_buffer *buf;
+ int cpu;
+
+ for_each_channel_cpu(cpu, chan) {
+ buf = channel_get_ring_buffer(&client_config, chan, cpu);
+ if (!atomic_long_read(&buf->active_readers))
+ return 1;
}
return 0;
}
+
+static
+void ltt_buffer_read_close(struct lib_ring_buffer *buf)
+{
+ lib_ring_buffer_release_read(buf);
+}
+
+static
+int ltt_event_reserve(struct lib_ring_buffer_ctx *ctx,
+ uint32_t event_id)
+{
+ struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
+ int ret, cpu;
+
+ cpu = lib_ring_buffer_get_cpu(&client_config);
+ if (cpu < 0)
+ return -EPERM;
+ ctx->cpu = cpu;
+
+ switch (ltt_chan->header_type) {
+ case 1: /* compact */
+ if (event_id > 30)
+ ctx->rflags |= LTT_RFLAG_EXTENDED;
+ break;
+ case 2: /* large */
+ if (event_id > 65534)
+ ctx->rflags |= LTT_RFLAG_EXTENDED;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+ ret = lib_ring_buffer_reserve(&client_config, ctx);
+ if (ret)
+ goto put;
+ ltt_write_event_header(&client_config, ctx, event_id);
+ return 0;
+put:
+ lib_ring_buffer_put_cpu(&client_config);
+ return ret;
+}
+
+static
+void ltt_event_commit(struct lib_ring_buffer_ctx *ctx)
+{
+ lib_ring_buffer_commit(&client_config, ctx);
+ lib_ring_buffer_put_cpu(&client_config);
+}
+
+static
+void ltt_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
+ size_t len)
+{
+ lib_ring_buffer_write(&client_config, ctx, src, len);
+}
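+
+/*
+ * Sketch of how a probe would drive these operations (helper names
+ * from the ring buffer frontend API; payload is a placeholder):
+ *
+ *   struct lib_ring_buffer_ctx ctx;
+ *   int ret;
+ *
+ *   lib_ring_buffer_ctx_init(&ctx, chan->chan, event,
+ *                            sizeof(payload), ltt_alignof(payload), -1);
+ *   ret = chan->ops->event_reserve(&ctx, event->id);
+ *   if (ret < 0)
+ *           return;
+ *   lib_ring_buffer_align_ctx(&ctx, ltt_alignof(payload));
+ *   chan->ops->event_write(&ctx, &payload, sizeof(payload));
+ *   chan->ops->event_commit(&ctx);
+ */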
+
+static
+wait_queue_head_t *ltt_get_reader_wait_queue(struct channel *chan)
+{
+ return &chan->read_wait;
+}
+
+static
+wait_queue_head_t *ltt_get_hp_wait_queue(struct channel *chan)
+{
+ return &chan->hp_wait;
+}
+
+static
+int ltt_is_finalized(struct channel *chan)
+{
+ return lib_ring_buffer_channel_is_finalized(chan);
+}
+
+static
+int ltt_is_disabled(struct channel *chan)
+{
+ return lib_ring_buffer_channel_is_disabled(chan);
+}
+
static struct ltt_transport ltt_relay_transport = {
.name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
.owner = THIS_MODULE,
.ops = {
- .create_dirs = ltt_relay_create_dirs,
- .remove_dirs = ltt_relay_remove_dirs,
+ .channel_create = _channel_create,
+ .channel_destroy = ltt_channel_destroy,
+ .buffer_read_open = ltt_buffer_read_open,
+ .buffer_has_read_closed_stream =
+ ltt_buffer_has_read_closed_stream,
+ .buffer_read_close = ltt_buffer_read_close,
+ .event_reserve = ltt_event_reserve,
+ .event_commit = ltt_event_commit,
+ .event_write = ltt_event_write,
+ .packet_avail_size = NULL, /* Would be racy anyway */
+ .get_reader_wait_queue = ltt_get_reader_wait_queue,
+ .get_hp_wait_queue = ltt_get_hp_wait_queue,
+ .is_finalized = ltt_is_finalized,
+ .is_disabled = ltt_is_disabled,
},
};
static int __init ltt_ring_buffer_client_init(void)
{
- printk(KERN_INFO "LTT : ltt ring buffer client init\n");
+ /*
+ * This vmalloc sync all also takes care of the lib ring buffer
+ * vmalloc'd module pages when it is built as a module into LTTng.
+ */
+ wrapper_vmalloc_sync_all();
 ltt_transport_register(&ltt_relay_transport);
return 0;
}
+module_init(ltt_ring_buffer_client_init);
+
static void __exit ltt_ring_buffer_client_exit(void)
{
- printk(KERN_INFO "LTT : ltt ring buffer client exit\n");
 ltt_transport_unregister(&ltt_relay_transport);
}
+module_exit(ltt_ring_buffer_client_exit);
+
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("LTTng ring buffer " RING_BUFFER_MODE_TEMPLATE_STRING