* you ever add/remove a field from this header. Packed attribute is not used
* because gcc generates poor code on at least powerpc and mips. Don't ever
* let gcc add padding between the structure elements.
+ *
+ * The guarantee we have with timestamps is that all the events in a
+ * packet fall within the packet's begin/end timestamps (inclusive).
+ * Another guarantee is that the "timestamp begin", as well as the
+ * event timestamps, are monotonically increasing (never decreasing)
+ * when moving forward through the physical layout of a stream. This
+ * guarantee does not apply to "timestamp end", because it is sampled
+ * at commit time, which is not ordered with respect to space
+ * reservation.
*/
struct packet_header {
return trace_clock_read64();
}
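+/*
+ * ctx_get_size - compute the space taken by the context fields.
+ *
+ * Each field contributes its own size, including any alignment padding
+ * relative to the running offset.
+ */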
+static inline
+size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx)
+{
+ int i;
+ size_t orig_offset = offset;
+
+ if (likely(!ctx))
+ return 0;
+ for (i = 0; i < ctx->nr_fields; i++)
+ offset += ctx->fields[i].get_size(offset);
+ return offset - orig_offset;
+}
+
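+/*
+ * ctx_record - write the context fields to the ring buffer.
+ *
+ * Iterates over the fields in the same order as ctx_get_size() so that
+ * the reserved space matches what is actually written.
+ */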
+static inline
+void ctx_record(struct lib_ring_buffer_ctx *bufctx,
+ struct ltt_channel *chan,
+ struct lttng_ctx *ctx)
+{
+ int i;
+
+ if (likely(!ctx))
+ return;
+ for (i = 0; i < ctx->nr_fields; i++)
+ ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
+}
+
/*
* record_header_size - Calculate the header size and padding necessary.
* @config: ring buffer instance configuration
* @chan: channel
* @offset: offset in the write buffer
- * @data_size: size of the payload
* @pre_header_padding: padding to add before the header (output)
- * @rflags: reservation flags
* @ctx: reservation context
*
* Returns the event header size (including padding).
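+ *
+ * The compact layout packs a 5-bit event id and a 27-bit timestamp
+ * into a single 32-bit word; the large layout uses a 16-bit id
+ * followed by a 32-bit timestamp. Records with the full-TSC flag or
+ * an out-of-range id fall back to an extended layout aligned on
+ * 64 bits.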
static __inline__
-unsigned char record_header_size(const struct lib_ring_buffer_config *config,
+size_t record_header_size(const struct lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
- size_t data_size, size_t *pre_header_padding,
- unsigned int rflags,
+ size_t *pre_header_padding,
struct lib_ring_buffer_ctx *ctx)
{
struct ltt_channel *ltt_chan = channel_get_private(chan);
+ struct ltt_event *event = ctx->priv;
size_t orig_offset = offset;
size_t padding;
case 1: /* compact */
padding = lib_ring_buffer_align(offset, ltt_alignof(uint32_t));
offset += padding;
- if (!(rflags & RING_BUFFER_RFLAG_FULL_TSC)) {
+ if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
offset += sizeof(uint32_t); /* id and timestamp */
} else {
/* Minimum space taken by 5-bit id */
padding = lib_ring_buffer_align(offset, ltt_alignof(uint16_t));
offset += padding;
offset += sizeof(uint16_t);
- if (!(rflags & RING_BUFFER_RFLAG_FULL_TSC)) {
+ if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
offset += lib_ring_buffer_align(offset, ltt_alignof(uint32_t));
offset += sizeof(uint32_t); /* timestamp */
} else {
offset += sizeof(uint32_t); /* id */
offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
offset += sizeof(uint64_t); /* timestamp */
-
}
break;
default:
- WARN_ON(1);
+ padding = 0;
+ WARN_ON_ONCE(1);
}
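+ /* Account for context fields, in the order ctx_record() writes them. */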
+ offset += ctx_get_size(offset, ltt_chan->ctx);
+ offset += ctx_get_size(offset, event->ctx);
*pre_header_padding = padding;
return offset - orig_offset;
#include "wrapper/ringbuffer/api.h"
-extern
+static
void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_ctx *ctx,
- u16 eID, u32 event_size);
+ uint32_t event_id);
/*
* ltt_write_event_header
*
* @config: ring buffer instance configuration
* @ctx: reservation context
- * @eID : event ID
- * @event_size : size of the event, excluding the event header.
+ * @event_id: event ID
*/
static __inline__
void ltt_write_event_header(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_ctx *ctx,
- u16 eID, u32 event_size)
+ uint32_t event_id)
{
struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
+ struct ltt_event *event = ctx->priv;
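+ /* Any reservation flag (full TSC, extended id) forces the slow path. */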
if (unlikely(ctx->rflags))
goto slow_path;
{
uint32_t id_time = 0;
- bt_bitfield_write(&id_time, uint32_t, 0, 5, eID);
+ bt_bitfield_write(&id_time, uint32_t, 0, 5, event_id);
bt_bitfield_write(&id_time, uint32_t, 5, 27, ctx->tsc);
lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
break;
}
case 2: /* large */
{
- uint16_t event_id = eID;
uint32_t timestamp = (uint32_t) ctx->tsc;
+ uint16_t id = event_id;
- lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
+ lib_ring_buffer_write(config, ctx, &id, sizeof(id));
lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint32_t));
lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
break;
}
default:
- WARN_ON(1);
+ WARN_ON_ONCE(1);
}
+
+ ctx_record(ctx, ltt_chan, ltt_chan->ctx);
+ ctx_record(ctx, ltt_chan, event->ctx);
+
return;
slow_path:
- ltt_write_event_header_slow(config, ctx, eID, event_size);
+ ltt_write_event_header_slow(config, ctx, event_id);
}
-/*
- * TODO: For now, we only support 65536 event ids per channel.
- */
+static
void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_ctx *ctx,
- u16 eID, u32 event_size)
+ struct lib_ring_buffer_ctx *ctx,
+ uint32_t event_id)
{
struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
+ struct ltt_event *event = ctx->priv;
switch (ltt_chan->header_type) {
case 1: /* compact */
- if (!(ctx->rflags & RING_BUFFER_RFLAG_FULL_TSC)) {
+ if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
uint32_t id_time = 0;
- bt_bitfield_write(&id_time, uint32_t, 0, 5, eID);
+ bt_bitfield_write(&id_time, uint32_t, 0, 5, event_id);
bt_bitfield_write(&id_time, uint32_t, 5, 27, ctx->tsc);
lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
} else {
uint8_t id = 0;
- uint32_t event_id = (uint32_t) eID;
uint64_t timestamp = ctx->tsc;
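+ /* id 31 in the 5-bit field marks an extended header */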
bt_bitfield_write(&id, uint8_t, 0, 5, 31);
break;
case 2: /* large */
{
- if (!(ctx->rflags & RING_BUFFER_RFLAG_FULL_TSC)) {
- uint16_t event_id = eID;
+ if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
uint32_t timestamp = (uint32_t) ctx->tsc;
+ uint16_t id = event_id;
- lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
+ lib_ring_buffer_write(config, ctx, &id, sizeof(id));
lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint32_t));
lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
} else {
- uint16_t event_id = 65535;
- uint32_t event_id_ext = (uint32_t) eID;
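+ /* id 65535 in the 16-bit field marks an extended header */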
+ uint16_t id = 65535;
uint64_t timestamp = ctx->tsc;
- lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
+ lib_ring_buffer_write(config, ctx, &id, sizeof(id));
/* Align extended struct on largest member */
lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
- lib_ring_buffer_write(config, ctx, &event_id_ext, sizeof(event_id_ext));
+ lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
}
break;
}
default:
- WARN_ON(1);
+ WARN_ON_ONCE(1);
}
+ ctx_record(ctx, ltt_chan, ltt_chan->ctx);
+ ctx_record(ctx, ltt_chan, event->ctx);
}
static const struct lib_ring_buffer_config client_config;
static
size_t client_record_header_size(const struct lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
- size_t data_size,
size_t *pre_header_padding,
- unsigned int rflags,
struct lib_ring_buffer_ctx *ctx)
{
- return record_header_size(config, chan, offset, data_size,
- pre_header_padding, rflags, ctx);
+ return record_header_size(config, chan, offset,
+ pre_header_padding, ctx);
}
/**
unsigned long records_lost = 0;
header->ctx.timestamp_end = tsc;
- header->ctx.content_size = data_size;
- header->ctx.packet_size = PAGE_ALIGN(data_size);
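+ /* CTF expresses packet content_size and packet_size in bits. */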
+ header->ctx.content_size = data_size * CHAR_BIT; /* in bits */
+ header->ctx.packet_size = PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
.sync = RING_BUFFER_SYNC_PER_CPU,
.mode = RING_BUFFER_MODE_TEMPLATE,
.backend = RING_BUFFER_PAGE,
- .output = RING_BUFFER_SPLICE,
+ .output = RING_BUFFER_OUTPUT_TEMPLATE,
.oops = RING_BUFFER_OOPS_CONSISTENCY,
.ipi = RING_BUFFER_IPI_BARRIER,
.wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
return NULL;
}
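+/*
+ * Returns 1 if at least one per-cpu stream of this channel has no
+ * active reader attached, 0 if every stream is being read.
+ */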
+static
+int ltt_buffer_has_read_closed_stream(struct channel *chan)
+{
+ struct lib_ring_buffer *buf;
+ int cpu;
+
+ for_each_channel_cpu(cpu, chan) {
+ buf = channel_get_ring_buffer(&client_config, chan, cpu);
+ if (!atomic_long_read(&buf->active_readers))
+ return 1;
+ }
+ return 0;
+}
+
static
void ltt_buffer_read_close(struct lib_ring_buffer *buf)
{
lib_ring_buffer_release_read(buf);
-
}
static
-int ltt_event_reserve(struct lib_ring_buffer_ctx *ctx)
+int ltt_event_reserve(struct lib_ring_buffer_ctx *ctx,
+ uint32_t event_id)
{
+ struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
int ret, cpu;
cpu = lib_ring_buffer_get_cpu(&client_config);
return -EPERM;
ctx->cpu = cpu;
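+
+ /*
+  * Event ids above 30 (compact) or 65534 (large) do not fit in the
+  * standard header: flag them so the extended header is emitted.
+  */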
+ switch (ltt_chan->header_type) {
+ case 1: /* compact */
+ if (event_id > 30)
+ ctx->rflags |= LTT_RFLAG_EXTENDED;
+ break;
+ case 2: /* large */
+ if (event_id > 65534)
+ ctx->rflags |= LTT_RFLAG_EXTENDED;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+
ret = lib_ring_buffer_reserve(&client_config, ctx);
if (ret)
goto put;
- return ret;
-
+ ltt_write_event_header(&client_config, ctx, event_id);
+ return 0;
put:
lib_ring_buffer_put_cpu(&client_config);
return ret;
}
static
-wait_queue_head_t *ltt_get_reader_wait_queue(struct ltt_channel *chan)
+wait_queue_head_t *ltt_get_reader_wait_queue(struct channel *chan)
+{
+ return &chan->read_wait;
+}
+
+static
+wait_queue_head_t *ltt_get_hp_wait_queue(struct channel *chan)
+{
+ return &chan->hp_wait;
+}
+
+static
+int ltt_is_finalized(struct channel *chan)
+{
+ return lib_ring_buffer_channel_is_finalized(chan);
+}
+
+static
+int ltt_is_disabled(struct channel *chan)
{
- return &chan->chan->read_wait;
+ return lib_ring_buffer_channel_is_disabled(chan);
}
static struct ltt_transport ltt_relay_transport = {
.channel_create = _channel_create,
.channel_destroy = ltt_channel_destroy,
.buffer_read_open = ltt_buffer_read_open,
+ .buffer_has_read_closed_stream =
+ ltt_buffer_has_read_closed_stream,
.buffer_read_close = ltt_buffer_read_close,
.event_reserve = ltt_event_reserve,
.event_commit = ltt_event_commit,
.event_write = ltt_event_write,
.packet_avail_size = NULL, /* Would be racy anyway */
.get_reader_wait_queue = ltt_get_reader_wait_queue,
+ .get_hp_wait_queue = ltt_get_hp_wait_queue,
+ .is_finalized = ltt_is_finalized,
+ .is_disabled = ltt_is_disabled,
},
};
* vmalloc'd module pages when it is built as a module into LTTng.
*/
wrapper_vmalloc_sync_all();
- printk(KERN_INFO "LTT : ltt ring buffer client init\n");
ltt_transport_register(&ltt_relay_transport);
return 0;
}
static void __exit ltt_ring_buffer_client_exit(void)
{
- printk(KERN_INFO "LTT : ltt ring buffer client exit\n");
ltt_transport_unregister(&ltt_relay_transport);
}