return trace_clock_read64();
}
+/*
+ * ctx_get_size - compute the total space the context fields of @ctx
+ * will occupy in the ring buffer, starting from write position @offset.
+ * Returns 0 when no context is attached (the common case, hence the
+ * likely() hint). The returned size is relative (offset delta), so it
+ * includes any alignment padding the per-field callbacks add.
+ */
+static inline
+size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx)
+{
+ int i;
+ size_t orig_offset = offset;
+
+ if (likely(!ctx))
+ return 0;
+ /*
+ * NOTE(review): each field's get_size() callback takes the current
+ * offset, presumably so it can account for its own alignment padding
+ * — confirm against the lttng_ctx field implementations.
+ */
+ for (i = 0; i < ctx->nr_fields; i++)
+ offset += ctx->fields[i].get_size(offset);
+ return offset - orig_offset;
+}
+
+/*
+ * ctx_record - serialize every context field attached to @ctx into the
+ * ring buffer at the current write position of @bufctx, via each
+ * field's record() callback. No-op when no context is attached (the
+ * common case, hence the likely() hint). Must be kept in sync with
+ * ctx_get_size(), which reserves the space written here.
+ */
+static inline
+void ctx_record(struct lib_ring_buffer_ctx *bufctx,
+ struct ltt_channel *chan,
+ struct lttng_ctx *ctx)
+{
+ int i;
+
+ if (likely(!ctx))
+ return;
+ for (i = 0; i < ctx->nr_fields; i++)
+ ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
+}
+
/*
* record_header_size - Calculate the header size and padding necessary.
* @config: ring buffer instance configuration
struct lib_ring_buffer_ctx *ctx)
{
struct ltt_channel *ltt_chan = channel_get_private(chan);
+ struct ltt_event *event = ctx->priv;
size_t orig_offset = offset;
size_t padding;
offset += sizeof(uint32_t); /* id */
offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
offset += sizeof(uint64_t); /* timestamp */
-
}
break;
default:
WARN_ON_ONCE(1);
}
+ offset += ctx_get_size(offset, event->ctx);
+ offset += ctx_get_size(offset, ltt_chan->ctx);
*pre_header_padding = padding;
return offset - orig_offset;
uint32_t event_id)
{
struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
+ struct ltt_event *event = ctx->priv;
if (unlikely(ctx->rflags))
goto slow_path;
case 2: /* large */
{
uint32_t timestamp = (uint32_t) ctx->tsc;
+ uint16_t id = event_id;
- lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
+ lib_ring_buffer_write(config, ctx, &id, sizeof(id));
lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint32_t));
lib_ring_buffer_write(config, ctx, ×tamp, sizeof(timestamp));
break;
default:
WARN_ON_ONCE(1);
}
+
+ ctx_record(ctx, ltt_chan, event->ctx);
+ ctx_record(ctx, ltt_chan, ltt_chan->ctx);
+
return;
slow_path:
uint32_t event_id)
{
struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
+ struct ltt_event *event = ctx->priv;
switch (ltt_chan->header_type) {
case 1: /* compact */
lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
} else {
uint8_t id = 0;
- uint32_t event_id = (uint32_t) event_id;
uint64_t timestamp = ctx->tsc;
bt_bitfield_write(&id, uint8_t, 0, 5, 31);
{
if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
uint32_t timestamp = (uint32_t) ctx->tsc;
+ uint16_t id = event_id;
- lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
+ lib_ring_buffer_write(config, ctx, &id, sizeof(id));
lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint32_t));
lib_ring_buffer_write(config, ctx, ×tamp, sizeof(timestamp));
} else {
default:
WARN_ON_ONCE(1);
}
+ ctx_record(ctx, ltt_chan, event->ctx);
+ ctx_record(ctx, ltt_chan, ltt_chan->ctx);
}
static const struct lib_ring_buffer_config client_config;
void ltt_buffer_read_close(struct lib_ring_buffer *buf)
{
lib_ring_buffer_release_read(buf);
-
}
static
}
static
-wait_queue_head_t *ltt_get_reader_wait_queue(struct ltt_channel *chan)
+wait_queue_head_t *ltt_get_reader_wait_queue(struct channel *chan)
+{
+ return &chan->read_wait;
+}
+
+/*
+ * NOTE(review): "hp" presumably stands for CPU hotplug, making this the
+ * wait queue woken on hotplug-related channel events — confirm against
+ * the ring buffer channel definition.
+ */
+static
+wait_queue_head_t *ltt_get_hp_wait_queue(struct channel *chan)
+{
+ return &chan->hp_wait;
+}
+
+/* Thin wrapper: delegates the finalized check to the ring buffer library. */
+static
+int ltt_is_finalized(struct channel *chan)
+{
+ return lib_ring_buffer_channel_is_finalized(chan);
+}
+
+/* Thin wrapper: delegates the disabled check to the ring buffer library. */
+static
+int ltt_is_disabled(struct channel *chan)
{
- return &chan->chan->read_wait;
+ return lib_ring_buffer_channel_is_disabled(chan);
}
static struct ltt_transport ltt_relay_transport = {
.event_write = ltt_event_write,
.packet_avail_size = NULL, /* Would be racy anyway */
.get_reader_wait_queue = ltt_get_reader_wait_queue,
+ .get_hp_wait_queue = ltt_get_hp_wait_queue,
+ .is_finalized = ltt_is_finalized,
+ .is_disabled = ltt_is_disabled,
},
};