/*
 * lttng-ring-buffer-client.h
 *
 * LTTng lib ring buffer client template.
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>
#include <lttng/ust-events.h>
#include "lttng/bitfield.h"
#include "clock.h"
#include "lttng-tracer.h"
#include "../libringbuffer/frontend_types.h"
#include "../libringbuffer/rseq.h"

#define LTTNG_COMPACT_EVENT_BITS	5
#define LTTNG_COMPACT_TSC_BITS		27
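
/*
 * Compact header layout: the event id and the low-order timestamp bits
 * share one 32-bit word (LTTNG_COMPACT_EVENT_BITS id bits +
 * LTTNG_COMPACT_TSC_BITS timestamp bits = 32 bits). Events whose id does
 * not fit in the compact id field, or records that need a full 64-bit
 * timestamp, take the extended header path written by
 * lttng_write_event_header_slow() below.
 */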

enum app_ctx_mode {
	APP_CTX_DISABLED,
	APP_CTX_ENABLED,
};

/*
 * Keep the natural field alignment for _each field_ within this structure if
 * you ever add/remove a field from this header. Packed attribute is not used
 * because gcc generates poor code on at least powerpc and mips. Don't ever
 * let gcc add padding between the structure elements.
 */

struct packet_header {
	/* Trace packet header */
	uint32_t magic;			/*
					 * Trace magic number.
					 * Contains endianness information.
					 */
	uint8_t uuid[LTTNG_UST_UUID_LEN];
	uint32_t stream_id;
	uint64_t stream_instance_id;

	struct {
		/* Stream packet context */
		uint64_t timestamp_begin;	/* Cycle count at subbuffer start */
		uint64_t timestamp_end;		/* Cycle count at subbuffer end */
		uint64_t content_size;		/* Size of data in subbuffer */
		uint64_t packet_size;		/* Subbuffer size (includes padding) */
		uint64_t packet_seq_num;	/* Packet sequence number */
		unsigned long events_discarded;	/*
						 * Events lost in this subbuffer since
						 * the beginning of the trace.
						 * (may overflow)
						 */
		uint32_t cpu_id;		/* CPU id associated with stream */
		uint8_t header_end;		/* End of header */
	} ctx;
};
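
/*
 * The magic field is written in the tracer's native byte order; a trace
 * reader can compare it against the CTF magic value (CTF_MAGIC_NUMBER) to
 * detect whether the packet was produced with the opposite endianness.
 * header_end carries no data: only its offset is used, through offsetof(),
 * to compute the size of this header (see client_packet_header_size()).
 */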

static inline uint64_t lib_ring_buffer_clock_read(struct channel *chan)
{
	return trace_clock_read64();
}

static inline
size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx,
		enum app_ctx_mode mode)
{
	int i;
	size_t orig_offset = offset;

	if (caa_likely(!ctx))
		return 0;
	offset += lib_ring_buffer_align(offset, ctx->largest_align);
	for (i = 0; i < ctx->nr_fields; i++) {
		if (mode == APP_CTX_ENABLED) {
			offset += ctx->fields[i].get_size(&ctx->fields[i], offset);
		} else {
			if (lttng_context_is_app(ctx->fields[i].event_field.name)) {
				/*
				 * Before UST 2.8, we cannot use the
				 * application context, because we
				 * cannot trust that the handler used
				 * for get_size is the same used for
				 * ctx_record, which would result in
				 * corrupted traces when tracing
				 * concurrently with application context
				 * register/unregister.
				 */
				offset += lttng_ust_dummy_get_size(&ctx->fields[i], offset);
			} else {
				offset += ctx->fields[i].get_size(&ctx->fields[i], offset);
			}
		}
	}
	return offset - orig_offset;
}
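
/*
 * lib_ring_buffer_align() returns the number of padding bytes needed to
 * bring "offset" up to the requested alignment (e.g. offset 6 with an
 * 8-byte alignment yields 2), which is why its result is simply added to
 * the running offset above and in record_header_size() below.
 */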

static inline
void ctx_record(struct lttng_ust_lib_ring_buffer_ctx *bufctx,
		struct lttng_channel *chan,
		struct lttng_ctx *ctx,
		enum app_ctx_mode mode)
{
	int i;

	if (caa_likely(!ctx))
		return;
	lib_ring_buffer_align_ctx(bufctx, ctx->largest_align);
	for (i = 0; i < ctx->nr_fields; i++) {
		if (mode == APP_CTX_ENABLED) {
			ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
		} else {
			if (lttng_context_is_app(ctx->fields[i].event_field.name)) {
				/*
				 * Before UST 2.8, we cannot use the
				 * application context, because we
				 * cannot trust that the handler used
				 * for get_size is the same used for
				 * ctx_record, which would result in
				 * corrupted traces when tracing
				 * concurrently with application context
				 * register/unregister.
				 */
				lttng_ust_dummy_record(&ctx->fields[i], bufctx, chan);
			} else {
				ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
			}
		}
	}
}

/*
 * record_header_size - Calculate the header size and padding necessary.
 * @config: ring buffer instance configuration
 * @chan: channel
 * @offset: offset in the write buffer
 * @pre_header_padding: padding to add before the header (output)
 * @ctx: reservation context
 *
 * Returns the event header size (including padding).
 *
 * The payload must itself determine its own alignment from the biggest type it
 * contains.
 */
static __inline__
size_t record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
		struct channel *chan, size_t offset,
		size_t *pre_header_padding,
		struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	struct lttng_channel *lttng_chan = channel_get_private(chan);
	struct lttng_event *event = ctx->priv;
	struct lttng_stack_ctx *lttng_ctx = ctx->priv2;
	size_t orig_offset = offset;
	size_t padding;

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
		padding = lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
		offset += padding;
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			offset += sizeof(uint32_t);	/* id and timestamp */
		} else {
			/* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
			offset += (LTTNG_COMPACT_EVENT_BITS + CHAR_BIT - 1) / CHAR_BIT;
			/* Align extended struct on largest member */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint32_t);	/* id */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint64_t);	/* timestamp */
		}
		break;
	case 2:	/* large */
		padding = lib_ring_buffer_align(offset, lttng_alignof(uint16_t));
		offset += padding;
		offset += sizeof(uint16_t);
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
			offset += sizeof(uint32_t);	/* timestamp */
		} else {
			/* Align extended struct on largest member */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint32_t);	/* id */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint64_t);	/* timestamp */
		}
		break;
	default:
		padding = 0;
		WARN_ON_ONCE(1);
	}
	if (lttng_ctx) {
		/* 2.8+ probe ABI. */
		offset += ctx_get_size(offset, lttng_ctx->chan_ctx, APP_CTX_ENABLED);
		offset += ctx_get_size(offset, lttng_ctx->event_ctx, APP_CTX_ENABLED);
	} else {
		/* Pre 2.8 probe ABI. */
		offset += ctx_get_size(offset, lttng_chan->ctx, APP_CTX_DISABLED);
		offset += ctx_get_size(offset, event->ctx, APP_CTX_DISABLED);
	}
	*pre_header_padding = padding;
	return offset - orig_offset;
}
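
/*
 * Worked example: for a compact-header channel, an event that fits the
 * compact encoding and starts on a 32-bit aligned offset needs 4 bytes of
 * header (id and timestamp packed in one uint32_t). The extended compact
 * header needs 1 byte for the escape id, padding to the next 64-bit
 * boundary, 4 bytes for the full id, 4 more bytes of padding, then 8 bytes
 * of timestamp: 24 bytes when the record starts on a 64-bit boundary.
 */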

#include "../libringbuffer/api.h"
#include "lttng-rb-clients.h"

static
void lttng_write_event_header_slow(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		uint32_t event_id);

/*
 * lttng_write_event_header
 *
 * Writes the event header to the offset (already aligned on 32-bits).
 *
 * @config: ring buffer instance configuration
 * @ctx: reservation context
 * @event_id: event ID
 */
static __inline__
void lttng_write_event_header(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		uint32_t event_id)
{
	struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
	struct lttng_event *event = ctx->priv;
	struct lttng_stack_ctx *lttng_ctx = ctx->priv2;

	if (caa_unlikely(ctx->rflags))
		goto slow_path;

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
	{
		uint32_t id_time = 0;

		bt_bitfield_write(&id_time, uint32_t,
				0,
				LTTNG_COMPACT_EVENT_BITS,
				event_id);
		bt_bitfield_write(&id_time, uint32_t,
				LTTNG_COMPACT_EVENT_BITS,
				LTTNG_COMPACT_TSC_BITS,
				ctx->tsc);
		lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		break;
	}
	case 2:	/* large */
	{
		uint32_t timestamp = (uint32_t) ctx->tsc;
		uint16_t id = event_id;

		lib_ring_buffer_write(config, ctx, &id, sizeof(id));
		lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
		lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}

	if (lttng_ctx) {
		/* 2.8+ probe ABI. */
		ctx_record(ctx, lttng_chan, lttng_ctx->chan_ctx, APP_CTX_ENABLED);
		ctx_record(ctx, lttng_chan, lttng_ctx->event_ctx, APP_CTX_ENABLED);
	} else {
		/* Pre 2.8 probe ABI. */
		ctx_record(ctx, lttng_chan, lttng_chan->ctx, APP_CTX_DISABLED);
		ctx_record(ctx, lttng_chan, event->ctx, APP_CTX_DISABLED);
	}
	lib_ring_buffer_align_ctx(ctx, ctx->largest_align);

	return;

slow_path:
	lttng_write_event_header_slow(config, ctx, event_id);
}
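
/*
 * The slow path is taken whenever rflags is non-zero: either the ring
 * buffer requested a full timestamp (RING_BUFFER_RFLAG_FULL_TSC, raised
 * when the low-order tsc_bits overflow) or lttng_event_reserve() flagged
 * the record as LTTNG_RFLAG_EXTENDED because the event id does not fit
 * the compact (or large) id field.
 */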

static
void lttng_write_event_header_slow(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		uint32_t event_id)
{
	struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
	struct lttng_event *event = ctx->priv;
	struct lttng_stack_ctx *lttng_ctx = ctx->priv2;

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			uint32_t id_time = 0;

			bt_bitfield_write(&id_time, uint32_t,
					0,
					LTTNG_COMPACT_EVENT_BITS,
					event_id);
			bt_bitfield_write(&id_time, uint32_t,
					LTTNG_COMPACT_EVENT_BITS,
					LTTNG_COMPACT_TSC_BITS,
					ctx->tsc);
			lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		} else {
			uint8_t id = 0;
			uint64_t timestamp = ctx->tsc;

			bt_bitfield_write(&id, uint8_t,
					0,
					LTTNG_COMPACT_EVENT_BITS,
					31);
			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	case 2:	/* large */
	{
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			uint32_t timestamp = (uint32_t) ctx->tsc;
			uint16_t id = event_id;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		} else {
			uint16_t id = 65535;
			uint64_t timestamp = ctx->tsc;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}
	if (lttng_ctx) {
		/* 2.8+ probe ABI. */
		ctx_record(ctx, lttng_chan, lttng_ctx->chan_ctx, APP_CTX_ENABLED);
		ctx_record(ctx, lttng_chan, lttng_ctx->event_ctx, APP_CTX_ENABLED);
	} else {
		/* Pre 2.8 probe ABI. */
		ctx_record(ctx, lttng_chan, lttng_chan->ctx, APP_CTX_DISABLED);
		ctx_record(ctx, lttng_chan, event->ctx, APP_CTX_DISABLED);
	}
	lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
}
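
/*
 * In the extended headers above, the compact id field is written with the
 * escape value 31 (the largest 5-bit value) and the large id field with
 * 65535, signalling to the trace reader that the real 32-bit event id and
 * a full 64-bit timestamp follow. These escape values match the
 * LTTNG_RFLAG_EXTENDED thresholds checked in lttng_event_reserve() below
 * (event_id > 30 and event_id > 65534 respectively).
 */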

static const struct lttng_ust_lib_ring_buffer_config client_config;

static uint64_t client_ring_buffer_clock_read(struct channel *chan)
{
	return lib_ring_buffer_clock_read(chan);
}

static
size_t client_record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
		struct channel *chan, size_t offset,
		size_t *pre_header_padding,
		struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	return record_header_size(config, chan, offset,
			pre_header_padding, ctx);
}

/**
 * client_packet_header_size - called on buffer-switch to a new sub-buffer
 *
 * Return header size without padding after the structure. Don't use packed
 * structure because gcc generates inefficient code on some architectures
 * (powerpc, mips..)
 */
static size_t client_packet_header_size(void)
{
	return offsetof(struct packet_header, ctx.header_end);
}

static void client_buffer_begin(struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
		unsigned int subbuf_idx,
		struct lttng_ust_shm_handle *handle)
{
	struct channel *chan = shmp(handle, buf->backend.chan);
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size,
				handle);
	struct lttng_channel *lttng_chan = channel_get_private(chan);
	uint64_t cnt = shmp_index(handle, buf->backend.buf_cnt, subbuf_idx)->seq_cnt;

	assert(header);
	if (!header)
		return;
	header->magic = CTF_MAGIC_NUMBER;
	memcpy(header->uuid, lttng_chan->uuid, sizeof(lttng_chan->uuid));
	header->stream_id = lttng_chan->id;
	header->stream_instance_id = buf->backend.cpu;
	header->ctx.timestamp_begin = tsc;
	header->ctx.timestamp_end = 0;
	header->ctx.content_size = ~0ULL; /* for debugging */
	header->ctx.packet_size = ~0ULL;
	header->ctx.packet_seq_num = chan->backend.num_subbuf * cnt + subbuf_idx;
	header->ctx.events_discarded = 0;
	header->ctx.cpu_id = buf->backend.cpu;
}
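
/*
 * packet_seq_num example: seq_cnt tracks how many times this sub-buffer
 * slot has cycled, so with num_subbuf = 4, the third use (cnt = 2) of
 * sub-buffer index 1 yields packet sequence number 4 * 2 + 1 = 9, giving
 * a per-stream sequence that keeps increasing across buffer wrap-around.
 */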

/*
 * offset is assumed to never be 0 here: never deliver a completely empty
 * subbuffer. data_size is between 1 and subbuf_size.
 */
static void client_buffer_end(struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
		unsigned int subbuf_idx, unsigned long data_size,
		struct lttng_ust_shm_handle *handle)
{
	struct channel *chan = shmp(handle, buf->backend.chan);
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size,
				handle);
	unsigned long records_lost = 0;

	assert(header);
	if (!header)
		return;
	header->ctx.timestamp_end = tsc;
	header->ctx.content_size =
		(uint64_t) data_size * CHAR_BIT;	/* in bits */
	header->ctx.packet_size =
		(uint64_t) PAGE_ALIGN(data_size) * CHAR_BIT;	/* in bits */

	records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
	header->ctx.events_discarded = records_lost;
}
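
/*
 * Both sizes follow the CTF convention of being expressed in bits. For
 * example, a sub-buffer holding 100 bytes of payload on a system with
 * 4 KiB pages ends up with content_size = 800 and
 * packet_size = 4096 * 8 = 32768.
 */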

static int client_buffer_create(struct lttng_ust_lib_ring_buffer *buf, void *priv,
		int cpu, const char *name, struct lttng_ust_shm_handle *handle)
{
	return 0;
}

static void client_buffer_finalize(struct lttng_ust_lib_ring_buffer *buf, void *priv, int cpu, struct lttng_ust_shm_handle *handle)
{
}

static void client_content_size_field(const struct lttng_ust_lib_ring_buffer_config *config,
		size_t *offset, size_t *length)
{
	*offset = offsetof(struct packet_header, ctx.content_size);
	*length = sizeof(((struct packet_header *) NULL)->ctx.content_size);
}

static void client_packet_size_field(const struct lttng_ust_lib_ring_buffer_config *config,
		size_t *offset, size_t *length)
{
	*offset = offsetof(struct packet_header, ctx.packet_size);
	*length = sizeof(((struct packet_header *) NULL)->ctx.packet_size);
}

static struct packet_header *client_packet_header(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle)
{
	return lib_ring_buffer_read_offset_address(&buf->backend, 0, handle);
}

static int client_timestamp_begin(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle,
		uint64_t *timestamp_begin)
{
	struct packet_header *header;

	header = client_packet_header(buf, handle);
	if (!header)
		return -1;
	*timestamp_begin = header->ctx.timestamp_begin;
	return 0;
}

static int client_timestamp_end(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle,
		uint64_t *timestamp_end)
{
	struct packet_header *header;

	header = client_packet_header(buf, handle);
	if (!header)
		return -1;
	*timestamp_end = header->ctx.timestamp_end;
	return 0;
}

static int client_events_discarded(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle,
		uint64_t *events_discarded)
{
	struct packet_header *header;

	header = client_packet_header(buf, handle);
	if (!header)
		return -1;
	*events_discarded = header->ctx.events_discarded;
	return 0;
}

static int client_content_size(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle,
		uint64_t *content_size)
{
	struct packet_header *header;

	header = client_packet_header(buf, handle);
	if (!header)
		return -1;
	*content_size = header->ctx.content_size;
	return 0;
}

static int client_packet_size(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle,
		uint64_t *packet_size)
{
	struct packet_header *header;

	header = client_packet_header(buf, handle);
	if (!header)
		return -1;
	*packet_size = header->ctx.packet_size;
	return 0;
}

static int client_stream_id(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle,
		uint64_t *stream_id)
{
	struct packet_header *header;

	header = client_packet_header(buf, handle);
	if (!header)
		return -1;
	*stream_id = header->stream_id;
	return 0;
}

static int client_current_timestamp(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle,
		uint64_t *ts)
{
	struct channel *chan;

	chan = shmp(handle, handle->chan);
	*ts = client_ring_buffer_clock_read(chan);

	return 0;
}

static int client_sequence_number(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle,
		uint64_t *seq)
{
	struct packet_header *header;

	header = client_packet_header(buf, handle);
	if (!header)
		return -1;
	*seq = header->ctx.packet_seq_num;
	return 0;
}

static int client_instance_id(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle,
		uint64_t *id)
{
	struct packet_header *header;

	header = client_packet_header(buf, handle);
	if (!header)
		return -1;
	*id = header->stream_instance_id;
	return 0;
}

static const
struct lttng_ust_client_lib_ring_buffer_client_cb client_cb = {
	.parent = {
		.ring_buffer_clock_read = client_ring_buffer_clock_read,
		.record_header_size = client_record_header_size,
		.subbuffer_header_size = client_packet_header_size,
		.buffer_begin = client_buffer_begin,
		.buffer_end = client_buffer_end,
		.buffer_create = client_buffer_create,
		.buffer_finalize = client_buffer_finalize,
		.content_size_field = client_content_size_field,
		.packet_size_field = client_packet_size_field,
	},
	.timestamp_begin = client_timestamp_begin,
	.timestamp_end = client_timestamp_end,
	.events_discarded = client_events_discarded,
	.content_size = client_content_size,
	.packet_size = client_packet_size,
	.stream_id = client_stream_id,
	.current_timestamp = client_current_timestamp,
	.sequence_number = client_sequence_number,
	.instance_id = client_instance_id,
};

static const struct lttng_ust_lib_ring_buffer_config client_config = {
	.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
	.cb.record_header_size = client_record_header_size,
	.cb.subbuffer_header_size = client_packet_header_size,
	.cb.buffer_begin = client_buffer_begin,
	.cb.buffer_end = client_buffer_end,
	.cb.buffer_create = client_buffer_create,
	.cb.buffer_finalize = client_buffer_finalize,
	.cb.content_size_field = client_content_size_field,
	.cb.packet_size_field = client_packet_size_field,

	.tsc_bits = LTTNG_COMPACT_TSC_BITS,
	.alloc = RING_BUFFER_ALLOC_PER_CPU,
	.sync = RING_BUFFER_SYNC_GLOBAL,
	.mode = RING_BUFFER_MODE_TEMPLATE,
	.backend = RING_BUFFER_PAGE,
	.output = RING_BUFFER_MMAP,
	.oops = RING_BUFFER_OOPS_CONSISTENCY,
	.ipi = RING_BUFFER_NO_IPI_BARRIER,
	.wakeup = LTTNG_CLIENT_WAKEUP,
	.client_type = LTTNG_CLIENT_TYPE,

	.cb_ptr = &client_cb.parent,
};

const struct lttng_ust_client_lib_ring_buffer_client_cb *LTTNG_CLIENT_CALLBACKS = &client_cb;

static
struct lttng_channel *_channel_create(const char *name,
		void *buf_addr,
		size_t subbuf_size, size_t num_subbuf,
		unsigned int switch_timer_interval,
		unsigned int read_timer_interval,
		unsigned char *uuid,
		uint32_t chan_id,
		const int *stream_fds, int nr_stream_fds)
{
	struct lttng_channel chan_priv_init;
	struct lttng_ust_shm_handle *handle;
	struct lttng_channel *lttng_chan;
	void *priv;

	memset(&chan_priv_init, 0, sizeof(chan_priv_init));
	memcpy(chan_priv_init.uuid, uuid, LTTNG_UST_UUID_LEN);
	chan_priv_init.id = chan_id;
	handle = channel_create(&client_config, name,
			&priv, __alignof__(struct lttng_channel),
			sizeof(struct lttng_channel),
			&chan_priv_init,
			buf_addr, subbuf_size, num_subbuf,
			switch_timer_interval, read_timer_interval,
			stream_fds, nr_stream_fds);
	if (!handle)
		return NULL;
	lttng_chan = priv;
	lttng_chan->handle = handle;
	lttng_chan->chan = shmp(handle, handle->chan);
	return lttng_chan;
}
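
/*
 * channel_create() allocates the channel's private struct lttng_channel
 * alongside the ring buffer channel (in the same shared-memory handle),
 * initializes it from chan_priv_init, and returns its address through
 * "priv"; that pointer is the object handed back to the LTTng session
 * layer here.
 */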

static
void lttng_channel_destroy(struct lttng_channel *chan)
{
	channel_destroy(chan->chan, chan->handle, 1);
}

static
int lttng_event_reserve(struct lttng_ust_lib_ring_buffer_ctx *ctx,
		uint32_t event_id)
{
	struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
	struct lttng_rseq_state rseq_state;
	int ret, cpu;

	if (lib_ring_buffer_begin(&client_config))
		return -EPERM;
retry:
	rseq_state = rseq_start();
	if (caa_unlikely(rseq_cpu_at_start(rseq_state) < 0)) {
		if (caa_unlikely(rseq_cpu_at_start(rseq_state) == -1)) {
			if (!rseq_register_current_thread())
				goto retry;
		}
		/* rseq is unavailable. */
		cpu = lib_ring_buffer_get_cpu(&client_config);
		if (caa_unlikely(cpu < 0)) {
			ret = -EPERM;
			goto end;
		}
	} else {
		cpu = rseq_cpu_at_start(rseq_state);
	}
	ctx->cpu = cpu;

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
		if (event_id > 30)
			ctx->rflags |= LTTNG_RFLAG_EXTENDED;
		break;
	case 2:	/* large */
		if (event_id > 65534)
			ctx->rflags |= LTTNG_RFLAG_EXTENDED;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	ret = lib_ring_buffer_reserve(&client_config, ctx);
	if (caa_unlikely(ret))
		goto end;
	if (caa_likely(ctx->ctx_len
			>= sizeof(struct lttng_ust_lib_ring_buffer_ctx))) {
		if (lib_ring_buffer_backend_get_pages(&client_config, ctx,
				&ctx->backend_pages)) {
			ret = -EPERM;
			goto end;
		}
	}
	lttng_write_event_header(&client_config, ctx, event_id);
	return 0;
end:
	lib_ring_buffer_end(&client_config);
	return ret;
}
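
/*
 * Fast path CPU lookup: rseq_cpu_at_start() reports the CPU observed by
 * rseq_start() without a system call. A value of -1 means the thread is
 * not yet registered with rseq, so we register and retry; any other
 * negative value means rseq is unavailable and we fall back to
 * lib_ring_buffer_get_cpu(). The ctx_len test keeps compatibility with
 * probes built against an older, smaller struct
 * lttng_ust_lib_ring_buffer_ctx: the backend_pages field is only touched
 * when the caller's context is large enough to contain it.
 */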

static
void lttng_event_commit(struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	lib_ring_buffer_commit(&client_config, ctx);
	lib_ring_buffer_end(&client_config);
}

static
void lttng_event_write(struct lttng_ust_lib_ring_buffer_ctx *ctx, const void *src,
		size_t len)
{
	lib_ring_buffer_write(&client_config, ctx, src, len);
}

static
void lttng_event_strcpy(struct lttng_ust_lib_ring_buffer_ctx *ctx, const char *src,
		size_t len)
{
	lib_ring_buffer_strcpy(&client_config, ctx, src, len, '#');
}

#if 0
static
wait_queue_head_t *lttng_get_reader_wait_queue(struct channel *chan)
{
	return &chan->read_wait;
}

static
wait_queue_head_t *lttng_get_hp_wait_queue(struct channel *chan)
{
	return &chan->hp_wait;
}
#endif //0

static
int lttng_is_finalized(struct channel *chan)
{
	return lib_ring_buffer_channel_is_finalized(chan);
}

static
int lttng_is_disabled(struct channel *chan)
{
	return lib_ring_buffer_channel_is_disabled(chan);
}

static
int lttng_flush_buffer(struct channel *chan, struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer *buf;
	int cpu;

	for_each_channel_cpu(cpu, chan) {
		int shm_fd, wait_fd, wakeup_fd;
		uint64_t memory_map_size;

		buf = channel_get_ring_buffer(&client_config, chan,
				cpu, handle, &shm_fd, &wait_fd,
				&wakeup_fd, &memory_map_size);
		lib_ring_buffer_switch(&client_config, buf,
				SWITCH_ACTIVE, handle);
	}
	return 0;
}
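
/*
 * SWITCH_ACTIVE forces the currently active sub-buffer of each per-CPU
 * buffer to be closed and delivered even if it is only partially filled,
 * so a flush makes all data written so far visible to the consumer.
 */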

static struct lttng_transport lttng_relay_transport = {
	.name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap",
	.ops = {
		.channel_create = _channel_create,
		.channel_destroy = lttng_channel_destroy,
		.u.has_strcpy = 1,
		.event_reserve = lttng_event_reserve,
		.event_commit = lttng_event_commit,
		.event_write = lttng_event_write,
		.packet_avail_size = NULL,	/* Would be racy anyway */
		//.get_reader_wait_queue = lttng_get_reader_wait_queue,
		//.get_hp_wait_queue = lttng_get_hp_wait_queue,
		.is_finalized = lttng_is_finalized,
		.is_disabled = lttng_is_disabled,
		.flush_buffer = lttng_flush_buffer,
		.event_strcpy = lttng_event_strcpy,
	},
	.client_config = &client_config,
};
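
/*
 * This file is a client template: the .c files that #include it define
 * RING_BUFFER_MODE_TEMPLATE, RING_BUFFER_MODE_TEMPLATE_STRING,
 * LTTNG_CLIENT_TYPE, LTTNG_CLIENT_WAKEUP and LTTNG_CLIENT_CALLBACKS, so
 * the registered transport name expands to something like
 * "relay-discard-mmap" or "relay-overwrite-mmap" depending on the client.
 */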

void RING_BUFFER_MODE_TEMPLATE_INIT(void)
{
	DBG("LTT : ltt ring buffer client \"%s\" init\n",
		"relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap");
	lttng_transport_register(&lttng_relay_transport);
}

void RING_BUFFER_MODE_TEMPLATE_EXIT(void)
{
	DBG("LTT : ltt ring buffer client \"%s\" exit\n",
		"relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap");
	lttng_transport_unregister(&lttng_relay_transport);
}