rseq: do 8 rseq attempts before using refcount fallback
/*
 * lttng-ring-buffer-client.h
 *
 * LTTng lib ring buffer client template.
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>
#include <lttng/ust-events.h>
#include "lttng/bitfield.h"
#include "clock.h"
#include "lttng-tracer.h"
#include "../libringbuffer/frontend_types.h"
#include "../libringbuffer/rseq.h"

#define LTTNG_COMPACT_EVENT_BITS	5
#define LTTNG_COMPACT_TSC_BITS		27
#define LTTNG_RSEQ_ATTEMPTS		8
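/*
 * Retry the rseq fast path up to LTTNG_RSEQ_ATTEMPTS times before
 * taking a fallback reference and reserving through the atomic
 * (non-rseq) path; see lttng_event_reserve() below.
 */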

enum app_ctx_mode {
	APP_CTX_DISABLED,
	APP_CTX_ENABLED,
};

/*
 * Keep the natural field alignment for _each field_ within this structure if
 * you ever add/remove a field from this header. Packed attribute is not used
 * because gcc generates poor code on at least powerpc and mips. Don't ever
 * let gcc add padding between the structure elements.
 */

struct packet_header {
	/* Trace packet header */
	uint32_t magic;			/*
					 * Trace magic number.
					 * Contains endianness information.
					 */
	uint8_t uuid[LTTNG_UST_UUID_LEN];
	uint32_t stream_id;
	uint64_t stream_instance_id;

	struct {
		/* Stream packet context */
		uint64_t timestamp_begin;	/* Cycle count at subbuffer start */
		uint64_t timestamp_end;		/* Cycle count at subbuffer end */
		uint64_t content_size;		/* Size of data in subbuffer */
		uint64_t packet_size;		/* Subbuffer size (includes padding) */
		uint64_t packet_seq_num;	/* Packet sequence number */
		unsigned long events_discarded;	/*
						 * Events lost in this subbuffer since
						 * the beginning of the trace.
						 * (may overflow)
						 */
		uint32_t cpu_id;		/* CPU id associated with stream */
		uint8_t header_end;		/* End of header */
	} ctx;
};
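
/*
 * This layout must match the packet header and packet context declared
 * in the trace's CTF metadata; readers locate fields using the natural
 * alignment rules described above.
 */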


static inline uint64_t lib_ring_buffer_clock_read(struct channel *chan)
{
	return trace_clock_read64();
}

static inline
size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx,
		enum app_ctx_mode mode)
{
	int i;
	size_t orig_offset = offset;

	if (caa_likely(!ctx))
		return 0;
	offset += lib_ring_buffer_align(offset, ctx->largest_align);
	for (i = 0; i < ctx->nr_fields; i++) {
		if (mode == APP_CTX_ENABLED) {
			offset += ctx->fields[i].get_size(&ctx->fields[i], offset);
		} else {
			if (lttng_context_is_app(ctx->fields[i].event_field.name)) {
				/*
				 * Before UST 2.8, we cannot use the
				 * application context, because we
				 * cannot trust that the handler used
				 * for get_size is the same used for
				 * ctx_record, which would result in
				 * corrupted traces when tracing
				 * concurrently with application context
				 * register/unregister.
				 */
				offset += lttng_ust_dummy_get_size(&ctx->fields[i], offset);
			} else {
				offset += ctx->fields[i].get_size(&ctx->fields[i], offset);
			}
		}
	}
	return offset - orig_offset;
}

static inline
void ctx_record(struct lttng_ust_lib_ring_buffer_ctx *bufctx,
		struct lttng_channel *chan,
		struct lttng_ctx *ctx,
		enum app_ctx_mode mode)
{
	int i;

	if (caa_likely(!ctx))
		return;
	lib_ring_buffer_align_ctx(bufctx, ctx->largest_align);
	for (i = 0; i < ctx->nr_fields; i++) {
		if (mode == APP_CTX_ENABLED) {
			ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
		} else {
			if (lttng_context_is_app(ctx->fields[i].event_field.name)) {
				/*
				 * Before UST 2.8, we cannot use the
				 * application context, because we
				 * cannot trust that the handler used
				 * for get_size is the same used for
				 * ctx_record, which would result in
				 * corrupted traces when tracing
				 * concurrently with application context
				 * register/unregister.
				 */
				lttng_ust_dummy_record(&ctx->fields[i], bufctx, chan);
			} else {
				ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
			}
		}
	}
}

/*
 * record_header_size - Calculate the header size and padding necessary.
 * @config: ring buffer instance configuration
 * @chan: channel
 * @offset: offset in the write buffer
 * @pre_header_padding: padding to add before the header (output)
 * @ctx: reservation context
 *
 * Returns the event header size (including padding).
 *
 * The payload must itself determine its own alignment from the biggest type it
 * contains.
 */
static __inline__
size_t record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
		struct channel *chan, size_t offset,
		size_t *pre_header_padding,
		struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	struct lttng_channel *lttng_chan = channel_get_private(chan);
	struct lttng_event *event = ctx->priv;
	struct lttng_stack_ctx *lttng_ctx = ctx->priv2;
	size_t orig_offset = offset;
	size_t padding;

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
		padding = lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
		offset += padding;
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			offset += sizeof(uint32_t);	/* id and timestamp */
		} else {
			/* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
			offset += (LTTNG_COMPACT_EVENT_BITS + CHAR_BIT - 1) / CHAR_BIT;
			/* Align extended struct on largest member */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint32_t);	/* id */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint64_t);	/* timestamp */
		}
		break;
	case 2:	/* large */
		padding = lib_ring_buffer_align(offset, lttng_alignof(uint16_t));
		offset += padding;
		offset += sizeof(uint16_t);
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
			offset += sizeof(uint32_t);	/* timestamp */
		} else {
			/* Align extended struct on largest member */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint32_t);	/* id */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint64_t);	/* timestamp */
		}
		break;
	default:
		padding = 0;
		WARN_ON_ONCE(1);
	}
	if (lttng_ctx) {
		/* 2.8+ probe ABI. */
		offset += ctx_get_size(offset, lttng_ctx->chan_ctx, APP_CTX_ENABLED);
		offset += ctx_get_size(offset, lttng_ctx->event_ctx, APP_CTX_ENABLED);
	} else {
		/* Pre 2.8 probe ABI. */
		offset += ctx_get_size(offset, lttng_chan->ctx, APP_CTX_DISABLED);
		offset += ctx_get_size(offset, event->ctx, APP_CTX_DISABLED);
	}
	*pre_header_padding = padding;
	return offset - orig_offset;
}
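/*
 * Example (compact header, fast path): starting from a 32-bit aligned
 * offset with no extended rflags, the header is a single uint32_t
 * packing the 5-bit event id with the low 27 timestamp bits, so this
 * returns 4 bytes plus alignment padding plus the context field sizes.
 */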

#include "../libringbuffer/api.h"
#include "lttng-rb-clients.h"

static
void lttng_write_event_header_slow(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		uint32_t event_id);

/*
 * lttng_write_event_header
 *
 * Writes the event header to the offset (already aligned on 32-bits).
 *
 * @config: ring buffer instance configuration
 * @ctx: reservation context
 * @event_id: event ID
 */
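/*
 * Compact fast path: the event id and timestamp share one 32-bit word,
 * id in bit offsets [0, LTTNG_COMPACT_EVENT_BITS) and the low-order
 * timestamp bits in the remaining LTTNG_COMPACT_TSC_BITS.
 */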
static __inline__
void lttng_write_event_header(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		uint32_t event_id)
{
	struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
	struct lttng_event *event = ctx->priv;
	struct lttng_stack_ctx *lttng_ctx = ctx->priv2;

	if (caa_unlikely(ctx->rflags))
		goto slow_path;

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
	{
		uint32_t id_time = 0;

		bt_bitfield_write(&id_time, uint32_t,
				0,
				LTTNG_COMPACT_EVENT_BITS,
				event_id);
		bt_bitfield_write(&id_time, uint32_t,
				LTTNG_COMPACT_EVENT_BITS,
				LTTNG_COMPACT_TSC_BITS,
				ctx->tsc);
		lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		break;
	}
	case 2:	/* large */
	{
		uint32_t timestamp = (uint32_t) ctx->tsc;
		uint16_t id = event_id;

		lib_ring_buffer_write(config, ctx, &id, sizeof(id));
		lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
		lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}

	if (lttng_ctx) {
		/* 2.8+ probe ABI. */
		ctx_record(ctx, lttng_chan, lttng_ctx->chan_ctx, APP_CTX_ENABLED);
		ctx_record(ctx, lttng_chan, lttng_ctx->event_ctx, APP_CTX_ENABLED);
	} else {
		/* Pre 2.8 probe ABI. */
		ctx_record(ctx, lttng_chan, lttng_chan->ctx, APP_CTX_DISABLED);
		ctx_record(ctx, lttng_chan, event->ctx, APP_CTX_DISABLED);
	}
	lib_ring_buffer_align_ctx(ctx, ctx->largest_align);

	return;

slow_path:
	lttng_write_event_header_slow(config, ctx, event_id);
}

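/*
 * Slow path, taken whenever rflags are set. For the extended format,
 * a reserved id (31 for compact, 65535 for large) marks that the full
 * 32-bit event id and 64-bit timestamp follow.
 */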
static
void lttng_write_event_header_slow(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		uint32_t event_id)
{
	struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
	struct lttng_event *event = ctx->priv;
	struct lttng_stack_ctx *lttng_ctx = ctx->priv2;

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			uint32_t id_time = 0;

			bt_bitfield_write(&id_time, uint32_t,
					0,
					LTTNG_COMPACT_EVENT_BITS,
					event_id);
			bt_bitfield_write(&id_time, uint32_t,
					LTTNG_COMPACT_EVENT_BITS,
					LTTNG_COMPACT_TSC_BITS,
					ctx->tsc);
			lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		} else {
			uint8_t id = 0;
			uint64_t timestamp = ctx->tsc;

			bt_bitfield_write(&id, uint8_t,
					0,
					LTTNG_COMPACT_EVENT_BITS,
					31);
			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	case 2:	/* large */
	{
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			uint32_t timestamp = (uint32_t) ctx->tsc;
			uint16_t id = event_id;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		} else {
			uint16_t id = 65535;
			uint64_t timestamp = ctx->tsc;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}
	if (lttng_ctx) {
		/* 2.8+ probe ABI. */
		ctx_record(ctx, lttng_chan, lttng_ctx->chan_ctx, APP_CTX_ENABLED);
		ctx_record(ctx, lttng_chan, lttng_ctx->event_ctx, APP_CTX_ENABLED);
	} else {
		/* Pre 2.8 probe ABI. */
		ctx_record(ctx, lttng_chan, lttng_chan->ctx, APP_CTX_DISABLED);
		ctx_record(ctx, lttng_chan, event->ctx, APP_CTX_DISABLED);
	}
	lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
}

static const struct lttng_ust_lib_ring_buffer_config client_config;

static uint64_t client_ring_buffer_clock_read(struct channel *chan)
{
	return lib_ring_buffer_clock_read(chan);
}

static
size_t client_record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
		struct channel *chan, size_t offset,
		size_t *pre_header_padding,
		struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	return record_header_size(config, chan, offset,
			pre_header_padding, ctx);
}

/**
 * client_packet_header_size - called on buffer-switch to a new sub-buffer
 *
 * Return header size without padding after the structure. Don't use packed
 * structure because gcc generates inefficient code on some architectures
 * (powerpc, mips..)
 */
static size_t client_packet_header_size(void)
{
	return offsetof(struct packet_header, ctx.header_end);
}

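/*
 * client_buffer_begin - initialize the packet header when switching to
 * a new sub-buffer. The per-sub-buffer sequence count makes
 * packet_seq_num distinct and increasing for every packet produced on
 * the stream.
 */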
static void client_buffer_begin(struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
		unsigned int subbuf_idx,
		struct lttng_ust_shm_handle *handle)
{
	struct channel *chan = shmp(handle, buf->backend.chan);
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size,
				handle);
	struct lttng_channel *lttng_chan = channel_get_private(chan);
	uint64_t cnt = shmp_index(handle, buf->backend.buf_cnt, subbuf_idx)->seq_cnt;

	assert(header);
	if (!header)
		return;
	header->magic = CTF_MAGIC_NUMBER;
	memcpy(header->uuid, lttng_chan->uuid, sizeof(lttng_chan->uuid));
	header->stream_id = lttng_chan->id;
	header->stream_instance_id = buf->backend.cpu;
	header->ctx.timestamp_begin = tsc;
	header->ctx.timestamp_end = 0;
	header->ctx.content_size = ~0ULL;	/* for debugging */
	header->ctx.packet_size = ~0ULL;
	header->ctx.packet_seq_num = chan->backend.num_subbuf * cnt + subbuf_idx;
	header->ctx.events_discarded = 0;
	header->ctx.cpu_id = buf->backend.cpu;
}

/*
 * offset is assumed to never be 0 here: never deliver a completely empty
 * subbuffer. data_size is between 1 and subbuf_size.
 */
static void client_buffer_end(struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
		unsigned int subbuf_idx, unsigned long data_size,
		struct lttng_ust_shm_handle *handle)
{
	struct channel *chan = shmp(handle, buf->backend.chan);
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size,
				handle);
	unsigned long records_lost = 0;

	assert(header);
	if (!header)
		return;
	header->ctx.timestamp_end = tsc;
	header->ctx.content_size =
		(uint64_t) data_size * CHAR_BIT;		/* in bits */
	header->ctx.packet_size =
		(uint64_t) PAGE_ALIGN(data_size) * CHAR_BIT;	/* in bits */

	records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
	header->ctx.events_discarded = records_lost;
}

static int client_buffer_create(struct lttng_ust_lib_ring_buffer *buf, void *priv,
		int cpu, const char *name, struct lttng_ust_shm_handle *handle)
{
	return 0;
}

static void client_buffer_finalize(struct lttng_ust_lib_ring_buffer *buf, void *priv, int cpu, struct lttng_ust_shm_handle *handle)
{
}

static void client_content_size_field(const struct lttng_ust_lib_ring_buffer_config *config,
		size_t *offset, size_t *length)
{
	*offset = offsetof(struct packet_header, ctx.content_size);
	*length = sizeof(((struct packet_header *) NULL)->ctx.content_size);
}

static void client_packet_size_field(const struct lttng_ust_lib_ring_buffer_config *config,
		size_t *offset, size_t *length)
{
	*offset = offsetof(struct packet_header, ctx.packet_size);
	*length = sizeof(((struct packet_header *) NULL)->ctx.packet_size);
}

static struct packet_header *client_packet_header(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle)
{
	return lib_ring_buffer_read_offset_address(&buf->backend, 0, handle);
}

static int client_timestamp_begin(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle,
		uint64_t *timestamp_begin)
{
	struct packet_header *header;

	header = client_packet_header(buf, handle);
	if (!header)
		return -1;
	*timestamp_begin = header->ctx.timestamp_begin;
	return 0;
}

static int client_timestamp_end(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle,
		uint64_t *timestamp_end)
{
	struct packet_header *header;

	header = client_packet_header(buf, handle);
	if (!header)
		return -1;
	*timestamp_end = header->ctx.timestamp_end;
	return 0;
}

static int client_events_discarded(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle,
		uint64_t *events_discarded)
{
	struct packet_header *header;

	header = client_packet_header(buf, handle);
	if (!header)
		return -1;
	*events_discarded = header->ctx.events_discarded;
	return 0;
}

static int client_content_size(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle,
		uint64_t *content_size)
{
	struct packet_header *header;

	header = client_packet_header(buf, handle);
	if (!header)
		return -1;
	*content_size = header->ctx.content_size;
	return 0;
}

static int client_packet_size(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle,
		uint64_t *packet_size)
{
	struct packet_header *header;

	header = client_packet_header(buf, handle);
	if (!header)
		return -1;
	*packet_size = header->ctx.packet_size;
	return 0;
}

static int client_stream_id(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle,
		uint64_t *stream_id)
{
	struct packet_header *header;

	header = client_packet_header(buf, handle);
	if (!header)
		return -1;
	*stream_id = header->stream_id;
	return 0;
}

static int client_current_timestamp(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle,
		uint64_t *ts)
{
	struct channel *chan;

	chan = shmp(handle, handle->chan);
	*ts = client_ring_buffer_clock_read(chan);

	return 0;
}

static int client_sequence_number(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle,
		uint64_t *seq)
{
	struct packet_header *header;

	header = client_packet_header(buf, handle);
	*seq = header->ctx.packet_seq_num;
	return 0;
}

static int client_instance_id(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle,
		uint64_t *id)
{
	struct packet_header *header;

	header = client_packet_header(buf, handle);
	*id = header->stream_instance_id;
	return 0;
}

static const
struct lttng_ust_client_lib_ring_buffer_client_cb client_cb = {
	.parent = {
		.ring_buffer_clock_read = client_ring_buffer_clock_read,
		.record_header_size = client_record_header_size,
		.subbuffer_header_size = client_packet_header_size,
		.buffer_begin = client_buffer_begin,
		.buffer_end = client_buffer_end,
		.buffer_create = client_buffer_create,
		.buffer_finalize = client_buffer_finalize,
		.content_size_field = client_content_size_field,
		.packet_size_field = client_packet_size_field,
	},
	.timestamp_begin = client_timestamp_begin,
	.timestamp_end = client_timestamp_end,
	.events_discarded = client_events_discarded,
	.content_size = client_content_size,
	.packet_size = client_packet_size,
	.stream_id = client_stream_id,
	.current_timestamp = client_current_timestamp,
	.sequence_number = client_sequence_number,
	.instance_id = client_instance_id,
};

static const struct lttng_ust_lib_ring_buffer_config client_config = {
	.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
	.cb.record_header_size = client_record_header_size,
	.cb.subbuffer_header_size = client_packet_header_size,
	.cb.buffer_begin = client_buffer_begin,
	.cb.buffer_end = client_buffer_end,
	.cb.buffer_create = client_buffer_create,
	.cb.buffer_finalize = client_buffer_finalize,
	.cb.content_size_field = client_content_size_field,
	.cb.packet_size_field = client_packet_size_field,

	.tsc_bits = LTTNG_COMPACT_TSC_BITS,
	.alloc = RING_BUFFER_ALLOC_PER_CPU,
	.sync = RING_BUFFER_SYNC_PER_CPU,
	.mode = RING_BUFFER_MODE_TEMPLATE,
	.backend = RING_BUFFER_PAGE,
	.output = RING_BUFFER_MMAP,
	.oops = RING_BUFFER_OOPS_CONSISTENCY,
	.ipi = RING_BUFFER_NO_IPI_BARRIER,
	.wakeup = LTTNG_CLIENT_WAKEUP,
	.client_type = LTTNG_CLIENT_TYPE,

	.cb_ptr = &client_cb.parent,
};

const struct lttng_ust_client_lib_ring_buffer_client_cb *LTTNG_CLIENT_CALLBACKS = &client_cb;

static
struct lttng_channel *_channel_create(const char *name,
		void *buf_addr,
		size_t subbuf_size, size_t num_subbuf,
		unsigned int switch_timer_interval,
		unsigned int read_timer_interval,
		unsigned char *uuid,
		uint32_t chan_id,
		const int *stream_fds, int nr_stream_fds)
{
	struct lttng_channel chan_priv_init;
	struct lttng_ust_shm_handle *handle;
	struct lttng_channel *lttng_chan;
	void *priv;

	memset(&chan_priv_init, 0, sizeof(chan_priv_init));
	memcpy(chan_priv_init.uuid, uuid, LTTNG_UST_UUID_LEN);
	chan_priv_init.id = chan_id;
	handle = channel_create(&client_config, name,
			&priv, __alignof__(struct lttng_channel),
			sizeof(struct lttng_channel),
			&chan_priv_init,
			buf_addr, subbuf_size, num_subbuf,
			switch_timer_interval, read_timer_interval,
			stream_fds, nr_stream_fds);
	if (!handle)
		return NULL;
	lttng_chan = priv;
	lttng_chan->handle = handle;
	lttng_chan->chan = shmp(handle, handle->chan);
	return lttng_chan;
}

static
void lttng_channel_destroy(struct lttng_channel *chan)
{
	channel_destroy(chan->chan, chan->handle, 1);
}

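/*
 * Take a reference on *ref unless it would reach LONG_MAX. Returns
 * true when the reference is taken (release it with uatomic_dec()),
 * false when the counter is, or just became, saturated: a saturated
 * counter is left at LONG_MAX forever so it can never overflow.
 */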
static
bool refcount_get_saturate(long *ref)
{
	long old, _new, res;

	old = uatomic_read(ref);
	for (;;) {
		if (old == LONG_MAX) {
			return false;	/* Saturated. */
		}
		_new = old + 1;
		res = uatomic_cmpxchg(ref, old, _new);
		if (res == old) {
			if (_new == LONG_MAX) {
				return false;	/* Saturation. */
			}
			return true;	/* Success. */
		}
		old = res;
	}
}

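/*
 * Reserve space for an event. Fast path: reserve on the CPU observed
 * by rseq_start(). If the rseq-based reservation fails with -EAGAIN,
 * retry up to LTTNG_RSEQ_ATTEMPTS times before taking a
 * reserve_fallback_ref reference and falling back to the atomic
 * reservation path on a CPU pinned by lib_ring_buffer_get_cpu().
 */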
static
int lttng_event_reserve(struct lttng_ust_lib_ring_buffer_ctx *ctx,
		uint32_t event_id)
{
	struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
	struct lttng_rseq_state rseq_state;
	int ret, cpu, attempt = 0;
	bool put_fallback_ref = false;

	if (lib_ring_buffer_begin(&client_config))
		return -EPERM;
retry:
	rseq_state = rseq_start();
	if (caa_unlikely(rseq_cpu_at_start(rseq_state) < 0)) {
		if (caa_unlikely(rseq_cpu_at_start(rseq_state) == -1)) {
			if (!rseq_register_current_thread())
				goto retry;
		}
		/* rseq is unavailable. */
		cpu = lib_ring_buffer_get_cpu(&client_config);
		if (caa_unlikely(cpu < 0)) {
			ret = -EPERM;
			goto end;
		}
	} else {
		cpu = rseq_cpu_at_start(rseq_state);
	}
fallback:
	ctx->cpu = cpu;

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
		if (event_id > 30)
			ctx->rflags |= LTTNG_RFLAG_EXTENDED;
		break;
	case 2:	/* large */
		if (event_id > 65534)
			ctx->rflags |= LTTNG_RFLAG_EXTENDED;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	if (caa_likely(ctx->ctx_len
			>= sizeof(struct lttng_ust_lib_ring_buffer_ctx)))
		ctx->rseq_state = rseq_state;

	ret = lib_ring_buffer_reserve(&client_config, ctx);
	if (caa_unlikely(ret)) {
		if (ret == -EAGAIN) {
			assert(!put_fallback_ref);
			if (++attempt < LTTNG_RSEQ_ATTEMPTS) {
				caa_cpu_relax();
				goto retry;
			}
			put_fallback_ref = refcount_get_saturate(
				&lttng_chan->chan->u.reserve_fallback_ref);
			cpu = lib_ring_buffer_get_cpu(&client_config);
			if (caa_unlikely(cpu < 0)) {
				ret = -EPERM;
				goto end;
			}
			goto fallback;
		}
		goto end;
	}
	if (caa_likely(ctx->ctx_len
			>= sizeof(struct lttng_ust_lib_ring_buffer_ctx))) {
		if (lib_ring_buffer_backend_get_pages(&client_config, ctx,
				&ctx->backend_pages)) {
			ret = -EPERM;
			goto end;
		}
	}
	lttng_write_event_header(&client_config, ctx, event_id);

	if (caa_unlikely(put_fallback_ref))
		uatomic_dec(&lttng_chan->chan->u.reserve_fallback_ref);

	return 0;
end:
	lib_ring_buffer_end(&client_config);
	if (put_fallback_ref)
		uatomic_dec(&lttng_chan->chan->u.reserve_fallback_ref);
	return ret;
}

static
void lttng_event_commit(struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	lib_ring_buffer_commit(&client_config, ctx);
	lib_ring_buffer_end(&client_config);
}

static
void lttng_event_write(struct lttng_ust_lib_ring_buffer_ctx *ctx, const void *src,
		size_t len)
{
	lib_ring_buffer_write(&client_config, ctx, src, len);
}

static
void lttng_event_strcpy(struct lttng_ust_lib_ring_buffer_ctx *ctx, const char *src,
		size_t len)
{
	lib_ring_buffer_strcpy(&client_config, ctx, src, len, '#');
}

#if 0
static
wait_queue_head_t *lttng_get_reader_wait_queue(struct channel *chan)
{
	return &chan->read_wait;
}

static
wait_queue_head_t *lttng_get_hp_wait_queue(struct channel *chan)
{
	return &chan->hp_wait;
}
#endif //0

static
int lttng_is_finalized(struct channel *chan)
{
	return lib_ring_buffer_channel_is_finalized(chan);
}

static
int lttng_is_disabled(struct channel *chan)
{
	return lib_ring_buffer_channel_is_disabled(chan);
}

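/*
 * Force an active sub-buffer switch on every per-CPU buffer so that
 * readers can consume whatever data has been written so far.
 */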
static
int lttng_flush_buffer(struct channel *chan, struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer *buf;
	int cpu;

	for_each_channel_cpu(cpu, chan) {
		int shm_fd, wait_fd, wakeup_fd;
		uint64_t memory_map_size;

		buf = channel_get_ring_buffer(&client_config, chan,
				cpu, handle, &shm_fd, &wait_fd,
				&wakeup_fd, &memory_map_size);
		lib_ring_buffer_switch(&client_config, buf,
				SWITCH_ACTIVE, handle);
	}
	return 0;
}

static struct lttng_transport lttng_relay_transport = {
	.name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap",
	.ops = {
		.channel_create = _channel_create,
		.channel_destroy = lttng_channel_destroy,
		.u.has_strcpy = 1,
		.event_reserve = lttng_event_reserve,
		.event_commit = lttng_event_commit,
		.event_write = lttng_event_write,
		.packet_avail_size = NULL,	/* Would be racy anyway */
		//.get_reader_wait_queue = lttng_get_reader_wait_queue,
		//.get_hp_wait_queue = lttng_get_hp_wait_queue,
		.is_finalized = lttng_is_finalized,
		.is_disabled = lttng_is_disabled,
		.flush_buffer = lttng_flush_buffer,
		.event_strcpy = lttng_event_strcpy,
	},
	.client_config = &client_config,
};

void RING_BUFFER_MODE_TEMPLATE_INIT(void)
{
	DBG("LTT : ltt ring buffer client \"%s\" init\n",
		"relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap");
	lttng_transport_register(&lttng_relay_transport);
}

void RING_BUFFER_MODE_TEMPLATE_EXIT(void)
{
	DBG("LTT : ltt ring buffer client \"%s\" exit\n",
		"relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap");
	lttng_transport_unregister(&lttng_relay_transport);
}