Consumer: strip ring buffer header when consuming ctf2 ring buffer packet
lttng-tools.git: src/common/consumer/consumer-stream.cpp
1 /*
2 * Copyright (C) 2011 EfficiOS Inc.
3 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
5 *
6 * SPDX-License-Identifier: GPL-2.0-only
7 *
8 */
9
10 #define _LGPL_SOURCE
11 #include <inttypes.h>
12 #include <sys/mman.h>
13 #include <unistd.h>
14
15 #include <common/buffer-view.hpp>
16 #include <common/common.hpp>
17 #include <common/consumer/consumer-timer.hpp>
18 #include <common/consumer/consumer.hpp>
19 #include <common/consumer/metadata-bucket.hpp>
20 #include <common/index/index.hpp>
21 #include <common/kernel-consumer/kernel-consumer.hpp>
22 #include <common/kernel-ctl/kernel-ctl.hpp>
23 #include <common/macros.hpp>
24 #include <common/relayd/relayd.hpp>
25 #include <common/ust-consumer/ust-consumer.hpp>
26 #include <common/utils.hpp>
27
28 #include "consumer-stream.hpp"
29
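/*
 * Layout of the packet header found at the beginning of each metadata
 * sub-buffer produced by the tracer. metadata_length() returns its size,
 * which is the number of bytes stripped from CTF2 metadata packets before
 * they are consumed (see strip_packet_header_from_subbuffer()).
 */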
30 struct metadata_packet_header {
31 uint32_t magic; /* 0x75D11D57 */
32 uint8_t uuid[16]; /* Unique Universal Identifier */
33 uint32_t checksum; /* 0 if unused */
34 uint32_t content_size; /* in bits */
35 uint32_t packet_size; /* in bits */
36 uint8_t compression_scheme; /* 0 if unused */
37 uint8_t encryption_scheme; /* 0 if unused */
38 uint8_t checksum_scheme; /* 0 if unused */
39 uint8_t major; /* CTF spec major version number */
40 uint8_t minor; /* CTF spec minor version number */
41 uint8_t header_end[0];
42 };
43
44 static size_t metadata_length(void)
45 {
46 return offsetof(struct metadata_packet_header, header_end);
47 }
48
49 /*
50 * RCU call to free stream. MUST only be used with call_rcu().
51 */
52 static void free_stream_rcu(struct rcu_head *head)
53 {
54 struct lttng_ht_node_u64 *node =
55 lttng::utils::container_of(head, &lttng_ht_node_u64::head);
56 struct lttng_consumer_stream *stream =
57 lttng::utils::container_of(node, &lttng_consumer_stream::node);
58
59 pthread_mutex_destroy(&stream->lock);
60 free(stream);
61 }
62
63 static void consumer_stream_data_lock_all(struct lttng_consumer_stream *stream)
64 {
65 pthread_mutex_lock(&stream->chan->lock);
66 pthread_mutex_lock(&stream->lock);
67 }
68
69 static void consumer_stream_data_unlock_all(struct lttng_consumer_stream *stream)
70 {
71 pthread_mutex_unlock(&stream->lock);
72 pthread_mutex_unlock(&stream->chan->lock);
73 }
74
75 static void consumer_stream_data_assert_locked_all(struct lttng_consumer_stream *stream)
76 {
77 ASSERT_LOCKED(stream->lock);
78 ASSERT_LOCKED(stream->chan->lock);
79 }
80
81 static void consumer_stream_metadata_lock_all(struct lttng_consumer_stream *stream)
82 {
83 consumer_stream_data_lock_all(stream);
84 pthread_mutex_lock(&stream->metadata_rdv_lock);
85 }
86
87 static void consumer_stream_metadata_unlock_all(struct lttng_consumer_stream *stream)
88 {
89 pthread_mutex_unlock(&stream->metadata_rdv_lock);
90 consumer_stream_data_unlock_all(stream);
91 }
92
93 static void consumer_stream_metadata_assert_locked_all(struct lttng_consumer_stream *stream)
94 {
95 ASSERT_LOCKED(stream->metadata_rdv_lock);
96 consumer_stream_data_assert_locked_all(stream);
97 }
98
99 /* Only used for data streams. */
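/*
 * Update the channel's lost packet count from the packet sequence numbers and
 * its discarded event count, handling counter wrap-around for the latter.
 */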
100 static int consumer_stream_update_stats(
101 struct lttng_consumer_stream *stream, struct stream_subbuffer *subbuf_)
102 {
103 int ret = 0;
104 uint64_t sequence_number;
105 const stream_subbuffer *subbuf = subbuf_;
106 const uint64_t discarded_events = subbuf->info.data.events_discarded;
107
108 if (!subbuf->info.data.sequence_number.is_set) {
109 /* Command not supported by the tracer. */
110 sequence_number = -1ULL;
111 stream->sequence_number_unavailable = true;
112 } else {
113 sequence_number = subbuf->info.data.sequence_number.value;
114 }
115
116 /*
117 * Start the sequence when we extract the first packet in case we don't
118 * start at 0 (for example if a consumer is not connected to the
119 * session immediately after the beginning).
120 */
121 if (stream->last_sequence_number == -1ULL) {
122 stream->last_sequence_number = sequence_number;
123 } else if (sequence_number > stream->last_sequence_number) {
124 stream->chan->lost_packets += sequence_number -
125 stream->last_sequence_number - 1;
126 } else {
127 /* seq <= last_sequence_number */
128 ERR("Sequence number inconsistent: prev = %" PRIu64
129 ", current = %" PRIu64,
130 stream->last_sequence_number, sequence_number);
131 ret = -1;
132 goto end;
133 }
134 stream->last_sequence_number = sequence_number;
135
136 if (discarded_events < stream->last_discarded_events) {
137 /*
138 * Overflow has occurred. We assume only one wrap-around
139 * has occurred.
140 */
141 stream->chan->discarded_events +=
142 (1ULL << (CAA_BITS_PER_LONG - 1)) -
143 stream->last_discarded_events +
144 discarded_events;
145 } else {
146 stream->chan->discarded_events += discarded_events -
147 stream->last_discarded_events;
148 }
149 stream->last_discarded_events = discarded_events;
150 ret = 0;
151
152 end:
153 return ret;
154 }
155
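/*
 * Fill a CTF packet index entry from the sub-buffer's information, converting
 * every field to big endian.
 */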
156 static
157 void ctf_packet_index_populate(struct ctf_packet_index *index,
158 off_t offset, const struct stream_subbuffer *subbuffer)
159 {
160 *index = (typeof(*index)){
161 .offset = htobe64(offset),
162 .packet_size = htobe64(subbuffer->info.data.packet_size),
163 .content_size = htobe64(subbuffer->info.data.content_size),
164 .timestamp_begin = htobe64(
165 subbuffer->info.data.timestamp_begin),
166 .timestamp_end = htobe64(
167 subbuffer->info.data.timestamp_end),
168 .events_discarded = htobe64(
169 subbuffer->info.data.events_discarded),
170 .stream_id = htobe64(subbuffer->info.data.stream_id),
171 .stream_instance_id = htobe64(
172 subbuffer->info.data.stream_instance_id.is_set ?
173 subbuffer->info.data.stream_instance_id.value : -1ULL),
174 .packet_seq_num = htobe64(
175 subbuffer->info.data.sequence_number.is_set ?
176 subbuffer->info.data.sequence_number.value : -1ULL),
177 };
178 }
179
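/*
 * Write an mmap'ed sub-buffer to its destination (local file or relay daemon)
 * and sanity-check the number of bytes written.
 */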
180 static ssize_t consumer_stream_consume_mmap(
181 struct lttng_consumer_local_data *ctx __attribute__((unused)),
182 struct lttng_consumer_stream *stream,
183 const struct stream_subbuffer *subbuffer)
184 {
185 const unsigned long padding_size =
186 subbuffer->info.data.padded_subbuf_size -
187 subbuffer->info.data.subbuf_size;
188 const ssize_t written_bytes = lttng_consumer_on_read_subbuffer_mmap(
189 stream, &subbuffer->buffer.buffer, padding_size);
190
191 if (stream->net_seq_idx == -1ULL) {
192 /*
193 * When writing to disk, check that the entire padded subbuffer
194 * was written.
195 */
196 if (written_bytes != subbuffer->info.data.padded_subbuf_size) {
197 DBG("Failed to write the entire padded subbuffer on disk (written_bytes: %zd, padded subbuffer size %lu)",
198 written_bytes,
199 subbuffer->info.data.padded_subbuf_size);
200 }
201 } else {
202 /*
203 * When streaming over the network, check that only the subbuffer
204 * (without its padding) was written; the relay daemon handles the padding.
205 */
206 if (written_bytes != subbuffer->info.data.subbuf_size) {
207 DBG("Failed to write only the subbuffer over the network (written_bytes: %zd, subbuffer size %lu)",
208 written_bytes,
209 subbuffer->info.data.subbuf_size);
210 }
211 }
212
213 /*
214 * If `lttng_consumer_on_read_subbuffer_mmap()` returned an error, log
215 * it; the value is passed along to the caller either way.
216 */
217 if (written_bytes < 0) {
218 ERR("Error reading mmap subbuffer: %zd", written_bytes);
219 }
220
221 return written_bytes;
222 }
223
224 static ssize_t consumer_stream_consume_splice(
225 struct lttng_consumer_local_data *ctx,
226 struct lttng_consumer_stream *stream,
227 const struct stream_subbuffer *subbuffer)
228 {
229 const ssize_t written_bytes = lttng_consumer_on_read_subbuffer_splice(
230 ctx, stream, subbuffer->info.data.padded_subbuf_size, 0);
231
232 if (written_bytes != subbuffer->info.data.padded_subbuf_size) {
233 DBG("Failed to write the entire padded subbuffer (written_bytes: %zd, padded subbuffer size %lu)",
234 written_bytes,
235 subbuffer->info.data.padded_subbuf_size);
236 }
237
238 /*
239 * If `lttng_consumer_on_read_subbuffer_splice()` returned an error, log
240 * it; the value is passed along to the caller either way.
241 */
242 if (written_bytes < 0) {
243 ERR("Error reading splice subbuffer: %zd", written_bytes);
244 }
245
246 return written_bytes;
247 }
248
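/*
 * Build the packet index of the sub-buffer that was just consumed and write
 * it to the relay daemon or to the local index file.
 */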
249 static int consumer_stream_send_index(
250 struct lttng_consumer_stream *stream,
251 const struct stream_subbuffer *subbuffer,
252 struct lttng_consumer_local_data *ctx __attribute__((unused)))
253 {
254 off_t packet_offset = 0;
255 struct ctf_packet_index index = {};
256
257 /*
258 * This is called after consuming the sub-buffer; subtract the
259 * effect of this sub-buffer from the offset.
260 */
261 if (stream->net_seq_idx == (uint64_t) -1ULL) {
262 packet_offset = stream->out_fd_offset -
263 subbuffer->info.data.padded_subbuf_size;
264 }
265
266 ctf_packet_index_populate(&index, packet_offset, subbuffer);
267 return consumer_stream_write_index(stream, &index);
268 }
269
270 /*
271 * Actually do the metadata sync using the given metadata stream.
272 *
273 * Return 0 on success or a negative value on error. ENODATA can also be
274 * returned, indicating that there is no metadata available for that stream.
275 */
276 static int do_sync_metadata(struct lttng_consumer_stream *metadata,
277 struct lttng_consumer_local_data *ctx)
278 {
279 int ret;
280 enum sync_metadata_status status;
281
282 LTTNG_ASSERT(metadata);
283 LTTNG_ASSERT(metadata->metadata_flag);
284 LTTNG_ASSERT(ctx);
285
286 /*
287 * In UST, since we have to write the metadata from the cache packet
288 * by packet, we might need to start this procedure multiple times
289 * until all the metadata from the cache has been extracted.
290 */
291 do {
292 /*
293 * Steps :
294 * - Lock the metadata stream
295 * - Check if metadata stream node was deleted before locking.
296 * - if yes, release and return success
297 * - Check if new metadata is ready (flush + snapshot pos)
298 * - If nothing : release and return.
299 * - Lock the metadata_rdv_lock
300 * - Unlock the metadata stream
301 * - cond_wait on metadata_rdv to wait the wakeup from the
302 * metadata thread
303 * - Unlock the metadata_rdv_lock
304 */
305 pthread_mutex_lock(&metadata->lock);
306
307 /*
308 * There is a possibility that we were able to acquire a reference on the
309 * stream from the RCU hash table but between then and now, the node might
310 * have been deleted just before the lock is acquired. Thus, after locking,
311 * we make sure the metadata node has not been deleted; a deleted node means
312 * that the buffers have been closed.
313 *
314 * In that case, there is no need to sync the metadata and a success return
315 * code is returned.
316 */
317 ret = cds_lfht_is_node_deleted(&metadata->node.node);
318 if (ret) {
319 ret = 0;
320 goto end_unlock_mutex;
321 }
322
323 switch (ctx->type) {
324 case LTTNG_CONSUMER_KERNEL:
325 /*
326 * Empty the metadata cache and flush the current stream.
327 */
328 status = lttng_kconsumer_sync_metadata(metadata);
329 break;
330 case LTTNG_CONSUMER32_UST:
331 case LTTNG_CONSUMER64_UST:
332 /*
333 * Ask the sessiond if we have new metadata waiting and update the
334 * consumer metadata cache.
335 */
336 status = lttng_ustconsumer_sync_metadata(ctx, metadata);
337 break;
338 default:
339 abort();
340 }
341
342 switch (status) {
343 case SYNC_METADATA_STATUS_NEW_DATA:
344 break;
345 case SYNC_METADATA_STATUS_NO_DATA:
346 ret = 0;
347 goto end_unlock_mutex;
348 case SYNC_METADATA_STATUS_ERROR:
349 ret = -1;
350 goto end_unlock_mutex;
351 default:
352 abort();
353 }
354
355 /*
356 * At this point, new metadata has been flushed, so we wait on the
357 * rendez-vous point for the metadata thread to wake us up once it has
358 * finished consuming the metadata; we then continue execution.
359 */
360
361 pthread_mutex_lock(&metadata->metadata_rdv_lock);
362
363 /*
364 * Release metadata stream lock so the metadata thread can process it.
365 */
366 pthread_mutex_unlock(&metadata->lock);
367
368 /*
369 * Wait on the rendez-vous point. Once woken up, it means the metadata was
370 * consumed and thus synchronization is achieved.
371 */
372 pthread_cond_wait(&metadata->metadata_rdv, &metadata->metadata_rdv_lock);
373 pthread_mutex_unlock(&metadata->metadata_rdv_lock);
374 } while (status == SYNC_METADATA_STATUS_NEW_DATA);
375
376 /* Success */
377 return 0;
378
379 end_unlock_mutex:
380 pthread_mutex_unlock(&metadata->lock);
381 return ret;
382 }
383
384 /*
385 * Synchronize the metadata using a given session ID. A successful acquisition
386 * of a metadata stream will trigger a request to the session daemon and a
387 * snapshot so the metadata thread can consume it.
388 *
389 * This function call is a rendez-vous point between the metadata thread and
390 * the data thread.
391 *
392 * Return 0 on success or else a negative value.
393 */
394 int consumer_stream_sync_metadata(struct lttng_consumer_local_data *ctx,
395 uint64_t session_id)
396 {
397 int ret;
398 struct lttng_consumer_stream *stream = NULL;
399 struct lttng_ht_iter iter;
400 struct lttng_ht *ht;
401
402 LTTNG_ASSERT(ctx);
403
404 /* Ease our life a bit. */
405 ht = the_consumer_data.stream_list_ht;
406
407 rcu_read_lock();
408
409 /* Search for the metadata stream(s) associated with the given session id. */
410
411 cds_lfht_for_each_entry_duplicate(ht->ht,
412 ht->hash_fct(&session_id, lttng_ht_seed), ht->match_fct,
413 &session_id, &iter.iter, stream, node_session_id.node) {
414 if (!stream->metadata_flag) {
415 continue;
416 }
417
418 ret = do_sync_metadata(stream, ctx);
419 if (ret < 0) {
420 goto end;
421 }
422 }
423
424 /*
425 * Force return code to 0 (success) since ret might be ENODATA for instance
426 * which is not an error but rather that we should come back.
427 */
428 ret = 0;
429
430 end:
431 rcu_read_unlock();
432 return ret;
433 }
434
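/*
 * Post-consumption callback used for live sessions: wait until all available
 * metadata has been pushed before sending the index of the consumed
 * sub-buffer, and emit a live inactivity beacon if a metadata flush was
 * missed while waiting.
 */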
435 static int consumer_stream_sync_metadata_index(
436 struct lttng_consumer_stream *stream,
437 const struct stream_subbuffer *subbuffer,
438 struct lttng_consumer_local_data *ctx)
439 {
440 bool missed_metadata_flush;
441 int ret;
442
443 /* Block until all the metadata is sent. */
444 pthread_mutex_lock(&stream->metadata_timer_lock);
445 LTTNG_ASSERT(!stream->missed_metadata_flush);
446 stream->waiting_on_metadata = true;
447 pthread_mutex_unlock(&stream->metadata_timer_lock);
448
449 ret = consumer_stream_sync_metadata(ctx, stream->session_id);
450
451 pthread_mutex_lock(&stream->metadata_timer_lock);
452 stream->waiting_on_metadata = false;
453 missed_metadata_flush = stream->missed_metadata_flush;
454 if (missed_metadata_flush) {
455 stream->missed_metadata_flush = false;
456 }
457 pthread_mutex_unlock(&stream->metadata_timer_lock);
458 if (ret < 0) {
459 goto end;
460 }
461
462 ret = consumer_stream_send_index(stream, subbuffer, ctx);
463 /*
464 * Send the live inactivity beacon to handle the situation where
465 * the live timer is prevented from sampling this stream
466 * because the stream lock was being held while this stream is
467 * waiting on metadata. This ensures live viewer progress in the
468 * unlikely scenario where a live timer would be prevented from
469 * locking a stream lock repeatedly due to a steady flow of
470 * incoming metadata, for a stream which is mostly inactive.
471 *
472 * It is important to send the inactivity beacon packet to
473 * relayd _after_ sending the index associated with the data
474 * that was just sent, otherwise this can cause live viewers to
475 * observe timestamps going backwards between an inactivity
476 * beacon and a following trace packet.
477 */
478 if (missed_metadata_flush) {
479 (void) stream->read_subbuffer_ops.send_live_beacon(stream);
480 }
481 end:
482 return ret;
483 }
484
485 /*
486 * Check if the local version of the metadata stream matches with the version
487 * of the metadata stream in the kernel. If it was updated, set the reset flag
488 * on the stream.
489 */
490 static void metadata_stream_check_version(
491 struct lttng_consumer_stream *stream, const struct stream_subbuffer *subbuffer)
492 {
493 if (stream->metadata_version == subbuffer->info.metadata.version) {
494 return;
495 }
496
497 DBG("New metadata version detected");
498 consumer_stream_metadata_set_version(stream,
499 subbuffer->info.metadata.version);
500
501 if (stream->read_subbuffer_ops.reset_metadata) {
502 stream->read_subbuffer_ops.reset_metadata(stream);
503 }
504 }
505
506 static void strip_packet_header_from_subbuffer(struct stream_subbuffer *buffer)
507 {
508 /*
509 * Shrink the view so that the packet header and trailing padding are hidden.
510 */
511 size_t new_subbuf_size = buffer->info.metadata.subbuf_size - metadata_length();
512
513 buffer->buffer.buffer = lttng_buffer_view_from_view(
514 &buffer->buffer.buffer, metadata_length(), new_subbuf_size);
515
516 buffer->info.metadata.subbuf_size = new_subbuf_size;
517 /* Padding is not present in the view anymore */
518 buffer->info.metadata.padded_subbuf_size = new_subbuf_size;
519 }
520
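/*
 * CTF 1.x metadata: the packet is consumed as-is; only check whether the
 * metadata version changed.
 */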
521 static int metadata_stream_pre_consume_ctf1(
522 struct lttng_consumer_stream *stream, struct stream_subbuffer *subbuffer)
523 {
524 (void) metadata_stream_check_version(stream, subbuffer);
525 return 0;
526 }
527
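/*
 * CTF 2 metadata: in addition to the version check, strip the ring buffer
 * packet header, which is not part of the metadata stream itself.
 */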
528 static int metadata_stream_pre_consume_ctf2(
529 struct lttng_consumer_stream *stream, struct stream_subbuffer *subbuffer)
530 {
531 (void) metadata_stream_check_version(stream, subbuffer);
532 (void) strip_packet_header_from_subbuffer(subbuffer);
533 return 0;
534 }
535
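/*
 * Check whether an ongoing rotation targets a "null" trace chunk;
 * post_consume_open_new_packet() skips opening a packet in that case.
 */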
536 static
537 bool stream_is_rotating_to_null_chunk(
538 const struct lttng_consumer_stream *stream)
539 {
540 bool rotating_to_null_chunk = false;
541
542 if (stream->rotate_position == -1ULL) {
543 /* No rotation ongoing. */
544 goto end;
545 }
546
547 if (stream->trace_chunk == stream->chan->trace_chunk ||
548 !stream->chan->trace_chunk) {
549 rotating_to_null_chunk = true;
550 }
551 end:
552 return rotating_to_null_chunk;
553 }
554
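/*
 * "Open" a packet in the stream's ring buffer by flushing an empty packet and
 * comparing the produced positions sampled before and after the flush: if the
 * position moved, a packet was opened.
 */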
555 enum consumer_stream_open_packet_status consumer_stream_open_packet(
556 struct lttng_consumer_stream *stream)
557 {
558 int ret;
559 enum consumer_stream_open_packet_status status;
560 unsigned long produced_pos_before, produced_pos_after;
561
562 ret = lttng_consumer_sample_snapshot_positions(stream);
563 if (ret < 0) {
564 ERR("Failed to snapshot positions before post-rotation empty packet flush: stream id = %" PRIu64
565 ", channel name = %s, session id = %" PRIu64,
566 stream->key, stream->chan->name,
567 stream->chan->session_id);
568 status = CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR;
569 goto end;
570 }
571
572 ret = lttng_consumer_get_produced_snapshot(
573 stream, &produced_pos_before);
574 if (ret < 0) {
575 ERR("Failed to read produced position before post-rotation empty packet flush: stream id = %" PRIu64
576 ", channel name = %s, session id = %" PRIu64,
577 stream->key, stream->chan->name,
578 stream->chan->session_id);
579 status = CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR;
580 goto end;
581 }
582
583 ret = consumer_stream_flush_buffer(stream, 0);
584 if (ret) {
585 ERR("Failed to flush an empty packet at rotation point: stream id = %" PRIu64
586 ", channel name = %s, session id = %" PRIu64,
587 stream->key, stream->chan->name,
588 stream->chan->session_id);
589 status = CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR;
590 goto end;
591 }
592
593 ret = lttng_consumer_sample_snapshot_positions(stream);
594 if (ret < 0) {
595 ERR("Failed to snapshot positions after post-rotation empty packet flush: stream id = %" PRIu64
596 ", channel name = %s, session id = %" PRIu64,
597 stream->key, stream->chan->name,
598 stream->chan->session_id);
599 status = CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR;
600 goto end;
601 }
602
603 ret = lttng_consumer_get_produced_snapshot(stream, &produced_pos_after);
604 if (ret < 0) {
605 ERR("Failed to read produced position after post-rotation empty packet flush: stream id = %" PRIu64
606 ", channel name = %s, session id = %" PRIu64,
607 stream->key, stream->chan->name,
608 stream->chan->session_id);
609 status = CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR;
610 goto end;
611 }
612
613 /*
614 * Determine if the flush had an effect by comparing the produced
615 * positions before and after the flush.
616 */
617 status = produced_pos_before != produced_pos_after ?
618 CONSUMER_STREAM_OPEN_PACKET_STATUS_OPENED :
619 CONSUMER_STREAM_OPEN_PACKET_STATUS_NO_SPACE;
620 if (status == CONSUMER_STREAM_OPEN_PACKET_STATUS_OPENED) {
621 stream->opened_packet_in_current_trace_chunk = true;
622 }
623
624 end:
625 return status;
626 }
627
628 /*
629 * An attempt to open a new packet is performed after a rotation completes to
630 * get a begin timestamp as close as possible to the rotation point.
631 *
632 * However, that initial attempt at opening a packet can fail due to a full
633 * ring-buffer. In that case, a second attempt is performed after consuming
634 * a packet since that will have freed enough space in the ring-buffer.
635 */
636 static
637 int post_consume_open_new_packet(struct lttng_consumer_stream *stream,
638 const struct stream_subbuffer *subbuffer __attribute__((unused)),
639 struct lttng_consumer_local_data *ctx __attribute__((unused)))
640 {
641 int ret = 0;
642
643 if (!stream->opened_packet_in_current_trace_chunk &&
644 stream->trace_chunk &&
645 !stream_is_rotating_to_null_chunk(stream)) {
646 const enum consumer_stream_open_packet_status status =
647 consumer_stream_open_packet(stream);
648
649 switch (status) {
650 case CONSUMER_STREAM_OPEN_PACKET_STATUS_OPENED:
651 DBG("Opened a packet after consuming a packet rotation: stream id = %" PRIu64
652 ", channel name = %s, session id = %" PRIu64,
653 stream->key, stream->chan->name,
654 stream->chan->session_id);
655 stream->opened_packet_in_current_trace_chunk = true;
656 break;
657 case CONSUMER_STREAM_OPEN_PACKET_STATUS_NO_SPACE:
658 /*
659 * Can't open a packet as there is no space left.
660 * This means that new events were produced, resulting
661 * in a packet being opened, which is what we want
662 * anyhow.
663 */
664 DBG("No space left to open a packet after consuming a packet: stream id = %" PRIu64
665 ", channel name = %s, session id = %" PRIu64,
666 stream->key, stream->chan->name,
667 stream->chan->session_id);
668 stream->opened_packet_in_current_trace_chunk = true;
669 break;
670 case CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR:
671 /* Logged by callee. */
672 ret = -1;
673 goto end;
674 default:
675 abort();
676 }
677
678 stream->opened_packet_in_current_trace_chunk = true;
679 }
680
681 end:
682 return ret;
683 }
684
685 struct lttng_consumer_stream *consumer_stream_create(struct lttng_consumer_channel *channel,
686 uint64_t channel_key,
687 uint64_t stream_key,
688 const char *channel_name,
689 uint64_t relayd_id,
690 uint64_t session_id,
691 struct lttng_trace_chunk *trace_chunk,
692 int cpu,
693 int *alloc_ret,
694 enum consumer_channel_type type,
695 unsigned int monitor,
696 int trace_format)
697 {
698 int ret;
699 struct lttng_consumer_stream *stream;
700
701 stream = zmalloc<lttng_consumer_stream>();
702 if (stream == NULL) {
703 PERROR("malloc struct lttng_consumer_stream");
704 ret = -ENOMEM;
705 goto end;
706 }
707
708 rcu_read_lock();
709
710 if (trace_chunk && !lttng_trace_chunk_get(trace_chunk)) {
711 ERR("Failed to acquire trace chunk reference during the creation of a stream");
712 ret = -1;
713 goto error;
714 }
715
716 stream->send_node = CDS_LIST_HEAD_INIT(stream->send_node);
717 stream->chan = channel;
718 stream->key = stream_key;
719 stream->trace_chunk = trace_chunk;
720 stream->out_fd = -1;
721 stream->out_fd_offset = 0;
722 stream->output_written = 0;
723 stream->net_seq_idx = relayd_id;
724 stream->session_id = session_id;
725 stream->monitor = monitor;
726 stream->endpoint_status = CONSUMER_ENDPOINT_ACTIVE;
727 stream->index_file = NULL;
728 stream->last_sequence_number = -1ULL;
729 stream->rotate_position = -1ULL;
730 /* Buffer is created with an open packet. */
731 stream->opened_packet_in_current_trace_chunk = true;
732 pthread_mutex_init(&stream->lock, NULL);
733 pthread_mutex_init(&stream->metadata_timer_lock, NULL);
734
735 /* If channel is the metadata, flag this stream as metadata. */
736 if (type == CONSUMER_CHANNEL_TYPE_METADATA) {
737 stream->metadata_flag = 1;
738 /* The metadata stream name is flat (no per-CPU suffix). */
739 strncpy(stream->name, DEFAULT_METADATA_NAME, sizeof(stream->name));
740 /* Live rendez-vous point. */
741 pthread_cond_init(&stream->metadata_rdv, NULL);
742 pthread_mutex_init(&stream->metadata_rdv_lock, NULL);
743 } else {
744 /* Format stream name to <channel_name>_<cpu_number> */
745 ret = snprintf(stream->name, sizeof(stream->name), "%s_%d",
746 channel_name, cpu);
747 if (ret < 0) {
748 PERROR("snprintf stream name");
749 goto error;
750 }
751 }
752
753 switch (channel->output) {
754 case CONSUMER_CHANNEL_SPLICE:
755 stream->output = LTTNG_EVENT_SPLICE;
756 ret = utils_create_pipe(stream->splice_pipe);
757 if (ret < 0) {
758 goto error;
759 }
760 break;
761 case CONSUMER_CHANNEL_MMAP:
762 stream->output = LTTNG_EVENT_MMAP;
763 break;
764 default:
765 abort();
766 }
767
768 /* Key is always the wait_fd for streams. */
769 lttng_ht_node_init_u64(&stream->node, stream->key);
770
771 /* Init node per channel id key */
772 lttng_ht_node_init_u64(&stream->node_channel_id, channel_key);
773
774 /* Init session id node with the stream session id */
775 lttng_ht_node_init_u64(&stream->node_session_id, stream->session_id);
776
777 DBG3("Allocated stream %s (key %" PRIu64 ", chan_key %" PRIu64
778 " relayd_id %" PRIu64 ", session_id %" PRIu64,
779 stream->name, stream->key, channel_key,
780 stream->net_seq_idx, stream->session_id);
781
782 rcu_read_unlock();
783
784 lttng_dynamic_array_init(&stream->read_subbuffer_ops.post_consume_cbs,
785 sizeof(post_consume_cb), NULL);
786
787 if (type == CONSUMER_CHANNEL_TYPE_METADATA) {
788 stream->read_subbuffer_ops.lock =
789 consumer_stream_metadata_lock_all;
790 stream->read_subbuffer_ops.unlock =
791 consumer_stream_metadata_unlock_all;
792 stream->read_subbuffer_ops.assert_locked =
793 consumer_stream_metadata_assert_locked_all;
794 if (trace_format == 1) {
795 stream->read_subbuffer_ops.pre_consume_subbuffer =
796 metadata_stream_pre_consume_ctf1;
797 } else if (trace_format == 2) {
798 stream->read_subbuffer_ops.pre_consume_subbuffer =
799 metadata_stream_pre_consume_ctf2;
800 } else {
801 abort();
802 }
803 } else {
804 const post_consume_cb post_consume_index_op = channel->is_live ?
805 consumer_stream_sync_metadata_index :
806 consumer_stream_send_index;
807 const post_consume_cb post_consume_open_new_packet_ =
808 post_consume_open_new_packet;
809
810 ret = lttng_dynamic_array_add_element(
811 &stream->read_subbuffer_ops.post_consume_cbs,
812 &post_consume_index_op);
813 if (ret) {
814 PERROR("Failed to add `send index` callback to stream's post consumption callbacks");
815 goto error;
816 }
817
818 ret = lttng_dynamic_array_add_element(
819 &stream->read_subbuffer_ops.post_consume_cbs,
820 &post_consume_open_new_packet_);
821 if (ret) {
822 PERROR("Failed to add `open new packet` callback to stream's post consumption callbacks");
823 goto error;
824 }
825
826 stream->read_subbuffer_ops.lock = consumer_stream_data_lock_all;
827 stream->read_subbuffer_ops.unlock =
828 consumer_stream_data_unlock_all;
829 stream->read_subbuffer_ops.assert_locked =
830 consumer_stream_data_assert_locked_all;
831 stream->read_subbuffer_ops.pre_consume_subbuffer =
832 consumer_stream_update_stats;
833 }
834
835 if (channel->output == CONSUMER_CHANNEL_MMAP) {
836 stream->read_subbuffer_ops.consume_subbuffer =
837 consumer_stream_consume_mmap;
838 } else {
839 stream->read_subbuffer_ops.consume_subbuffer =
840 consumer_stream_consume_splice;
841 }
842
843 return stream;
844
845 error:
846 rcu_read_unlock();
847 lttng_trace_chunk_put(stream->trace_chunk);
848 lttng_dynamic_array_reset(&stream->read_subbuffer_ops.post_consume_cbs);
849 free(stream);
850 end:
851 if (alloc_ret) {
852 *alloc_ret = ret;
853 }
854 return NULL;
855 }
856
857 /*
858 * Close stream on the relayd side. This call can destroy a relayd if the
859 * conditions are met.
860 *
861 * An RCU read side lock MUST be acquired if the relayd object was looked up in
862 * a hash table before calling this.
863 */
864 void consumer_stream_relayd_close(struct lttng_consumer_stream *stream,
865 struct consumer_relayd_sock_pair *relayd)
866 {
867 int ret;
868
869 LTTNG_ASSERT(stream);
870 LTTNG_ASSERT(relayd);
871
872 if (stream->sent_to_relayd) {
873 uatomic_dec(&relayd->refcount);
874 LTTNG_ASSERT(uatomic_read(&relayd->refcount) >= 0);
875 }
876
877 /* Closing streams requires to lock the control socket. */
878 pthread_mutex_lock(&relayd->ctrl_sock_mutex);
879 ret = relayd_send_close_stream(&relayd->control_sock,
880 stream->relayd_stream_id,
881 stream->next_net_seq_num - 1);
882 pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
883 if (ret < 0) {
884 ERR("Relayd send close stream failed. Cleaning up relayd %" PRIu64 ".", relayd->net_seq_idx);
885 lttng_consumer_cleanup_relayd(relayd);
886 }
887
888 /* If both conditions are met, destroy the relayd. */
889 if (uatomic_read(&relayd->refcount) == 0 &&
890 uatomic_read(&relayd->destroy_flag)) {
891 consumer_destroy_relayd(relayd);
892 }
893 stream->net_seq_idx = (uint64_t) -1ULL;
894 stream->sent_to_relayd = 0;
895 }
896
897 /*
898 * Close stream's file descriptors and, if needed, close stream also on the
899 * relayd side.
900 *
901 * The consumer data lock MUST be acquired.
902 * The stream lock MUST be acquired.
903 */
904 void consumer_stream_close(struct lttng_consumer_stream *stream)
905 {
906 int ret;
907 struct consumer_relayd_sock_pair *relayd;
908
909 LTTNG_ASSERT(stream);
910
911 switch (the_consumer_data.type) {
912 case LTTNG_CONSUMER_KERNEL:
913 if (stream->mmap_base != NULL) {
914 ret = munmap(stream->mmap_base, stream->mmap_len);
915 if (ret != 0) {
916 PERROR("munmap");
917 }
918 }
919
920 if (stream->wait_fd >= 0) {
921 ret = close(stream->wait_fd);
922 if (ret) {
923 PERROR("close");
924 }
925 stream->wait_fd = -1;
926 }
927 if (stream->chan->output == CONSUMER_CHANNEL_SPLICE) {
928 utils_close_pipe(stream->splice_pipe);
929 }
930 break;
931 case LTTNG_CONSUMER32_UST:
932 case LTTNG_CONSUMER64_UST:
933 {
934 /*
935 * Special case for the metadata since the wait fd is an internal pipe
936 * polled in the metadata thread.
937 */
938 if (stream->metadata_flag && stream->chan->monitor) {
939 int rpipe = stream->ust_metadata_poll_pipe[0];
940
941 /*
942 * This will stop the channel timer, if there is one, and close the write side
943 * of the metadata poll pipe.
944 */
945 lttng_ustconsumer_close_metadata(stream->chan);
946 if (rpipe >= 0) {
947 ret = close(rpipe);
948 if (ret < 0) {
949 PERROR("closing metadata pipe read side");
950 }
951 stream->ust_metadata_poll_pipe[0] = -1;
952 }
953 }
954 break;
955 }
956 default:
957 ERR("Unknown consumer_data type");
958 abort();
959 }
960
961 /* Close output fd. Could be a socket or local file at this point. */
962 if (stream->out_fd >= 0) {
963 ret = close(stream->out_fd);
964 if (ret) {
965 PERROR("close");
966 }
967 stream->out_fd = -1;
968 }
969
970 if (stream->index_file) {
971 lttng_index_file_put(stream->index_file);
972 stream->index_file = NULL;
973 }
974
975 lttng_trace_chunk_put(stream->trace_chunk);
976 stream->trace_chunk = NULL;
977
978 /* Check and cleanup relayd if needed. */
979 rcu_read_lock();
980 relayd = consumer_find_relayd(stream->net_seq_idx);
981 if (relayd != NULL) {
982 consumer_stream_relayd_close(stream, relayd);
983 }
984 rcu_read_unlock();
985 }
986
987 /*
988 * Delete the stream from all possible hash tables.
989 *
990 * The consumer data lock MUST be acquired.
991 * The stream lock MUST be acquired.
992 */
993 void consumer_stream_delete(struct lttng_consumer_stream *stream,
994 struct lttng_ht *ht)
995 {
996 int ret;
997 struct lttng_ht_iter iter;
998
999 LTTNG_ASSERT(stream);
1000 /* Must NEVER be called on a stream that is not in monitor mode. */
1001 LTTNG_ASSERT(stream->chan->monitor);
1002
1003 rcu_read_lock();
1004
1005 if (ht) {
1006 iter.iter.node = &stream->node.node;
1007 ret = lttng_ht_del(ht, &iter);
1008 LTTNG_ASSERT(!ret);
1009 }
1010
1011 /* Delete from stream per channel ID hash table. */
1012 iter.iter.node = &stream->node_channel_id.node;
1013 /*
1014 * The returned value is of no importance. Even if the node is NOT in the
1015 * hash table, we continue since we may have been called by a code path
1016 * that did not add the stream to all hash tables. The same goes for the
1017 * next ht del call.
1018 */
1019 (void) lttng_ht_del(the_consumer_data.stream_per_chan_id_ht, &iter);
1020
1021 /* Delete from the global stream list. */
1022 iter.iter.node = &stream->node_session_id.node;
1023 /* See the previous ht del on why we ignore the returned value. */
1024 (void) lttng_ht_del(the_consumer_data.stream_list_ht, &iter);
1025
1026 rcu_read_unlock();
1027
1028 if (!stream->metadata_flag) {
1029 /* Decrement the stream count of the global consumer data. */
1030 LTTNG_ASSERT(the_consumer_data.stream_count > 0);
1031 the_consumer_data.stream_count--;
1032 }
1033 }
1034
1035 /*
1036 * Free the given stream within a RCU call.
1037 */
1038 void consumer_stream_free(struct lttng_consumer_stream *stream)
1039 {
1040 LTTNG_ASSERT(stream);
1041
1042 metadata_bucket_destroy(stream->metadata_bucket);
1043 call_rcu(&stream->node.head, free_stream_rcu);
1044 }
1045
1046 /*
1047 * Destroy the stream's buffers of the tracer.
1048 */
1049 void consumer_stream_destroy_buffers(struct lttng_consumer_stream *stream)
1050 {
1051 LTTNG_ASSERT(stream);
1052
1053 switch (the_consumer_data.type) {
1054 case LTTNG_CONSUMER_KERNEL:
1055 break;
1056 case LTTNG_CONSUMER32_UST:
1057 case LTTNG_CONSUMER64_UST:
1058 lttng_ustconsumer_del_stream(stream);
1059 break;
1060 default:
1061 ERR("Unknown consumer_data type");
1062 abort();
1063 }
1064 }
1065
1066 /*
1067 * Destroy and close an already created stream.
1068 */
1069 static void destroy_close_stream(struct lttng_consumer_stream *stream)
1070 {
1071 LTTNG_ASSERT(stream);
1072
1073 DBG("Consumer stream destroy monitored key: %" PRIu64, stream->key);
1074
1075 /* Destroy tracer buffers of the stream. */
1076 consumer_stream_destroy_buffers(stream);
1077 /* Close down everything including the relayd if one. */
1078 consumer_stream_close(stream);
1079 }
1080
1081 /*
1082 * Decrement the stream's channel refcount and if down to 0, return the channel
1083 * pointer so it can be destroyed by the caller or NULL if not.
1084 */
1085 static struct lttng_consumer_channel *unref_channel(
1086 struct lttng_consumer_stream *stream)
1087 {
1088 struct lttng_consumer_channel *free_chan = NULL;
1089
1090 LTTNG_ASSERT(stream);
1091 LTTNG_ASSERT(stream->chan);
1092
1093 /* Update refcount of channel and see if we need to destroy it. */
1094 if (!uatomic_sub_return(&stream->chan->refcount, 1)
1095 && !uatomic_read(&stream->chan->nb_init_stream_left)) {
1096 free_chan = stream->chan;
1097 }
1098
1099 return free_chan;
1100 }
1101
1102 /*
1103 * Destroy a stream completely. This will delete, close and free the stream.
1104 * Once this returns, the stream is no longer usable. Its channel may get destroyed
1105 * if conditions are met for a monitored stream.
1106 *
1107 * This MUST be called WITHOUT the consumer data and stream lock acquired if
1108 * the stream is in _monitor_ mode; otherwise, it does not matter.
1109 */
1110 void consumer_stream_destroy(struct lttng_consumer_stream *stream,
1111 struct lttng_ht *ht)
1112 {
1113 LTTNG_ASSERT(stream);
1114
1115 cds_list_del_init(&stream->send_node);
1116
1117 /* Stream is in monitor mode. */
1118 if (stream->monitor) {
1119 struct lttng_consumer_channel *free_chan = NULL;
1120
1121 /*
1122 * This means that the stream was successfully removed from the streams
1123 * list of the channel and sent to the right thread managing this
1124 * stream, thus making it globally visible.
1125 */
1126 if (stream->globally_visible) {
1127 pthread_mutex_lock(&the_consumer_data.lock);
1128 pthread_mutex_lock(&stream->chan->lock);
1129
1130 pthread_mutex_lock(&stream->lock);
1131 /* Remove every reference of the stream in the consumer. */
1132 consumer_stream_delete(stream, ht);
1133
1134
1135 destroy_close_stream(stream);
1136
1137 /* Update channel's refcount of the stream. */
1138 free_chan = unref_channel(stream);
1139
1140 /* Indicates that the consumer data state MUST be updated after this. */
1141 the_consumer_data.need_update = 1;
1142
1143 pthread_mutex_unlock(&stream->lock);
1144 pthread_mutex_unlock(&stream->chan->lock);
1145 pthread_mutex_unlock(&the_consumer_data.lock);
1146 } else {
1147 /*
1148 * If the stream is not visible globally, this needs to be done
1149 * outside of the consumer data lock section.
1150 */
1151 destroy_close_stream(stream);
1152 free_chan = unref_channel(stream);
1153 }
1154
1155 if (free_chan) {
1156 consumer_del_channel(free_chan);
1157 }
1158 } else {
1159 destroy_close_stream(stream);
1160 }
1161
1162 /* Free stream within a RCU call. */
1163 lttng_trace_chunk_put(stream->trace_chunk);
1164 stream->trace_chunk = NULL;
1165 lttng_dynamic_array_reset(&stream->read_subbuffer_ops.post_consume_cbs);
1166 consumer_stream_free(stream);
1167 }
1168
1169 /*
1170 * Write index of a specific stream either on the relayd or local disk.
1171 *
1172 * Return 0 on success or else a negative value.
1173 */
1174 int consumer_stream_write_index(struct lttng_consumer_stream *stream,
1175 struct ctf_packet_index *element)
1176 {
1177 int ret;
1178
1179 LTTNG_ASSERT(stream);
1180 LTTNG_ASSERT(element);
1181
1182 rcu_read_lock();
1183 if (stream->net_seq_idx != (uint64_t) -1ULL) {
1184 struct consumer_relayd_sock_pair *relayd;
1185 relayd = consumer_find_relayd(stream->net_seq_idx);
1186 if (relayd) {
1187 pthread_mutex_lock(&relayd->ctrl_sock_mutex);
1188 ret = relayd_send_index(&relayd->control_sock, element,
1189 stream->relayd_stream_id, stream->next_net_seq_num - 1);
1190 if (ret < 0) {
1191 /*
1192 * Communication error with lttng-relayd,
1193 * perform cleanup now
1194 */
1195 ERR("Relayd send index failed. Cleaning up relayd %" PRIu64 ".", relayd->net_seq_idx);
1196 lttng_consumer_cleanup_relayd(relayd);
1197 ret = -1;
1198 }
1199 pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
1200 } else {
1201 ERR("Stream %" PRIu64 " relayd ID %" PRIu64 " unknown. Can't write index.",
1202 stream->key, stream->net_seq_idx);
1203 ret = -1;
1204 }
1205 } else {
1206 if (lttng_index_file_write(stream->index_file, element)) {
1207 ret = -1;
1208 } else {
1209 ret = 0;
1210 }
1211 }
1212 if (ret < 0) {
1213 goto error;
1214 }
1215
1216 error:
1217 rcu_read_unlock();
1218 return ret;
1219 }
1220
1221 int consumer_stream_create_output_files(struct lttng_consumer_stream *stream,
1222 bool create_index)
1223 {
1224 int ret;
1225 enum lttng_trace_chunk_status chunk_status;
1226 const int flags = O_WRONLY | O_CREAT | O_TRUNC;
1227 const mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP;
1228 char stream_path[LTTNG_PATH_MAX];
1229
1230 ASSERT_LOCKED(stream->lock);
1231 LTTNG_ASSERT(stream->trace_chunk);
1232
1233 ret = utils_stream_file_path(stream->chan->pathname, stream->name,
1234 stream->chan->tracefile_size,
1235 stream->tracefile_count_current, NULL,
1236 stream_path, sizeof(stream_path));
1237 if (ret < 0) {
1238 goto end;
1239 }
1240
1241 if (stream->out_fd >= 0) {
1242 ret = close(stream->out_fd);
1243 if (ret < 0) {
1244 PERROR("Failed to close stream file \"%s\"",
1245 stream->name);
1246 goto end;
1247 }
1248 stream->out_fd = -1;
1249 }
1250
1251 DBG("Opening stream output file \"%s\"", stream_path);
1252 chunk_status = lttng_trace_chunk_open_file(stream->trace_chunk, stream_path,
1253 flags, mode, &stream->out_fd, false);
1254 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
1255 ERR("Failed to open stream file \"%s\"", stream->name);
1256 ret = -1;
1257 goto end;
1258 }
1259
1260 if (!stream->metadata_flag && (create_index || stream->index_file)) {
1261 if (stream->index_file) {
1262 lttng_index_file_put(stream->index_file);
1263 }
1264 chunk_status = lttng_index_file_create_from_trace_chunk(
1265 stream->trace_chunk,
1266 stream->chan->pathname,
1267 stream->name,
1268 stream->chan->tracefile_size,
1269 stream->tracefile_count_current,
1270 CTF_INDEX_MAJOR, CTF_INDEX_MINOR,
1271 false, &stream->index_file);
1272 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
1273 ret = -1;
1274 goto end;
1275 }
1276 }
1277
1278 /* Reset current size because we just performed a rotation. */
1279 stream->tracefile_size_current = 0;
1280 stream->out_fd_offset = 0;
1281 end:
1282 return ret;
1283 }
1284
1285 int consumer_stream_rotate_output_files(struct lttng_consumer_stream *stream)
1286 {
1287 int ret;
1288
1289 stream->tracefile_count_current++;
1290 if (stream->chan->tracefile_count > 0) {
1291 stream->tracefile_count_current %=
1292 stream->chan->tracefile_count;
1293 }
1294
1295 DBG("Rotating output files of stream \"%s\"", stream->name);
1296 ret = consumer_stream_create_output_files(stream, true);
1297 if (ret) {
1298 goto end;
1299 }
1300
1301 end:
1302 return ret;
1303 }
1304
1305 bool consumer_stream_is_deleted(struct lttng_consumer_stream *stream)
1306 {
1307 /*
1308 * This function does not take a const stream since
1309 * cds_lfht_is_node_deleted was not const before liburcu 0.12.
1310 */
1311 LTTNG_ASSERT(stream);
1312 return cds_lfht_is_node_deleted(&stream->node.node);
1313 }
1314
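/*
 * Flush callback of the metadata bucket: write the bucketized metadata
 * through the regular mmap consumption path.
 */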
1315 static ssize_t metadata_bucket_flush(
1316 const struct stream_subbuffer *buffer, void *data)
1317 {
1318 ssize_t ret;
1319 struct lttng_consumer_stream *stream = (lttng_consumer_stream *) data;
1320
1321 ret = consumer_stream_consume_mmap(NULL, stream, buffer);
1322 if (ret < 0) {
1323 goto end;
1324 }
1325 end:
1326 return ret;
1327 }
1328
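/*
 * Accumulate the metadata sub-buffer into the stream's metadata bucket
 * instead of writing it out immediately; the whole view is reported as
 * consumed on success.
 */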
1329 static ssize_t metadata_bucket_consume(
1330 struct lttng_consumer_local_data *unused __attribute__((unused)),
1331 struct lttng_consumer_stream *stream,
1332 const struct stream_subbuffer *subbuffer)
1333 {
1334 ssize_t ret;
1335 enum metadata_bucket_status status;
1336
1337 status = metadata_bucket_fill(stream->metadata_bucket, subbuffer);
1338 switch (status) {
1339 case METADATA_BUCKET_STATUS_OK:
1340 /* Return consumed size. */
1341 ret = subbuffer->buffer.buffer.size;
1342 break;
1343 default:
1344 ret = -1;
1345 }
1346
1347 return ret;
1348 }
1349
1350 int consumer_stream_enable_metadata_bucketization(
1351 struct lttng_consumer_stream *stream)
1352 {
1353 int ret = 0;
1354
1355 LTTNG_ASSERT(stream->metadata_flag);
1356 LTTNG_ASSERT(!stream->metadata_bucket);
1357 LTTNG_ASSERT(stream->chan->output == CONSUMER_CHANNEL_MMAP);
1358
1359 stream->metadata_bucket = metadata_bucket_create(
1360 metadata_bucket_flush, stream);
1361 if (!stream->metadata_bucket) {
1362 ret = -1;
1363 goto end;
1364 }
1365
1366 stream->read_subbuffer_ops.consume_subbuffer = metadata_bucket_consume;
1367 end:
1368 return ret;
1369 }
1370
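/*
 * Record a new metadata version: flag the stream for a metadata reset and
 * drop any metadata accumulated in the bucket for the previous version.
 */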
1371 void consumer_stream_metadata_set_version(
1372 struct lttng_consumer_stream *stream, uint64_t new_version)
1373 {
1374 LTTNG_ASSERT(new_version > stream->metadata_version);
1375 stream->metadata_version = new_version;
1376 stream->reset_metadata_flag = 1;
1377
1378 if (stream->metadata_bucket) {
1379 metadata_bucket_reset(stream->metadata_bucket);
1380 }
1381 }
1382
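/*
 * Flush the stream's ring buffer. When producer_active is false, a
 * "flush empty" is attempted so that an empty packet can be produced, with a
 * fall-back to a regular flush when lttng-modules does not implement it.
 */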
1383 int consumer_stream_flush_buffer(struct lttng_consumer_stream *stream,
1384 bool producer_active)
1385 {
1386 int ret = 0;
1387
1388 switch (the_consumer_data.type) {
1389 case LTTNG_CONSUMER_KERNEL:
1390 if (producer_active) {
1391 ret = kernctl_buffer_flush(stream->wait_fd);
1392 if (ret < 0) {
1393 ERR("Failed to flush kernel stream");
1394 goto end;
1395 }
1396 } else {
1397 ret = kernctl_buffer_flush_empty(stream->wait_fd);
1398 if (ret < 0) {
1399 /*
1400 * Doing a buffer flush which does not take into
1401 * account empty packets. This is not perfect,
1402 * but required as a fall-back when
1403 * "flush_empty" is not implemented by
1404 * lttng-modules.
1405 */
1406 ret = kernctl_buffer_flush(stream->wait_fd);
1407 if (ret < 0) {
1408 ERR("Failed to flush kernel stream");
1409 goto end;
1410 }
1411 }
1412 }
1413 break;
1414 case LTTNG_CONSUMER32_UST:
1415 case LTTNG_CONSUMER64_UST:
1416 ret = lttng_ustconsumer_flush_buffer(stream, (int) producer_active);
1417 break;
1418 default:
1419 ERR("Unknown consumer_data type");
1420 abort();
1421 }
1422
1423 end:
1424 return ret;
1425 }