Fix: consumer-stream: live viewers observe timestamps going backwards
lttng-tools.git: src/common/consumer/consumer-stream.c
1 /*
2 * Copyright (C) 2011 Julien Desfossez <julien.desfossez@polymtl.ca>
3 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
5 *
6 * SPDX-License-Identifier: GPL-2.0-only
7 *
8 */
9
10 #define _LGPL_SOURCE
11 #include <assert.h>
12 #include <inttypes.h>
13 #include <sys/mman.h>
14 #include <unistd.h>
15
16 #include <common/common.h>
17 #include <common/consumer/consumer-timer.h>
18 #include <common/consumer/consumer.h>
19 #include <common/consumer/metadata-bucket.h>
20 #include <common/index/index.h>
21 #include <common/kernel-consumer/kernel-consumer.h>
22 #include <common/macros.h>
23 #include <common/relayd/relayd.h>
24 #include <common/ust-consumer/ust-consumer.h>
25 #include <common/utils.h>
26
27 #include "consumer-stream.h"
28
29 /*
30 * RCU call to free stream. MUST only be used with call_rcu().
31 */
32 static void free_stream_rcu(struct rcu_head *head)
33 {
34 struct lttng_ht_node_u64 *node =
35 caa_container_of(head, struct lttng_ht_node_u64, head);
36 struct lttng_consumer_stream *stream =
37 caa_container_of(node, struct lttng_consumer_stream, node);
38
39 pthread_mutex_destroy(&stream->lock);
40 free(stream);
41 }
42
43 static void consumer_stream_data_lock_all(struct lttng_consumer_stream *stream)
44 {
45 pthread_mutex_lock(&stream->chan->lock);
46 pthread_mutex_lock(&stream->lock);
47 }
48
49 static void consumer_stream_data_unlock_all(struct lttng_consumer_stream *stream)
50 {
51 pthread_mutex_unlock(&stream->lock);
52 pthread_mutex_unlock(&stream->chan->lock);
53 }
54
55 static void consumer_stream_data_assert_locked_all(struct lttng_consumer_stream *stream)
56 {
57 ASSERT_LOCKED(stream->lock);
58 ASSERT_LOCKED(stream->chan->lock);
59 }
60
61 static void consumer_stream_metadata_lock_all(struct lttng_consumer_stream *stream)
62 {
63 consumer_stream_data_lock_all(stream);
64 pthread_mutex_lock(&stream->metadata_rdv_lock);
65 }
66
67 static void consumer_stream_metadata_unlock_all(struct lttng_consumer_stream *stream)
68 {
69 pthread_mutex_unlock(&stream->metadata_rdv_lock);
70 consumer_stream_data_unlock_all(stream);
71 }
72
73 static void consumer_stream_metadata_assert_locked_all(struct lttng_consumer_stream *stream)
74 {
75 ASSERT_LOCKED(stream->metadata_rdv_lock);
76 consumer_stream_data_assert_locked_all(stream);
77 }
78
79 /* Only used for data streams. */
80 static int consumer_stream_update_stats(struct lttng_consumer_stream *stream,
81 const struct stream_subbuffer *subbuf)
82 {
83 int ret = 0;
84 uint64_t sequence_number;
85 const uint64_t discarded_events = subbuf->info.data.events_discarded;
86
87 if (!subbuf->info.data.sequence_number.is_set) {
88 /* Command not supported by the tracer. */
89 sequence_number = -1ULL;
90 stream->sequence_number_unavailable = true;
91 } else {
92 sequence_number = subbuf->info.data.sequence_number.value;
93 }
94
95 /*
96 * Start the sequence when we extract the first packet in case we don't
97 * start at 0 (for example if a consumer is not connected to the
98 * session immediately after the beginning).
99 */
100 if (stream->last_sequence_number == -1ULL) {
101 stream->last_sequence_number = sequence_number;
102 } else if (sequence_number > stream->last_sequence_number) {
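/*
 * A gap between the previous and current sequence numbers means that
 * packets were lost; every skipped sequence number counts as one lost
 * packet.
 */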
103 stream->chan->lost_packets += sequence_number -
104 stream->last_sequence_number - 1;
105 } else {
106 /* seq <= last_sequence_number */
107 ERR("Sequence number inconsistent : prev = %" PRIu64
108 ", current = %" PRIu64,
109 stream->last_sequence_number, sequence_number);
110 ret = -1;
111 goto end;
112 }
113 stream->last_sequence_number = sequence_number;
114
115 if (discarded_events < stream->last_discarded_events) {
116 /*
117 * Overflow has occurred. We assume only one wrap-around
118 * has occurred.
119 */
120 stream->chan->discarded_events +=
121 (1ULL << (CAA_BITS_PER_LONG - 1)) -
122 stream->last_discarded_events +
123 discarded_events;
124 } else {
125 stream->chan->discarded_events += discarded_events -
126 stream->last_discarded_events;
127 }
128 stream->last_discarded_events = discarded_events;
129 ret = 0;
130
131 end:
132 return ret;
133 }
134
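/*
 * Fill a CTF packet index entry from the sub-buffer's packet
 * information. All fields are stored in big endian, as expected by the
 * index file format.
 */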
135 static
136 void ctf_packet_index_populate(struct ctf_packet_index *index,
137 off_t offset, const struct stream_subbuffer *subbuffer)
138 {
139 *index = (typeof(*index)){
140 .offset = htobe64(offset),
141 .packet_size = htobe64(subbuffer->info.data.packet_size),
142 .content_size = htobe64(subbuffer->info.data.content_size),
143 .timestamp_begin = htobe64(
144 subbuffer->info.data.timestamp_begin),
145 .timestamp_end = htobe64(
146 subbuffer->info.data.timestamp_end),
147 .events_discarded = htobe64(
148 subbuffer->info.data.events_discarded),
149 .stream_id = htobe64(subbuffer->info.data.stream_id),
150 .stream_instance_id = htobe64(
151 subbuffer->info.data.stream_instance_id.is_set ?
152 subbuffer->info.data.stream_instance_id.value : -1ULL),
153 .packet_seq_num = htobe64(
154 subbuffer->info.data.sequence_number.is_set ?
155 subbuffer->info.data.sequence_number.value : -1ULL),
156 };
157 }
158
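/*
 * Consume a sub-buffer through the mmap interface and return the number
 * of bytes written, or a negative value on error.
 */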
159 static ssize_t consumer_stream_consume_mmap(
160 struct lttng_consumer_local_data *ctx,
161 struct lttng_consumer_stream *stream,
162 const struct stream_subbuffer *subbuffer)
163 {
164 const unsigned long padding_size =
165 subbuffer->info.data.padded_subbuf_size -
166 subbuffer->info.data.subbuf_size;
167 const ssize_t written_bytes = lttng_consumer_on_read_subbuffer_mmap(
168 stream, &subbuffer->buffer.buffer, padding_size);
169
170 if (stream->net_seq_idx == -1ULL) {
171 /*
172 * When writing on disk, check that only the subbuffer (no
173 * padding) was written to disk.
174 */
175 if (written_bytes != subbuffer->info.data.padded_subbuf_size) {
176 DBG("Failed to write the entire padded subbuffer on disk (written_bytes: %zd, padded subbuffer size %lu)",
177 written_bytes,
178 subbuffer->info.data.padded_subbuf_size);
179 }
180 } else {
181 /*
182 * When streaming over the network, check that the entire
183 * subbuffer including padding was successfully written.
184 */
185 if (written_bytes != subbuffer->info.data.subbuf_size) {
186 DBG("Failed to write only the subbuffer over the network (written_bytes: %zd, subbuffer size %lu)",
187 written_bytes,
188 subbuffer->info.data.subbuf_size);
189 }
190 }
191
192 /*
193 * If `lttng_consumer_on_read_subbuffer_mmap()` returned an error, log it
194 * and pass it along to the caller.
195 */
196 if (written_bytes < 0) {
197 ERR("Error reading mmap subbuffer: %zd", written_bytes);
198 }
199
200 return written_bytes;
201 }
202
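/*
 * Consume a sub-buffer through the splice interface and return the
 * number of bytes written, or a negative value on error.
 */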
203 static ssize_t consumer_stream_consume_splice(
204 struct lttng_consumer_local_data *ctx,
205 struct lttng_consumer_stream *stream,
206 const struct stream_subbuffer *subbuffer)
207 {
208 const ssize_t written_bytes = lttng_consumer_on_read_subbuffer_splice(
209 ctx, stream, subbuffer->info.data.padded_subbuf_size, 0);
210
211 if (written_bytes != subbuffer->info.data.padded_subbuf_size) {
212 DBG("Failed to write the entire padded subbuffer (written_bytes: %zd, padded subbuffer size %lu)",
213 written_bytes,
214 subbuffer->info.data.padded_subbuf_size);
215 }
216
217 /*
218 * If `lttng_consumer_on_read_subbuffer_splice()` returned an error, log
219 * it and pass it along to the caller.
220 */
221 if (written_bytes < 0) {
222 ERR("Error reading splice subbuffer: %zd", written_bytes);
223 }
224
225 return written_bytes;
226 }
227
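/*
 * Populate and send (or write locally) the index entry describing the
 * sub-buffer that was just consumed.
 */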
228 static int consumer_stream_send_index(
229 struct lttng_consumer_stream *stream,
230 const struct stream_subbuffer *subbuffer,
231 struct lttng_consumer_local_data *ctx)
232 {
233 off_t packet_offset = 0;
234 struct ctf_packet_index index = {};
235
236 /*
237 * This is called after consuming the sub-buffer; subtract the
238 * effect of this sub-buffer from the offset.
239 */
240 if (stream->net_seq_idx == (uint64_t) -1ULL) {
241 packet_offset = stream->out_fd_offset -
242 subbuffer->info.data.padded_subbuf_size;
243 }
244
245 ctf_packet_index_populate(&index, packet_offset, subbuffer);
246 return consumer_stream_write_index(stream, &index);
247 }
248
249 /*
250 * Actually do the metadata sync using the given metadata stream.
251 *
252 * Return 0 on success or a negative value on error. ENODATA may also be
253 * returned to indicate that no metadata is available for that stream.
254 */
255 static int do_sync_metadata(struct lttng_consumer_stream *metadata,
256 struct lttng_consumer_local_data *ctx)
257 {
258 int ret;
259 enum sync_metadata_status status;
260
261 assert(metadata);
262 assert(metadata->metadata_flag);
263 assert(ctx);
264
265 /*
266 * In UST, since we have to write the metadata from the cache packet
267 * by packet, we might need to start this procedure multiple times
268 * until all the metadata from the cache has been extracted.
269 */
270 do {
271 /*
272 * Steps :
273 * - Lock the metadata stream
274 * - Check if metadata stream node was deleted before locking.
275 * - if yes, release and return success
276 * - Check if new metadata is ready (flush + snapshot pos)
277 * - If nothing: release and return.
278 * - Lock the metadata_rdv_lock
279 * - Unlock the metadata stream
280 * - cond_wait on metadata_rdv to wait for the wakeup from the
281 * metadata thread
282 * - Unlock the metadata_rdv_lock
283 */
284 pthread_mutex_lock(&metadata->lock);
285
286 /*
287 * There is a possibility that we were able to acquire a reference on the
288 * stream from the RCU hash table but between then and now, the node might
289 * have been deleted just before the lock is acquired. Thus, after locking,
290 * we make sure the metadata node has not been deleted, as deletion would
291 * mean that the buffers are closed.
292 *
293 * In that case, there is no need to sync the metadata, so a success code
294 * is returned.
295 */
296 ret = cds_lfht_is_node_deleted(&metadata->node.node);
297 if (ret) {
298 ret = 0;
299 goto end_unlock_mutex;
300 }
301
302 switch (ctx->type) {
303 case LTTNG_CONSUMER_KERNEL:
304 /*
305 * Empty the metadata cache and flush the current stream.
306 */
307 status = lttng_kconsumer_sync_metadata(metadata);
308 break;
309 case LTTNG_CONSUMER32_UST:
310 case LTTNG_CONSUMER64_UST:
311 /*
312 * Ask the sessiond if we have new metadata waiting and update the
313 * consumer metadata cache.
314 */
315 status = lttng_ustconsumer_sync_metadata(ctx, metadata);
316 break;
317 default:
318 abort();
319 }
320
321 switch (status) {
322 case SYNC_METADATA_STATUS_NEW_DATA:
323 break;
324 case SYNC_METADATA_STATUS_NO_DATA:
325 ret = 0;
326 goto end_unlock_mutex;
327 case SYNC_METADATA_STATUS_ERROR:
328 ret = -1;
329 goto end_unlock_mutex;
330 default:
331 abort();
332 }
333
334 /*
335 * At this point, new metadata has been flushed, so we wait on the
336 * rendez-vous point for the metadata thread to wake us up once it
337 * has finished consuming the metadata, and then continue execution.
338 */
339
340 pthread_mutex_lock(&metadata->metadata_rdv_lock);
341
342 /*
343 * Release metadata stream lock so the metadata thread can process it.
344 */
345 pthread_mutex_unlock(&metadata->lock);
346
347 /*
348 * Wait on the rendez-vous point. Once woken up, it means the metadata was
349 * consumed and thus synchronization is achieved.
350 */
351 pthread_cond_wait(&metadata->metadata_rdv, &metadata->metadata_rdv_lock);
352 pthread_mutex_unlock(&metadata->metadata_rdv_lock);
353 } while (status == SYNC_METADATA_STATUS_NEW_DATA);
354
355 /* Success */
356 return 0;
357
358 end_unlock_mutex:
359 pthread_mutex_unlock(&metadata->lock);
360 return ret;
361 }
362
363 /*
364 * Synchronize the metadata using a given session ID. A successful acquisition
365 * of a metadata stream will trigger a request to the session daemon and a
366 * snapshot so the metadata thread can consume it.
367 *
368 * This function call is a rendez-vous point between the metadata thread and
369 * the data thread.
370 *
371 * Return 0 on success or else a negative value.
372 */
373 int consumer_stream_sync_metadata(struct lttng_consumer_local_data *ctx,
374 uint64_t session_id)
375 {
376 int ret;
377 struct lttng_consumer_stream *stream = NULL;
378 struct lttng_ht_iter iter;
379 struct lttng_ht *ht;
380
381 assert(ctx);
382
383 /* Ease our life a bit. */
384 ht = consumer_data.stream_list_ht;
385
386 rcu_read_lock();
387
388 /* Search the metadata associated with the session id of the given stream. */
389
390 cds_lfht_for_each_entry_duplicate(ht->ht,
391 ht->hash_fct(&session_id, lttng_ht_seed), ht->match_fct,
392 &session_id, &iter.iter, stream, node_session_id.node) {
393 if (!stream->metadata_flag) {
394 continue;
395 }
396
397 ret = do_sync_metadata(stream, ctx);
398 if (ret < 0) {
399 goto end;
400 }
401 }
402
403 /*
404 * Force the return code to 0 (success) since ret might be ENODATA, which is
405 * not an error but rather an indication that we should come back later.
406 */
407 ret = 0;
408
409 end:
410 rcu_read_unlock();
411 return ret;
412 }
413
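/*
 * Post-consumption operation used by live sessions: wait for the metadata
 * to be pushed before sending the index of the data packet that was just
 * consumed.
 */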
414 static int consumer_stream_sync_metadata_index(
415 struct lttng_consumer_stream *stream,
416 const struct stream_subbuffer *subbuffer,
417 struct lttng_consumer_local_data *ctx)
418 {
419 bool missed_metadata_flush;
420 int ret;
421
422 /* Block until all the metadata is sent. */
423 pthread_mutex_lock(&stream->metadata_timer_lock);
424 assert(!stream->missed_metadata_flush);
425 stream->waiting_on_metadata = true;
426 pthread_mutex_unlock(&stream->metadata_timer_lock);
427
428 ret = consumer_stream_sync_metadata(ctx, stream->session_id);
429
430 pthread_mutex_lock(&stream->metadata_timer_lock);
431 stream->waiting_on_metadata = false;
432 missed_metadata_flush = stream->missed_metadata_flush;
433 if (missed_metadata_flush) {
434 stream->missed_metadata_flush = false;
435 }
436 pthread_mutex_unlock(&stream->metadata_timer_lock);
437 if (ret < 0) {
438 goto end;
439 }
440
441 ret = consumer_stream_send_index(stream, subbuffer, ctx);
442 /*
443 * Send the live inactivity beacon to handle the situation where
444 * the live timer is prevented from sampling this stream
445 * because the stream lock was being held while this stream is
446 * waiting on metadata. This ensures live viewer progress in the
447 * unlikely scenario where a live timer would be prevented from
448 * locking a stream lock repeatedly due to a steady flow of
449 * incoming metadata, for a stream which is mostly inactive.
450 *
451 * It is important to send the inactivity beacon packet to
452 * relayd _after_ sending the index associated with the data
453 * that was just sent, otherwise this can cause live viewers to
454 * observe timestamps going backwards between an inactivity
455 * beacon and a following trace packet.
456 */
457 if (missed_metadata_flush) {
458 (void) stream->read_subbuffer_ops.send_live_beacon(stream);
459 }
460 end:
461 return ret;
462 }
463
464 /*
465 * Check if the local version of the metadata stream matches the version
466 * of the metadata stream in the kernel. If it was updated, set the reset flag
467 * on the stream.
468 */
469 static
470 int metadata_stream_check_version(struct lttng_consumer_stream *stream,
471 const struct stream_subbuffer *subbuffer)
472 {
473 if (stream->metadata_version == subbuffer->info.metadata.version) {
474 goto end;
475 }
476
477 DBG("New metadata version detected");
478 consumer_stream_metadata_set_version(stream,
479 subbuffer->info.metadata.version);
480
481 if (stream->read_subbuffer_ops.reset_metadata) {
482 stream->read_subbuffer_ops.reset_metadata(stream);
483 }
484
485 end:
486 return 0;
487 }
488
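/*
 * Allocate and initialize a consumer stream for the given channel and set
 * up its sub-buffer read operations. On failure, NULL is returned and the
 * error code is reported through alloc_ret (if provided).
 */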
489 struct lttng_consumer_stream *consumer_stream_create(
490 struct lttng_consumer_channel *channel,
491 uint64_t channel_key,
492 uint64_t stream_key,
493 const char *channel_name,
494 uint64_t relayd_id,
495 uint64_t session_id,
496 struct lttng_trace_chunk *trace_chunk,
497 int cpu,
498 int *alloc_ret,
499 enum consumer_channel_type type,
500 unsigned int monitor)
501 {
502 int ret;
503 struct lttng_consumer_stream *stream;
504
505 stream = zmalloc(sizeof(*stream));
506 if (stream == NULL) {
507 PERROR("malloc struct lttng_consumer_stream");
508 ret = -ENOMEM;
509 goto end;
510 }
511
512 rcu_read_lock();
513
514 if (trace_chunk && !lttng_trace_chunk_get(trace_chunk)) {
515 ERR("Failed to acquire trace chunk reference during the creation of a stream");
516 ret = -1;
517 goto error;
518 }
519
520 stream->chan = channel;
521 stream->key = stream_key;
522 stream->trace_chunk = trace_chunk;
523 stream->out_fd = -1;
524 stream->out_fd_offset = 0;
525 stream->output_written = 0;
526 stream->net_seq_idx = relayd_id;
527 stream->session_id = session_id;
528 stream->monitor = monitor;
529 stream->endpoint_status = CONSUMER_ENDPOINT_ACTIVE;
530 stream->index_file = NULL;
531 stream->last_sequence_number = -1ULL;
532 stream->rotate_position = -1ULL;
533 /* Buffer is created with an open packet. */
534 stream->opened_packet_in_current_trace_chunk = true;
535 pthread_mutex_init(&stream->lock, NULL);
536 pthread_mutex_init(&stream->metadata_timer_lock, NULL);
537
538 /* If the channel is the metadata channel, flag this stream as metadata. */
539 if (type == CONSUMER_CHANNEL_TYPE_METADATA) {
540 stream->metadata_flag = 1;
541 /* Metadata uses a flat name (no per-CPU suffix). */
542 strncpy(stream->name, DEFAULT_METADATA_NAME, sizeof(stream->name));
543 /* Live rendez-vous point. */
544 pthread_cond_init(&stream->metadata_rdv, NULL);
545 pthread_mutex_init(&stream->metadata_rdv_lock, NULL);
546 } else {
547 /* Format stream name to <channel_name>_<cpu_number> */
548 ret = snprintf(stream->name, sizeof(stream->name), "%s_%d",
549 channel_name, cpu);
550 if (ret < 0) {
551 PERROR("snprintf stream name");
552 goto error;
553 }
554 }
555
556 switch (channel->output) {
557 case CONSUMER_CHANNEL_SPLICE:
558 stream->output = LTTNG_EVENT_SPLICE;
559 ret = utils_create_pipe(stream->splice_pipe);
560 if (ret < 0) {
561 goto error;
562 }
563 break;
564 case CONSUMER_CHANNEL_MMAP:
565 stream->output = LTTNG_EVENT_MMAP;
566 break;
567 default:
568 abort();
569 }
570
571 /* Key is always the wait_fd for streams. */
572 lttng_ht_node_init_u64(&stream->node, stream->key);
573
574 /* Init node per channel id key */
575 lttng_ht_node_init_u64(&stream->node_channel_id, channel_key);
576
577 /* Init session id node with the stream session id */
578 lttng_ht_node_init_u64(&stream->node_session_id, stream->session_id);
579
580 DBG3("Allocated stream %s (key %" PRIu64 ", chan_key %" PRIu64
581 " relayd_id %" PRIu64 ", session_id %" PRIu64,
582 stream->name, stream->key, channel_key,
583 stream->net_seq_idx, stream->session_id);
584
585 rcu_read_unlock();
586
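/*
 * Wire the sub-buffer read operations: metadata streams check the metadata
 * version and use the rendez-vous locks, while data streams update
 * statistics and emit packet indexes (synchronizing with the metadata
 * first for live sessions).
 */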
587 if (type == CONSUMER_CHANNEL_TYPE_METADATA) {
588 stream->read_subbuffer_ops.lock =
589 consumer_stream_metadata_lock_all;
590 stream->read_subbuffer_ops.unlock =
591 consumer_stream_metadata_unlock_all;
592 stream->read_subbuffer_ops.assert_locked =
593 consumer_stream_metadata_assert_locked_all;
594 stream->read_subbuffer_ops.pre_consume_subbuffer =
595 metadata_stream_check_version;
596 } else {
597 stream->read_subbuffer_ops.lock = consumer_stream_data_lock_all;
598 stream->read_subbuffer_ops.unlock =
599 consumer_stream_data_unlock_all;
600 stream->read_subbuffer_ops.assert_locked =
601 consumer_stream_data_assert_locked_all;
602 stream->read_subbuffer_ops.pre_consume_subbuffer =
603 consumer_stream_update_stats;
604 if (channel->is_live) {
605 stream->read_subbuffer_ops.post_consume =
606 consumer_stream_sync_metadata_index;
607 } else {
608 stream->read_subbuffer_ops.post_consume =
609 consumer_stream_send_index;
610 }
611 }
612
613 if (channel->output == CONSUMER_CHANNEL_MMAP) {
614 stream->read_subbuffer_ops.consume_subbuffer =
615 consumer_stream_consume_mmap;
616 } else {
617 stream->read_subbuffer_ops.consume_subbuffer =
618 consumer_stream_consume_splice;
619 }
620
621 return stream;
622
623 error:
624 rcu_read_unlock();
625 lttng_trace_chunk_put(stream->trace_chunk);
626 free(stream);
627 end:
628 if (alloc_ret) {
629 *alloc_ret = ret;
630 }
631 return NULL;
632 }
633
634 /*
635 * Close stream on the relayd side. This call can destroy a relayd if the
636 * conditions are met.
637 *
638 * An RCU read-side lock MUST be acquired if the relayd object was looked up in
639 * a hash table before calling this.
640 */
641 void consumer_stream_relayd_close(struct lttng_consumer_stream *stream,
642 struct consumer_relayd_sock_pair *relayd)
643 {
644 int ret;
645
646 assert(stream);
647 assert(relayd);
648
649 if (stream->sent_to_relayd) {
650 uatomic_dec(&relayd->refcount);
651 assert(uatomic_read(&relayd->refcount) >= 0);
652 }
653
654 /* Closing streams requires locking the control socket. */
655 pthread_mutex_lock(&relayd->ctrl_sock_mutex);
656 ret = relayd_send_close_stream(&relayd->control_sock,
657 stream->relayd_stream_id,
658 stream->next_net_seq_num - 1);
659 pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
660 if (ret < 0) {
661 ERR("Relayd send close stream failed. Cleaning up relayd %" PRIu64 ".", relayd->net_seq_idx);
662 lttng_consumer_cleanup_relayd(relayd);
663 }
664
665 /* If both conditions are met, destroy the relayd. */
666 if (uatomic_read(&relayd->refcount) == 0 &&
667 uatomic_read(&relayd->destroy_flag)) {
668 consumer_destroy_relayd(relayd);
669 }
670 stream->net_seq_idx = (uint64_t) -1ULL;
671 stream->sent_to_relayd = 0;
672 }
673
674 /*
675 * Close the stream's file descriptors and, if needed, also close the stream
676 * on the relayd side.
677 *
678 * The consumer data lock MUST be acquired.
679 * The stream lock MUST be acquired.
680 */
681 void consumer_stream_close(struct lttng_consumer_stream *stream)
682 {
683 int ret;
684 struct consumer_relayd_sock_pair *relayd;
685
686 assert(stream);
687
688 switch (consumer_data.type) {
689 case LTTNG_CONSUMER_KERNEL:
690 if (stream->mmap_base != NULL) {
691 ret = munmap(stream->mmap_base, stream->mmap_len);
692 if (ret != 0) {
693 PERROR("munmap");
694 }
695 }
696
697 if (stream->wait_fd >= 0) {
698 ret = close(stream->wait_fd);
699 if (ret) {
700 PERROR("close");
701 }
702 stream->wait_fd = -1;
703 }
704 if (stream->chan->output == CONSUMER_CHANNEL_SPLICE) {
705 utils_close_pipe(stream->splice_pipe);
706 }
707 break;
708 case LTTNG_CONSUMER32_UST:
709 case LTTNG_CONSUMER64_UST:
710 {
711 /*
712 * Special case for the metadata since the wait fd is an internal pipe
713 * polled in the metadata thread.
714 */
715 if (stream->metadata_flag && stream->chan->monitor) {
716 int rpipe = stream->ust_metadata_poll_pipe[0];
717
718 /*
719 * This will stop the channel timer, if any, and close the write side
720 * of the metadata poll pipe.
721 */
722 lttng_ustconsumer_close_metadata(stream->chan);
723 if (rpipe >= 0) {
724 ret = close(rpipe);
725 if (ret < 0) {
726 PERROR("closing metadata pipe read side");
727 }
728 stream->ust_metadata_poll_pipe[0] = -1;
729 }
730 }
731 break;
732 }
733 default:
734 ERR("Unknown consumer_data type");
735 assert(0);
736 }
737
738 /* Close output fd. Could be a socket or local file at this point. */
739 if (stream->out_fd >= 0) {
740 ret = close(stream->out_fd);
741 if (ret) {
742 PERROR("close");
743 }
744 stream->out_fd = -1;
745 }
746
747 if (stream->index_file) {
748 lttng_index_file_put(stream->index_file);
749 stream->index_file = NULL;
750 }
751
752 lttng_trace_chunk_put(stream->trace_chunk);
753 stream->trace_chunk = NULL;
754
755 /* Check and clean up the relayd if needed. */
756 rcu_read_lock();
757 relayd = consumer_find_relayd(stream->net_seq_idx);
758 if (relayd != NULL) {
759 consumer_stream_relayd_close(stream, relayd);
760 }
761 rcu_read_unlock();
762 }
763
764 /*
765 * Delete the stream from all possible hash tables.
766 *
767 * The consumer data lock MUST be acquired.
768 * The stream lock MUST be acquired.
769 */
770 void consumer_stream_delete(struct lttng_consumer_stream *stream,
771 struct lttng_ht *ht)
772 {
773 int ret;
774 struct lttng_ht_iter iter;
775
776 assert(stream);
777 /* Must NEVER be called outside of monitor mode. */
778 assert(stream->chan->monitor);
779
780 rcu_read_lock();
781
782 if (ht) {
783 iter.iter.node = &stream->node.node;
784 ret = lttng_ht_del(ht, &iter);
785 assert(!ret);
786 }
787
788 /* Delete from stream per channel ID hash table. */
789 iter.iter.node = &stream->node_channel_id.node;
790 /*
791 * The returned value is of no importance. Even if the node is NOT in the
792 * hash table, we continue since we may have been called by a code path
793 * that did not add the stream to any (or all) hash tables. Same goes for
794 * the next ht del call.
795 */
796 (void) lttng_ht_del(consumer_data.stream_per_chan_id_ht, &iter);
797
798 /* Delete from the global stream list. */
799 iter.iter.node = &stream->node_session_id.node;
800 /* See the previous ht del on why we ignore the returned value. */
801 (void) lttng_ht_del(consumer_data.stream_list_ht, &iter);
802
803 rcu_read_unlock();
804
805 if (!stream->metadata_flag) {
806 /* Decrement the stream count of the global consumer data. */
807 assert(consumer_data.stream_count > 0);
808 consumer_data.stream_count--;
809 }
810 }
811
812 /*
813 * Free the given stream within an RCU call.
814 */
815 void consumer_stream_free(struct lttng_consumer_stream *stream)
816 {
817 assert(stream);
818
819 metadata_bucket_destroy(stream->metadata_bucket);
820 call_rcu(&stream->node.head, free_stream_rcu);
821 }
822
823 /*
824 * Destroy the stream's buffers on the tracer side.
825 */
826 void consumer_stream_destroy_buffers(struct lttng_consumer_stream *stream)
827 {
828 assert(stream);
829
830 switch (consumer_data.type) {
831 case LTTNG_CONSUMER_KERNEL:
832 break;
833 case LTTNG_CONSUMER32_UST:
834 case LTTNG_CONSUMER64_UST:
835 lttng_ustconsumer_del_stream(stream);
836 break;
837 default:
838 ERR("Unknown consumer_data type");
839 assert(0);
840 }
841 }
842
843 /*
844 * Destroy and close an already created stream.
845 */
846 static void destroy_close_stream(struct lttng_consumer_stream *stream)
847 {
848 assert(stream);
849
850 DBG("Consumer stream destroy monitored key: %" PRIu64, stream->key);
851
852 /* Destroy tracer buffers of the stream. */
853 consumer_stream_destroy_buffers(stream);
854 /* Close down everything, including the relayd if there is one. */
855 consumer_stream_close(stream);
856 }
857
858 /*
859 * Decrement the stream's channel refcount. If it drops to 0, return the
860 * channel pointer so the caller can destroy it; otherwise, return NULL.
861 */
862 static struct lttng_consumer_channel *unref_channel(
863 struct lttng_consumer_stream *stream)
864 {
865 struct lttng_consumer_channel *free_chan = NULL;
866
867 assert(stream);
868 assert(stream->chan);
869
870 /* Update refcount of channel and see if we need to destroy it. */
871 if (!uatomic_sub_return(&stream->chan->refcount, 1)
872 && !uatomic_read(&stream->chan->nb_init_stream_left)) {
873 free_chan = stream->chan;
874 }
875
876 return free_chan;
877 }
878
879 /*
880 * Destroy a stream completely. This will delete, close and free the stream.
881 * Once returned, the stream is NO longer usable. Its channel may get destroyed
882 * if conditions are met for a monitored stream.
883 *
884 * This MUST be called WITHOUT the consumer data and stream locks acquired if
885 * the stream is in _monitor_ mode; otherwise, it does not matter.
886 */
887 void consumer_stream_destroy(struct lttng_consumer_stream *stream,
888 struct lttng_ht *ht)
889 {
890 assert(stream);
891
892 /* Stream is in monitor mode. */
893 if (stream->monitor) {
894 struct lttng_consumer_channel *free_chan = NULL;
895
896 /*
897 * This means that the stream was successfully removed from the streams
898 * list of the channel and sent to the right thread managing this
899 * stream thus being globally visible.
900 */
901 if (stream->globally_visible) {
902 pthread_mutex_lock(&consumer_data.lock);
903 pthread_mutex_lock(&stream->chan->lock);
904 pthread_mutex_lock(&stream->lock);
905 /* Remove every reference of the stream in the consumer. */
906 consumer_stream_delete(stream, ht);
907
908 destroy_close_stream(stream);
909
910 /* Update channel's refcount of the stream. */
911 free_chan = unref_channel(stream);
912
913 /* Indicates that the consumer data state MUST be updated after this. */
914 consumer_data.need_update = 1;
915
916 pthread_mutex_unlock(&stream->lock);
917 pthread_mutex_unlock(&stream->chan->lock);
918 pthread_mutex_unlock(&consumer_data.lock);
919 } else {
920 /*
921 * If the stream is not visible globally, this needs to be done
922 * outside of the consumer data lock section.
923 */
924 free_chan = unref_channel(stream);
925 }
926
927 if (free_chan) {
928 consumer_del_channel(free_chan);
929 }
930 } else {
931 destroy_close_stream(stream);
932 }
933
934 /* Free the stream within an RCU call. */
935 lttng_trace_chunk_put(stream->trace_chunk);
936 stream->trace_chunk = NULL;
937 consumer_stream_free(stream);
938 }
939
940 /*
941 * Write the index of a specific stream either to the relayd or to local disk.
942 *
943 * Return 0 on success or else a negative value.
944 */
945 int consumer_stream_write_index(struct lttng_consumer_stream *stream,
946 struct ctf_packet_index *element)
947 {
948 int ret;
949
950 assert(stream);
951 assert(element);
952
953 rcu_read_lock();
954 if (stream->net_seq_idx != (uint64_t) -1ULL) {
955 struct consumer_relayd_sock_pair *relayd;
956 relayd = consumer_find_relayd(stream->net_seq_idx);
957 if (relayd) {
958 pthread_mutex_lock(&relayd->ctrl_sock_mutex);
959 ret = relayd_send_index(&relayd->control_sock, element,
960 stream->relayd_stream_id, stream->next_net_seq_num - 1);
961 if (ret < 0) {
962 /*
963 * Communication error with lttng-relayd,
964 * perform cleanup now
965 */
966 ERR("Relayd send index failed. Cleaning up relayd %" PRIu64 ".", relayd->net_seq_idx);
967 lttng_consumer_cleanup_relayd(relayd);
968 ret = -1;
969 }
970 pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
971 } else {
972 ERR("Stream %" PRIu64 " relayd ID %" PRIu64 " unknown. Can't write index.",
973 stream->key, stream->net_seq_idx);
974 ret = -1;
975 }
976 } else {
977 if (lttng_index_file_write(stream->index_file, element)) {
978 ret = -1;
979 } else {
980 ret = 0;
981 }
982 }
983 if (ret < 0) {
984 goto error;
985 }
986
987 error:
988 rcu_read_unlock();
989 return ret;
990 }
991
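/*
 * Create the stream's data output file and, if needed, its index file
 * within the stream's current trace chunk. The stream lock must be held.
 */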
992 int consumer_stream_create_output_files(struct lttng_consumer_stream *stream,
993 bool create_index)
994 {
995 int ret;
996 enum lttng_trace_chunk_status chunk_status;
997 const int flags = O_WRONLY | O_CREAT | O_TRUNC;
998 const mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP;
999 char stream_path[LTTNG_PATH_MAX];
1000
1001 ASSERT_LOCKED(stream->lock);
1002 assert(stream->trace_chunk);
1003
1004 ret = utils_stream_file_path(stream->chan->pathname, stream->name,
1005 stream->chan->tracefile_size,
1006 stream->tracefile_count_current, NULL,
1007 stream_path, sizeof(stream_path));
1008 if (ret < 0) {
1009 goto end;
1010 }
1011
1012 if (stream->out_fd >= 0) {
1013 ret = close(stream->out_fd);
1014 if (ret < 0) {
1015 PERROR("Failed to close stream file \"%s\"",
1016 stream->name);
1017 goto end;
1018 }
1019 stream->out_fd = -1;
1020 }
1021
1022 DBG("Opening stream output file \"%s\"", stream_path);
1023 chunk_status = lttng_trace_chunk_open_file(stream->trace_chunk, stream_path,
1024 flags, mode, &stream->out_fd, false);
1025 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
1026 ERR("Failed to open stream file \"%s\"", stream->name);
1027 ret = -1;
1028 goto end;
1029 }
1030
1031 if (!stream->metadata_flag && (create_index || stream->index_file)) {
1032 if (stream->index_file) {
1033 lttng_index_file_put(stream->index_file);
1034 }
1035 chunk_status = lttng_index_file_create_from_trace_chunk(
1036 stream->trace_chunk,
1037 stream->chan->pathname,
1038 stream->name,
1039 stream->chan->tracefile_size,
1040 stream->tracefile_count_current,
1041 CTF_INDEX_MAJOR, CTF_INDEX_MINOR,
1042 false, &stream->index_file);
1043 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
1044 ret = -1;
1045 goto end;
1046 }
1047 }
1048
1049 /* Reset current size because we just performed a rotation. */
1050 stream->tracefile_size_current = 0;
1051 stream->out_fd_offset = 0;
1052 end:
1053 return ret;
1054 }
1055
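/*
 * Move on to the stream's next tracefile (wrapping around when the
 * tracefile count limit is reached) and create the new output files.
 */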
1056 int consumer_stream_rotate_output_files(struct lttng_consumer_stream *stream)
1057 {
1058 int ret;
1059
1060 stream->tracefile_count_current++;
1061 if (stream->chan->tracefile_count > 0) {
1062 stream->tracefile_count_current %=
1063 stream->chan->tracefile_count;
1064 }
1065
1066 DBG("Rotating output files of stream \"%s\"", stream->name);
1067 ret = consumer_stream_create_output_files(stream, true);
1068 if (ret) {
1069 goto end;
1070 }
1071
1072 end:
1073 return ret;
1074 }
1075
1076 bool consumer_stream_is_deleted(struct lttng_consumer_stream *stream)
1077 {
1078 /*
1079 * This function does not take a const stream since
1080 * cds_lfht_is_node_deleted was not const before liburcu 0.12.
1081 */
1082 assert(stream);
1083 return cds_lfht_is_node_deleted(&stream->node.node);
1084 }
1085
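/*
 * Flush callback of the metadata bucket: write the buffered metadata
 * using the regular mmap consumption path.
 */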
1086 static ssize_t metadata_bucket_flush(
1087 const struct stream_subbuffer *buffer, void *data)
1088 {
1089 ssize_t ret;
1090 struct lttng_consumer_stream *stream = data;
1091
1092 ret = consumer_stream_consume_mmap(NULL, stream, buffer);
1093 if (ret < 0) {
1094 goto end;
1095 }
1096 end:
1097 return ret;
1098 }
1099
1100 static ssize_t metadata_bucket_consume(
1101 struct lttng_consumer_local_data *unused,
1102 struct lttng_consumer_stream *stream,
1103 const struct stream_subbuffer *subbuffer)
1104 {
1105 ssize_t ret;
1106 enum metadata_bucket_status status;
1107
1108 status = metadata_bucket_fill(stream->metadata_bucket, subbuffer);
1109 switch (status) {
1110 case METADATA_BUCKET_STATUS_OK:
1111 /* Return consumed size. */
1112 ret = subbuffer->buffer.buffer.size;
1113 break;
1114 default:
1115 ret = -1;
1116 }
1117
1118 return ret;
1119 }
1120
1121 int consumer_stream_enable_metadata_bucketization(
1122 struct lttng_consumer_stream *stream)
1123 {
1124 int ret = 0;
1125
1126 assert(stream->metadata_flag);
1127 assert(!stream->metadata_bucket);
1128 assert(stream->chan->output == CONSUMER_CHANNEL_MMAP);
1129
1130 stream->metadata_bucket = metadata_bucket_create(
1131 metadata_bucket_flush, stream);
1132 if (!stream->metadata_bucket) {
1133 ret = -1;
1134 goto end;
1135 }
1136
1137 stream->read_subbuffer_ops.consume_subbuffer = metadata_bucket_consume;
1138 end:
1139 return ret;
1140 }
1141
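/*
 * Record a new metadata version and mark the stream so that its metadata
 * output is reset before the next metadata write.
 */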
1142 void consumer_stream_metadata_set_version(
1143 struct lttng_consumer_stream *stream, uint64_t new_version)
1144 {
1145 assert(new_version > stream->metadata_version);
1146 stream->metadata_version = new_version;
1147 stream->reset_metadata_flag = 1;
1148
1149 if (stream->metadata_bucket) {
1150 metadata_bucket_reset(stream->metadata_bucket);
1151 }
1152 }