/*
 * Copyright (C) 2011 Julien Desfossez <julien.desfossez@polymtl.ca>
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 *
 */

#define _LGPL_SOURCE
#include <assert.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <unistd.h>

#include <common/common.h>
#include <common/index/index.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/relayd/relayd.h>
#include <common/ust-consumer/ust-consumer.h>
#include <common/utils.h>
#include <common/consumer/consumer.h>
#include <common/consumer/consumer-timer.h>
#include <common/consumer/metadata-bucket.h>

#include "consumer-stream.h"

/*
 * RCU call to free stream. MUST only be used with call_rcu().
 */
static void free_stream_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct lttng_consumer_stream *stream =
		caa_container_of(node, struct lttng_consumer_stream, node);

	pthread_mutex_destroy(&stream->lock);
	free(stream);
}

static void consumer_stream_data_lock_all(struct lttng_consumer_stream *stream)
{
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->lock);
}

static void consumer_stream_data_unlock_all(struct lttng_consumer_stream *stream)
{
	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->lock);
}

static void consumer_stream_metadata_lock_all(struct lttng_consumer_stream *stream)
{
	consumer_stream_data_lock_all(stream);
	pthread_mutex_lock(&stream->metadata_rdv_lock);
}

static void consumer_stream_metadata_unlock_all(struct lttng_consumer_stream *stream)
{
	pthread_mutex_unlock(&stream->metadata_rdv_lock);
	consumer_stream_data_unlock_all(stream);
}

/* Only used for data streams. */
static int consumer_stream_update_stats(struct lttng_consumer_stream *stream,
		const struct stream_subbuffer *subbuf)
{
	int ret = 0;
	uint64_t sequence_number;
	const uint64_t discarded_events = subbuf->info.data.events_discarded;

	if (!subbuf->info.data.sequence_number.is_set) {
		/* Command not supported by the tracer. */
		sequence_number = -1ULL;
		stream->sequence_number_unavailable = true;
	} else {
		sequence_number = subbuf->info.data.sequence_number.value;
	}

	/*
	 * Start the sequence when we extract the first packet in case we don't
	 * start at 0 (for example if a consumer is not connected to the
	 * session immediately after the beginning).
	 */
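	/*
	 * For example, if the previous packet had sequence number 5 and the
	 * packet just extracted has sequence number 9, packets 6 through 8
	 * were lost: 9 - 5 - 1 = 3 is added to the channel's lost packet
	 * count.
	 */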
	if (stream->last_sequence_number == -1ULL) {
		stream->last_sequence_number = sequence_number;
	} else if (sequence_number > stream->last_sequence_number) {
		stream->chan->lost_packets += sequence_number -
				stream->last_sequence_number - 1;
	} else {
		/* seq <= last_sequence_number */
		ERR("Sequence number inconsistent: prev = %" PRIu64
				", current = %" PRIu64,
				stream->last_sequence_number, sequence_number);
		ret = -1;
		goto end;
	}
	stream->last_sequence_number = sequence_number;

	if (discarded_events < stream->last_discarded_events) {
		/*
		 * Overflow has occurred. We assume only one wrap-around
		 * has occurred.
		 */
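		/*
		 * The discarded event counter is assumed to wrap around at
		 * 2^(CAA_BITS_PER_LONG - 1): the new total is the distance
		 * from the previous value to that wrap-around point, plus
		 * the value accumulated since the wrap-around.
		 */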
		stream->chan->discarded_events +=
				(1ULL << (CAA_BITS_PER_LONG - 1)) -
				stream->last_discarded_events +
				discarded_events;
	} else {
		stream->chan->discarded_events += discarded_events -
				stream->last_discarded_events;
	}
	stream->last_discarded_events = discarded_events;
	ret = 0;

end:
	return ret;
}

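/*
 * Fill a CTF packet index entry from a consumed sub-buffer. All fields of
 * the CTF index format are expressed in big endian, hence the htobe64()
 * conversions. Optional fields that the tracer did not provide (stream
 * instance id, packet sequence number) are set to -1ULL.
 */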
static
void ctf_packet_index_populate(struct ctf_packet_index *index,
		off_t offset, const struct stream_subbuffer *subbuffer)
{
	*index = (typeof(*index)){
		.offset = htobe64(offset),
		.packet_size = htobe64(subbuffer->info.data.packet_size),
		.content_size = htobe64(subbuffer->info.data.content_size),
		.timestamp_begin = htobe64(
				subbuffer->info.data.timestamp_begin),
		.timestamp_end = htobe64(
				subbuffer->info.data.timestamp_end),
		.events_discarded = htobe64(
				subbuffer->info.data.events_discarded),
		.stream_id = htobe64(subbuffer->info.data.stream_id),
		.stream_instance_id = htobe64(
				subbuffer->info.data.stream_instance_id.is_set ?
				subbuffer->info.data.stream_instance_id.value : -1ULL),
		.packet_seq_num = htobe64(
				subbuffer->info.data.sequence_number.is_set ?
				subbuffer->info.data.sequence_number.value : -1ULL),
	};
}

static ssize_t consumer_stream_consume_mmap(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream,
		const struct stream_subbuffer *subbuffer)
{
	const unsigned long padding_size =
			subbuffer->info.data.padded_subbuf_size -
			subbuffer->info.data.subbuf_size;
	const ssize_t written_bytes = lttng_consumer_on_read_subbuffer_mmap(
			stream, &subbuffer->buffer.buffer, padding_size);

	if (stream->net_seq_idx == -1ULL) {
		/*
		 * When writing on disk, check that the entire padded
		 * sub-buffer (data and padding) was written.
		 */
		if (written_bytes != subbuffer->info.data.padded_subbuf_size) {
			DBG("Failed to write the entire padded subbuffer on disk (written_bytes: %zd, padded subbuffer size %lu)",
					written_bytes,
					subbuffer->info.data.padded_subbuf_size);
		}
	} else {
		/*
		 * When streaming over the network, check that the sub-buffer
		 * itself (without padding) was written in full.
		 */
		if (written_bytes != subbuffer->info.data.subbuf_size) {
			DBG("Failed to write the entire subbuffer over the network (written_bytes: %zd, subbuffer size %lu)",
					written_bytes,
					subbuffer->info.data.subbuf_size);
		}
	}

	/*
	 * If `lttng_consumer_on_read_subbuffer_mmap()` returned an error, log
	 * it; the value (number of bytes written or negative error) is passed
	 * along to the caller either way.
	 */
	if (written_bytes < 0) {
		ERR("Error reading mmap subbuffer: %zd", written_bytes);
	}

	return written_bytes;
}

static ssize_t consumer_stream_consume_splice(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream,
		const struct stream_subbuffer *subbuffer)
{
	const ssize_t written_bytes = lttng_consumer_on_read_subbuffer_splice(
			ctx, stream, subbuffer->info.data.padded_subbuf_size, 0);

	if (written_bytes != subbuffer->info.data.padded_subbuf_size) {
		DBG("Failed to write the entire padded subbuffer (written_bytes: %zd, padded subbuffer size %lu)",
				written_bytes,
				subbuffer->info.data.padded_subbuf_size);
	}

	/*
	 * If `lttng_consumer_on_read_subbuffer_splice()` returned an error,
	 * log it; the value (number of bytes written or negative error) is
	 * passed along to the caller either way.
	 */
	if (written_bytes < 0) {
		ERR("Error reading splice subbuffer: %zd", written_bytes);
	}

	return written_bytes;
}

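/*
 * Emit the CTF index entry that describes the sub-buffer that was just
 * consumed. For a stream written to local disk, the packet offset within
 * the trace file is recovered by backing off the padded sub-buffer size
 * from the current file offset; when streaming to a relay daemon, the
 * offset is left at 0 since the consumer does not track the on-disk offset
 * on the relay side.
 */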
static int consumer_stream_send_index(
		struct lttng_consumer_stream *stream,
		const struct stream_subbuffer *subbuffer,
		struct lttng_consumer_local_data *ctx)
{
	off_t packet_offset = 0;
	struct ctf_packet_index index = {};

	/*
	 * This is called after consuming the sub-buffer; subtract the
	 * effect of this sub-buffer from the offset.
	 */
	if (stream->net_seq_idx == (uint64_t) -1ULL) {
		packet_offset = stream->out_fd_offset -
				subbuffer->info.data.padded_subbuf_size;
	}

	ctf_packet_index_populate(&index, packet_offset, subbuffer);
	return consumer_stream_write_index(stream, &index);
}

/*
 * Actually do the metadata sync using the given metadata stream.
 *
 * Return 0 on success, or a negative value on error. ENODATA may also be
 * returned to indicate that no metadata is available for that stream.
 */
static int do_sync_metadata(struct lttng_consumer_stream *metadata,
		struct lttng_consumer_local_data *ctx)
{
	int ret;
	enum sync_metadata_status status;

	assert(metadata);
	assert(metadata->metadata_flag);
	assert(ctx);

	/*
	 * In UST, since we have to write the metadata from the cache packet
	 * by packet, we might need to start this procedure multiple times
	 * until all the metadata from the cache has been extracted.
	 */
	do {
		/*
		 * Steps:
		 * - Lock the metadata stream
		 * - Check if the metadata stream node was deleted before locking.
		 *   - if yes, release and return success
		 * - Check if new metadata is ready (flush + snapshot pos)
		 *   - If nothing: release and return.
		 * - Lock the metadata_rdv_lock
		 * - Unlock the metadata stream
		 * - cond_wait on metadata_rdv to wait for the wakeup from the
		 *   metadata thread
		 * - Unlock the metadata_rdv_lock
		 */
		pthread_mutex_lock(&metadata->lock);

		/*
		 * There is a possibility that we were able to acquire a reference on the
		 * stream from the RCU hash table but between then and now, the node might
		 * have been deleted just before the lock is acquired. Thus, after locking,
		 * we make sure the metadata node has not been deleted which means that the
		 * buffers are closed.
		 *
		 * In that case, there is no need to sync the metadata, hence returning a
		 * success return code.
		 */
		ret = cds_lfht_is_node_deleted(&metadata->node.node);
		if (ret) {
			ret = 0;
			goto end_unlock_mutex;
		}

		switch (ctx->type) {
		case LTTNG_CONSUMER_KERNEL:
			/*
			 * Empty the metadata cache and flush the current stream.
			 */
			status = lttng_kconsumer_sync_metadata(metadata);
			break;
		case LTTNG_CONSUMER32_UST:
		case LTTNG_CONSUMER64_UST:
			/*
			 * Ask the sessiond if we have new metadata waiting and update the
			 * consumer metadata cache.
			 */
			status = lttng_ustconsumer_sync_metadata(ctx, metadata);
			break;
		default:
			abort();
		}

		switch (status) {
		case SYNC_METADATA_STATUS_NEW_DATA:
			break;
		case SYNC_METADATA_STATUS_NO_DATA:
			ret = 0;
			goto end_unlock_mutex;
		case SYNC_METADATA_STATUS_ERROR:
			ret = -1;
			goto end_unlock_mutex;
		default:
			abort();
		}

		/*
		 * At this point, new metadata has been flushed, so we wait on the
		 * rendez-vous point for the metadata thread to wake us up when it
		 * finishes consuming the metadata and continue execution.
		 */

		pthread_mutex_lock(&metadata->metadata_rdv_lock);

		/*
		 * Release the metadata stream lock so the metadata thread can process it.
		 */
		pthread_mutex_unlock(&metadata->lock);

		/*
		 * Wait on the rendez-vous point. Once woken up, it means the metadata was
		 * consumed and thus synchronization is achieved.
		 */
		pthread_cond_wait(&metadata->metadata_rdv, &metadata->metadata_rdv_lock);
		pthread_mutex_unlock(&metadata->metadata_rdv_lock);
	} while (status == SYNC_METADATA_STATUS_NEW_DATA);

	/* Success */
	return 0;

end_unlock_mutex:
	pthread_mutex_unlock(&metadata->lock);
	return ret;
}

/*
 * Synchronize the metadata using a given session ID. A successful acquisition
 * of a metadata stream will trigger a request to the session daemon and a
 * snapshot so the metadata thread can consume it.
 *
 * This function call is a rendez-vous point between the metadata thread and
 * the data thread.
 *
 * Return 0 on success or else a negative value.
 */
int consumer_stream_sync_metadata(struct lttng_consumer_local_data *ctx,
		uint64_t session_id)
{
	int ret;
	struct lttng_consumer_stream *stream = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht *ht;

	assert(ctx);

	/* Ease our life a bit. */
	ht = consumer_data.stream_list_ht;

	rcu_read_lock();

	/* Search for the metadata stream associated with the given session id. */

	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&session_id, lttng_ht_seed), ht->match_fct,
			&session_id, &iter.iter, stream, node_session_id.node) {
		if (!stream->metadata_flag) {
			continue;
		}

		ret = do_sync_metadata(stream, ctx);
		if (ret < 0) {
			goto end;
		}
	}

	/*
	 * Force the return code to 0 (success) since ret might be ENODATA for
	 * instance, which is not an error but rather an indication that we
	 * should come back and try again later.
	 */
	ret = 0;

end:
	rcu_read_unlock();
	return ret;
}

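/*
 * Post-consume hook used by data streams of live sessions: synchronize the
 * session's metadata (blocking on the metadata rendez-vous), re-emit a live
 * beacon if a metadata flush was missed while waiting, and finally send the
 * index of the consumed sub-buffer.
 */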
static int consumer_stream_sync_metadata_index(
		struct lttng_consumer_stream *stream,
		const struct stream_subbuffer *subbuffer,
		struct lttng_consumer_local_data *ctx)
{
	int ret;

	/* Block until all the metadata is sent. */
	pthread_mutex_lock(&stream->metadata_timer_lock);
	assert(!stream->missed_metadata_flush);
	stream->waiting_on_metadata = true;
	pthread_mutex_unlock(&stream->metadata_timer_lock);

	ret = consumer_stream_sync_metadata(ctx, stream->session_id);

	pthread_mutex_lock(&stream->metadata_timer_lock);
	stream->waiting_on_metadata = false;
	if (stream->missed_metadata_flush) {
		stream->missed_metadata_flush = false;
		pthread_mutex_unlock(&stream->metadata_timer_lock);
		(void) stream->read_subbuffer_ops.send_live_beacon(stream);
	} else {
		pthread_mutex_unlock(&stream->metadata_timer_lock);
	}
	if (ret < 0) {
		goto end;
	}

	ret = consumer_stream_send_index(stream, subbuffer, ctx);
end:
	return ret;
}

/*
 * Check if the local version of the metadata stream matches the version
 * reported for the consumed sub-buffer. If it was updated, set the reset
 * flag on the stream.
 */
static
int metadata_stream_check_version(struct lttng_consumer_stream *stream,
		const struct stream_subbuffer *subbuffer)
{
	if (stream->metadata_version == subbuffer->info.metadata.version) {
		goto end;
	}

	DBG("New metadata version detected");
	consumer_stream_metadata_set_version(stream,
			subbuffer->info.metadata.version);

	if (stream->read_subbuffer_ops.reset_metadata) {
		stream->read_subbuffer_ops.reset_metadata(stream);
	}

end:
	return 0;
}

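/*
 * Allocate and initialize a consumer stream for the given channel.
 *
 * On success, the returned stream holds a reference on the trace chunk (if
 * any). On error, NULL is returned and, if alloc_ret is non-NULL,
 * *alloc_ret is set to a negative error code.
 */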
struct lttng_consumer_stream *consumer_stream_create(
		struct lttng_consumer_channel *channel,
		uint64_t channel_key,
		uint64_t stream_key,
		const char *channel_name,
		uint64_t relayd_id,
		uint64_t session_id,
		struct lttng_trace_chunk *trace_chunk,
		int cpu,
		int *alloc_ret,
		enum consumer_channel_type type,
		unsigned int monitor)
{
	int ret;
	struct lttng_consumer_stream *stream;

	stream = zmalloc(sizeof(*stream));
	if (stream == NULL) {
		PERROR("malloc struct lttng_consumer_stream");
		ret = -ENOMEM;
		goto end;
	}

	if (trace_chunk && !lttng_trace_chunk_get(trace_chunk)) {
		ERR("Failed to acquire trace chunk reference during the creation of a stream");
		ret = -1;
		goto error;
	}

	rcu_read_lock();
	stream->chan = channel;
	stream->key = stream_key;
	stream->trace_chunk = trace_chunk;
	stream->out_fd = -1;
	stream->out_fd_offset = 0;
	stream->output_written = 0;
	stream->net_seq_idx = relayd_id;
	stream->session_id = session_id;
	stream->monitor = monitor;
	stream->endpoint_status = CONSUMER_ENDPOINT_ACTIVE;
	stream->index_file = NULL;
	stream->last_sequence_number = -1ULL;
	stream->rotate_position = -1ULL;
	/* Buffer is created with an open packet. */
	stream->opened_packet_in_current_trace_chunk = true;
	pthread_mutex_init(&stream->lock, NULL);
	pthread_mutex_init(&stream->metadata_timer_lock, NULL);

	/* If channel is the metadata, flag this stream as metadata. */
	if (type == CONSUMER_CHANNEL_TYPE_METADATA) {
		stream->metadata_flag = 1;
		/* Metadata is flat out. */
		strncpy(stream->name, DEFAULT_METADATA_NAME, sizeof(stream->name));
		/* Live rendez-vous point. */
		pthread_cond_init(&stream->metadata_rdv, NULL);
		pthread_mutex_init(&stream->metadata_rdv_lock, NULL);
	} else {
		/* Format stream name to <channel_name>_<cpu_number> */
		ret = snprintf(stream->name, sizeof(stream->name), "%s_%d",
				channel_name, cpu);
		if (ret < 0) {
			PERROR("snprintf stream name");
			goto error;
		}
	}

	switch (channel->output) {
	case CONSUMER_CHANNEL_SPLICE:
		stream->output = LTTNG_EVENT_SPLICE;
		ret = utils_create_pipe(stream->splice_pipe);
		if (ret < 0) {
			goto error;
		}
		break;
	case CONSUMER_CHANNEL_MMAP:
		stream->output = LTTNG_EVENT_MMAP;
		break;
	default:
		abort();
	}

	/* Key is always the wait_fd for streams. */
	lttng_ht_node_init_u64(&stream->node, stream->key);

	/* Init node per channel id key */
	lttng_ht_node_init_u64(&stream->node_channel_id, channel_key);

	/* Init session id node with the stream session id */
	lttng_ht_node_init_u64(&stream->node_session_id, stream->session_id);

	DBG3("Allocated stream %s (key %" PRIu64 ", chan_key %" PRIu64
			" relayd_id %" PRIu64 ", session_id %" PRIu64,
			stream->name, stream->key, channel_key,
			stream->net_seq_idx, stream->session_id);

	rcu_read_unlock();

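	/*
	 * Wire up the read_subbuffer operations: metadata streams take the
	 * channel, stream and rendez-vous locks and check the metadata
	 * version before consuming; data streams update packet statistics
	 * before consuming and emit an index afterwards (preceded by a
	 * metadata sync for live sessions).
	 */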
	if (type == CONSUMER_CHANNEL_TYPE_METADATA) {
		stream->read_subbuffer_ops.lock =
				consumer_stream_metadata_lock_all;
		stream->read_subbuffer_ops.unlock =
				consumer_stream_metadata_unlock_all;
		stream->read_subbuffer_ops.pre_consume_subbuffer =
				metadata_stream_check_version;
	} else {
		stream->read_subbuffer_ops.lock = consumer_stream_data_lock_all;
		stream->read_subbuffer_ops.unlock =
				consumer_stream_data_unlock_all;
		stream->read_subbuffer_ops.pre_consume_subbuffer =
				consumer_stream_update_stats;
		if (channel->is_live) {
			stream->read_subbuffer_ops.post_consume =
					consumer_stream_sync_metadata_index;
		} else {
			stream->read_subbuffer_ops.post_consume =
					consumer_stream_send_index;
		}
	}

	if (channel->output == CONSUMER_CHANNEL_MMAP) {
		stream->read_subbuffer_ops.consume_subbuffer =
				consumer_stream_consume_mmap;
	} else {
		stream->read_subbuffer_ops.consume_subbuffer =
				consumer_stream_consume_splice;
	}

	return stream;

error:
	rcu_read_unlock();
	lttng_trace_chunk_put(stream->trace_chunk);
	free(stream);
end:
	if (alloc_ret) {
		*alloc_ret = ret;
	}
	return NULL;
}

/*
 * Close stream on the relayd side. This call can destroy a relayd if the
 * conditions are met.
 *
 * An RCU read side lock MUST be acquired if the relayd object was looked up in
 * a hash table before calling this.
 */
void consumer_stream_relayd_close(struct lttng_consumer_stream *stream,
		struct consumer_relayd_sock_pair *relayd)
{
	int ret;

	assert(stream);
	assert(relayd);

	if (stream->sent_to_relayd) {
		uatomic_dec(&relayd->refcount);
		assert(uatomic_read(&relayd->refcount) >= 0);
	}

	/* Closing streams requires locking the control socket. */
	pthread_mutex_lock(&relayd->ctrl_sock_mutex);
	ret = relayd_send_close_stream(&relayd->control_sock,
			stream->relayd_stream_id,
			stream->next_net_seq_num - 1);
	pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	if (ret < 0) {
		ERR("Relayd send close stream failed. Cleaning up relayd %" PRIu64 ".", relayd->net_seq_idx);
		lttng_consumer_cleanup_relayd(relayd);
	}

	/* If both conditions are met, destroy the relayd. */
	if (uatomic_read(&relayd->refcount) == 0 &&
			uatomic_read(&relayd->destroy_flag)) {
		consumer_destroy_relayd(relayd);
	}
	stream->net_seq_idx = (uint64_t) -1ULL;
	stream->sent_to_relayd = 0;
}

/*
 * Close stream's file descriptors and, if needed, close stream also on the
 * relayd side.
 *
 * The consumer data lock MUST be acquired.
 * The stream lock MUST be acquired.
 */
void consumer_stream_close(struct lttng_consumer_stream *stream)
{
	int ret;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		if (stream->mmap_base != NULL) {
			ret = munmap(stream->mmap_base, stream->mmap_len);
			if (ret != 0) {
				PERROR("munmap");
			}
		}

		if (stream->wait_fd >= 0) {
			ret = close(stream->wait_fd);
			if (ret) {
				PERROR("close");
			}
			stream->wait_fd = -1;
		}
		if (stream->chan->output == CONSUMER_CHANNEL_SPLICE) {
			utils_close_pipe(stream->splice_pipe);
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
	{
		/*
		 * Special case for the metadata since the wait fd is an internal pipe
		 * polled in the metadata thread.
		 */
		if (stream->metadata_flag && stream->chan->monitor) {
			int rpipe = stream->ust_metadata_poll_pipe[0];

			/*
			 * This will stop the channel timer if any and close the write side
			 * of the metadata poll pipe.
			 */
			lttng_ustconsumer_close_metadata(stream->chan);
			if (rpipe >= 0) {
				ret = close(rpipe);
				if (ret < 0) {
					PERROR("closing metadata pipe read side");
				}
				stream->ust_metadata_poll_pipe[0] = -1;
			}
		}
		break;
	}
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	/* Close output fd. Could be a socket or local file at this point. */
	if (stream->out_fd >= 0) {
		ret = close(stream->out_fd);
		if (ret) {
			PERROR("close");
		}
		stream->out_fd = -1;
	}

	if (stream->index_file) {
		lttng_index_file_put(stream->index_file);
		stream->index_file = NULL;
	}

	lttng_trace_chunk_put(stream->trace_chunk);
	stream->trace_chunk = NULL;

	/* Check and clean up the relayd if needed. */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		consumer_stream_relayd_close(stream, relayd);
	}
	rcu_read_unlock();
}

/*
 * Delete the stream from all possible hash tables.
 *
 * The consumer data lock MUST be acquired.
 * The stream lock MUST be acquired.
 */
void consumer_stream_delete(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	int ret;
	struct lttng_ht_iter iter;

	assert(stream);
	/* Should NEVER be called on a stream that is not in monitor mode. */
	assert(stream->chan->monitor);

	rcu_read_lock();

	if (ht) {
		iter.iter.node = &stream->node.node;
		ret = lttng_ht_del(ht, &iter);
		assert(!ret);
	}

	/* Delete from stream per channel ID hash table. */
	iter.iter.node = &stream->node_channel_id.node;
	/*
	 * The returned value is of no importance. Even if the node is NOT in the
	 * hash table, we continue since we may have been called by a code path
	 * that did not add the stream to all hash tables. The same goes for the
	 * next ht del call.
	 */
	(void) lttng_ht_del(consumer_data.stream_per_chan_id_ht, &iter);

	/* Delete from the global stream list. */
	iter.iter.node = &stream->node_session_id.node;
	/* See the previous ht del on why we ignore the returned value. */
	(void) lttng_ht_del(consumer_data.stream_list_ht, &iter);

	rcu_read_unlock();

	if (!stream->metadata_flag) {
		/* Decrement the stream count of the global consumer data. */
		assert(consumer_data.stream_count > 0);
		consumer_data.stream_count--;
	}
}

/*
 * Free the given stream within an RCU call.
 */
void consumer_stream_free(struct lttng_consumer_stream *stream)
{
	assert(stream);

	metadata_bucket_destroy(stream->metadata_bucket);
	call_rcu(&stream->node.head, free_stream_rcu);
}

/*
 * Destroy the stream's tracer-side buffers.
 */
void consumer_stream_destroy_buffers(struct lttng_consumer_stream *stream)
{
	assert(stream);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_stream(stream);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}
}

/*
 * Destroy and close an already created stream.
 */
static void destroy_close_stream(struct lttng_consumer_stream *stream)
{
	assert(stream);

	DBG("Consumer stream destroy monitored key: %" PRIu64, stream->key);

	/* Destroy tracer buffers of the stream. */
	consumer_stream_destroy_buffers(stream);
	/* Close down everything, including the relayd if any. */
	consumer_stream_close(stream);
}

/*
 * Decrement the stream's channel refcount and, if it drops to 0 (and no
 * initial streams are left), return the channel pointer so it can be
 * destroyed by the caller; return NULL otherwise.
 */
static struct lttng_consumer_channel *unref_channel(
		struct lttng_consumer_stream *stream)
{
	struct lttng_consumer_channel *free_chan = NULL;

	assert(stream);
	assert(stream->chan);

	/* Update refcount of channel and see if we need to destroy it. */
	if (!uatomic_sub_return(&stream->chan->refcount, 1)
			&& !uatomic_read(&stream->chan->nb_init_stream_left)) {
		free_chan = stream->chan;
	}

	return free_chan;
}

/*
 * Destroy a stream completely. This will delete, close and free the stream.
 * Once this returns, the stream is no longer usable. Its channel may get
 * destroyed if the conditions are met for a monitored stream.
 *
 * This MUST be called WITHOUT the consumer data and stream locks acquired if
 * the stream is in _monitor_ mode; otherwise it does not matter.
 */
void consumer_stream_destroy(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	assert(stream);

	/* Stream is in monitor mode. */
	if (stream->monitor) {
		struct lttng_consumer_channel *free_chan = NULL;

		/*
		 * This means that the stream was successfully removed from the streams
		 * list of the channel and sent to the right thread managing this
		 * stream thus being globally visible.
		 */
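		/*
		 * Lock ordering: the global consumer data lock is taken first,
		 * then the channel lock, then the stream lock; a consistent
		 * order must be kept to avoid deadlocks with other threads
		 * using the same locks.
		 */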
		if (stream->globally_visible) {
			pthread_mutex_lock(&consumer_data.lock);
			pthread_mutex_lock(&stream->chan->lock);
			pthread_mutex_lock(&stream->lock);
			/* Remove every reference of the stream in the consumer. */
			consumer_stream_delete(stream, ht);

			destroy_close_stream(stream);

			/* Update channel's refcount of the stream. */
			free_chan = unref_channel(stream);

			/* Indicates that the consumer data state MUST be updated after this. */
			consumer_data.need_update = 1;

			pthread_mutex_unlock(&stream->lock);
			pthread_mutex_unlock(&stream->chan->lock);
			pthread_mutex_unlock(&consumer_data.lock);
		} else {
			/*
			 * If the stream is not visible globally, this needs to be done
			 * outside of the consumer data lock section.
			 */
			free_chan = unref_channel(stream);
		}

		if (free_chan) {
			consumer_del_channel(free_chan);
		}
	} else {
		destroy_close_stream(stream);
	}

	/* Free the stream within an RCU call. */
	lttng_trace_chunk_put(stream->trace_chunk);
	stream->trace_chunk = NULL;
	consumer_stream_free(stream);
}

/*
 * Write the index of a specific stream either to the relay daemon or to the
 * local disk.
 *
 * Return 0 on success or else a negative value.
 */
int consumer_stream_write_index(struct lttng_consumer_stream *stream,
		struct ctf_packet_index *element)
{
	int ret;

	assert(stream);
	assert(element);

	rcu_read_lock();
	if (stream->net_seq_idx != (uint64_t) -1ULL) {
		struct consumer_relayd_sock_pair *relayd;
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd) {
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			ret = relayd_send_index(&relayd->control_sock, element,
					stream->relayd_stream_id, stream->next_net_seq_num - 1);
			if (ret < 0) {
				/*
				 * Communication error with lttng-relayd,
				 * perform cleanup now
				 */
				ERR("Relayd send index failed. Cleaning up relayd %" PRIu64 ".", relayd->net_seq_idx);
				lttng_consumer_cleanup_relayd(relayd);
				ret = -1;
			}
			pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		} else {
			ERR("Stream %" PRIu64 " relayd ID %" PRIu64 " unknown. Can't write index.",
					stream->key, stream->net_seq_idx);
			ret = -1;
		}
	} else {
		if (lttng_index_file_write(stream->index_file, element)) {
			ret = -1;
		} else {
			ret = 0;
		}
	}
	if (ret < 0) {
		goto error;
	}

error:
	rcu_read_unlock();
	return ret;
}

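/*
 * Create (or re-create) the stream's output file within its current trace
 * chunk, closing any previously opened output file descriptor. When
 * requested (or when an index file already exists), the corresponding index
 * file is also re-created. The current tracefile size and output offset are
 * reset, as when starting a new trace file (e.g. after a rotation).
 */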
int consumer_stream_create_output_files(struct lttng_consumer_stream *stream,
		bool create_index)
{
	int ret;
	enum lttng_trace_chunk_status chunk_status;
	const int flags = O_WRONLY | O_CREAT | O_TRUNC;
	const mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP;
	char stream_path[LTTNG_PATH_MAX];

	ASSERT_LOCKED(stream->lock);
	assert(stream->trace_chunk);

	ret = utils_stream_file_path(stream->chan->pathname, stream->name,
			stream->chan->tracefile_size,
			stream->tracefile_count_current, NULL,
			stream_path, sizeof(stream_path));
	if (ret < 0) {
		goto end;
	}

	if (stream->out_fd >= 0) {
		ret = close(stream->out_fd);
		if (ret < 0) {
			PERROR("Failed to close stream file \"%s\"",
					stream->name);
			goto end;
		}
		stream->out_fd = -1;
	}

	DBG("Opening stream output file \"%s\"", stream_path);
	chunk_status = lttng_trace_chunk_open_file(stream->trace_chunk, stream_path,
			flags, mode, &stream->out_fd, false);
	if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
		ERR("Failed to open stream file \"%s\"", stream->name);
		ret = -1;
		goto end;
	}

	if (!stream->metadata_flag && (create_index || stream->index_file)) {
		if (stream->index_file) {
			lttng_index_file_put(stream->index_file);
		}
		chunk_status = lttng_index_file_create_from_trace_chunk(
				stream->trace_chunk,
				stream->chan->pathname,
				stream->name,
				stream->chan->tracefile_size,
				stream->tracefile_count_current,
				CTF_INDEX_MAJOR, CTF_INDEX_MINOR,
				false, &stream->index_file);
		if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
			ret = -1;
			goto end;
		}
	}

	/* Reset the current size because we just performed a rotation. */
	stream->tracefile_size_current = 0;
	stream->out_fd_offset = 0;
end:
	return ret;
}

int consumer_stream_rotate_output_files(struct lttng_consumer_stream *stream)
{
	int ret;

	stream->tracefile_count_current++;
	if (stream->chan->tracefile_count > 0) {
		stream->tracefile_count_current %=
				stream->chan->tracefile_count;
	}

	DBG("Rotating output files of stream \"%s\"", stream->name);
	ret = consumer_stream_create_output_files(stream, true);
	if (ret) {
		goto end;
	}

end:
	return ret;
}

bool consumer_stream_is_deleted(struct lttng_consumer_stream *stream)
{
	/*
	 * This function does not take a const stream since
	 * cds_lfht_is_node_deleted was not const before liburcu 0.12.
	 */
	assert(stream);
	return cds_lfht_is_node_deleted(&stream->node.node);
}

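/*
 * Metadata "bucketization": when enabled, consumed metadata sub-buffers are
 * accumulated in a metadata bucket instead of being written out immediately.
 * The bucket's flush callback (metadata_bucket_flush) later writes the
 * coalesced contents through the regular mmap consume path.
 */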
static ssize_t metadata_bucket_flush(
		const struct stream_subbuffer *buffer, void *data)
{
	ssize_t ret;
	struct lttng_consumer_stream *stream = data;

	ret = consumer_stream_consume_mmap(NULL, stream, buffer);
	if (ret < 0) {
		goto end;
	}
end:
	return ret;
}

static ssize_t metadata_bucket_consume(
		struct lttng_consumer_local_data *unused,
		struct lttng_consumer_stream *stream,
		const struct stream_subbuffer *subbuffer)
{
	ssize_t ret;
	enum metadata_bucket_status status;

	status = metadata_bucket_fill(stream->metadata_bucket, subbuffer);
	switch (status) {
	case METADATA_BUCKET_STATUS_OK:
		/* Return consumed size. */
		ret = subbuffer->buffer.buffer.size;
		break;
	default:
		ret = -1;
	}

	return ret;
}

int consumer_stream_enable_metadata_bucketization(
		struct lttng_consumer_stream *stream)
{
	int ret = 0;

	assert(stream->metadata_flag);
	assert(!stream->metadata_bucket);
	assert(stream->chan->output == CONSUMER_CHANNEL_MMAP);

	stream->metadata_bucket = metadata_bucket_create(
			metadata_bucket_flush, stream);
	if (!stream->metadata_bucket) {
		ret = -1;
		goto end;
	}

	stream->read_subbuffer_ops.consume_subbuffer = metadata_bucket_consume;
end:
	return ret;
}

void consumer_stream_metadata_set_version(
		struct lttng_consumer_stream *stream, uint64_t new_version)
{
	assert(new_version > stream->metadata_version);
	stream->metadata_version = new_version;
	stream->reset_metadata_flag = 1;

	if (stream->metadata_bucket) {
		metadata_bucket_reset(stream->metadata_bucket);
	}
}