Fix: invalid discarded events on start/stop without event production
[lttng-tools.git] / src / common / consumer / consumer-stream.c
1 /*
2 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 * Copyright (C) 2013 - David Goulet <dgoulet@efficios.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License, version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc., 51
17 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19
20 #define _LGPL_SOURCE
21 #include <assert.h>
22 #include <inttypes.h>
23 #include <sys/mman.h>
24 #include <unistd.h>
25
26 #include <common/common.h>
27 #include <common/index/index.h>
28 #include <common/kernel-consumer/kernel-consumer.h>
29 #include <common/relayd/relayd.h>
30 #include <common/ust-consumer/ust-consumer.h>
31 #include <common/utils.h>
32 #include <common/consumer/consumer.h>
33 #include <common/consumer/consumer-timer.h>
34 #include <common/consumer/metadata-bucket.h>
35
36 #include "consumer-stream.h"
37
38 /*
39 * RCU call to free stream. MUST only be used with call_rcu().
40 */
41 static void free_stream_rcu(struct rcu_head *head)
42 {
43 struct lttng_ht_node_u64 *node =
44 caa_container_of(head, struct lttng_ht_node_u64, head);
45 struct lttng_consumer_stream *stream =
46 caa_container_of(node, struct lttng_consumer_stream, node);
47
48 pthread_mutex_destroy(&stream->lock);
49 free(stream);
50 }
51
/*
 * Acquire the locks protecting a data stream: channel lock first, then
 * the stream lock. This ordering must match every other site that takes
 * both locks to avoid deadlocks.
 */
static void consumer_stream_data_lock_all(struct lttng_consumer_stream *stream)
{
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->lock);
}
57
/*
 * Release the data stream locks in the reverse order of
 * consumer_stream_data_lock_all().
 */
static void consumer_stream_data_unlock_all(struct lttng_consumer_stream *stream)
{
	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->lock);
}
63
/*
 * Acquire all locks of a metadata stream: the data locks (channel, then
 * stream) plus the metadata rendez-vous lock.
 */
static void consumer_stream_metadata_lock_all(struct lttng_consumer_stream *stream)
{
	consumer_stream_data_lock_all(stream);
	pthread_mutex_lock(&stream->metadata_rdv_lock);
}
69
/*
 * Release all locks of a metadata stream in the reverse order of
 * consumer_stream_metadata_lock_all().
 */
static void consumer_stream_metadata_unlock_all(struct lttng_consumer_stream *stream)
{
	pthread_mutex_unlock(&stream->metadata_rdv_lock);
	consumer_stream_data_unlock_all(stream);
}
75
76 /* Only used for data streams. */
77 static int consumer_stream_update_stats(struct lttng_consumer_stream *stream,
78 const struct stream_subbuffer *subbuf)
79 {
80 int ret = 0;
81 uint64_t sequence_number;
82 const uint64_t discarded_events = subbuf->info.data.events_discarded;
83
84 if (!subbuf->info.data.sequence_number.is_set) {
85 /* Command not supported by the tracer. */
86 sequence_number = -1ULL;
87 } else {
88 sequence_number = subbuf->info.data.sequence_number.value;
89 }
90
91 /*
92 * Start the sequence when we extract the first packet in case we don't
93 * start at 0 (for example if a consumer is not connected to the
94 * session immediately after the beginning).
95 */
96 if (stream->last_sequence_number == -1ULL) {
97 stream->last_sequence_number = sequence_number;
98 } else if (sequence_number > stream->last_sequence_number) {
99 stream->chan->lost_packets += sequence_number -
100 stream->last_sequence_number - 1;
101 } else {
102 /* seq <= last_sequence_number */
103 ERR("Sequence number inconsistent : prev = %" PRIu64
104 ", current = %" PRIu64,
105 stream->last_sequence_number, sequence_number);
106 ret = -1;
107 goto end;
108 }
109 stream->last_sequence_number = sequence_number;
110
111 if (discarded_events < stream->last_discarded_events) {
112 /*
113 * Overflow has occurred. We assume only one wrap-around
114 * has occurred.
115 */
116 stream->chan->discarded_events +=
117 (1ULL << (CAA_BITS_PER_LONG - 1)) -
118 stream->last_discarded_events +
119 discarded_events;
120 } else {
121 stream->chan->discarded_events += discarded_events -
122 stream->last_discarded_events;
123 }
124 stream->last_discarded_events = discarded_events;
125 ret = 0;
126
127 end:
128 return ret;
129 }
130
131 static
132 void ctf_packet_index_populate(struct ctf_packet_index *index,
133 off_t offset, const struct stream_subbuffer *subbuffer)
134 {
135 *index = (typeof(*index)){
136 .offset = htobe64(offset),
137 .packet_size = htobe64(subbuffer->info.data.packet_size),
138 .content_size = htobe64(subbuffer->info.data.content_size),
139 .timestamp_begin = htobe64(
140 subbuffer->info.data.timestamp_begin),
141 .timestamp_end = htobe64(
142 subbuffer->info.data.timestamp_end),
143 .events_discarded = htobe64(
144 subbuffer->info.data.events_discarded),
145 .stream_id = htobe64(subbuffer->info.data.stream_id),
146 .stream_instance_id = htobe64(
147 subbuffer->info.data.stream_instance_id.is_set ?
148 subbuffer->info.data.stream_instance_id.value : -1ULL),
149 .packet_seq_num = htobe64(
150 subbuffer->info.data.sequence_number.is_set ?
151 subbuffer->info.data.sequence_number.value : -1ULL),
152 };
153 }
154
/*
 * Consume one sub-buffer using the mmap output path.
 *
 * The padding (padded size minus payload size) is passed down so the
 * write path can account for it. Note: ctx is unused here but kept to
 * match the consume_subbuffer callback signature.
 */
static ssize_t consumer_stream_consume_mmap(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream,
		const struct stream_subbuffer *subbuffer)
{
	const unsigned long padding_size =
			subbuffer->info.data.padded_subbuf_size -
			subbuffer->info.data.subbuf_size;

	return lttng_consumer_on_read_subbuffer_mmap(
			stream, &subbuffer->buffer.buffer, padding_size);
}
167
/*
 * Consume one sub-buffer using the splice output path; the full padded
 * sub-buffer size is spliced.
 */
static ssize_t consumer_stream_consume_splice(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream,
		const struct stream_subbuffer *subbuffer)
{
	return lttng_consumer_on_read_subbuffer_splice(ctx, stream,
			subbuffer->info.data.padded_subbuf_size, 0);
}
176
/*
 * Build and write the index entry of a consumed sub-buffer, either to
 * the relayd or to the local index file (see
 * consumer_stream_write_index()).
 *
 * Returns 0 on success, a negative value on error.
 */
static int consumer_stream_send_index(
		struct lttng_consumer_stream *stream,
		const struct stream_subbuffer *subbuffer,
		struct lttng_consumer_local_data *ctx)
{
	off_t packet_offset = 0;
	struct ctf_packet_index index = {};

	/*
	 * This is called after consuming the sub-buffer; subtract the
	 * effect of this sub-buffer from the offset. Only meaningful for
	 * local output (no relayd).
	 */
	if (stream->relayd_id == (uint64_t) -1ULL) {
		packet_offset = stream->out_fd_offset -
				subbuffer->info.data.padded_subbuf_size;
	}

	ctf_packet_index_populate(&index, packet_offset, subbuffer);
	return consumer_stream_write_index(stream, &index);
}
197
198 /*
199 * Actually do the metadata sync using the given metadata stream.
200 *
201 * Return 0 on success else a negative value. ENODATA can be returned also
202 * indicating that there is no metadata available for that stream.
203 */
204 static int do_sync_metadata(struct lttng_consumer_stream *metadata,
205 struct lttng_consumer_local_data *ctx)
206 {
207 int ret;
208
209 assert(metadata);
210 assert(metadata->metadata_flag);
211 assert(ctx);
212
213 /*
214 * In UST, since we have to write the metadata from the cache packet
215 * by packet, we might need to start this procedure multiple times
216 * until all the metadata from the cache has been extracted.
217 */
218 do {
219 /*
220 * Steps :
221 * - Lock the metadata stream
222 * - Check if metadata stream node was deleted before locking.
223 * - if yes, release and return success
224 * - Check if new metadata is ready (flush + snapshot pos)
225 * - If nothing : release and return.
226 * - Lock the metadata_rdv_lock
227 * - Unlock the metadata stream
228 * - cond_wait on metadata_rdv to wait the wakeup from the
229 * metadata thread
230 * - Unlock the metadata_rdv_lock
231 */
232 pthread_mutex_lock(&metadata->lock);
233
234 /*
235 * There is a possibility that we were able to acquire a reference on the
236 * stream from the RCU hash table but between then and now, the node might
237 * have been deleted just before the lock is acquired. Thus, after locking,
238 * we make sure the metadata node has not been deleted which means that the
239 * buffers are closed.
240 *
241 * In that case, there is no need to sync the metadata hence returning a
242 * success return code.
243 */
244 ret = cds_lfht_is_node_deleted(&metadata->node.node);
245 if (ret) {
246 ret = 0;
247 goto end_unlock_mutex;
248 }
249
250 switch (ctx->type) {
251 case LTTNG_CONSUMER_KERNEL:
252 /*
253 * Empty the metadata cache and flush the current stream.
254 */
255 ret = lttng_kconsumer_sync_metadata(metadata);
256 break;
257 case LTTNG_CONSUMER32_UST:
258 case LTTNG_CONSUMER64_UST:
259 /*
260 * Ask the sessiond if we have new metadata waiting and update the
261 * consumer metadata cache.
262 */
263 ret = lttng_ustconsumer_sync_metadata(ctx, metadata);
264 break;
265 default:
266 assert(0);
267 ret = -1;
268 break;
269 }
270 /*
271 * Error or no new metadata, we exit here.
272 */
273 if (ret <= 0 || ret == ENODATA) {
274 goto end_unlock_mutex;
275 }
276
277 /*
278 * At this point, new metadata have been flushed, so we wait on the
279 * rendez-vous point for the metadata thread to wake us up when it
280 * finishes consuming the metadata and continue execution.
281 */
282
283 pthread_mutex_lock(&metadata->metadata_rdv_lock);
284
285 /*
286 * Release metadata stream lock so the metadata thread can process it.
287 */
288 pthread_mutex_unlock(&metadata->lock);
289
290 /*
291 * Wait on the rendez-vous point. Once woken up, it means the metadata was
292 * consumed and thus synchronization is achieved.
293 */
294 pthread_cond_wait(&metadata->metadata_rdv, &metadata->metadata_rdv_lock);
295 pthread_mutex_unlock(&metadata->metadata_rdv_lock);
296 } while (ret == EAGAIN);
297
298 /* Success */
299 return 0;
300
301 end_unlock_mutex:
302 pthread_mutex_unlock(&metadata->lock);
303 return ret;
304 }
305
306 /*
307 * Synchronize the metadata using a given session ID. A successful acquisition
308 * of a metadata stream will trigger a request to the session daemon and a
309 * snapshot so the metadata thread can consume it.
310 *
311 * This function call is a rendez-vous point between the metadata thread and
312 * the data thread.
313 *
314 * Return 0 on success or else a negative value.
315 */
316 int consumer_stream_sync_metadata(struct lttng_consumer_local_data *ctx,
317 uint64_t session_id)
318 {
319 int ret;
320 struct lttng_consumer_stream *stream = NULL;
321 struct lttng_ht_iter iter;
322 struct lttng_ht *ht;
323
324 assert(ctx);
325
326 /* Ease our life a bit. */
327 ht = consumer_data.stream_list_ht;
328
329 rcu_read_lock();
330
331 /* Search the metadata associated with the session id of the given stream. */
332
333 cds_lfht_for_each_entry_duplicate(ht->ht,
334 ht->hash_fct(&session_id, lttng_ht_seed), ht->match_fct,
335 &session_id, &iter.iter, stream, node_session_id.node) {
336 if (!stream->metadata_flag) {
337 continue;
338 }
339
340 ret = do_sync_metadata(stream, ctx);
341 if (ret < 0) {
342 goto end;
343 }
344 }
345
346 /*
347 * Force return code to 0 (success) since ret might be ENODATA for instance
348 * which is not an error but rather that we should come back.
349 */
350 ret = 0;
351
352 end:
353 rcu_read_unlock();
354 return ret;
355 }
356
/*
 * Post-consume callback for live data streams: block until the session's
 * metadata has been pushed, then send the sub-buffer's index.
 *
 * The waiting_on_metadata / missed_metadata_flush flags coordinate with
 * the live timer: a flush request that arrived while we were waiting is
 * replayed here as a live beacon.
 *
 * Returns 0 on success, a negative value on error.
 */
static int consumer_stream_sync_metadata_index(
		struct lttng_consumer_stream *stream,
		const struct stream_subbuffer *subbuffer,
		struct lttng_consumer_local_data *ctx)
{
	int ret;

	/* Block until all the metadata is sent. */
	pthread_mutex_lock(&stream->metadata_timer_lock);
	assert(!stream->missed_metadata_flush);
	stream->waiting_on_metadata = true;
	pthread_mutex_unlock(&stream->metadata_timer_lock);

	ret = consumer_stream_sync_metadata(ctx, stream->session_id);

	pthread_mutex_lock(&stream->metadata_timer_lock);
	stream->waiting_on_metadata = false;
	if (stream->missed_metadata_flush) {
		/* The timer tried to flush while we held the stream; redo it. */
		stream->missed_metadata_flush = false;
		pthread_mutex_unlock(&stream->metadata_timer_lock);
		(void) stream->read_subbuffer_ops.send_live_beacon(stream);
	} else {
		pthread_mutex_unlock(&stream->metadata_timer_lock);
	}
	if (ret < 0) {
		goto end;
	}

	ret = consumer_stream_send_index(stream, subbuffer, ctx);
end:
	return ret;
}
389
390 /*
391 * Check if the local version of the metadata stream matches with the version
392 * of the metadata stream in the kernel. If it was updated, set the reset flag
393 * on the stream.
394 */
395 static
396 int metadata_stream_check_version(struct lttng_consumer_stream *stream,
397 const struct stream_subbuffer *subbuffer)
398 {
399 if (stream->metadata_version == subbuffer->info.metadata.version) {
400 goto end;
401 }
402
403 DBG("New metadata version detected");
404 consumer_stream_metadata_set_version(stream,
405 subbuffer->info.metadata.version);
406
407 if (stream->read_subbuffer_ops.reset_metadata) {
408 stream->read_subbuffer_ops.reset_metadata(stream);
409 }
410
411 end:
412 return 0;
413 }
414
415 struct lttng_consumer_stream *consumer_stream_create(
416 struct lttng_consumer_channel *channel,
417 uint64_t channel_key,
418 uint64_t stream_key,
419 enum lttng_consumer_stream_state state,
420 const char *channel_name,
421 uid_t uid,
422 gid_t gid,
423 uint64_t relayd_id,
424 uint64_t session_id,
425 int cpu,
426 int *alloc_ret,
427 enum consumer_channel_type type,
428 unsigned int monitor)
429 {
430 int ret;
431 struct lttng_consumer_stream *stream;
432
433 stream = zmalloc(sizeof(*stream));
434 if (stream == NULL) {
435 PERROR("malloc struct lttng_consumer_stream");
436 ret = -ENOMEM;
437 goto end;
438 }
439
440 rcu_read_lock();
441 stream->chan = channel;
442 stream->key = stream_key;
443 stream->out_fd = -1;
444 stream->out_fd_offset = 0;
445 stream->output_written = 0;
446 stream->state = state;
447 stream->uid = uid;
448 stream->gid = gid;
449 stream->relayd_id = relayd_id;
450 stream->session_id = session_id;
451 stream->monitor = monitor;
452 stream->endpoint_status = CONSUMER_ENDPOINT_ACTIVE;
453 stream->index_file = NULL;
454 stream->last_sequence_number = -1ULL;
455 pthread_mutex_init(&stream->lock, NULL);
456 pthread_mutex_init(&stream->metadata_timer_lock, NULL);
457
458 /* If channel is the metadata, flag this stream as metadata. */
459 if (type == CONSUMER_CHANNEL_TYPE_METADATA) {
460 stream->metadata_flag = 1;
461 /* Metadata is flat out. */
462 strncpy(stream->name, DEFAULT_METADATA_NAME, sizeof(stream->name));
463 /* Live rendez-vous point. */
464 pthread_cond_init(&stream->metadata_rdv, NULL);
465 pthread_mutex_init(&stream->metadata_rdv_lock, NULL);
466 } else {
467 /* Format stream name to <channel_name>_<cpu_number> */
468 ret = snprintf(stream->name, sizeof(stream->name), "%s_%d",
469 channel_name, cpu);
470 if (ret < 0) {
471 PERROR("snprintf stream name");
472 goto error;
473 }
474 }
475
476 /* Key is always the wait_fd for streams. */
477 lttng_ht_node_init_u64(&stream->node, stream->key);
478
479 /* Init node per channel id key */
480 lttng_ht_node_init_u64(&stream->node_channel_id, channel_key);
481
482 /* Init session id node with the stream session id */
483 lttng_ht_node_init_u64(&stream->node_session_id, stream->session_id);
484
485 DBG3("Allocated stream %s (key %" PRIu64 ", chan_key %" PRIu64
486 " relayd_id %" PRIu64 ", session_id %" PRIu64,
487 stream->name, stream->key, channel_key,
488 stream->relayd_id, stream->session_id);
489
490 rcu_read_unlock();
491
492 switch (channel->output) {
493 case CONSUMER_CHANNEL_SPLICE:
494 stream->output = LTTNG_EVENT_SPLICE;
495 ret = utils_create_pipe(stream->splice_pipe);
496 if (ret < 0) {
497 goto error;
498 }
499 break;
500 case CONSUMER_CHANNEL_MMAP:
501 stream->output = LTTNG_EVENT_MMAP;
502 break;
503 default:
504 abort();
505 }
506
507 if (type == CONSUMER_CHANNEL_TYPE_METADATA) {
508 stream->read_subbuffer_ops.lock =
509 consumer_stream_metadata_lock_all;
510 stream->read_subbuffer_ops.unlock =
511 consumer_stream_metadata_unlock_all;
512 stream->read_subbuffer_ops.pre_consume_subbuffer =
513 metadata_stream_check_version;
514 } else {
515 stream->read_subbuffer_ops.lock = consumer_stream_data_lock_all;
516 stream->read_subbuffer_ops.unlock =
517 consumer_stream_data_unlock_all;
518 stream->read_subbuffer_ops.pre_consume_subbuffer =
519 consumer_stream_update_stats;
520 if (channel->is_live) {
521 stream->read_subbuffer_ops.post_consume =
522 consumer_stream_sync_metadata_index;
523 } else {
524 stream->read_subbuffer_ops.post_consume =
525 consumer_stream_send_index;
526 }
527 }
528
529 if (channel->output == CONSUMER_CHANNEL_MMAP) {
530 stream->read_subbuffer_ops.consume_subbuffer =
531 consumer_stream_consume_mmap;
532 } else {
533 stream->read_subbuffer_ops.consume_subbuffer =
534 consumer_stream_consume_splice;
535 }
536
537 return stream;
538
539 error:
540 rcu_read_unlock();
541 free(stream);
542 end:
543 if (alloc_ret) {
544 *alloc_ret = ret;
545 }
546 return NULL;
547 }
548
549 /*
550 * Close stream on the relayd side. This call can destroy a relayd if the
551 * conditions are met.
552 *
553 * A RCU read side lock MUST be acquired if the relayd object was looked up in
554 * a hash table before calling this.
555 */
556 void consumer_stream_relayd_close(struct lttng_consumer_stream *stream,
557 struct consumer_relayd_sock_pair *relayd)
558 {
559 int ret;
560
561 assert(stream);
562 assert(relayd);
563
564 if (stream->sent_to_relayd) {
565 uatomic_dec(&relayd->refcount);
566 assert(uatomic_read(&relayd->refcount) >= 0);
567 }
568
569 /* Closing streams requires to lock the control socket. */
570 pthread_mutex_lock(&relayd->ctrl_sock_mutex);
571 ret = relayd_send_close_stream(&relayd->control_sock,
572 stream->relayd_stream_id,
573 stream->next_net_seq_num - 1);
574 if (ret < 0) {
575 ERR("Relayd send close stream failed. Cleaning up relayd %" PRIu64 ".", relayd->id);
576 lttng_consumer_cleanup_relayd(relayd);
577 }
578 pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
579
580 /* Both conditions are met, we destroy the relayd. */
581 if (uatomic_read(&relayd->refcount) == 0 &&
582 uatomic_read(&relayd->destroy_flag)) {
583 consumer_destroy_relayd(relayd);
584 }
585 stream->relayd_id = (uint64_t) -1ULL;
586 stream->sent_to_relayd = 0;
587 }
588
589 /*
590 * Close stream's file descriptors and, if needed, close stream also on the
591 * relayd side.
592 *
593 * The consumer data lock MUST be acquired.
594 * The stream lock MUST be acquired.
595 */
596 void consumer_stream_close(struct lttng_consumer_stream *stream)
597 {
598 int ret;
599 struct consumer_relayd_sock_pair *relayd;
600
601 assert(stream);
602
603 switch (consumer_data.type) {
604 case LTTNG_CONSUMER_KERNEL:
605 if (stream->mmap_base != NULL) {
606 ret = munmap(stream->mmap_base, stream->mmap_len);
607 if (ret != 0) {
608 PERROR("munmap");
609 }
610 }
611
612 if (stream->wait_fd >= 0) {
613 ret = close(stream->wait_fd);
614 if (ret) {
615 PERROR("close");
616 }
617 stream->wait_fd = -1;
618 }
619 if (stream->chan->output == CONSUMER_CHANNEL_SPLICE) {
620 utils_close_pipe(stream->splice_pipe);
621 }
622 break;
623 case LTTNG_CONSUMER32_UST:
624 case LTTNG_CONSUMER64_UST:
625 {
626 /*
627 * Special case for the metadata since the wait fd is an internal pipe
628 * polled in the metadata thread.
629 */
630 if (stream->metadata_flag && stream->chan->monitor) {
631 int rpipe = stream->ust_metadata_poll_pipe[0];
632
633 /*
634 * This will stop the channel timer if one and close the write side
635 * of the metadata poll pipe.
636 */
637 lttng_ustconsumer_close_metadata(stream->chan);
638 if (rpipe >= 0) {
639 ret = close(rpipe);
640 if (ret < 0) {
641 PERROR("closing metadata pipe read side");
642 }
643 stream->ust_metadata_poll_pipe[0] = -1;
644 }
645 }
646 break;
647 }
648 default:
649 ERR("Unknown consumer_data type");
650 assert(0);
651 }
652
653 /* Close output fd. Could be a socket or local file at this point. */
654 if (stream->out_fd >= 0) {
655 ret = close(stream->out_fd);
656 if (ret) {
657 PERROR("close");
658 }
659 stream->out_fd = -1;
660 }
661
662 if (stream->index_file) {
663 lttng_index_file_put(stream->index_file);
664 stream->index_file = NULL;
665 }
666
667 /* Check and cleanup relayd if needed. */
668 rcu_read_lock();
669 relayd = consumer_find_relayd(stream->relayd_id);
670 if (relayd != NULL) {
671 consumer_stream_relayd_close(stream, relayd);
672 }
673 rcu_read_unlock();
674 }
675
676 /*
677 * Delete the stream from all possible hash tables.
678 *
679 * The consumer data lock MUST be acquired.
680 * The stream lock MUST be acquired.
681 */
682 void consumer_stream_delete(struct lttng_consumer_stream *stream,
683 struct lttng_ht *ht)
684 {
685 int ret;
686 struct lttng_ht_iter iter;
687
688 assert(stream);
689 /* Should NEVER be called not in monitor mode. */
690 assert(stream->chan->monitor);
691
692 rcu_read_lock();
693
694 if (ht) {
695 iter.iter.node = &stream->node.node;
696 ret = lttng_ht_del(ht, &iter);
697 assert(!ret);
698 }
699
700 /* Delete from stream per channel ID hash table. */
701 iter.iter.node = &stream->node_channel_id.node;
702 /*
703 * The returned value is of no importance. Even if the node is NOT in the
704 * hash table, we continue since we may have been called by a code path
705 * that did not add the stream to a (all) hash table. Same goes for the
706 * next call ht del call.
707 */
708 (void) lttng_ht_del(consumer_data.stream_per_chan_id_ht, &iter);
709
710 /* Delete from the global stream list. */
711 iter.iter.node = &stream->node_session_id.node;
712 /* See the previous ht del on why we ignore the returned value. */
713 (void) lttng_ht_del(consumer_data.stream_list_ht, &iter);
714
715 rcu_read_unlock();
716
717 if (!stream->metadata_flag) {
718 /* Decrement the stream count of the global consumer data. */
719 assert(consumer_data.stream_count > 0);
720 consumer_data.stream_count--;
721 }
722 }
723
724 /*
725 * Free the given stream within a RCU call.
726 */
727 void consumer_stream_free(struct lttng_consumer_stream *stream)
728 {
729 assert(stream);
730
731 metadata_bucket_destroy(stream->metadata_bucket);
732 call_rcu(&stream->node.head, free_stream_rcu);
733 }
734
735 /*
736 * Destroy the stream's buffers of the tracer.
737 */
738 void consumer_stream_destroy_buffers(struct lttng_consumer_stream *stream)
739 {
740 assert(stream);
741
742 switch (consumer_data.type) {
743 case LTTNG_CONSUMER_KERNEL:
744 break;
745 case LTTNG_CONSUMER32_UST:
746 case LTTNG_CONSUMER64_UST:
747 lttng_ustconsumer_del_stream(stream);
748 break;
749 default:
750 ERR("Unknown consumer_data type");
751 assert(0);
752 }
753 }
754
755 /*
756 * Destroy and close a already created stream.
757 */
758 static void destroy_close_stream(struct lttng_consumer_stream *stream)
759 {
760 assert(stream);
761
762 DBG("Consumer stream destroy monitored key: %" PRIu64, stream->key);
763
764 /* Destroy tracer buffers of the stream. */
765 consumer_stream_destroy_buffers(stream);
766 /* Close down everything including the relayd if one. */
767 consumer_stream_close(stream);
768 }
769
770 /*
771 * Decrement the stream's channel refcount and if down to 0, return the channel
772 * pointer so it can be destroyed by the caller or NULL if not.
773 */
774 static struct lttng_consumer_channel *unref_channel(
775 struct lttng_consumer_stream *stream)
776 {
777 struct lttng_consumer_channel *free_chan = NULL;
778
779 assert(stream);
780 assert(stream->chan);
781
782 /* Update refcount of channel and see if we need to destroy it. */
783 if (!uatomic_sub_return(&stream->chan->refcount, 1)
784 && !uatomic_read(&stream->chan->nb_init_stream_left)) {
785 free_chan = stream->chan;
786 }
787
788 return free_chan;
789 }
790
791 /*
792 * Destroy a stream completely. This will delete, close and free the stream.
793 * Once return, the stream is NO longer usable. Its channel may get destroyed
794 * if conditions are met for a monitored stream.
795 *
796 * This MUST be called WITHOUT the consumer data and stream lock acquired if
797 * the stream is in _monitor_ mode else it does not matter.
798 */
799 void consumer_stream_destroy(struct lttng_consumer_stream *stream,
800 struct lttng_ht *ht)
801 {
802 assert(stream);
803
804 /* Stream is in monitor mode. */
805 if (stream->monitor) {
806 struct lttng_consumer_channel *free_chan = NULL;
807
808 /*
809 * This means that the stream was successfully removed from the streams
810 * list of the channel and sent to the right thread managing this
811 * stream thus being globally visible.
812 */
813 if (stream->globally_visible) {
814 pthread_mutex_lock(&consumer_data.lock);
815 pthread_mutex_lock(&stream->chan->lock);
816 pthread_mutex_lock(&stream->lock);
817 /* Remove every reference of the stream in the consumer. */
818 consumer_stream_delete(stream, ht);
819
820 destroy_close_stream(stream);
821
822 /* Update channel's refcount of the stream. */
823 free_chan = unref_channel(stream);
824
825 /* Indicates that the consumer data state MUST be updated after this. */
826 consumer_data.need_update = 1;
827
828 pthread_mutex_unlock(&stream->lock);
829 pthread_mutex_unlock(&stream->chan->lock);
830 pthread_mutex_unlock(&consumer_data.lock);
831 } else {
832 /*
833 * If the stream is not visible globally, this needs to be done
834 * outside of the consumer data lock section.
835 */
836 free_chan = unref_channel(stream);
837 }
838
839 if (free_chan) {
840 consumer_del_channel(free_chan);
841 }
842 } else {
843 destroy_close_stream(stream);
844 }
845
846 /* Free stream within a RCU call. */
847 consumer_stream_free(stream);
848 }
849
850 /*
851 * Write index of a specific stream either on the relayd or local disk.
852 *
853 * Return 0 on success or else a negative value.
854 */
855 int consumer_stream_write_index(struct lttng_consumer_stream *stream,
856 struct ctf_packet_index *element)
857 {
858 int ret;
859
860 assert(stream);
861 assert(element);
862
863 rcu_read_lock();
864 if (stream->relayd_id != (uint64_t) -1ULL) {
865 struct consumer_relayd_sock_pair *relayd;
866
867 relayd = consumer_find_relayd(stream->relayd_id);
868 if (relayd) {
869 pthread_mutex_lock(&relayd->ctrl_sock_mutex);
870 ret = relayd_send_index(&relayd->control_sock, element,
871 stream->relayd_stream_id, stream->next_net_seq_num - 1);
872 if (ret < 0) {
873 /*
874 * Communication error with lttng-relayd,
875 * perform cleanup now
876 */
877 ERR("Relayd send index failed. Cleaning up relayd %" PRIu64 ".", relayd->id);
878 lttng_consumer_cleanup_relayd(relayd);
879 ret = -1;
880 }
881 pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
882 } else {
883 ERR("Stream %" PRIu64 " relayd ID %" PRIu64 " unknown. Can't write index.",
884 stream->key, stream->relayd_id);
885 ret = -1;
886 }
887 } else {
888 if (lttng_index_file_write(stream->index_file, element)) {
889 ret = -1;
890 } else {
891 ret = 0;
892 }
893 }
894 if (ret < 0) {
895 goto error;
896 }
897
898 error:
899 rcu_read_unlock();
900 return ret;
901 }
902
/*
 * Flush callback of the metadata bucket: write the accumulated metadata
 * through the mmap consumption path. The data pointer carries the owning
 * stream (set up in consumer_stream_enable_metadata_bucketization()).
 *
 * Cleanup: the original checked "ret < 0" only to jump to a label that
 * immediately followed — dead control flow; collapsed to a direct return.
 */
static ssize_t metadata_bucket_flush(
		const struct stream_subbuffer *buffer, void *data)
{
	struct lttng_consumer_stream *stream = data;

	/* ctx is unused by the mmap consumption path. */
	return consumer_stream_consume_mmap(NULL, stream, buffer);
}
916
917 static ssize_t metadata_bucket_consume(
918 struct lttng_consumer_local_data *unused,
919 struct lttng_consumer_stream *stream,
920 const struct stream_subbuffer *subbuffer)
921 {
922 ssize_t ret;
923 enum metadata_bucket_status status;
924
925 status = metadata_bucket_fill(stream->metadata_bucket, subbuffer);
926 switch (status) {
927 case METADATA_BUCKET_STATUS_OK:
928 /* Return consumed size. */
929 ret = subbuffer->buffer.buffer.size;
930 break;
931 default:
932 ret = -1;
933 }
934
935 return ret;
936 }
937
/*
 * Enable metadata bucketization on a metadata stream: consumed metadata
 * sub-buffers are accumulated in a bucket (flushed through
 * metadata_bucket_flush()) instead of being written out immediately.
 * Only valid for mmap-output metadata streams, and only once.
 *
 * Returns 0 on success, -1 if the bucket cannot be created.
 */
int consumer_stream_enable_metadata_bucketization(
		struct lttng_consumer_stream *stream)
{
	int ret = 0;

	assert(stream->metadata_flag);
	assert(!stream->metadata_bucket);
	assert(stream->chan->output == CONSUMER_CHANNEL_MMAP);

	stream->metadata_bucket = metadata_bucket_create(
			metadata_bucket_flush, stream);
	if (!stream->metadata_bucket) {
		ret = -1;
		goto end;
	}

	/* Route sub-buffer consumption through the bucket from now on. */
	stream->read_subbuffer_ops.consume_subbuffer = metadata_bucket_consume;
end:
	return ret;
}
958
/*
 * Record a new metadata version on the stream and flag the metadata for
 * reset. Versions must strictly increase; any bucketized metadata
 * accumulated under the old version is discarded.
 */
void consumer_stream_metadata_set_version(
		struct lttng_consumer_stream *stream, uint64_t new_version)
{
	assert(new_version > stream->metadata_version);
	stream->metadata_version = new_version;
	stream->reset_metadata_flag = 1;

	if (stream->metadata_bucket) {
		metadata_bucket_reset(stream->metadata_bucket);
	}
}
This page took 0.092142 seconds and 5 git commands to generate.