Fix: consumerd: live client receives incomplete metadata
[lttng-tools.git] / src / common / consumer / consumer-stream.c
1 /*
2 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 * Copyright (C) 2013 - David Goulet <dgoulet@efficios.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License, version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc., 51
17 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19
20 #define _LGPL_SOURCE
21 #include <assert.h>
22 #include <inttypes.h>
23 #include <sys/mman.h>
24 #include <unistd.h>
25
26 #include <common/common.h>
27 #include <common/index/index.h>
28 #include <common/kernel-consumer/kernel-consumer.h>
29 #include <common/relayd/relayd.h>
30 #include <common/ust-consumer/ust-consumer.h>
31 #include <common/utils.h>
32 #include <common/consumer/consumer.h>
33 #include <common/consumer/consumer-timer.h>
34 #include <common/consumer/metadata-bucket.h>
35
36 #include "consumer-stream.h"
37
38 /*
39 * RCU call to free stream. MUST only be used with call_rcu().
40 */
41 static void free_stream_rcu(struct rcu_head *head)
42 {
43 struct lttng_ht_node_u64 *node =
44 caa_container_of(head, struct lttng_ht_node_u64, head);
45 struct lttng_consumer_stream *stream =
46 caa_container_of(node, struct lttng_consumer_stream, node);
47
48 pthread_mutex_destroy(&stream->lock);
49 free(stream);
50 }
51
/*
 * Acquire the channel lock, then the stream lock. The channel lock is taken
 * first; all lock/unlock helpers in this file follow that ordering.
 */
static void consumer_stream_data_lock_all(struct lttng_consumer_stream *stream)
{
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->lock);
}
57
/*
 * Release the stream lock, then the channel lock — the reverse of the
 * acquisition order used by consumer_stream_data_lock_all().
 */
static void consumer_stream_data_unlock_all(struct lttng_consumer_stream *stream)
{
	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->lock);
}
63
/*
 * Acquire the channel and stream locks, then the metadata rendez-vous lock
 * used to synchronize with threads waiting on a metadata sync.
 */
static void consumer_stream_metadata_lock_all(struct lttng_consumer_stream *stream)
{
	consumer_stream_data_lock_all(stream);
	pthread_mutex_lock(&stream->metadata_rdv_lock);
}
69
/*
 * Release the metadata rendez-vous lock, then the stream and channel locks —
 * the reverse of consumer_stream_metadata_lock_all().
 */
static void consumer_stream_metadata_unlock_all(struct lttng_consumer_stream *stream)
{
	pthread_mutex_unlock(&stream->metadata_rdv_lock);
	consumer_stream_data_unlock_all(stream);
}
75
76 /* Only used for data streams. */
77 static int consumer_stream_update_stats(struct lttng_consumer_stream *stream,
78 const struct stream_subbuffer *subbuf)
79 {
80 int ret = 0;
81 uint64_t sequence_number;
82 const uint64_t discarded_events =
83 LTTNG_OPTIONAL_GET(subbuf->info.data.sequence_number);
84
85 if (!subbuf->info.data.sequence_number.is_set) {
86 /* Command not supported by the tracer. */
87 sequence_number = -1ULL;
88 } else {
89 sequence_number = subbuf->info.data.sequence_number.value;
90 }
91
92 /*
93 * Start the sequence when we extract the first packet in case we don't
94 * start at 0 (for example if a consumer is not connected to the
95 * session immediately after the beginning).
96 */
97 if (stream->last_sequence_number == -1ULL) {
98 stream->last_sequence_number = sequence_number;
99 } else if (sequence_number > stream->last_sequence_number) {
100 stream->chan->lost_packets += sequence_number -
101 stream->last_sequence_number - 1;
102 } else {
103 /* seq <= last_sequence_number */
104 ERR("Sequence number inconsistent : prev = %" PRIu64
105 ", current = %" PRIu64,
106 stream->last_sequence_number, sequence_number);
107 ret = -1;
108 goto end;
109 }
110 stream->last_sequence_number = sequence_number;
111
112 if (discarded_events < stream->last_discarded_events) {
113 /*
114 * Overflow has occurred. We assume only one wrap-around
115 * has occurred.
116 */
117 stream->chan->discarded_events +=
118 (1ULL << (CAA_BITS_PER_LONG - 1)) -
119 stream->last_discarded_events +
120 discarded_events;
121 } else {
122 stream->chan->discarded_events += discarded_events -
123 stream->last_discarded_events;
124 }
125 stream->last_discarded_events = discarded_events;
126 ret = 0;
127
128 end:
129 return ret;
130 }
131
132 static
133 void ctf_packet_index_populate(struct ctf_packet_index *index,
134 off_t offset, const struct stream_subbuffer *subbuffer)
135 {
136 *index = (typeof(*index)){
137 .offset = htobe64(offset),
138 .packet_size = htobe64(subbuffer->info.data.packet_size),
139 .content_size = htobe64(subbuffer->info.data.content_size),
140 .timestamp_begin = htobe64(
141 subbuffer->info.data.timestamp_begin),
142 .timestamp_end = htobe64(
143 subbuffer->info.data.timestamp_end),
144 .events_discarded = htobe64(
145 subbuffer->info.data.events_discarded),
146 .stream_id = htobe64(subbuffer->info.data.stream_id),
147 .stream_instance_id = htobe64(
148 subbuffer->info.data.stream_instance_id.is_set ?
149 subbuffer->info.data.stream_instance_id.value : -1ULL),
150 .packet_seq_num = htobe64(
151 subbuffer->info.data.sequence_number.is_set ?
152 subbuffer->info.data.sequence_number.value : -1ULL),
153 };
154 }
155
156 static ssize_t consumer_stream_consume_mmap(
157 struct lttng_consumer_local_data *ctx,
158 struct lttng_consumer_stream *stream,
159 const struct stream_subbuffer *subbuffer)
160 {
161 const unsigned long padding_size =
162 subbuffer->info.data.padded_subbuf_size -
163 subbuffer->info.data.subbuf_size;
164
165 return lttng_consumer_on_read_subbuffer_mmap(
166 stream, &subbuffer->buffer.buffer, padding_size);
167 }
168
169 static ssize_t consumer_stream_consume_splice(
170 struct lttng_consumer_local_data *ctx,
171 struct lttng_consumer_stream *stream,
172 const struct stream_subbuffer *subbuffer)
173 {
174 return lttng_consumer_on_read_subbuffer_splice(ctx, stream,
175 subbuffer->info.data.padded_subbuf_size, 0);
176 }
177
/*
 * Emit the index entry describing the sub-buffer that was just consumed,
 * either to the relayd or to the local index file.
 *
 * Returns 0 on success, a negative value on error.
 */
static int consumer_stream_send_index(
		struct lttng_consumer_stream *stream,
		const struct stream_subbuffer *subbuffer,
		struct lttng_consumer_local_data *ctx)
{
	off_t packet_offset = 0;
	struct ctf_packet_index index = {};

	/*
	 * This is called after consuming the sub-buffer; subtract the
	 * effect of this sub-buffer from the offset. Only relevant for
	 * local output: when a relayd is used (relayd_id set), the offset
	 * stays 0 since the relayd tracks its own file offsets.
	 */
	if (stream->relayd_id == (uint64_t) -1ULL) {
		packet_offset = stream->out_fd_offset -
				subbuffer->info.data.padded_subbuf_size;
	}

	ctf_packet_index_populate(&index, packet_offset, subbuffer);
	return consumer_stream_write_index(stream, &index);
}
198
/*
 * Actually do the metadata sync using the given metadata stream.
 *
 * Return 0 on success else a negative value. ENODATA can be returned also
 * indicating that there is no metadata available for that stream.
 */
static int do_sync_metadata(struct lttng_consumer_stream *metadata,
		struct lttng_consumer_local_data *ctx)
{
	int ret;

	assert(metadata);
	assert(metadata->metadata_flag);
	assert(ctx);

	/*
	 * In UST, since we have to write the metadata from the cache packet
	 * by packet, we might need to start this procedure multiple times
	 * until all the metadata from the cache has been extracted.
	 * The per-tracer sync call returns EAGAIN in that case (see loop
	 * condition below).
	 */
	do {
		/*
		 * Steps :
		 * - Lock the metadata stream
		 * - Check if metadata stream node was deleted before locking.
		 *   - if yes, release and return success
		 * - Check if new metadata is ready (flush + snapshot pos)
		 * - If nothing : release and return.
		 * - Lock the metadata_rdv_lock
		 * - Unlock the metadata stream
		 * - cond_wait on metadata_rdv to wait the wakeup from the
		 *   metadata thread
		 * - Unlock the metadata_rdv_lock
		 */
		pthread_mutex_lock(&metadata->lock);

		/*
		 * There is a possibility that we were able to acquire a reference on the
		 * stream from the RCU hash table but between then and now, the node might
		 * have been deleted just before the lock is acquired. Thus, after locking,
		 * we make sure the metadata node has not been deleted which means that the
		 * buffers are closed.
		 *
		 * In that case, there is no need to sync the metadata hence returning a
		 * success return code.
		 */
		ret = cds_lfht_is_node_deleted(&metadata->node.node);
		if (ret) {
			ret = 0;
			goto end_unlock_mutex;
		}

		switch (ctx->type) {
		case LTTNG_CONSUMER_KERNEL:
			/*
			 * Empty the metadata cache and flush the current stream.
			 */
			ret = lttng_kconsumer_sync_metadata(metadata);
			break;
		case LTTNG_CONSUMER32_UST:
		case LTTNG_CONSUMER64_UST:
			/*
			 * Ask the sessiond if we have new metadata waiting and update the
			 * consumer metadata cache.
			 */
			ret = lttng_ustconsumer_sync_metadata(ctx, metadata);
			break;
		default:
			assert(0);
			ret = -1;
			break;
		}
		/*
		 * Error or no new metadata, we exit here.
		 * Note: ENODATA/EAGAIN are returned as positive values here.
		 */
		if (ret <= 0 || ret == ENODATA) {
			goto end_unlock_mutex;
		}

		/*
		 * At this point, new metadata have been flushed, so we wait on the
		 * rendez-vous point for the metadata thread to wake us up when it
		 * finishes consuming the metadata and continue execution.
		 */

		pthread_mutex_lock(&metadata->metadata_rdv_lock);

		/*
		 * Release metadata stream lock so the metadata thread can process it.
		 */
		pthread_mutex_unlock(&metadata->lock);

		/*
		 * Wait on the rendez-vous point. Once woken up, it means the metadata was
		 * consumed and thus synchronization is achieved.
		 */
		pthread_cond_wait(&metadata->metadata_rdv, &metadata->metadata_rdv_lock);
		pthread_mutex_unlock(&metadata->metadata_rdv_lock);
	} while (ret == EAGAIN);

	/* Success */
	return 0;

end_unlock_mutex:
	pthread_mutex_unlock(&metadata->lock);
	return ret;
}
306
/*
 * Synchronize the metadata using a given session ID. A successful acquisition
 * of a metadata stream will trigger a request to the session daemon and a
 * snapshot so the metadata thread can consume it.
 *
 * This function call is a rendez-vous point between the metadata thread and
 * the data thread.
 *
 * Return 0 on success or else a negative value.
 */
int consumer_stream_sync_metadata(struct lttng_consumer_local_data *ctx,
		uint64_t session_id)
{
	int ret;
	struct lttng_consumer_stream *stream = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht *ht;

	assert(ctx);

	/* Ease our life a bit. */
	ht = consumer_data.stream_list_ht;

	rcu_read_lock();

	/* Search the metadata associated with the session id of the given stream. */

	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&session_id, lttng_ht_seed), ht->match_fct,
			&session_id, &iter.iter, stream, node_session_id.node) {
		/* The list holds all streams of the session; keep metadata only. */
		if (!stream->metadata_flag) {
			continue;
		}

		ret = do_sync_metadata(stream, ctx);
		if (ret < 0) {
			goto end;
		}
	}

	/*
	 * Force return code to 0 (success) since ret might be ENODATA for instance
	 * which is not an error but rather that we should come back.
	 */
	ret = 0;

end:
	rcu_read_unlock();
	return ret;
}
357
/*
 * Post-consumption hook used by live sessions: wait until all available
 * metadata has been pushed before emitting the packet's index, so a live
 * viewer never sees an index referencing events whose metadata is missing.
 *
 * Returns 0 on success, a negative value on error.
 */
static int consumer_stream_sync_metadata_index(
		struct lttng_consumer_stream *stream,
		const struct stream_subbuffer *subbuffer,
		struct lttng_consumer_local_data *ctx)
{
	int ret;

	/* Block until all the metadata is sent. */
	pthread_mutex_lock(&stream->metadata_timer_lock);
	assert(!stream->missed_metadata_flush);
	stream->waiting_on_metadata = true;
	pthread_mutex_unlock(&stream->metadata_timer_lock);

	ret = consumer_stream_sync_metadata(ctx, stream->session_id);

	pthread_mutex_lock(&stream->metadata_timer_lock);
	stream->waiting_on_metadata = false;
	if (stream->missed_metadata_flush) {
		/*
		 * A flush request arrived while we were waiting on the sync
		 * (NOTE(review): presumably set by the live timer — confirm
		 * against consumer-timer.c); compensate with a live beacon.
		 */
		stream->missed_metadata_flush = false;
		pthread_mutex_unlock(&stream->metadata_timer_lock);
		(void) stream->read_subbuffer_ops.send_live_beacon(stream);
	} else {
		pthread_mutex_unlock(&stream->metadata_timer_lock);
	}
	if (ret < 0) {
		goto end;
	}

	ret = consumer_stream_send_index(stream, subbuffer, ctx);
end:
	return ret;
}
390
391 /*
392 * Check if the local version of the metadata stream matches with the version
393 * of the metadata stream in the kernel. If it was updated, set the reset flag
394 * on the stream.
395 */
396 static
397 int metadata_stream_check_version(struct lttng_consumer_stream *stream,
398 const struct stream_subbuffer *subbuffer)
399 {
400 if (stream->metadata_version == subbuffer->info.metadata.version) {
401 goto end;
402 }
403
404 DBG("New metadata version detected");
405 stream->metadata_version = subbuffer->info.metadata.version;
406 stream->reset_metadata_flag = 1;
407
408 if (stream->metadata_bucket) {
409 metadata_bucket_reset(stream->metadata_bucket);
410 }
411
412 if (stream->read_subbuffer_ops.reset_metadata) {
413 stream->read_subbuffer_ops.reset_metadata(stream);
414 }
415
416 end:
417 return 0;
418 }
419
420 struct lttng_consumer_stream *consumer_stream_create(
421 struct lttng_consumer_channel *channel,
422 uint64_t channel_key,
423 uint64_t stream_key,
424 enum lttng_consumer_stream_state state,
425 const char *channel_name,
426 uid_t uid,
427 gid_t gid,
428 uint64_t relayd_id,
429 uint64_t session_id,
430 int cpu,
431 int *alloc_ret,
432 enum consumer_channel_type type,
433 unsigned int monitor)
434 {
435 int ret;
436 struct lttng_consumer_stream *stream;
437
438 stream = zmalloc(sizeof(*stream));
439 if (stream == NULL) {
440 PERROR("malloc struct lttng_consumer_stream");
441 ret = -ENOMEM;
442 goto end;
443 }
444
445 rcu_read_lock();
446 stream->chan = channel;
447 stream->key = stream_key;
448 stream->out_fd = -1;
449 stream->out_fd_offset = 0;
450 stream->output_written = 0;
451 stream->state = state;
452 stream->uid = uid;
453 stream->gid = gid;
454 stream->relayd_id = relayd_id;
455 stream->session_id = session_id;
456 stream->monitor = monitor;
457 stream->endpoint_status = CONSUMER_ENDPOINT_ACTIVE;
458 stream->index_file = NULL;
459 stream->last_sequence_number = -1ULL;
460 pthread_mutex_init(&stream->lock, NULL);
461 pthread_mutex_init(&stream->metadata_timer_lock, NULL);
462
463 /* If channel is the metadata, flag this stream as metadata. */
464 if (type == CONSUMER_CHANNEL_TYPE_METADATA) {
465 stream->metadata_flag = 1;
466 /* Metadata is flat out. */
467 strncpy(stream->name, DEFAULT_METADATA_NAME, sizeof(stream->name));
468 /* Live rendez-vous point. */
469 pthread_cond_init(&stream->metadata_rdv, NULL);
470 pthread_mutex_init(&stream->metadata_rdv_lock, NULL);
471 } else {
472 /* Format stream name to <channel_name>_<cpu_number> */
473 ret = snprintf(stream->name, sizeof(stream->name), "%s_%d",
474 channel_name, cpu);
475 if (ret < 0) {
476 PERROR("snprintf stream name");
477 goto error;
478 }
479 }
480
481 /* Key is always the wait_fd for streams. */
482 lttng_ht_node_init_u64(&stream->node, stream->key);
483
484 /* Init node per channel id key */
485 lttng_ht_node_init_u64(&stream->node_channel_id, channel_key);
486
487 /* Init session id node with the stream session id */
488 lttng_ht_node_init_u64(&stream->node_session_id, stream->session_id);
489
490 DBG3("Allocated stream %s (key %" PRIu64 ", chan_key %" PRIu64
491 " relayd_id %" PRIu64 ", session_id %" PRIu64,
492 stream->name, stream->key, channel_key,
493 stream->relayd_id, stream->session_id);
494
495 rcu_read_unlock();
496
497 switch (channel->output) {
498 case CONSUMER_CHANNEL_SPLICE:
499 stream->output = LTTNG_EVENT_SPLICE;
500 ret = utils_create_pipe(stream->splice_pipe);
501 if (ret < 0) {
502 goto error;
503 }
504 break;
505 case CONSUMER_CHANNEL_MMAP:
506 stream->output = LTTNG_EVENT_MMAP;
507 break;
508 default:
509 abort();
510 }
511
512 if (type == CONSUMER_CHANNEL_TYPE_METADATA) {
513 stream->read_subbuffer_ops.lock =
514 consumer_stream_metadata_lock_all;
515 stream->read_subbuffer_ops.unlock =
516 consumer_stream_metadata_unlock_all;
517 stream->read_subbuffer_ops.pre_consume_subbuffer =
518 metadata_stream_check_version;
519 } else {
520 stream->read_subbuffer_ops.lock = consumer_stream_data_lock_all;
521 stream->read_subbuffer_ops.unlock =
522 consumer_stream_data_unlock_all;
523 stream->read_subbuffer_ops.pre_consume_subbuffer =
524 consumer_stream_update_stats;
525 if (channel->is_live) {
526 stream->read_subbuffer_ops.post_consume =
527 consumer_stream_sync_metadata_index;
528 } else {
529 stream->read_subbuffer_ops.post_consume =
530 consumer_stream_send_index;
531 }
532 }
533
534 if (channel->output == CONSUMER_CHANNEL_MMAP) {
535 stream->read_subbuffer_ops.consume_subbuffer =
536 consumer_stream_consume_mmap;
537 } else {
538 stream->read_subbuffer_ops.consume_subbuffer =
539 consumer_stream_consume_splice;
540 }
541
542 return stream;
543
544 error:
545 rcu_read_unlock();
546 free(stream);
547 end:
548 if (alloc_ret) {
549 *alloc_ret = ret;
550 }
551 return NULL;
552 }
553
/*
 * Close stream on the relayd side. This call can destroy a relayd if the
 * conditions are met.
 *
 * A RCU read side lock MUST be acquired if the relayd object was looked up in
 * a hash table before calling this.
 */
void consumer_stream_relayd_close(struct lttng_consumer_stream *stream,
		struct consumer_relayd_sock_pair *relayd)
{
	int ret;

	assert(stream);
	assert(relayd);

	/* Drop this stream's reference on the relayd, if it held one. */
	if (stream->sent_to_relayd) {
		uatomic_dec(&relayd->refcount);
		assert(uatomic_read(&relayd->refcount) >= 0);
	}

	/* Closing streams requires to lock the control socket. */
	pthread_mutex_lock(&relayd->ctrl_sock_mutex);
	ret = relayd_send_close_stream(&relayd->control_sock,
			stream->relayd_stream_id,
			stream->next_net_seq_num - 1);
	if (ret < 0) {
		/* The control socket is unusable; tear down the relayd state. */
		ERR("Relayd send close stream failed. Cleaning up relayd %" PRIu64 ".", relayd->id);
		lttng_consumer_cleanup_relayd(relayd);
	}
	pthread_mutex_unlock(&relayd->ctrl_sock_mutex);

	/* Both conditions are met, we destroy the relayd. */
	if (uatomic_read(&relayd->refcount) == 0 &&
			uatomic_read(&relayd->destroy_flag)) {
		consumer_destroy_relayd(relayd);
	}
	/* The stream is no longer associated with any relayd. */
	stream->relayd_id = (uint64_t) -1ULL;
	stream->sent_to_relayd = 0;
}
593
/*
 * Close stream's file descriptors and, if needed, close stream also on the
 * relayd side.
 *
 * The consumer data lock MUST be acquired.
 * The stream lock MUST be acquired.
 */
void consumer_stream_close(struct lttng_consumer_stream *stream)
{
	int ret;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		/* Unmap the consumer-side view of the ring buffer, if mapped. */
		if (stream->mmap_base != NULL) {
			ret = munmap(stream->mmap_base, stream->mmap_len);
			if (ret != 0) {
				PERROR("munmap");
			}
		}

		if (stream->wait_fd >= 0) {
			ret = close(stream->wait_fd);
			if (ret) {
				PERROR("close");
			}
			stream->wait_fd = -1;
		}
		if (stream->chan->output == CONSUMER_CHANNEL_SPLICE) {
			utils_close_pipe(stream->splice_pipe);
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
	{
		/*
		 * Special case for the metadata since the wait fd is an internal pipe
		 * polled in the metadata thread.
		 */
		if (stream->metadata_flag && stream->chan->monitor) {
			int rpipe = stream->ust_metadata_poll_pipe[0];

			/*
			 * This will stop the channel timer if one and close the write side
			 * of the metadata poll pipe.
			 */
			lttng_ustconsumer_close_metadata(stream->chan);
			if (rpipe >= 0) {
				ret = close(rpipe);
				if (ret < 0) {
					PERROR("closing metadata pipe read side");
				}
				stream->ust_metadata_poll_pipe[0] = -1;
			}
		}
		break;
	}
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	/* Close output fd. Could be a socket or local file at this point. */
	if (stream->out_fd >= 0) {
		ret = close(stream->out_fd);
		if (ret) {
			PERROR("close");
		}
		stream->out_fd = -1;
	}

	/* Release this stream's reference on its index file, if any. */
	if (stream->index_file) {
		lttng_index_file_put(stream->index_file);
		stream->index_file = NULL;
	}

	/* Check and cleanup relayd if needed. */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->relayd_id);
	if (relayd != NULL) {
		consumer_stream_relayd_close(stream, relayd);
	}
	rcu_read_unlock();
}
680
/*
 * Delete the stream from all possible hash tables.
 *
 * The consumer data lock MUST be acquired.
 * The stream lock MUST be acquired.
 */
void consumer_stream_delete(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	int ret;
	struct lttng_ht_iter iter;

	assert(stream);
	/* Should NEVER be called not in monitor mode. */
	assert(stream->chan->monitor);

	rcu_read_lock();

	/* ht is the thread-specific table the stream was registered in, if any. */
	if (ht) {
		iter.iter.node = &stream->node.node;
		ret = lttng_ht_del(ht, &iter);
		assert(!ret);
	}

	/* Delete from stream per channel ID hash table. */
	iter.iter.node = &stream->node_channel_id.node;
	/*
	 * The returned value is of no importance. Even if the node is NOT in the
	 * hash table, we continue since we may have been called by a code path
	 * that did not add the stream to a (all) hash table. Same goes for the
	 * next call ht del call.
	 */
	(void) lttng_ht_del(consumer_data.stream_per_chan_id_ht, &iter);

	/* Delete from the global stream list. */
	iter.iter.node = &stream->node_session_id.node;
	/* See the previous ht del on why we ignore the returned value. */
	(void) lttng_ht_del(consumer_data.stream_list_ht, &iter);

	rcu_read_unlock();

	if (!stream->metadata_flag) {
		/* Decrement the stream count of the global consumer data. */
		assert(consumer_data.stream_count > 0);
		consumer_data.stream_count--;
	}
}
728
/*
 * Free the given stream within a RCU call.
 *
 * The metadata bucket (if any) is destroyed immediately; the stream
 * structure itself is reclaimed after a grace period via free_stream_rcu().
 */
void consumer_stream_free(struct lttng_consumer_stream *stream)
{
	assert(stream);

	metadata_bucket_destroy(stream->metadata_bucket);
	call_rcu(&stream->node.head, free_stream_rcu);
}
739
740 /*
741 * Destroy the stream's buffers of the tracer.
742 */
743 void consumer_stream_destroy_buffers(struct lttng_consumer_stream *stream)
744 {
745 assert(stream);
746
747 switch (consumer_data.type) {
748 case LTTNG_CONSUMER_KERNEL:
749 break;
750 case LTTNG_CONSUMER32_UST:
751 case LTTNG_CONSUMER64_UST:
752 lttng_ustconsumer_del_stream(stream);
753 break;
754 default:
755 ERR("Unknown consumer_data type");
756 assert(0);
757 }
758 }
759
/*
 * Destroy and close a already created stream.
 *
 * Tears down the tracer-side buffers first, then closes all file
 * descriptors (and the relayd-side stream if one exists).
 */
static void destroy_close_stream(struct lttng_consumer_stream *stream)
{
	assert(stream);

	DBG("Consumer stream destroy monitored key: %" PRIu64, stream->key);

	/* Destroy tracer buffers of the stream. */
	consumer_stream_destroy_buffers(stream);
	/* Close down everything including the relayd if one. */
	consumer_stream_close(stream);
}
774
775 /*
776 * Decrement the stream's channel refcount and if down to 0, return the channel
777 * pointer so it can be destroyed by the caller or NULL if not.
778 */
779 static struct lttng_consumer_channel *unref_channel(
780 struct lttng_consumer_stream *stream)
781 {
782 struct lttng_consumer_channel *free_chan = NULL;
783
784 assert(stream);
785 assert(stream->chan);
786
787 /* Update refcount of channel and see if we need to destroy it. */
788 if (!uatomic_sub_return(&stream->chan->refcount, 1)
789 && !uatomic_read(&stream->chan->nb_init_stream_left)) {
790 free_chan = stream->chan;
791 }
792
793 return free_chan;
794 }
795
796 /*
797 * Destroy a stream completely. This will delete, close and free the stream.
798 * Once return, the stream is NO longer usable. Its channel may get destroyed
799 * if conditions are met for a monitored stream.
800 *
801 * This MUST be called WITHOUT the consumer data and stream lock acquired if
802 * the stream is in _monitor_ mode else it does not matter.
803 */
804 void consumer_stream_destroy(struct lttng_consumer_stream *stream,
805 struct lttng_ht *ht)
806 {
807 assert(stream);
808
809 /* Stream is in monitor mode. */
810 if (stream->monitor) {
811 struct lttng_consumer_channel *free_chan = NULL;
812
813 /*
814 * This means that the stream was successfully removed from the streams
815 * list of the channel and sent to the right thread managing this
816 * stream thus being globally visible.
817 */
818 if (stream->globally_visible) {
819 pthread_mutex_lock(&consumer_data.lock);
820 pthread_mutex_lock(&stream->chan->lock);
821 pthread_mutex_lock(&stream->lock);
822 /* Remove every reference of the stream in the consumer. */
823 consumer_stream_delete(stream, ht);
824
825 destroy_close_stream(stream);
826
827 /* Update channel's refcount of the stream. */
828 free_chan = unref_channel(stream);
829
830 /* Indicates that the consumer data state MUST be updated after this. */
831 consumer_data.need_update = 1;
832
833 pthread_mutex_unlock(&stream->lock);
834 pthread_mutex_unlock(&stream->chan->lock);
835 pthread_mutex_unlock(&consumer_data.lock);
836 } else {
837 /*
838 * If the stream is not visible globally, this needs to be done
839 * outside of the consumer data lock section.
840 */
841 free_chan = unref_channel(stream);
842 }
843
844 if (free_chan) {
845 consumer_del_channel(free_chan);
846 }
847 } else {
848 destroy_close_stream(stream);
849 }
850
851 /* Free stream within a RCU call. */
852 consumer_stream_free(stream);
853 }
854
855 /*
856 * Write index of a specific stream either on the relayd or local disk.
857 *
858 * Return 0 on success or else a negative value.
859 */
860 int consumer_stream_write_index(struct lttng_consumer_stream *stream,
861 struct ctf_packet_index *element)
862 {
863 int ret;
864
865 assert(stream);
866 assert(element);
867
868 rcu_read_lock();
869 if (stream->relayd_id != (uint64_t) -1ULL) {
870 struct consumer_relayd_sock_pair *relayd;
871
872 relayd = consumer_find_relayd(stream->relayd_id);
873 if (relayd) {
874 pthread_mutex_lock(&relayd->ctrl_sock_mutex);
875 ret = relayd_send_index(&relayd->control_sock, element,
876 stream->relayd_stream_id, stream->next_net_seq_num - 1);
877 if (ret < 0) {
878 /*
879 * Communication error with lttng-relayd,
880 * perform cleanup now
881 */
882 ERR("Relayd send index failed. Cleaning up relayd %" PRIu64 ".", relayd->id);
883 lttng_consumer_cleanup_relayd(relayd);
884 ret = -1;
885 }
886 pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
887 } else {
888 ERR("Stream %" PRIu64 " relayd ID %" PRIu64 " unknown. Can't write index.",
889 stream->key, stream->relayd_id);
890 ret = -1;
891 }
892 } else {
893 if (lttng_index_file_write(stream->index_file, element)) {
894 ret = -1;
895 } else {
896 ret = 0;
897 }
898 }
899 if (ret < 0) {
900 goto error;
901 }
902
903 error:
904 rcu_read_unlock();
905 return ret;
906 }
907
908 static ssize_t metadata_bucket_flush(
909 const struct stream_subbuffer *buffer, void *data)
910 {
911 ssize_t ret;
912 struct lttng_consumer_stream *stream = data;
913
914 ret = consumer_stream_consume_mmap(NULL, stream, buffer);
915 if (ret < 0) {
916 goto end;
917 }
918 end:
919 return ret;
920 }
921
922 static ssize_t metadata_bucket_consume(
923 struct lttng_consumer_local_data *unused,
924 struct lttng_consumer_stream *stream,
925 const struct stream_subbuffer *subbuffer)
926 {
927 ssize_t ret;
928 enum metadata_bucket_status status;
929
930 status = metadata_bucket_fill(stream->metadata_bucket, subbuffer);
931 switch (status) {
932 case METADATA_BUCKET_STATUS_OK:
933 /* Return consumed size. */
934 ret = subbuffer->buffer.buffer.size;
935 break;
936 default:
937 ret = -1;
938 }
939
940 return ret;
941 }
942
943 int consumer_stream_enable_metadata_bucketization(
944 struct lttng_consumer_stream *stream)
945 {
946 int ret = 0;
947
948 assert(stream->metadata_flag);
949 assert(!stream->metadata_bucket);
950 assert(stream->chan->output == CONSUMER_CHANNEL_MMAP);
951
952 stream->metadata_bucket = metadata_bucket_create(
953 metadata_bucket_flush, stream);
954 if (!stream->metadata_bucket) {
955 ret = -1;
956 goto end;
957 }
958
959 stream->read_subbuffer_ops.consume_subbuffer = metadata_bucket_consume;
960 end:
961 return ret;
962 }
This page took 0.084028 seconds and 5 git commands to generate.