c9a55505b80657a58478abbf03fc29cc15b3a5b3
[lttng-tools.git] / src / common / consumer / consumer-stream.c
1 /*
2 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 * Copyright (C) 2013 - David Goulet <dgoulet@efficios.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License, version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc., 51
17 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19
20 #define _LGPL_SOURCE
21 #include <assert.h>
22 #include <inttypes.h>
23 #include <sys/mman.h>
24 #include <unistd.h>
25
26 #include <common/common.h>
27 #include <common/index/index.h>
28 #include <common/kernel-consumer/kernel-consumer.h>
29 #include <common/relayd/relayd.h>
30 #include <common/ust-consumer/ust-consumer.h>
31 #include <common/utils.h>
32 #include <common/consumer/consumer.h>
33 #include <common/consumer/consumer-timer.h>
34 #include <common/consumer/metadata-bucket.h>
35
36 #include "consumer-stream.h"
37
38 /*
39 * RCU call to free stream. MUST only be used with call_rcu().
40 */
41 static void free_stream_rcu(struct rcu_head *head)
42 {
43 struct lttng_ht_node_u64 *node =
44 caa_container_of(head, struct lttng_ht_node_u64, head);
45 struct lttng_consumer_stream *stream =
46 caa_container_of(node, struct lttng_consumer_stream, node);
47
48 pthread_mutex_destroy(&stream->lock);
49 free(stream);
50 }
51
/*
 * Acquire all locks protecting a data stream.
 *
 * Lock ordering: channel lock first, then stream lock. Every caller in
 * this file must respect this order to avoid deadlocks; the matching
 * unlock helper releases in the reverse order.
 */
static void consumer_stream_data_lock_all(struct lttng_consumer_stream *stream)
{
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->lock);
}
57
/*
 * Release all locks protecting a data stream, in the reverse order of
 * consumer_stream_data_lock_all(): stream lock first, then channel lock.
 */
static void consumer_stream_data_unlock_all(struct lttng_consumer_stream *stream)
{
	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->lock);
}
63
/*
 * Acquire all locks protecting a metadata stream: the data locks
 * (channel then stream) followed by the metadata rendez-vous lock.
 * The unlock helper releases in the reverse order.
 */
static void consumer_stream_metadata_lock_all(struct lttng_consumer_stream *stream)
{
	consumer_stream_data_lock_all(stream);
	pthread_mutex_lock(&stream->metadata_rdv_lock);
}
69
/*
 * Release all locks protecting a metadata stream, in the reverse order
 * of consumer_stream_metadata_lock_all().
 */
static void consumer_stream_metadata_unlock_all(struct lttng_consumer_stream *stream)
{
	pthread_mutex_unlock(&stream->metadata_rdv_lock);
	consumer_stream_data_unlock_all(stream);
}
75
76 /* Only used for data streams. */
77 static int consumer_stream_update_stats(struct lttng_consumer_stream *stream,
78 const struct stream_subbuffer *subbuf)
79 {
80 int ret = 0;
81 uint64_t sequence_number;
82 const uint64_t discarded_events =
83 LTTNG_OPTIONAL_GET(subbuf->info.data.sequence_number);
84
85 if (!subbuf->info.data.sequence_number.is_set) {
86 /* Command not supported by the tracer. */
87 sequence_number = -1ULL;
88 } else {
89 sequence_number = subbuf->info.data.sequence_number.value;
90 }
91
92 /*
93 * Start the sequence when we extract the first packet in case we don't
94 * start at 0 (for example if a consumer is not connected to the
95 * session immediately after the beginning).
96 */
97 if (stream->last_sequence_number == -1ULL) {
98 stream->last_sequence_number = sequence_number;
99 } else if (sequence_number > stream->last_sequence_number) {
100 stream->chan->lost_packets += sequence_number -
101 stream->last_sequence_number - 1;
102 } else {
103 /* seq <= last_sequence_number */
104 ERR("Sequence number inconsistent : prev = %" PRIu64
105 ", current = %" PRIu64,
106 stream->last_sequence_number, sequence_number);
107 ret = -1;
108 goto end;
109 }
110 stream->last_sequence_number = sequence_number;
111
112 if (discarded_events < stream->last_discarded_events) {
113 /*
114 * Overflow has occurred. We assume only one wrap-around
115 * has occurred.
116 */
117 stream->chan->discarded_events +=
118 (1ULL << (CAA_BITS_PER_LONG - 1)) -
119 stream->last_discarded_events +
120 discarded_events;
121 } else {
122 stream->chan->discarded_events += discarded_events -
123 stream->last_discarded_events;
124 }
125 stream->last_discarded_events = discarded_events;
126 ret = 0;
127
128 end:
129 return ret;
130 }
131
132 static
133 void ctf_packet_index_populate(struct ctf_packet_index *index,
134 off_t offset, const struct stream_subbuffer *subbuffer)
135 {
136 *index = (typeof(*index)){
137 .offset = htobe64(offset),
138 .packet_size = htobe64(subbuffer->info.data.packet_size),
139 .content_size = htobe64(subbuffer->info.data.content_size),
140 .timestamp_begin = htobe64(
141 subbuffer->info.data.timestamp_begin),
142 .timestamp_end = htobe64(
143 subbuffer->info.data.timestamp_end),
144 .events_discarded = htobe64(
145 subbuffer->info.data.events_discarded),
146 .stream_id = htobe64(subbuffer->info.data.stream_id),
147 .stream_instance_id = htobe64(
148 subbuffer->info.data.stream_instance_id.is_set ?
149 subbuffer->info.data.stream_instance_id.value : -1ULL),
150 .packet_seq_num = htobe64(
151 subbuffer->info.data.sequence_number.is_set ?
152 subbuffer->info.data.sequence_number.value : -1ULL),
153 };
154 }
155
156 static ssize_t consumer_stream_consume_mmap(
157 struct lttng_consumer_local_data *ctx,
158 struct lttng_consumer_stream *stream,
159 const struct stream_subbuffer *subbuffer)
160 {
161 const unsigned long padding_size =
162 subbuffer->info.data.padded_subbuf_size -
163 subbuffer->info.data.subbuf_size;
164
165 return lttng_consumer_on_read_subbuffer_mmap(
166 stream, &subbuffer->buffer.buffer, padding_size);
167 }
168
169 static ssize_t consumer_stream_consume_splice(
170 struct lttng_consumer_local_data *ctx,
171 struct lttng_consumer_stream *stream,
172 const struct stream_subbuffer *subbuffer)
173 {
174 return lttng_consumer_on_read_subbuffer_splice(ctx, stream,
175 subbuffer->info.data.padded_subbuf_size, 0);
176 }
177
178 static int consumer_stream_send_index(
179 struct lttng_consumer_stream *stream,
180 const struct stream_subbuffer *subbuffer,
181 struct lttng_consumer_local_data *ctx)
182 {
183 off_t packet_offset = 0;
184 struct ctf_packet_index index = {};
185
186 /*
187 * This is called after consuming the sub-buffer; substract the
188 * effect this sub-buffer from the offset.
189 */
190 if (stream->relayd_id == (uint64_t) -1ULL) {
191 packet_offset = stream->out_fd_offset -
192 subbuffer->info.data.padded_subbuf_size;
193 }
194
195 ctf_packet_index_populate(&index, packet_offset, subbuffer);
196 return consumer_stream_write_index(stream, &index);
197 }
198
/*
 * Actually do the metadata sync using the given metadata stream.
 *
 * Return 0 on success else a negative value. ENODATA can be returned also
 * indicating that there is no metadata available for that stream.
 */
static int do_sync_metadata(struct lttng_consumer_stream *metadata,
		struct lttng_consumer_local_data *ctx)
{
	int ret;

	assert(metadata);
	assert(metadata->metadata_flag);
	assert(ctx);

	/*
	 * In UST, since we have to write the metadata from the cache packet
	 * by packet, we might need to start this procedure multiple times
	 * until all the metadata from the cache has been extracted.
	 */
	do {
		/*
		 * Steps :
		 * - Lock the metadata stream
		 * - Check if metadata stream node was deleted before locking.
		 *   - if yes, release and return success
		 * - Check if new metadata is ready (flush + snapshot pos)
		 * - If nothing : release and return.
		 * - Lock the metadata_rdv_lock
		 * - Unlock the metadata stream
		 * - cond_wait on metadata_rdv to wait the wakeup from the
		 *   metadata thread
		 * - Unlock the metadata_rdv_lock
		 */
		pthread_mutex_lock(&metadata->lock);

		/*
		 * There is a possibility that we were able to acquire a reference on the
		 * stream from the RCU hash table but between then and now, the node might
		 * have been deleted just before the lock is acquired. Thus, after locking,
		 * we make sure the metadata node has not been deleted which means that the
		 * buffers are closed.
		 *
		 * In that case, there is no need to sync the metadata hence returning a
		 * success return code.
		 */
		ret = cds_lfht_is_node_deleted(&metadata->node.node);
		if (ret) {
			ret = 0;
			goto end_unlock_mutex;
		}

		/* Domain-specific sync: positive returns carry flow information. */
		switch (ctx->type) {
		case LTTNG_CONSUMER_KERNEL:
			/*
			 * Empty the metadata cache and flush the current stream.
			 */
			ret = lttng_kconsumer_sync_metadata(metadata);
			break;
		case LTTNG_CONSUMER32_UST:
		case LTTNG_CONSUMER64_UST:
			/*
			 * Ask the sessiond if we have new metadata waiting and update the
			 * consumer metadata cache.
			 */
			ret = lttng_ustconsumer_sync_metadata(ctx, metadata);
			break;
		default:
			assert(0);
			ret = -1;
			break;
		}
		/*
		 * Error or no new metadata, we exit here. ENODATA is checked as
		 * a positive value since the sync helpers return it unnegated.
		 */
		if (ret <= 0 || ret == ENODATA) {
			goto end_unlock_mutex;
		}

		/*
		 * At this point, new metadata have been flushed, so we wait on the
		 * rendez-vous point for the metadata thread to wake us up when it
		 * finishes consuming the metadata and continue execution.
		 */

		pthread_mutex_lock(&metadata->metadata_rdv_lock);

		/*
		 * Release metadata stream lock so the metadata thread can process it.
		 * The rdv lock is already held, so the wakeup cannot be missed.
		 */
		pthread_mutex_unlock(&metadata->lock);

		/*
		 * Wait on the rendez-vous point. Once woken up, it means the metadata was
		 * consumed and thus synchronization is achieved.
		 */
		pthread_cond_wait(&metadata->metadata_rdv, &metadata->metadata_rdv_lock);
		pthread_mutex_unlock(&metadata->metadata_rdv_lock);
		/* EAGAIN means more metadata remains in the cache: loop again. */
	} while (ret == EAGAIN);

	/* Success */
	return 0;

end_unlock_mutex:
	pthread_mutex_unlock(&metadata->lock);
	return ret;
}
306
/*
 * Synchronize the metadata using a given session ID. A successful acquisition
 * of a metadata stream will trigger a request to the session daemon and a
 * snapshot so the metadata thread can consume it.
 *
 * This function call is a rendez-vous point between the metadata thread and
 * the data thread.
 *
 * Return 0 on success or else a negative value.
 */
int consumer_stream_sync_metadata(struct lttng_consumer_local_data *ctx,
		uint64_t session_id)
{
	int ret;
	struct lttng_consumer_stream *stream = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht *ht;

	assert(ctx);

	/* Ease our life a bit. */
	ht = consumer_data.stream_list_ht;

	/* Protects the hash table iteration below. */
	rcu_read_lock();

	/* Search the metadata associated with the session id of the given stream. */

	/*
	 * The stream list hash table is keyed by session id, so all streams
	 * of the session match; only the metadata stream(s) are synced.
	 */
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&session_id, lttng_ht_seed), ht->match_fct,
			&session_id, &iter.iter, stream, node_session_id.node) {
		if (!stream->metadata_flag) {
			continue;
		}

		ret = do_sync_metadata(stream, ctx);
		if (ret < 0) {
			goto end;
		}
	}

	/*
	 * Force return code to 0 (success) since ret might be ENODATA for instance
	 * which is not an error but rather that we should come back.
	 */
	ret = 0;

end:
	rcu_read_unlock();
	return ret;
}
357
/*
 * Post-consume callback for live data streams: block until the metadata
 * for this stream's session is fully pushed, then send the packet index.
 *
 * The waiting_on_metadata / missed_metadata_flush flags, protected by
 * metadata_timer_lock, let the live timer know a sync is in progress; if
 * the timer fired meanwhile, a live beacon is (re-)sent here.
 *
 * Return 0 on success or else a negative value.
 */
static int consumer_stream_sync_metadata_index(
		struct lttng_consumer_stream *stream,
		const struct stream_subbuffer *subbuffer,
		struct lttng_consumer_local_data *ctx)
{
	int ret;

	/* Block until all the metadata is sent. */
	pthread_mutex_lock(&stream->metadata_timer_lock);
	assert(!stream->missed_metadata_flush);
	stream->waiting_on_metadata = true;
	pthread_mutex_unlock(&stream->metadata_timer_lock);

	ret = consumer_stream_sync_metadata(ctx, stream->session_id);

	pthread_mutex_lock(&stream->metadata_timer_lock);
	stream->waiting_on_metadata = false;
	if (stream->missed_metadata_flush) {
		/* The live timer fired during the sync; replay the flush. */
		stream->missed_metadata_flush = false;
		pthread_mutex_unlock(&stream->metadata_timer_lock);
		/* Best-effort: beacon failure is deliberately ignored here. */
		(void) stream->read_subbuffer_ops.send_live_beacon(stream);
	} else {
		pthread_mutex_unlock(&stream->metadata_timer_lock);
	}
	if (ret < 0) {
		goto end;
	}

	ret = consumer_stream_send_index(stream, subbuffer, ctx);
end:
	return ret;
}
390
391 /*
392 * Check if the local version of the metadata stream matches with the version
393 * of the metadata stream in the kernel. If it was updated, set the reset flag
394 * on the stream.
395 */
396 static
397 int metadata_stream_check_version(struct lttng_consumer_stream *stream,
398 const struct stream_subbuffer *subbuffer)
399 {
400 if (stream->metadata_version == subbuffer->info.metadata.version) {
401 goto end;
402 }
403
404 DBG("New metadata version detected");
405 consumer_stream_metadata_set_version(stream,
406 subbuffer->info.metadata.version);
407
408 if (stream->read_subbuffer_ops.reset_metadata) {
409 stream->read_subbuffer_ops.reset_metadata(stream);
410 }
411
412 end:
413 return 0;
414 }
415
416 struct lttng_consumer_stream *consumer_stream_create(
417 struct lttng_consumer_channel *channel,
418 uint64_t channel_key,
419 uint64_t stream_key,
420 enum lttng_consumer_stream_state state,
421 const char *channel_name,
422 uid_t uid,
423 gid_t gid,
424 uint64_t relayd_id,
425 uint64_t session_id,
426 int cpu,
427 int *alloc_ret,
428 enum consumer_channel_type type,
429 unsigned int monitor)
430 {
431 int ret;
432 struct lttng_consumer_stream *stream;
433
434 stream = zmalloc(sizeof(*stream));
435 if (stream == NULL) {
436 PERROR("malloc struct lttng_consumer_stream");
437 ret = -ENOMEM;
438 goto end;
439 }
440
441 rcu_read_lock();
442 stream->chan = channel;
443 stream->key = stream_key;
444 stream->out_fd = -1;
445 stream->out_fd_offset = 0;
446 stream->output_written = 0;
447 stream->state = state;
448 stream->uid = uid;
449 stream->gid = gid;
450 stream->relayd_id = relayd_id;
451 stream->session_id = session_id;
452 stream->monitor = monitor;
453 stream->endpoint_status = CONSUMER_ENDPOINT_ACTIVE;
454 stream->index_file = NULL;
455 stream->last_sequence_number = -1ULL;
456 pthread_mutex_init(&stream->lock, NULL);
457 pthread_mutex_init(&stream->metadata_timer_lock, NULL);
458
459 /* If channel is the metadata, flag this stream as metadata. */
460 if (type == CONSUMER_CHANNEL_TYPE_METADATA) {
461 stream->metadata_flag = 1;
462 /* Metadata is flat out. */
463 strncpy(stream->name, DEFAULT_METADATA_NAME, sizeof(stream->name));
464 /* Live rendez-vous point. */
465 pthread_cond_init(&stream->metadata_rdv, NULL);
466 pthread_mutex_init(&stream->metadata_rdv_lock, NULL);
467 } else {
468 /* Format stream name to <channel_name>_<cpu_number> */
469 ret = snprintf(stream->name, sizeof(stream->name), "%s_%d",
470 channel_name, cpu);
471 if (ret < 0) {
472 PERROR("snprintf stream name");
473 goto error;
474 }
475 }
476
477 /* Key is always the wait_fd for streams. */
478 lttng_ht_node_init_u64(&stream->node, stream->key);
479
480 /* Init node per channel id key */
481 lttng_ht_node_init_u64(&stream->node_channel_id, channel_key);
482
483 /* Init session id node with the stream session id */
484 lttng_ht_node_init_u64(&stream->node_session_id, stream->session_id);
485
486 DBG3("Allocated stream %s (key %" PRIu64 ", chan_key %" PRIu64
487 " relayd_id %" PRIu64 ", session_id %" PRIu64,
488 stream->name, stream->key, channel_key,
489 stream->relayd_id, stream->session_id);
490
491 rcu_read_unlock();
492
493 switch (channel->output) {
494 case CONSUMER_CHANNEL_SPLICE:
495 stream->output = LTTNG_EVENT_SPLICE;
496 ret = utils_create_pipe(stream->splice_pipe);
497 if (ret < 0) {
498 goto error;
499 }
500 break;
501 case CONSUMER_CHANNEL_MMAP:
502 stream->output = LTTNG_EVENT_MMAP;
503 break;
504 default:
505 abort();
506 }
507
508 if (type == CONSUMER_CHANNEL_TYPE_METADATA) {
509 stream->read_subbuffer_ops.lock =
510 consumer_stream_metadata_lock_all;
511 stream->read_subbuffer_ops.unlock =
512 consumer_stream_metadata_unlock_all;
513 stream->read_subbuffer_ops.pre_consume_subbuffer =
514 metadata_stream_check_version;
515 } else {
516 stream->read_subbuffer_ops.lock = consumer_stream_data_lock_all;
517 stream->read_subbuffer_ops.unlock =
518 consumer_stream_data_unlock_all;
519 stream->read_subbuffer_ops.pre_consume_subbuffer =
520 consumer_stream_update_stats;
521 if (channel->is_live) {
522 stream->read_subbuffer_ops.post_consume =
523 consumer_stream_sync_metadata_index;
524 } else {
525 stream->read_subbuffer_ops.post_consume =
526 consumer_stream_send_index;
527 }
528 }
529
530 if (channel->output == CONSUMER_CHANNEL_MMAP) {
531 stream->read_subbuffer_ops.consume_subbuffer =
532 consumer_stream_consume_mmap;
533 } else {
534 stream->read_subbuffer_ops.consume_subbuffer =
535 consumer_stream_consume_splice;
536 }
537
538 return stream;
539
540 error:
541 rcu_read_unlock();
542 free(stream);
543 end:
544 if (alloc_ret) {
545 *alloc_ret = ret;
546 }
547 return NULL;
548 }
549
/*
 * Close stream on the relayd side. This call can destroy a relayd if the
 * conditions are met.
 *
 * A RCU read side lock MUST be acquired if the relayd object was looked up in
 * a hash table before calling this.
 */
void consumer_stream_relayd_close(struct lttng_consumer_stream *stream,
		struct consumer_relayd_sock_pair *relayd)
{
	int ret;

	assert(stream);
	assert(relayd);

	/* Drop the reference taken when the stream was sent to the relayd. */
	if (stream->sent_to_relayd) {
		uatomic_dec(&relayd->refcount);
		assert(uatomic_read(&relayd->refcount) >= 0);
	}

	/* Closing streams requires to lock the control socket. */
	pthread_mutex_lock(&relayd->ctrl_sock_mutex);
	ret = relayd_send_close_stream(&relayd->control_sock,
			stream->relayd_stream_id,
			stream->next_net_seq_num - 1);
	if (ret < 0) {
		/* Communication error: tear down this relayd's sockets. */
		ERR("Relayd send close stream failed. Cleaning up relayd %" PRIu64 ".", relayd->id);
		lttng_consumer_cleanup_relayd(relayd);
	}
	pthread_mutex_unlock(&relayd->ctrl_sock_mutex);

	/*
	 * Both conditions are met, we destroy the relayd: no stream holds a
	 * reference anymore and destruction was requested.
	 */
	if (uatomic_read(&relayd->refcount) == 0 &&
			uatomic_read(&relayd->destroy_flag)) {
		consumer_destroy_relayd(relayd);
	}
	/* Mark the stream as no longer associated with any relayd. */
	stream->relayd_id = (uint64_t) -1ULL;
	stream->sent_to_relayd = 0;
}
589
/*
 * Close stream's file descriptors and, if needed, close stream also on the
 * relayd side.
 *
 * The consumer data lock MUST be acquired.
 * The stream lock MUST be acquired.
 */
void consumer_stream_close(struct lttng_consumer_stream *stream)
{
	int ret;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		/* Unmap the consumer-side view of the ring buffer, if any. */
		if (stream->mmap_base != NULL) {
			ret = munmap(stream->mmap_base, stream->mmap_len);
			if (ret != 0) {
				PERROR("munmap");
			}
		}

		if (stream->wait_fd >= 0) {
			ret = close(stream->wait_fd);
			if (ret) {
				PERROR("close");
			}
			stream->wait_fd = -1;
		}
		if (stream->chan->output == CONSUMER_CHANNEL_SPLICE) {
			utils_close_pipe(stream->splice_pipe);
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
	{
		/*
		 * Special case for the metadata since the wait fd is an internal pipe
		 * polled in the metadata thread.
		 */
		if (stream->metadata_flag && stream->chan->monitor) {
			int rpipe = stream->ust_metadata_poll_pipe[0];

			/*
			 * This will stop the channel timer if one and close the write side
			 * of the metadata poll pipe.
			 */
			lttng_ustconsumer_close_metadata(stream->chan);
			if (rpipe >= 0) {
				ret = close(rpipe);
				if (ret < 0) {
					PERROR("closing metadata pipe read side");
				}
				stream->ust_metadata_poll_pipe[0] = -1;
			}
		}
		break;
	}
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	/* Close output fd. Could be a socket or local file at this point. */
	if (stream->out_fd >= 0) {
		ret = close(stream->out_fd);
		if (ret) {
			PERROR("close");
		}
		stream->out_fd = -1;
	}

	/* Drop our reference on the index file, if any. */
	if (stream->index_file) {
		lttng_index_file_put(stream->index_file);
		stream->index_file = NULL;
	}

	/* Check and cleanup relayd if needed. */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->relayd_id);
	if (relayd != NULL) {
		consumer_stream_relayd_close(stream, relayd);
	}
	rcu_read_unlock();
}
676
/*
 * Delete the stream from all possible hash tables.
 *
 * The consumer data lock MUST be acquired.
 * The stream lock MUST be acquired.
 */
void consumer_stream_delete(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	int ret;
	struct lttng_ht_iter iter;

	assert(stream);
	/* Should NEVER be called not in monitor mode. */
	assert(stream->chan->monitor);

	rcu_read_lock();

	/* 'ht' is the thread-owning table (data or metadata); may be NULL. */
	if (ht) {
		iter.iter.node = &stream->node.node;
		ret = lttng_ht_del(ht, &iter);
		assert(!ret);
	}

	/* Delete from stream per channel ID hash table. */
	iter.iter.node = &stream->node_channel_id.node;
	/*
	 * The returned value is of no importance. Even if the node is NOT in the
	 * hash table, we continue since we may have been called by a code path
	 * that did not add the stream to a (all) hash table. Same goes for the
	 * next ht del call.
	 */
	(void) lttng_ht_del(consumer_data.stream_per_chan_id_ht, &iter);

	/* Delete from the global stream list. */
	iter.iter.node = &stream->node_session_id.node;
	/* See the previous ht del on why we ignore the returned value. */
	(void) lttng_ht_del(consumer_data.stream_list_ht, &iter);

	rcu_read_unlock();

	if (!stream->metadata_flag) {
		/* Decrement the stream count of the global consumer data. */
		assert(consumer_data.stream_count > 0);
		consumer_data.stream_count--;
	}
}
724
725 /*
726 * Free the given stream within a RCU call.
727 */
728 void consumer_stream_free(struct lttng_consumer_stream *stream)
729 {
730 assert(stream);
731
732 metadata_bucket_destroy(stream->metadata_bucket);
733 call_rcu(&stream->node.head, free_stream_rcu);
734 }
735
736 /*
737 * Destroy the stream's buffers of the tracer.
738 */
739 void consumer_stream_destroy_buffers(struct lttng_consumer_stream *stream)
740 {
741 assert(stream);
742
743 switch (consumer_data.type) {
744 case LTTNG_CONSUMER_KERNEL:
745 break;
746 case LTTNG_CONSUMER32_UST:
747 case LTTNG_CONSUMER64_UST:
748 lttng_ustconsumer_del_stream(stream);
749 break;
750 default:
751 ERR("Unknown consumer_data type");
752 assert(0);
753 }
754 }
755
756 /*
757 * Destroy and close a already created stream.
758 */
759 static void destroy_close_stream(struct lttng_consumer_stream *stream)
760 {
761 assert(stream);
762
763 DBG("Consumer stream destroy monitored key: %" PRIu64, stream->key);
764
765 /* Destroy tracer buffers of the stream. */
766 consumer_stream_destroy_buffers(stream);
767 /* Close down everything including the relayd if one. */
768 consumer_stream_close(stream);
769 }
770
771 /*
772 * Decrement the stream's channel refcount and if down to 0, return the channel
773 * pointer so it can be destroyed by the caller or NULL if not.
774 */
775 static struct lttng_consumer_channel *unref_channel(
776 struct lttng_consumer_stream *stream)
777 {
778 struct lttng_consumer_channel *free_chan = NULL;
779
780 assert(stream);
781 assert(stream->chan);
782
783 /* Update refcount of channel and see if we need to destroy it. */
784 if (!uatomic_sub_return(&stream->chan->refcount, 1)
785 && !uatomic_read(&stream->chan->nb_init_stream_left)) {
786 free_chan = stream->chan;
787 }
788
789 return free_chan;
790 }
791
/*
 * Destroy a stream completely. This will delete, close and free the stream.
 * Once return, the stream is NO longer usable. Its channel may get destroyed
 * if conditions are met for a monitored stream.
 *
 * This MUST be called WITHOUT the consumer data and stream lock acquired if
 * the stream is in _monitor_ mode else it does not matter.
 */
void consumer_stream_destroy(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	assert(stream);

	/* Stream is in monitor mode. */
	if (stream->monitor) {
		struct lttng_consumer_channel *free_chan = NULL;

		/*
		 * This means that the stream was successfully removed from the streams
		 * list of the channel and sent to the right thread managing this
		 * stream thus being globally visible.
		 */
		if (stream->globally_visible) {
			/* Lock order: consumer data, then channel, then stream. */
			pthread_mutex_lock(&consumer_data.lock);
			pthread_mutex_lock(&stream->chan->lock);
			pthread_mutex_lock(&stream->lock);
			/* Remove every reference of the stream in the consumer. */
			consumer_stream_delete(stream, ht);

			destroy_close_stream(stream);

			/* Update channel's refcount of the stream. */
			free_chan = unref_channel(stream);

			/* Indicates that the consumer data state MUST be updated after this. */
			consumer_data.need_update = 1;

			pthread_mutex_unlock(&stream->lock);
			pthread_mutex_unlock(&stream->chan->lock);
			pthread_mutex_unlock(&consumer_data.lock);
		} else {
			/*
			 * If the stream is not visible globally, this needs to be done
			 * outside of the consumer data lock section.
			 */
			free_chan = unref_channel(stream);
		}

		/* Last reference dropped above: destroy the channel. */
		if (free_chan) {
			consumer_del_channel(free_chan);
		}
	} else {
		destroy_close_stream(stream);
	}

	/* Free stream within a RCU call. */
	consumer_stream_free(stream);
}
850
851 /*
852 * Write index of a specific stream either on the relayd or local disk.
853 *
854 * Return 0 on success or else a negative value.
855 */
856 int consumer_stream_write_index(struct lttng_consumer_stream *stream,
857 struct ctf_packet_index *element)
858 {
859 int ret;
860
861 assert(stream);
862 assert(element);
863
864 rcu_read_lock();
865 if (stream->relayd_id != (uint64_t) -1ULL) {
866 struct consumer_relayd_sock_pair *relayd;
867
868 relayd = consumer_find_relayd(stream->relayd_id);
869 if (relayd) {
870 pthread_mutex_lock(&relayd->ctrl_sock_mutex);
871 ret = relayd_send_index(&relayd->control_sock, element,
872 stream->relayd_stream_id, stream->next_net_seq_num - 1);
873 if (ret < 0) {
874 /*
875 * Communication error with lttng-relayd,
876 * perform cleanup now
877 */
878 ERR("Relayd send index failed. Cleaning up relayd %" PRIu64 ".", relayd->id);
879 lttng_consumer_cleanup_relayd(relayd);
880 ret = -1;
881 }
882 pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
883 } else {
884 ERR("Stream %" PRIu64 " relayd ID %" PRIu64 " unknown. Can't write index.",
885 stream->key, stream->relayd_id);
886 ret = -1;
887 }
888 } else {
889 if (lttng_index_file_write(stream->index_file, element)) {
890 ret = -1;
891 } else {
892 ret = 0;
893 }
894 }
895 if (ret < 0) {
896 goto error;
897 }
898
899 error:
900 rcu_read_unlock();
901 return ret;
902 }
903
904 static ssize_t metadata_bucket_flush(
905 const struct stream_subbuffer *buffer, void *data)
906 {
907 ssize_t ret;
908 struct lttng_consumer_stream *stream = data;
909
910 ret = consumer_stream_consume_mmap(NULL, stream, buffer);
911 if (ret < 0) {
912 goto end;
913 }
914 end:
915 return ret;
916 }
917
918 static ssize_t metadata_bucket_consume(
919 struct lttng_consumer_local_data *unused,
920 struct lttng_consumer_stream *stream,
921 const struct stream_subbuffer *subbuffer)
922 {
923 ssize_t ret;
924 enum metadata_bucket_status status;
925
926 status = metadata_bucket_fill(stream->metadata_bucket, subbuffer);
927 switch (status) {
928 case METADATA_BUCKET_STATUS_OK:
929 /* Return consumed size. */
930 ret = subbuffer->buffer.buffer.size;
931 break;
932 default:
933 ret = -1;
934 }
935
936 return ret;
937 }
938
939 int consumer_stream_enable_metadata_bucketization(
940 struct lttng_consumer_stream *stream)
941 {
942 int ret = 0;
943
944 assert(stream->metadata_flag);
945 assert(!stream->metadata_bucket);
946 assert(stream->chan->output == CONSUMER_CHANNEL_MMAP);
947
948 stream->metadata_bucket = metadata_bucket_create(
949 metadata_bucket_flush, stream);
950 if (!stream->metadata_bucket) {
951 ret = -1;
952 goto end;
953 }
954
955 stream->read_subbuffer_ops.consume_subbuffer = metadata_bucket_consume;
956 end:
957 return ret;
958 }
959
960 void consumer_stream_metadata_set_version(
961 struct lttng_consumer_stream *stream, uint64_t new_version)
962 {
963 assert(new_version > stream->metadata_version);
964 stream->metadata_version = new_version;
965 stream->reset_metadata_flag = 1;
966
967 if (stream->metadata_bucket) {
968 metadata_bucket_reset(stream->metadata_bucket);
969 }
970 }
This page took 0.051056 seconds and 4 git commands to generate.