/*
 * Copyright (C) 2011 Julien Desfossez <julien.desfossez@polymtl.ca>
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2012 David Goulet <dgoulet@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */
#define _LGPL_SOURCE
#include <assert.h>
#include <poll.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>
#include <inttypes.h>

#include <bin/lttng-consumerd/health-consumerd.h>
#include <common/common.h>
#include <common/utils.h>
#include <common/time.h>
#include <common/compat/poll.h>
#include <common/compat/endian.h>
#include <common/index/index.h>
#include <common/kernel-ctl/kernel-ctl.h>
#include <common/sessiond-comm/relayd.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/relayd/relayd.h>
#include <common/ust-consumer/ust-consumer.h>
#include <common/consumer/consumer-timer.h>
#include <common/consumer/consumer.h>
#include <common/consumer/consumer-stream.h>
#include <common/consumer/consumer-testpoint.h>
#include <common/align.h>
#include <common/consumer/consumer-metadata-cache.h>
#include <common/trace-chunk.h>
#include <common/trace-chunk-registry.h>
#include <common/string-utils/format.h>
#include <common/dynamic-array.h>
struct lttng_consumer_global_data consumer_data = {
	.type = LTTNG_CONSUMER_UNKNOWN,
};
enum consumer_channel_action {
	CONSUMER_CHANNEL_ADD,
	CONSUMER_CHANNEL_DEL,
	CONSUMER_CHANNEL_QUIT,
};
struct consumer_channel_msg {
	enum consumer_channel_action action;
	struct lttng_consumer_channel *chan;	/* add */
	uint64_t key;				/* del */
};
/* Flag used to temporarily pause data consumption from testpoints. */
int data_consumption_paused;
/*
 * Flag to inform the polling thread to quit when all fd hung up. Updated by
 * the consumer_thread_receive_fds when it notices that all fds have hung up.
 * Also updated by the signal handler (consumer_should_exit()). Read by the
 * polling threads.
 */
int consumer_quit;
/*
 * Global hash table containing respectively metadata and data streams. The
 * stream element in this ht should only be updated by the metadata poll thread
 * for the metadata and the data poll thread for the data.
 */
static struct lttng_ht *metadata_ht;
static struct lttng_ht *data_ht;
static const char *get_consumer_domain(void)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return DEFAULT_KERNEL_TRACE_DIR;
	case LTTNG_CONSUMER64_UST:
		/* Fall-through. */
	case LTTNG_CONSUMER32_UST:
		return DEFAULT_UST_TRACE_DIR;
	default:
		abort();
	}
}
/*
 * Notify a thread lttng pipe to poll back again. This usually means that some
 * global state has changed so we just send the thread back into a poll wait
 * call.
 */
static void notify_thread_lttng_pipe(struct lttng_pipe *pipe)
{
	struct lttng_consumer_stream *null_stream = NULL;

	assert(pipe);

	(void) lttng_pipe_write(pipe, &null_stream, sizeof(null_stream));
}
static void notify_health_quit_pipe(int *pipe)
{
	ssize_t ret;

	ret = lttng_write(pipe[1], "4", 1);
	if (ret < 1) {
		PERROR("write consumer health quit");
	}
}
static void notify_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel *chan,
		uint64_t key,
		enum consumer_channel_action action)
{
	struct consumer_channel_msg msg;
	ssize_t ret;

	memset(&msg, 0, sizeof(msg));

	msg.action = action;
	msg.chan = chan;
	msg.key = key;
	ret = lttng_write(ctx->consumer_channel_pipe[1], &msg, sizeof(msg));
	if (ret < sizeof(msg)) {
		PERROR("notify_channel_pipe write error");
	}
}
void notify_thread_del_channel(struct lttng_consumer_local_data *ctx,
		uint64_t key)
{
	notify_channel_pipe(ctx, NULL, key, CONSUMER_CHANNEL_DEL);
}
static int read_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel **chan,
		uint64_t *key,
		enum consumer_channel_action *action)
{
	struct consumer_channel_msg msg;
	ssize_t ret;

	ret = lttng_read(ctx->consumer_channel_pipe[0], &msg, sizeof(msg));
	if (ret < sizeof(msg)) {
		ret = -1;
		goto error;
	}
	*action = msg.action;
	*chan = msg.chan;
	*key = msg.key;
error:
	return (int) ret;
}
/*
 * Cleanup the stream list of a channel. Those streams are not yet globally
 * visible.
 */
static void clean_channel_stream_list(struct lttng_consumer_channel *channel)
{
	struct lttng_consumer_stream *stream, *stmp;

	assert(channel);

	/* Delete streams that might have been left in the stream list. */
	cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
			send_node) {
		cds_list_del(&stream->send_node);
		/*
		 * Once a stream is added to this list, the buffers were created so we
		 * have a guarantee that this call will succeed. Setting the monitor
		 * mode to 0 so we don't lock nor try to delete the stream from the
		 * global hash table.
		 */
		stream->monitor = 0;
		consumer_stream_destroy(stream, NULL);
	}
}
/*
 * Find a stream. The consumer_data.lock must be locked during this
 * call.
 */
static struct lttng_consumer_stream *find_stream(uint64_t key,
		struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_stream *stream = NULL;

	assert(ht);

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	lttng_ht_lookup(ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		stream = caa_container_of(node, struct lttng_consumer_stream, node);
	}

	return stream;
}
static void steal_stream_key(uint64_t key, struct lttng_ht *ht)
{
	struct lttng_consumer_stream *stream;

	rcu_read_lock();
	stream = find_stream(key, ht);
	if (stream) {
		stream->key = (uint64_t) -1ULL;
		/*
		 * We don't want the lookup to match, but we still need
		 * to iterate on this stream when iterating over the hash table. Just
		 * change the node key.
		 */
		stream->node.key = (uint64_t) -1ULL;
	}
	rcu_read_unlock();
}
/*
 * Return a channel object for the given key.
 *
 * RCU read side lock MUST be acquired before calling this function and
 * protects the channel ptr.
 */
struct lttng_consumer_channel *consumer_find_channel(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_channel *channel = NULL;

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	lttng_ht_lookup(consumer_data.channel_ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		channel = caa_container_of(node, struct lttng_consumer_channel, node);
	}

	return channel;
}
/*
 * There is a possibility that the consumer does not have enough time between
 * the close of the channel on the session daemon and the cleanup in here thus
 * once we have a channel add with an existing key, we know for sure that this
 * channel will eventually get cleaned up by all streams being closed.
 *
 * This function just nullifies the already existing channel key.
 */
static void steal_channel_key(uint64_t key)
{
	struct lttng_consumer_channel *channel;

	rcu_read_lock();
	channel = consumer_find_channel(key);
	if (channel) {
		channel->key = (uint64_t) -1ULL;
		/*
		 * We don't want the lookup to match, but we still need to iterate on
		 * this channel when iterating over the hash table. Just change the
		 * node key.
		 */
		channel->node.key = (uint64_t) -1ULL;
	}
	rcu_read_unlock();
}
static void free_channel_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct lttng_consumer_channel *channel =
		caa_container_of(node, struct lttng_consumer_channel, node);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_free_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
		abort();
	}
	free(channel);
}
/*
 * RCU protected relayd socket pair free.
 */
static void free_relayd_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct consumer_relayd_sock_pair *relayd =
		caa_container_of(node, struct consumer_relayd_sock_pair, node);

	/*
	 * Close all sockets. This is done in the call RCU since we don't want the
	 * socket fds to be reassigned thus potentially creating bad state of the
	 * relayd object.
	 *
	 * We do not have to lock the control socket mutex here since at this stage
	 * there is no one referencing to this relayd object.
	 */
	(void) relayd_close(&relayd->control_sock);
	(void) relayd_close(&relayd->data_sock);

	pthread_mutex_destroy(&relayd->ctrl_sock_mutex);
	free(relayd);
}
/*
 * Destroy and free relayd socket pair object.
 */
void consumer_destroy_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret;
	struct lttng_ht_iter iter;

	if (relayd == NULL) {
		return;
	}

	DBG("Consumer destroy and close relayd socket pair");

	iter.iter.node = &relayd->node.node;
	ret = lttng_ht_del(consumer_data.relayd_ht, &iter);
	if (ret != 0) {
		/* We assume the relayd is being or is destroyed */
		return;
	}

	/* RCU free() call */
	call_rcu(&relayd->node.head, free_relayd_rcu);
}
/*
 * Remove a channel from the global list protected by a mutex. This function is
 * also responsible for freeing its data structures.
 */
void consumer_del_channel(struct lttng_consumer_channel *channel)
{
	struct lttng_ht_iter iter;

	DBG("Consumer delete channel key %" PRIu64, channel->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);

	/* Destroy streams that might have been left in the stream list. */
	clean_channel_stream_list(channel);

	if (channel->live_timer_enabled == 1) {
		consumer_timer_live_stop(channel);
	}
	if (channel->monitor_timer_enabled == 1) {
		consumer_timer_monitor_stop(channel);
	}

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

	lttng_trace_chunk_put(channel->trace_chunk);
	channel->trace_chunk = NULL;

	if (channel->is_published) {
		int ret;

		rcu_read_lock();
		iter.iter.node = &channel->node.node;
		ret = lttng_ht_del(consumer_data.channel_ht, &iter);
		assert(!ret);

		iter.iter.node = &channel->channels_by_session_id_ht_node.node;
		ret = lttng_ht_del(consumer_data.channels_by_session_id_ht,
				&iter);
		assert(!ret);
		rcu_read_unlock();
	}

	channel->is_deleted = true;
	call_rcu(&channel->node.head, free_channel_rcu);
end:
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);
}
/*
 * Iterate over the relayd hash table and destroy each element. Finally,
 * destroy the whole hash table.
 */
static void cleanup_relayd_ht(void)
{
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd;

	rcu_read_lock();

	cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
			node.node) {
		consumer_destroy_relayd(relayd);
	}

	rcu_read_unlock();

	lttng_ht_destroy(consumer_data.relayd_ht);
}
/*
 * Update the end point status of all streams having the given network sequence
 * index (relayd index).
 *
 * It's atomically set without having the stream mutex locked which is fine
 * because we handle the write/read race with a pipe wakeup for each thread.
 */
static void update_endpoint_status_by_netidx(uint64_t net_seq_idx,
		enum consumer_endpoint_status status)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer set delete flag on stream by idx %" PRIu64, net_seq_idx);

	rcu_read_lock();

	/* Let's begin with metadata */
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to metadata stream %d", stream->wait_fd);
		}
	}

	/* Follow up by the data streams */
	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to data stream %d", stream->wait_fd);
		}
	}
	rcu_read_unlock();
}
/*
 * Cleanup a relayd object by flagging every associated streams for deletion,
 * destroying the object meaning removing it from the relayd hash table,
 * closing the sockets and freeing the memory in a RCU call.
 *
 * If a local data context is available, notify the threads that the streams'
 * state have changed.
 */
void lttng_consumer_cleanup_relayd(struct consumer_relayd_sock_pair *relayd)
{
	uint64_t netidx;

	assert(relayd);

	DBG("Cleaning up relayd object ID %"PRIu64, relayd->net_seq_idx);

	/* Save the net sequence index before destroying the object */
	netidx = relayd->net_seq_idx;

	/*
	 * Delete the relayd from the relayd hash table, close the sockets and free
	 * the object in a RCU call.
	 */
	consumer_destroy_relayd(relayd);

	/* Set inactive endpoint to all streams */
	update_endpoint_status_by_netidx(netidx, CONSUMER_ENDPOINT_INACTIVE);

	/*
	 * With a local data context, notify the threads that the streams' state
	 * have changed. The write() action on the pipe acts as an "implicit"
	 * memory barrier ordering the updates of the end point status from the
	 * read of this status which happens AFTER receiving this notify.
	 */
	notify_thread_lttng_pipe(relayd->ctx->consumer_data_pipe);
	notify_thread_lttng_pipe(relayd->ctx->consumer_metadata_pipe);
}
/*
 * Flag a relayd socket pair for destruction. Destroy it if the refcount
 * reaches zero.
 *
 * RCU read side lock MUST be acquired before calling this function.
 */
void consumer_flag_relayd_for_destroy(struct consumer_relayd_sock_pair *relayd)
{
	assert(relayd);

	/* Set destroy flag for this object */
	uatomic_set(&relayd->destroy_flag, 1);

	/* Destroy the relayd if refcount is 0 */
	if (uatomic_read(&relayd->refcount) == 0) {
		consumer_destroy_relayd(relayd);
	}
}
/*
 * Completely destroy stream from every visible data structure and the given
 * hash table if one.
 *
 * Once this call returns, the stream object is no longer usable nor visible.
 */
void consumer_del_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	consumer_stream_destroy(stream, ht);
}
/*
 * XXX naming of del vs destroy is all mixed up.
 */
void consumer_del_stream_for_data(struct lttng_consumer_stream *stream)
{
	consumer_stream_destroy(stream, data_ht);
}

void consumer_del_stream_for_metadata(struct lttng_consumer_stream *stream)
{
	consumer_stream_destroy(stream, metadata_ht);
}
void consumer_stream_update_channel_attributes(
		struct lttng_consumer_stream *stream,
		struct lttng_consumer_channel *channel)
{
	stream->channel_read_only_attributes.tracefile_size =
			channel->tracefile_size;
}
struct lttng_consumer_stream *consumer_allocate_stream(uint64_t channel_key,
		uint64_t stream_key,
		const char *channel_name,
		uint64_t relayd_id,
		uint64_t session_id,
		struct lttng_trace_chunk *trace_chunk,
		int cpu,
		int *alloc_ret,
		enum consumer_channel_type type,
		unsigned int monitor)
{
	int ret;
	struct lttng_consumer_stream *stream;

	stream = zmalloc(sizeof(*stream));
	if (stream == NULL) {
		PERROR("malloc struct lttng_consumer_stream");
		ret = -ENOMEM;
		goto end;
	}

	rcu_read_lock();

	if (trace_chunk && !lttng_trace_chunk_get(trace_chunk)) {
		ERR("Failed to acquire trace chunk reference during the creation of a stream");
		ret = -1;
		goto error;
	}

	stream->key = stream_key;
	stream->trace_chunk = trace_chunk;
	stream->out_fd_offset = 0;
	stream->output_written = 0;
	stream->net_seq_idx = relayd_id;
	stream->session_id = session_id;
	stream->monitor = monitor;
	stream->endpoint_status = CONSUMER_ENDPOINT_ACTIVE;
	stream->index_file = NULL;
	stream->last_sequence_number = -1ULL;
	stream->rotate_position = -1ULL;
	pthread_mutex_init(&stream->lock, NULL);
	pthread_mutex_init(&stream->metadata_timer_lock, NULL);

	/* If channel is the metadata, flag this stream as metadata. */
	if (type == CONSUMER_CHANNEL_TYPE_METADATA) {
		stream->metadata_flag = 1;
		/* Metadata is flat out. */
		strncpy(stream->name, DEFAULT_METADATA_NAME, sizeof(stream->name));
		/* Live rendez-vous point. */
		pthread_cond_init(&stream->metadata_rdv, NULL);
		pthread_mutex_init(&stream->metadata_rdv_lock, NULL);
	} else {
		/* Format stream name to <channel_name>_<cpu_number> */
		ret = snprintf(stream->name, sizeof(stream->name), "%s_%d",
				channel_name, cpu);
		if (ret < 0) {
			PERROR("snprintf stream name");
			goto error;
		}
	}

	/* Key is always the wait_fd for streams. */
	lttng_ht_node_init_u64(&stream->node, stream->key);

	/* Init node per channel id key */
	lttng_ht_node_init_u64(&stream->node_channel_id, channel_key);

	/* Init session id node with the stream session id */
	lttng_ht_node_init_u64(&stream->node_session_id, stream->session_id);

	DBG3("Allocated stream %s (key %" PRIu64 ", chan_key %" PRIu64
			" relayd_id %" PRIu64 ", session_id %" PRIu64,
			stream->name, stream->key, channel_key,
			stream->net_seq_idx, stream->session_id);

	rcu_read_unlock();
	return stream;

error:
	rcu_read_unlock();
	lttng_trace_chunk_put(stream->trace_chunk);
	free(stream);
end:
	if (alloc_ret) {
		*alloc_ret = ret;
	}
	return NULL;
}
/*
 * Add a stream to the global list protected by a mutex.
 */
void consumer_add_data_stream(struct lttng_consumer_stream *stream)
{
	struct lttng_ht *ht = data_ht;

	assert(stream);
	assert(ht);

	DBG3("Adding consumer stream %" PRIu64, stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->chan->timer_lock);
	pthread_mutex_lock(&stream->lock);
	rcu_read_lock();

	/* Steal stream identifier to avoid having streams with the same key */
	steal_stream_key(stream->key, ht);

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_u64(consumer_data.stream_per_chan_id_ht,
			&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to steal
	 * the key since the HT does not use it and we allow to add redundant keys
	 * into this table.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any action
	 * in terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of this
	 * stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		/* Increment refcount before decrementing nb_init_stream_left */
		uatomic_inc(&stream->chan->refcount);
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	/* Update consumer data once the node is inserted. */
	consumer_data.stream_count++;
	consumer_data.need_update = 1;

	rcu_read_unlock();
	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->timer_lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&consumer_data.lock);
}
/*
 * Add relayd socket to global consumer data hashtable. RCU read side lock MUST
 * be acquired before calling this.
 */
static int add_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret = 0;
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;

	assert(relayd);

	lttng_ht_lookup(consumer_data.relayd_ht,
			&relayd->net_seq_idx, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		goto end;
	}
	lttng_ht_add_unique_u64(consumer_data.relayd_ht, &relayd->node);

end:
	return ret;
}
/*
 * Allocate and return a consumer relayd socket.
 */
static struct consumer_relayd_sock_pair *consumer_allocate_relayd_sock_pair(
		uint64_t net_seq_idx)
{
	struct consumer_relayd_sock_pair *obj = NULL;

	/* net sequence index of -1 is a failure */
	if (net_seq_idx == (uint64_t) -1ULL) {
		goto error;
	}

	obj = zmalloc(sizeof(struct consumer_relayd_sock_pair));
	if (obj == NULL) {
		PERROR("zmalloc relayd sock");
		goto error;
	}

	obj->net_seq_idx = net_seq_idx;
	obj->refcount = 0;
	obj->destroy_flag = 0;
	obj->control_sock.sock.fd = -1;
	obj->data_sock.sock.fd = -1;
	lttng_ht_node_init_u64(&obj->node, obj->net_seq_idx);
	pthread_mutex_init(&obj->ctrl_sock_mutex, NULL);

error:
	return obj;
}
/*
 * Find a relayd socket pair in the global consumer data.
 *
 * Return the object if found else NULL.
 * RCU read-side lock must be held across this call and while using the
 * returned object.
 */
struct consumer_relayd_sock_pair *consumer_find_relayd(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Negative keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		goto error;
	}

	lttng_ht_lookup(consumer_data.relayd_ht, &key,
			&iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		relayd = caa_container_of(node, struct consumer_relayd_sock_pair, node);
	}

error:
	return relayd;
}
/*
 * Find a relayd and send the stream
 *
 * Returns 0 on success, < 0 on error
 */
int consumer_send_relayd_stream(struct lttng_consumer_stream *stream,
		char *path)
{
	int ret = 0;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream);
	assert(stream->net_seq_idx != -1ULL);
	assert(path);

	/* The stream is not metadata. Get relayd reference if exists. */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		/* Add stream on the relayd */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_add_stream(&relayd->control_sock, stream->name,
				get_consumer_domain(), path, &stream->relayd_stream_id,
				stream->chan->tracefile_size,
				stream->chan->tracefile_count,
				stream->trace_chunk);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			ERR("Relayd add stream failed. Cleaning up relayd %" PRIu64".", relayd->net_seq_idx);
			lttng_consumer_cleanup_relayd(relayd);
			goto end;
		}

		uatomic_inc(&relayd->refcount);
		stream->sent_to_relayd = 1;
	} else {
		ERR("Stream %" PRIu64 " relayd ID %" PRIu64 " unknown. Can't send it.",
				stream->key, stream->net_seq_idx);
		ret = -1;
		goto end;
	}

	DBG("Stream %s with key %" PRIu64 " sent to relayd id %" PRIu64,
			stream->name, stream->key, stream->net_seq_idx);

end:
	rcu_read_unlock();
	return ret;
}
/*
 * Find a relayd and send the streams sent message
 *
 * Returns 0 on success, < 0 on error
 */
int consumer_send_relayd_streams_sent(uint64_t net_seq_idx)
{
	int ret = 0;
	struct consumer_relayd_sock_pair *relayd;

	assert(net_seq_idx != -1ULL);

	/* The stream is not metadata. Get relayd reference if exists. */
	rcu_read_lock();
	relayd = consumer_find_relayd(net_seq_idx);
	if (relayd != NULL) {
		/* Add stream on the relayd */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_streams_sent(&relayd->control_sock);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			ERR("Relayd streams sent failed. Cleaning up relayd %" PRIu64".", relayd->net_seq_idx);
			lttng_consumer_cleanup_relayd(relayd);
			goto end;
		}
	} else {
		ERR("Relayd ID %" PRIu64 " unknown. Can't send streams_sent.",
				net_seq_idx);
		ret = -1;
		goto end;
	}

	ret = 0;
	DBG("All streams sent relayd id %" PRIu64, net_seq_idx);

end:
	rcu_read_unlock();
	return ret;
}
/*
 * Find a relayd and close the stream
 */
void close_relayd_stream(struct lttng_consumer_stream *stream)
{
	struct consumer_relayd_sock_pair *relayd;

	/* The stream is not metadata. Get relayd reference if exists. */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd) {
		consumer_stream_relayd_close(stream, relayd);
	}
	rcu_read_unlock();
}
/*
 * Handle stream for relayd transmission if the stream applies for network
 * streaming where the net sequence index is set.
 *
 * Return destination file descriptor or negative value on error.
 */
static int write_relayd_stream_header(struct lttng_consumer_stream *stream,
		size_t data_size, unsigned long padding,
		struct consumer_relayd_sock_pair *relayd)
{
	int outfd = -1, ret;
	struct lttcomm_relayd_data_hdr data_hdr;

	/* Safety net */
	assert(stream);
	assert(relayd);

	/* Reset data header */
	memset(&data_hdr, 0, sizeof(data_hdr));

	if (stream->metadata_flag) {
		/* Caller MUST acquire the relayd control socket lock */
		ret = relayd_send_metadata(&relayd->control_sock, data_size);
		if (ret < 0) {
			goto error;
		}

		/* Metadata are always sent on the control socket. */
		outfd = relayd->control_sock.sock.fd;
	} else {
		/* Set header with stream information */
		data_hdr.stream_id = htobe64(stream->relayd_stream_id);
		data_hdr.data_size = htobe32(data_size);
		data_hdr.padding_size = htobe32(padding);

		/*
		 * Note that net_seq_num below is assigned with the *current* value of
		 * next_net_seq_num and only after that the next_net_seq_num will be
		 * incremented. This is why when issuing a command on the relayd using
		 * this next value, 1 should always be subtracted in order to compare
		 * the last seen sequence number on the relayd side to the last sent.
		 */
		data_hdr.net_seq_num = htobe64(stream->next_net_seq_num);
		/* Other fields are zeroed previously */

		ret = relayd_send_data_hdr(&relayd->data_sock, &data_hdr,
				sizeof(data_hdr));
		if (ret < 0) {
			goto error;
		}

		++stream->next_net_seq_num;

		/* Set to go on data socket */
		outfd = relayd->data_sock.sock.fd;
	}

error:
	return outfd;
}
/*
 * Trigger a dump of the metadata content. Following/during the successful
 * completion of this call, the metadata poll thread will start receiving
 * metadata packets to consume.
 *
 * The caller must hold the channel and stream locks.
 */
static int consumer_metadata_stream_dump(struct lttng_consumer_stream *stream)
{
	int ret;

	ASSERT_LOCKED(stream->chan->lock);
	ASSERT_LOCKED(stream->lock);
	assert(stream->metadata_flag);
	assert(stream->chan->trace_chunk);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		/*
		 * Reset the position of what has been read from the
		 * metadata cache to 0 so we can dump it again.
		 */
		ret = kernctl_metadata_cache_dump(stream->wait_fd);
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/*
		 * Reset the position pushed from the metadata cache so it
		 * will write from the beginning on the next push.
		 */
		stream->ust_metadata_pushed = 0;
		ret = consumer_metadata_wakeup_pipe(stream->chan);
		break;
	default:
		ERR("Unknown consumer_data type");
		abort();
	}
	if (ret < 0) {
		ERR("Failed to dump the metadata cache");
	}
	return ret;
}
int lttng_consumer_channel_set_trace_chunk(
		struct lttng_consumer_channel *channel,
		struct lttng_trace_chunk *new_trace_chunk)
{
	pthread_mutex_lock(&channel->lock);
	if (channel->is_deleted) {
		/*
		 * The channel has been logically deleted and should no longer
		 * be used. It has released its reference to its current trace
		 * chunk and should not acquire a new one.
		 *
		 * Return success as there is nothing for the caller to do.
		 */
		goto end;
	}

	/*
	 * The acquisition of the reference cannot fail (barring
	 * a severe internal error) since a reference to the published
	 * chunk is already held by the caller.
	 */
	if (new_trace_chunk) {
		const bool acquired_reference = lttng_trace_chunk_get(
				new_trace_chunk);

		assert(acquired_reference);
	}

	lttng_trace_chunk_put(channel->trace_chunk);
	channel->trace_chunk = new_trace_chunk;
end:
	pthread_mutex_unlock(&channel->lock);
	return 0;
}
/*
 * Allocate and return a new lttng_consumer_channel object using the given key
 * to initialize the hash table node.
 *
 * On error, return NULL.
 */
struct lttng_consumer_channel *consumer_allocate_channel(uint64_t key,
		uint64_t session_id,
		const uint64_t *chunk_id,
		const char *pathname,
		const char *name,
		uint64_t relayd_id,
		enum lttng_event_output output,
		uint64_t tracefile_size,
		uint64_t tracefile_count,
		uint64_t session_id_per_pid,
		unsigned int monitor,
		unsigned int live_timer_interval,
		const char *root_shm_path,
		const char *shm_path)
{
	struct lttng_consumer_channel *channel = NULL;
	struct lttng_trace_chunk *trace_chunk = NULL;

	if (chunk_id) {
		trace_chunk = lttng_trace_chunk_registry_find_chunk(
				consumer_data.chunk_registry, session_id,
				*chunk_id);
		if (!trace_chunk) {
			ERR("Failed to find trace chunk reference during creation of channel");
			goto end;
		}
	}

	channel = zmalloc(sizeof(*channel));
	if (channel == NULL) {
		PERROR("malloc struct lttng_consumer_channel");
		goto end;
	}

	channel->key = key;
	channel->refcount = 0;
	channel->session_id = session_id;
	channel->session_id_per_pid = session_id_per_pid;
	channel->relayd_id = relayd_id;
	channel->tracefile_size = tracefile_size;
	channel->tracefile_count = tracefile_count;
	channel->monitor = monitor;
	channel->live_timer_interval = live_timer_interval;
	pthread_mutex_init(&channel->lock, NULL);
	pthread_mutex_init(&channel->timer_lock, NULL);

	switch (output) {
	case LTTNG_EVENT_SPLICE:
		channel->output = CONSUMER_CHANNEL_SPLICE;
		break;
	case LTTNG_EVENT_MMAP:
		channel->output = CONSUMER_CHANNEL_MMAP;
		break;
	default:
		assert(0);
		free(channel);
		channel = NULL;
		goto end;
	}

	/*
	 * In monitor mode, the streams associated with the channel will be put in
	 * a special list ONLY owned by this channel. So, the refcount is set to 1
	 * here meaning that the channel itself has streams that are referenced.
	 *
	 * On a channel deletion, once the channel is no longer visible, the
	 * refcount is decremented and checked for a zero value to delete it. With
	 * streams in no monitor mode, it will now be safe to destroy the channel.
	 */
	if (!channel->monitor) {
		channel->refcount = 1;
	}

	strncpy(channel->pathname, pathname, sizeof(channel->pathname));
	channel->pathname[sizeof(channel->pathname) - 1] = '\0';

	strncpy(channel->name, name, sizeof(channel->name));
	channel->name[sizeof(channel->name) - 1] = '\0';

	if (root_shm_path) {
		strncpy(channel->root_shm_path, root_shm_path, sizeof(channel->root_shm_path));
		channel->root_shm_path[sizeof(channel->root_shm_path) - 1] = '\0';
	}
	if (shm_path) {
		strncpy(channel->shm_path, shm_path, sizeof(channel->shm_path));
		channel->shm_path[sizeof(channel->shm_path) - 1] = '\0';
	}

	lttng_ht_node_init_u64(&channel->node, channel->key);
	lttng_ht_node_init_u64(&channel->channels_by_session_id_ht_node,
			channel->session_id);

	channel->wait_fd = -1;
	CDS_INIT_LIST_HEAD(&channel->streams.head);

	if (trace_chunk) {
		int ret = lttng_consumer_channel_set_trace_chunk(channel,
				trace_chunk);
		if (ret) {
			goto error;
		}
	}

	DBG("Allocated channel (key %" PRIu64 ")", channel->key);

end:
	lttng_trace_chunk_put(trace_chunk);
	return channel;
error:
	consumer_del_channel(channel);
	channel = NULL;
	goto end;
}
/*
 * Add a channel to the global list protected by a mutex.
 *
 * Always return 0 indicating success.
 */
int consumer_add_channel(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);
	pthread_mutex_lock(&channel->timer_lock);

	/*
	 * This gives us a guarantee that the channel we are about to add to the
	 * channel hash table will be unique. See this function comment on the why
	 * we need to steal the channel key at this stage.
	 */
	steal_channel_key(channel->key);

	rcu_read_lock();
	lttng_ht_add_unique_u64(consumer_data.channel_ht, &channel->node);
	lttng_ht_add_u64(consumer_data.channels_by_session_id_ht,
			&channel->channels_by_session_id_ht_node);
	rcu_read_unlock();
	channel->is_published = true;

	pthread_mutex_unlock(&channel->timer_lock);
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	if (channel->wait_fd != -1 && channel->type == CONSUMER_CHANNEL_TYPE_DATA) {
		notify_channel_pipe(ctx, channel, -1, CONSUMER_CHANNEL_ADD);
	}

	return 0;
}
/*
 * Allocate the pollfd structure and the local view of the out fds to avoid
 * doing a lookup in the linked list and concurrency issues when writing is
 * needed. Called with consumer_data.lock held.
 *
 * Returns the number of fds in the structures.
 */
static int update_poll_array(struct lttng_consumer_local_data *ctx,
		struct pollfd **pollfd, struct lttng_consumer_stream **local_stream,
		struct lttng_ht *ht, int *nb_inactive_fd)
{
	int i = 0;
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	assert(local_stream);

	DBG("Updating poll fd array");
	*nb_inactive_fd = 0;
	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Only active streams with an active end point can be added to the
		 * poll set and local stream storage of the thread.
		 *
		 * There is a potential race here for endpoint_status to be updated
		 * just after the check. However, this is OK since the stream(s) will
		 * be deleted once the thread is notified that the end point state has
		 * changed where this function will be called back again.
		 *
		 * We track the number of inactive FDs because they still need to be
		 * closed by the polling thread after a wakeup on the data_pipe or
		 * metadata_pipe.
		 */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_INACTIVE) {
			(*nb_inactive_fd)++;
			continue;
		}
		/*
		 * This clobbers way too much the debug output. Uncomment that if you
		 * need it for debugging purposes.
		 */
		(*pollfd)[i].fd = stream->wait_fd;
		(*pollfd)[i].events = POLLIN | POLLPRI;
		local_stream[i] = stream;
		i++;
	}
	rcu_read_unlock();

	/*
	 * Insert the consumer_data_pipe at the end of the array and don't
	 * increment i so nb_fd is the number of real FD.
	 */
	(*pollfd)[i].fd = lttng_pipe_get_readfd(ctx->consumer_data_pipe);
	(*pollfd)[i].events = POLLIN | POLLPRI;

	(*pollfd)[i + 1].fd = lttng_pipe_get_readfd(ctx->consumer_wakeup_pipe);
	(*pollfd)[i + 1].events = POLLIN | POLLPRI;
	return i;
}
/*
 * Poll on the should_quit pipe and the command socket return -1 on
 * error, 1 if should exit, 0 if data is available on the command socket
 */
int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll)
{
	int num_rdy;

restart:
	num_rdy = poll(consumer_sockpoll, 2, -1);
	if (num_rdy == -1) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		PERROR("Poll error");
		return -1;
	}
	if (consumer_sockpoll[0].revents & (POLLIN | POLLPRI)) {
		DBG("consumer_should_quit wake up");
		return 1;
	}
	return 0;
}
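/*
 * consumer_sockpoll[0] is expected to be the should_quit pipe and
 * consumer_sockpoll[1] the command socket, matching the return value
 * convention documented above.
 */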
/*
 * Set the error socket.
 */
void lttng_consumer_set_error_sock(struct lttng_consumer_local_data *ctx,
		int sock)
{
	ctx->consumer_error_socket = sock;
}
/*
 * Set the command socket path.
 */
void lttng_consumer_set_command_sock_path(
		struct lttng_consumer_local_data *ctx, char *sock)
{
	ctx->consumer_command_sock_path = sock;
}
/*
 * Send return code to the session daemon.
 * If the socket is not defined, we return 0, it is not a fatal error
 */
int lttng_consumer_send_error(struct lttng_consumer_local_data *ctx, int cmd)
{
	if (ctx->consumer_error_socket > 0) {
		return lttcomm_send_unix_sock(ctx->consumer_error_socket, &cmd,
				sizeof(enum lttcomm_sessiond_command));
	}

	return 0;
}
/*
 * Close all the tracefiles and stream fds and MUST be called when all
 * instances are destroyed i.e. when all threads were joined and are ended.
 */
void lttng_consumer_cleanup(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;
	unsigned int trace_chunks_left;

	rcu_read_lock();

	cds_lfht_for_each_entry(consumer_data.channel_ht->ht, &iter.iter, channel,
			node.node) {
		consumer_del_channel(channel);
	}

	rcu_read_unlock();

	lttng_ht_destroy(consumer_data.channel_ht);
	lttng_ht_destroy(consumer_data.channels_by_session_id_ht);

	cleanup_relayd_ht();

	lttng_ht_destroy(consumer_data.stream_per_chan_id_ht);
	/*
	 * This HT contains streams that are freed by either the metadata thread or
	 * the data thread so we do *nothing* on the hash table and simply destroy
	 * it.
	 */
	lttng_ht_destroy(consumer_data.stream_list_ht);

	/*
	 * Trace chunks in the registry may still exist if the session
	 * daemon has encountered an internal error and could not
	 * tear down its sessions and/or trace chunks properly.
	 *
	 * Release the session daemon's implicit reference to any remaining
	 * trace chunk and print an error if any trace chunk was found. Note
	 * that there are _no_ legitimate cases for trace chunks to be left,
	 * it is a leak. However, it can happen following a crash of the
	 * session daemon and not emptying the registry would cause an assertion
	 * to hit.
	 */
	trace_chunks_left = lttng_trace_chunk_registry_put_each_chunk(
			consumer_data.chunk_registry);
	if (trace_chunks_left) {
		ERR("%u trace chunks are leaked by lttng-consumerd. "
				"This can be caused by an internal error of the session daemon.",
				trace_chunks_left);
	}
	/* Run all callbacks freeing each chunk. */
	rcu_barrier();
	lttng_trace_chunk_registry_destroy(consumer_data.chunk_registry);
}
/*
 * Called from signal handler.
 */
void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx)
{
	ssize_t ret;

	CMM_STORE_SHARED(consumer_quit, 1);
	ret = lttng_write(ctx->consumer_should_quit[1], "4", 1);
	if (ret < 1) {
		PERROR("write consumer quit");
	}

	DBG("Consumer flag that it should quit");
}
/*
 * Flush pending writes to trace output disk file.
 */
static void lttng_consumer_sync_trace_file(struct lttng_consumer_stream *stream,
		off_t orig_offset)
{
	int ret;
	int outfd = stream->out_fd;

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset < stream->max_sb_size) {
		return;
	}
	lttng_sync_file_range(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size,
			SYNC_FILE_RANGE_WAIT_BEFORE
			| SYNC_FILE_RANGE_WRITE
			| SYNC_FILE_RANGE_WAIT_AFTER);
	/*
	 * Give hints to the kernel about how we access the file:
	 * POSIX_FADV_DONTNEED : we won't re-access data in a near future after
	 * we write it.
	 *
	 * We need to call fadvise again after the file grows because the
	 * kernel does not seem to apply fadvise to non-existing parts of the
	 * file.
	 *
	 * Call fadvise _after_ having waited for the page writeback to
	 * complete because the dirty page writeback semantic is not well
	 * defined. So it can be expected to lead to lower throughput in
	 * streaming.
	 */
	ret = posix_fadvise(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size, POSIX_FADV_DONTNEED);
	if (ret && ret != -ENOSYS) {
		errno = ret;
		PERROR("posix_fadvise on fd %i", outfd);
	}
}
/*
 * Initialise the necessary environment:
 * - create a new context
 * - create the poll_pipe
 * - create the should_quit pipe (for signal handler)
 * - create the thread pipe (for splice)
 *
 * Takes a function pointer as argument, this function is called when data is
 * available on a buffer. This function is responsible to do the
 * kernctl_get_next_subbuf, read the data with mmap or splice depending on the
 * buffer configuration and then kernctl_put_next_subbuf at the end.
 *
 * Returns a pointer to the new context or NULL on error.
 */
struct lttng_consumer_local_data *lttng_consumer_create(
		enum lttng_consumer_type type,
		ssize_t (*buffer_ready)(struct lttng_consumer_stream *stream,
			struct lttng_consumer_local_data *ctx),
		int (*recv_channel)(struct lttng_consumer_channel *channel),
		int (*recv_stream)(struct lttng_consumer_stream *stream),
		int (*update_stream)(uint64_t stream_key, uint32_t state))
{
	int ret;
	struct lttng_consumer_local_data *ctx;

	assert(consumer_data.type == LTTNG_CONSUMER_UNKNOWN ||
			consumer_data.type == type);
	consumer_data.type = type;

	ctx = zmalloc(sizeof(struct lttng_consumer_local_data));
	if (ctx == NULL) {
		PERROR("allocating context");
		goto error;
	}

	ctx->consumer_error_socket = -1;
	ctx->consumer_metadata_socket = -1;
	pthread_mutex_init(&ctx->metadata_socket_lock, NULL);
	/* assign the callbacks */
	ctx->on_buffer_ready = buffer_ready;
	ctx->on_recv_channel = recv_channel;
	ctx->on_recv_stream = recv_stream;
	ctx->on_update_stream = update_stream;

	ctx->consumer_data_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_data_pipe) {
		goto error_poll_pipe;
	}

	ctx->consumer_wakeup_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_wakeup_pipe) {
		goto error_wakeup_pipe;
	}

	ret = pipe(ctx->consumer_should_quit);
	if (ret < 0) {
		PERROR("Error creating recv pipe");
		goto error_quit_pipe;
	}

	ret = pipe(ctx->consumer_channel_pipe);
	if (ret < 0) {
		PERROR("Error creating channel pipe");
		goto error_channel_pipe;
	}

	ctx->consumer_metadata_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_metadata_pipe) {
		goto error_metadata_pipe;
	}

	ctx->channel_monitor_pipe = -1;

	return ctx;

error_metadata_pipe:
	utils_close_pipe(ctx->consumer_channel_pipe);
error_channel_pipe:
	utils_close_pipe(ctx->consumer_should_quit);
error_quit_pipe:
	lttng_pipe_destroy(ctx->consumer_wakeup_pipe);
error_wakeup_pipe:
	lttng_pipe_destroy(ctx->consumer_data_pipe);
error_poll_pipe:
	free(ctx);
error:
	return NULL;
}
/*
 * Iterate over all streams of the hashtable and free them properly.
 */
static void destroy_data_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any error
		 * can't be handled.
		 */
		(void) consumer_del_stream(stream, ht);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}
/*
 * Iterate over all streams of the metadata hashtable and free them
 * properly.
 */
static void destroy_metadata_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any error
		 * can't be handled.
		 */
		(void) consumer_del_metadata_stream(stream, ht);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}
/*
 * Close all fds associated with the instance and free the context.
 */
void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx)
{
	int ret;

	DBG("Consumer destroying it. Closing everything.");

	if (!ctx) {
		return;
	}

	destroy_data_stream_ht(data_ht);
	destroy_metadata_stream_ht(metadata_ht);

	ret = close(ctx->consumer_error_socket);
	if (ret) {
		PERROR("close");
	}
	ret = close(ctx->consumer_metadata_socket);
	if (ret) {
		PERROR("close");
	}
	utils_close_pipe(ctx->consumer_channel_pipe);
	lttng_pipe_destroy(ctx->consumer_data_pipe);
	lttng_pipe_destroy(ctx->consumer_metadata_pipe);
	lttng_pipe_destroy(ctx->consumer_wakeup_pipe);
	utils_close_pipe(ctx->consumer_should_quit);

	unlink(ctx->consumer_command_sock_path);
	free(ctx);
}
/*
 * Write the metadata stream id on the specified file descriptor.
 */
static int write_relayd_metadata_id(int fd,
		struct lttng_consumer_stream *stream,
		unsigned long padding)
{
	ssize_t ret;
	struct lttcomm_relayd_metadata_payload hdr;

	hdr.stream_id = htobe64(stream->relayd_stream_id);
	hdr.padding_size = htobe32(padding);
	ret = lttng_write(fd, (void *) &hdr, sizeof(hdr));
	if (ret < sizeof(hdr)) {
		/*
		 * This error means that the fd's end is closed so ignore the PERROR
		 * not to clobber the error output since this can happen in a normal
		 * code path.
		 */
		if (errno != EPIPE) {
			PERROR("write metadata stream id");
		}
		DBG3("Consumer failed to write relayd metadata id (errno: %d)", errno);
		/*
		 * Set ret to a negative value because if ret != sizeof(hdr), we don't
		 * handle writing the missing part so report that as an error and
		 * don't lie to the caller.
		 */
		ret = -1;
		goto end;
	}
	DBG("Metadata stream id %" PRIu64 " with padding %lu written before data",
			stream->relayd_stream_id, padding);

end:
	return (int) ret;
}
/*
 * Mmap the ring buffer, read it and write the data to the tracefile. This is a
 * core function for writing trace buffers to either the local filesystem or
 * the network.
 *
 * It must be called with the stream and the channel lock held.
 *
 * Careful review MUST be put if any changes occur!
 *
 * Returns the number of bytes written
 */
ssize_t lttng_consumer_on_read_subbuffer_mmap(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream,
		const char *buffer,
		unsigned long len,
		unsigned long padding,
		struct ctf_packet_index *index)
{
	ssize_t ret = 0;
	off_t orig_offset = stream->out_fd_offset;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	unsigned int relayd_hang_up = 0;

	/* RCU lock for the relayd pointer */
	rcu_read_lock();
	assert(stream->net_seq_idx != (uint64_t) -1ULL ||
			stream->trace_chunk);

	/* Flag that the current stream if set for network streaming. */
	if (stream->net_seq_idx != (uint64_t) -1ULL) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			ret = -EPIPE;
			goto end;
		}
	}

	/* Handle stream on the relayd if the output is on the network */
	if (relayd) {
		unsigned long netlen = len;

		/*
		 * Lock the control socket for the complete duration of the function
		 * since from this point on we will use the socket.
		 */
		if (stream->metadata_flag) {
			/* Metadata requires the control socket. */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			if (stream->reset_metadata_flag) {
				ret = relayd_reset_metadata(&relayd->control_sock,
						stream->relayd_stream_id,
						stream->metadata_version);
				if (ret < 0) {
					relayd_hang_up = 1;
					goto write_error;
				}
				stream->reset_metadata_flag = 0;
			}
			netlen += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, netlen, padding, relayd);
		if (ret < 0) {
			relayd_hang_up = 1;
			goto write_error;
		}
		/* Use the returned socket. */
		outfd = ret;

		/* Write metadata stream id before payload */
		if (stream->metadata_flag) {
			ret = write_relayd_metadata_id(outfd, stream, padding);
			if (ret < 0) {
				relayd_hang_up = 1;
				goto write_error;
			}
		}
	} else {
		/* No streaming, we have to set the len with the full padding */
		len += padding;

		if (stream->metadata_flag && stream->reset_metadata_flag) {
			ret = utils_truncate_stream_file(stream->out_fd, 0);
			if (ret < 0) {
				ERR("Reset metadata file");
				goto end;
			}
			stream->reset_metadata_flag = 0;
		}

		/*
		 * Check if we need to change the tracefile before writing the packet.
		 */
		if (stream->chan->tracefile_size > 0 &&
				(stream->tracefile_size_current + len) >
				stream->chan->tracefile_size) {
			ret = consumer_stream_rotate_output_files(stream);
			if (ret) {
				goto end;
			}
			outfd = stream->out_fd;
			orig_offset = 0;
		}
		stream->tracefile_size_current += len;
		if (index) {
			index->offset = htobe64(stream->out_fd_offset);
		}
	}

	/*
	 * This call guarantee that len or less is returned. It's impossible to
	 * receive a ret value that is bigger than len.
	 */
	ret = lttng_write(outfd, buffer, len);
	DBG("Consumer mmap write() ret %zd (len %lu)", ret, len);
	if (ret < 0 || ((size_t) ret != len)) {
		/*
		 * Report error to caller if nothing was written else at least send the
		 * amount written.
		 */
		if (ret < 0) {
			ret = -errno;
		}
		relayd_hang_up = 1;

		/* Socket operation failed. We consider the relayd dead */
		if (errno == EPIPE) {
			/*
			 * This is possible if the fd is closed on the other side
			 * (outfd) or any write problem. It can be verbose a bit for a
			 * normal execution if for instance the relayd is stopped
			 * abruptly. This can happen so set this to a DBG statement.
			 */
			DBG("Consumer mmap write detected relayd hang up");
		} else {
			/* Unhandled error, print it and stop function right now. */
			PERROR("Error in write mmap (ret %zd != len %lu)", ret, len);
		}
		goto write_error;
	}
	stream->output_written += ret;

	/* This call is useless on a socket so better save a syscall. */
	if (!relayd) {
		/* This won't block, but will start writeout asynchronously */
		lttng_sync_file_range(outfd, stream->out_fd_offset, len,
				SYNC_FILE_RANGE_WRITE);
		stream->out_fd_offset += len;
		lttng_consumer_sync_trace_file(stream, orig_offset);
	}

write_error:
	/*
	 * This is a special case that the relayd has closed its socket. Let's
	 * cleanup the relayd object and all associated streams.
	 */
	if (relayd && relayd_hang_up) {
		ERR("Relayd hangup. Cleaning up relayd %" PRIu64".", relayd->net_seq_idx);
		lttng_consumer_cleanup_relayd(relayd);
	}

end:
	/* Unlock only if ctrl socket used */
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return ret;
}
/*
 * Splice the data from the ring buffer to the tracefile.
 *
 * It must be called with the stream lock held.
 *
 * Returns the number of bytes spliced.
 */
ssize_t lttng_consumer_on_read_subbuffer_splice(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len,
		unsigned long padding,
		struct ctf_packet_index *index)
{
	ssize_t ret = 0, written = 0, ret_splice = 0;
	loff_t offset = 0;
	off_t orig_offset = stream->out_fd_offset;
	int fd = stream->wait_fd;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	int *splice_pipe;
	unsigned int relayd_hang_up = 0;

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/* Not supported for user space tracing */
		return -ENOSYS;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	/* RCU lock for the relayd pointer */
	rcu_read_lock();

	/* Flag that the current stream if set for network streaming. */
	if (stream->net_seq_idx != (uint64_t) -1ULL) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			written = -ret;
			goto end;
		}
	}
	splice_pipe = stream->splice_pipe;

	/* Write metadata stream id before payload */
	if (relayd) {
		unsigned long total_len = len;

		if (stream->metadata_flag) {
			/*
			 * Lock the control socket for the complete duration of the function
			 * since from this point on we will use the socket.
			 */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);

			if (stream->reset_metadata_flag) {
				ret = relayd_reset_metadata(&relayd->control_sock,
						stream->relayd_stream_id,
						stream->metadata_version);
				if (ret < 0) {
					relayd_hang_up = 1;
					goto write_error;
				}
				stream->reset_metadata_flag = 0;
			}
			ret = write_relayd_metadata_id(splice_pipe[1], stream,
					padding);
			if (ret < 0) {
				written = ret;
				relayd_hang_up = 1;
				goto write_error;
			}

			total_len += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, total_len, padding, relayd);
		if (ret < 0) {
			written = ret;
			relayd_hang_up = 1;
			goto write_error;
		}
		/* Use the returned socket. */
		outfd = ret;
	} else {
		/* No streaming, we have to set the len with the full padding */
		len += padding;

		if (stream->metadata_flag && stream->reset_metadata_flag) {
			ret = utils_truncate_stream_file(stream->out_fd, 0);
			if (ret < 0) {
				ERR("Reset metadata file");
				goto end;
			}
			stream->reset_metadata_flag = 0;
		}
		/*
		 * Check if we need to change the tracefile before writing the packet.
		 */
		if (stream->chan->tracefile_size > 0 &&
				(stream->tracefile_size_current + len) >
				stream->chan->tracefile_size) {
			ret = consumer_stream_rotate_output_files(stream);
			if (ret < 0) {
				written = ret;
				goto end;
			}
			outfd = stream->out_fd;
			orig_offset = 0;
		}
		stream->tracefile_size_current += len;
		index->offset = htobe64(stream->out_fd_offset);
	}

	while (len > 0) {
		DBG("splice chan to pipe offset %lu of len %lu (fd : %d, pipe: %d)",
				(unsigned long)offset, len, fd, splice_pipe[1]);
		ret_splice = splice(fd, &offset, splice_pipe[1], NULL, len,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice chan to pipe, ret %zd", ret_splice);
		if (ret_splice < 0) {
			ret = errno;
			written = -ret;
			PERROR("Error in relay splice");
			goto splice_error;
		}

		/* Handle stream on the relayd if the output is on the network */
		if (relayd && stream->metadata_flag) {
			size_t metadata_payload_size =
				sizeof(struct lttcomm_relayd_metadata_payload);

			/* Update counter to fit the spliced data */
			ret_splice += metadata_payload_size;
			len += metadata_payload_size;
			/*
			 * We do this so the return value can match the len passed as
			 * argument to this function.
			 */
			written -= metadata_payload_size;
		}

		/* Splice data out */
		ret_splice = splice(splice_pipe[0], NULL, outfd, NULL,
				ret_splice, SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("Consumer splice pipe to file (out_fd: %d), ret %zd",
				outfd, ret_splice);
		if (ret_splice < 0) {
			ret = errno;
			written = -ret;
			relayd_hang_up = 1;
			goto write_error;
		} else if (ret_splice > len) {
			/*
			 * We don't expect this code path to be executed but you never know
			 * so this is an extra protection against a buggy splice().
			 */
			ret = errno;
			written += ret_splice;
			PERROR("Wrote more data than requested %zd (len: %lu)", ret_splice,
					len);
			goto splice_error;
		} else {
			/* All good, update current len and continue. */
			len -= ret_splice;
		}

		/* This call is useless on a socket so better save a syscall. */
		if (!relayd) {
			/* This won't block, but will start writeout asynchronously */
			lttng_sync_file_range(outfd, stream->out_fd_offset, ret_splice,
					SYNC_FILE_RANGE_WRITE);
			stream->out_fd_offset += ret_splice;
		}
		stream->output_written += ret_splice;
		written += ret_splice;
	}
	if (!relayd) {
		lttng_consumer_sync_trace_file(stream, orig_offset);
	}
	goto end;

write_error:
	/*
	 * This is a special case that the relayd has closed its socket. Let's
	 * cleanup the relayd object and all associated streams.
	 */
	if (relayd && relayd_hang_up) {
		ERR("Relayd hangup. Cleaning up relayd %" PRIu64".", relayd->net_seq_idx);
		lttng_consumer_cleanup_relayd(relayd);
		/* Skip splice error so the consumer does not fail */
		goto end;
	}

splice_error:
	/* send the appropriate error description to sessiond */
	switch (ret) {
	case EINVAL:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_EINVAL);
		break;
	case ENOMEM:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ENOMEM);
		break;
	case ESPIPE:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ESPIPE);
		break;
	}

end:
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return written;
}
/*
 * Sample the snapshot positions for a specific fd
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_sample_snapshot_positions(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_sample_snapshot_positions(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_sample_snapshot_positions(stream);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
/*
 * Take a snapshot for a specific fd
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_take_snapshot(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_take_snapshot(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_take_snapshot(stream);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
/*
 * Get the produced position
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_get_produced_snapshot(struct lttng_consumer_stream *stream,
		unsigned long *pos)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_get_produced_snapshot(stream, pos);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_get_produced_snapshot(stream, pos);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
2109 ERR("Unknown consumer_data type");
2116 * Get the consumed position (free-running counter position in bytes).
2118 * Returns 0 on success, < 0 on error
2120 int lttng_consumer_get_consumed_snapshot(struct lttng_consumer_stream
*stream
,
2123 switch (consumer_data
.type
) {
2124 case LTTNG_CONSUMER_KERNEL
:
2125 return lttng_kconsumer_get_consumed_snapshot(stream
, pos
);
2126 case LTTNG_CONSUMER32_UST
:
2127 case LTTNG_CONSUMER64_UST
:
2128 return lttng_ustconsumer_get_consumed_snapshot(stream
, pos
);
2130 ERR("Unknown consumer_data type");
int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx,
		int sock, struct pollfd *consumer_sockpoll)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
void lttng_consumer_close_all_metadata(void)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		/*
		 * The Kernel consumer has a different metadata scheme so we don't
		 * close anything because the stream will be closed by the session
		 * daemon.
		 */
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/*
		 * Close all metadata streams. The metadata hash table is passed and
		 * this call iterates over it by closing all wakeup fd. This is safe
		 * because at this point we are sure that the metadata producer is
		 * either dead or blocked.
		 */
		lttng_ustconsumer_close_all_metadata(metadata_ht);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}
}
/*
 * Clean up a metadata stream and free its memory.
 */
void consumer_del_metadata_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	struct lttng_consumer_channel *channel = NULL;
	bool free_channel = false;

	assert(stream);
	/*
	 * This call should NEVER receive regular stream. It must always be
	 * metadata stream and this is crucial for data structure synchronization.
	 */
	assert(stream->metadata_flag);

	DBG3("Consumer delete metadata stream %d", stream->wait_fd);

	pthread_mutex_lock(&consumer_data.lock);
	/*
	 * Note that this assumes that a stream's channel is never changed and
	 * that the stream's lock doesn't need to be taken to sample its
	 * channel.
	 */
	channel = stream->chan;
	pthread_mutex_lock(&channel->lock);
	pthread_mutex_lock(&stream->lock);
	if (channel->metadata_cache) {
		/* Only applicable to userspace consumers. */
		pthread_mutex_lock(&channel->metadata_cache->lock);
	}

	/* Remove any reference to that stream. */
	consumer_stream_delete(stream, ht);

	/* Close down everything including the relayd if one. */
	consumer_stream_close(stream);
	/* Destroy tracer buffers of the stream. */
	consumer_stream_destroy_buffers(stream);

	/* Atomically decrement channel refcount since other threads can use it. */
	if (!uatomic_sub_return(&channel->refcount, 1)
			&& !uatomic_read(&channel->nb_init_stream_left)) {
		/* Go for channel deletion! */
		free_channel = true;
	}
	stream->chan = NULL;

	/*
	 * Nullify the stream reference so it is not used after deletion. The
	 * channel lock MUST be acquired before being able to check for a NULL
	 * pointer value.
	 */
	channel->metadata_stream = NULL;

	if (channel->metadata_cache) {
		pthread_mutex_unlock(&channel->metadata_cache->lock);
	}
	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	if (free_channel) {
		consumer_del_channel(channel);
	}

	lttng_trace_chunk_put(stream->trace_chunk);
	stream->trace_chunk = NULL;
	consumer_stream_free(stream);
}
/*
 * Action done with the metadata stream when adding it to the consumer internal
 * data structures to handle it.
 */
void consumer_add_metadata_stream(struct lttng_consumer_stream *stream)
{
	struct lttng_ht *ht = metadata_ht;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;

	assert(stream);
	assert(ht);

	DBG3("Adding metadata stream %" PRIu64 " to hash table", stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->chan->timer_lock);
	pthread_mutex_lock(&stream->lock);

	/*
	 * From here, refcounts are updated so be _careful_ when returning an error
	 * after this point.
	 */

	rcu_read_lock();

	/*
	 * Lookup the stream just to make sure it does not exist in our internal
	 * state. This should NEVER happen.
	 */
	lttng_ht_lookup(ht, &stream->key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	assert(!node);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any action
	 * in terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of this
	 * stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		/* Increment refcount before decrementing nb_init_stream_left */
		uatomic_inc(&stream->chan->refcount);
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_u64(consumer_data.stream_per_chan_id_ht,
			&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to steal
	 * the key since the HT does not use it and we allow to add redundant keys
	 * into this table.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	rcu_read_unlock();

	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&stream->chan->timer_lock);
	pthread_mutex_unlock(&consumer_data.lock);
}
/*
 * Delete data streams that are flagged for deletion (endpoint_status).
 */
static void validate_endpoint_status_data_stream(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer delete flagged data stream");

	rcu_read_lock();
	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		/* Validate delete flag of the stream */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
			continue;
		}
		/* Delete it right now */
		consumer_del_stream(stream, data_ht);
	}
	rcu_read_unlock();
}

/*
 * Delete metadata streams that are flagged for deletion (endpoint_status).
 */
static void validate_endpoint_status_metadata_stream(
		struct lttng_poll_event *pollset)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer delete flagged metadata stream");

	assert(pollset);

	rcu_read_lock();
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		/* Validate delete flag of the stream */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
			continue;
		}
		/*
		 * Remove from pollset so the metadata thread can continue without
		 * blocking on a deleted stream.
		 */
		lttng_poll_del(pollset, stream->wait_fd);

		/* Delete it right now */
		consumer_del_metadata_stream(stream, metadata_ht);
	}
	rcu_read_unlock();
}

/*
 * Thread that polls on the metadata file descriptors and writes the metadata
 * to disk or to the network.
 */
void *consumer_thread_metadata_poll(void *data)
{
	int ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_consumer_stream *stream = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	ssize_t len;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_METADATA);

	if (testpoint(consumerd_thread_metadata)) {
		goto error_testpoint;
	}

	health_code_update();

	DBG("Thread metadata poll started");

	/* Size is set to 1 for the consumer_metadata pipe */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	ret = lttng_poll_add(&events,
			lttng_pipe_get_readfd(ctx->consumer_metadata_pipe), LPOLLIN);
	if (ret < 0) {
		goto end;
	}

	/* Main loop */
	DBG("Metadata main loop started");

	while (1) {
restart:
		health_code_update();
		health_poll_entry();
		DBG("Metadata poll wait");
		ret = lttng_poll_wait(&events, -1);
		DBG("Metadata poll return from wait with %d fd(s)",
				LTTNG_POLL_GETNB(&events));
		health_poll_exit();
		DBG("Metadata event caught in thread");
		if (ret < 0) {
			if (errno == EINTR) {
				ERR("Poll EINTR caught");
				goto restart;
			}
			if (LTTNG_POLL_GETNB(&events) == 0) {
				err = 0;	/* All is OK */
			}
			goto end;
		}

		nb_fd = ret;

		/* From here, the event is a metadata wait fd */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			if (pollfd == lttng_pipe_get_readfd(ctx->consumer_metadata_pipe)) {
				if (revents & LPOLLIN) {
					ssize_t pipe_len;

					pipe_len = lttng_pipe_read(ctx->consumer_metadata_pipe,
							&stream, sizeof(stream));
					if (pipe_len < sizeof(stream)) {
						if (pipe_len < 0) {
							PERROR("read metadata stream");
						}
						/*
						 * Remove the pipe from the poll set and continue the loop
						 * since there might be data to consume.
						 */
						lttng_poll_del(&events,
								lttng_pipe_get_readfd(ctx->consumer_metadata_pipe));
						lttng_pipe_read_close(ctx->consumer_metadata_pipe);
						continue;
					}

					/* A NULL stream means that the state has changed. */
					if (stream == NULL) {
						/* Check for deleted streams. */
						validate_endpoint_status_metadata_stream(&events);
						goto restart;
					}

					DBG("Adding metadata stream %d to poll set",
							stream->wait_fd);

					/* Add metadata stream to the global poll events list */
					lttng_poll_add(&events, stream->wait_fd,
							LPOLLIN | LPOLLPRI | LPOLLHUP);
				} else if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Metadata thread pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue the loop
					 * since there might be data to consume.
					 */
					lttng_poll_del(&events,
							lttng_pipe_get_readfd(ctx->consumer_metadata_pipe));
					lttng_pipe_read_close(ctx->consumer_metadata_pipe);
					continue;
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto end;
				}

				/* Handle other stream */
				continue;
			}

			rcu_read_lock();
			{
				uint64_t tmp_id = (uint64_t) pollfd;

				lttng_ht_lookup(metadata_ht, &tmp_id, &iter);
			}
			node = lttng_ht_iter_get_node_u64(&iter);
			assert(node);

			stream = caa_container_of(node, struct lttng_consumer_stream,
					node);

			if (revents & (LPOLLIN | LPOLLPRI)) {
				/* Get the data out of the metadata file descriptor */
				DBG("Metadata available on fd %d", pollfd);
				assert(stream->wait_fd == pollfd);

				do {
					health_code_update();

					len = ctx->on_buffer_ready(stream, ctx);
					/*
					 * We don't check the return value here since if we get
					 * a negative len, it means an error occurred thus we
					 * simply remove it from the poll set and free the
					 * stream.
					 */
				} while (len > 0);

				/* It's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean up stream from consumer and free it. */
					lttng_poll_del(&events, stream->wait_fd);
					consumer_del_metadata_stream(stream, metadata_ht);
				}
			} else if (revents & (LPOLLERR | LPOLLHUP)) {
				DBG("Metadata fd %d is hup|err.", pollfd);
				if (!stream->hangup_flush_done
						&& (consumer_data.type == LTTNG_CONSUMER32_UST
							|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
					DBG("Attempting to flush and consume the UST buffers");
					lttng_ustconsumer_on_stream_hangup(stream);

					/* We just flushed the stream now read it. */
					do {
						health_code_update();

						len = ctx->on_buffer_ready(stream, ctx);
						/*
						 * We don't check the return value here since if we get
						 * a negative len, it means an error occurred thus we
						 * simply remove it from the poll set and free the
						 * stream.
						 */
					} while (len > 0);
				}

				lttng_poll_del(&events, stream->wait_fd);
				/*
				 * This call updates the channel states, closes file
				 * descriptors and securely frees the stream.
				 */
				consumer_del_metadata_stream(stream, metadata_ht);
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				rcu_read_unlock();
				goto end;
			}
			/* Release RCU lock for the stream looked up */
			rcu_read_unlock();
		}
	}

	/* All is OK */
	err = 0;
end:
	DBG("Metadata poll thread exiting");

	lttng_poll_clean(&events);
end_poll:
error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);
	rcu_unregister_thread();
	return NULL;
}
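
/*
 * Illustration only (hypothetical caller, not part of this file): a stream
 * reaches the metadata poll thread above through the metadata pipe. The
 * sender writes the stream pointer, or NULL to signal a global state
 * change, which the loop above interprets accordingly:
 *
 *	struct lttng_consumer_stream *stream = ...;
 *
 *	(void) lttng_pipe_write(ctx->consumer_metadata_pipe,
 *			&stream, sizeof(stream));
 */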
/*
 * This thread polls the fds in the set to consume the data and write
 * it to tracefile if necessary.
 */
void *consumer_thread_data_poll(void *data)
{
	int num_rdy, num_hup, high_prio, ret, i, err = -1;
	struct pollfd *pollfd = NULL;
	/* local view of the streams */
	struct lttng_consumer_stream **local_stream = NULL, *new_stream = NULL;
	/* local view of consumer_data.fds_count */
	int nb_fd = 0;
	/* 2 for the consumer_data_pipe and wake up pipe */
	const int nb_pipes_fd = 2;
	/* Number of FDs with CONSUMER_ENDPOINT_INACTIVE but still open. */
	int nb_inactive_fd = 0;
	struct lttng_consumer_local_data *ctx = data;
	ssize_t len;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_DATA);

	if (testpoint(consumerd_thread_data)) {
		goto error_testpoint;
	}

	health_code_update();

	local_stream = zmalloc(sizeof(struct lttng_consumer_stream *));
	if (local_stream == NULL) {
		PERROR("local_stream malloc");
		goto end;
	}

	while (1) {
		health_code_update();

		high_prio = 0;
		num_hup = 0;

		/*
		 * the fds set has been updated, we need to update our
		 * local array as well
		 */
		pthread_mutex_lock(&consumer_data.lock);
		if (consumer_data.need_update) {
			free(pollfd);
			pollfd = NULL;

			free(local_stream);
			local_stream = NULL;

			/* Allocate for all fds */
			pollfd = zmalloc((consumer_data.stream_count + nb_pipes_fd) * sizeof(struct pollfd));
			if (pollfd == NULL) {
				PERROR("pollfd malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}

			local_stream = zmalloc((consumer_data.stream_count + nb_pipes_fd) *
					sizeof(struct lttng_consumer_stream *));
			if (local_stream == NULL) {
				PERROR("local_stream malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			ret = update_poll_array(ctx, &pollfd, local_stream,
					data_ht, &nb_inactive_fd);
			if (ret < 0) {
				ERR("Error in allocating pollfd or local_outfds");
				lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			nb_fd = ret;
			consumer_data.need_update = 0;
		}
		pthread_mutex_unlock(&consumer_data.lock);

		/* No FDs and consumer_quit; clean up the thread. */
		if (nb_fd == 0 && nb_inactive_fd == 0 &&
				CMM_LOAD_SHARED(consumer_quit) == 1) {
			err = 0;	/* All is OK */
			goto end;
		}
		/* poll on the array of fds */
	restart:
		DBG("polling on %d fd", nb_fd + nb_pipes_fd);
		if (testpoint(consumerd_thread_data_poll)) {
			goto end;
		}
		health_poll_entry();
		num_rdy = poll(pollfd, nb_fd + nb_pipes_fd, -1);
		health_poll_exit();
		DBG("poll num_rdy : %d", num_rdy);
		if (num_rdy == -1) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			PERROR("Poll error");
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
			goto end;
		} else if (num_rdy == 0) {
			DBG("Polling thread timed out");
			goto end;
		}

		if (caa_unlikely(data_consumption_paused)) {
			DBG("Data consumption paused, sleeping...");
			sleep(1);
			goto restart;
		}

		/*
		 * If the consumer_data_pipe triggered poll go directly to the
		 * beginning of the loop to update the array. We want to prioritize
		 * array update over low-priority reads.
		 */
		if (pollfd[nb_fd].revents & (POLLIN | POLLPRI)) {
			ssize_t pipe_readlen;

			DBG("consumer_data_pipe wake up");
			pipe_readlen = lttng_pipe_read(ctx->consumer_data_pipe,
					&new_stream, sizeof(new_stream));
			if (pipe_readlen < sizeof(new_stream)) {
				PERROR("Consumer data pipe");
				/* Continue so we can at least handle the current stream(s). */
				continue;
			}

			/*
			 * If the stream is NULL, just ignore it. It's also possible that
			 * the sessiond poll thread changed the consumer_quit state and is
			 * waking us up to test it.
			 */
			if (new_stream == NULL) {
				validate_endpoint_status_data_stream();
				continue;
			}

			/* Continue to update the local streams and handle prio ones */
			continue;
		}

		/* Handle wakeup pipe. */
		if (pollfd[nb_fd + 1].revents & (POLLIN | POLLPRI)) {
			char dummy;
			ssize_t pipe_readlen;

			pipe_readlen = lttng_pipe_read(ctx->consumer_wakeup_pipe, &dummy,
					sizeof(dummy));
			if (pipe_readlen < 0) {
				PERROR("Consumer data wakeup pipe");
			}
			/* We've been awakened to handle stream(s). */
			ctx->has_wakeup = 0;
		}

		/* Take care of high priority channels first. */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if (pollfd[i].revents & POLLPRI) {
				DBG("Urgent read on fd %d", pollfd[i].fd);
				high_prio = 1;
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* it's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean the stream and free it. */
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/*
		 * If we read high prio channel in this loop, try again
		 * for more high prio data.
		 */
		if (high_prio) {
			continue;
		}

		/* Take care of low priority channels. */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if ((pollfd[i].revents & POLLIN) ||
					local_stream[i]->hangup_flush_done ||
					local_stream[i]->has_data) {
				DBG("Normal read on fd %d", pollfd[i].fd);
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* it's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean the stream and free it. */
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/* Handle hangup and errors */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if (!local_stream[i]->hangup_flush_done
					&& (pollfd[i].revents & (POLLHUP | POLLERR | POLLNVAL))
					&& (consumer_data.type == LTTNG_CONSUMER32_UST
						|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
				DBG("fd %d is hup|err|nval. Attempting flush and read.",
						pollfd[i].fd);
				lttng_ustconsumer_on_stream_hangup(local_stream[i]);
				/* Attempt read again, for the data we just flushed. */
				local_stream[i]->data_read = 1;
			}
			/*
			 * If the poll flag is HUP/ERR/NVAL and we have
			 * read no data in this pass, we can remove the
			 * stream from its hash table.
			 */
			if ((pollfd[i].revents & POLLHUP)) {
				DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLERR) {
				ERR("Error returned in polling fd %d.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLNVAL) {
				ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			}
			if (local_stream[i] != NULL) {
				local_stream[i]->data_read = 0;
			}
		}
	}
	/* All is OK */
	err = 0;
end:
	DBG("polling thread exiting");
	free(pollfd);
	free(local_stream);

	/*
	 * Close the write side of the pipe so epoll_wait() in
	 * consumer_thread_metadata_poll can catch it. The thread is monitoring the
	 * read side of the pipe. If we close them both, epoll_wait strangely does
	 * not return and could create an endless wait period if the pipe is the
	 * only tracked fd in the poll set. The thread will take care of closing
	 * the read side.
	 */
	(void) lttng_pipe_write_close(ctx->consumer_metadata_pipe);

error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);

	rcu_unregister_thread();
	return NULL;
}

/*
 * Close wake-up end of each stream belonging to the channel. This will
 * allow the poll() on the stream read-side to detect when the
 * write-side (application) finally closes them.
 */
static void consumer_close_channel_streams(struct lttng_consumer_channel *channel)
{
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct lttng_ht_iter iter;

	ht = consumer_data.stream_per_chan_id_ht;

	rcu_read_lock();
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed),
			ht->match_fct, &channel->key,
			&iter.iter, stream, node_channel_id.node) {
		/*
		 * Protect against teardown with mutex.
		 */
		pthread_mutex_lock(&stream->lock);
		if (cds_lfht_is_node_deleted(&stream->node.node)) {
			goto next;
		}
		switch (consumer_data.type) {
		case LTTNG_CONSUMER_KERNEL:
			break;
		case LTTNG_CONSUMER32_UST:
		case LTTNG_CONSUMER64_UST:
			if (stream->metadata_flag) {
				/* Safe and protected by the stream lock. */
				lttng_ustconsumer_close_metadata(stream->chan);
			} else {
				/*
				 * Note: a mutex is taken internally within
				 * liblttng-ust-ctl to protect timer wakeup_fd
				 * use from concurrent close.
				 */
				lttng_ustconsumer_close_stream_wakeup(stream);
			}
			break;
		default:
			ERR("Unknown consumer_data type");
			assert(0);
		}
	next:
		pthread_mutex_unlock(&stream->lock);
	}
	rcu_read_unlock();
}

static void destroy_channel_ht(struct lttng_ht *ht)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, channel, wait_fd_node.node) {
		ret = lttng_ht_del(ht, &iter);
		assert(ret != 0);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}

/*
 * This thread polls the channel fds to detect when they are being
 * closed. It closes all related streams if the channel is detected as
 * closed. It is currently only used as a shim layer for UST because the
 * consumerd needs to keep the per-stream wakeup end of pipes open for
 * periodical flush.
 */
void *consumer_thread_channel_poll(void *data)
{
	int ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_consumer_channel *chan = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	struct lttng_ht *channel_ht;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_CHANNEL);

	if (testpoint(consumerd_thread_channel)) {
		goto error_testpoint;
	}

	health_code_update();

	channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!channel_ht) {
		/* ENOMEM at this point. Better to bail out. */
		goto end_ht;
	}

	DBG("Thread channel poll started");

	/* Size is set to 1 for the consumer_channel pipe */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	ret = lttng_poll_add(&events, ctx->consumer_channel_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto end;
	}

	/* Main loop */
	DBG("Channel main loop started");

	while (1) {
restart:
		health_code_update();
		DBG("Channel poll wait");
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		DBG("Channel poll return from wait with %d fd(s)",
				LTTNG_POLL_GETNB(&events));
		health_poll_exit();
		DBG("Channel event caught in thread");
		if (ret < 0) {
			if (errno == EINTR) {
				ERR("Poll EINTR caught");
				goto restart;
			}
			if (LTTNG_POLL_GETNB(&events) == 0) {
				err = 0;	/* All is OK */
			}
			goto end;
		}

		nb_fd = ret;

		/* From here, the event is a channel wait fd */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			if (pollfd == ctx->consumer_channel_pipe[0]) {
				if (revents & LPOLLIN) {
					enum consumer_channel_action action;
					uint64_t key;

					ret = read_channel_pipe(ctx, &chan, &key, &action);
					if (ret <= 0) {
						if (ret < 0) {
							ERR("Error reading channel pipe");
						}
						lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
						continue;
					}

					switch (action) {
					case CONSUMER_CHANNEL_ADD:
						DBG("Adding channel %d to poll set",
								chan->wait_fd);

						lttng_ht_node_init_u64(&chan->wait_fd_node,
								chan->wait_fd);
						rcu_read_lock();
						lttng_ht_add_unique_u64(channel_ht,
								&chan->wait_fd_node);
						rcu_read_unlock();
						/* Add channel to the global poll events list */
						lttng_poll_add(&events, chan->wait_fd,
								LPOLLERR | LPOLLHUP);
						break;
					case CONSUMER_CHANNEL_DEL:
					{
						/*
						 * This command should never be called if the channel
						 * has streams monitored by either the data or metadata
						 * thread. The consumer only notifies this thread with
						 * a channel delete command if it receives a destroy
						 * channel command from the session daemon, which sends
						 * it when a command prior to GET_CHANNEL failed.
						 */

						rcu_read_lock();
						chan = consumer_find_channel(key);
						if (!chan) {
							rcu_read_unlock();
							ERR("UST consumer get channel key %" PRIu64 " not found for del channel", key);
							break;
						}
						lttng_poll_del(&events, chan->wait_fd);
						iter.iter.node = &chan->wait_fd_node.node;
						ret = lttng_ht_del(channel_ht, &iter);
						assert(ret == 0);

						switch (consumer_data.type) {
						case LTTNG_CONSUMER_KERNEL:
							break;
						case LTTNG_CONSUMER32_UST:
						case LTTNG_CONSUMER64_UST:
							health_code_update();
							/* Destroy streams that might have been left in the stream list. */
							clean_channel_stream_list(chan);
							break;
						default:
							ERR("Unknown consumer_data type");
							assert(0);
						}

						/*
						 * Release our own refcount. Force channel deletion even if
						 * streams were not initialized.
						 */
						if (!uatomic_sub_return(&chan->refcount, 1)) {
							consumer_del_channel(chan);
						}
						rcu_read_unlock();
						goto restart;
					}
					case CONSUMER_CHANNEL_QUIT:
						/*
						 * Remove the pipe from the poll set and continue the loop
						 * since there might be data to consume.
						 */
						lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
						continue;
					default:
						ERR("Unknown action");
						break;
					}
				} else if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Channel thread pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue the loop
					 * since there might be data to consume.
					 */
					lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
					continue;
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto end;
				}

				/* Handle other stream */
				continue;
			}

			rcu_read_lock();
			{
				uint64_t tmp_id = (uint64_t) pollfd;

				lttng_ht_lookup(channel_ht, &tmp_id, &iter);
			}
			node = lttng_ht_iter_get_node_u64(&iter);
			assert(node);

			chan = caa_container_of(node, struct lttng_consumer_channel,
					wait_fd_node);

			/* Check for error event */
			if (revents & (LPOLLERR | LPOLLHUP)) {
				DBG("Channel fd %d is hup|err.", pollfd);

				lttng_poll_del(&events, chan->wait_fd);
				ret = lttng_ht_del(channel_ht, &iter);
				assert(ret == 0);

				/*
				 * This will close the wait fd for each stream associated to
				 * this channel AND monitored by the data/metadata thread thus
				 * will be clean by the right thread.
				 */
				consumer_close_channel_streams(chan);

				/* Release our own refcount */
				if (!uatomic_sub_return(&chan->refcount, 1)
						&& !uatomic_read(&chan->nb_init_stream_left)) {
					consumer_del_channel(chan);
				}
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				rcu_read_unlock();
				goto end;
			}

			/* Release RCU lock for the channel looked up */
			rcu_read_unlock();
		}
	}

	/* All is OK */
	err = 0;
end:
	lttng_poll_clean(&events);
end_poll:
	destroy_channel_ht(channel_ht);
end_ht:
error_testpoint:
	DBG("Channel poll thread exiting");
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);
	rcu_unregister_thread();
	return NULL;
}

static int set_metadata_socket(struct lttng_consumer_local_data *ctx,
		struct pollfd *sockpoll, int client_socket)
{
	int ret;

	assert(ctx);
	assert(sockpoll);

	ret = lttng_consumer_poll_socket(sockpoll);
	if (ret) {
		goto error;
	}
	DBG("Metadata connection on client_socket");

	/* Blocking call, waiting for transmission */
	ctx->consumer_metadata_socket = lttcomm_accept_unix_sock(client_socket);
	if (ctx->consumer_metadata_socket < 0) {
		WARN("On accept metadata");
		ret = -1;
		goto error;
	}
	ret = 0;

error:
	return ret;
}

/*
 * This thread listens on the consumerd socket and receives the file
 * descriptors from the session daemon.
 */
void *consumer_thread_sessiond_poll(void *data)
{
	int sock = -1, client_socket, ret, err = -1;
	/*
	 * Structure to poll for incoming data on the communication socket;
	 * this avoids blocking on the socket.
	 */
	struct pollfd consumer_sockpoll[2];
	struct lttng_consumer_local_data *ctx = data;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_SESSIOND);

	if (testpoint(consumerd_thread_sessiond)) {
		goto error_testpoint;
	}

	health_code_update();

	DBG("Creating command socket %s", ctx->consumer_command_sock_path);
	unlink(ctx->consumer_command_sock_path);
	client_socket = lttcomm_create_unix_sock(ctx->consumer_command_sock_path);
	if (client_socket < 0) {
		ERR("Cannot create command socket");
		goto end;
	}

	ret = lttcomm_listen_unix_sock(client_socket);
	if (ret < 0) {
		goto end;
	}

	DBG("Sending ready command to lttng-sessiond");
	ret = lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_COMMAND_SOCK_READY);
	/* return < 0 on error, but == 0 is not fatal */
	if (ret < 0) {
		ERR("Error sending ready command to lttng-sessiond");
		goto end;
	}

	/* prepare the FDs to poll : to client socket and the should_quit pipe */
	consumer_sockpoll[0].fd = ctx->consumer_should_quit[0];
	consumer_sockpoll[0].events = POLLIN | POLLPRI;
	consumer_sockpoll[1].fd = client_socket;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	ret = lttng_consumer_poll_socket(consumer_sockpoll);
	if (ret) {
		if (ret > 0) {
			/* should exit */
			err = 0;
		}
		goto end;
	}
	DBG("Connection on client_socket");

	/* Blocking call, waiting for transmission */
	sock = lttcomm_accept_unix_sock(client_socket);
	if (sock < 0) {
		WARN("On accept");
		goto end;
	}

	/*
	 * Setup metadata socket which is the second socket connection on the
	 * command unix socket.
	 */
	ret = set_metadata_socket(ctx, consumer_sockpoll, client_socket);
	if (ret) {
		if (ret > 0) {
			/* should exit */
			err = 0;
		}
		goto end;
	}

	/* This socket is not useful anymore. */
	ret = close(client_socket);
	if (ret < 0) {
		PERROR("close client_socket");
	}
	client_socket = -1;

	/* update the polling structure to poll on the established socket */
	consumer_sockpoll[1].fd = sock;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	while (1) {
		health_code_update();

		health_poll_entry();
		ret = lttng_consumer_poll_socket(consumer_sockpoll);
		health_poll_exit();
		if (ret) {
			if (ret > 0) {
				/* should exit */
				err = 0;
			}
			goto end;
		}
		DBG("Incoming command on sock");
		ret = lttng_consumer_recv_cmd(ctx, sock, consumer_sockpoll);
		if (ret <= 0) {
			/*
			 * This could simply be a session daemon quitting. Don't output
			 * ERR() here.
			 */
			DBG("Communication interrupted on command socket");
			err = 0;
			goto end;
		}
		if (CMM_LOAD_SHARED(consumer_quit)) {
			DBG("consumer_thread_receive_fds received quit from signal");
			err = 0;	/* All is OK */
			goto end;
		}
		DBG("received command on sock");
	}
	/* All is OK */
	err = 0;

end:
	DBG("Consumer thread sessiond poll exiting");

	/*
	 * Close metadata streams since the producer is the session daemon which
	 * is dead.
	 *
	 * NOTE: for now, this only applies to the UST tracer.
	 */
	lttng_consumer_close_all_metadata();

	/*
	 * when all fds have hung up, the polling thread
	 * can exit cleanly
	 */
	CMM_STORE_SHARED(consumer_quit, 1);

	/*
	 * Notify the data poll thread to poll back again and test the
	 * consumer_quit state that we just set so to quit gracefully.
	 */
	notify_thread_lttng_pipe(ctx->consumer_data_pipe);

	notify_channel_pipe(ctx, NULL, -1, CONSUMER_CHANNEL_QUIT);

	notify_health_quit_pipe(health_quit_pipe);

	/* Cleaning up possibly open sockets. */
	if (sock >= 0) {
		ret = close(sock);
		if (ret < 0) {
			PERROR("close sock sessiond poll");
		}
	}
	if (client_socket >= 0) {
		ret = close(client_socket);
		if (ret < 0) {
			PERROR("close client_socket sessiond poll");
		}
	}

error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);
	rcu_unregister_thread();
	return NULL;
}

ssize_t lttng_consumer_read_subbuffer(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	ssize_t ret;

	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->lock);
	if (stream->metadata_flag) {
		pthread_mutex_lock(&stream->metadata_rdv_lock);
	}

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		ret = lttng_kconsumer_read_subbuffer(stream, ctx);
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		ret = lttng_ustconsumer_read_subbuffer(stream, ctx);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		ret = -ENOSYS;
		break;
	}

	if (stream->metadata_flag) {
		pthread_cond_broadcast(&stream->metadata_rdv);
		pthread_mutex_unlock(&stream->metadata_rdv_lock);
	}
	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->lock);

	return ret;
}
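
/*
 * Note (descriptive, based on the code above): waiters blocked on
 * stream->metadata_rdv are woken after every metadata read attempt,
 * whether or not data was actually consumed; they are expected to
 * re-check their own predicate while holding metadata_rdv_lock.
 */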
int lttng_consumer_on_recv_stream(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_on_recv_stream(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_on_recv_stream(stream);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}

/*
 * Allocate and set consumer data hash tables.
 */
int lttng_consumer_init(void)
{
	consumer_data.channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.channel_ht) {
		goto error;
	}

	consumer_data.channels_by_session_id_ht =
			lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.channels_by_session_id_ht) {
		goto error;
	}

	consumer_data.relayd_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.relayd_ht) {
		goto error;
	}

	consumer_data.stream_list_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.stream_list_ht) {
		goto error;
	}

	consumer_data.stream_per_chan_id_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.stream_per_chan_id_ht) {
		goto error;
	}

	data_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!data_ht) {
		goto error;
	}

	metadata_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!metadata_ht) {
		goto error;
	}

	consumer_data.chunk_registry = lttng_trace_chunk_registry_create();
	if (!consumer_data.chunk_registry) {
		goto error;
	}

	return 0;

error:
	return -1;
}
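
/*
 * Illustration only: the hash tables allocated above are dereferenced by
 * the consumer threads, so lttng_consumer_init() is expected to run before
 * any of them is spawned. A hypothetical start-up sketch:
 *
 *	if (lttng_consumer_init() < 0) {
 *		return EXIT_FAILURE;
 *	}
 *	ret = pthread_create(&metadata_thread, NULL,
 *			consumer_thread_metadata_poll, ctx);
 */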
/*
 * Process the ADD_RELAYD command received by a consumer.
 *
 * This will create a relayd socket pair and add it to the relayd hash table.
 * The caller MUST acquire a RCU read side lock before calling it.
 */
void consumer_add_relayd_socket(uint64_t net_seq_idx, int sock_type,
		struct lttng_consumer_local_data *ctx, int sock,
		struct pollfd *consumer_sockpoll,
		struct lttcomm_relayd_sock *relayd_sock, uint64_t sessiond_id,
		uint64_t relayd_session_id)
{
	int fd = -1, ret = -1, relayd_created = 0;
	enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct consumer_relayd_sock_pair *relayd = NULL;

	assert(ctx);
	assert(relayd_sock);

	DBG("Consumer adding relayd socket (idx: %" PRIu64 ")", net_seq_idx);

	/* Get relayd reference if exists. */
	relayd = consumer_find_relayd(net_seq_idx);
	if (relayd == NULL) {
		assert(sock_type == LTTNG_STREAM_CONTROL);
		/* Not found. Allocate one. */
		relayd = consumer_allocate_relayd_sock_pair(net_seq_idx);
		if (relayd == NULL) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		}
		relayd->sessiond_session_id = sessiond_id;
		relayd_created = 1;

		/*
		 * This code path MUST continue to the consumer send status message so
		 * we can notify the session daemon and continue our work without
		 * killing everything.
		 */
	} else {
		/*
		 * relayd key should never be found for control socket.
		 */
		assert(sock_type != LTTNG_STREAM_CONTROL);
	}

	/* First send a status message before receiving the fds. */
	ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
	if (ret < 0) {
		/* Somehow, the session daemon is not responding anymore. */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
		goto error_nosignal;
	}

	/* Poll on consumer socket. */
	ret = lttng_consumer_poll_socket(consumer_sockpoll);
	if (ret) {
		/* Needing to exit in the middle of a command: error. */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
		goto error_nosignal;
	}

	/* Get relayd socket from session daemon */
	ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
	if (ret != sizeof(fd)) {
		fd = -1;	/* Just in case it gets set with an invalid value. */

		/*
		 * Failing to receive FDs might indicate a major problem such as
		 * reaching a fd limit during the receive where the kernel returns a
		 * MSG_CTRUNC and fails to cleanup the fd in the queue. In any case, we
		 * don't take any chances and stop everything.
		 *
		 * XXX: Feature request #558 will fix that and avoid this possible
		 * issue when reaching the fd limit.
		 */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
		ret_code = LTTCOMM_CONSUMERD_ERROR_RECV_FD;
		goto error;
	}

	/* Copy socket information and received FD */
	switch (sock_type) {
	case LTTNG_STREAM_CONTROL:
		/* Copy received lttcomm socket */
		lttcomm_copy_sock(&relayd->control_sock.sock, &relayd_sock->sock);
		ret = lttcomm_create_sock(&relayd->control_sock.sock);
		/* Handle create_sock error. */
		if (ret < 0) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		}
		/*
		 * Close the socket created internally by
		 * lttcomm_create_sock, so we can replace it by the one
		 * received from sessiond.
		 */
		if (close(relayd->control_sock.sock.fd)) {
			PERROR("close");
		}

		/* Assign new file descriptor */
		relayd->control_sock.sock.fd = fd;
		/* Assign version values. */
		relayd->control_sock.major = relayd_sock->major;
		relayd->control_sock.minor = relayd_sock->minor;

		relayd->relayd_session_id = relayd_session_id;

		break;
	case LTTNG_STREAM_DATA:
		/* Copy received lttcomm socket */
		lttcomm_copy_sock(&relayd->data_sock.sock, &relayd_sock->sock);
		ret = lttcomm_create_sock(&relayd->data_sock.sock);
		/* Handle create_sock error. */
		if (ret < 0) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		}
		/*
		 * Close the socket created internally by
		 * lttcomm_create_sock, so we can replace it by the one
		 * received from sessiond.
		 */
		if (close(relayd->data_sock.sock.fd)) {
			PERROR("close");
		}

		/* Assign new file descriptor */
		relayd->data_sock.sock.fd = fd;
		/* Assign version values. */
		relayd->data_sock.major = relayd_sock->major;
		relayd->data_sock.minor = relayd_sock->minor;
		break;
	default:
		ERR("Unknown relayd socket type (%d)", sock_type);
		ret_code = LTTCOMM_CONSUMERD_FATAL;
		goto error;
	}

	DBG("Consumer %s socket created successfully with net idx %" PRIu64 " (fd: %d)",
			sock_type == LTTNG_STREAM_CONTROL ? "control" : "data",
			relayd->net_seq_idx, fd);
	/*
	 * We gave the ownership of the fd to the relayd structure. Set the
	 * fd to -1 so we don't call close() on it in the error path below.
	 */
	fd = -1;

	/* We successfully added the socket. Send status back. */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0) {
		/* Somehow, the session daemon is not responding anymore. */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
		goto error_nosignal;
	}

	/*
	 * Add relayd socket pair to consumer data hashtable. If object already
	 * exists or on error, the function gracefully returns.
	 */
	add_relayd(relayd);

	/* All good! */
	return;

error:
	if (consumer_send_status_msg(sock, ret_code) < 0) {
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
	}

error_nosignal:
	/* Close received socket if valid. */
	if (fd >= 0) {
		if (close(fd)) {
			PERROR("close received socket");
		}
	}

	if (relayd_created) {
		free(relayd);
	}
}

/*
 * Search for a relayd associated to the session id and return the reference.
 *
 * A RCU read side lock MUST be acquired before calling this function and held
 * until the relayd object is no longer necessary.
 */
static struct consumer_relayd_sock_pair *find_relayd_by_session_id(uint64_t id)
{
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Iterate over all relayd since they are indexed by net_seq_idx. */
	cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
			node.node) {
		/*
		 * Check by sessiond id which is unique here where the relayd session
		 * id might not be when having multiple relayd.
		 */
		if (relayd->sessiond_session_id == id) {
			/* Found the relayd. There can be only one per id. */
			goto found;
		}
	}

	return NULL;

found:
	return relayd;
}

/*
 * Check if, for a given session id, there is still data to be extracted from
 * the buffers.
 *
 * Return 1 if data is pending or else 0 meaning ready to be read.
 */
int consumer_data_pending(uint64_t id)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct consumer_relayd_sock_pair *relayd = NULL;
	int (*data_pending)(struct lttng_consumer_stream *);

	DBG("Consumer data pending command on session id %" PRIu64, id);

	rcu_read_lock();
	pthread_mutex_lock(&consumer_data.lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		data_pending = lttng_kconsumer_data_pending;
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		data_pending = lttng_ustconsumer_data_pending;
		break;
	default:
		ERR("Unknown consumer data type");
		assert(0);
	}

	/* Ease our life a bit */
	ht = consumer_data.stream_list_ht;

	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&id, lttng_ht_seed),
			ht->match_fct, &id,
			&iter.iter, stream, node_session_id.node) {
		pthread_mutex_lock(&stream->lock);

		/*
		 * A removed node from the hash table indicates that the stream has
		 * been deleted thus having a guarantee that the buffers are closed
		 * on the consumer side. However, data can still be transmitted
		 * over the network so don't skip the relayd check.
		 */
		ret = cds_lfht_is_node_deleted(&stream->node.node);
		if (!ret) {
			/* Check the stream if there is data in the buffers. */
			ret = data_pending(stream);
			if (ret == 1) {
				pthread_mutex_unlock(&stream->lock);
				goto data_pending;
			}
		}

		pthread_mutex_unlock(&stream->lock);
	}

	relayd = find_relayd_by_session_id(id);
	if (relayd) {
		unsigned int is_data_inflight = 0;

		/* Send init command for data pending. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_begin_data_pending(&relayd->control_sock,
				relayd->relayd_session_id);
		if (ret < 0) {
			pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
			/* Communication error with the relayd; assume no data is pending. */
			goto data_not_pending;
		}

		cds_lfht_for_each_entry_duplicate(ht->ht,
				ht->hash_fct(&id, lttng_ht_seed),
				ht->match_fct, &id,
				&iter.iter, stream, node_session_id.node) {
			if (stream->metadata_flag) {
				ret = relayd_quiescent_control(&relayd->control_sock,
						stream->relayd_stream_id);
			} else {
				ret = relayd_data_pending(&relayd->control_sock,
						stream->relayd_stream_id,
						stream->next_net_seq_num - 1);
			}

			if (ret == 1) {
				pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
				goto data_pending;
			} else if (ret < 0) {
				ERR("Relayd data pending failed. Cleaning up relayd %" PRIu64 ".", relayd->net_seq_idx);
				lttng_consumer_cleanup_relayd(relayd);
				pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
				goto data_not_pending;
			}
		}

		/* Send end command for data pending. */
		ret = relayd_end_data_pending(&relayd->control_sock,
				relayd->relayd_session_id, &is_data_inflight);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			ERR("Relayd end data pending failed. Cleaning up relayd %" PRIu64 ".", relayd->net_seq_idx);
			lttng_consumer_cleanup_relayd(relayd);
			goto data_not_pending;
		}
		if (is_data_inflight) {
			goto data_pending;
		}
	}

	/*
	 * Finding _no_ node in the hash table and no inflight data means that the
	 * stream(s) have been removed thus data is guaranteed to be available for
	 * analysis from the trace files.
	 */

data_not_pending:
	/* Data is available to be read by a viewer. */
	pthread_mutex_unlock(&consumer_data.lock);
	rcu_read_unlock();
	return 0;

data_pending:
	/* Data is still being extracted from buffers. */
	pthread_mutex_unlock(&consumer_data.lock);
	rcu_read_unlock();
	return 1;
}
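
/*
 * Illustration only: the relayd exchange performed above is a
 * begin/check/end bracket, serialized on ctrl_sock_mutex:
 *
 *	relayd_begin_data_pending(&relayd->control_sock, relayd_session_id);
 *	for each stream of the session:
 *		metadata ? relayd_quiescent_control(...)
 *			 : relayd_data_pending(..., next_net_seq_num - 1);
 *	relayd_end_data_pending(&relayd->control_sock, relayd_session_id,
 *			&is_data_inflight);
 *
 * A return of 1 from any per-stream check, or a non-zero
 * is_data_inflight, means the session still has data in flight.
 */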
/*
 * Send a ret code status message to the sessiond daemon.
 *
 * Return the sendmsg() return value.
 */
int consumer_send_status_msg(int sock, int ret_code)
{
	struct lttcomm_consumer_status_msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.ret_code = ret_code;

	return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
}

/*
 * Send a channel status message to the sessiond daemon.
 *
 * Return the sendmsg() return value.
 */
int consumer_send_status_channel(int sock,
		struct lttng_consumer_channel *channel)
{
	struct lttcomm_consumer_status_channel msg;

	assert(sock >= 0);

	memset(&msg, 0, sizeof(msg));
	if (!channel) {
		msg.ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
	} else {
		msg.ret_code = LTTCOMM_CONSUMERD_SUCCESS;
		msg.key = channel->key;
		msg.stream_count = channel->streams.count;
	}

	return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
}

unsigned long consumer_get_consume_start_pos(unsigned long consumed_pos,
		unsigned long produced_pos, uint64_t nb_packets_per_stream,
		uint64_t max_sb_size)
{
	unsigned long start_pos;

	if (!nb_packets_per_stream) {
		return consumed_pos;	/* Grab everything */
	}
	start_pos = produced_pos - offset_align_floor(produced_pos, max_sb_size);
	start_pos -= max_sb_size * nb_packets_per_stream;
	if ((long) (start_pos - consumed_pos) < 0) {
		return consumed_pos;	/* Grab everything */
	}
	return start_pos;
}
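
/*
 * Worked example for the function above (illustrative values only): with
 * max_sb_size = 4096, nb_packets_per_stream = 2, produced_pos = 10000 and
 * consumed_pos = 0, the producer position aligns down to 8192, so
 * start_pos = 8192 - (2 * 4096) = 0. Since start_pos - consumed_pos is
 * not negative, the snapshot starts at position 0: the last two complete
 * packets before the producer position.
 */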
int consumer_flush_buffer(struct lttng_consumer_stream *stream, int producer_active)
{
	int ret = 0;

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		if (producer_active) {
			ret = kernctl_buffer_flush(stream->wait_fd);
			if (ret < 0) {
				ERR("Failed to flush kernel stream");
				goto end;
			}
		} else {
			ret = kernctl_buffer_flush_empty(stream->wait_fd);
			if (ret < 0) {
				/*
				 * Doing a buffer flush which does not take into
				 * account empty packets. This is not perfect,
				 * but required as a fall-back when
				 * "flush_empty" is not implemented by
				 * lttng-modules.
				 */
				ret = kernctl_buffer_flush(stream->wait_fd);
				if (ret < 0) {
					ERR("Failed to flush kernel stream");
					goto end;
				}
			}
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_flush_buffer(stream, producer_active);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

end:
	return ret;
}

/*
 * Sample the rotate position for all the streams of a channel. If a stream
 * is already at the rotate position (produced == consumed), we flag it as
 * ready for rotation. The rotation of ready streams occurs after we have
 * replied to the session daemon that we have finished sampling the positions.
 * Must be called with RCU read-side lock held to ensure existence of channel.
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_rotate_channel(struct lttng_consumer_channel *channel,
		uint64_t key, uint64_t relayd_id, uint32_t metadata,
		struct lttng_consumer_local_data *ctx)
{
	int ret;
	struct lttng_consumer_stream *stream;
	struct lttng_ht_iter iter;
	struct lttng_ht *ht = consumer_data.stream_per_chan_id_ht;
	struct lttng_dynamic_array stream_rotation_positions;
	uint64_t next_chunk_id, stream_count = 0;
	enum lttng_trace_chunk_status chunk_status;
	const bool is_local_trace = relayd_id == -1ULL;
	struct consumer_relayd_sock_pair *relayd = NULL;
	bool rotating_to_new_chunk = true;

	DBG("Consumer sample rotate position for channel %" PRIu64, key);

	lttng_dynamic_array_init(&stream_rotation_positions,
			sizeof(struct relayd_stream_rotation_position), NULL);

	rcu_read_lock();

	pthread_mutex_lock(&channel->lock);
	assert(channel->trace_chunk);
	chunk_status = lttng_trace_chunk_get_id(channel->trace_chunk,
			&next_chunk_id);
	if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
		ret = -1;
		goto end_unlock_channel;
	}

	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed),
			ht->match_fct, &channel->key, &iter.iter,
			stream, node_channel_id.node) {
		unsigned long produced_pos = 0, consumed_pos = 0;

		health_code_update();

		/*
		 * Lock stream because we are about to change its state.
		 */
		pthread_mutex_lock(&stream->lock);

		if (stream->trace_chunk == stream->chan->trace_chunk) {
			rotating_to_new_chunk = false;
		}

		/*
		 * Do not flush an empty packet when rotating from a NULL trace
		 * chunk. The stream has no means to output data, and the prior
		 * rotation which rotated to NULL performed that side-effect already.
		 */
		if (stream->trace_chunk) {
			/*
			 * For metadata stream, do an active flush, which does not
			 * produce empty packets. For data streams, empty-flush;
			 * ensures we have at least one packet in each stream per trace
			 * chunk, even if no data was produced.
			 */
			ret = consumer_flush_buffer(stream, stream->metadata_flag ? 1 : 0);
			if (ret < 0) {
				ERR("Failed to flush stream %" PRIu64 " during channel rotation",
						stream->key);
				goto end_unlock_stream;
			}
		}

		ret = lttng_consumer_take_snapshot(stream);
		if (ret < 0 && ret != -ENODATA && ret != -EAGAIN) {
			ERR("Failed to sample snapshot position during channel rotation");
			goto end_unlock_stream;
		}
		if (!ret) {
			ret = lttng_consumer_get_produced_snapshot(stream,
					&produced_pos);
			if (ret < 0) {
				ERR("Failed to sample produced position during channel rotation");
				goto end_unlock_stream;
			}

			ret = lttng_consumer_get_consumed_snapshot(stream,
					&consumed_pos);
			if (ret < 0) {
				ERR("Failed to sample consumed position during channel rotation");
				goto end_unlock_stream;
			}
		}
		/*
		 * Align produced position on the start-of-packet boundary of the first
		 * packet going into the next trace chunk.
		 */
		produced_pos = ALIGN_FLOOR(produced_pos, stream->max_sb_size);
		if (consumed_pos == produced_pos) {
			DBG("Set rotate ready for stream %" PRIu64 " produced = %lu consumed = %lu",
					stream->key, produced_pos, consumed_pos);
			stream->rotate_ready = true;
		} else {
			DBG("Different consumed and produced positions "
					"for stream %" PRIu64 " produced = %lu consumed = %lu",
					stream->key, produced_pos, consumed_pos);
		}

		/*
		 * The rotation position is based on the packet_seq_num of the
		 * packet following the last packet that was consumed for this
		 * stream, incremented by the offset between produced and
		 * consumed positions. This rotation position is a lower bound
		 * (inclusive) at which the next trace chunk starts. Since it
		 * is a lower bound, it is OK if the packet_seq_num does not
		 * correspond exactly to the same packet identified by the
		 * consumed_pos, which can happen in overwrite mode.
		 */
		if (stream->sequence_number_unavailable) {
			/*
			 * Rotation should never be performed on a session which
			 * interacts with a pre-2.8 lttng-modules, which does
			 * not implement packet sequence number.
			 */
			ERR("Failure to rotate stream %" PRIu64 ": sequence number unavailable",
					stream->key);
			ret = -1;
			goto end_unlock_stream;
		}
		stream->rotate_position = stream->last_sequence_number + 1 +
				((produced_pos - consumed_pos) / stream->max_sb_size);
		DBG("Set rotation position for stream %" PRIu64 " at position %" PRIu64,
				stream->key, stream->rotate_position);

		if (!is_local_trace) {
			/*
			 * The relay daemon control protocol expects a rotation
			 * position as "the sequence number of the first packet
			 * _after_ the current trace chunk".
			 */
			const struct relayd_stream_rotation_position position = {
				.stream_id = stream->relayd_stream_id,
				.rotate_at_seq_num = stream->rotate_position,
			};

			ret = lttng_dynamic_array_add_element(
					&stream_rotation_positions,
					&position);
			if (ret) {
				ERR("Failed to allocate stream rotation position");
				goto end_unlock_stream;
			}
			stream_count++;
		}
		pthread_mutex_unlock(&stream->lock);
	}
	stream = NULL;
	pthread_mutex_unlock(&channel->lock);

	if (is_local_trace) {
		ret = 0;
		goto end;
	}

	relayd = consumer_find_relayd(relayd_id);
	if (!relayd) {
		ERR("Failed to find relayd %" PRIu64, relayd_id);
		ret = -1;
		goto end;
	}

	pthread_mutex_lock(&relayd->ctrl_sock_mutex);
	ret = relayd_rotate_streams(&relayd->control_sock, stream_count,
			rotating_to_new_chunk ? &next_chunk_id : NULL,
			(const struct relayd_stream_rotation_position *)
					stream_rotation_positions.buffer.data);
	pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	if (ret < 0) {
		ERR("Relayd rotate stream failed. Cleaning up relayd %" PRIu64,
				relayd->net_seq_idx);
		lttng_consumer_cleanup_relayd(relayd);
		goto end;
	}

	ret = 0;
	goto end;

end_unlock_stream:
	pthread_mutex_unlock(&stream->lock);
end_unlock_channel:
	pthread_mutex_unlock(&channel->lock);
end:
	rcu_read_unlock();
	lttng_dynamic_array_reset(&stream_rotation_positions);
	return ret;
}
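
/*
 * Worked example for the rotation position computed above (illustrative
 * values only): with last_sequence_number = 41, produced_pos = 24576,
 * consumed_pos = 16384 and max_sb_size = 8192, one packet remains
 * unconsumed ((24576 - 16384) / 8192 = 1), so
 * rotate_position = 41 + 1 + 1 = 43: the first packet of the next trace
 * chunk is, at the latest, the one bearing sequence number 43.
 */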
int consumer_clear_buffer(struct lttng_consumer_stream *stream)
{
	int ret = 0;
	unsigned long consumed_pos_before, consumed_pos_after;

	ret = lttng_consumer_sample_snapshot_positions(stream);
	if (ret < 0) {
		ERR("Taking snapshot positions");
		goto end;
	}

	ret = lttng_consumer_get_consumed_snapshot(stream, &consumed_pos_before);
	if (ret < 0) {
		ERR("Consumed snapshot position");
		goto end;
	}

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		ret = kernctl_buffer_clear(stream->wait_fd);
		if (ret < 0) {
			ERR("Failed to clear kernel stream (ret = %d)", ret);
			goto end;
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_clear_buffer(stream);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	ret = lttng_consumer_sample_snapshot_positions(stream);
	if (ret < 0) {
		ERR("Taking snapshot positions");
		goto end;
	}
	ret = lttng_consumer_get_consumed_snapshot(stream, &consumed_pos_after);
	if (ret < 0) {
		ERR("Consumed snapshot position");
		goto end;
	}
	DBG("clear: before: %lu after: %lu", consumed_pos_before, consumed_pos_after);
end:
	return ret;
}

int consumer_clear_stream(struct lttng_consumer_stream *stream)
{
	int ret;

	ret = consumer_flush_buffer(stream, 1);
	if (ret < 0) {
		ERR("Failed to flush stream %" PRIu64 " during channel clear",
				stream->key);
		ret = LTTCOMM_CONSUMERD_FATAL;
		goto error;
	}

	ret = consumer_clear_buffer(stream);
	if (ret < 0) {
		ERR("Failed to clear stream %" PRIu64 " during channel clear",
				stream->key);
		ret = LTTCOMM_CONSUMERD_FATAL;
		goto error;
	}

	ret = LTTCOMM_CONSUMERD_SUCCESS;
error:
	return ret;
}

int consumer_clear_unmonitored_channel(struct lttng_consumer_channel *channel)
{
	int ret;
	struct lttng_consumer_stream *stream;

	rcu_read_lock();
	pthread_mutex_lock(&channel->lock);
	cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
		health_code_update();
		pthread_mutex_lock(&stream->lock);
		ret = consumer_clear_stream(stream);
		if (ret) {
			goto error_unlock;
		}
		pthread_mutex_unlock(&stream->lock);
	}
	pthread_mutex_unlock(&channel->lock);
	rcu_read_unlock();
	return 0;

error_unlock:
	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&channel->lock);
	rcu_read_unlock();
	return ret;
}

/*
 * Check if a stream is ready to be rotated after extracting it.
 *
 * Return 1 if it is ready for rotation, 0 if it is not, a negative value on
 * error. Stream lock must be held.
 */
int lttng_consumer_stream_is_rotate_ready(struct lttng_consumer_stream *stream)
{
	DBG("Check is rotate ready for stream %" PRIu64
			" ready %u rotate_position %" PRIu64
			" last_sequence_number %" PRIu64,
			stream->key, stream->rotate_ready,
			stream->rotate_position, stream->last_sequence_number);
	if (stream->rotate_ready) {
		return 1;
	}

	/*
	 * If packet seq num is unavailable, it means we are interacting
	 * with a pre-2.8 lttng-modules which does not implement the
	 * sequence number. Rotation should never be used by sessiond in this
	 * scenario.
	 */
	if (stream->sequence_number_unavailable) {
		ERR("Internal error: rotation used on stream %" PRIu64
				" with unavailable sequence number",
				stream->key);
		return -1;
	}

	if (stream->rotate_position == -1ULL ||
			stream->last_sequence_number == -1ULL) {
		return 0;
	}

	/*
	 * Rotate position not reached yet. The stream rotate position is
	 * the position of the next packet belonging to the next trace chunk,
	 * but consumerd considers rotation ready when reaching the last
	 * packet of the current chunk, hence the "rotate_position - 1".
	 */

	DBG("Check is rotate ready for stream %" PRIu64
			" last_sequence_number %" PRIu64
			" rotate_position %" PRIu64,
			stream->key, stream->last_sequence_number,
			stream->rotate_position);
	if (stream->last_sequence_number >= stream->rotate_position - 1) {
		return 1;
	}

	return 0;
}
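
/*
 * Example of the "rotate_position - 1" boundary used above (illustrative
 * values only): with rotate_position = 43, the stream becomes ready once
 * last_sequence_number >= 42, i.e. after the last packet of the current
 * chunk has been consumed; packet 43 already belongs to the next chunk.
 */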
/*
 * Reset the state for a stream after a rotation occurred.
 */
void lttng_consumer_reset_stream_rotate_state(struct lttng_consumer_stream *stream)
{
	DBG("lttng_consumer_reset_stream_rotate_state for stream %" PRIu64,
			stream->key);
	stream->rotate_position = -1ULL;
	stream->rotate_ready = false;
}

/*
 * Perform the rotation of a local stream file.
 */
static int rotate_local_stream(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream)
{
	int ret = 0;

	DBG("Rotate local stream: stream key %" PRIu64 ", channel key %" PRIu64,
			stream->key, stream->chan->key);
	stream->tracefile_size_current = 0;
	stream->tracefile_count_current = 0;

	if (stream->out_fd >= 0) {
		ret = close(stream->out_fd);
		if (ret) {
			PERROR("Failed to close stream out_fd of channel \"%s\"",
					stream->chan->name);
		}
		stream->out_fd = -1;
	}

	if (stream->index_file) {
		lttng_index_file_put(stream->index_file);
		stream->index_file = NULL;
	}

	if (!stream->trace_chunk) {
		goto end;
	}

	ret = consumer_stream_create_output_files(stream, true);
end:
	return ret;
}

/*
 * Performs the stream rotation for the rotate session feature if needed.
 * It must be called with the channel and stream locks held.
 *
 * Return 0 on success, a negative number on error.
 */
int lttng_consumer_rotate_stream(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream)
{
	int ret;

	DBG("Consumer rotate stream %" PRIu64, stream->key);

	/*
	 * Update the stream's 'current' chunk to the session's (channel)
	 * now-current chunk.
	 */
	lttng_trace_chunk_put(stream->trace_chunk);
	if (stream->chan->trace_chunk == stream->trace_chunk) {
		/*
		 * A channel can be rotated and not have a "next" chunk
		 * to transition to. In that case, the channel's "current chunk"
		 * has not been closed yet, but it has not been updated to
		 * a "next" trace chunk either. Hence, the stream, like its
		 * parent channel, becomes part of no chunk and can't output
		 * anything until a new trace chunk is created.
		 */
		stream->trace_chunk = NULL;
	} else if (stream->chan->trace_chunk &&
			!lttng_trace_chunk_get(stream->chan->trace_chunk)) {
		ERR("Failed to acquire a reference to channel's trace chunk during stream rotation");
		ret = -1;
		goto error;
	} else {
		/*
		 * Update the stream's trace chunk to its parent channel's
		 * current trace chunk.
		 */
		stream->trace_chunk = stream->chan->trace_chunk;
	}

	if (stream->net_seq_idx == (uint64_t) -1ULL) {
		ret = rotate_local_stream(ctx, stream);
		if (ret < 0) {
			ERR("Failed to rotate stream, ret = %i", ret);
			goto error;
		}
	}

	if (stream->metadata_flag && stream->trace_chunk) {
		/*
		 * If the stream has transitioned to a new trace
		 * chunk, the metadata should be re-dumped to the
		 * newest chunk.
		 *
		 * However, it is possible for a stream to transition to
		 * a "no-chunk" state. This can happen if a rotation
		 * occurs on an inactive session. In such cases, the metadata
		 * regeneration will happen when the next trace chunk is
		 * created.
		 */
		ret = consumer_metadata_stream_dump(stream);
		if (ret) {
			goto error;
		}
	}
	lttng_consumer_reset_stream_rotate_state(stream);

	ret = 0;
error:
	return ret;
}

/*
 * Rotate all the ready streams now.
 *
 * This is especially important for low throughput streams that have already
 * been consumed; we cannot wait for their next packet to perform the
 * rotation.
 * Need to be called with RCU read-side lock held to ensure existence of
 * channel.
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_rotate_ready_streams(struct lttng_consumer_channel *channel,
		uint64_t key, struct lttng_consumer_local_data *ctx)
{
	int ret;
	struct lttng_consumer_stream *stream;
	struct lttng_ht_iter iter;
	struct lttng_ht *ht = consumer_data.stream_per_chan_id_ht;

	rcu_read_lock();

	DBG("Consumer rotate ready streams in channel %" PRIu64, key);

	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed),
			ht->match_fct, &channel->key, &iter.iter,
			stream, node_channel_id.node) {
		health_code_update();

		pthread_mutex_lock(&stream->chan->lock);
		pthread_mutex_lock(&stream->lock);

		if (!stream->rotate_ready) {
			pthread_mutex_unlock(&stream->lock);
			pthread_mutex_unlock(&stream->chan->lock);
			continue;
		}
		DBG("Consumer rotate ready stream %" PRIu64, stream->key);

		ret = lttng_consumer_rotate_stream(ctx, stream);
		pthread_mutex_unlock(&stream->lock);
		pthread_mutex_unlock(&stream->chan->lock);
		if (ret) {
			goto end;
		}
	}

	ret = 0;

end:
	rcu_read_unlock();
	return ret;
}

enum lttcomm_return_code lttng_consumer_init_command(
		struct lttng_consumer_local_data *ctx,
		const lttng_uuid sessiond_uuid)
{
	enum lttcomm_return_code ret;
	char uuid_str[LTTNG_UUID_STR_LEN];

	if (ctx->sessiond_uuid.is_set) {
		ret = LTTCOMM_CONSUMERD_ALREADY_SET;
		goto end;
	}

	ctx->sessiond_uuid.is_set = true;
	memcpy(ctx->sessiond_uuid.value, sessiond_uuid, sizeof(lttng_uuid));
	ret = LTTCOMM_CONSUMERD_SUCCESS;
	lttng_uuid_to_str(sessiond_uuid, uuid_str);
	DBG("Received session daemon UUID: %s", uuid_str);
end:
	return ret;
}

enum lttcomm_return_code lttng_consumer_create_trace_chunk(
		const uint64_t *relayd_id, uint64_t session_id,
		uint64_t chunk_id,
		time_t chunk_creation_timestamp,
		const char *chunk_override_name,
		const struct lttng_credentials *credentials,
		struct lttng_directory_handle *chunk_directory_handle)
{
	int ret;
	enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct lttng_trace_chunk *created_chunk = NULL, *published_chunk = NULL;
	enum lttng_trace_chunk_status chunk_status;
	char relayd_id_buffer[MAX_INT_DEC_LEN(*relayd_id)];
	char creation_timestamp_buffer[ISO8601_STR_LEN];
	const char *relayd_id_str = "(none)";
	const char *creation_timestamp_str;
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;

	if (relayd_id) {
		/* Only used for logging purposes. */
		ret = snprintf(relayd_id_buffer, sizeof(relayd_id_buffer),
				"%" PRIu64, *relayd_id);
		if (ret > 0 && ret < sizeof(relayd_id_buffer)) {
			relayd_id_str = relayd_id_buffer;
		} else {
			relayd_id_str = "(formatting error)";
		}
	}

	/* Local protocol error. */
	assert(chunk_creation_timestamp);
	ret = time_to_iso8601_str(chunk_creation_timestamp,
			creation_timestamp_buffer,
			sizeof(creation_timestamp_buffer));
	creation_timestamp_str = !ret ? creation_timestamp_buffer :
			"(formatting error)";

	DBG("Consumer create trace chunk command: relayd_id = %s"
			", session_id = %" PRIu64 ", chunk_id = %" PRIu64
			", chunk_override_name = %s"
			", chunk_creation_timestamp = %s",
			relayd_id_str, session_id, chunk_id,
			chunk_override_name ? : "(none)",
			creation_timestamp_str);

	/*
	 * The trace chunk registry, as used by the consumer daemon, implicitly
	 * owns the trace chunks. This is only needed in the consumer since
	 * the consumer has no notion of a session beyond session IDs being
	 * used to identify other objects.
	 *
	 * The lttng_trace_chunk_registry_publish() call below provides a
	 * reference which is not released; it implicitly becomes the session
	 * daemon's reference to the chunk in the consumer daemon.
	 *
	 * The lifetime of trace chunks in the consumer daemon is managed by
	 * the session daemon through the LTTNG_CONSUMER_CREATE_TRACE_CHUNK
	 * and LTTNG_CONSUMER_DESTROY_TRACE_CHUNK commands. A reference-flow
	 * summary follows this function.
	 */
	created_chunk = lttng_trace_chunk_create(chunk_id,
			chunk_creation_timestamp, NULL);
	if (!created_chunk) {
		ERR("Failed to create trace chunk");
		ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED;
		goto error;
	}

	if (chunk_override_name) {
		chunk_status = lttng_trace_chunk_override_name(created_chunk,
				chunk_override_name);
		if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
			ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED;
			goto error;
		}
	}

	if (chunk_directory_handle) {
		chunk_status = lttng_trace_chunk_set_credentials(created_chunk,
				credentials);
		if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
			ERR("Failed to set trace chunk credentials");
			ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED;
			goto error;
		}
		/*
		 * The consumer daemon has no ownership of the chunk output
		 * directory.
		 */
		chunk_status = lttng_trace_chunk_set_as_user(created_chunk,
				chunk_directory_handle);
		chunk_directory_handle = NULL;
		if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
			ERR("Failed to set trace chunk's directory handle");
			ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED;
			goto error;
		}
	}

	published_chunk = lttng_trace_chunk_registry_publish_chunk(
			consumer_data.chunk_registry, session_id,
			created_chunk);
	lttng_trace_chunk_put(created_chunk);
	created_chunk = NULL;
	if (!published_chunk) {
		ERR("Failed to publish trace chunk");
		ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED;
		goto error;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry_duplicate(
			consumer_data.channels_by_session_id_ht->ht,
			consumer_data.channels_by_session_id_ht->hash_fct(
				&session_id, lttng_ht_seed),
			consumer_data.channels_by_session_id_ht->match_fct,
			&session_id, &iter.iter, channel,
			channels_by_session_id_ht_node.node) {
		ret = lttng_consumer_channel_set_trace_chunk(channel,
				published_chunk);
		if (ret) {
			/*
			 * Roll-back the creation of this chunk.
			 *
			 * This is important since the session daemon will
			 * assume that the creation of this chunk failed and
			 * will never ask for it to be closed, resulting
			 * in a leak and an inconsistent state for some
			 * channels.
			 */
			enum lttcomm_return_code close_ret;
			char path[LTTNG_PATH_MAX];

			DBG("Failed to set new trace chunk on existing channels, rolling back");
			close_ret = lttng_consumer_close_trace_chunk(relayd_id,
					session_id, chunk_id,
					chunk_creation_timestamp, NULL,
					path);
			if (close_ret != LTTCOMM_CONSUMERD_SUCCESS) {
				ERR("Failed to roll-back the creation of new chunk: session_id = %" PRIu64 ", chunk_id = %" PRIu64,
						session_id, chunk_id);
			}

			ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED;
			break;
		}
	}

	if (relayd_id) {
		struct consumer_relayd_sock_pair *relayd;

		relayd = consumer_find_relayd(*relayd_id);
		if (relayd) {
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			ret = relayd_create_trace_chunk(
					&relayd->control_sock, published_chunk);
			pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		} else {
			ERR("Failed to find relay daemon socket: relayd_id = %" PRIu64, *relayd_id);
		}

		if (!relayd || ret) {
			enum lttcomm_return_code close_ret;
			char path[LTTNG_PATH_MAX];

			close_ret = lttng_consumer_close_trace_chunk(relayd_id,
					session_id,
					chunk_id,
					chunk_creation_timestamp,
					NULL, path);
			if (close_ret != LTTCOMM_CONSUMERD_SUCCESS) {
				ERR("Failed to roll-back the creation of new chunk: session_id = %" PRIu64 ", chunk_id = %" PRIu64,
						session_id,
						chunk_id);
			}

			ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED;
			goto error_unlock;
		}
	}
error_unlock:
	rcu_read_unlock();
error:
	/* Release the reference returned by the "publish" operation. */
	lttng_trace_chunk_put(published_chunk);
	lttng_trace_chunk_put(created_chunk);
	return ret_code;
}
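
/*
 * Reference-flow summary for the creation path above (a descriptive sketch
 * only, no additional logic):
 *
 *	created_chunk   = lttng_trace_chunk_create(...);
 *	published_chunk = lttng_trace_chunk_registry_publish_chunk(...);
 *	lttng_trace_chunk_put(created_chunk);    creation reference dropped
 *	...
 *	lttng_trace_chunk_put(published_chunk);  publish reference dropped
 *
 * What keeps the chunk alive once this command completes is the registry's
 * own reference, which stands in for the session daemon and is only released
 * by a later close/destroy trace chunk command.
 */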
enum lttcomm_return_code lttng_consumer_close_trace_chunk(
		const uint64_t *relayd_id, uint64_t session_id,
		uint64_t chunk_id, time_t chunk_close_timestamp,
		const enum lttng_trace_chunk_command_type *close_command,
		char *path)
{
	enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct lttng_trace_chunk *chunk;
	char relayd_id_buffer[MAX_INT_DEC_LEN(*relayd_id)];
	const char *relayd_id_str = "(none)";
	const char *close_command_name = "none";
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;
	enum lttng_trace_chunk_status chunk_status;

	if (relayd_id) {
		int ret;

		/* Only used for logging purposes. */
		ret = snprintf(relayd_id_buffer, sizeof(relayd_id_buffer),
				"%" PRIu64, *relayd_id);
		if (ret > 0 && ret < sizeof(relayd_id_buffer)) {
			relayd_id_str = relayd_id_buffer;
		} else {
			relayd_id_str = "(formatting error)";
		}
	}
	if (close_command) {
		close_command_name = lttng_trace_chunk_command_type_get_name(
				*close_command);
	}

	DBG("Consumer close trace chunk command: relayd_id = %s"
			", session_id = %" PRIu64 ", chunk_id = %" PRIu64
			", close command = %s",
			relayd_id_str, session_id, chunk_id,
			close_command_name);

	chunk = lttng_trace_chunk_registry_find_chunk(
			consumer_data.chunk_registry, session_id, chunk_id);
	if (!chunk) {
		ERR("Failed to find chunk: session_id = %" PRIu64
				", chunk_id = %" PRIu64,
				session_id, chunk_id);
		ret_code = LTTCOMM_CONSUMERD_UNKNOWN_TRACE_CHUNK;
		goto end;
	}

	chunk_status = lttng_trace_chunk_set_close_timestamp(chunk,
			chunk_close_timestamp);
	if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
		ret_code = LTTCOMM_CONSUMERD_CLOSE_TRACE_CHUNK_FAILED;
		goto end;
	}

	if (close_command) {
		chunk_status = lttng_trace_chunk_set_close_command(
				chunk, *close_command);
		if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
			ret_code = LTTCOMM_CONSUMERD_CLOSE_TRACE_CHUNK_FAILED;
			goto end;
		}
	}

	/*
	 * chunk is now invalid to access as we no longer hold a reference to
	 * it; it is only kept around to compare it (by address) to the
	 * current chunk found in the session's channels.
	 */
	rcu_read_lock();
	cds_lfht_for_each_entry(consumer_data.channel_ht->ht, &iter.iter,
			channel, node.node) {
		int ret;

		/*
		 * Only change the channel's chunk to NULL if it still
		 * references the chunk being closed. The channel may
		 * reference a newer chunk in the case of a session
		 * rotation. When a session rotation occurs, the "next"
		 * chunk is created before the "current" chunk is closed.
		 */
		if (channel->trace_chunk != chunk) {
			continue;
		}
		ret = lttng_consumer_channel_set_trace_chunk(channel, NULL);
		if (ret) {
			/*
			 * Attempt to close the chunk on as many channels as
			 * possible.
			 */
			ret_code = LTTCOMM_CONSUMERD_CLOSE_TRACE_CHUNK_FAILED;
		}
	}

	if (relayd_id) {
		int ret;
		struct consumer_relayd_sock_pair *relayd;

		relayd = consumer_find_relayd(*relayd_id);
		if (relayd) {
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			ret = relayd_close_trace_chunk(
					&relayd->control_sock, chunk,
					path);
			pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		} else {
			ERR("Failed to find relay daemon socket: relayd_id = %" PRIu64,
					*relayd_id);
		}

		if (!relayd || ret) {
			ret_code = LTTCOMM_CONSUMERD_CLOSE_TRACE_CHUNK_FAILED;
			goto error_unlock;
		}
	}
error_unlock:
	rcu_read_unlock();
end:
	/*
	 * Release the reference returned by the "find" operation and
	 * the session daemon's implicit reference to the chunk; see the
	 * note following this function.
	 */
	lttng_trace_chunk_put(chunk);
	lttng_trace_chunk_put(chunk);

	return ret_code;
}
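
/*
 * Note on the double lttng_trace_chunk_put() in the close path above: the
 * first put releases the reference returned by the registry "find"
 * operation, while the second releases the session daemon's implicit
 * reference taken when the chunk was created. A sketch of the resulting
 * lifetime, assuming no channel still holds the chunk:
 *
 *	find   -> refcount 2 (implicit sessiond reference + find reference)
 *	put #1 -> refcount 1
 *	put #2 -> refcount 0, chunk is reclaimed
 */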
enum lttcomm_return_code lttng_consumer_trace_chunk_exists(
		const uint64_t *relayd_id, uint64_t session_id,
		uint64_t chunk_id)
{
	int ret;
	enum lttcomm_return_code ret_code;
	char relayd_id_buffer[MAX_INT_DEC_LEN(*relayd_id)];
	const char *relayd_id_str = "(none)";
	const bool is_local_trace = !relayd_id;
	struct consumer_relayd_sock_pair *relayd = NULL;
	bool chunk_exists_local, chunk_exists_remote;

	if (relayd_id) {
		/* Only used for logging purposes. */
		ret = snprintf(relayd_id_buffer, sizeof(relayd_id_buffer),
				"%" PRIu64, *relayd_id);
		if (ret > 0 && ret < sizeof(relayd_id_buffer)) {
			relayd_id_str = relayd_id_buffer;
		} else {
			relayd_id_str = "(formatting error)";
		}
	}

	DBG("Consumer trace chunk exists command: relayd_id = %s"
			", chunk_id = %" PRIu64, relayd_id_str,
			chunk_id);
	ret = lttng_trace_chunk_registry_chunk_exists(
			consumer_data.chunk_registry, session_id,
			chunk_id, &chunk_exists_local);
	if (ret) {
		/* Internal error. */
		ERR("Failed to query the existence of a trace chunk");
		ret_code = LTTCOMM_CONSUMERD_FATAL;
		goto end;
	}
	DBG("Trace chunk %s locally",
			chunk_exists_local ? "exists" : "does not exist");
	if (chunk_exists_local) {
		ret_code = LTTCOMM_CONSUMERD_TRACE_CHUNK_EXISTS_LOCAL;
		goto end;
	} else if (is_local_trace) {
		ret_code = LTTCOMM_CONSUMERD_UNKNOWN_TRACE_CHUNK;
		goto end;
	}

	rcu_read_lock();
	relayd = consumer_find_relayd(*relayd_id);
	if (!relayd) {
		ERR("Failed to find relayd %" PRIu64, *relayd_id);
		ret_code = LTTCOMM_CONSUMERD_INVALID_PARAMETERS;
		goto end_rcu_unlock;
	}
	DBG("Looking up existence of trace chunk on relay daemon");
	pthread_mutex_lock(&relayd->ctrl_sock_mutex);
	ret = relayd_trace_chunk_exists(&relayd->control_sock, chunk_id,
			&chunk_exists_remote);
	pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	if (ret < 0) {
		ERR("Failed to look-up the existence of trace chunk on relay daemon");
		ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
		goto end_rcu_unlock;
	}

	ret_code = chunk_exists_remote ?
			LTTCOMM_CONSUMERD_TRACE_CHUNK_EXISTS_REMOTE :
			LTTCOMM_CONSUMERD_UNKNOWN_TRACE_CHUNK;
	DBG("Trace chunk %s on relay daemon",
			chunk_exists_remote ? "exists" : "does not exist");

end_rcu_unlock:
	rcu_read_unlock();
end:
	return ret_code;
}
static
int consumer_clear_monitored_channel(struct lttng_consumer_channel *channel)
{
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct lttng_ht_iter iter;
	int ret;

	ht = consumer_data.stream_per_chan_id_ht;

	rcu_read_lock();
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed),
			ht->match_fct, &channel->key,
			&iter.iter, stream, node_channel_id.node) {
		/*
		 * Protect against teardown with the stream mutex; see the
		 * note following this function.
		 */
		pthread_mutex_lock(&stream->lock);
		if (cds_lfht_is_node_deleted(&stream->node.node)) {
			goto next;
		}
		ret = consumer_clear_stream(stream);
		if (ret) {
			goto error_unlock;
		}
	next:
		pthread_mutex_unlock(&stream->lock);
	}
	rcu_read_unlock();
	return LTTCOMM_CONSUMERD_SUCCESS;

error_unlock:
	pthread_mutex_unlock(&stream->lock);
	rcu_read_unlock();
	return ret;
}
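
/*
 * The lock-then-validate pattern used above takes the stream lock before
 * checking cds_lfht_is_node_deleted(), so a stream being torn down
 * concurrently is skipped rather than cleared. A condensed sketch of the
 * pattern (illustrative only; the function above uses a 'next' label
 * instead of 'continue'):
 *
 *	pthread_mutex_lock(&stream->lock);
 *	if (cds_lfht_is_node_deleted(&stream->node.node)) {
 *		pthread_mutex_unlock(&stream->lock);
 *		continue;
 *	}
 *	... operate on the stream ...
 *	pthread_mutex_unlock(&stream->lock);
 */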
int lttng_consumer_clear_channel(struct lttng_consumer_channel *channel)
{
	int ret;

	DBG("Consumer clear channel %" PRIu64, channel->key);

	if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA) {
		/*
		 * Nothing to do for the metadata channel/stream.
		 * The snapshot mechanism already takes care of metadata
		 * handling/generation, and monitored channels only need to
		 * have their data stream cleared.
		 */
		ret = LTTCOMM_CONSUMERD_SUCCESS;
		goto end;
	}

	if (!channel->monitor) {
		ret = consumer_clear_unmonitored_channel(channel);
	} else {
		ret = consumer_clear_monitored_channel(channel);
	}
end:
	return ret;
}