/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *               2012 - David Goulet <dgoulet@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <sys/socket.h>
#include <sys/types.h>

#include <bin/lttng-consumerd/health-consumerd.h>
#include <common/common.h>
#include <common/utils.h>
#include <common/compat/poll.h>
#include <common/compat/endian.h>
#include <common/index/index.h>
#include <common/kernel-ctl/kernel-ctl.h>
#include <common/sessiond-comm/relayd.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/relayd/relayd.h>
#include <common/ust-consumer/ust-consumer.h>
#include <common/consumer/consumer-timer.h>
#include <common/consumer/consumer.h>
#include <common/consumer/consumer-stream.h>
#include <common/consumer/consumer-testpoint.h>
#include <common/align.h>
#include <common/consumer/consumer-metadata-cache.h>
struct lttng_consumer_global_data consumer_data = {
	.type = LTTNG_CONSUMER_UNKNOWN,
};

enum consumer_channel_action {
	CONSUMER_CHANNEL_ADD,
	CONSUMER_CHANNEL_DEL,
	CONSUMER_CHANNEL_QUIT,
};

struct consumer_channel_msg {
	enum consumer_channel_action action;
	struct lttng_consumer_channel *chan;	/* add */
	uint64_t key;				/* del */
};

/* Flag used to temporarily pause data consumption from testpoints. */
int data_consumption_paused;
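/*
 * Illustrative sketch only (not authoritative): a channel "add" is published
 * to the channel thread by filling a consumer_channel_msg and writing it to
 * the channel pipe, e.g.:
 *
 *	struct consumer_channel_msg msg = {
 *		.action = CONSUMER_CHANNEL_ADD,
 *		.chan = chan,
 *	};
 *	(void) lttng_write(ctx->consumer_channel_pipe[1], &msg, sizeof(msg));
 *
 * The receiving thread reads one fixed-size message per wakeup; see
 * notify_channel_pipe() and read_channel_pipe() below.
 */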
/*
 * Flag to inform the polling thread to quit when all fds have hung up.
 * Updated by consumer_thread_receive_fds when it notices that all fds have
 * hung up. Also updated by the signal handler (consumer_should_exit()). Read
 * by the polling threads.
 */
int consumer_quit;

/*
 * Global hash tables containing respectively metadata and data streams. The
 * stream elements in these hash tables should only be updated by the metadata
 * poll thread for the metadata and by the data poll thread for the data.
 */
static struct lttng_ht *metadata_ht;
static struct lttng_ht *data_ht;
/*
 * Notify a thread lttng pipe to poll back again. This usually means that some
 * global state has changed so we just send back the thread in a poll wait
 * call.
 */
static void notify_thread_lttng_pipe(struct lttng_pipe *pipe)
{
	struct lttng_consumer_stream *null_stream = NULL;

	(void) lttng_pipe_write(pipe, &null_stream, sizeof(null_stream));
}
static void notify_health_quit_pipe(int *pipe)
{
	ssize_t ret;

	ret = lttng_write(pipe[1], "4", 1);
	if (ret < 1) {
		PERROR("write consumer health quit");
	}
}
static void notify_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel *chan,
		uint64_t key,
		enum consumer_channel_action action)
{
	struct consumer_channel_msg msg;
	ssize_t ret;

	memset(&msg, 0, sizeof(msg));

	msg.action = action;
	msg.chan = chan;
	msg.key = key;
	ret = lttng_write(ctx->consumer_channel_pipe[1], &msg, sizeof(msg));
	if (ret < sizeof(msg)) {
		PERROR("notify_channel_pipe write error");
	}
}
void notify_thread_del_channel(struct lttng_consumer_local_data *ctx,
		uint64_t key)
{
	notify_channel_pipe(ctx, NULL, key, CONSUMER_CHANNEL_DEL);
}
static int read_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel **chan,
		uint64_t *key,
		enum consumer_channel_action *action)
{
	struct consumer_channel_msg msg;
	ssize_t ret;

	ret = lttng_read(ctx->consumer_channel_pipe[0], &msg, sizeof(msg));
	if (ret < sizeof(msg)) {
		ret = -1;
		goto error;
	}
	*action = msg.action;
	*chan = msg.chan;
	*key = msg.key;
error:
	return (int) ret;
}
/*
 * Cleanup the stream list of a channel. Those streams are not yet globally
 * visible.
 */
static void clean_channel_stream_list(struct lttng_consumer_channel *channel)
{
	struct lttng_consumer_stream *stream, *stmp;

	/* Delete streams that might have been left in the stream list. */
	cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
			send_node) {
		cds_list_del(&stream->send_node);
		/*
		 * Once a stream is added to this list, the buffers were created so we
		 * have a guarantee that this call will succeed. Setting the monitor
		 * mode to 0 so we don't lock nor try to delete the stream from the
		 * global hash table.
		 */
		stream->monitor = 0;
		consumer_stream_destroy(stream, NULL);
	}
}
/*
 * Find a stream. The consumer_data.lock must be locked during this
 * call.
 */
static struct lttng_consumer_stream *find_stream(uint64_t key,
		struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_stream *stream = NULL;

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	lttng_ht_lookup(ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		stream = caa_container_of(node, struct lttng_consumer_stream, node);
	}

	return stream;
}
static void steal_stream_key(uint64_t key, struct lttng_ht *ht)
{
	struct lttng_consumer_stream *stream;

	stream = find_stream(key, ht);
	if (stream) {
		stream->key = (uint64_t) -1ULL;
		/*
		 * We don't want the lookup to match, but we still need
		 * to iterate on this stream when iterating over the hash table. Just
		 * change the node key.
		 */
		stream->node.key = (uint64_t) -1ULL;
	}
}
/*
 * Return a channel object for the given key.
 *
 * RCU read side lock MUST be acquired before calling this function and
 * protects the channel ptr.
 */
struct lttng_consumer_channel *consumer_find_channel(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_channel *channel = NULL;

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	lttng_ht_lookup(consumer_data.channel_ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		channel = caa_container_of(node, struct lttng_consumer_channel, node);
	}

	return channel;
}
/*
 * There is a possibility that the consumer does not have enough time between
 * the close of the channel on the session daemon and the cleanup in here thus
 * once we have a channel add with an existing key, we know for sure that this
 * channel will eventually get cleaned up by all streams being closed.
 *
 * This function just nullifies the already existing channel key.
 */
static void steal_channel_key(uint64_t key)
{
	struct lttng_consumer_channel *channel;

	channel = consumer_find_channel(key);
	if (channel) {
		channel->key = (uint64_t) -1ULL;
		/*
		 * We don't want the lookup to match, but we still need to iterate on
		 * this channel when iterating over the hash table. Just change the
		 * node key.
		 */
		channel->node.key = (uint64_t) -1ULL;
	}
}
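/*
 * Example (sketch): after steal_channel_key(42), a later
 * consumer_find_channel(42) returns NULL because both channel->key and
 * channel->node.key now hold (uint64_t) -1ULL, yet hash table iteration
 * (e.g. in lttng_consumer_cleanup()) still visits the old node so it can be
 * torn down normally.
 */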
static void free_channel_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct lttng_consumer_channel *channel =
		caa_container_of(node, struct lttng_consumer_channel, node);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_free_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
		break;
	}
	free(channel);
}
/*
 * RCU protected relayd socket pair free.
 */
static void free_relayd_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct consumer_relayd_sock_pair *relayd =
		caa_container_of(node, struct consumer_relayd_sock_pair, node);

	/*
	 * Close all sockets. This is done in the call RCU since we don't want the
	 * socket fds to be reassigned thus potentially creating bad state of the
	 * relayd object.
	 *
	 * We do not have to lock the control socket mutex here since at this stage
	 * there is no one referencing this relayd object.
	 */
	(void) relayd_close(&relayd->control_sock);
	(void) relayd_close(&relayd->data_sock);

	free(relayd);
}
/*
 * Destroy and free relayd socket pair object.
 */
void consumer_destroy_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret;
	struct lttng_ht_iter iter;

	if (relayd == NULL) {
		return;
	}

	DBG("Consumer destroy and close relayd socket pair");

	iter.iter.node = &relayd->node.node;
	ret = lttng_ht_del(consumer_data.relayd_ht, &iter);
	if (ret != 0) {
		/* We assume the relayd is being or is destroyed */
		return;
	}

	/* RCU free() call */
	call_rcu(&relayd->node.head, free_relayd_rcu);
}
/*
 * Remove a channel from the global list protected by a mutex. This function is
 * also responsible for freeing its data structures.
 */
void consumer_del_channel(struct lttng_consumer_channel *channel)
{
	int ret;
	struct lttng_ht_iter iter;

	DBG("Consumer delete channel key %" PRIu64, channel->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);

	/* Destroy streams that might have been left in the stream list. */
	clean_channel_stream_list(channel);

	if (channel->live_timer_enabled == 1) {
		consumer_timer_live_stop(channel);
	}
	if (channel->monitor_timer_enabled == 1) {
		consumer_timer_monitor_stop(channel);
	}

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
		break;
	}

	rcu_read_lock();
	iter.iter.node = &channel->node.node;
	ret = lttng_ht_del(consumer_data.channel_ht, &iter);
	rcu_read_unlock();

	call_rcu(&channel->node.head, free_channel_rcu);

	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);
}
/*
 * Iterate over the relayd hash table and destroy each element. Finally,
 * destroy the whole hash table.
 */
static void cleanup_relayd_ht(void)
{
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd;

	rcu_read_lock();

	cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
			node.node) {
		consumer_destroy_relayd(relayd);
	}

	rcu_read_unlock();

	lttng_ht_destroy(consumer_data.relayd_ht);
}
/*
 * Update the end point status of all streams having the given network sequence
 * index (relayd index).
 *
 * It's atomically set without having the stream mutex locked which is fine
 * because we handle the write/read race with a pipe wakeup for each thread.
 */
static void update_endpoint_status_by_netidx(uint64_t net_seq_idx,
		enum consumer_endpoint_status status)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer set delete flag on stream by idx %" PRIu64, net_seq_idx);

	rcu_read_lock();

	/* Let's begin with metadata */
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to metadata stream %d", stream->wait_fd);
		}
	}

	/* Follow up by the data streams */
	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to data stream %d", stream->wait_fd);
		}
	}
	rcu_read_unlock();
}
/*
 * Cleanup a relayd object by flagging every associated stream for deletion,
 * destroying the object meaning removing it from the relayd hash table,
 * closing the sockets and freeing the memory in a RCU call.
 *
 * If a local data context is available, notify the threads that the streams'
 * state has changed.
 */
static void cleanup_relayd(struct consumer_relayd_sock_pair *relayd,
		struct lttng_consumer_local_data *ctx)
{
	uint64_t netidx;

	DBG("Cleaning up relayd sockets");

	/* Save the net sequence index before destroying the object */
	netidx = relayd->net_seq_idx;

	/*
	 * Delete the relayd from the relayd hash table, close the sockets and free
	 * the object in a RCU call.
	 */
	consumer_destroy_relayd(relayd);

	/* Set inactive endpoint to all streams */
	update_endpoint_status_by_netidx(netidx, CONSUMER_ENDPOINT_INACTIVE);

	/*
	 * With a local data context, notify the threads that the streams' state
	 * have changed. The write() action on the pipe acts as an "implicit"
	 * memory barrier ordering the updates of the end point status from the
	 * read of this status which happens AFTER receiving this notify.
	 */
	if (ctx) {
		notify_thread_lttng_pipe(ctx->consumer_data_pipe);
		notify_thread_lttng_pipe(ctx->consumer_metadata_pipe);
	}
}
/*
 * Flag a relayd socket pair for destruction. Destroy it if the refcount
 * reaches zero.
 *
 * RCU read side lock MUST be acquired before calling this function.
 */
void consumer_flag_relayd_for_destroy(struct consumer_relayd_sock_pair *relayd)
{
	/* Set destroy flag for this object */
	uatomic_set(&relayd->destroy_flag, 1);

	/* Destroy the relayd if refcount is 0 */
	if (uatomic_read(&relayd->refcount) == 0) {
		consumer_destroy_relayd(relayd);
	}
}
/*
 * Completely destroy stream from every visible data structure and the given
 * hash table, if any.
 *
 * Once this call returns, the stream object is no longer usable nor visible.
 */
void consumer_del_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	consumer_stream_destroy(stream, ht);
}

/*
 * XXX naming of del vs destroy is all mixed up.
 */
void consumer_del_stream_for_data(struct lttng_consumer_stream *stream)
{
	consumer_stream_destroy(stream, data_ht);
}

void consumer_del_stream_for_metadata(struct lttng_consumer_stream *stream)
{
	consumer_stream_destroy(stream, metadata_ht);
}
void consumer_stream_copy_ro_channel_values(struct lttng_consumer_stream *stream,
		struct lttng_consumer_channel *channel)
{
	stream->channel_ro_tracefile_size = channel->tracefile_size;
	memcpy(stream->channel_ro_pathname, channel->pathname, PATH_MAX);
}
struct lttng_consumer_stream *consumer_allocate_stream(uint64_t channel_key,
		uint64_t stream_key,
		enum lttng_consumer_stream_state state,
		const char *channel_name,
		uint64_t relayd_id,
		uint64_t session_id,
		int cpu,
		enum consumer_channel_type type,
		unsigned int monitor)
{
	int ret;
	struct lttng_consumer_stream *stream;

	stream = zmalloc(sizeof(*stream));
	if (stream == NULL) {
		PERROR("malloc struct lttng_consumer_stream");
		return NULL;
	}

	stream->key = stream_key;
	stream->out_fd_offset = 0;
	stream->output_written = 0;
	stream->state = state;
	stream->net_seq_idx = relayd_id;
	stream->session_id = session_id;
	stream->monitor = monitor;
	stream->endpoint_status = CONSUMER_ENDPOINT_ACTIVE;
	stream->index_file = NULL;
	stream->last_sequence_number = -1ULL;
	pthread_mutex_init(&stream->lock, NULL);
	pthread_mutex_init(&stream->metadata_timer_lock, NULL);

	/* If channel is the metadata, flag this stream as metadata. */
	if (type == CONSUMER_CHANNEL_TYPE_METADATA) {
		stream->metadata_flag = 1;
		/* Metadata is flat out. */
		strncpy(stream->name, DEFAULT_METADATA_NAME, sizeof(stream->name));
		/* Live rendez-vous point. */
		pthread_cond_init(&stream->metadata_rdv, NULL);
		pthread_mutex_init(&stream->metadata_rdv_lock, NULL);
	} else {
		/* Format stream name to <channel_name>_<cpu_number> */
		ret = snprintf(stream->name, sizeof(stream->name), "%s_%d",
				channel_name, cpu);
		if (ret < 0) {
			PERROR("snprintf stream name");
			free(stream);
			return NULL;
		}
	}

	/* Key is always the wait_fd for streams. */
	lttng_ht_node_init_u64(&stream->node, stream->key);

	/* Init node per channel id key */
	lttng_ht_node_init_u64(&stream->node_channel_id, channel_key);

	/* Init session id node with the stream session id */
	lttng_ht_node_init_u64(&stream->node_session_id, stream->session_id);

	DBG3("Allocated stream %s (key %" PRIu64 ", chan_key %" PRIu64
			" relayd_id %" PRIu64 ", session_id %" PRIu64,
			stream->name, stream->key, channel_key,
			stream->net_seq_idx, stream->session_id);

	return stream;
}
/*
 * Add a stream to the global list protected by a mutex.
 */
int consumer_add_data_stream(struct lttng_consumer_stream *stream)
{
	struct lttng_ht *ht = data_ht;

	DBG3("Adding consumer stream %" PRIu64, stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->chan->timer_lock);
	pthread_mutex_lock(&stream->lock);
	rcu_read_lock();

	/* Steal stream identifier to avoid having streams with the same key */
	steal_stream_key(stream->key, ht);

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_u64(consumer_data.stream_per_chan_id_ht,
			&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to steal
	 * the key since the HT does not use it and we allow to add redundant keys
	 * into this table.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any action
	 * in terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of this
	 * stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		/* Increment refcount before decrementing nb_init_stream_left */
		uatomic_inc(&stream->chan->refcount);
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	/* Update consumer data once the node is inserted. */
	consumer_data.stream_count++;
	consumer_data.need_update = 1;

	rcu_read_unlock();
	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->timer_lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	return 0;
}

void consumer_del_data_stream(struct lttng_consumer_stream *stream)
{
	consumer_del_stream(stream, data_ht);
}
/*
 * Add relayd socket to global consumer data hashtable. RCU read side lock MUST
 * be acquired before calling this.
 */
static int add_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret = 0;
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(consumer_data.relayd_ht,
			&relayd->net_seq_idx, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		goto end;
	}
	lttng_ht_add_unique_u64(consumer_data.relayd_ht, &relayd->node);

end:
	return ret;
}
/*
 * Allocate and return a consumer relayd socket.
 */
static struct consumer_relayd_sock_pair *consumer_allocate_relayd_sock_pair(
		uint64_t net_seq_idx)
{
	struct consumer_relayd_sock_pair *obj = NULL;

	/* net sequence index of -1 is a failure */
	if (net_seq_idx == (uint64_t) -1ULL) {
		goto error;
	}

	obj = zmalloc(sizeof(struct consumer_relayd_sock_pair));
	if (obj == NULL) {
		PERROR("zmalloc relayd sock");
		goto error;
	}

	obj->net_seq_idx = net_seq_idx;
	obj->refcount = 0;
	obj->destroy_flag = 0;
	obj->control_sock.sock.fd = -1;
	obj->data_sock.sock.fd = -1;
	lttng_ht_node_init_u64(&obj->node, obj->net_seq_idx);
	pthread_mutex_init(&obj->ctrl_sock_mutex, NULL);

error:
	return obj;
}
/*
 * Find a relayd socket pair in the global consumer data.
 *
 * Return the object if found else NULL.
 * RCU read-side lock must be held across this call and while using the
 * returned object.
 */
struct consumer_relayd_sock_pair *consumer_find_relayd(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Negative keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	lttng_ht_lookup(consumer_data.relayd_ht, &key,
			&iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		relayd = caa_container_of(node, struct consumer_relayd_sock_pair, node);
	}

	return relayd;
}
/*
 * Find a relayd and send the stream
 *
 * Returns 0 on success, < 0 on error
 */
int consumer_send_relayd_stream(struct lttng_consumer_stream *stream,
		char *path, enum lttng_domain_type domain)
{
	int ret = 0;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream->net_seq_idx != -1ULL);

	/* The stream is not metadata. Get relayd reference if exists. */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		/* Add stream on the relayd */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_add_stream(&relayd->control_sock, stream->name,
				path, &stream->relayd_stream_id,
				stream->chan->tracefile_size, stream->chan->tracefile_count,
				domain);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			goto end;
		}

		uatomic_inc(&relayd->refcount);
		stream->sent_to_relayd = 1;
	} else {
		ERR("Stream %" PRIu64 " relayd ID %" PRIu64 " unknown. Can't send it.",
				stream->key, stream->net_seq_idx);
		ret = -1;
		goto end;
	}

	DBG("Stream %s with key %" PRIu64 " sent to relayd id %" PRIu64,
			stream->name, stream->key, stream->net_seq_idx);

end:
	rcu_read_unlock();
	return ret;
}
/*
 * Find a relayd and send the streams sent message
 *
 * Returns 0 on success, < 0 on error
 */
int consumer_send_relayd_streams_sent(uint64_t net_seq_idx)
{
	int ret = 0;
	struct consumer_relayd_sock_pair *relayd;

	assert(net_seq_idx != -1ULL);

	/* The stream is not metadata. Get relayd reference if exists. */
	rcu_read_lock();
	relayd = consumer_find_relayd(net_seq_idx);
	if (relayd != NULL) {
		/* Add stream on the relayd */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_streams_sent(&relayd->control_sock);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			goto end;
		}
	} else {
		ERR("Relayd ID %" PRIu64 " unknown. Can't send streams_sent.",
				net_seq_idx);
		ret = -1;
		goto end;
	}

	DBG("All streams sent relayd id %" PRIu64, net_seq_idx);

end:
	rcu_read_unlock();
	return ret;
}
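/*
 * Typical call sequence (sketch, not authoritative): once a relayd socket
 * pair has been registered for net_seq_idx N, each stream bound to N is
 * announced with consumer_send_relayd_stream(stream, path, domain), and when
 * every stream of the channel has been announced,
 * consumer_send_relayd_streams_sent(N) tells the relay daemon that the set of
 * streams is complete.
 */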
/*
 * Find a relayd and close the stream
 */
void close_relayd_stream(struct lttng_consumer_stream *stream)
{
	struct consumer_relayd_sock_pair *relayd;

	/* The stream is not metadata. Get relayd reference if exists. */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd) {
		consumer_stream_relayd_close(stream, relayd);
	}
	rcu_read_unlock();
}
/*
 * Handle stream for relayd transmission if the stream applies for network
 * streaming where the net sequence index is set.
 *
 * Return destination file descriptor or negative value on error.
 */
static int write_relayd_stream_header(struct lttng_consumer_stream *stream,
		size_t data_size, unsigned long padding,
		struct consumer_relayd_sock_pair *relayd)
{
	int outfd = -1, ret;
	struct lttcomm_relayd_data_hdr data_hdr;

	/* Reset data header */
	memset(&data_hdr, 0, sizeof(data_hdr));

	if (stream->metadata_flag) {
		/* Caller MUST acquire the relayd control socket lock */
		ret = relayd_send_metadata(&relayd->control_sock, data_size);
		if (ret < 0) {
			goto error;
		}

		/* Metadata are always sent on the control socket. */
		outfd = relayd->control_sock.sock.fd;
	} else {
		/* Set header with stream information */
		data_hdr.stream_id = htobe64(stream->relayd_stream_id);
		data_hdr.data_size = htobe32(data_size);
		data_hdr.padding_size = htobe32(padding);
		/*
		 * Note that net_seq_num below is assigned with the *current* value of
		 * next_net_seq_num and only after that the next_net_seq_num will be
		 * incremented. This is why when issuing a command on the relayd using
		 * this next value, 1 should always be subtracted in order to compare
		 * the last seen sequence number on the relayd side to the last sent.
		 */
		data_hdr.net_seq_num = htobe64(stream->next_net_seq_num);
		/* Other fields are zeroed previously */

		ret = relayd_send_data_hdr(&relayd->data_sock, &data_hdr,
				sizeof(data_hdr));
		if (ret < 0) {
			goto error;
		}

		++stream->next_net_seq_num;

		/* Set to go on data socket */
		outfd = relayd->data_sock.sock.fd;
	}

error:
	return outfd;
}
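/*
 * Worked example (sketch): if next_net_seq_num is 7 when a data packet is
 * sent, the header carries net_seq_num == 7 and the counter then becomes 8.
 * A later query about what the relayd has received must therefore compare
 * against next_net_seq_num - 1 (here 7), the last sequence number actually
 * transmitted.
 */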
/*
 * Allocate and return a new lttng_consumer_channel object using the given key
 * to initialize the hash table node.
 *
 * On error, return NULL.
 */
struct lttng_consumer_channel *consumer_allocate_channel(uint64_t key,
		uint64_t session_id,
		const char *pathname,
		const char *name,
		uint64_t relayd_id,
		enum lttng_event_output output,
		uint64_t tracefile_size,
		uint64_t tracefile_count,
		uint64_t session_id_per_pid,
		unsigned int monitor,
		unsigned int live_timer_interval,
		const char *root_shm_path,
		const char *shm_path)
{
	struct lttng_consumer_channel *channel;

	channel = zmalloc(sizeof(*channel));
	if (channel == NULL) {
		PERROR("malloc struct lttng_consumer_channel");
		goto end;
	}

	channel->key = key;
	channel->refcount = 0;
	channel->session_id = session_id;
	channel->session_id_per_pid = session_id_per_pid;
	channel->relayd_id = relayd_id;
	channel->tracefile_size = tracefile_size;
	channel->tracefile_count = tracefile_count;
	channel->monitor = monitor;
	channel->live_timer_interval = live_timer_interval;
	pthread_mutex_init(&channel->lock, NULL);
	pthread_mutex_init(&channel->timer_lock, NULL);

	switch (output) {
	case LTTNG_EVENT_SPLICE:
		channel->output = CONSUMER_CHANNEL_SPLICE;
		break;
	case LTTNG_EVENT_MMAP:
		channel->output = CONSUMER_CHANNEL_MMAP;
		break;
	default:
		free(channel);
		channel = NULL;
		goto end;
	}

	/*
	 * In monitor mode, the streams associated with the channel will be put in
	 * a special list ONLY owned by this channel. So, the refcount is set to 1
	 * here meaning that the channel itself has streams that are referenced.
	 *
	 * On a channel deletion, once the channel is no longer visible, the
	 * refcount is decremented and checked for a zero value to delete it. With
	 * streams in no monitor mode, it will now be safe to destroy the channel.
	 */
	if (!channel->monitor) {
		channel->refcount = 1;
	}

	strncpy(channel->pathname, pathname, sizeof(channel->pathname));
	channel->pathname[sizeof(channel->pathname) - 1] = '\0';

	strncpy(channel->name, name, sizeof(channel->name));
	channel->name[sizeof(channel->name) - 1] = '\0';

	if (root_shm_path) {
		strncpy(channel->root_shm_path, root_shm_path, sizeof(channel->root_shm_path));
		channel->root_shm_path[sizeof(channel->root_shm_path) - 1] = '\0';
	}
	if (shm_path) {
		strncpy(channel->shm_path, shm_path, sizeof(channel->shm_path));
		channel->shm_path[sizeof(channel->shm_path) - 1] = '\0';
	}

	lttng_ht_node_init_u64(&channel->node, channel->key);

	channel->wait_fd = -1;

	CDS_INIT_LIST_HEAD(&channel->streams.head);

	DBG("Allocated channel (key %" PRIu64 ")", channel->key);

end:
	return channel;
}
/*
 * Add a channel to the global list protected by a mutex.
 *
 * Always return 0 indicating success.
 */
int consumer_add_channel(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);
	pthread_mutex_lock(&channel->timer_lock);

	/*
	 * This gives us a guarantee that the channel we are about to add to the
	 * channel hash table will be unique. See this function comment on the why
	 * we need to steal the channel key at this stage.
	 */
	steal_channel_key(channel->key);

	rcu_read_lock();
	lttng_ht_add_unique_u64(consumer_data.channel_ht, &channel->node);
	rcu_read_unlock();

	pthread_mutex_unlock(&channel->timer_lock);
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	if (channel->wait_fd != -1 && channel->type == CONSUMER_CHANNEL_TYPE_DATA) {
		notify_channel_pipe(ctx, channel, -1, CONSUMER_CHANNEL_ADD);
	}

	return 0;
}
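/*
 * Usage sketch (illustrative only; argument values are made up): command
 * handlers typically pair the two calls above, roughly
 *
 *	chan = consumer_allocate_channel(key, session_id, path, name,
 *			relayd_id, LTTNG_EVENT_MMAP, tf_size, tf_count,
 *			session_id_per_pid, monitor, live_timer, NULL, NULL);
 *	if (chan) {
 *		consumer_add_channel(chan, ctx);
 *	}
 */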
/*
 * Allocate the pollfd structure and the local view of the out fds to avoid
 * doing a lookup in the linked list and concurrency issues when writing is
 * needed. Called with consumer_data.lock held.
 *
 * Returns the number of fds in the structures.
 */
static int update_poll_array(struct lttng_consumer_local_data *ctx,
		struct pollfd **pollfd, struct lttng_consumer_stream **local_stream,
		struct lttng_ht *ht)
{
	int i = 0;
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	assert(local_stream);

	DBG("Updating poll fd array");
	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Only active streams with an active end point can be added to the
		 * poll set and local stream storage of the thread.
		 *
		 * There is a potential race here for endpoint_status to be updated
		 * just after the check. However, this is OK since the stream(s) will
		 * be deleted once the thread is notified that the end point state has
		 * changed where this function will be called back again.
		 */
		if (stream->state != LTTNG_CONSUMER_ACTIVE_STREAM ||
				stream->endpoint_status == CONSUMER_ENDPOINT_INACTIVE) {
			continue;
		}
		/*
		 * This clobbers way too much the debug output. Uncomment that if you
		 * need it for debugging purposes.
		 *
		 * DBG("Active FD %d", stream->wait_fd);
		 */
		(*pollfd)[i].fd = stream->wait_fd;
		(*pollfd)[i].events = POLLIN | POLLPRI;
		local_stream[i] = stream;
		i++;
	}
	rcu_read_unlock();

	/*
	 * Insert the consumer_data_pipe at the end of the array and don't
	 * increment i so nb_fd is the number of real FD.
	 */
	(*pollfd)[i].fd = lttng_pipe_get_readfd(ctx->consumer_data_pipe);
	(*pollfd)[i].events = POLLIN | POLLPRI;

	(*pollfd)[i + 1].fd = lttng_pipe_get_readfd(ctx->consumer_wakeup_pipe);
	(*pollfd)[i + 1].events = POLLIN | POLLPRI;

	(*pollfd)[i + 2].fd = lttng_pipe_get_readfd(ctx->consumer_data_rotate_pipe);
	(*pollfd)[i + 2].events = POLLIN | POLLPRI;

	return i;
}
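/*
 * Resulting layout (sketch): for i active streams, the caller's pollfd array
 * looks like
 *
 *	[0 .. i-1]	stream wait_fds
 *	[i]		consumer_data_pipe (new streams / state changes)
 *	[i + 1]		consumer_wakeup_pipe
 *	[i + 2]		consumer_data_rotate_pipe
 *
 * and the returned value counts only the real stream FDs.
 */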
/*
 * Poll on the should_quit pipe and the command socket. Return -1 on
 * error, 1 if we should exit, 0 if data is available on the command socket.
 */
int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll)
{
	int num_rdy;

restart:
	num_rdy = poll(consumer_sockpoll, 2, -1);
	if (num_rdy == -1) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		PERROR("Poll error");
		return -1;
	}
	if (consumer_sockpoll[0].revents & (POLLIN | POLLPRI)) {
		DBG("consumer_should_quit wake up");
		return 1;
	}
	return 0;
}
/*
 * Set the error socket.
 */
void lttng_consumer_set_error_sock(struct lttng_consumer_local_data *ctx,
		int sock)
{
	ctx->consumer_error_socket = sock;
}

/*
 * Set the command socket path.
 */
void lttng_consumer_set_command_sock_path(
		struct lttng_consumer_local_data *ctx, char *sock)
{
	ctx->consumer_command_sock_path = sock;
}

/*
 * Send return code to the session daemon.
 * If the socket is not defined, we return 0, it is not a fatal error.
 */
int lttng_consumer_send_error(struct lttng_consumer_local_data *ctx, int cmd)
{
	if (ctx->consumer_error_socket > 0) {
		return lttcomm_send_unix_sock(ctx->consumer_error_socket, &cmd,
				sizeof(enum lttcomm_sessiond_command));
	}

	return 0;
}
/*
 * Close all the tracefiles and stream fds and MUST be called when all
 * instances are destroyed i.e. when all threads were joined and are ended.
 */
void lttng_consumer_cleanup(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;

	rcu_read_lock();

	cds_lfht_for_each_entry(consumer_data.channel_ht->ht, &iter.iter, channel,
			node.node) {
		consumer_del_channel(channel);
	}

	rcu_read_unlock();

	lttng_ht_destroy(consumer_data.channel_ht);

	cleanup_relayd_ht();

	lttng_ht_destroy(consumer_data.stream_per_chan_id_ht);

	/*
	 * This HT contains streams that are freed by either the metadata thread or
	 * the data thread so we do *nothing* on the hash table and simply destroy
	 * it.
	 */
	lttng_ht_destroy(consumer_data.stream_list_ht);
}
/*
 * Called from signal handler.
 */
void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx)
{
	ssize_t ret;

	CMM_STORE_SHARED(consumer_quit, 1);
	ret = lttng_write(ctx->consumer_should_quit[1], "4", 1);
	if (ret < 1) {
		PERROR("write consumer quit");
	}

	DBG("Consumer flag that it should quit");
}
/*
 * Flush pending writes to trace output disk file.
 */
void lttng_consumer_sync_trace_file(struct lttng_consumer_stream *stream,
		off_t orig_offset)
{
	int ret;
	int outfd = stream->out_fd;

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset < stream->max_sb_size) {
		return;
	}
	lttng_sync_file_range(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size,
			SYNC_FILE_RANGE_WAIT_BEFORE
			| SYNC_FILE_RANGE_WRITE
			| SYNC_FILE_RANGE_WAIT_AFTER);
	/*
	 * Give hints to the kernel about how we access the file:
	 * POSIX_FADV_DONTNEED : we won't re-access data in a near future after
	 * we write it.
	 *
	 * We need to call fadvise again after the file grows because the
	 * kernel does not seem to apply fadvise to non-existing parts of the
	 * file.
	 *
	 * Call fadvise _after_ having waited for the page writeback to
	 * complete because the dirty page writeback semantic is not well
	 * defined. So it can be expected to lead to lower throughput in
	 * streaming.
	 */
	ret = posix_fadvise(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size, POSIX_FADV_DONTNEED);
	if (ret && ret != -ENOSYS) {
		errno = ret;
		PERROR("posix_fadvise on fd %i", outfd);
	}
}
/*
 * Initialise the necessary environment:
 * - create a new context
 * - create the poll_pipe
 * - create the should_quit pipe (for signal handler)
 * - create the thread pipe (for splice)
 *
 * Takes a function pointer as argument, this function is called when data is
 * available on a buffer. This function is responsible to do the
 * kernctl_get_next_subbuf, read the data with mmap or splice depending on the
 * buffer configuration and then kernctl_put_next_subbuf at the end.
 *
 * Returns a pointer to the new context or NULL on error.
 */
struct lttng_consumer_local_data *lttng_consumer_create(
		enum lttng_consumer_type type,
		ssize_t (*buffer_ready)(struct lttng_consumer_stream *stream,
			struct lttng_consumer_local_data *ctx),
		int (*recv_channel)(struct lttng_consumer_channel *channel),
		int (*recv_stream)(struct lttng_consumer_stream *stream),
		int (*update_stream)(uint64_t stream_key, uint32_t state))
{
	int ret;
	struct lttng_consumer_local_data *ctx;

	assert(consumer_data.type == LTTNG_CONSUMER_UNKNOWN ||
			consumer_data.type == type);
	consumer_data.type = type;

	ctx = zmalloc(sizeof(struct lttng_consumer_local_data));
	if (ctx == NULL) {
		PERROR("allocating context");
		goto error;
	}

	ctx->consumer_error_socket = -1;
	ctx->consumer_metadata_socket = -1;
	pthread_mutex_init(&ctx->metadata_socket_lock, NULL);
	/* assign the callbacks */
	ctx->on_buffer_ready = buffer_ready;
	ctx->on_recv_channel = recv_channel;
	ctx->on_recv_stream = recv_stream;
	ctx->on_update_stream = update_stream;

	ctx->consumer_data_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_data_pipe) {
		goto error_poll_pipe;
	}

	ctx->consumer_wakeup_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_wakeup_pipe) {
		goto error_wakeup_pipe;
	}

	ctx->consumer_data_rotate_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_data_rotate_pipe) {
		goto error_data_rotate_pipe;
	}

	ctx->consumer_metadata_rotate_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_metadata_rotate_pipe) {
		goto error_metadata_rotate_pipe;
	}

	ret = pipe(ctx->consumer_should_quit);
	if (ret < 0) {
		PERROR("Error creating recv pipe");
		goto error_quit_pipe;
	}

	ret = pipe(ctx->consumer_channel_pipe);
	if (ret < 0) {
		PERROR("Error creating channel pipe");
		goto error_channel_pipe;
	}

	ctx->consumer_metadata_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_metadata_pipe) {
		goto error_metadata_pipe;
	}

	ctx->channel_monitor_pipe = -1;

	return ctx;

error_metadata_pipe:
	utils_close_pipe(ctx->consumer_channel_pipe);
error_channel_pipe:
	utils_close_pipe(ctx->consumer_should_quit);
error_quit_pipe:
	lttng_pipe_destroy(ctx->consumer_metadata_rotate_pipe);
error_metadata_rotate_pipe:
	lttng_pipe_destroy(ctx->consumer_data_rotate_pipe);
error_data_rotate_pipe:
	lttng_pipe_destroy(ctx->consumer_wakeup_pipe);
error_wakeup_pipe:
	lttng_pipe_destroy(ctx->consumer_data_pipe);
error_poll_pipe:
	free(ctx);
error:
	return NULL;
}
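/*
 * Usage sketch (illustrative; the callback names below are placeholders, not
 * the real consumerd wiring):
 *
 *	ctx = lttng_consumer_create(LTTNG_CONSUMER_KERNEL,
 *			my_read_subbuffer_cb, my_recv_channel_cb,
 *			my_recv_stream_cb, my_update_stream_cb);
 *	if (!ctx) {
 *		return -1;
 *	}
 */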
/*
 * Iterate over all streams of the hashtable and free them properly.
 */
static void destroy_data_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any error
		 * can't be handled.
		 */
		(void) consumer_del_stream(stream, ht);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}

/*
 * Iterate over all streams of the metadata hashtable and free them
 * properly.
 */
static void destroy_metadata_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any error
		 * can't be handled.
		 */
		(void) consumer_del_metadata_stream(stream, ht);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}
/*
 * Close all fds associated with the instance and free the context.
 */
void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx)
{
	int ret;

	DBG("Consumer destroying it. Closing everything.");

	destroy_data_stream_ht(data_ht);
	destroy_metadata_stream_ht(metadata_ht);

	ret = close(ctx->consumer_error_socket);
	if (ret) {
		PERROR("close");
	}
	ret = close(ctx->consumer_metadata_socket);
	if (ret) {
		PERROR("close");
	}
	utils_close_pipe(ctx->consumer_channel_pipe);
	lttng_pipe_destroy(ctx->consumer_data_pipe);
	lttng_pipe_destroy(ctx->consumer_metadata_pipe);
	lttng_pipe_destroy(ctx->consumer_wakeup_pipe);
	lttng_pipe_destroy(ctx->consumer_data_rotate_pipe);
	lttng_pipe_destroy(ctx->consumer_metadata_rotate_pipe);
	utils_close_pipe(ctx->consumer_should_quit);

	unlink(ctx->consumer_command_sock_path);
	free(ctx);
}
/*
 * Write the metadata stream id on the specified file descriptor.
 */
static int write_relayd_metadata_id(int fd,
		struct lttng_consumer_stream *stream,
		struct consumer_relayd_sock_pair *relayd, unsigned long padding)
{
	ssize_t ret;
	struct lttcomm_relayd_metadata_payload hdr;

	hdr.stream_id = htobe64(stream->relayd_stream_id);
	hdr.padding_size = htobe32(padding);
	ret = lttng_write(fd, (void *) &hdr, sizeof(hdr));
	if (ret < sizeof(hdr)) {
		/*
		 * This error means that the fd's end is closed so ignore the PERROR
		 * not to clobber the error output since this can happen in a normal
		 * code path.
		 */
		if (errno != EPIPE) {
			PERROR("write metadata stream id");
		}
		DBG3("Consumer failed to write relayd metadata id (errno: %d)", errno);
		/*
		 * Set ret to a negative value because if ret != sizeof(hdr), we don't
		 * handle writing the missing part so report that as an error and
		 * don't lie to the caller.
		 */
		ret = -1;
		goto end;
	}
	DBG("Metadata stream id %" PRIu64 " with padding %lu written before data",
			stream->relayd_stream_id, padding);

end:
	return (int) ret;
}
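/*
 * Wire format sketch: when metadata goes to a relayd, each chunk written on
 * the data path is preceded by a struct lttcomm_relayd_metadata_payload
 * header, so the byte stream looks like
 *
 *	[ stream_id (be64) | padding_size (be32) ] [ metadata bytes ... ]
 *
 * which is why callers add sizeof(struct lttcomm_relayd_metadata_payload) to
 * the announced length before sending the payload itself.
 */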
/*
 * Mmap the ring buffer, read it and write the data to the tracefile. This is a
 * core function for writing trace buffers to either the local filesystem or
 * the network.
 *
 * It must be called with the stream lock held.
 *
 * Careful review MUST be put if any changes occur!
 *
 * Returns the number of bytes written
 */
ssize_t lttng_consumer_on_read_subbuffer_mmap(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len,
		unsigned long padding,
		struct ctf_packet_index *index)
{
	unsigned long mmap_offset;
	char *mmap_base;
	ssize_t ret = 0;
	off_t orig_offset = stream->out_fd_offset;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	unsigned int relayd_hang_up = 0;

	/* RCU lock for the relayd pointer */
	rcu_read_lock();

	/* Detect if the current stream is set for network streaming. */
	if (stream->net_seq_idx != (uint64_t) -1ULL) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			ret = -EPIPE;
			goto end;
		}
	}

	/* get the offset inside the fd to mmap */
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		mmap_base = stream->mmap_base;
		ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset);
		if (ret < 0) {
			PERROR("tracer ctl get_mmap_read_offset");
			goto end;
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		mmap_base = lttng_ustctl_get_mmap_base(stream);
		if (!mmap_base) {
			ERR("read mmap get mmap base for stream %s", stream->name);
			ret = -EPERM;
			goto end;
		}
		ret = lttng_ustctl_get_mmap_read_offset(stream, &mmap_offset);
		if (ret != 0) {
			PERROR("tracer ctl get_mmap_read_offset");
			goto end;
		}
		break;
	default:
		ERR("Unknown consumer_data type");
		goto end;
	}

	/* Handle stream on the relayd if the output is on the network */
	if (relayd) {
		unsigned long netlen = len;

		/*
		 * Lock the control socket for the complete duration of the function
		 * since from this point on we will use the socket.
		 */
		if (stream->metadata_flag) {
			/* Metadata requires the control socket. */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			if (stream->reset_metadata_flag) {
				ret = relayd_reset_metadata(&relayd->control_sock,
						stream->relayd_stream_id,
						stream->metadata_version);
				if (ret < 0) {
					relayd_hang_up = 1;
					goto write_error;
				}
				stream->reset_metadata_flag = 0;
			}
			netlen += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, netlen, padding, relayd);
		if (ret < 0) {
			relayd_hang_up = 1;
			goto write_error;
		}
		/* Use the returned socket. */
		outfd = ret;

		/* Write metadata stream id before payload */
		if (stream->metadata_flag) {
			ret = write_relayd_metadata_id(outfd, stream, relayd, padding);
			if (ret < 0) {
				relayd_hang_up = 1;
				goto write_error;
			}
		}
	} else {
		/* No streaming, we have to set the len with the full padding */
		len += padding;

		if (stream->metadata_flag && stream->reset_metadata_flag) {
			ret = utils_truncate_stream_file(stream->out_fd, 0);
			if (ret < 0) {
				ERR("Reset metadata file");
				goto end;
			}
			stream->reset_metadata_flag = 0;
		}

		/*
		 * Check if we need to change the tracefile before writing the packet.
		 */
		if (stream->chan->tracefile_size > 0 &&
				(stream->tracefile_size_current + len) >
				stream->chan->tracefile_size) {
			ret = utils_rotate_stream_file(stream->chan->pathname,
					stream->name, stream->chan->tracefile_size,
					stream->chan->tracefile_count, stream->uid, stream->gid,
					stream->out_fd, &(stream->tracefile_count_current),
					&stream->out_fd);
			if (ret < 0) {
				ERR("Rotating output file");
				goto end;
			}
			outfd = stream->out_fd;

			if (stream->index_file) {
				lttng_index_file_put(stream->index_file);
				stream->index_file = lttng_index_file_create(stream->chan->pathname,
						stream->name, stream->uid, stream->gid,
						stream->chan->tracefile_size,
						stream->tracefile_count_current,
						CTF_INDEX_MAJOR, CTF_INDEX_MINOR);
				if (!stream->index_file) {
					goto end;
				}
			}

			/* Reset current size because we just perform a rotation. */
			stream->tracefile_size_current = 0;
			stream->out_fd_offset = 0;
			orig_offset = 0;
		}
		stream->tracefile_size_current += len;
		index->offset = htobe64(stream->out_fd_offset);
	}

	/*
	 * This call guarantees that len or less is returned. It's impossible to
	 * receive a ret value that is bigger than len.
	 */
	ret = lttng_write(outfd, mmap_base + mmap_offset, len);
	DBG("Consumer mmap write() ret %zd (len %lu)", ret, len);
	if (ret < 0 || ((size_t) ret != len)) {
		/*
		 * Report error to caller if nothing was written else at least send the
		 * amount written.
		 */
		if (ret < 0) {
			ret = -errno;
		}
		relayd_hang_up = 1;

		/* Socket operation failed. We consider the relayd dead */
		if (errno == EPIPE || errno == EINVAL || errno == EBADF) {
			/*
			 * This is possible if the fd is closed on the other side
			 * (outfd) or any write problem. It can be verbose a bit for a
			 * normal execution if for instance the relayd is stopped
			 * abruptly. This can happen so set this to a DBG statement.
			 */
			DBG("Consumer mmap write detected relayd hang up");
		} else {
			/* Unhandled error, print it and stop function right now. */
			PERROR("Error in write mmap (ret %zd != len %lu)", ret, len);
		}
		goto write_error;
	}
	stream->output_written += ret;

	/* This call is useless on a socket so better save a syscall. */
	if (!relayd) {
		/* This won't block, but will start writeout asynchronously */
		lttng_sync_file_range(outfd, stream->out_fd_offset, len,
				SYNC_FILE_RANGE_WRITE);
		stream->out_fd_offset += len;
		lttng_consumer_sync_trace_file(stream, orig_offset);
	}

write_error:
	/*
	 * This is a special case that the relayd has closed its socket. Let's
	 * cleanup the relayd object and all associated streams.
	 */
	if (relayd && relayd_hang_up) {
		cleanup_relayd(relayd, ctx);
	}

end:
	/* Unlock only if ctrl socket used */
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return ret;
}
/*
 * Splice the data from the ring buffer to the tracefile.
 *
 * It must be called with the stream lock held.
 *
 * Returns the number of bytes spliced.
 */
ssize_t lttng_consumer_on_read_subbuffer_splice(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len,
		unsigned long padding,
		struct ctf_packet_index *index)
{
	ssize_t ret = 0, written = 0, ret_splice = 0;
	loff_t offset = 0;
	off_t orig_offset = stream->out_fd_offset;
	int fd = stream->wait_fd;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	int *splice_pipe;
	unsigned int relayd_hang_up = 0;

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/* Not supported for user space tracing */
		return -ENOSYS;
	default:
		ERR("Unknown consumer_data type");
		return -ENOSYS;
	}

	/* RCU lock for the relayd pointer */
	rcu_read_lock();

	/* Detect if the current stream is set for network streaming. */
	if (stream->net_seq_idx != (uint64_t) -1ULL) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			written = -ret;
			goto end;
		}
	}
	splice_pipe = stream->splice_pipe;

	/* Write metadata stream id before payload */
	if (relayd) {
		unsigned long total_len = len;

		if (stream->metadata_flag) {
			/*
			 * Lock the control socket for the complete duration of the function
			 * since from this point on we will use the socket.
			 */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);

			if (stream->reset_metadata_flag) {
				ret = relayd_reset_metadata(&relayd->control_sock,
						stream->relayd_stream_id,
						stream->metadata_version);
				if (ret < 0) {
					relayd_hang_up = 1;
					goto write_error;
				}
				stream->reset_metadata_flag = 0;
			}
			ret = write_relayd_metadata_id(splice_pipe[1], stream, relayd,
					padding);
			if (ret < 0) {
				written = ret;
				relayd_hang_up = 1;
				goto write_error;
			}

			total_len += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, total_len, padding, relayd);
		if (ret < 0) {
			written = ret;
			relayd_hang_up = 1;
			goto write_error;
		}
		/* Use the returned socket. */
		outfd = ret;
	} else {
		/* No streaming, we have to set the len with the full padding */
		len += padding;

		if (stream->metadata_flag && stream->reset_metadata_flag) {
			ret = utils_truncate_stream_file(stream->out_fd, 0);
			if (ret < 0) {
				ERR("Reset metadata file");
				goto end;
			}
			stream->reset_metadata_flag = 0;
		}
		/*
		 * Check if we need to change the tracefile before writing the packet.
		 */
		if (stream->chan->tracefile_size > 0 &&
				(stream->tracefile_size_current + len) >
				stream->chan->tracefile_size) {
			ret = utils_rotate_stream_file(stream->chan->pathname,
					stream->name, stream->chan->tracefile_size,
					stream->chan->tracefile_count, stream->uid, stream->gid,
					stream->out_fd, &(stream->tracefile_count_current),
					&stream->out_fd);
			if (ret < 0) {
				written = ret;
				ERR("Rotating output file");
				goto end;
			}
			outfd = stream->out_fd;

			if (stream->index_file) {
				lttng_index_file_put(stream->index_file);
				stream->index_file = lttng_index_file_create(stream->chan->pathname,
						stream->name, stream->uid, stream->gid,
						stream->chan->tracefile_size,
						stream->tracefile_count_current,
						CTF_INDEX_MAJOR, CTF_INDEX_MINOR);
				if (!stream->index_file) {
					goto end;
				}
			}

			/* Reset current size because we just perform a rotation. */
			stream->tracefile_size_current = 0;
			stream->out_fd_offset = 0;
			orig_offset = 0;
		}
		stream->tracefile_size_current += len;
		index->offset = htobe64(stream->out_fd_offset);
	}

	while (len > 0) {
		DBG("splice chan to pipe offset %lu of len %lu (fd : %d, pipe: %d)",
				(unsigned long)offset, len, fd, splice_pipe[1]);
		ret_splice = splice(fd, &offset, splice_pipe[1], NULL, len,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice chan to pipe, ret %zd", ret_splice);
		if (ret_splice < 0) {
			ret = errno;
			written = -ret;
			PERROR("Error in relay splice");
			goto splice_error;
		}

		/* Handle stream on the relayd if the output is on the network */
		if (relayd && stream->metadata_flag) {
			size_t metadata_payload_size =
				sizeof(struct lttcomm_relayd_metadata_payload);

			/* Update counter to fit the spliced data */
			ret_splice += metadata_payload_size;
			len += metadata_payload_size;
			/*
			 * We do this so the return value can match the len passed as
			 * argument to this function.
			 */
			written -= metadata_payload_size;
		}

		/* Splice data out */
		ret_splice = splice(splice_pipe[0], NULL, outfd, NULL,
				ret_splice, SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("Consumer splice pipe to file (out_fd: %d), ret %zd",
				outfd, ret_splice);
		if (ret_splice < 0) {
			ret = errno;
			written = -ret;
			relayd_hang_up = 1;
			goto write_error;
		} else if (ret_splice > len) {
			/*
			 * We don't expect this code path to be executed but you never know
			 * so this is an extra protection against a buggy splice().
			 */
			ret = errno;
			written += ret_splice;
			PERROR("Wrote more data than requested %zd (len: %lu)", ret_splice,
					len);
			goto splice_error;
		} else {
			/* All good, update current len and continue. */
			len -= ret_splice;
		}

		/* This call is useless on a socket so better save a syscall. */
		if (!relayd) {
			/* This won't block, but will start writeout asynchronously */
			lttng_sync_file_range(outfd, stream->out_fd_offset, ret_splice,
					SYNC_FILE_RANGE_WRITE);
			stream->out_fd_offset += ret_splice;
		}
		stream->output_written += ret_splice;
		written += ret_splice;
	}
	if (!relayd) {
		lttng_consumer_sync_trace_file(stream, orig_offset);
	}
	goto end;

write_error:
	/*
	 * This is a special case that the relayd has closed its socket. Let's
	 * cleanup the relayd object and all associated streams.
	 */
	if (relayd && relayd_hang_up) {
		cleanup_relayd(relayd, ctx);
		/* Skip splice error so the consumer does not fail */
		goto end;
	}

splice_error:
	/* send the appropriate error description to sessiond */
	switch (ret) {
	case EINVAL:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_EINVAL);
		break;
	case ENOMEM:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ENOMEM);
		break;
	case ESPIPE:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ESPIPE);
		break;
	}

end:
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return written;
}
/*
 * Sample the snapshot positions for a specific fd
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_sample_snapshot_positions(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_sample_snapshot_positions(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_sample_snapshot_positions(stream);
	default:
		ERR("Unknown consumer_data type");
		return -ENOSYS;
	}
}

/*
 * Take a snapshot for a specific fd
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_take_snapshot(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_take_snapshot(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_take_snapshot(stream);
	default:
		ERR("Unknown consumer_data type");
		return -ENOSYS;
	}
}

/*
 * Get the produced position
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_get_produced_snapshot(struct lttng_consumer_stream *stream,
		unsigned long *pos)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_get_produced_snapshot(stream, pos);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_get_produced_snapshot(stream, pos);
	default:
		ERR("Unknown consumer_data type");
		return -ENOSYS;
	}
}

/*
 * Get the consumed position
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_get_consumed_snapshot(struct lttng_consumer_stream *stream,
		unsigned long *pos)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_get_consumed_snapshot(stream, pos);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_get_consumed_snapshot(stream, pos);
	default:
		ERR("Unknown consumer_data type");
		return -ENOSYS;
	}
}
int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx,
		int sock, struct pollfd *consumer_sockpoll)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	default:
		ERR("Unknown consumer_data type");
		return -ENOSYS;
	}
}
void lttng_consumer_close_all_metadata(void)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		/*
		 * The Kernel consumer has a different metadata scheme so we don't
		 * close anything because the stream will be closed by the session
		 * daemon.
		 */
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/*
		 * Close all metadata streams. The metadata hash table is passed and
		 * this call iterates over it by closing all wakeup fd. This is safe
		 * because at this point we are sure that the metadata producer is
		 * either dead or blocked.
		 */
		lttng_ustconsumer_close_all_metadata(metadata_ht);
		break;
	default:
		ERR("Unknown consumer_data type");
		break;
	}
}
/*
 * Clean up a metadata stream and free its memory.
 */
void consumer_del_metadata_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	struct lttng_consumer_channel *free_chan = NULL;

	/*
	 * This call should NEVER receive regular stream. It must always be
	 * metadata stream and this is crucial for data structure synchronization.
	 */
	assert(stream->metadata_flag);

	DBG3("Consumer delete metadata stream %d", stream->wait_fd);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->lock);
	if (stream->chan->metadata_cache) {
		/* Only applicable to userspace consumers. */
		pthread_mutex_lock(&stream->chan->metadata_cache->lock);
	}

	/* Remove any reference to that stream. */
	consumer_stream_delete(stream, ht);

	/* Close down everything including the relayd if one. */
	consumer_stream_close(stream);
	/* Destroy tracer buffers of the stream. */
	consumer_stream_destroy_buffers(stream);

	/* Atomically decrement channel refcount since other threads can use it. */
	if (!uatomic_sub_return(&stream->chan->refcount, 1)
			&& !uatomic_read(&stream->chan->nb_init_stream_left)) {
		/* Go for channel deletion! */
		free_chan = stream->chan;
	}

	/*
	 * Nullify the stream reference so it is not used after deletion. The
	 * channel lock MUST be acquired before being able to check for a NULL
	 * pointer value.
	 */
	stream->chan->metadata_stream = NULL;

	if (stream->chan->metadata_cache) {
		pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
	}
	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	if (free_chan) {
		consumer_del_channel(free_chan);
	}

	consumer_stream_free(stream);
}
/*
 * Action done with the metadata stream when adding it to the consumer internal
 * data structures to handle it.
 */
int consumer_add_metadata_stream(struct lttng_consumer_stream *stream)
{
	struct lttng_ht *ht = metadata_ht;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;

	DBG3("Adding metadata stream %" PRIu64 " to hash table", stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->chan->timer_lock);
	pthread_mutex_lock(&stream->lock);

	/*
	 * From here, refcounts are updated so be _careful_ when returning an error
	 * after this point.
	 */

	rcu_read_lock();

	/*
	 * Lookup the stream just to make sure it does not exist in our internal
	 * state. This should NEVER happen.
	 */
	lttng_ht_lookup(ht, &stream->key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	assert(!node);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any action
	 * in terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of this
	 * stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		/* Increment refcount before decrementing nb_init_stream_left */
		uatomic_inc(&stream->chan->refcount);
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_unique_u64(consumer_data.stream_per_chan_id_ht,
			&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to steal
	 * the key since the HT does not use it and we allow to add redundant keys
	 * into this table.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	rcu_read_unlock();

	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&stream->chan->timer_lock);
	pthread_mutex_unlock(&consumer_data.lock);

	return 0;
}
/*
 * Delete data streams that are flagged for deletion (endpoint_status).
 */
static void validate_endpoint_status_data_stream(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer delete flagged data stream");

	rcu_read_lock();
	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		/* Validate delete flag of the stream */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
			continue;
		}
		/* Delete it right now */
		consumer_del_stream(stream, data_ht);
	}
	rcu_read_unlock();
}
/*
 * Delete metadata streams that are flagged for deletion (endpoint_status).
 */
static void validate_endpoint_status_metadata_stream(
		struct lttng_poll_event *pollset)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer delete flagged metadata stream");

	rcu_read_lock();
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		/* Validate delete flag of the stream */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
			continue;
		}
		/*
		 * Remove from pollset so the metadata thread can continue without
		 * blocking on a deleted stream.
		 */
		lttng_poll_del(pollset, stream->wait_fd);

		/* Delete it right now */
		consumer_del_metadata_stream(stream, metadata_ht);
	}
	rcu_read_unlock();
}
int rotate_notify_sessiond(struct lttng_consumer_local_data *ctx,
		uint64_t key)
{
	int ret;

	do {
		ret = write(ctx->channel_rotate_pipe, &key, sizeof(key));
	} while (ret == -1 && errno == EINTR);
	if (ret == -1) {
		PERROR("write to the channel rotate pipe");
	} else {
		DBG("Sent channel rotation notification for channel key %"
				PRIu64, key);
	}

	return ret;
}
/*
 * Perform operations that need to be done after a stream has
 * rotated and released the stream lock.
 *
 * Multiple rotations cannot occur simultaneously, so we know the state of the
 * "rotated" stream flag cannot change.
 *
 * This MUST be called WITHOUT the stream lock held.
 */
int consumer_post_rotation(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;

	if (!stream->rotated) {
		return ret;
	}

	pthread_mutex_lock(&stream->chan->lock);
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/*
		 * The ust_metadata_pushed counter has been reset to 0, so now
		 * we can wakeup the metadata thread so it dumps the metadata
		 * cache to the new file.
		 */
		if (stream->metadata_flag) {
			consumer_metadata_wakeup_pipe(stream->chan);
		}
		break;
	default:
		ERR("Unknown consumer_data type");
		break;
	}

	if (--stream->chan->nr_stream_rotate_pending == 0) {
		ret = rotate_notify_sessiond(ctx, stream->chan->key);
	}
	pthread_mutex_unlock(&stream->chan->lock);
	stream->rotated = 0;

	return ret;
}
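/*
 * nr_stream_rotate_pending is decremented once per rotated stream of the
 * channel; only the stream that brings the counter to zero triggers
 * rotate_notify_sessiond(), so a whole-channel rotation results in a single
 * notification to the session daemon.
 */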
int handle_rotate_wakeup_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_pipe *stream_pipe)
{
	int ret;
	ssize_t pipe_len;
	struct lttng_consumer_stream *stream;

	pipe_len = lttng_pipe_read(stream_pipe, &stream, sizeof(stream));
	if (pipe_len < sizeof(stream)) {
		if (pipe_len < 0) {
			PERROR("read metadata stream");
		}
		ERR("Failed to read stream on metadata rotate pipe");
		return -1;
	}

	pthread_mutex_lock(&stream->lock);
	ret = lttng_consumer_rotate_stream(ctx, stream);
	pthread_mutex_unlock(&stream->lock);
	if (ret < 0) {
		ERR("Failed to rotate metadata stream");
		return ret;
	}

	ret = consumer_post_rotation(stream, ctx);
	if (ret < 0) {
		ERR("Failed after a rotation");
	}

	return ret;
}
/*
 * Thread polls on metadata file descriptors and writes them on disk or on the
 * network.
 */
void *consumer_thread_metadata_poll(void *data)
{
	int ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_consumer_stream *stream = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	ssize_t len;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_METADATA);

	if (testpoint(consumerd_thread_metadata)) {
		goto error_testpoint;
	}

	health_code_update();

	DBG("Thread metadata poll started");

	/* Size is set to 1 for the consumer_metadata pipe */
	ret = lttng_poll_create(&events, 3, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	ret = lttng_poll_add(&events,
			lttng_pipe_get_readfd(ctx->consumer_metadata_pipe), LPOLLIN);
	if (ret < 0) {
		goto end;
	}

	ret = lttng_poll_add(&events,
			lttng_pipe_get_readfd(ctx->consumer_metadata_rotate_pipe), LPOLLIN);
	if (ret < 0) {
		goto end;
	}

	DBG("Metadata main loop started");
	while (1) {
restart:
		health_code_update();
		health_poll_entry();
		DBG("Metadata poll wait");
		ret = lttng_poll_wait(&events, -1);
		DBG("Metadata poll return from wait with %d fd(s)",
				LTTNG_POLL_GETNB(&events));
		health_poll_exit();
		DBG("Metadata event caught in thread");
		if (ret < 0) {
			if (errno == EINTR) {
				ERR("Poll EINTR caught");
				continue;
			}
			if (LTTNG_POLL_GETNB(&events) == 0) {
				err = 0;	/* All is OK */
			}
			goto end;
		}

		nb_fd = ret;

		/* From here, the event is a metadata wait fd */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			if (pollfd == lttng_pipe_get_readfd(ctx->consumer_metadata_pipe)) {
				if (revents & LPOLLIN) {
					ssize_t pipe_len;

					pipe_len = lttng_pipe_read(ctx->consumer_metadata_pipe,
							&stream, sizeof(stream));
					if (pipe_len < sizeof(stream)) {
						if (pipe_len < 0) {
							PERROR("read metadata stream");
						}
						/*
						 * Remove the pipe from the poll set and continue the loop
						 * since there might be data to consume.
						 */
						lttng_poll_del(&events,
								lttng_pipe_get_readfd(ctx->consumer_metadata_pipe));
						lttng_pipe_read_close(ctx->consumer_metadata_pipe);
						continue;
					}

					/* A NULL stream means that the state has changed. */
					if (stream == NULL) {
						/* Check for deleted streams. */
						validate_endpoint_status_metadata_stream(&events);
						goto restart;
					}

					DBG("Adding metadata stream %d to poll set",
							stream->wait_fd);

					/* Add metadata stream to the global poll events list */
					lttng_poll_add(&events, stream->wait_fd,
							LPOLLIN | LPOLLPRI | LPOLLHUP);
				} else if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Metadata thread pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue the loop
					 * since there might be data to consume.
					 */
					lttng_poll_del(&events,
							lttng_pipe_get_readfd(ctx->consumer_metadata_pipe));
					lttng_pipe_read_close(ctx->consumer_metadata_pipe);
					continue;
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto end;
				}

				/* Handle other stream */
				continue;
			} else if (pollfd == lttng_pipe_get_readfd(
					ctx->consumer_metadata_rotate_pipe)) {
				if (revents & LPOLLIN) {
					ret = handle_rotate_wakeup_pipe(ctx,
							ctx->consumer_metadata_rotate_pipe);
					if (ret < 0) {
						ERR("Failed to rotate metadata stream");
						lttng_poll_del(&events,
								lttng_pipe_get_readfd(
									ctx->consumer_metadata_rotate_pipe));
						lttng_pipe_read_close(
								ctx->consumer_metadata_rotate_pipe);
						goto end;
					}
				} else if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Metadata rotate pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue the loop
					 * since there might be data to consume.
					 */
					lttng_poll_del(&events,
							lttng_pipe_get_readfd(
								ctx->consumer_metadata_rotate_pipe));
					lttng_pipe_read_close(ctx->consumer_metadata_rotate_pipe);
					continue;
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto end;
				}
				continue;
			}
			rcu_read_lock();
			{
				uint64_t tmp_id = (uint64_t) pollfd;

				lttng_ht_lookup(metadata_ht, &tmp_id, &iter);
			}
			node = lttng_ht_iter_get_node_u64(&iter);
			assert(node);

			stream = caa_container_of(node, struct lttng_consumer_stream,
					node);

			if (revents & (LPOLLIN | LPOLLPRI)) {
				/* Get the data out of the metadata file descriptor */
				DBG("Metadata available on fd %d", pollfd);
				assert(stream->wait_fd == pollfd);

				do {
					health_code_update();

					len = ctx->on_buffer_ready(stream, ctx);
					/*
					 * We don't check the return value here since if we get
					 * a negative len, it means an error occurred thus we
					 * simply remove it from the poll set and free the
					 * stream.
					 */
				} while (len > 0);

				/* It's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean up stream from consumer and free it. */
					lttng_poll_del(&events, stream->wait_fd);
					consumer_del_metadata_stream(stream, metadata_ht);
				}
			} else if (revents & (LPOLLERR | LPOLLHUP)) {
				DBG("Metadata fd %d is hup|err.", pollfd);
				if (!stream->hangup_flush_done
						&& (consumer_data.type == LTTNG_CONSUMER32_UST
							|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
					DBG("Attempting to flush and consume the UST buffers");
					lttng_ustconsumer_on_stream_hangup(stream);

					/* We just flushed the stream now read it. */
					do {
						health_code_update();

						len = ctx->on_buffer_ready(stream, ctx);
						/*
						 * We don't check the return value here since if we get
						 * a negative len, it means an error occurred thus we
						 * simply remove it from the poll set and free the
						 * stream.
						 */
					} while (len > 0);
				}

				lttng_poll_del(&events, stream->wait_fd);
				/*
				 * This call updates the channel states, closes file
				 * descriptors and securely frees the stream.
				 */
				consumer_del_metadata_stream(stream, metadata_ht);
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				rcu_read_unlock();
				goto end;
			}
			/* Release RCU lock for the stream looked up */
			rcu_read_unlock();
		}
	}

end:
	DBG("Metadata poll thread exiting");

	lttng_poll_clean(&events);
end_poll:
error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);
	rcu_unregister_thread();
	return NULL;
}
/*
 * This thread polls the fds in the set to consume the data and write
 * it to tracefile if necessary.
 */
void *consumer_thread_data_poll(void *data)
{
	int num_rdy, num_hup, high_prio, ret, i, err = -1;
	struct pollfd *pollfd = NULL;
	/* local view of the streams */
	struct lttng_consumer_stream **local_stream = NULL, *new_stream = NULL;
	/* local view of consumer_data.fds_count */
	int nb_fd = 0, nb_pipes_fd;
	struct lttng_consumer_local_data *ctx = data;
	ssize_t len;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_DATA);

	if (testpoint(consumerd_thread_data)) {
		goto error_testpoint;
	}

	health_code_update();

	local_stream = zmalloc(sizeof(struct lttng_consumer_stream *));
	if (local_stream == NULL) {
		PERROR("local_stream malloc");
		goto end;
	}

	while (1) {
		health_code_update();
		/*
		 * the fds set has been updated, we need to update our
		 * local array as well
		 */
		pthread_mutex_lock(&consumer_data.lock);
		if (consumer_data.need_update) {
			free(pollfd);
			pollfd = NULL;

			free(local_stream);
			local_stream = NULL;

			/*
			 * Allocate for all fds + 3:
			 *   +1 for the consumer_data_pipe
			 *   +1 for wake up pipe
			 *   +1 for consumer_data_rotate_pipe.
			 */
			nb_pipes_fd = 3;
			pollfd = zmalloc((consumer_data.stream_count + nb_pipes_fd) *
					sizeof(struct pollfd));
			if (pollfd == NULL) {
				PERROR("pollfd malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}

			local_stream = zmalloc((consumer_data.stream_count + nb_pipes_fd) *
					sizeof(struct lttng_consumer_stream *));
			if (local_stream == NULL) {
				PERROR("local_stream malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			ret = update_poll_array(ctx, &pollfd, local_stream, &nb_fd);
			if (ret < 0) {
				ERR("Error in allocating pollfd or local_outfds");
				lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			consumer_data.need_update = 0;
		}
		pthread_mutex_unlock(&consumer_data.lock);
		/* No FDs and consumer_quit, consumer_cleanup the thread */
		if (nb_fd == 0 && CMM_LOAD_SHARED(consumer_quit) == 1) {
			err = 0;	/* All is OK */
			goto end;
		}
		/* poll on the array of fds */
		DBG("polling on %d fd", nb_fd + nb_pipes_fd);
		if (testpoint(consumerd_thread_data_poll)) {
			goto end;
		}
		health_poll_entry();
		num_rdy = poll(pollfd, nb_fd + nb_pipes_fd, -1);
		health_poll_exit();
		DBG("poll num_rdy : %d", num_rdy);
		if (num_rdy == -1) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				continue;
			}
			PERROR("Poll error");
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
			goto end;
		} else if (num_rdy == 0) {
			DBG("Polling thread timed out");
			goto end;
		}

		if (caa_unlikely(data_consumption_paused)) {
			DBG("Data consumption paused, sleeping...");
			sleep(1);
			continue;
		}
		/*
		 * If the consumer_data_pipe triggered poll go directly to the
		 * beginning of the loop to update the array. We want to prioritize
		 * array update over low-priority reads.
		 */
		if (pollfd[nb_fd].revents & (POLLIN | POLLPRI)) {
			ssize_t pipe_readlen;

			DBG("consumer_data_pipe wake up");
			pipe_readlen = lttng_pipe_read(ctx->consumer_data_pipe,
					&new_stream, sizeof(new_stream));
			if (pipe_readlen < sizeof(new_stream)) {
				PERROR("Consumer data pipe");
				/* Continue so we can at least handle the current stream(s). */
				continue;
			}

			/*
			 * If the stream is NULL, just ignore it. It's also possible that
			 * the sessiond poll thread changed the consumer_quit state and is
			 * waking us up to test it.
			 */
			if (new_stream == NULL) {
				validate_endpoint_status_data_stream();
				continue;
			}

			/* Continue to update the local streams and handle prio ones */
			continue;
		}

		/* Handle wakeup pipe. */
		if (pollfd[nb_fd + 1].revents & (POLLIN | POLLPRI)) {
			char dummy;
			ssize_t pipe_readlen;

			pipe_readlen = lttng_pipe_read(ctx->consumer_wakeup_pipe, &dummy,
					sizeof(dummy));
			if (pipe_readlen < 0) {
				PERROR("Consumer data wakeup pipe");
			}
			/* We've been awakened to handle stream(s). */
			ctx->has_wakeup = 0;
		}

		/* Handle consumer_data_rotate_pipe. */
		if (pollfd[nb_fd + 2].revents & (POLLIN | POLLPRI)) {
			ret = handle_rotate_wakeup_pipe(ctx,
					ctx->consumer_data_rotate_pipe);
			if (ret < 0) {
				ERR("Failed to rotate metadata stream");
				goto end;
			}
		}
		high_prio = 0;

		/* Take care of high priority channels first. */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if (pollfd[i].revents & POLLPRI) {
				DBG("Urgent read on fd %d", pollfd[i].fd);
				high_prio = 1;
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* it's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean the stream and free it. */
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/*
		 * If we read high prio channel in this loop, try again
		 * for more high prio data.
		 */
		if (high_prio) {
			continue;
		}
		/* Take care of low priority channels. */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if ((pollfd[i].revents & POLLIN) ||
					local_stream[i]->hangup_flush_done ||
					local_stream[i]->has_data) {
				DBG("Normal read on fd %d", pollfd[i].fd);
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* it's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean the stream and free it. */
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}
		/* Handle hangup and errors */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if (!local_stream[i]->hangup_flush_done
					&& (pollfd[i].revents & (POLLHUP | POLLERR | POLLNVAL))
					&& (consumer_data.type == LTTNG_CONSUMER32_UST
						|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
				DBG("fd %d is hup|err|nval. Attempting flush and read.",
						pollfd[i].fd);
				lttng_ustconsumer_on_stream_hangup(local_stream[i]);
				/* Attempt read again, for the data we just flushed. */
				local_stream[i]->data_read = 1;
			}
			/*
			 * If the poll flag is HUP/ERR/NVAL and we have
			 * read no data in this pass, we can remove the
			 * stream from its hash table.
			 */
			if ((pollfd[i].revents & POLLHUP)) {
				DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				}
			} else if (pollfd[i].revents & POLLERR) {
				ERR("Error returned in polling fd %d.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				}
			} else if (pollfd[i].revents & POLLNVAL) {
				ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				}
			}
			if (local_stream[i] != NULL) {
				local_stream[i]->data_read = 0;
			}
		}
	}
2933 DBG("polling thread exiting");
2938 * Close the write side of the pipe so epoll_wait() in
2939 * consumer_thread_metadata_poll can catch it. The thread is monitoring the
2940 * read side of the pipe. If we close them both, epoll_wait strangely does
2941 * not return and could create a endless wait period if the pipe is the
2942 * only tracked fd in the poll set. The thread will take care of closing
2945 (void) lttng_pipe_write_close(ctx
->consumer_metadata_pipe
);
2946 (void) lttng_pipe_write_close(ctx
->consumer_metadata_rotate_pipe
);
2951 ERR("Health error occurred in %s", __func__
);
2953 health_unregister(health_consumerd
);
2955 rcu_unregister_thread();
/*
 * Close wake-up end of each stream belonging to the channel. This will
 * allow the poll() on the stream read-side to detect when the
 * write-side (application) finally closes them.
 */
void consumer_close_channel_streams(struct lttng_consumer_channel *channel)
{
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct lttng_ht_iter iter;

	ht = consumer_data.stream_per_chan_id_ht;

	rcu_read_lock();
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed),
			ht->match_fct, &channel->key,
			&iter.iter, stream, node_channel_id.node) {
		/*
		 * Protect against teardown with mutex.
		 */
		pthread_mutex_lock(&stream->lock);
		if (cds_lfht_is_node_deleted(&stream->node.node)) {
			goto next;
		}
		switch (consumer_data.type) {
		case LTTNG_CONSUMER_KERNEL:
			break;
		case LTTNG_CONSUMER32_UST:
		case LTTNG_CONSUMER64_UST:
			if (stream->metadata_flag) {
				/* Safe and protected by the stream lock. */
				lttng_ustconsumer_close_metadata(stream->chan);
			} else {
				/*
				 * Note: a mutex is taken internally within
				 * liblttng-ust-ctl to protect timer wakeup_fd
				 * use from concurrent close.
				 */
				lttng_ustconsumer_close_stream_wakeup(stream);
			}
			break;
		default:
			ERR("Unknown consumer_data type");
			break;
		}
	next:
		pthread_mutex_unlock(&stream->lock);
	}
	rcu_read_unlock();
}
static void destroy_channel_ht(struct lttng_ht *ht)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, channel, wait_fd_node.node) {
		ret = lttng_ht_del(ht, &iter);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}
/*
 * This thread polls the channel fds to detect when they are being
 * closed. It closes all related streams if the channel is detected as
 * closed. It is currently only used as a shim layer for UST because the
 * consumerd needs to keep the per-stream wakeup end of pipes open for
 * periodical flush.
 */
void *consumer_thread_channel_poll(void *data)
{
	int ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_consumer_channel *chan = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	struct lttng_ht *channel_ht;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_CHANNEL);

	if (testpoint(consumerd_thread_channel)) {
		goto error_testpoint;
	}

	health_code_update();

	channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!channel_ht) {
		/* ENOMEM at this point. Better to bail out. */
		goto end_ht;
	}

	DBG("Thread channel poll started");

	/* Size is set to 1 for the consumer_channel pipe */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	ret = lttng_poll_add(&events, ctx->consumer_channel_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto end;
	}

	DBG("Channel main loop started");
	while (1) {
		health_code_update();
		DBG("Channel poll wait");
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		DBG("Channel poll return from wait with %d fd(s)",
				LTTNG_POLL_GETNB(&events));
		health_poll_exit();
		DBG("Channel event caught in thread");
		if (ret < 0) {
			if (errno == EINTR) {
				ERR("Poll EINTR caught");
				continue;
			}
			if (LTTNG_POLL_GETNB(&events) == 0) {
				err = 0;	/* All is OK */
			}
			goto end;
		}

		nb_fd = ret;

		/* From here, the event is a channel wait fd */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);
			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			if (pollfd == ctx->consumer_channel_pipe[0]) {
				if (revents & LPOLLIN) {
					enum consumer_channel_action action;
					uint64_t key;

					ret = read_channel_pipe(ctx, &chan, &key, &action);
					if (ret < 0) {
						ERR("Error reading channel pipe");
						lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
						continue;
					}

					switch (action) {
					case CONSUMER_CHANNEL_ADD:
						DBG("Adding channel %d to poll set",
								chan->wait_fd);

						lttng_ht_node_init_u64(&chan->wait_fd_node,
								chan->wait_fd);
						rcu_read_lock();
						lttng_ht_add_unique_u64(channel_ht,
								&chan->wait_fd_node);
						rcu_read_unlock();
						/* Add channel to the global poll events list */
						lttng_poll_add(&events, chan->wait_fd,
								LPOLLERR | LPOLLHUP);
						break;
					case CONSUMER_CHANNEL_DEL:
					{
						/*
						 * This command should never be called if the channel
						 * has streams monitored by either the data or metadata
						 * thread. The consumer only notifies this thread with a
						 * channel del. command if it receives a destroy
						 * channel command from the session daemon, which sends
						 * it if a command prior to the GET_CHANNEL failed.
						 */
						chan = consumer_find_channel(key);
						if (!chan) {
							ERR("UST consumer get channel key %" PRIu64
									" not found for del channel", key);
							break;
						}
						lttng_poll_del(&events, chan->wait_fd);
						iter.iter.node = &chan->wait_fd_node.node;
						ret = lttng_ht_del(channel_ht, &iter);

						switch (consumer_data.type) {
						case LTTNG_CONSUMER_KERNEL:
							break;
						case LTTNG_CONSUMER32_UST:
						case LTTNG_CONSUMER64_UST:
							health_code_update();
							/* Destroy streams that might have been left in the stream list. */
							clean_channel_stream_list(chan);
							break;
						default:
							ERR("Unknown consumer_data type");
							break;
						}

						/*
						 * Release our own refcount. Force channel deletion even if
						 * streams were not initialized.
						 */
						if (!uatomic_sub_return(&chan->refcount, 1)) {
							consumer_del_channel(chan);
						}
						break;
					}
					case CONSUMER_CHANNEL_QUIT:
						/*
						 * Remove the pipe from the poll set and continue the loop
						 * since there might be data to consume.
						 */
						lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
						continue;
					default:
						ERR("Unknown action");
						break;
					}
				} else if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Channel thread pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue the loop
					 * since there might be data to consume.
					 */
					lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
					continue;
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto end;
				}
				/* Handle other stream */
				continue;
			}

			rcu_read_lock();
			{
				uint64_t tmp_id = (uint64_t) pollfd;

				lttng_ht_lookup(channel_ht, &tmp_id, &iter);
			}
			node = lttng_ht_iter_get_node_u64(&iter);
			assert(node);

			chan = caa_container_of(node, struct lttng_consumer_channel,
					wait_fd_node);

			/* Check for error event */
			if (revents & (LPOLLERR | LPOLLHUP)) {
				DBG("Channel fd %d is hup|err.", pollfd);

				lttng_poll_del(&events, chan->wait_fd);
				ret = lttng_ht_del(channel_ht, &iter);

				/*
				 * This will close the wait fd for each stream associated to
				 * this channel AND monitored by the data/metadata thread thus
				 * will be cleaned up by the right thread.
				 */
				consumer_close_channel_streams(chan);

				/* Release our own refcount */
				if (!uatomic_sub_return(&chan->refcount, 1)
						&& !uatomic_read(&chan->nb_init_stream_left)) {
					consumer_del_channel(chan);
				}
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				rcu_read_unlock();
				goto end;
			}
			/* Release RCU lock for the channel looked up */
			rcu_read_unlock();
		}
	}

end:
	lttng_poll_clean(&events);
end_poll:
	destroy_channel_ht(channel_ht);
end_ht:
error_testpoint:
	DBG("Channel poll thread exiting");
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);
	rcu_unregister_thread();
	return NULL;
}
static int set_metadata_socket(struct lttng_consumer_local_data *ctx,
		struct pollfd *sockpoll, int client_socket)
{
	int ret;

	ret = lttng_consumer_poll_socket(sockpoll);
	if (ret < 0) {
		goto error;
	}
	DBG("Metadata connection on client_socket");

	/* Blocking call, waiting for transmission */
	ctx->consumer_metadata_socket = lttcomm_accept_unix_sock(client_socket);
	if (ctx->consumer_metadata_socket < 0) {
		WARN("On accept metadata");
		ret = -1;
		goto error;
	}
	ret = 0;

error:
	return ret;
}
/*
 * This thread listens on the consumerd socket and receives the file
 * descriptors from the session daemon.
 */
void *consumer_thread_sessiond_poll(void *data)
{
	int sock = -1, client_socket, ret, err = -1;
	/*
	 * structure to poll for incoming data on communication socket avoids
	 * making blocking sockets.
	 */
	struct pollfd consumer_sockpoll[2];
	struct lttng_consumer_local_data *ctx = data;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_SESSIOND);

	if (testpoint(consumerd_thread_sessiond)) {
		goto error_testpoint;
	}

	health_code_update();
3334 DBG("Creating command socket %s", ctx
->consumer_command_sock_path
);
3335 unlink(ctx
->consumer_command_sock_path
);
3336 client_socket
= lttcomm_create_unix_sock(ctx
->consumer_command_sock_path
);
3337 if (client_socket
< 0) {
3338 ERR("Cannot create command socket");
3342 ret
= lttcomm_listen_unix_sock(client_socket
);
3347 DBG("Sending ready command to lttng-sessiond");
3348 ret
= lttng_consumer_send_error(ctx
, LTTCOMM_CONSUMERD_COMMAND_SOCK_READY
);
3349 /* return < 0 on error, but == 0 is not fatal */
3351 ERR("Error sending ready command to lttng-sessiond");
	/* prepare the FDs to poll : to client socket and the should_quit pipe */
	consumer_sockpoll[0].fd = ctx->consumer_should_quit[0];
	consumer_sockpoll[0].events = POLLIN | POLLPRI;
	consumer_sockpoll[1].fd = client_socket;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	ret = lttng_consumer_poll_socket(consumer_sockpoll);
	if (ret) {
		goto end;
	}
	DBG("Connection on client_socket");

	/* Blocking call, waiting for transmission */
	sock = lttcomm_accept_unix_sock(client_socket);
	if (sock < 0) {
		goto end;
	}

	/*
	 * Setup metadata socket which is the second socket connection on the
	 * command unix socket.
	 */
	ret = set_metadata_socket(ctx, consumer_sockpoll, client_socket);
	if (ret < 0) {
		goto end;
	}

	/* This socket is not useful anymore. */
	ret = close(client_socket);
	if (ret < 0) {
		PERROR("close client_socket");
	}
	client_socket = -1;

	/* update the polling structure to poll on the established socket */
	consumer_sockpoll[1].fd = sock;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;
	while (1) {
		health_code_update();

		health_poll_entry();
		ret = lttng_consumer_poll_socket(consumer_sockpoll);
		health_poll_exit();
		if (ret) {
			goto end;
		}
		DBG("Incoming command on sock");
		ret = lttng_consumer_recv_cmd(ctx, sock, consumer_sockpoll);
		if (ret <= 0) {
			/*
			 * This could simply be a session daemon quitting. Don't output
			 * ERR() here.
			 */
			DBG("Communication interrupted on command socket");
			err = 0;
			goto end;
		}
		if (CMM_LOAD_SHARED(consumer_quit)) {
			DBG("consumer_thread_receive_fds received quit from signal");
			err = 0;	/* All is OK */
			goto end;
		}
		DBG("received command on sock");
	}
3437 DBG("Consumer thread sessiond poll exiting");
3440 * Close metadata streams since the producer is the session daemon which
3443 * NOTE: for now, this only applies to the UST tracer.
3445 lttng_consumer_close_all_metadata();
3448 * when all fds have hung up, the polling thread
3451 CMM_STORE_SHARED(consumer_quit
, 1);
3454 * Notify the data poll thread to poll back again and test the
3455 * consumer_quit state that we just set so to quit gracefully.
3457 notify_thread_lttng_pipe(ctx
->consumer_data_pipe
);
3459 notify_channel_pipe(ctx
, NULL
, -1, CONSUMER_CHANNEL_QUIT
);
3461 notify_health_quit_pipe(health_quit_pipe
);
3463 /* Cleaning up possibly open sockets. */
3467 PERROR("close sock sessiond poll");
3470 if (client_socket
>= 0) {
3471 ret
= close(client_socket
);
3473 PERROR("close client_socket sessiond poll");
3480 ERR("Health error occurred in %s", __func__
);
3482 health_unregister(health_consumerd
);
3484 rcu_unregister_thread();
ssize_t lttng_consumer_read_subbuffer(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	ssize_t ret;
	int rotate_ret;

	pthread_mutex_lock(&stream->lock);
	if (stream->metadata_flag) {
		pthread_mutex_lock(&stream->metadata_rdv_lock);
	}

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		ret = lttng_kconsumer_read_subbuffer(stream, ctx);
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		ret = lttng_ustconsumer_read_subbuffer(stream, ctx);
		break;
	default:
		ERR("Unknown consumer_data type");
		ret = -ENOSYS;
		break;
	}

	if (stream->metadata_flag) {
		pthread_cond_broadcast(&stream->metadata_rdv);
		pthread_mutex_unlock(&stream->metadata_rdv_lock);
	}
	pthread_mutex_unlock(&stream->lock);

	rotate_ret = consumer_post_rotation(stream, ctx);
	if (rotate_ret < 0) {
		ERR("Failed after a rotation");
		ret = -1;
	}

	return ret;
}
int lttng_consumer_on_recv_stream(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_on_recv_stream(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_on_recv_stream(stream);
	default:
		ERR("Unknown consumer_data type");
		return -ENOSYS;
	}
}
/*
 * Allocate and set consumer data hash tables.
 */
int lttng_consumer_init(void)
{
	consumer_data.channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.channel_ht) {
		goto error;
	}

	consumer_data.relayd_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.relayd_ht) {
		goto error;
	}

	consumer_data.stream_list_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.stream_list_ht) {
		goto error;
	}

	consumer_data.stream_per_chan_id_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.stream_per_chan_id_ht) {
		goto error;
	}

	data_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!data_ht) {
		goto error;
	}

	metadata_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!metadata_ht) {
		goto error;
	}

	return 0;

error:
	return -1;
}
/*
 * Process the ADD_RELAYD command received by a consumer.
 *
 * This will create a relayd socket pair and add it to the relayd hash table.
 * The caller MUST acquire a RCU read side lock before calling it.
 */
void consumer_add_relayd_socket(uint64_t net_seq_idx, int sock_type,
		struct lttng_consumer_local_data *ctx, int sock,
		struct pollfd *consumer_sockpoll,
		struct lttcomm_relayd_sock *relayd_sock, uint64_t sessiond_id,
		uint64_t relayd_session_id)
{
	int fd = -1, ret = -1, relayd_created = 0;
	enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct consumer_relayd_sock_pair *relayd = NULL;

	assert(relayd_sock);

	DBG("Consumer adding relayd socket (idx: %" PRIu64 ")", net_seq_idx);
	/* Get relayd reference if exists. */
	relayd = consumer_find_relayd(net_seq_idx);
	if (relayd == NULL) {
		assert(sock_type == LTTNG_STREAM_CONTROL);
		/* Not found. Allocate one. */
		relayd = consumer_allocate_relayd_sock_pair(net_seq_idx);
		if (relayd == NULL) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		} else {
			relayd->sessiond_session_id = sessiond_id;
			relayd_created = 1;
		}

		/*
		 * This code path MUST continue to the consumer send status message so
		 * we can notify the session daemon and continue our work without
		 * killing everything.
		 */
	} else {
		/*
		 * relayd key should never be found for control socket.
		 */
		assert(sock_type != LTTNG_STREAM_CONTROL);
	}
	/* First send a status message before receiving the fds. */
	ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
	if (ret < 0) {
		/* Somehow, the session daemon is not responding anymore. */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
		goto error_nosignal;
	}

	/* Poll on consumer socket. */
	ret = lttng_consumer_poll_socket(consumer_sockpoll);
	if (ret) {
		/* Needing to exit in the middle of a command: error. */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
		goto error_nosignal;
	}

	/* Get relayd socket from session daemon */
	ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
	if (ret != sizeof(fd)) {
		fd = -1;	/* Just in case it gets set with an invalid value. */

		/*
		 * Failing to receive FDs might indicate a major problem such as
		 * reaching a fd limit during the receive where the kernel returns a
		 * MSG_CTRUNC and fails to cleanup the fd in the queue. Any case, we
		 * don't take any chances and stop everything.
		 *
		 * XXX: Feature request #558 will fix that and avoid this possible
		 * issue when reaching the fd limit.
		 */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
		ret_code = LTTCOMM_CONSUMERD_ERROR_RECV_FD;
		goto error;
	}
	/* Copy socket information and received FD */
	switch (sock_type) {
	case LTTNG_STREAM_CONTROL:
		/* Copy received lttcomm socket */
		lttcomm_copy_sock(&relayd->control_sock.sock, &relayd_sock->sock);
		ret = lttcomm_create_sock(&relayd->control_sock.sock);
		/* Handle create_sock error. */
		if (ret < 0) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		}
		/*
		 * Close the socket created internally by
		 * lttcomm_create_sock, so we can replace it by the one
		 * received from sessiond.
		 */
		if (close(relayd->control_sock.sock.fd)) {
			PERROR("close");
		}

		/* Assign new file descriptor */
		relayd->control_sock.sock.fd = fd;
		fd = -1;	/* For error path */
		/* Assign version values. */
		relayd->control_sock.major = relayd_sock->major;
		relayd->control_sock.minor = relayd_sock->minor;

		relayd->relayd_session_id = relayd_session_id;

		break;
	case LTTNG_STREAM_DATA:
		/* Copy received lttcomm socket */
		lttcomm_copy_sock(&relayd->data_sock.sock, &relayd_sock->sock);
		ret = lttcomm_create_sock(&relayd->data_sock.sock);
		/* Handle create_sock error. */
		if (ret < 0) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		}
		/*
		 * Close the socket created internally by
		 * lttcomm_create_sock, so we can replace it by the one
		 * received from sessiond.
		 */
		if (close(relayd->data_sock.sock.fd)) {
			PERROR("close");
		}

		/* Assign new file descriptor */
		relayd->data_sock.sock.fd = fd;
		fd = -1;	/* for eventual error paths */
		/* Assign version values. */
		relayd->data_sock.major = relayd_sock->major;
		relayd->data_sock.minor = relayd_sock->minor;
		break;
	default:
		ERR("Unknown relayd socket type (%d)", sock_type);
		ret_code = LTTCOMM_CONSUMERD_FATAL;
		goto error;
	}
3728 DBG("Consumer %s socket created successfully with net idx %" PRIu64
" (fd: %d)",
3729 sock_type
== LTTNG_STREAM_CONTROL
? "control" : "data",
3730 relayd
->net_seq_idx
, fd
);
3732 /* We successfully added the socket. Send status back. */
3733 ret
= consumer_send_status_msg(sock
, ret_code
);
3735 /* Somehow, the session daemon is not responding anymore. */
3736 lttng_consumer_send_error(ctx
, LTTCOMM_CONSUMERD_FATAL
);
3737 goto error_nosignal
;
3741 * Add relayd socket pair to consumer data hashtable. If object already
3742 * exists or on error, the function gracefully returns.
3750 if (consumer_send_status_msg(sock
, ret_code
) < 0) {
3751 lttng_consumer_send_error(ctx
, LTTCOMM_CONSUMERD_FATAL
);
3755 /* Close received socket if valid. */
3758 PERROR("close received socket");
3762 if (relayd_created
) {
/*
 * Try to lock the stream mutex.
 *
 * On success, 1 is returned else 0 indicating that the mutex is NOT locked.
 */
static int stream_try_lock(struct lttng_consumer_stream *stream)
{
	int ret;

	/*
	 * Try to lock the stream mutex. On failure, we know that the stream is
	 * being used elsewhere hence there is data still being extracted.
	 */
	ret = pthread_mutex_trylock(&stream->lock);
	if (ret) {
		/* For both EBUSY and EINVAL error, the mutex is NOT locked. */
		return 0;
	}

	return 1;
}
/*
 * Search for a relayd associated to the session id and return the reference.
 *
 * A rcu read side lock MUST be acquired before calling this function and held
 * until the relayd object is no longer necessary.
 */
static struct consumer_relayd_sock_pair *find_relayd_by_session_id(uint64_t id)
{
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Iterate over all relayd since they are indexed by net_seq_idx. */
	cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
			node.node) {
		/*
		 * Check by sessiond id which is unique here where the relayd session
		 * id might not be when having multiple relayd.
		 */
		if (relayd->sessiond_session_id == id) {
			/* Found the relayd. There can be only one per id. */
			return relayd;
		}
	}

	return NULL;
}
/*
 * Check if for a given session id there is still data needed to be extracted
 * from the buffers.
 *
 * Return 1 if data is pending or else 0 meaning ready to be read.
 */
int consumer_data_pending(uint64_t id)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct consumer_relayd_sock_pair *relayd = NULL;
	int (*data_pending)(struct lttng_consumer_stream *);

	DBG("Consumer data pending command on session id %" PRIu64, id);

	rcu_read_lock();
	pthread_mutex_lock(&consumer_data.lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		data_pending = lttng_kconsumer_data_pending;
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		data_pending = lttng_ustconsumer_data_pending;
		break;
	default:
		ERR("Unknown consumer data type");
		goto data_not_pending;
	}
	/* Ease our life a bit */
	ht = consumer_data.stream_list_ht;

	relayd = find_relayd_by_session_id(id);
	if (relayd) {
		/* Send init command for data pending. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_begin_data_pending(&relayd->control_sock,
				relayd->relayd_session_id);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			/* Communication error thus the relayd so no data pending. */
			goto data_not_pending;
		}
	}
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&id, lttng_ht_seed),
			ht->match_fct, &id,
			&iter.iter, stream, node_session_id.node) {
		/* If this call fails, the stream is being used hence data pending. */
		ret = stream_try_lock(stream);
		if (!ret) {
			goto data_pending;
		}

		/*
		 * A removed node from the hash table indicates that the stream has
		 * been deleted thus having a guarantee that the buffers are closed
		 * on the consumer side. However, data can still be transmitted
		 * over the network so don't skip the relayd check.
		 */
		ret = cds_lfht_is_node_deleted(&stream->node.node);
		if (!ret) {
			/* Check the stream if there is data in the buffers. */
			ret = data_pending(stream);
			if (ret == 1) {
				pthread_mutex_unlock(&stream->lock);
				goto data_pending;
			}
		}

		if (relayd) {
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			if (stream->metadata_flag) {
				ret = relayd_quiescent_control(&relayd->control_sock,
						stream->relayd_stream_id);
			} else {
				ret = relayd_data_pending(&relayd->control_sock,
						stream->relayd_stream_id,
						stream->next_net_seq_num - 1);
			}
			pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
			if (ret == 1) {
				pthread_mutex_unlock(&stream->lock);
				goto data_pending;
			}
		}
		pthread_mutex_unlock(&stream->lock);
	}
	if (relayd) {
		unsigned int is_data_inflight = 0;

		/* Send init command for data pending. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_end_data_pending(&relayd->control_sock,
				relayd->relayd_session_id, &is_data_inflight);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			goto data_not_pending;
		}
		if (is_data_inflight) {
			goto data_pending;
		}
	}

	/*
	 * Finding _no_ node in the hash table and no inflight data means that the
	 * stream(s) have been removed thus data is guaranteed to be available for
	 * analysis from the trace files.
	 */

data_not_pending:
	/* Data is available to be read by a viewer. */
	pthread_mutex_unlock(&consumer_data.lock);
	rcu_read_unlock();
	return 0;

data_pending:
	/* Data is still being extracted from buffers. */
	pthread_mutex_unlock(&consumer_data.lock);
	rcu_read_unlock();
	return 1;
}
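/*
 * Summary of the decision above: a session is reported as "data pending" as
 * soon as any of its streams is locked by another thread, still has data in
 * its buffers, or (when streaming) still has data in flight towards the
 * relayd; only when every check comes back negative is the trace considered
 * ready to be read.
 */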
/*
 * Send a ret code status message to the sessiond daemon.
 *
 * Return the sendmsg() return value.
 */
int consumer_send_status_msg(int sock, int ret_code)
{
	struct lttcomm_consumer_status_msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.ret_code = ret_code;

	return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
}
/*
 * Send a channel status message to the sessiond daemon.
 *
 * Return the sendmsg() return value.
 */
int consumer_send_status_channel(int sock,
		struct lttng_consumer_channel *channel)
{
	struct lttcomm_consumer_status_channel msg;

	memset(&msg, 0, sizeof(msg));
	if (!channel) {
		msg.ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
	} else {
		msg.ret_code = LTTCOMM_CONSUMERD_SUCCESS;
		msg.key = channel->key;
		msg.stream_count = channel->streams.count;
	}

	return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
}
unsigned long consumer_get_consume_start_pos(unsigned long consumed_pos,
		unsigned long produced_pos, uint64_t nb_packets_per_stream,
		uint64_t max_sb_size)
{
	unsigned long start_pos;

	if (!nb_packets_per_stream) {
		return consumed_pos;	/* Grab everything */
	}
	start_pos = produced_pos - offset_align_floor(produced_pos, max_sb_size);
	start_pos -= max_sb_size * nb_packets_per_stream;
	if ((long) (start_pos - consumed_pos) < 0) {
		return consumed_pos;	/* Grab everything */
	}
	return start_pos;
}
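/*
 * Worked example for the computation above (illustrative values only): with
 * max_sb_size = 4096, nb_packets_per_stream = 2, produced_pos = 10000 and
 * consumed_pos = 0, the produced position is first aligned down to 8192 and
 * then pulled back by 2 * 4096, giving start_pos = 0. In other words, snapshot
 * consumption starts at most nb_packets_per_stream sub-buffers behind the
 * producer and never before the already-consumed position.
 */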
int consumer_flush_buffer(struct lttng_consumer_stream *stream, int producer_active)
{
	int ret = 0;

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		ret = kernctl_buffer_flush(stream->wait_fd);
		if (ret < 0) {
			ERR("Failed to flush kernel stream");
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustctl_flush_buffer(stream, producer_active);
		break;
	default:
		ERR("Unknown consumer_data type");
		break;
	}

	return ret;
}
/*
 * Sample the rotate position for all the streams of a channel.
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_rotate_channel(uint64_t key, char *path,
		uint64_t relayd_id, uint32_t metadata, uint64_t new_chunk_id,
		struct lttng_consumer_local_data *ctx)
{
	int ret;
	struct lttng_consumer_channel *channel;
	struct lttng_consumer_stream *stream;
	struct lttng_ht_iter iter;
	struct lttng_ht *ht = consumer_data.stream_per_chan_id_ht;

	DBG("Consumer sample rotate position for channel %" PRIu64, key);

	rcu_read_lock();

	channel = consumer_find_channel(key);
	if (!channel) {
		ERR("No channel found for key %" PRIu64, key);
		ret = -1;
		goto end;
	}

	pthread_mutex_lock(&channel->lock);
	channel->current_chunk_id = new_chunk_id;
	snprintf(channel->pathname, PATH_MAX, "%s", path);
	ret = utils_mkdir_recursive(channel->pathname, S_IRWXU | S_IRWXG,
			channel->uid, channel->gid);
	if (ret < 0) {
		ERR("Trace directory creation error");
		pthread_mutex_unlock(&channel->lock);
		goto end;
	}
	pthread_mutex_unlock(&channel->lock);
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed),
			ht->match_fct, &channel->key, &iter.iter,
			stream, node_channel_id.node) {
		uint64_t consumed_pos;

		health_code_update();

		/*
		 * Lock stream because we are about to change its state.
		 */
		pthread_mutex_lock(&stream->lock);
		memcpy(stream->channel_ro_pathname, channel->pathname, PATH_MAX);
		ret = lttng_consumer_sample_snapshot_positions(stream);
		if (ret < 0) {
			ERR("Taking snapshot positions");
			goto end_unlock;
		}

		ret = lttng_consumer_get_produced_snapshot(stream,
				&stream->rotate_position);
		if (ret < 0) {
			ERR("Produced snapshot position");
			goto end_unlock;
		}

		lttng_consumer_get_consumed_snapshot(stream, &consumed_pos);
		if (consumed_pos == stream->rotate_position) {
			stream->rotate_ready = 1;
		}
		channel->nr_stream_rotate_pending++;

		ret = consumer_flush_buffer(stream, 1);
		if (ret < 0) {
			ERR("Failed to flush stream");
			goto end_unlock;
		}

		pthread_mutex_unlock(&stream->lock);
	}

	ret = 0;
	goto end;

end_unlock:
	pthread_mutex_unlock(&stream->lock);
end:
	rcu_read_unlock();
	return ret;
}
/*
 * Check if a stream is ready to be rotated after extracting it.
 *
 * Return 1 if it is ready for rotation, 0 if it is not, a negative value on
 * error. Stream lock must be held.
 */
int lttng_consumer_stream_is_rotate_ready(struct lttng_consumer_stream *stream)
{
	int ret;
	unsigned long consumed_pos;

	if (!stream->rotate_position && !stream->rotate_ready) {
		return 0;
	}

	if (stream->rotate_ready) {
		return 1;
	}

	/*
	 * If we don't have the rotate_ready flag, check the consumed position
	 * to determine if we need to rotate.
	 */
	ret = lttng_consumer_sample_snapshot_positions(stream);
	if (ret < 0) {
		ERR("Taking kernel snapshot positions");
		return ret;
	}

	ret = lttng_consumer_get_consumed_snapshot(stream, &consumed_pos);
	if (ret < 0) {
		ERR("Consumed kernel snapshot position");
		return ret;
	}

	/* Rotate position not reached yet. */
	if ((long) (consumed_pos - stream->rotate_position) < 0) {
		return 0;
	}

	return 1;
}
/*
 * Reset the state for a stream after a rotation occurred.
 */
void lttng_consumer_reset_stream_rotate_state(struct lttng_consumer_stream *stream)
{
	stream->rotate_position = 0;
	stream->rotate_ready = 0;
	stream->rotated = 1;
}
/*
 * Perform the rotation of a local stream file.
 */
int rotate_local_stream(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream)
{
	int ret;

	ret = close(stream->out_fd);
	if (ret < 0) {
		PERROR("Closing tracefile");
		goto end;
	}

	ret = utils_create_stream_file(stream->channel_ro_pathname, stream->name,
			stream->channel_ro_tracefile_size, stream->tracefile_count_current,
			stream->uid, stream->gid, NULL);
	if (ret < 0) {
		goto end;
	}
	stream->out_fd = ret;
	stream->tracefile_size_current = 0;

	if (!stream->metadata_flag) {
		struct lttng_index_file *index_file;

		lttng_index_file_put(stream->index_file);

		index_file = lttng_index_file_create(stream->channel_ro_pathname,
				stream->name, stream->uid, stream->gid,
				stream->channel_ro_tracefile_size,
				stream->tracefile_count_current,
				CTF_INDEX_MAJOR, CTF_INDEX_MINOR);
		if (!index_file) {
			ret = -1;
			goto end;
		}
		stream->index_file = index_file;
		stream->out_fd_offset = 0;
	}

	ret = 0;

end:
	return ret;
}
/*
 * Perform the rotation of a stream file on the relay.
 */
int rotate_relay_stream(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream)
{
	int ret;
	struct consumer_relayd_sock_pair *relayd;

	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (!relayd) {
		ERR("Failed to find relayd");
		ret = -1;
		goto end;
	}

	/* FIXME: chan_ro ? */
	ret = relayd_rotate_stream(&relayd->control_sock,
			stream->relayd_stream_id, stream->channel_ro_pathname,
			stream->chan->current_chunk_id,
			stream->last_sequence_number);

end:
	return ret;
}
/*
 * Performs the stream rotation for the rotate session feature if needed.
 * It must be called with the stream lock held.
 *
 * Return 0 on success, a negative number on error.
 */
int lttng_consumer_rotate_stream(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream)
{
	int ret;

	if (stream->net_seq_idx != (uint64_t) -1ULL) {
		ret = rotate_relay_stream(ctx, stream);
	} else {
		ret = rotate_local_stream(ctx, stream);
	}
	if (ret < 0) {
		goto error;
	}

	if (stream->metadata_flag) {
		switch (consumer_data.type) {
		case LTTNG_CONSUMER_KERNEL:
			/*
			 * Reset the position of what has been read from the metadata
			 * cache to 0 so we can dump it again.
			 */
			ret = kernctl_metadata_cache_dump(stream->wait_fd);
			if (ret < 0) {
				ERR("Failed to dump the kernel metadata cache after rotation");
				goto error;
			}
			break;
		case LTTNG_CONSUMER32_UST:
		case LTTNG_CONSUMER64_UST:
			/*
			 * Reset the position pushed from the metadata cache so it
			 * will write from the beginning on the next push.
			 */
			stream->ust_metadata_pushed = 0;
			break;
		default:
			ERR("Unknown consumer_data type");
			break;
		}
	}

	lttng_consumer_reset_stream_rotate_state(stream);

	ret = 0;

error:
	return ret;
}
/*
 * Rotate all the ready streams.
 *
 * This is especially important for low throughput streams that have already
 * been consumed, we cannot wait for their next packet to perform the
 * rotation.
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_rotate_ready_streams(uint64_t key,
		struct lttng_consumer_local_data *ctx)
{
	int ret;
	struct lttng_consumer_channel *channel;
	struct lttng_consumer_stream *stream;
	struct lttng_pipe *stream_pipe;
	struct lttng_ht_iter iter;
	struct lttng_ht *ht = consumer_data.stream_per_chan_id_ht;

	rcu_read_lock();

	channel = consumer_find_channel(key);
	if (!channel) {
		ERR("No channel found for key %" PRIu64, key);
		ret = -1;
		goto end;
	}

	if (channel->metadata_stream) {
		stream_pipe = ctx->consumer_metadata_rotate_pipe;
	} else {
		stream_pipe = ctx->consumer_data_rotate_pipe;
	}

	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed),
			ht->match_fct, &channel->key, &iter.iter,
			stream, node_channel_id.node) {
		health_code_update();

		if (stream->rotate_ready == 0) {
			continue;
		}
		ret = lttng_pipe_write(stream_pipe, &stream, sizeof(stream));
		if (ret < 0) {
			ERR("Failed to wakeup consumer rotate pipe");
			goto end;
		}
	}

	ret = 0;

end:
	rcu_read_unlock();
	return ret;
}
int rotate_rename_local(char *current_path, char *new_path,
		uid_t uid, gid_t gid)
{
	int ret;

	ret = utils_mkdir_recursive(new_path, S_IRWXU | S_IRWXG,
			uid, gid);
	if (ret < 0) {
		ERR("Create directory on rotate");
		goto end;
	}

	ret = rename(current_path, new_path);
	/*
	 * If a domain has not yet created its channel, the domain-specific
	 * folder might not exist, but this is not an error.
	 */
	if (ret < 0 && errno != ENOENT) {
		PERROR("Rename completed rotation chunk");
		goto end;
	}

	ret = 0;

end:
	return ret;
}
int rotate_rename_relay(char *current_path, char *new_path, uint64_t relayd_id)
{
	int ret;
	struct consumer_relayd_sock_pair *relayd;

	relayd = consumer_find_relayd(relayd_id);
	if (!relayd) {
		ERR("Failed to find relayd");
		ret = -1;
		goto end;
	}

	ret = relayd_rotate_rename(&relayd->control_sock, current_path, new_path);

end:
	return ret;
}
int lttng_consumer_rotate_rename(char *current_path, char *new_path,
		uid_t uid, gid_t gid, uint64_t relayd_id)
{
	if (relayd_id != (uint64_t) -1ULL) {
		return rotate_rename_relay(current_path, new_path, relayd_id);
	} else {
		return rotate_rename_local(current_path, new_path, uid, gid);
	}
}
int lttng_consumer_rotate_pending_relay(uint64_t session_id,
		uint64_t relayd_id, uint64_t chunk_id)
{
	int ret;
	struct consumer_relayd_sock_pair *relayd;

	relayd = consumer_find_relayd(relayd_id);
	if (!relayd) {
		ERR("Failed to find relayd");
		ret = -1;
		goto end;
	}

	ret = relayd_rotate_pending(&relayd->control_sock, chunk_id);

end:
	return ret;
}