/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *               2012 - David Goulet <dgoulet@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <sys/socket.h>
#include <sys/types.h>

#include <common/common.h>
#include <common/utils.h>
#include <common/compat/poll.h>
#include <common/kernel-ctl/kernel-ctl.h>
#include <common/sessiond-comm/relayd.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/relayd/relayd.h>
#include <common/ust-consumer/ust-consumer.h>

#include "consumer-stream.h"
struct lttng_consumer_global_data consumer_data = {
	.type = LTTNG_CONSUMER_UNKNOWN,
};
enum consumer_channel_action {
	CONSUMER_CHANNEL_ADD,
	CONSUMER_CHANNEL_DEL,
	CONSUMER_CHANNEL_QUIT,
};
struct consumer_channel_msg {
	enum consumer_channel_action action;
	struct lttng_consumer_channel *chan;	/* add */
	uint64_t key;				/* del */
};
/*
 * Flag to inform the polling thread to quit when all fds have hung up. Updated
 * by the consumer_thread_receive_fds when it notices that all fds have hung
 * up. Also updated by the signal handler (consumer_should_exit()). Read by the
 * polling threads.
 */
volatile int consumer_quit;
/*
 * Global hash table containing respectively metadata and data streams. The
 * stream element in this ht should only be updated by the metadata poll thread
 * for the metadata and the data poll thread for the data.
 */
static struct lttng_ht *metadata_ht;
static struct lttng_ht *data_ht;
/*
 * Notify a thread lttng pipe to poll back again. This usually means that some
 * global state has changed so we just send back the thread in a poll wait
 * call.
 */
static void notify_thread_lttng_pipe(struct lttng_pipe *pipe)
{
	struct lttng_consumer_stream *null_stream = NULL;

	(void) lttng_pipe_write(pipe, &null_stream, sizeof(null_stream));
}
static void notify_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel *chan,
		uint64_t key,
		enum consumer_channel_action action)
{
	struct consumer_channel_msg msg;
	ssize_t ret;

	memset(&msg, 0, sizeof(msg));

	msg.action = action;
	msg.chan = chan;
	msg.key = key;
	do {
		ret = write(ctx->consumer_channel_pipe[1], &msg, sizeof(msg));
	} while (ret < 0 && errno == EINTR);
}
void notify_thread_del_channel(struct lttng_consumer_local_data *ctx,
		uint64_t key)
{
	notify_channel_pipe(ctx, NULL, key, CONSUMER_CHANNEL_DEL);
}
static int read_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel **chan,
		uint64_t *key,
		enum consumer_channel_action *action)
{
	struct consumer_channel_msg msg;
	ssize_t ret;

	do {
		ret = read(ctx->consumer_channel_pipe[0], &msg, sizeof(msg));
	} while (ret < 0 && errno == EINTR);

	*action = msg.action;
	*chan = msg.chan;
	*key = msg.key;

	return (int) ret;
}
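
/*
 * Both helpers above move one fixed-size struct consumer_channel_msg per
 * write()/read() on consumer_channel_pipe; since sizeof(msg) is well below
 * PIPE_BUF, each message can reasonably be assumed to be transferred
 * atomically on Linux pipes.
 */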
/*
 * Find a stream. The consumer_data.lock must be locked during this
 * call.
 */
static struct lttng_consumer_stream *find_stream(uint64_t key,
		struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_stream *stream = NULL;

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	lttng_ht_lookup(ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		stream = caa_container_of(node, struct lttng_consumer_stream, node);
	}

	return stream;
}
static void steal_stream_key(uint64_t key, struct lttng_ht *ht)
{
	struct lttng_consumer_stream *stream;

	stream = find_stream(key, ht);
	if (stream) {
		stream->key = (uint64_t) -1ULL;
		/*
		 * We don't want the lookup to match, but we still need
		 * to iterate on this stream when iterating over the hash table. Just
		 * change the node key.
		 */
		stream->node.key = (uint64_t) -1ULL;
	}
}
/*
 * Return a channel object for the given key.
 *
 * RCU read side lock MUST be acquired before calling this function and
 * protects the channel ptr.
 */
struct lttng_consumer_channel *consumer_find_channel(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_channel *channel = NULL;

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	lttng_ht_lookup(consumer_data.channel_ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		channel = caa_container_of(node, struct lttng_consumer_channel, node);
	}

	return channel;
}
static void free_stream_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct lttng_consumer_stream *stream =
		caa_container_of(node, struct lttng_consumer_stream, node);

	free(stream);
}
static void free_channel_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct lttng_consumer_channel *channel =
		caa_container_of(node, struct lttng_consumer_channel, node);

	free(channel);
}
/*
 * RCU protected relayd socket pair free.
 */
static void free_relayd_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct consumer_relayd_sock_pair *relayd =
		caa_container_of(node, struct consumer_relayd_sock_pair, node);

	/*
	 * Close all sockets. This is done in the call RCU since we don't want the
	 * socket fds to be reassigned thus potentially creating bad state of the
	 * relayd object.
	 *
	 * We do not have to lock the control socket mutex here since at this stage
	 * there is no one referencing this relayd object.
	 */
	(void) relayd_close(&relayd->control_sock);
	(void) relayd_close(&relayd->data_sock);

	free(relayd);
}
/*
 * Destroy and free relayd socket pair object.
 */
void consumer_destroy_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret;
	struct lttng_ht_iter iter;

	if (relayd == NULL) {
		return;
	}

	DBG("Consumer destroy and close relayd socket pair");

	iter.iter.node = &relayd->node.node;
	ret = lttng_ht_del(consumer_data.relayd_ht, &iter);
	if (ret != 0) {
		/* We assume the relayd is being or is destroyed */
		return;
	}

	/* RCU free() call */
	call_rcu(&relayd->node.head, free_relayd_rcu);
}
/*
 * Remove a channel from the global list protected by a mutex. This function is
 * also responsible for freeing its data structures.
 */
void consumer_del_channel(struct lttng_consumer_channel *channel)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream, *stmp;

	DBG("Consumer delete channel key %" PRIu64, channel->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);

	/* Delete streams that might have been left in the stream list. */
	cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
			send_node) {
		cds_list_del(&stream->send_node);
		/*
		 * Once a stream is added to this list, the buffers were created so
		 * we have a guarantee that this call will succeed.
		 */
		consumer_stream_destroy(stream, NULL);
	}

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
		break;
	}

	iter.iter.node = &channel->node.node;
	ret = lttng_ht_del(consumer_data.channel_ht, &iter);
	assert(!ret);

	call_rcu(&channel->node.head, free_channel_rcu);

	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);
}
/*
 * Iterate over the relayd hash table and destroy each element. Finally,
 * destroy the whole hash table.
 */
static void cleanup_relayd_ht(void)
{
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd;

	cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
			node.node) {
		consumer_destroy_relayd(relayd);
	}

	lttng_ht_destroy(consumer_data.relayd_ht);
}
/*
 * Update the end point status of all streams having the given network sequence
 * index (relayd index).
 *
 * It's atomically set without having the stream mutex locked which is fine
 * because we handle the write/read race with a pipe wakeup for each thread.
 */
static void update_endpoint_status_by_netidx(uint64_t net_seq_idx,
		enum consumer_endpoint_status status)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer set delete flag on stream by idx %" PRIu64, net_seq_idx);

	/* Let's begin with metadata */
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to metadata stream %d", stream->wait_fd);
		}
	}

	/* Follow up by the data streams */
	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to data stream %d", stream->wait_fd);
		}
	}
}
/*
 * Cleanup a relayd object by flagging every associated stream for deletion,
 * destroying the object meaning removing it from the relayd hash table,
 * closing the sockets and freeing the memory in a RCU call.
 *
 * If a local data context is available, notify the threads that the streams'
 * state have changed.
 */
static void cleanup_relayd(struct consumer_relayd_sock_pair *relayd,
		struct lttng_consumer_local_data *ctx)
{
	uint64_t netidx;

	DBG("Cleaning up relayd sockets");

	/* Save the net sequence index before destroying the object */
	netidx = relayd->net_seq_idx;

	/*
	 * Delete the relayd from the relayd hash table, close the sockets and free
	 * the object in a RCU call.
	 */
	consumer_destroy_relayd(relayd);

	/* Set inactive endpoint to all streams */
	update_endpoint_status_by_netidx(netidx, CONSUMER_ENDPOINT_INACTIVE);

	/*
	 * With a local data context, notify the threads that the streams' state
	 * have changed. The write() action on the pipe acts as an "implicit"
	 * memory barrier ordering the updates of the end point status from the
	 * read of this status which happens AFTER receiving this notify.
	 */
	if (ctx) {
		notify_thread_lttng_pipe(ctx->consumer_data_pipe);
		notify_thread_lttng_pipe(ctx->consumer_metadata_pipe);
	}
}
/*
 * Flag a relayd socket pair for destruction. Destroy it if the refcount
 * reaches zero.
 *
 * RCU read side lock MUST be acquired before calling this function.
 */
void consumer_flag_relayd_for_destroy(struct consumer_relayd_sock_pair *relayd)
{
	/* Set destroy flag for this object */
	uatomic_set(&relayd->destroy_flag, 1);

	/* Destroy the relayd if refcount is 0 */
	if (uatomic_read(&relayd->refcount) == 0) {
		consumer_destroy_relayd(relayd);
	}
}
/*
 * Completely destroy a stream from every visible data structure and the given
 * hash table, if any.
 *
 * Once this call returns, the stream object is no longer usable nor visible.
 */
void consumer_del_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	consumer_stream_destroy(stream, ht);
}
struct lttng_consumer_stream *consumer_allocate_stream(uint64_t channel_key,
		uint64_t stream_key,
		enum lttng_consumer_stream_state state,
		const char *channel_name,
		uint64_t relayd_id,
		uint64_t session_id,
		int cpu,
		enum consumer_channel_type type,
		unsigned int monitor)
{
	int ret;
	struct lttng_consumer_stream *stream;

	stream = zmalloc(sizeof(*stream));
	if (stream == NULL) {
		PERROR("malloc struct lttng_consumer_stream");
		goto error;
	}

	stream->key = stream_key;
	stream->out_fd_offset = 0;
	stream->state = state;
	stream->net_seq_idx = relayd_id;
	stream->session_id = session_id;
	stream->monitor = monitor;
	stream->endpoint_status = CONSUMER_ENDPOINT_ACTIVE;
	pthread_mutex_init(&stream->lock, NULL);

	/* If channel is the metadata, flag this stream as metadata. */
	if (type == CONSUMER_CHANNEL_TYPE_METADATA) {
		stream->metadata_flag = 1;
		/* Metadata is flat out. */
		strncpy(stream->name, DEFAULT_METADATA_NAME, sizeof(stream->name));
	} else {
		/* Format stream name to <channel_name>_<cpu_number> */
		ret = snprintf(stream->name, sizeof(stream->name), "%s_%d",
				channel_name, cpu);
		if (ret < 0) {
			PERROR("snprintf stream name");
			goto error;
		}
	}

	/* Key is always the wait_fd for streams. */
	lttng_ht_node_init_u64(&stream->node, stream->key);

	/* Init node per channel id key */
	lttng_ht_node_init_u64(&stream->node_channel_id, channel_key);

	/* Init session id node with the stream session id */
	lttng_ht_node_init_u64(&stream->node_session_id, stream->session_id);

	DBG3("Allocated stream %s (key %" PRIu64 ", chan_key %" PRIu64
			" relayd_id %" PRIu64 ", session_id %" PRIu64,
			stream->name, stream->key, channel_key,
			stream->net_seq_idx, stream->session_id);

	return stream;

error:
	free(stream);
	return NULL;
}
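
/*
 * Illustrative call (hypothetical values, not from the original file): a
 * per-CPU data stream would typically be allocated along these lines,
 * matching the parameter list reconstructed above:
 *
 *	stream = consumer_allocate_stream(chan->key, wait_fd,
 *			LTTNG_CONSUMER_ACTIVE_STREAM, chan->name, relayd_id,
 *			session_id, cpu, CONSUMER_CHANNEL_TYPE_DATA, chan->monitor);
 */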
/*
 * Add a stream to the global list protected by a mutex.
 */
static int add_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	DBG3("Adding consumer stream %" PRIu64, stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->lock);

	/* Steal stream identifier to avoid having streams with the same key */
	steal_stream_key(stream->key, ht);

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_u64(consumer_data.stream_per_chan_id_ht,
			&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to steal
	 * the key since the HT does not use it and we allow to add redundant keys
	 * into this table.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any action
	 * in terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of this
	 * stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		/* Increment refcount before decrementing nb_init_stream_left */
		uatomic_inc(&stream->chan->refcount);
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	/* Update consumer data once the node is inserted. */
	consumer_data.stream_count++;
	consumer_data.need_update = 1;

	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	return 0;
}
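
/*
 * After add_stream() a stream is reachable from three tables: `ht` keyed on
 * the stream key (its wait_fd), stream_per_chan_id_ht keyed on the channel
 * key, and stream_list_ht keyed on the session id.
 */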
/*
 * Add relayd socket to global consumer data hashtable. RCU read side lock MUST
 * be acquired before calling this.
 */
static int add_relayd(struct consumer_relayd_sock_pair *relayd)
{
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(consumer_data.relayd_ht,
			&relayd->net_seq_idx, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node == NULL) {
		lttng_ht_add_unique_u64(consumer_data.relayd_ht, &relayd->node);
	}

	return 0;
}
/*
 * Allocate and return a consumer relayd socket.
 */
struct consumer_relayd_sock_pair *consumer_allocate_relayd_sock_pair(
		uint64_t net_seq_idx)
{
	struct consumer_relayd_sock_pair *obj = NULL;

	/* net sequence index of -1 is a failure */
	if (net_seq_idx == (uint64_t) -1ULL) {
		goto error;
	}

	obj = zmalloc(sizeof(struct consumer_relayd_sock_pair));
	if (obj == NULL) {
		PERROR("zmalloc relayd sock");
		goto error;
	}

	obj->net_seq_idx = net_seq_idx;
	obj->destroy_flag = 0;
	obj->control_sock.sock.fd = -1;
	obj->data_sock.sock.fd = -1;
	lttng_ht_node_init_u64(&obj->node, obj->net_seq_idx);
	pthread_mutex_init(&obj->ctrl_sock_mutex, NULL);

error:
	return obj;
}
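
/*
 * The net_seq_idx both identifies the relayd in consumer_data.relayd_ht and is
 * copied into every stream using that relayd (stream->net_seq_idx), which is
 * how cleanup_relayd() finds the streams affected by a relayd going away.
 */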
/*
 * Find a relayd socket pair in the global consumer data.
 *
 * Return the object if found else NULL.
 * RCU read-side lock must be held across this call and while using the
 * returned object.
 */
struct consumer_relayd_sock_pair *consumer_find_relayd(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Negative keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	lttng_ht_lookup(consumer_data.relayd_ht, &key,
			&iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		relayd = caa_container_of(node, struct consumer_relayd_sock_pair, node);
	}

	return relayd;
}
/*
 * Find a relayd and send the stream.
 *
 * Returns 0 on success, < 0 on error
 */
int consumer_send_relayd_stream(struct lttng_consumer_stream *stream,
		char *path)
{
	int ret = 0;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream->net_seq_idx != -1ULL);

	/* The stream is not metadata. Get relayd reference if exists. */
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		/* Add stream on the relayd */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_add_stream(&relayd->control_sock, stream->name,
				path, &stream->relayd_stream_id,
				stream->chan->tracefile_size, stream->chan->tracefile_count);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			goto end;
		}
		uatomic_inc(&relayd->refcount);
		stream->sent_to_relayd = 1;
	} else {
		ERR("Stream %" PRIu64 " relayd ID %" PRIu64 " unknown. Can't send it.",
				stream->key, stream->net_seq_idx);
		ret = -1;
		goto end;
	}

	DBG("Stream %s with key %" PRIu64 " sent to relayd id %" PRIu64,
			stream->name, stream->key, stream->net_seq_idx);

end:
	return ret;
}
/*
 * Find a relayd and close the stream.
 */
void close_relayd_stream(struct lttng_consumer_stream *stream)
{
	struct consumer_relayd_sock_pair *relayd;

	/* The stream is not metadata. Get relayd reference if exists. */
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd) {
		consumer_stream_relayd_close(stream, relayd);
	}
}
/*
 * Handle stream for relayd transmission if the stream applies for network
 * streaming where the net sequence index is set.
 *
 * Return destination file descriptor or negative value on error.
 */
static int write_relayd_stream_header(struct lttng_consumer_stream *stream,
		size_t data_size, unsigned long padding,
		struct consumer_relayd_sock_pair *relayd)
{
	int outfd = -1, ret;
	struct lttcomm_relayd_data_hdr data_hdr;

	/* Reset data header */
	memset(&data_hdr, 0, sizeof(data_hdr));

	if (stream->metadata_flag) {
		/* Caller MUST acquire the relayd control socket lock */
		ret = relayd_send_metadata(&relayd->control_sock, data_size);
		if (ret < 0) {
			goto error;
		}

		/* Metadata are always sent on the control socket. */
		outfd = relayd->control_sock.sock.fd;
	} else {
		/* Set header with stream information */
		data_hdr.stream_id = htobe64(stream->relayd_stream_id);
		data_hdr.data_size = htobe32(data_size);
		data_hdr.padding_size = htobe32(padding);
		/*
		 * Note that net_seq_num below is assigned with the *current* value of
		 * next_net_seq_num and only after that the next_net_seq_num will be
		 * incremented. This is why when issuing a command on the relayd using
		 * this next value, 1 should always be subtracted in order to compare
		 * the last seen sequence number on the relayd side to the last sent.
		 */
		data_hdr.net_seq_num = htobe64(stream->next_net_seq_num);
		/* Other fields are zeroed previously */

		ret = relayd_send_data_hdr(&relayd->data_sock, &data_hdr,
				sizeof(data_hdr));
		if (ret < 0) {
			goto error;
		}

		++stream->next_net_seq_num;

		/* Set to go on data socket */
		outfd = relayd->data_sock.sock.fd;
	}

error:
	return outfd;
}
/*
 * Allocate and return a new lttng_consumer_channel object using the given key
 * to initialize the hash table node.
 *
 * On error, return NULL.
 */
struct lttng_consumer_channel *consumer_allocate_channel(uint64_t key,
		uint64_t session_id,
		const char *pathname,
		const char *name,
		uint64_t relayd_id,
		enum lttng_event_output output,
		uint64_t tracefile_size,
		uint64_t tracefile_count,
		uint64_t session_id_per_pid,
		unsigned int monitor)
{
	struct lttng_consumer_channel *channel;

	channel = zmalloc(sizeof(*channel));
	if (channel == NULL) {
		PERROR("malloc struct lttng_consumer_channel");
		goto end;
	}

	channel->key = key;
	channel->refcount = 0;
	channel->session_id = session_id;
	channel->session_id_per_pid = session_id_per_pid;
	channel->relayd_id = relayd_id;
	channel->output = output;
	channel->tracefile_size = tracefile_size;
	channel->tracefile_count = tracefile_count;
	channel->monitor = monitor;
	pthread_mutex_init(&channel->lock, NULL);

	/*
	 * In monitor mode, the streams associated with the channel will be put in
	 * a special list ONLY owned by this channel. So, the refcount is set to 1
	 * here meaning that the channel itself has streams that are referenced.
	 *
	 * On a channel deletion, once the channel is no longer visible, the
	 * refcount is decremented and checked for a zero value to delete it. With
	 * streams in no monitor mode, it will now be safe to destroy the channel.
	 */
	if (!channel->monitor) {
		channel->refcount = 1;
	}

	strncpy(channel->pathname, pathname, sizeof(channel->pathname));
	channel->pathname[sizeof(channel->pathname) - 1] = '\0';

	strncpy(channel->name, name, sizeof(channel->name));
	channel->name[sizeof(channel->name) - 1] = '\0';

	lttng_ht_node_init_u64(&channel->node, channel->key);

	channel->wait_fd = -1;

	CDS_INIT_LIST_HEAD(&channel->streams.head);

	DBG("Allocated channel (key %" PRIu64 ")", channel->key);

end:
	return channel;
}
/*
 * Add a channel to the global list protected by a mutex.
 *
 * On success 0 is returned else a negative value.
 */
int consumer_add_channel(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);

	lttng_ht_lookup(consumer_data.channel_ht, &channel->key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		/* Channel already exists. Ignore the insertion. */
		ERR("Consumer add channel key %" PRIu64 " already exists!",
				channel->key);
		ret = -EEXIST;
		goto end;
	}

	lttng_ht_add_unique_u64(consumer_data.channel_ht, &channel->node);

end:
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	if (!ret && channel->wait_fd != -1 &&
			channel->type == CONSUMER_CHANNEL_TYPE_DATA) {
		notify_channel_pipe(ctx, channel, -1, CONSUMER_CHANNEL_ADD);
	}

	return ret;
}
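
/*
 * Sketch of the usual channel setup sequence (simplified; the real callers
 * live in the UST/kernel specific consumer code and pass more arguments):
 *
 *	channel = consumer_allocate_channel(key, session_id, path, name,
 *			relayd_id, output, tf_size, tf_count,
 *			session_id_per_pid, monitor);
 *	if (!channel || consumer_add_channel(channel, ctx) < 0) {
 *		... report the error to the session daemon ...
 *	}
 */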
/*
 * Allocate the pollfd structure and the local view of the out fds to avoid
 * doing a lookup in the linked list and concurrency issues when writing is
 * needed. Called with consumer_data.lock held.
 *
 * Returns the number of fds in the structures.
 */
static int update_poll_array(struct lttng_consumer_local_data *ctx,
		struct pollfd **pollfd, struct lttng_consumer_stream **local_stream,
		struct lttng_ht *ht)
{
	int i = 0;
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	assert(local_stream);

	DBG("Updating poll fd array");
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Only active streams with an active end point can be added to the
		 * poll set and local stream storage of the thread.
		 *
		 * There is a potential race here for endpoint_status to be updated
		 * just after the check. However, this is OK since the stream(s) will
		 * be deleted once the thread is notified that the end point state has
		 * changed where this function will be called back again.
		 */
		if (stream->state != LTTNG_CONSUMER_ACTIVE_STREAM ||
				stream->endpoint_status == CONSUMER_ENDPOINT_INACTIVE) {
			continue;
		}
		/*
		 * This clobbers way too much the debug output. Uncomment that if you
		 * need it for debugging purposes.
		 *
		 * DBG("Active FD %d", stream->wait_fd);
		 */
		(*pollfd)[i].fd = stream->wait_fd;
		(*pollfd)[i].events = POLLIN | POLLPRI;
		local_stream[i] = stream;
		i++;
	}

	/*
	 * Insert the consumer_data_pipe at the end of the array and don't
	 * increment i so nb_fd is the number of real FD.
	 */
	(*pollfd)[i].fd = lttng_pipe_get_readfd(ctx->consumer_data_pipe);
	(*pollfd)[i].events = POLLIN | POLLPRI;
	return i;
}
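
/*
 * Resulting layout: slots [0, i) hold stream wait_fds, slot i holds the
 * consumer_data_pipe read end, and only the stream count is returned, which
 * is why the data thread below polls on nb_fd + 1 entries.
 */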
/*
 * Poll on the should_quit pipe and the command socket. Return -1 on error or
 * if the thread should exit, 0 if data is available on the command socket.
 */
int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll)
{
	int num_rdy;

restart:
	num_rdy = poll(consumer_sockpoll, 2, -1);
	if (num_rdy == -1) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		PERROR("Poll error");
		return -1;
	}
	if (consumer_sockpoll[0].revents & (POLLIN | POLLPRI)) {
		DBG("consumer_should_quit wake up");
		return -1;
	}
	return 0;
}
/*
 * Set the error socket.
 */
void lttng_consumer_set_error_sock(struct lttng_consumer_local_data *ctx,
		int sock)
{
	ctx->consumer_error_socket = sock;
}

/*
 * Set the command socket path.
 */
void lttng_consumer_set_command_sock_path(
		struct lttng_consumer_local_data *ctx, char *sock)
{
	ctx->consumer_command_sock_path = sock;
}
/*
 * Send return code to the session daemon.
 * If the socket is not defined, we return 0, it is not a fatal error.
 */
int lttng_consumer_send_error(struct lttng_consumer_local_data *ctx, int cmd)
{
	if (ctx->consumer_error_socket > 0) {
		return lttcomm_send_unix_sock(ctx->consumer_error_socket, &cmd,
				sizeof(enum lttcomm_sessiond_command));
	}

	return 0;
}
/*
 * Close all the tracefiles and stream fds and MUST be called when all
 * instances are destroyed i.e. when all threads were joined and are ended.
 */
void lttng_consumer_cleanup(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;

	cds_lfht_for_each_entry(consumer_data.channel_ht->ht, &iter.iter, channel,
			node.node) {
		consumer_del_channel(channel);
	}

	lttng_ht_destroy(consumer_data.channel_ht);

	cleanup_relayd_ht();

	lttng_ht_destroy(consumer_data.stream_per_chan_id_ht);

	/*
	 * This HT contains streams that are freed by either the metadata thread or
	 * the data thread so we do *nothing* on the hash table and simply destroy
	 * it.
	 */
	lttng_ht_destroy(consumer_data.stream_list_ht);
}
/*
 * Called from signal handler.
 */
void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx)
{
	ssize_t ret;

	consumer_quit = 1;
	do {
		ret = write(ctx->consumer_should_quit[1], "4", 1);
	} while (ret < 0 && errno == EINTR);
	if (ret < 0 || ret != 1) {
		PERROR("write consumer quit");
	}

	DBG("Consumer flag that it should quit");
}
void lttng_consumer_sync_trace_file(struct lttng_consumer_stream *stream,
		off_t orig_offset)
{
	int outfd = stream->out_fd;

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset < stream->max_sb_size) {
		return;
	}
	lttng_sync_file_range(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size,
			SYNC_FILE_RANGE_WAIT_BEFORE
			| SYNC_FILE_RANGE_WRITE
			| SYNC_FILE_RANGE_WAIT_AFTER);
	/*
	 * Give hints to the kernel about how we access the file:
	 * POSIX_FADV_DONTNEED : we won't re-access data in a near future after
	 * we write it.
	 *
	 * We need to call fadvise again after the file grows because the
	 * kernel does not seem to apply fadvise to non-existing parts of the
	 * file.
	 *
	 * Call fadvise _after_ having waited for the page writeback to
	 * complete because the dirty page writeback semantic is not well
	 * defined. So it can be expected to lead to lower throughput in
	 * streaming.
	 */
	posix_fadvise(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size, POSIX_FADV_DONTNEED);
}
/*
 * Initialise the necessary environment:
 * - create a new context
 * - create the poll_pipe
 * - create the should_quit pipe (for signal handler)
 * - create the thread pipe (for splice)
 *
 * Takes a function pointer as argument, this function is called when data is
 * available on a buffer. This function is responsible to do the
 * kernctl_get_next_subbuf, read the data with mmap or splice depending on the
 * buffer configuration and then kernctl_put_next_subbuf at the end.
 *
 * Returns a pointer to the new context or NULL on error.
 */
struct lttng_consumer_local_data *lttng_consumer_create(
		enum lttng_consumer_type type,
		ssize_t (*buffer_ready)(struct lttng_consumer_stream *stream,
			struct lttng_consumer_local_data *ctx),
		int (*recv_channel)(struct lttng_consumer_channel *channel),
		int (*recv_stream)(struct lttng_consumer_stream *stream),
		int (*update_stream)(uint64_t stream_key, uint32_t state))
{
	int ret;
	struct lttng_consumer_local_data *ctx;

	assert(consumer_data.type == LTTNG_CONSUMER_UNKNOWN ||
			consumer_data.type == type);
	consumer_data.type = type;

	ctx = zmalloc(sizeof(struct lttng_consumer_local_data));
	if (ctx == NULL) {
		PERROR("allocating context");
		goto error;
	}

	ctx->consumer_error_socket = -1;
	ctx->consumer_metadata_socket = -1;
	pthread_mutex_init(&ctx->metadata_socket_lock, NULL);
	/* assign the callbacks */
	ctx->on_buffer_ready = buffer_ready;
	ctx->on_recv_channel = recv_channel;
	ctx->on_recv_stream = recv_stream;
	ctx->on_update_stream = update_stream;

	ctx->consumer_data_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_data_pipe) {
		goto error_poll_pipe;
	}

	ret = pipe(ctx->consumer_should_quit);
	if (ret < 0) {
		PERROR("Error creating recv pipe");
		goto error_quit_pipe;
	}

	ret = pipe(ctx->consumer_thread_pipe);
	if (ret < 0) {
		PERROR("Error creating thread pipe");
		goto error_thread_pipe;
	}

	ret = pipe(ctx->consumer_channel_pipe);
	if (ret < 0) {
		PERROR("Error creating channel pipe");
		goto error_channel_pipe;
	}

	ctx->consumer_metadata_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_metadata_pipe) {
		goto error_metadata_pipe;
	}

	ret = utils_create_pipe(ctx->consumer_splice_metadata_pipe);
	if (ret < 0) {
		goto error_splice_pipe;
	}

	return ctx;

error_splice_pipe:
	lttng_pipe_destroy(ctx->consumer_metadata_pipe);
error_metadata_pipe:
	utils_close_pipe(ctx->consumer_channel_pipe);
error_channel_pipe:
	utils_close_pipe(ctx->consumer_thread_pipe);
error_thread_pipe:
	utils_close_pipe(ctx->consumer_should_quit);
error_quit_pipe:
	lttng_pipe_destroy(ctx->consumer_data_pipe);
error_poll_pipe:
	free(ctx);
error:
	return NULL;
}
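
/*
 * Illustrative use (hypothetical callback names, not from this file):
 *
 *	ctx = lttng_consumer_create(LTTNG_CONSUMER_KERNEL, read_subbuffer_cb,
 *			recv_channel_cb, recv_stream_cb, update_stream_cb);
 *	if (!ctx) {
 *		... abort startup ...
 *	}
 */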
/*
 * Close all fds associated with the instance and free the context.
 */
void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx)
{
	int ret;

	DBG("Consumer destroying it. Closing everything.");

	ret = close(ctx->consumer_error_socket);
	if (ret) {
		PERROR("close");
	}
	ret = close(ctx->consumer_metadata_socket);
	if (ret) {
		PERROR("close");
	}
	utils_close_pipe(ctx->consumer_thread_pipe);
	utils_close_pipe(ctx->consumer_channel_pipe);
	lttng_pipe_destroy(ctx->consumer_data_pipe);
	lttng_pipe_destroy(ctx->consumer_metadata_pipe);
	utils_close_pipe(ctx->consumer_should_quit);
	utils_close_pipe(ctx->consumer_splice_metadata_pipe);

	unlink(ctx->consumer_command_sock_path);
	free(ctx);
}
/*
 * Write the metadata stream id on the specified file descriptor.
 */
static int write_relayd_metadata_id(int fd,
		struct lttng_consumer_stream *stream,
		struct consumer_relayd_sock_pair *relayd, unsigned long padding)
{
	ssize_t ret;
	struct lttcomm_relayd_metadata_payload hdr;

	hdr.stream_id = htobe64(stream->relayd_stream_id);
	hdr.padding_size = htobe32(padding);
	do {
		ret = write(fd, (void *) &hdr, sizeof(hdr));
	} while (ret < 0 && errno == EINTR);
	if (ret < 0 || ret != sizeof(hdr)) {
		/*
		 * This error means that the fd's end is closed so ignore the perror
		 * not to clobber the error output since this can happen in a normal
		 * code path.
		 */
		if (errno != EPIPE) {
			PERROR("write metadata stream id");
		}
		DBG3("Consumer failed to write relayd metadata id (errno: %d)", errno);
		/*
		 * Set ret to a negative value because if ret != sizeof(hdr), we don't
		 * handle writing the missing part so report that as an error and
		 * don't lie to the caller.
		 */
		ret = -1;
		goto end;
	}

	DBG("Metadata stream id %" PRIu64 " with padding %lu written before data",
			stream->relayd_stream_id, padding);

end:
	return (int) ret;
}
/*
 * Mmap the ring buffer, read it and write the data to the tracefile. This is a
 * core function for writing trace buffers to either the local filesystem or
 * the network.
 *
 * It must be called with the stream lock held.
 *
 * Careful review MUST be put if any changes occur!
 *
 * Returns the number of bytes written
 */
ssize_t lttng_consumer_on_read_subbuffer_mmap(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len,
		unsigned long padding)
{
	unsigned long mmap_offset;
	char *mmap_base;
	ssize_t ret = 0, written = 0;
	off_t orig_offset = stream->out_fd_offset;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	unsigned int relayd_hang_up = 0;

	/* RCU lock for the relayd pointer */
	rcu_read_lock();

	/* Check whether the current stream is set up for network streaming. */
	if (stream->net_seq_idx != (uint64_t) -1ULL) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			goto end;
		}
	}

	/* get the offset inside the fd to mmap */
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		mmap_base = stream->mmap_base;
		ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset);
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		mmap_base = lttng_ustctl_get_mmap_base(stream);
		if (!mmap_base) {
			ERR("read mmap get mmap base for stream %s", stream->name);
			goto end;
		}
		ret = lttng_ustctl_get_mmap_read_offset(stream, &mmap_offset);
		break;
	default:
		ERR("Unknown consumer_data type");
		goto end;
	}
	if (ret != 0) {
		PERROR("tracer ctl get_mmap_read_offset");
		written = ret;
		goto end;
	}

	/* Handle stream on the relayd if the output is on the network */
	if (relayd) {
		unsigned long netlen = len;

		/*
		 * Lock the control socket for the complete duration of the function
		 * since from this point on we will use the socket.
		 */
		if (stream->metadata_flag) {
			/* Metadata requires the control socket. */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			netlen += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, netlen, padding, relayd);
		if (ret >= 0) {
			/* Use the returned socket. */
			outfd = ret;

			/* Write metadata stream id before payload */
			if (stream->metadata_flag) {
				ret = write_relayd_metadata_id(outfd, stream, relayd, padding);
				if (ret < 0) {
					written = ret;
					/* Socket operation failed. We consider the relayd dead */
					if (ret == -EPIPE || ret == -EINVAL) {
						relayd_hang_up = 1;
						goto write_error;
					}
					goto end;
				}
			}
		} else {
			/* Socket operation failed. We consider the relayd dead */
			if (ret == -EPIPE || ret == -EINVAL) {
				relayd_hang_up = 1;
				goto write_error;
			}
			/* Else, use the default set before which is the filesystem. */
		}
	} else {
		/* No streaming, we have to set the len with the full padding */
		len += padding;

		/*
		 * Check if we need to change the tracefile before writing the packet.
		 */
		if (stream->chan->tracefile_size > 0 &&
				(stream->tracefile_size_current + len) >
				stream->chan->tracefile_size) {
			ret = utils_rotate_stream_file(stream->chan->pathname,
					stream->name, stream->chan->tracefile_size,
					stream->chan->tracefile_count, stream->uid, stream->gid,
					stream->out_fd, &(stream->tracefile_count_current));
			if (ret < 0) {
				ERR("Rotating output file");
				goto end;
			}
			outfd = stream->out_fd = ret;
			/* Reset current size because we just perform a rotation. */
			stream->tracefile_size_current = 0;
		}
		stream->tracefile_size_current += len;
	}

	while (len > 0) {
		do {
			ret = write(outfd, mmap_base + mmap_offset, len);
		} while (ret < 0 && errno == EINTR);
		DBG("Consumer mmap write() ret %zd (len %lu)", ret, len);
		if (ret < 0) {
			/*
			 * This is possible if the fd is closed on the other side (outfd)
			 * or any write problem. It can be a bit verbose for a normal
			 * execution if for instance the relayd is stopped abruptly. This
			 * can happen so set this to a DBG statement.
			 */
			DBG("Error in file write mmap");
			if (written == 0) {
				written = ret;
			}
			/* Socket operation failed. We consider the relayd dead */
			if (errno == EPIPE || errno == EINVAL) {
				relayd_hang_up = 1;
				goto write_error;
			}
			goto end;
		} else if (ret > len) {
			PERROR("Error in file write (ret %zd > len %lu)", ret, len);
			written += ret;
			goto end;
		} else {
			len -= ret;
			mmap_offset += ret;
		}

		/* This call is useless on a socket so better save a syscall. */
		if (!relayd) {
			/* This won't block, but will start writeout asynchronously */
			lttng_sync_file_range(outfd, stream->out_fd_offset, ret,
					SYNC_FILE_RANGE_WRITE);
			stream->out_fd_offset += ret;
		}
		written += ret;
	}
	lttng_consumer_sync_trace_file(stream, orig_offset);

write_error:
	/*
	 * This is a special case that the relayd has closed its socket. Let's
	 * cleanup the relayd object and all associated streams.
	 */
	if (relayd && relayd_hang_up) {
		cleanup_relayd(relayd, ctx);
	}

end:
	/* Unlock only if ctrl socket used */
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return written;
}
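
/*
 * On the network error paths above, EPIPE/EINVAL from the relayd sockets is
 * treated as the relayd being gone: cleanup_relayd() tears the object down
 * and wakes the data and metadata threads so they drop the affected streams.
 */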
/*
 * Splice the data from the ring buffer to the tracefile.
 *
 * It must be called with the stream lock held.
 *
 * Returns the number of bytes spliced.
 */
ssize_t lttng_consumer_on_read_subbuffer_splice(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len,
		unsigned long padding)
{
	ssize_t ret = 0, written = 0, ret_splice = 0;
	loff_t offset = 0;
	off_t orig_offset = stream->out_fd_offset;
	int fd = stream->wait_fd;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	int *splice_pipe;
	unsigned int relayd_hang_up = 0;

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/* Not supported for user space tracing */
		return -ENOSYS;
	default:
		ERR("Unknown consumer_data type");
		return -ENOSYS;
	}

	/* RCU lock for the relayd pointer */
	rcu_read_lock();

	/* Check whether the current stream is set up for network streaming. */
	if (stream->net_seq_idx != (uint64_t) -1ULL) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			goto end;
		}
	}

	/*
	 * Choose right pipe for splice. Metadata and trace data are handled by
	 * different threads hence the use of two pipes in order not to race or
	 * corrupt the written data.
	 */
	if (stream->metadata_flag) {
		splice_pipe = ctx->consumer_splice_metadata_pipe;
	} else {
		splice_pipe = ctx->consumer_thread_pipe;
	}

	/* Write metadata stream id before payload */
	if (relayd) {
		int total_len = len;

		if (stream->metadata_flag) {
			/*
			 * Lock the control socket for the complete duration of the
			 * function since from this point on we will use the socket.
			 */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);

			ret = write_relayd_metadata_id(splice_pipe[1], stream, relayd,
					padding);
			if (ret < 0) {
				written = ret;
				/* Socket operation failed. We consider the relayd dead */
				if (ret == -EBADF) {
					WARN("Remote relayd disconnected. Stopping");
					relayd_hang_up = 1;
					goto write_error;
				}
				goto end;
			}

			total_len += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, total_len, padding, relayd);
		if (ret >= 0) {
			/* Use the returned socket. */
			outfd = ret;
		} else {
			/* Socket operation failed. We consider the relayd dead */
			if (ret == -EBADF) {
				WARN("Remote relayd disconnected. Stopping");
				relayd_hang_up = 1;
				goto write_error;
			}
			goto end;
		}
	} else {
		/* No streaming, we have to set the len with the full padding */
		len += padding;

		/*
		 * Check if we need to change the tracefile before writing the packet.
		 */
		if (stream->chan->tracefile_size > 0 &&
				(stream->tracefile_size_current + len) >
				stream->chan->tracefile_size) {
			ret = utils_rotate_stream_file(stream->chan->pathname,
					stream->name, stream->chan->tracefile_size,
					stream->chan->tracefile_count, stream->uid, stream->gid,
					stream->out_fd, &(stream->tracefile_count_current));
			if (ret < 0) {
				ERR("Rotating output file");
				goto end;
			}
			outfd = stream->out_fd = ret;
			/* Reset current size because we just perform a rotation. */
			stream->tracefile_size_current = 0;
		}
		stream->tracefile_size_current += len;
	}

	while (len > 0) {
		DBG("splice chan to pipe offset %lu of len %lu (fd : %d, pipe: %d)",
				(unsigned long) offset, len, fd, splice_pipe[1]);
		ret_splice = splice(fd, &offset, splice_pipe[1], NULL, len,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice chan to pipe, ret %zd", ret_splice);
		if (ret_splice < 0) {
			PERROR("Error in relay splice");
			written = ret_splice;
			ret = errno;
			goto splice_error;
		}

		/* Handle stream on the relayd if the output is on the network */
		if (relayd && stream->metadata_flag) {
			size_t metadata_payload_size =
				sizeof(struct lttcomm_relayd_metadata_payload);

			/* Update counter to fit the spliced data */
			ret_splice += metadata_payload_size;
			len += metadata_payload_size;
			/*
			 * We do this so the return value can match the len passed as
			 * argument to this function.
			 */
			written -= metadata_payload_size;
		}

		/* Splice data out */
		ret_splice = splice(splice_pipe[0], NULL, outfd, NULL,
				ret_splice, SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("Consumer splice pipe to file, ret %zd", ret_splice);
		if (ret_splice < 0) {
			PERROR("Error in file splice");
			written = ret_splice;
			/* Socket operation failed. We consider the relayd dead */
			if (errno == EBADF || errno == EPIPE) {
				WARN("Remote relayd disconnected. Stopping");
				relayd_hang_up = 1;
				goto write_error;
			}
			ret = errno;
			goto splice_error;
		} else if (ret_splice > len) {
			ret = errno;
			PERROR("Wrote more data than requested %zd (len: %lu)",
					ret_splice, len);
			written += ret_splice;
			goto splice_error;
		}
		len -= ret_splice;

		/* This call is useless on a socket so better save a syscall. */
		if (!relayd) {
			/* This won't block, but will start writeout asynchronously */
			lttng_sync_file_range(outfd, stream->out_fd_offset, ret_splice,
					SYNC_FILE_RANGE_WRITE);
			stream->out_fd_offset += ret_splice;
		}
		written += ret_splice;
	}
	lttng_consumer_sync_trace_file(stream, orig_offset);
	goto end;

write_error:
	/*
	 * This is a special case that the relayd has closed its socket. Let's
	 * cleanup the relayd object and all associated streams.
	 */
	if (relayd && relayd_hang_up) {
		cleanup_relayd(relayd, ctx);
		/* Skip splice error so the consumer does not fail */
		goto end;
	}

splice_error:
	/* send the appropriate error description to sessiond */
	switch (ret) {
	case EINVAL:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_EINVAL);
		break;
	case ENOMEM:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ENOMEM);
		break;
	case ESPIPE:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ESPIPE);
		break;
	}

end:
	/* Unlock only if ctrl socket used */
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return written;
}
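
/*
 * splice(2) requires one end of the transfer to be a pipe, which is why the
 * data takes two hops above: ring-buffer fd -> splice_pipe, then
 * splice_pipe -> output fd (tracefile or relayd data socket).
 */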
/*
 * Take a snapshot for a specific fd
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_take_snapshot(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_take_snapshot(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_take_snapshot(stream);
	default:
		ERR("Unknown consumer_data type");
		return -ENOSYS;
	}
}
/*
 * Get the produced position
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_get_produced_snapshot(struct lttng_consumer_stream *stream,
		unsigned long *pos)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_get_produced_snapshot(stream, pos);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_get_produced_snapshot(stream, pos);
	default:
		ERR("Unknown consumer_data type");
		return -ENOSYS;
	}
}
int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx,
		int sock, struct pollfd *consumer_sockpoll)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	default:
		ERR("Unknown consumer_data type");
		return -ENOSYS;
	}
}
/*
 * Iterate over all streams of the hashtable and free them properly.
 *
 * WARNING: *MUST* be used with data streams only.
 */
static void destroy_data_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	if (ht == NULL) {
		return;
	}

	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any error
		 * can't be handled.
		 */
		(void) consumer_del_stream(stream, ht);
	}

	lttng_ht_destroy(ht);
}
/*
 * Iterate over all streams of the hashtable and free them properly.
 *
 * XXX: Should not be only for metadata stream or else use another name.
 */
static void destroy_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	if (ht == NULL) {
		return;
	}

	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any error
		 * can't be handled.
		 */
		(void) consumer_del_metadata_stream(stream, ht);
	}

	lttng_ht_destroy(ht);
}
void lttng_consumer_close_metadata(void)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		/*
		 * The Kernel consumer has a different metadata scheme so we don't
		 * close anything because the stream will be closed by the session
		 * daemon.
		 */
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/*
		 * Close all metadata streams. The metadata hash table is passed and
		 * this call iterates over it by closing all wakeup fd. This is safe
		 * because at this point we are sure that the metadata producer is
		 * either dead or blocked.
		 */
		lttng_ustconsumer_close_metadata(metadata_ht);
		break;
	default:
		ERR("Unknown consumer_data type");
		break;
	}
}
/*
 * Clean up a metadata stream and free its memory.
 */
void consumer_del_metadata_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *free_chan = NULL;
	struct consumer_relayd_sock_pair *relayd;

	/*
	 * This call should NEVER receive regular stream. It must always be
	 * metadata stream and this is crucial for data structure synchronization.
	 */
	assert(stream->metadata_flag);

	DBG3("Consumer delete metadata stream %d", stream->wait_fd);

	if (ht == NULL) {
		/* Means the stream was allocated but not successfully added */
		goto free_stream_rcu;
	}

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		if (stream->mmap_base != NULL) {
			ret = munmap(stream->mmap_base, stream->mmap_len);
			if (ret != 0) {
				PERROR("munmap metadata stream");
			}
		}
		if (stream->wait_fd >= 0) {
			ret = close(stream->wait_fd);
			if (ret < 0) {
				PERROR("close kernel metadata wait_fd");
			}
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		if (stream->monitor) {
			/* close the write-side in close_metadata */
			ret = close(stream->ust_metadata_poll_pipe[0]);
			if (ret < 0) {
				PERROR("Close UST metadata read-side poll pipe");
			}
		}
		lttng_ustconsumer_del_stream(stream);
		break;
	default:
		ERR("Unknown consumer_data type");
		goto end;
	}

	iter.iter.node = &stream->node.node;
	ret = lttng_ht_del(ht, &iter);
	assert(!ret);

	iter.iter.node = &stream->node_channel_id.node;
	ret = lttng_ht_del(consumer_data.stream_per_chan_id_ht, &iter);
	assert(!ret);

	iter.iter.node = &stream->node_session_id.node;
	ret = lttng_ht_del(consumer_data.stream_list_ht, &iter);
	assert(!ret);

	if (stream->out_fd >= 0) {
		ret = close(stream->out_fd);
		if (ret) {
			PERROR("close");
		}
	}

	/* Check and cleanup relayd */
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		uatomic_dec(&relayd->refcount);
		assert(uatomic_read(&relayd->refcount) >= 0);

		/* Closing streams requires to lock the control socket. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_send_close_stream(&relayd->control_sock,
				stream->relayd_stream_id, stream->next_net_seq_num - 1);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			DBG("Unable to close stream on the relayd. Continuing");
			/*
			 * Continue here. There is nothing we can do for the relayd.
			 * Chances are that the relayd has closed the socket so we just
			 * continue cleaning up.
			 */
		}

		/* Both conditions are met, we destroy the relayd. */
		if (uatomic_read(&relayd->refcount) == 0 &&
				uatomic_read(&relayd->destroy_flag)) {
			consumer_destroy_relayd(relayd);
		}
	}

	/* Atomically decrement channel refcount since other threads can use it. */
	if (!uatomic_sub_return(&stream->chan->refcount, 1)
			&& !uatomic_read(&stream->chan->nb_init_stream_left)) {
		/* Go for channel deletion! */
		free_chan = stream->chan;
	}

end:
	/*
	 * Nullify the stream reference so it is not used after deletion. The
	 * consumer data lock MUST be acquired before being able to check for a
	 * NULL pointer value.
	 */
	stream->chan->metadata_stream = NULL;

	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	if (free_chan) {
		consumer_del_channel(free_chan);
	}

free_stream_rcu:
	call_rcu(&stream->node.head, free_stream_rcu);
}
/*
 * Action done with the metadata stream when adding it to the consumer internal
 * data structures to handle it.
 */
static int add_metadata_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;

	DBG3("Adding metadata stream %" PRIu64 " to hash table", stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->lock);

	/*
	 * From here, refcounts are updated so be _careful_ when returning an error
	 * after this point.
	 */

	/*
	 * Lookup the stream just to make sure it does not exist in our internal
	 * state. This should NEVER happen.
	 */
	lttng_ht_lookup(ht, &stream->key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	assert(!node);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any action
	 * in terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of this
	 * stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		/* Increment refcount before decrementing nb_init_stream_left */
		uatomic_inc(&stream->chan->refcount);
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_unique_u64(consumer_data.stream_per_chan_id_ht,
			&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to steal
	 * the key since the HT does not use it and we allow to add redundant keys
	 * into this table.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	return 0;
}
/*
 * Delete data streams that are flagged for deletion (endpoint_status).
 */
static void validate_endpoint_status_data_stream(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer delete flagged data stream");

	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		/* Validate delete flag of the stream */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
			continue;
		}
		/* Delete it right now */
		consumer_del_stream(stream, data_ht);
	}
}
/*
 * Delete metadata streams that are flagged for deletion (endpoint_status).
 */
static void validate_endpoint_status_metadata_stream(
		struct lttng_poll_event *pollset)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer delete flagged metadata stream");

	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		/* Validate delete flag of the stream */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
			continue;
		}
		/*
		 * Remove from pollset so the metadata thread can continue without
		 * blocking on a deleted stream.
		 */
		lttng_poll_del(pollset, stream->wait_fd);

		/* Delete it right now */
		consumer_del_metadata_stream(stream, metadata_ht);
	}
}
/*
 * Thread polls on metadata file descriptor and write them on disk or on the
 * network.
 */
void *consumer_thread_metadata_poll(void *data)
{
	int ret, i, pollfd;
	uint32_t revents, nb_fd;
	struct lttng_consumer_stream *stream = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	ssize_t len;

	rcu_register_thread();

	metadata_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!metadata_ht) {
		/* ENOMEM at this point. Better to bail out. */
		goto end_ht;
	}

	DBG("Thread metadata poll started");

	/* Size is set to 1 for the consumer_metadata pipe */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	ret = lttng_poll_add(&events,
			lttng_pipe_get_readfd(ctx->consumer_metadata_pipe), LPOLLIN);
	if (ret < 0) {
		goto end;
	}

	DBG("Metadata main loop started");

	while (1) {
		/* Only the metadata pipe is set */
		if (LTTNG_POLL_GETNB(&events) == 0 && consumer_quit == 1) {
			goto end;
		}

restart:
		DBG("Metadata poll wait with %d fd(s)", LTTNG_POLL_GETNB(&events));
		ret = lttng_poll_wait(&events, -1);
		DBG("Metadata event caught in thread");
		if (ret < 0) {
			if (errno == EINTR) {
				ERR("Poll EINTR caught");
				goto restart;
			}
			goto end;
		}

		nb_fd = ret;

		/* From here, the event is a metadata wait fd */
		for (i = 0; i < nb_fd; i++) {
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Just don't waste time if no returned events for the fd */
			if (!revents) {
				continue;
			}

			if (pollfd == lttng_pipe_get_readfd(ctx->consumer_metadata_pipe)) {
				if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Metadata thread pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue the loop
					 * since there might be data to consume.
					 */
					lttng_poll_del(&events,
							lttng_pipe_get_readfd(ctx->consumer_metadata_pipe));
					lttng_pipe_read_close(ctx->consumer_metadata_pipe);
					continue;
				} else if (revents & LPOLLIN) {
					ssize_t pipe_len;

					pipe_len = lttng_pipe_read(ctx->consumer_metadata_pipe,
							&stream, sizeof(stream));
					if (pipe_len < 0) {
						ERR("read metadata stream, ret: %ld", pipe_len);
						/*
						 * Continue here to handle the rest of the streams.
						 */
						continue;
					}

					/* A NULL stream means that the state has changed. */
					if (stream == NULL) {
						/* Check for deleted streams. */
						validate_endpoint_status_metadata_stream(&events);
						goto restart;
					}

					DBG("Adding metadata stream %d to poll set",
							stream->wait_fd);

					ret = add_metadata_stream(stream, metadata_ht);
					if (ret) {
						ERR("Unable to add metadata stream");
						/* Stream was not setup properly. Continuing. */
						consumer_del_metadata_stream(stream, NULL);
						continue;
					}

					/* Add metadata stream to the global poll events list */
					lttng_poll_add(&events, stream->wait_fd,
							LPOLLIN | LPOLLPRI);
				}

				/* Handle other stream */
				continue;
			}

			{
				uint64_t tmp_id = (uint64_t) pollfd;

				lttng_ht_lookup(metadata_ht, &tmp_id, &iter);
			}
			node = lttng_ht_iter_get_node_u64(&iter);
			assert(node);

			stream = caa_container_of(node, struct lttng_consumer_stream,
					node);

			/* Check for error event */
			if (revents & (LPOLLERR | LPOLLHUP)) {
				DBG("Metadata fd %d is hup|err.", pollfd);
				if (!stream->hangup_flush_done
						&& (consumer_data.type == LTTNG_CONSUMER32_UST
							|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
					DBG("Attempting to flush and consume the UST buffers");
					lttng_ustconsumer_on_stream_hangup(stream);

					/* We just flushed the stream now read it. */
					len = ctx->on_buffer_ready(stream, ctx);
					/*
					 * We don't check the return value here since if we get
					 * a negative len, it means an error occurred thus we
					 * simply remove it from the poll set and free the
					 * stream.
					 */
				}

				lttng_poll_del(&events, stream->wait_fd);
				/*
				 * This call updates the channel states, closes file
				 * descriptors and securely frees the stream.
				 */
				consumer_del_metadata_stream(stream, metadata_ht);
			} else if (revents & (LPOLLIN | LPOLLPRI)) {
				/* Get the data out of the metadata file descriptor */
				DBG("Metadata available on fd %d", pollfd);
				assert(stream->wait_fd == pollfd);

				len = ctx->on_buffer_ready(stream, ctx);
				/*
				 * We don't check the return value here since if we get
				 * a negative len, it means an error occurred thus we
				 * simply remove it from the poll set and free the
				 * stream.
				 */

				/* It's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean up stream from consumer and free it. */
					lttng_poll_del(&events, stream->wait_fd);
					consumer_del_metadata_stream(stream, metadata_ht);
				}
			}

			/* Release RCU lock for the stream looked up */
		}
	}

end:
	DBG("Metadata poll thread exiting");

	lttng_poll_clean(&events);
end_poll:
	destroy_stream_ht(metadata_ht);
end_ht:
	rcu_unregister_thread();
	return NULL;
}
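
/*
 * Wakeup convention used by both polling threads: a NULL stream pointer
 * written to consumer_data_pipe / consumer_metadata_pipe (see
 * notify_thread_lttng_pipe()) only signals that global state changed, so the
 * reader re-validates its stream set instead of adding a stream.
 */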
/*
 * This thread polls the fds in the set to consume the data and write
 * it to tracefile if necessary.
 */
void *consumer_thread_data_poll(void *data)
{
	int num_rdy, num_hup, high_prio, ret, i;
	struct pollfd *pollfd = NULL;
	/* local view of the streams */
	struct lttng_consumer_stream **local_stream = NULL, *new_stream = NULL;
	/* local view of consumer_data.fds_count */
	int nb_fd = 0;
	struct lttng_consumer_local_data *ctx = data;
	ssize_t len;

	rcu_register_thread();

	data_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (data_ht == NULL) {
		/* ENOMEM at this point. Better to bail out. */
		goto end;
	}

	local_stream = zmalloc(sizeof(struct lttng_consumer_stream *));
	if (local_stream == NULL) {
		PERROR("local_stream malloc");
		goto end;
	}

	while (1) {
		/*
		 * the fds set has been updated, we need to update our
		 * local array as well
		 */
		pthread_mutex_lock(&consumer_data.lock);
		if (consumer_data.need_update) {
			free(pollfd);
			pollfd = NULL;

			free(local_stream);
			local_stream = NULL;

			/* allocate for all fds + 1 for the consumer_data_pipe */
			pollfd = zmalloc((consumer_data.stream_count + 1) * sizeof(struct pollfd));
			if (pollfd == NULL) {
				PERROR("pollfd malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}

			/* allocate for all fds + 1 for the consumer_data_pipe */
			local_stream = zmalloc((consumer_data.stream_count + 1) *
					sizeof(struct lttng_consumer_stream *));
			if (local_stream == NULL) {
				PERROR("local_stream malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			ret = update_poll_array(ctx, &pollfd, local_stream,
					data_ht);
			if (ret < 0) {
				ERR("Error in allocating pollfd or local_outfds");
				lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			nb_fd = ret;
			consumer_data.need_update = 0;
		}
		pthread_mutex_unlock(&consumer_data.lock);

		/* No FDs and consumer_quit, consumer_cleanup the thread */
		if (nb_fd == 0 && consumer_quit == 1) {
			goto end;
		}

		/* poll on the array of fds */
	restart:
		DBG("polling on %d fd", nb_fd + 1);
		num_rdy = poll(pollfd, nb_fd + 1, -1);
		DBG("poll num_rdy : %d", num_rdy);
		if (num_rdy == -1) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			PERROR("Poll error");
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
			goto end;
		} else if (num_rdy == 0) {
			DBG("Polling thread timed out");
			goto end;
		}

		/*
		 * If the consumer_data_pipe triggered poll go directly to the
		 * beginning of the loop to update the array. We want to prioritize
		 * array update over low-priority reads.
		 */
		if (pollfd[nb_fd].revents & (POLLIN | POLLPRI)) {
			ssize_t pipe_readlen;

			DBG("consumer_data_pipe wake up");
			pipe_readlen = lttng_pipe_read(ctx->consumer_data_pipe,
					&new_stream, sizeof(new_stream));
			if (pipe_readlen < 0) {
				ERR("Consumer data pipe ret %ld", pipe_readlen);
				/* Continue so we can at least handle the current stream(s). */
				continue;
			}

			/*
			 * If the stream is NULL, just ignore it. It's also possible that
			 * the sessiond poll thread changed the consumer_quit state and is
			 * waking us up to test it.
			 */
			if (new_stream == NULL) {
				validate_endpoint_status_data_stream();
				continue;
			}

			ret = add_stream(new_stream, data_ht);
			if (ret) {
				ERR("Consumer add stream %" PRIu64 " failed. Continuing",
						new_stream->key);
				/*
				 * At this point, if the add_stream fails, it is not in the
				 * hash table thus passing the NULL value here.
				 */
				consumer_del_stream(new_stream, NULL);
			}

			/* Continue to update the local streams and handle prio ones */
			continue;
		}
2438 /* Take care of high priority channels first. */
2439 for (i
= 0; i
< nb_fd
; i
++) {
2440 if (local_stream
[i
] == NULL
) {
2443 if (pollfd
[i
].revents
& POLLPRI
) {
2444 DBG("Urgent read on fd %d", pollfd
[i
].fd
);
2446 len
= ctx
->on_buffer_ready(local_stream
[i
], ctx
);
2447 /* it's ok to have an unavailable sub-buffer */
2448 if (len
< 0 && len
!= -EAGAIN
&& len
!= -ENODATA
) {
2449 /* Clean the stream and free it. */
2450 consumer_del_stream(local_stream
[i
], data_ht
);
2451 local_stream
[i
] = NULL
;
2452 } else if (len
> 0) {
2453 local_stream
[i
]->data_read
= 1;
2459 * If we read high prio channel in this loop, try again
2460 * for more high prio data.
2466 /* Take care of low priority channels. */
2467 for (i
= 0; i
< nb_fd
; i
++) {
2468 if (local_stream
[i
] == NULL
) {
2471 if ((pollfd
[i
].revents
& POLLIN
) ||
2472 local_stream
[i
]->hangup_flush_done
) {
2473 DBG("Normal read on fd %d", pollfd
[i
].fd
);
2474 len
= ctx
->on_buffer_ready(local_stream
[i
], ctx
);
2475 /* it's ok to have an unavailable sub-buffer */
2476 if (len
< 0 && len
!= -EAGAIN
&& len
!= -ENODATA
) {
2477 /* Clean the stream and free it. */
2478 consumer_del_stream(local_stream
[i
], data_ht
);
2479 local_stream
[i
] = NULL
;
2480 } else if (len
> 0) {
2481 local_stream
[i
]->data_read
= 1;
2486 /* Handle hangup and errors */
2487 for (i
= 0; i
< nb_fd
; i
++) {
2488 if (local_stream
[i
] == NULL
) {
2491 if (!local_stream
[i
]->hangup_flush_done
2492 && (pollfd
[i
].revents
& (POLLHUP
| POLLERR
| POLLNVAL
))
2493 && (consumer_data
.type
== LTTNG_CONSUMER32_UST
2494 || consumer_data
.type
== LTTNG_CONSUMER64_UST
)) {
2495 DBG("fd %d is hup|err|nval. Attempting flush and read.",
2497 lttng_ustconsumer_on_stream_hangup(local_stream
[i
]);
2498 /* Attempt read again, for the data we just flushed. */
2499 local_stream
[i
]->data_read
= 1;
2502 * If the poll flag is HUP/ERR/NVAL and we have
2503 * read no data in this pass, we can remove the
2504 * stream from its hash table.
2506 if ((pollfd
[i
].revents
& POLLHUP
)) {
2507 DBG("Polling fd %d tells it has hung up.", pollfd
[i
].fd
);
2508 if (!local_stream
[i
]->data_read
) {
2509 consumer_del_stream(local_stream
[i
], data_ht
);
2510 local_stream
[i
] = NULL
;
2513 } else if (pollfd
[i
].revents
& POLLERR
) {
2514 ERR("Error returned in polling fd %d.", pollfd
[i
].fd
);
2515 if (!local_stream
[i
]->data_read
) {
2516 consumer_del_stream(local_stream
[i
], data_ht
);
2517 local_stream
[i
] = NULL
;
2520 } else if (pollfd
[i
].revents
& POLLNVAL
) {
2521 ERR("Polling fd %d tells fd is not open.", pollfd
[i
].fd
);
2522 if (!local_stream
[i
]->data_read
) {
2523 consumer_del_stream(local_stream
[i
], data_ht
);
2524 local_stream
[i
] = NULL
;
2528 if (local_stream
[i
] != NULL
) {
2529 local_stream
[i
]->data_read
= 0;
2534 DBG("polling thread exiting");
2539 * Close the write side of the pipe so epoll_wait() in
2540 * consumer_thread_metadata_poll can catch it. The thread is monitoring the
2541 * read side of the pipe. If we close them both, epoll_wait strangely does
2542 * not return and could create a endless wait period if the pipe is the
2543 * only tracked fd in the poll set. The thread will take care of closing
2546 (void) lttng_pipe_write_close(ctx
->consumer_metadata_pipe
);
2548 destroy_data_stream_ht(data_ht
);
2550 rcu_unregister_thread();
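
/*
 * Illustrative sketch (not part of the original source): the data poll
 * thread above lays out its pollfd array with one slot per stream
 * (0 .. nb_fd - 1) followed by one extra slot for the read end of
 * consumer_data_pipe at index nb_fd, which is why it polls on nb_fd + 1
 * entries and checks pollfd[nb_fd] before the per-stream loops.  The helper
 * below only shows that layout; update_poll_array() is the real code that
 * fills the array.
 */
#if 0
static void example_show_poll_layout(struct pollfd *pollfd, int nb_fd,
		int data_pipe_read_fd)
{
	int i;

	for (i = 0; i < nb_fd; i++) {
		/* Slots 0 .. nb_fd - 1 each hold one stream wait_fd. */
		pollfd[i].events = POLLIN | POLLPRI;
	}
	/* Slot nb_fd: the consumer_data_pipe, checked before the stream slots. */
	pollfd[nb_fd].fd = data_pipe_read_fd;
	pollfd[nb_fd].events = POLLIN | POLLPRI;
}
#endif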
/*
 * Close wake-up end of each stream belonging to the channel. This will
 * allow the poll() on the stream read-side to detect when the
 * write-side (application) finally closes them.
 */
void consumer_close_channel_streams(struct lttng_consumer_channel *channel)
{
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct lttng_ht_iter iter;

	ht = consumer_data.stream_per_chan_id_ht;

	rcu_read_lock();
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed),
			ht->match_fct, &channel->key,
			&iter.iter, stream, node_channel_id.node) {
		/*
		 * Protect against teardown with mutex.
		 */
		pthread_mutex_lock(&stream->lock);
		if (cds_lfht_is_node_deleted(&stream->node.node)) {
			goto next;
		}
		switch (consumer_data.type) {
		case LTTNG_CONSUMER_KERNEL:
			break;
		case LTTNG_CONSUMER32_UST:
		case LTTNG_CONSUMER64_UST:
			/*
			 * Note: a mutex is taken internally within
			 * liblttng-ust-ctl to protect timer wakeup_fd
			 * use from concurrent close.
			 */
			lttng_ustconsumer_close_stream_wakeup(stream);
			break;
		default:
			ERR("Unknown consumer_data type");
			goto next;
		}
	next:
		pthread_mutex_unlock(&stream->lock);
	}
	rcu_read_unlock();
}
static void destroy_channel_ht(struct lttng_ht *ht)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, channel, wait_fd_node.node) {
		ret = lttng_ht_del(ht, &iter);
		assert(!ret);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}
/*
 * This thread polls the channel fds to detect when they are being
 * closed. It closes all related streams if the channel is detected as
 * closed. It is currently only used as a shim layer for UST because the
 * consumerd needs to keep the per-stream wakeup end of pipes open for
 * periodical flush.
 */
void *consumer_thread_channel_poll(void *data)
{
	int ret, i, pollfd;
	uint32_t revents, nb_fd;
	struct lttng_consumer_channel *chan = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	struct lttng_ht *channel_ht;

	rcu_register_thread();

	channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!channel_ht) {
		/* ENOMEM at this point. Better to bail out. */
		goto end_ht;
	}

	DBG("Thread channel poll started");

	/* Size is set to 1 for the consumer_channel pipe */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	ret = lttng_poll_add(&events, ctx->consumer_channel_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto end;
	}

	/* Main loop */
	DBG("Channel main loop started");

	while (1) {
		/* Only the channel pipe is set */
		if (LTTNG_POLL_GETNB(&events) == 0 && consumer_quit == 1) {
			goto end;
		}

restart:
		DBG("Channel poll wait with %d fd(s)", LTTNG_POLL_GETNB(&events));
		ret = lttng_poll_wait(&events, -1);
		DBG("Channel event caught in thread");
		if (ret < 0) {
			if (errno == EINTR) {
				ERR("Poll EINTR caught");
				goto restart;
			}
			goto end;
		}

		nb_fd = ret;

		/* From here, the event is a channel wait fd */
		for (i = 0; i < nb_fd; i++) {
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Just don't waste time if no returned events for the fd */
			if (!revents) {
				continue;
			}

			if (pollfd == ctx->consumer_channel_pipe[0]) {
				if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Channel thread pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue the loop
					 * since there might be data to consume.
					 */
					lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
					continue;
				} else if (revents & LPOLLIN) {
					enum consumer_channel_action action;
					uint64_t key;

					ret = read_channel_pipe(ctx, &chan, &key, &action);
					if (ret <= 0) {
						ERR("Error reading channel pipe");
						continue;
					}

					switch (action) {
					case CONSUMER_CHANNEL_ADD:
						DBG("Adding channel %d to poll set",
							chan->wait_fd);

						lttng_ht_node_init_u64(&chan->wait_fd_node,
							chan->wait_fd);
						rcu_read_lock();
						lttng_ht_add_unique_u64(channel_ht,
								&chan->wait_fd_node);
						rcu_read_unlock();
						/* Add channel to the global poll events list */
						lttng_poll_add(&events, chan->wait_fd,
								LPOLLIN | LPOLLPRI);
						break;
					case CONSUMER_CHANNEL_DEL:
					{
						struct lttng_consumer_stream *stream, *stmp;

						rcu_read_lock();
						chan = consumer_find_channel(key);
						if (!chan) {
							rcu_read_unlock();
							ERR("UST consumer get channel key %" PRIu64 " not found for del channel", key);
							break;
						}
						lttng_poll_del(&events, chan->wait_fd);
						iter.iter.node = &chan->wait_fd_node.node;
						ret = lttng_ht_del(channel_ht, &iter);
						assert(ret == 0);
						consumer_close_channel_streams(chan);

						switch (consumer_data.type) {
						case LTTNG_CONSUMER_KERNEL:
							break;
						case LTTNG_CONSUMER32_UST:
						case LTTNG_CONSUMER64_UST:
							/* Delete streams that might have been left in the stream list. */
							cds_list_for_each_entry_safe(stream, stmp,
									&chan->streams.head, send_node) {
								cds_list_del(&stream->send_node);
								lttng_ustconsumer_del_stream(stream);
								uatomic_sub(&stream->chan->refcount, 1);
								assert(&chan->refcount);
								free(stream);
							}
							break;
						default:
							ERR("Unknown consumer_data type");
							break;
						}

						/*
						 * Release our own refcount. Force channel deletion even if
						 * streams were not initialized.
						 */
						if (!uatomic_sub_return(&chan->refcount, 1)) {
							consumer_del_channel(chan);
						}
						rcu_read_unlock();
						break;
					}
					case CONSUMER_CHANNEL_QUIT:
						/*
						 * Remove the pipe from the poll set and continue the loop
						 * since there might be data to consume.
						 */
						lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
						continue;
					default:
						ERR("Unknown action");
						break;
					}
				}

				/* Handle other stream */
				continue;
			}

			rcu_read_lock();
			{
				uint64_t tmp_id = (uint64_t) pollfd;

				lttng_ht_lookup(channel_ht, &tmp_id, &iter);
			}
			node = lttng_ht_iter_get_node_u64(&iter);
			assert(node);

			chan = caa_container_of(node, struct lttng_consumer_channel,
					wait_fd_node);

			/* Check for error event */
			if (revents & (LPOLLERR | LPOLLHUP)) {
				DBG("Channel fd %d is hup|err.", pollfd);

				lttng_poll_del(&events, chan->wait_fd);
				ret = lttng_ht_del(channel_ht, &iter);
				assert(ret == 0);
				consumer_close_channel_streams(chan);

				/* Release our own refcount */
				if (!uatomic_sub_return(&chan->refcount, 1)
						&& !uatomic_read(&chan->nb_init_stream_left)) {
					consumer_del_channel(chan);
				}
			}

			/* Release RCU lock for the channel looked up */
			rcu_read_unlock();
		}
	}

end:
	lttng_poll_clean(&events);
end_poll:
	destroy_channel_ht(channel_ht);
end_ht:
	DBG("Channel poll thread exiting");
	rcu_unregister_thread();
	return NULL;
}
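
/*
 * Illustrative sketch (not part of the original source): channel teardown in
 * the thread above is reference counted.  The last holder to drop
 * chan->refcount (and, on the hup/err path, only when no stream is left to
 * initialize) is the one that calls consumer_del_channel().  The snippet
 * below isolates that pattern with the uatomic helpers used in this file.
 */
#if 0
static void example_release_channel_ref(struct lttng_consumer_channel *chan)
{
	/* uatomic_sub_return() yields the new value: 0 means we were last. */
	if (!uatomic_sub_return(&chan->refcount, 1)
			&& !uatomic_read(&chan->nb_init_stream_left)) {
		consumer_del_channel(chan);
	}
}
#endif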
static int set_metadata_socket(struct lttng_consumer_local_data *ctx,
		struct pollfd *sockpoll, int client_socket)
{
	int ret;

	assert(ctx);
	assert(sockpoll);

	if (lttng_consumer_poll_socket(sockpoll) < 0) {
		ret = -1;
		goto error;
	}
	DBG("Metadata connection on client_socket");

	/* Blocking call, waiting for transmission */
	ctx->consumer_metadata_socket = lttcomm_accept_unix_sock(client_socket);
	if (ctx->consumer_metadata_socket < 0) {
		WARN("On accept metadata");
		ret = -1;
		goto error;
	}
	ret = 0;

error:
	return ret;
}
/*
 * This thread listens on the consumerd socket and receives the file
 * descriptors from the session daemon.
 */
void *consumer_thread_sessiond_poll(void *data)
{
	int sock = -1, client_socket, ret;
	/*
	 * Structure used to poll for incoming data on the communication socket;
	 * avoids blocking reads on the socket.
	 */
	struct pollfd consumer_sockpoll[2];
	struct lttng_consumer_local_data *ctx = data;

	rcu_register_thread();

	DBG("Creating command socket %s", ctx->consumer_command_sock_path);
	unlink(ctx->consumer_command_sock_path);
	client_socket = lttcomm_create_unix_sock(ctx->consumer_command_sock_path);
	if (client_socket < 0) {
		ERR("Cannot create command socket");
		goto end;
	}

	ret = lttcomm_listen_unix_sock(client_socket);
	if (ret < 0) {
		goto end;
	}

	DBG("Sending ready command to lttng-sessiond");
	ret = lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_COMMAND_SOCK_READY);
	/* return < 0 on error, but == 0 is not fatal */
	if (ret < 0) {
		ERR("Error sending ready command to lttng-sessiond");
		goto end;
	}

	/* prepare the FDs to poll : to client socket and the should_quit pipe */
	consumer_sockpoll[0].fd = ctx->consumer_should_quit[0];
	consumer_sockpoll[0].events = POLLIN | POLLPRI;
	consumer_sockpoll[1].fd = client_socket;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
		goto end;
	}
	DBG("Connection on client_socket");

	/* Blocking call, waiting for transmission */
	sock = lttcomm_accept_unix_sock(client_socket);
	if (sock < 0) {
		WARN("On accept");
		goto end;
	}

	/*
	 * Setup metadata socket which is the second socket connection on the
	 * command unix socket.
	 */
	ret = set_metadata_socket(ctx, consumer_sockpoll, client_socket);
	if (ret < 0) {
		goto end;
	}

	/* This socket is not useful anymore. */
	ret = close(client_socket);
	if (ret < 0) {
		PERROR("close client_socket");
	}
	client_socket = -1;

	/* update the polling structure to poll on the established socket */
	consumer_sockpoll[1].fd = sock;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	while (1) {
		if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
			goto end;
		}
		DBG("Incoming command on sock");
		ret = lttng_consumer_recv_cmd(ctx, sock, consumer_sockpoll);
		if (ret == -ENOENT) {
			DBG("Received STOP command");
			goto end;
		}
		if (ret <= 0) {
			/*
			 * This could simply be a session daemon quitting. Don't output
			 * an ERR() here.
			 */
			DBG("Communication interrupted on command socket");
			goto end;
		}
		if (consumer_quit) {
			DBG("consumer_thread_receive_fds received quit from signal");
			goto end;
		}
		DBG("received command on sock");
	}
end:
	DBG("Consumer thread sessiond poll exiting");

	/*
	 * Close metadata streams since the producer is the session daemon which
	 * is dead.
	 *
	 * NOTE: for now, this only applies to the UST tracer.
	 */
	lttng_consumer_close_metadata();

	/*
	 * when all fds have hung up, the polling thread
	 * can exit cleanly
	 */
	consumer_quit = 1;

	/*
	 * Notify the data poll thread to poll back again and test the
	 * consumer_quit state that we just set so to quit gracefully.
	 */
	notify_thread_lttng_pipe(ctx->consumer_data_pipe);

	notify_channel_pipe(ctx, NULL, -1, CONSUMER_CHANNEL_QUIT);

	/* Cleaning up possibly open sockets. */
	if (sock >= 0) {
		ret = close(sock);
		if (ret < 0) {
			PERROR("close sock sessiond poll");
		}
	}
	if (client_socket >= 0) {
		ret = close(client_socket);
		if (ret < 0) {
			PERROR("close client_socket sessiond poll");
		}
	}

	rcu_unregister_thread();
	return NULL;
}
ssize_t lttng_consumer_read_subbuffer(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	ssize_t ret;

	pthread_mutex_lock(&stream->lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		ret = lttng_kconsumer_read_subbuffer(stream, ctx);
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		ret = lttng_ustconsumer_read_subbuffer(stream, ctx);
		break;
	default:
		ERR("Unknown consumer_data type");
		ret = -ENOSYS;
		break;
	}

	pthread_mutex_unlock(&stream->lock);
	return ret;
}
int lttng_consumer_on_recv_stream(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_on_recv_stream(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_on_recv_stream(stream);
	default:
		ERR("Unknown consumer_data type");
		return -ENOSYS;
	}
}
/*
 * Allocate and set consumer data hash tables.
 */
void lttng_consumer_init(void)
{
	consumer_data.channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	consumer_data.relayd_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	consumer_data.stream_list_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	consumer_data.stream_per_chan_id_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
}
/*
 * Process the ADD_RELAYD command received by a consumer.
 *
 * This will create a relayd socket pair and add it to the relayd hash table.
 * The caller MUST acquire a RCU read side lock before calling it.
 */
int consumer_add_relayd_socket(uint64_t net_seq_idx, int sock_type,
		struct lttng_consumer_local_data *ctx, int sock,
		struct pollfd *consumer_sockpoll,
		struct lttcomm_relayd_sock *relayd_sock, uint64_t sessiond_id)
{
	int fd = -1, ret = -1, relayd_created = 0;
	enum lttng_error_code ret_code = LTTNG_OK;
	struct consumer_relayd_sock_pair *relayd = NULL;

	assert(ctx);
	assert(relayd_sock);

	DBG("Consumer adding relayd socket (idx: %" PRIu64 ")", net_seq_idx);

	/* Get relayd reference if exists. */
	relayd = consumer_find_relayd(net_seq_idx);
	if (relayd == NULL) {
		assert(sock_type == LTTNG_STREAM_CONTROL);
		/* Not found. Allocate one. */
		relayd = consumer_allocate_relayd_sock_pair(net_seq_idx);
		if (relayd == NULL) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		}
		relayd->sessiond_session_id = sessiond_id;
		relayd_created = 1;

		/*
		 * This code path MUST continue to the consumer send status message so
		 * we can notify the session daemon and continue our work without
		 * killing everything.
		 */
	} else {
		/*
		 * relayd key should never be found for control socket.
		 */
		assert(sock_type != LTTNG_STREAM_CONTROL);
	}

	/* First send a status message before receiving the fds. */
	ret = consumer_send_status_msg(sock, LTTNG_OK);
	if (ret < 0) {
		/* Somehow, the session daemon is not responding anymore. */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
		goto error_nosignal;
	}

	/* Poll on consumer socket. */
	if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
		ret = -EINTR;
		goto error_nosignal;
	}

	/* Get relayd socket from session daemon */
	ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
	if (ret != sizeof(fd)) {
		ret = -1;
		fd = -1;	/* Just in case it gets set with an invalid value. */

		/*
		 * Failing to receive FDs might indicate a major problem such as
		 * reaching a fd limit during the receive where the kernel returns a
		 * MSG_CTRUNC and fails to cleanup the fd in the queue. In any case, we
		 * don't take any chances and stop everything.
		 *
		 * XXX: Feature request #558 will fix that and avoid this possible
		 * issue when reaching the fd limit.
		 */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
		ret_code = LTTCOMM_CONSUMERD_ERROR_RECV_FD;
		goto error;
	}

	/* Copy socket information and received FD */
	switch (sock_type) {
	case LTTNG_STREAM_CONTROL:
		/* Copy received lttcomm socket */
		lttcomm_copy_sock(&relayd->control_sock.sock, &relayd_sock->sock);
		ret = lttcomm_create_sock(&relayd->control_sock.sock);
		/* Handle create_sock error. */
		if (ret < 0) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		}
		/*
		 * Close the socket created internally by
		 * lttcomm_create_sock, so we can replace it by the one
		 * received from sessiond.
		 */
		if (close(relayd->control_sock.sock.fd)) {
			PERROR("close");
		}

		/* Assign new file descriptor */
		relayd->control_sock.sock.fd = fd;
		fd = -1;	/* For error path */
		/* Assign version values. */
		relayd->control_sock.major = relayd_sock->major;
		relayd->control_sock.minor = relayd_sock->minor;

		/*
		 * Create a session on the relayd and store the returned id. Lock the
		 * control socket mutex if the relayd was NOT created before.
		 */
		if (!relayd_created) {
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		}
		ret = relayd_create_session(&relayd->control_sock,
				&relayd->relayd_session_id);
		if (!relayd_created) {
			pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		}
		if (ret < 0) {
			/*
			 * Close all sockets of a relayd object. It will be freed if it was
			 * created at the error code path or else it will be garbage
			 * collected.
			 */
			(void) relayd_close(&relayd->control_sock);
			(void) relayd_close(&relayd->data_sock);
			ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
			goto error;
		}

		break;
	case LTTNG_STREAM_DATA:
		/* Copy received lttcomm socket */
		lttcomm_copy_sock(&relayd->data_sock.sock, &relayd_sock->sock);
		ret = lttcomm_create_sock(&relayd->data_sock.sock);
		/* Handle create_sock error. */
		if (ret < 0) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		}
		/*
		 * Close the socket created internally by
		 * lttcomm_create_sock, so we can replace it by the one
		 * received from sessiond.
		 */
		if (close(relayd->data_sock.sock.fd)) {
			PERROR("close");
		}

		/* Assign new file descriptor */
		relayd->data_sock.sock.fd = fd;
		fd = -1;	/* for eventual error paths */
		/* Assign version values. */
		relayd->data_sock.major = relayd_sock->major;
		relayd->data_sock.minor = relayd_sock->minor;
		break;
	default:
		ERR("Unknown relayd socket type (%d)", sock_type);
		ret = -1;
		ret_code = LTTCOMM_CONSUMERD_FATAL;
		goto error;
	}

	DBG("Consumer %s socket created successfully with net idx %" PRIu64 " (fd: %d)",
			sock_type == LTTNG_STREAM_CONTROL ? "control" : "data",
			relayd->net_seq_idx, fd);

	/* We successfully added the socket. Send status back. */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0) {
		/* Somehow, the session daemon is not responding anymore. */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
		goto error_nosignal;
	}

	/*
	 * Add relayd socket pair to consumer data hashtable. If object already
	 * exists or on error, the function gracefully returns.
	 */
	add_relayd(relayd);

	/* All good! */
	return 0;

error:
	if (consumer_send_status_msg(sock, ret_code) < 0) {
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
	}

error_nosignal:
	/* Close received socket if valid. */
	if (fd >= 0) {
		if (close(fd)) {
			PERROR("close received socket");
		}
	}

	if (relayd_created) {
		free(relayd);
	}

	return ret;
}
/*
 * Try to lock the stream mutex.
 *
 * On success, 1 is returned, else 0 indicating that the mutex is NOT locked.
 */
static int stream_try_lock(struct lttng_consumer_stream *stream)
{
	int ret;

	assert(stream);

	/*
	 * Try to lock the stream mutex. On failure, we know that the stream is
	 * being used elsewhere hence there is data still being extracted.
	 */
	ret = pthread_mutex_trylock(&stream->lock);
	if (ret) {
		/* For both EBUSY and EINVAL errors, the mutex is NOT locked. */
		ret = 0;
		goto end;
	}

	ret = 1;

end:
	return ret;
}
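
/*
 * Illustrative sketch (not part of the original source): stream_try_lock()
 * returns 1 when the mutex was acquired and 0 when it was not, so callers
 * such as consumer_data_pending() treat a failed trylock as "stream busy,
 * data still pending" and must unlock only when 1 was returned.
 */
#if 0
static int example_stream_is_busy(struct lttng_consumer_stream *stream)
{
	if (!stream_try_lock(stream)) {
		/* Someone else holds the lock: data is still being extracted. */
		return 1;
	}
	/* We own the lock; release it right away in this trivial example. */
	pthread_mutex_unlock(&stream->lock);
	return 0;
}
#endif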
/*
 * Search for a relayd associated to the session id and return the reference.
 *
 * An RCU read side lock MUST be acquired before calling this function and held
 * until the relayd object is no longer needed.
 */
static struct consumer_relayd_sock_pair *find_relayd_by_session_id(uint64_t id)
{
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Iterate over all relayd since they are indexed by net_seq_idx. */
	cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
			node.node) {
		/*
		 * Check by sessiond id which is unique here where the relayd session
		 * id might not be when having multiple relayd.
		 */
		if (relayd->sessiond_session_id == id) {
			/* Found the relayd. There can be only one per id. */
			goto found;
		}
	}

	return NULL;

found:
	return relayd;
}
/*
 * Check if, for a given session id, there is still data needed to be extracted.
 *
 * Return 1 if data is pending or else 0 meaning ready to be read.
 */
int consumer_data_pending(uint64_t id)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct consumer_relayd_sock_pair *relayd = NULL;
	int (*data_pending)(struct lttng_consumer_stream *);

	DBG("Consumer data pending command on session id %" PRIu64, id);

	rcu_read_lock();
	pthread_mutex_lock(&consumer_data.lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		data_pending = lttng_kconsumer_data_pending;
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		data_pending = lttng_ustconsumer_data_pending;
		break;
	default:
		ERR("Unknown consumer data type");
		assert(0);
	}

	/* Ease our life a bit */
	ht = consumer_data.stream_list_ht;

	relayd = find_relayd_by_session_id(id);
	if (relayd) {
		/* Send init command for data pending. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_begin_data_pending(&relayd->control_sock,
				relayd->relayd_session_id);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			/* Communication error with the relayd, so report no data pending. */
			goto data_not_pending;
		}
	}

	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&id, lttng_ht_seed),
			ht->match_fct, &id,
			&iter.iter, stream, node_session_id.node) {
		/* If this call fails, the stream is being used hence data pending. */
		ret = stream_try_lock(stream);
		if (!ret) {
			goto data_pending;
		}

		/*
		 * A removed node from the hash table indicates that the stream has
		 * been deleted thus having a guarantee that the buffers are closed
		 * on the consumer side. However, data can still be transmitted
		 * over the network so don't skip the relayd check.
		 */
		ret = cds_lfht_is_node_deleted(&stream->node.node);
		if (!ret) {
			/* Check the stream if there is data in the buffers. */
			ret = data_pending(stream);
			if (ret == 1) {
				pthread_mutex_unlock(&stream->lock);
				goto data_pending;
			}
		}

		/* Relayd check */
		if (relayd) {
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			if (stream->metadata_flag) {
				ret = relayd_quiescent_control(&relayd->control_sock,
						stream->relayd_stream_id);
			} else {
				ret = relayd_data_pending(&relayd->control_sock,
						stream->relayd_stream_id,
						stream->next_net_seq_num - 1);
			}
			pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
			if (ret == 1) {
				pthread_mutex_unlock(&stream->lock);
				goto data_pending;
			}
		}
		pthread_mutex_unlock(&stream->lock);
	}

	if (relayd) {
		unsigned int is_data_inflight = 0;

		/* Send end command for data pending. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_end_data_pending(&relayd->control_sock,
				relayd->relayd_session_id, &is_data_inflight);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			goto data_not_pending;
		}
		if (is_data_inflight) {
			goto data_pending;
		}
	}

	/*
	 * Finding _no_ node in the hash table and no inflight data means that the
	 * stream(s) have been removed thus data is guaranteed to be available for
	 * analysis from the trace files.
	 */

data_not_pending:
	/* Data is available to be read by a viewer. */
	pthread_mutex_unlock(&consumer_data.lock);
	rcu_read_unlock();
	return 0;

data_pending:
	/* Data is still being extracted from buffers. */
	pthread_mutex_unlock(&consumer_data.lock);
	rcu_read_unlock();
	return 1;
}
/*
 * Send a ret code status message to the sessiond daemon.
 *
 * Return the sendmsg() return value.
 */
int consumer_send_status_msg(int sock, int ret_code)
{
	struct lttcomm_consumer_status_msg msg;

	msg.ret_code = ret_code;

	return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
}
/*
 * Send a channel status message to the sessiond daemon.
 *
 * Return the sendmsg() return value.
 */
int consumer_send_status_channel(int sock,
		struct lttng_consumer_channel *channel)
{
	struct lttcomm_consumer_status_channel msg;

	assert(sock >= 0);

	if (!channel) {
		msg.ret_code = -LTTNG_ERR_UST_CHAN_FAIL;
	} else {
		msg.ret_code = LTTNG_OK;
		msg.key = channel->key;
		msg.stream_count = channel->streams.count;
	}

	return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
}
/*
 * Using the maximum stream size together with the produced and consumed
 * positions of a stream, compute the new consumed position so that the amount
 * of data kept stays within the maximum stream size.
 *
 * If the maximum stream size is zero or larger than the available data
 * (produced - consumed), the given consumed_pos is returned untouched;
 * otherwise the new consumed position (produced_pos - max_stream_size) is
 * returned so that only the newest max_stream_size bytes are kept.
 */
unsigned long consumer_get_consumed_maxsize(unsigned long consumed_pos,
		unsigned long produced_pos, uint64_t max_stream_size)
{
	if (max_stream_size && max_stream_size < (produced_pos - consumed_pos)) {
		/* Offset from the produced position to get the latest buffers. */
		return produced_pos - max_stream_size;
	}

	return consumed_pos;
}