/*
 * Copyright (C) 2011 EfficiOS Inc.
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2017 Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <lttng/ust-ctl.h>
#include <lttng/ust-sigbus.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <urcu/list.h>

#include <bin/lttng-consumerd/health-consumerd.hpp>
#include <common/common.hpp>
#include <common/sessiond-comm/sessiond-comm.hpp>
#include <common/relayd/relayd.hpp>
#include <common/compat/fcntl.hpp>
#include <common/compat/endian.hpp>
#include <common/consumer/consumer-metadata-cache.hpp>
#include <common/consumer/consumer-stream.hpp>
#include <common/consumer/consumer-timer.hpp>
#include <common/utils.hpp>
#include <common/index/index.hpp>
#include <common/consumer/consumer.hpp>
#include <common/shm.hpp>
#include <common/optional.hpp>

#include "ust-consumer.hpp"

#define INT_MAX_STR_LEN 12 /* includes \0 */
extern struct lttng_consumer_global_data the_consumer_data;
extern int consumer_poll_timeout;

LTTNG_EXPORT DEFINE_LTTNG_UST_SIGBUS_STATE();
/*
 * Add channel to internal consumer state.
 *
 * Returns 0 on success or else a negative value.
 */
static int add_channel(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;

	LTTNG_ASSERT(channel);

	if (ctx->on_recv_channel != NULL) {
		ret = ctx->on_recv_channel(channel);
		if (ret == 0) {
			ret = consumer_add_channel(channel, ctx);
		} else if (ret < 0) {
			/* Most likely an ENOMEM. */
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
		}
	} else {
		ret = consumer_add_channel(channel, ctx);
	}

	DBG("UST consumer channel added (key: %" PRIu64 ")", channel->key);

	return ret;
}
/*
 * Allocate and return a consumer stream object. If _alloc_ret is not NULL, the
 * error value if applicable is set in it else it is kept untouched.
 *
 * Return NULL on error else the newly allocated stream object.
 */
static struct lttng_consumer_stream *allocate_stream(int cpu, int key,
		struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx, int *_alloc_ret)
{
	struct lttng_consumer_stream *stream = NULL;

	LTTNG_ASSERT(channel);

	stream = consumer_stream_create(channel, channel->key, key, channel->name,
			channel->relayd_id, channel->session_id, channel->trace_chunk, cpu,
			&alloc_ret, channel->type, channel->monitor, channel->trace_format);
	if (stream == NULL) {
		/*
		 * We could not find the channel. Can happen if cpu hotplug
		 * happens while tearing down.
		 */
		DBG3("Could not find channel");
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
	}

	consumer_stream_update_channel_attributes(stream, channel);

	*_alloc_ret = alloc_ret;
/*
 * Send the given stream pointer to the corresponding thread.
 *
 * Returns 0 on success else a negative value.
 */
static int send_stream_to_thread(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	struct lttng_pipe *stream_pipe;

	/* Get the right pipe where the stream will be sent. */
	if (stream->metadata_flag) {
		consumer_add_metadata_stream(stream);
		stream_pipe = ctx->consumer_metadata_pipe;
	} else {
		consumer_add_data_stream(stream);
		stream_pipe = ctx->consumer_data_pipe;
	}

	/*
	 * From this point on, the stream's ownership has been moved away from
	 * the channel and it becomes globally visible. Hence, remove it from
	 * the local stream list to prevent the stream from being both local and
	 * global.
	 */
	stream->globally_visible = 1;
	cds_list_del_init(&stream->send_node);

	ret = lttng_pipe_write(stream_pipe, &stream, sizeof(stream));
	if (ret < 0) {
		ERR("Consumer write %s stream to pipe %d",
				stream->metadata_flag ? "metadata" : "data",
				lttng_pipe_get_writefd(stream_pipe));
		if (stream->metadata_flag) {
			consumer_del_stream_for_metadata(stream);
		} else {
			consumer_del_stream_for_data(stream);
		}
	}
int get_stream_shm_path(char *stream_shm_path, const char *shm_path, int cpu)
{
	char cpu_nr[INT_MAX_STR_LEN]; /* int max len */

	strncpy(stream_shm_path, shm_path, PATH_MAX);
	stream_shm_path[PATH_MAX - 1] = '\0';
	ret = snprintf(cpu_nr, INT_MAX_STR_LEN, "%i", cpu);
	strncat(stream_shm_path, cpu_nr,
			PATH_MAX - strlen(stream_shm_path) - 1);
/*
 * Create streams for the given channel using liblttng-ust-ctl.
 * The channel lock must be acquired by the caller.
 *
 * Return 0 on success else a negative value.
 */
static int create_ust_streams(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	struct lttng_ust_ctl_consumer_stream *ustream;
	struct lttng_consumer_stream *stream;
	pthread_mutex_t *current_stream_lock = NULL;

	LTTNG_ASSERT(channel);

	/*
	 * While a stream is available from ustctl. When NULL is returned, we've
	 * reached the end of the possible streams for the channel.
	 */
	while ((ustream = lttng_ust_ctl_create_stream(channel->uchan, cpu))) {
		int ust_metadata_pipe[2];

		health_code_update();

		if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && channel->monitor) {
			ret = utils_create_pipe_cloexec_nonblock(ust_metadata_pipe);
			if (ret < 0) {
				ERR("Create ust metadata poll pipe");
			}
			wait_fd = ust_metadata_pipe[0];
		} else {
			wait_fd = lttng_ust_ctl_stream_get_wait_fd(ustream);
		}

		/* Allocate consumer stream object. */
		stream = allocate_stream(cpu, wait_fd, channel, ctx, &ret);
		stream->ustream = ustream;

		/*
		 * Store it so we can save multiple function calls afterwards since
		 * this value is used heavily in the stream threads. This is UST
		 * specific so this is why it's done after allocation.
		 */
		stream->wait_fd = wait_fd;

		/*
		 * Increment channel refcount since the channel reference has now been
		 * assigned in the allocation process above.
		 */
		if (stream->chan->monitor) {
			uatomic_inc(&stream->chan->refcount);
		}

		pthread_mutex_lock(&stream->lock);
		current_stream_lock = &stream->lock;

		/*
		 * Order is important; this is why a list is used. On error, the caller
		 * should clean this list.
		 */
		cds_list_add_tail(&stream->send_node, &channel->streams.head);

		ret = lttng_ust_ctl_get_max_subbuf_size(stream->ustream,
				&stream->max_sb_size);
		if (ret < 0) {
			ERR("lttng_ust_ctl_get_max_subbuf_size failed for stream %s",
					stream->name);
		}

		/* Do actions once stream has been received. */
		if (ctx->on_recv_stream) {
			ret = ctx->on_recv_stream(stream);
		}

		DBG("UST consumer add stream %s (key: %" PRIu64 ") with relayd id %" PRIu64,
				stream->name, stream->key, stream->relayd_stream_id);

		/* Set next CPU stream. */
		channel->streams.count = ++cpu;

		/* Keep stream reference when creating metadata. */
		if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA) {
			channel->metadata_stream = stream;
			if (channel->monitor) {
				/* Set metadata poll pipe if we created one */
				memcpy(stream->ust_metadata_poll_pipe,
						ust_metadata_pipe,
						sizeof(ust_metadata_pipe));
			}
		}

		pthread_mutex_unlock(&stream->lock);
		current_stream_lock = NULL;
	}

	if (current_stream_lock) {
		pthread_mutex_unlock(current_stream_lock);
	}
static int open_ust_stream_fd(struct lttng_consumer_channel *channel, int cpu,
		const struct lttng_credentials *session_credentials)
{
	char shm_path[PATH_MAX];

	if (!channel->shm_path[0]) {
		return shm_create_anonymous("ust-consumer");
	}

	ret = get_stream_shm_path(shm_path, channel->shm_path, cpu);

	return run_as_open(shm_path,
			O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR,
			lttng_credentials_get_uid(session_credentials),
			lttng_credentials_get_gid(session_credentials));
/*
 * Create an UST channel with the given attributes and send it to the session
 * daemon using the ust ctl API.
 *
 * Return 0 on success or else a negative value.
 */
static int create_ust_channel(struct lttng_consumer_channel *channel,
		struct lttng_ust_ctl_consumer_channel_attr *attr,
		struct lttng_ust_ctl_consumer_channel **ust_chanp)
{
	int ret, nr_stream_fds, i, j;
	struct lttng_ust_ctl_consumer_channel *ust_channel;

	LTTNG_ASSERT(channel);
	LTTNG_ASSERT(ust_chanp);
	LTTNG_ASSERT(channel->buffer_credentials.is_set);

	DBG3("Creating channel to ustctl with attr: [overwrite: %d, "
			"subbuf_size: %" PRIu64 ", num_subbuf: %" PRIu64 ", "
			"switch_timer_interval: %u, read_timer_interval: %u, "
			"output: %d, type: %d", attr->overwrite, attr->subbuf_size,
			attr->num_subbuf, attr->switch_timer_interval,
			attr->read_timer_interval, attr->output, attr->type);

	if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA)

	nr_stream_fds = lttng_ust_ctl_get_nr_stream_per_channel();
	stream_fds = calloc<int>(nr_stream_fds);

	for (i = 0; i < nr_stream_fds; i++) {
		stream_fds[i] = open_ust_stream_fd(channel, i,
				&channel->buffer_credentials.value);
		if (stream_fds[i] < 0) {
		}
	}

	ust_channel = lttng_ust_ctl_create_channel(attr, stream_fds, nr_stream_fds);

	channel->nr_stream_fds = nr_stream_fds;
	channel->stream_fds = stream_fds;
	*ust_chanp = ust_channel;

	for (j = i - 1; j >= 0; j--) {
		closeret = close(stream_fds[j]);

		if (channel->shm_path[0]) {
			char shm_path[PATH_MAX];

			closeret = get_stream_shm_path(shm_path,
					channel->shm_path, j);
			if (closeret) {
				ERR("Cannot get stream shm path");
			}
			closeret = run_as_unlink(shm_path,
					lttng_credentials_get_uid(LTTNG_OPTIONAL_GET_PTR(
							channel->buffer_credentials)),
					lttng_credentials_get_gid(LTTNG_OPTIONAL_GET_PTR(
							channel->buffer_credentials)));
			if (closeret) {
				PERROR("unlink %s", shm_path);
			}
		}
	}

	/* Try to rmdir all directories under shm_path root. */
	if (channel->root_shm_path[0]) {
		(void) run_as_rmdir_recursive(channel->root_shm_path,
				lttng_credentials_get_uid(LTTNG_OPTIONAL_GET_PTR(
						channel->buffer_credentials)),
				lttng_credentials_get_gid(LTTNG_OPTIONAL_GET_PTR(
						channel->buffer_credentials)),
				LTTNG_DIRECTORY_HANDLE_SKIP_NON_EMPTY_FLAG);
	}
/*
 * Send a single given stream to the session daemon using the sock.
 *
 * Return 0 on success else a negative value.
 */
static int send_sessiond_stream(int sock, struct lttng_consumer_stream *stream)
{
	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(sock >= 0);

	DBG("UST consumer sending stream %" PRIu64 " to sessiond", stream->key);

	/* Send stream to session daemon. */
	ret = lttng_ust_ctl_send_stream_to_sessiond(sock, stream->ustream);
/*
 * Send channel to sessiond and relayd if applicable.
 *
 * Return 0 on success or else a negative value.
 */
static int send_channel_to_sessiond_and_relayd(int sock,
		struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx, int *relayd_error)
{
	int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct lttng_consumer_stream *stream;
	uint64_t net_seq_idx = -1ULL;

	LTTNG_ASSERT(channel);
	LTTNG_ASSERT(sock >= 0);

	DBG("UST consumer sending channel %s to sessiond", channel->name);

	if (channel->relayd_id != (uint64_t) -1ULL) {
		cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
			health_code_update();

			/* Try to send the stream to the relayd if one is available. */
			DBG("Sending stream %" PRIu64 " of channel \"%s\" to relayd",
					stream->key, channel->name);
			ret = consumer_send_relayd_stream(stream, stream->chan->pathname);
			if (ret < 0) {
				/*
				 * Flag that the relayd was the problem here probably due to a
				 * communication error on the socket.
				 */
				ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
			}
			if (net_seq_idx == -1ULL) {
				net_seq_idx = stream->net_seq_idx;
			}
		}
	}

	/* Inform sessiond that we are about to send channel and streams. */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0 || ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
		/*
		 * Either the session daemon is not responding or the relayd died so we
		 * stop now.
		 */
	}

	/* Send channel to sessiond. */
	ret = lttng_ust_ctl_send_channel_to_sessiond(sock, channel->uchan);

	ret = lttng_ust_ctl_channel_close_wakeup_fd(channel->uchan);

	/* The channel was sent successfully to the sessiond at this point. */
	cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
		health_code_update();

		/* Send stream to session daemon. */
		ret = send_sessiond_stream(sock, stream);
	}

	/* Tell sessiond there is no more stream. */
	ret = lttng_ust_ctl_send_stream_to_sessiond(sock, NULL);

	DBG("UST consumer NULL stream sent to sessiond");

	if (ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
	}
/*
 * Creates a channel and streams and adds the channel to the internal channel
 * state. The created streams must ONLY be sent once the GET_CHANNEL command is
 * received.
 *
 * Return 0 on success or else, a negative value is returned and the channel
 * MUST be destroyed by consumer_del_channel().
 */
static int ask_channel(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel *channel,
		struct lttng_ust_ctl_consumer_channel_attr *attr)
{
	LTTNG_ASSERT(channel);

	/*
	 * This value is still used by the kernel consumer since for the kernel,
	 * the stream ownership is not IN the consumer so we need to have the
	 * number of streams left that need to be initialized so we can know when
	 * to delete the channel (see consumer.c).
	 *
	 * As for the user space tracer now, the consumer creates and sends the
	 * stream to the session daemon which only sends them to the application
	 * once every stream of a channel is received, making this value useless
	 * because they will be added to the poll thread before the application
	 * receives them. This ensures that a stream cannot hang up during
	 * initialization of a channel.
	 */
	channel->nb_init_stream_left = 0;

	/* The reply msg status is handled in the following call. */
	ret = create_ust_channel(channel, attr, &channel->uchan);

	channel->wait_fd = lttng_ust_ctl_channel_get_wait_fd(channel->uchan);

	/*
	 * For the snapshots (no monitor), we create the metadata streams
	 * on demand, not during the channel creation.
	 */
	if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && !channel->monitor) {
	}

	/* Open all streams for this channel. */
	pthread_mutex_lock(&channel->lock);
	ret = create_ust_streams(channel, ctx);
	pthread_mutex_unlock(&channel->lock);
/*
 * Send all streams of a channel to the right thread handling it.
 *
 * On error, return a negative value else 0 on success.
 */
static int send_streams_to_thread(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	struct lttng_consumer_stream *stream, *stmp;

	LTTNG_ASSERT(channel);

	/* Send streams to the corresponding thread. */
	cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
			send_node) {
		health_code_update();

		/* Sending the stream to the thread. */
		ret = send_stream_to_thread(stream, ctx);
		/*
		 * If we are unable to send the stream to the thread, there is
		 * a big problem so just stop everything.
		 */
	}
/*
 * Flush channel's streams using the given key to retrieve the channel.
 *
 * Return 0 on success else an LTTng error code.
 */
static int flush_channel(uint64_t chan_key)
{
	struct lttng_consumer_channel *channel;
	struct lttng_consumer_stream *stream;
	struct lttng_ht_iter iter;

	DBG("UST consumer flush channel key %" PRIu64, chan_key);

	channel = consumer_find_channel(chan_key);
	if (!channel) {
		ERR("UST consumer flush channel %" PRIu64 " not found", chan_key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
	}

	ht = the_consumer_data.stream_per_chan_id_ht;

	/* For each stream of the channel id, flush it. */
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct,
			&channel->key, &iter.iter, stream, node_channel_id.node) {
		health_code_update();

		pthread_mutex_lock(&stream->lock);

		/*
		 * Protect against concurrent teardown of a stream.
		 */
		if (cds_lfht_is_node_deleted(&stream->node.node)) {
		}

		if (!stream->quiescent) {
			ret = lttng_ust_ctl_flush_buffer(stream->ustream, 0);
			if (ret) {
				ERR("Failed to flush buffer while flushing channel: channel key = %" PRIu64 ", channel name = '%s'",
						chan_key, channel->name);
				ret = LTTNG_ERR_BUFFER_FLUSH_FAILED;
				pthread_mutex_unlock(&stream->lock);
			}
			stream->quiescent = true;
		}

		pthread_mutex_unlock(&stream->lock);
	}

	/*
	 * Send one last buffer statistics update to the session daemon. This
	 * ensures that the session daemon gets at least one statistics update
	 * per channel even in the case of short-lived channels, such as when a
	 * short-lived app is traced in per-pid mode.
	 */
	sample_and_send_channel_buffer_stats(channel);
/*
 * Clear quiescent state from channel's streams using the given key to
 * retrieve the channel.
 *
 * Return 0 on success else an LTTng error code.
 */
static int clear_quiescent_channel(uint64_t chan_key)
{
	struct lttng_consumer_channel *channel;
	struct lttng_consumer_stream *stream;
	struct lttng_ht_iter iter;

	DBG("UST consumer clear quiescent channel key %" PRIu64, chan_key);

	channel = consumer_find_channel(chan_key);
	if (!channel) {
		ERR("UST consumer clear quiescent channel %" PRIu64 " not found", chan_key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
	}

	ht = the_consumer_data.stream_per_chan_id_ht;

	/* For each stream of the channel id, clear quiescent state. */
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct,
			&channel->key, &iter.iter, stream, node_channel_id.node) {
		health_code_update();

		pthread_mutex_lock(&stream->lock);
		stream->quiescent = false;
		pthread_mutex_unlock(&stream->lock);
	}
/*
 * Close metadata stream wakeup_fd using the given key to retrieve the channel.
 *
 * Return 0 on success else an LTTng error code.
 */
static int close_metadata(uint64_t chan_key)
{
	struct lttng_consumer_channel *channel;
	unsigned int channel_monitor;

	DBG("UST consumer close metadata key %" PRIu64, chan_key);

	channel = consumer_find_channel(chan_key);
	if (!channel) {
		/*
		 * This is possible if the metadata thread has issued a delete because
		 * the endpoint of the stream hung up. There is no way the
		 * session daemon can know about it thus use a DBG instead of an actual
		 * error.
		 */
		DBG("UST consumer close metadata %" PRIu64 " not found", chan_key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
	}

	pthread_mutex_lock(&the_consumer_data.lock);
	pthread_mutex_lock(&channel->lock);
	channel_monitor = channel->monitor;
	if (cds_lfht_is_node_deleted(&channel->node.node)) {
	}

	lttng_ustconsumer_close_metadata(channel);
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&the_consumer_data.lock);

	/*
	 * The ownership of a metadata channel depends on the type of
	 * session to which it belongs. In effect, the monitor flag is checked
	 * to determine if this metadata channel is in "snapshot" mode or not.
	 *
	 * In the non-snapshot case, the metadata channel is created along with
	 * a single stream which will remain present until the metadata channel
	 * is destroyed (on the destruction of its session). In this case, the
	 * metadata stream is "monitored" by the metadata poll thread and holds
	 * the ownership of its channel.
	 *
	 * Closing the metadata will cause the metadata stream's "metadata poll
	 * pipe" to be closed. Closing this pipe will wake-up the metadata poll
	 * thread which will teardown the metadata stream which, in return,
	 * deletes the metadata channel.
	 *
	 * In the snapshot case, the metadata stream is created and destroyed
	 * on every snapshot record. Since the channel doesn't have an owner
	 * other than the session daemon, it is safe to destroy it immediately
	 * on reception of the CLOSE_METADATA command.
	 */
	if (!channel_monitor) {
		/*
		 * The channel and consumer_data locks must be
		 * released before this call since consumer_del_channel
		 * re-acquires the channel and consumer_data locks to teardown
		 * the channel and queue its reclamation by the "call_rcu"
		 * worker thread.
		 */
		consumer_del_channel(channel);
	}

	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&the_consumer_data.lock);
/*
 * RCU read side lock MUST be acquired before calling this function.
 *
 * Return 0 on success else an LTTng error code.
 */
static int setup_metadata(struct lttng_consumer_local_data *ctx, uint64_t key)
{
	struct lttng_consumer_channel *metadata;

	ASSERT_RCU_READ_LOCKED();

	DBG("UST consumer setup metadata key %" PRIu64, key);

	metadata = consumer_find_channel(key);
	if (!metadata) {
		ERR("UST consumer push metadata %" PRIu64 " not found", key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
	}

	/*
	 * In no monitor mode, the metadata channel has no stream(s) so skip the
	 * ownership transfer to the metadata thread.
	 */
	if (!metadata->monitor) {
		DBG("Metadata channel in no monitor");
	}

	/*
	 * Send metadata stream to relayd if one available. Availability is
	 * known if the stream is still in the list of the channel.
	 */
	if (cds_list_empty(&metadata->streams.head)) {
		ERR("Metadata channel key %" PRIu64 ", no stream available.", key);
		ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
		goto error_no_stream;
	}

	/* Send metadata stream to relayd if needed. */
	if (metadata->metadata_stream->net_seq_idx != (uint64_t) -1ULL) {
		ret = consumer_send_relayd_stream(metadata->metadata_stream,
				metadata->pathname);
		if (ret < 0) {
			ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
		}
		ret = consumer_send_relayd_streams_sent(
				metadata->metadata_stream->net_seq_idx);
		if (ret < 0) {
			ret = LTTCOMM_CONSUMERD_RELAYD_FAIL;
		}
	}

	/*
	 * Ownership of metadata stream is passed along. Freeing is handled by
	 * the thread.
	 */
	ret = send_streams_to_thread(metadata, ctx);
	if (ret < 0) {
		/*
		 * If we are unable to send the stream to the thread, there is
		 * a big problem so just stop everything.
		 */
		ret = LTTCOMM_CONSUMERD_FATAL;
		goto send_streams_error;
	}
	/* List MUST be empty after or else it could be reused. */
	LTTNG_ASSERT(cds_list_empty(&metadata->streams.head));

	/*
	 * Delete metadata channel on error. At this point, the metadata stream can
	 * NOT be monitored by the metadata thread thus having the guarantee that
	 * the stream is still in the local stream list of the channel. This call
	 * will make sure to clean that list.
	 */
	consumer_stream_destroy(metadata->metadata_stream, NULL);
	metadata->metadata_stream = NULL;
/*
 * Snapshot the whole metadata.
 * RCU read-side lock must be held by the caller.
 *
 * Returns 0 on success, < 0 on error
 */
static int snapshot_metadata(struct lttng_consumer_channel *metadata_channel,
		uint64_t key, char *path, uint64_t relayd_id,
		struct lttng_consumer_local_data *ctx)
{
	struct lttng_consumer_stream *metadata_stream;

	ASSERT_RCU_READ_LOCKED();

	DBG("UST consumer snapshot metadata with key %" PRIu64 " at path %s",
			key, path);

	LTTNG_ASSERT(!metadata_channel->monitor);

	health_code_update();

	/*
	 * Ask the sessiond if we have new metadata waiting and update the
	 * consumer metadata cache.
	 */
	ret = lttng_ustconsumer_request_metadata(ctx, metadata_channel, 0, 1);

	health_code_update();

	/*
	 * The metadata stream is NOT created in no monitor mode when the channel
	 * is created on a sessiond ask channel command.
	 */
	ret = create_ust_streams(metadata_channel, ctx);

	metadata_stream = metadata_channel->metadata_stream;
	LTTNG_ASSERT(metadata_stream);

	metadata_stream->read_subbuffer_ops.lock(metadata_stream);
	if (relayd_id != (uint64_t) -1ULL) {
		metadata_stream->net_seq_idx = relayd_id;
		ret = consumer_send_relayd_stream(metadata_stream, path);
	} else {
		ret = consumer_stream_create_output_files(metadata_stream,
				false);
	}

	health_code_update();
	ret = lttng_consumer_read_subbuffer(metadata_stream, ctx, true);

	metadata_stream->read_subbuffer_ops.unlock(metadata_stream);
	/*
	 * Clean up the stream completely because the next snapshot will use a
	 * new metadata stream.
	 */
	consumer_stream_destroy(metadata_stream, NULL);
	metadata_channel->metadata_stream = NULL;
int get_current_subbuf_addr(struct lttng_consumer_stream *stream,
		const char **addr)
{
	unsigned long mmap_offset;
	const char *mmap_base;

	mmap_base = (const char *) lttng_ust_ctl_get_mmap_base(stream->ustream);
	if (!mmap_base) {
		ERR("Failed to get mmap base for stream `%s`",
				stream->name);
	}

	ret = lttng_ust_ctl_get_mmap_read_offset(stream->ustream, &mmap_offset);
	if (ret != 0) {
		ERR("Failed to get mmap offset for stream `%s`", stream->name);
	}

	*addr = mmap_base + mmap_offset;
/*
 * Take a snapshot of all the streams of a channel.
 * RCU read-side lock and the channel lock must be held by the caller.
 *
 * Returns 0 on success, < 0 on error
 */
static int snapshot_channel(struct lttng_consumer_channel *channel,
		uint64_t key, char *path, uint64_t relayd_id,
		uint64_t nb_packets_per_stream,
		struct lttng_consumer_local_data *ctx)
{
	unsigned use_relayd = 0;
	unsigned long consumed_pos, produced_pos;
	struct lttng_consumer_stream *stream;

	ASSERT_RCU_READ_LOCKED();

	if (relayd_id != (uint64_t) -1ULL) {
	}

	LTTNG_ASSERT(!channel->monitor);
	DBG("UST consumer snapshot channel %" PRIu64, key);

	cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
		health_code_update();

		/* Lock stream because we are about to change its state. */
		pthread_mutex_lock(&stream->lock);
		LTTNG_ASSERT(channel->trace_chunk);
		if (!lttng_trace_chunk_get(channel->trace_chunk)) {
			/*
			 * Can't happen barring an internal error as the channel
			 * holds a reference to the trace chunk.
			 */
			ERR("Failed to acquire reference to channel's trace chunk");
		}
		LTTNG_ASSERT(!stream->trace_chunk);
		stream->trace_chunk = channel->trace_chunk;

		stream->net_seq_idx = relayd_id;

		ret = consumer_send_relayd_stream(stream, path);

		ret = consumer_stream_create_output_files(stream,
				false);

		DBG("UST consumer snapshot stream (%" PRIu64 ")",
				stream->key);

		/*
		 * If tracing is active, we want to perform a "full" buffer flush.
		 * Else, if quiescent, it has already been done by the prior stop.
		 */
		if (!stream->quiescent) {
			ret = lttng_ust_ctl_flush_buffer(stream->ustream, 0);
			if (ret < 0) {
				ERR("Failed to flush buffer during snapshot of channel: channel key = %" PRIu64 ", channel name = '%s'",
						channel->key, channel->name);
			}
		}

		ret = lttng_ustconsumer_take_snapshot(stream);
		if (ret < 0) {
			ERR("Taking UST snapshot");
		}

		ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos);
		if (ret < 0) {
			ERR("Produced UST snapshot position");
		}

		ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos);
		if (ret < 0) {
			ERR("Consumed UST snapshot position");
		}

		/*
		 * The original value is sent back if max stream size is larger than
		 * the possible size of the snapshot. Also, we assume that the session
		 * daemon should never send a maximum stream size that is lower than
		 * subbuffer size.
		 */
		consumed_pos = consumer_get_consume_start_pos(consumed_pos,
				produced_pos, nb_packets_per_stream,
				stream->max_sb_size);

		while ((long) (consumed_pos - produced_pos) < 0) {
			unsigned long len, padded_len;
			const char *subbuf_addr;
			struct lttng_buffer_view subbuf_view;

			health_code_update();

			DBG("UST consumer taking snapshot at pos %lu", consumed_pos);

			ret = lttng_ust_ctl_get_subbuf(stream->ustream, &consumed_pos);
			if (ret < 0) {
				if (ret != -EAGAIN) {
					PERROR("lttng_ust_ctl_get_subbuf snapshot");
					goto error_close_stream;
				}
				DBG("UST consumer get subbuf failed. Skipping it.");
				consumed_pos += stream->max_sb_size;
				stream->chan->lost_packets++;
			}

			ret = lttng_ust_ctl_get_subbuf_size(stream->ustream, &len);
			if (ret < 0) {
				ERR("Snapshot lttng_ust_ctl_get_subbuf_size");
				goto error_put_subbuf;
			}

			ret = lttng_ust_ctl_get_padded_subbuf_size(stream->ustream, &padded_len);
			if (ret < 0) {
				ERR("Snapshot lttng_ust_ctl_get_padded_subbuf_size");
				goto error_put_subbuf;
			}

			ret = get_current_subbuf_addr(stream, &subbuf_addr);
			if (ret) {
				goto error_put_subbuf;
			}

			subbuf_view = lttng_buffer_view_init(
					subbuf_addr, 0, padded_len);
			read_len = lttng_consumer_on_read_subbuffer_mmap(
					stream, &subbuf_view, padded_len - len);

			if (read_len != len) {
				goto error_put_subbuf;
			}
			if (read_len != padded_len) {
				goto error_put_subbuf;
			}

			ret = lttng_ust_ctl_put_subbuf(stream->ustream);
			if (ret < 0) {
				ERR("Snapshot lttng_ust_ctl_put_subbuf");
				goto error_close_stream;
			}
			consumed_pos += stream->max_sb_size;
		}

		/* Simply close the stream so we can use it on the next snapshot. */
		consumer_stream_close(stream);
		pthread_mutex_unlock(&stream->lock);
	}

error_put_subbuf:
	if (lttng_ust_ctl_put_subbuf(stream->ustream) < 0) {
		ERR("Snapshot lttng_ust_ctl_put_subbuf");
	}
error_close_stream:
	consumer_stream_close(stream);
	pthread_mutex_unlock(&stream->lock);
void metadata_stream_reset_cache_consumed_position(
		struct lttng_consumer_stream *stream)
{
	ASSERT_LOCKED(stream->lock);

	DBG("Reset metadata cache of session %" PRIu64,
			stream->chan->session_id);
	stream->ust_metadata_pushed = 0;
}
/*
 * Receive the metadata updates from the sessiond. Supports receiving
 * overlapping metadata, but it needs to always belong to a contiguous
 * range starting from 0.
 * Be careful about the locks held when calling this function: it needs
 * the metadata cache flush to concurrently progress in order to
 * complete.
 */
int lttng_ustconsumer_recv_metadata(int sock, uint64_t key, uint64_t offset,
		uint64_t len, uint64_t version,
		struct lttng_consumer_channel *channel, int timer, int wait)
{
	int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	enum consumer_metadata_cache_write_status cache_write_status;

	DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key, len);

	metadata_str = calloc<char>(len);
	if (!metadata_str) {
		PERROR("zmalloc metadata string");
		ret_code = LTTCOMM_CONSUMERD_ENOMEM;
	}

	health_code_update();

	/* Receive metadata string. */
	ret = lttcomm_recv_unix_sock(sock, metadata_str, len);
	if (ret < 0) {
		/* Session daemon is dead so return gracefully. */
	}

	health_code_update();

	pthread_mutex_lock(&channel->metadata_cache->lock);
	cache_write_status = consumer_metadata_cache_write(
			channel->metadata_cache, offset, len, version,
			metadata_str);
	pthread_mutex_unlock(&channel->metadata_cache->lock);
	switch (cache_write_status) {
	case CONSUMER_METADATA_CACHE_WRITE_STATUS_NO_CHANGE:
		/*
		 * The write entirely overlapped with existing contents of the
		 * same metadata version (same content); there is nothing to do.
		 */
	case CONSUMER_METADATA_CACHE_WRITE_STATUS_INVALIDATED:
		/*
		 * The metadata cache was invalidated (previously pushed
		 * content has been overwritten). Reset the stream's consumed
		 * metadata position to ensure the metadata poll thread consumes
		 * the whole cache.
		 */

		/*
		 * channel::metadata_stream can be null when the metadata
		 * channel is under a snapshot session type. No need to update
		 * the stream position in that scenario.
		 */
		if (channel->metadata_stream != NULL) {
			pthread_mutex_lock(&channel->metadata_stream->lock);
			metadata_stream_reset_cache_consumed_position(
					channel->metadata_stream);
			pthread_mutex_unlock(&channel->metadata_stream->lock);
		} else {
			/* Validate we are in snapshot mode. */
			LTTNG_ASSERT(!channel->monitor);
		}
	case CONSUMER_METADATA_CACHE_WRITE_STATUS_APPENDED_CONTENT:
		/*
		 * In both cases, the metadata poll thread has new data to
		 * consume.
		 */
		ret = consumer_metadata_wakeup_pipe(channel);
		if (ret) {
			ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
		}
	case CONSUMER_METADATA_CACHE_WRITE_STATUS_ERROR:
		/* Unable to handle metadata. Notify session daemon. */
		ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
		/*
		 * Skip metadata flush on write error since the offset and len might
		 * not have been updated which could create an infinite loop below when
		 * waiting for the metadata cache to be flushed.
		 */
	}

	while (consumer_metadata_cache_flushed(channel, offset + len, timer)) {
		DBG("Waiting for metadata to be flushed");

		health_code_update();

		usleep(DEFAULT_METADATA_AVAILABILITY_WAIT_TIME);
	}
/*
 * Receive command from session daemon and process it.
 *
 * Return 1 on success else a negative value or 0.
 */
int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
		int sock, struct pollfd *consumer_sockpoll)
{
	enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct lttcomm_consumer_msg msg;
	struct lttng_consumer_channel *channel = NULL;

	health_code_update();

	ret_recv = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg));
	if (ret_recv != sizeof(msg)) {
		DBG("Consumer received unexpected message size %zd (expects %zu)",
				ret_recv, sizeof(msg));
		/*
		 * The ret value might be 0 meaning an orderly shutdown but this is ok
		 * since the caller handles this.
		 */
		lttng_consumer_send_error(ctx,
				LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
	}

	health_code_update();

	LTTNG_ASSERT(msg.cmd_type != LTTNG_CONSUMER_STOP);

	health_code_update();

	/* relayd needs RCU read-side lock */

	switch (msg.cmd_type) {
	case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
	{
		uint32_t major = msg.u.relayd_sock.major;
		uint32_t minor = msg.u.relayd_sock.minor;
		enum lttcomm_sock_proto protocol =
				(enum lttcomm_sock_proto) msg.u.relayd_sock
						.relayd_socket_protocol;

		/* Session daemon status messages are handled in the following call. */
		consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
				msg.u.relayd_sock.type, ctx, sock,
				consumer_sockpoll, msg.u.relayd_sock.session_id,
				msg.u.relayd_sock.relayd_session_id, major,
				minor, protocol);
		break;
	}
	case LTTNG_CONSUMER_DESTROY_RELAYD:
	{
		uint64_t index = msg.u.destroy_relayd.net_seq_idx;
		struct consumer_relayd_sock_pair *relayd;

		DBG("UST consumer destroying relayd %" PRIu64, index);

		/* Get relayd reference if exists. */
		relayd = consumer_find_relayd(index);
		if (relayd == NULL) {
			DBG("Unable to find relayd %" PRIu64, index);
			ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
		}

		/*
		 * Each relayd socket pair has a refcount of streams attached to it
		 * which tells if the relayd is still active or not depending on the
		 * refcount value.
		 *
		 * This will set the destroy flag of the relayd object and destroy it
		 * if the refcount reaches zero when called.
		 *
		 * The destroy can happen either here or when a stream fd hangs up.
		 */
		consumer_flag_relayd_for_destroy(relayd);

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_UPDATE_STREAM:
	case LTTNG_CONSUMER_DATA_PENDING:
	{
		int is_data_pending;
		uint64_t id = msg.u.data_pending.session_id;

		DBG("UST consumer data pending command for id %" PRIu64, id);

		is_data_pending = consumer_data_pending(id);

		/* Send back returned value to session daemon */
		ret_send = lttcomm_send_unix_sock(sock, &is_data_pending,
				sizeof(is_data_pending));
		if (ret_send < 0) {
			DBG("Error when sending the data pending ret code: %zd",
					ret_send);
		}

		/*
		 * No need to send back a status message since the data pending
		 * returned value is the response.
		 */
		break;
	}
	case LTTNG_CONSUMER_ASK_CHANNEL_CREATION:
	{
		int ret_ask_channel, ret_add_channel, ret_send;
		struct lttng_ust_ctl_consumer_channel_attr attr;
		const uint64_t chunk_id = msg.u.ask_channel.chunk_id.value;
		const struct lttng_credentials buffer_credentials = {
			.uid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.ask_channel.buffer_credentials.uid),
			.gid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.ask_channel.buffer_credentials.gid),
		};

		/* Create a plain object and reserve a channel key. */
		channel = consumer_allocate_channel(msg.u.ask_channel.key,
				msg.u.ask_channel.session_id,
				msg.u.ask_channel.chunk_id.is_set ? &chunk_id : NULL,
				msg.u.ask_channel.pathname, msg.u.ask_channel.name,
				msg.u.ask_channel.relayd_id,
				(enum lttng_event_output) msg.u.ask_channel.output,
				msg.u.ask_channel.tracefile_size, msg.u.ask_channel.tracefile_count,
				msg.u.ask_channel.session_id_per_pid, msg.u.ask_channel.monitor,
				msg.u.ask_channel.live_timer_interval, msg.u.ask_channel.is_live,
				msg.u.ask_channel.root_shm_path, msg.u.ask_channel.shm_path,
				msg.u.ask_channel.trace_format);
		if (!channel) {
			goto end_channel_error;
		}

		LTTNG_OPTIONAL_SET(&channel->buffer_credentials,
				buffer_credentials);

		/*
		 * Assign UST application UID to the channel. This value is ignored for
		 * per PID buffers. This is specific to UST thus setting this after the
		 * allocation.
		 */
		channel->ust_app_uid = msg.u.ask_channel.ust_app_uid;

		/* Build channel attributes from received message. */
		attr.subbuf_size = msg.u.ask_channel.subbuf_size;
		attr.num_subbuf = msg.u.ask_channel.num_subbuf;
		attr.overwrite = msg.u.ask_channel.overwrite;
		attr.switch_timer_interval = msg.u.ask_channel.switch_timer_interval;
		attr.read_timer_interval = msg.u.ask_channel.read_timer_interval;
		attr.chan_id = msg.u.ask_channel.chan_id;
		memcpy(attr.uuid, msg.u.ask_channel.uuid, sizeof(attr.uuid));
		attr.blocking_timeout = msg.u.ask_channel.blocking_timeout;

		/* Match channel buffer type to the UST abi. */
		switch (msg.u.ask_channel.output) {
		case LTTNG_EVENT_MMAP:
			attr.output = LTTNG_UST_ABI_MMAP;
		}

		/* Translate and save channel type. */
		switch (msg.u.ask_channel.type) {
		case LTTNG_UST_ABI_CHAN_PER_CPU:
			channel->type = CONSUMER_CHANNEL_TYPE_DATA;
			attr.type = LTTNG_UST_ABI_CHAN_PER_CPU;
			/*
			 * Set refcount to 1 for owner. Below, we will
			 * pass ownership to the
			 * consumer_thread_channel_poll() thread.
			 */
			channel->refcount = 1;
			break;
		case LTTNG_UST_ABI_CHAN_METADATA:
			channel->type = CONSUMER_CHANNEL_TYPE_METADATA;
			attr.type = LTTNG_UST_ABI_CHAN_METADATA;
			break;
		}

		health_code_update();

		ret_ask_channel = ask_channel(ctx, channel, &attr);
		if (ret_ask_channel < 0) {
			goto end_channel_error;
		}

		if (msg.u.ask_channel.type == LTTNG_UST_ABI_CHAN_METADATA) {
			ret_allocate = consumer_metadata_cache_allocate(
					channel);
			if (ret_allocate < 0) {
				ERR("Allocating metadata cache");
				goto end_channel_error;
			}
			consumer_timer_switch_start(channel, attr.switch_timer_interval);
			attr.switch_timer_interval = 0;
		} else {
			int monitor_start_ret;

			consumer_timer_live_start(channel,
					msg.u.ask_channel.live_timer_interval);
			monitor_start_ret = consumer_timer_monitor_start(
					channel,
					msg.u.ask_channel.monitor_timer_interval);
			if (monitor_start_ret < 0) {
				ERR("Starting channel monitoring timer failed");
				goto end_channel_error;
			}
		}

		health_code_update();

		/*
		 * Add the channel to the internal state AFTER all streams were created
		 * and successfully sent to session daemon. This way, all streams must
		 * be ready before this channel is visible to the threads.
		 * If add_channel succeeds, ownership of the channel is
		 * passed to consumer_thread_channel_poll().
		 */
		ret_add_channel = add_channel(channel, ctx);
		if (ret_add_channel < 0) {
			if (msg.u.ask_channel.type == LTTNG_UST_ABI_CHAN_METADATA) {
				if (channel->switch_timer_enabled == 1) {
					consumer_timer_switch_stop(channel);
				}
				consumer_metadata_cache_destroy(channel);
			}
			if (channel->live_timer_enabled == 1) {
				consumer_timer_live_stop(channel);
			}
			if (channel->monitor_timer_enabled == 1) {
				consumer_timer_monitor_stop(channel);
			}
			goto end_channel_error;
		}

		health_code_update();

		/*
		 * Channel and streams are now created. Inform the session daemon that
		 * everything went well and should wait to receive the channel and
		 * streams with ustctl API.
		 */
		ret_send = consumer_send_status_channel(sock, channel);
		if (ret_send < 0) {
			/*
			 * There is probably a problem on the socket.
			 */
		}
	}
	case LTTNG_CONSUMER_GET_CHANNEL:
	{
		int ret, relayd_err = 0;
		uint64_t key = msg.u.get_channel.key;
		struct lttng_consumer_channel *found_channel;

		found_channel = consumer_find_channel(key);
		if (!found_channel) {
			ERR("UST consumer get channel key %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
			goto end_get_channel;
		}

		health_code_update();

		/* Send the channel to sessiond (and relayd, if applicable). */
		ret = send_channel_to_sessiond_and_relayd(
				sock, found_channel, ctx, &relayd_err);
		if (ret < 0) {
			if (relayd_err) {
				/*
				 * We were unable to send to the relayd the stream so avoid
				 * sending back a fatal error to the thread since this is OK
				 * and the consumer can continue its work. The above call
				 * has sent the error status message to the sessiond.
				 */
				goto end_get_channel_nosignal;
			}
			/*
			 * The communication was broken hence there is a bad state between
			 * the consumer and sessiond so stop everything.
			 */
			goto error_get_channel_fatal;
		}

		health_code_update();

		/*
		 * In no monitor mode, the streams ownership is kept inside the channel
		 * so don't send them to the data thread.
		 */
		if (!found_channel->monitor) {
			goto end_get_channel;
		}

		ret = send_streams_to_thread(found_channel, ctx);
		if (ret < 0) {
			/*
			 * If we are unable to send the stream to the thread, there is
			 * a big problem so just stop everything.
			 */
			goto error_get_channel_fatal;
		}
		/* List MUST be empty after or else it could be reused. */
		LTTNG_ASSERT(cds_list_empty(&found_channel->streams.head));
end_get_channel:
		goto end_msg_sessiond;
error_get_channel_fatal:
end_get_channel_nosignal:
	}
	case LTTNG_CONSUMER_DESTROY_CHANNEL:
	{
		uint64_t key = msg.u.destroy_channel.key;

		/*
		 * Only called if streams have not been sent to stream
		 * manager thread. However, channel has been sent to
		 * channel manager thread.
		 */
		notify_thread_del_channel(ctx, key);
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_CLOSE_METADATA:
	{
		ret = close_metadata(msg.u.close_metadata.key);
		if (ret != 0) {
			ret_code = (lttcomm_return_code) ret;
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_FLUSH_CHANNEL:
	{
		ret = flush_channel(msg.u.flush_channel.key);
		if (ret != 0) {
			ret_code = (lttcomm_return_code) ret;
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_CLEAR_QUIESCENT_CHANNEL:
	{
		ret = clear_quiescent_channel(
				msg.u.clear_quiescent_channel.key);
		if (ret != 0) {
			ret_code = (lttcomm_return_code) ret;
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_PUSH_METADATA:
	{
		uint64_t len = msg.u.push_metadata.len;
		uint64_t key = msg.u.push_metadata.key;
		uint64_t offset = msg.u.push_metadata.target_offset;
		uint64_t version = msg.u.push_metadata.version;
		struct lttng_consumer_channel *found_channel;

		DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key,
				len);

		found_channel = consumer_find_channel(key);
		if (!found_channel) {
			/*
			 * This is possible if the metadata creation on the consumer side
			 * is in flight vis-a-vis a concurrent push metadata from the
			 * session daemon. Simply return that the channel failed and the
			 * session daemon will handle that message correctly considering
			 * that this race is acceptable thus the DBG() statement here.
			 */
			DBG("UST consumer push metadata %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
			goto end_push_metadata_msg_sessiond;
		}

		health_code_update();

		if (!len) {
			/*
			 * There is nothing to receive. We have simply
			 * checked whether the channel can be found.
			 */
			ret_code = LTTCOMM_CONSUMERD_SUCCESS;
			goto end_push_metadata_msg_sessiond;
		}

		/* Tell session daemon we are ready to receive the metadata. */
		ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
		if (ret < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto error_push_metadata_fatal;
		}

		health_code_update();

		/* Wait for more data. */
		health_poll_entry();
		ret = lttng_consumer_poll_socket(consumer_sockpoll);
		if (ret) {
			goto error_push_metadata_fatal;
		}

		health_code_update();

		ret = lttng_ustconsumer_recv_metadata(sock, key, offset, len,
				version, found_channel, 0, 1);
		if (ret < 0) {
			/* error receiving from sessiond */
			goto error_push_metadata_fatal;
		} else {
			ret_code = (lttcomm_return_code) ret;
			goto end_push_metadata_msg_sessiond;
		}
end_push_metadata_msg_sessiond:
		goto end_msg_sessiond;
error_push_metadata_fatal:
	}
	case LTTNG_CONSUMER_SETUP_METADATA:
	{
		ret = setup_metadata(ctx, msg.u.setup_metadata.key);
		if (ret) {
			ret_code = (lttcomm_return_code) ret;
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_SNAPSHOT_CHANNEL:
	{
		struct lttng_consumer_channel *found_channel;
		uint64_t key = msg.u.snapshot_channel.key;

		found_channel = consumer_find_channel(key);
		if (!found_channel) {
			DBG("UST snapshot channel not found for key %" PRIu64, key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
		} else {
			if (msg.u.snapshot_channel.metadata) {
				ret_snapshot = snapshot_metadata(found_channel,
						key,
						msg.u.snapshot_channel.pathname,
						msg.u.snapshot_channel.relayd_id,
						ctx);
				if (ret_snapshot < 0) {
					ERR("Snapshot metadata failed");
					ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED;
				}
			} else {
				ret_snapshot = snapshot_channel(found_channel,
						key,
						msg.u.snapshot_channel.pathname,
						msg.u.snapshot_channel.relayd_id,
						msg.u.snapshot_channel
								.nb_packets_per_stream,
						ctx);
				if (ret_snapshot < 0) {
					ERR("Snapshot channel failed");
					ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED;
				}
			}
		}
		health_code_update();
		ret_send = consumer_send_status_msg(sock, ret_code);
		if (ret_send < 0) {
			/* Somehow, the session daemon is not responding anymore. */
		}
		health_code_update();
	}
	case LTTNG_CONSUMER_DISCARDED_EVENTS:
	{
		uint64_t discarded_events;
		struct lttng_ht_iter iter;
		struct lttng_ht *ht;
		struct lttng_consumer_stream *stream;
		uint64_t id = msg.u.discarded_events.session_id;
		uint64_t key = msg.u.discarded_events.channel_key;

		DBG("UST consumer discarded events command for session id %"
				PRIu64 ", channel key %" PRIu64, id, key);

		pthread_mutex_lock(&the_consumer_data.lock);

		ht = the_consumer_data.stream_list_ht;

		/*
		 * We only need a reference to the channel, but they are not
		 * directly indexed, so we just use the first matching stream
		 * to extract the information we need, we default to 0 if not
		 * found (no events are dropped if the channel is not yet in
		 * use).
		 */
		discarded_events = 0;
		cds_lfht_for_each_entry_duplicate(ht->ht,
				ht->hash_fct(&id, lttng_ht_seed),
				&iter.iter, stream, node_session_id.node) {
			if (stream->chan->key == key) {
				discarded_events = stream->chan->discarded_events;
			}
		}
		pthread_mutex_unlock(&the_consumer_data.lock);

		DBG("UST consumer discarded events command for session id %"
				PRIu64 ", channel key %" PRIu64, id, key);

		health_code_update();

		/* Send back returned value to session daemon */
		ret = lttcomm_send_unix_sock(sock, &discarded_events, sizeof(discarded_events));
		if (ret < 0) {
			PERROR("send discarded events");
		}
	}
	case LTTNG_CONSUMER_LOST_PACKETS:
	{
		uint64_t lost_packets;
		struct lttng_ht_iter iter;
		struct lttng_ht *ht;
		struct lttng_consumer_stream *stream;
		uint64_t id = msg.u.lost_packets.session_id;
		uint64_t key = msg.u.lost_packets.channel_key;

		DBG("UST consumer lost packets command for session id %"
				PRIu64 ", channel key %" PRIu64, id, key);

		pthread_mutex_lock(&the_consumer_data.lock);

		ht = the_consumer_data.stream_list_ht;

		/*
		 * We only need a reference to the channel, but they are not
		 * directly indexed, so we just use the first matching stream
		 * to extract the information we need, we default to 0 if not
		 * found (no packets lost if the channel is not yet in use).
		 */
		cds_lfht_for_each_entry_duplicate(ht->ht,
				ht->hash_fct(&id, lttng_ht_seed),
				&iter.iter, stream, node_session_id.node) {
			if (stream->chan->key == key) {
				lost_packets = stream->chan->lost_packets;
			}
		}
		pthread_mutex_unlock(&the_consumer_data.lock);

		DBG("UST consumer lost packets command for session id %"
				PRIu64 ", channel key %" PRIu64, id, key);

		health_code_update();

		/* Send back returned value to session daemon */
		ret = lttcomm_send_unix_sock(sock, &lost_packets,
				sizeof(lost_packets));
		if (ret < 0) {
			PERROR("send lost packets");
		}
	}
	case LTTNG_CONSUMER_SET_CHANNEL_MONITOR_PIPE:
	{
		int channel_monitor_pipe, ret_send,
				ret_set_channel_monitor_pipe;

		ret_code = LTTCOMM_CONSUMERD_SUCCESS;
		/* Successfully received the command's type. */
		ret_send = consumer_send_status_msg(sock, ret_code);

		ret_recv = lttcomm_recv_fds_unix_sock(
				sock, &channel_monitor_pipe, 1);
		if (ret_recv != sizeof(channel_monitor_pipe)) {
			ERR("Failed to receive channel monitor pipe");
		}

		DBG("Received channel monitor pipe (%d)", channel_monitor_pipe);
		ret_set_channel_monitor_pipe =
				consumer_timer_thread_set_channel_monitor_pipe(
						channel_monitor_pipe);
		if (!ret_set_channel_monitor_pipe) {
			ret_code = LTTCOMM_CONSUMERD_SUCCESS;
			/* Set the pipe as non-blocking. */
			ret_fcntl = fcntl(channel_monitor_pipe, F_GETFL, 0);
			if (ret_fcntl == -1) {
				PERROR("fcntl get flags of the channel monitoring pipe");
			}

			ret_fcntl = fcntl(channel_monitor_pipe, F_SETFL,
					flags | O_NONBLOCK);
			if (ret_fcntl == -1) {
				PERROR("fcntl set O_NONBLOCK flag of the channel monitoring pipe");
			}
			DBG("Channel monitor pipe set as non-blocking");
		} else {
			ret_code = LTTCOMM_CONSUMERD_ALREADY_SET;
		}
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_ROTATE_CHANNEL:
	{
		struct lttng_consumer_channel *found_channel;
		uint64_t key = msg.u.rotate_channel.key;
		int ret_send_status;

		found_channel = consumer_find_channel(key);
		if (!found_channel) {
			DBG("Channel %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
		} else {
			/*
			 * Sample the rotate position of all the streams in
			 * this channel.
			 */
			rotate_channel = lttng_consumer_rotate_channel(
					msg.u.rotate_channel.relayd_id);
			if (rotate_channel < 0) {
				ERR("Rotate channel failed");
				ret_code = LTTCOMM_CONSUMERD_ROTATION_FAIL;
			}

			health_code_update();
		}

		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto end_rotate_channel_nosignal;
		}

		/*
		 * Rotate the streams that are ready right now.
		 * FIXME: this is a second consecutive iteration over the
		 * streams in a channel, there is probably a better way to
		 * handle this, but it needs to be after the
		 * consumer_send_status_msg() call.
		 */
		if (found_channel) {
			int ret_rotate_read_streams;

			ret_rotate_read_streams =
					lttng_consumer_rotate_ready_streams(
							found_channel, key);
			if (ret_rotate_read_streams < 0) {
				ERR("Rotate channel failed");
			}
		}
end_rotate_channel_nosignal:
	}
	case LTTNG_CONSUMER_CLEAR_CHANNEL:
	{
		struct lttng_consumer_channel *found_channel;
		uint64_t key = msg.u.clear_channel.key;
		int ret_send_status;

		found_channel = consumer_find_channel(key);
		if (!found_channel) {
			DBG("Channel %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
		} else {
			int ret_clear_channel;

			ret_clear_channel = lttng_consumer_clear_channel(
					found_channel);
			if (ret_clear_channel) {
				ERR("Clear channel failed key %" PRIu64, key);
				ret_code = (lttcomm_return_code) ret_clear_channel;
			}

			health_code_update();
		}
		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			/* Somehow, the session daemon is not responding anymore. */
		}
	}
	case LTTNG_CONSUMER_INIT:
	{
		int ret_send_status;
		lttng_uuid sessiond_uuid;

		std::copy(std::begin(msg.u.init.sessiond_uuid), std::end(msg.u.init.sessiond_uuid),
				sessiond_uuid.begin());
		ret_code = lttng_consumer_init_command(ctx, sessiond_uuid);
		health_code_update();
		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			/* Somehow, the session daemon is not responding anymore. */
		}
	}
	case LTTNG_CONSUMER_CREATE_TRACE_CHUNK:
	{
		const struct lttng_credentials credentials = {
			.uid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.create_trace_chunk.credentials.value.uid),
			.gid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.create_trace_chunk.credentials.value.gid),
		};
		const bool is_local_trace =
				!msg.u.create_trace_chunk.relayd_id.is_set;
		const uint64_t relayd_id =
				msg.u.create_trace_chunk.relayd_id.value;
		const char *chunk_override_name =
				*msg.u.create_trace_chunk.override_name ?
						msg.u.create_trace_chunk.override_name :
						NULL;
		struct lttng_directory_handle *chunk_directory_handle = NULL;

		/*
		 * The session daemon will only provide a chunk directory file
		 * descriptor for local traces.
		 */
		if (is_local_trace) {
			int ret_send_status;

			/* Acknowledge the reception of the command. */
			ret_send_status = consumer_send_status_msg(
					sock, LTTCOMM_CONSUMERD_SUCCESS);
			if (ret_send_status < 0) {
				/* Somehow, the session daemon is not responding anymore. */
			}

			/*
			 * Receive trace chunk domain dirfd.
			 */
			ret_recv = lttcomm_recv_fds_unix_sock(
					sock, &chunk_dirfd, 1);
			if (ret_recv != sizeof(chunk_dirfd)) {
				ERR("Failed to receive trace chunk domain directory file descriptor");
			}

			DBG("Received trace chunk domain directory fd (%d)",
					chunk_dirfd);
			chunk_directory_handle = lttng_directory_handle_create_from_dirfd(
					chunk_dirfd);
			if (!chunk_directory_handle) {
				ERR("Failed to initialize chunk domain directory handle from directory file descriptor");
				if (close(chunk_dirfd)) {
					PERROR("Failed to close chunk directory file descriptor");
				}
			}
		}

		ret_code = lttng_consumer_create_trace_chunk(
				!is_local_trace ? &relayd_id : NULL,
				msg.u.create_trace_chunk.session_id,
				msg.u.create_trace_chunk.chunk_id,
				(time_t) msg.u.create_trace_chunk
						.creation_timestamp,
				chunk_override_name,
				msg.u.create_trace_chunk.credentials.is_set ?
						&credentials : NULL,
				chunk_directory_handle);
		lttng_directory_handle_put(chunk_directory_handle);
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_CLOSE_TRACE_CHUNK:
	{
		enum lttng_trace_chunk_command_type close_command =
				(lttng_trace_chunk_command_type)
						msg.u.close_trace_chunk.close_command.value;
		const uint64_t relayd_id =
				msg.u.close_trace_chunk.relayd_id.value;
		struct lttcomm_consumer_close_trace_chunk_reply reply;
		char closed_trace_chunk_path[LTTNG_PATH_MAX] = {};

		ret_code = lttng_consumer_close_trace_chunk(
				msg.u.close_trace_chunk.relayd_id.is_set ?
						&relayd_id : NULL,
				msg.u.close_trace_chunk.session_id,
				msg.u.close_trace_chunk.chunk_id,
				(time_t) msg.u.close_trace_chunk.close_timestamp,
				msg.u.close_trace_chunk.close_command.is_set ?
						&close_command :
						NULL, closed_trace_chunk_path);
		reply.ret_code = ret_code;
		reply.path_length = strlen(closed_trace_chunk_path) + 1;
		ret = lttcomm_send_unix_sock(sock, &reply, sizeof(reply));
		if (ret != sizeof(reply)) {
		}
		ret = lttcomm_send_unix_sock(sock, closed_trace_chunk_path,
				reply.path_length);
		if (ret != reply.path_length) {
		}
	}
	case LTTNG_CONSUMER_TRACE_CHUNK_EXISTS:
	{
		const uint64_t relayd_id =
				msg.u.trace_chunk_exists.relayd_id.value;

		ret_code = lttng_consumer_trace_chunk_exists(
				msg.u.trace_chunk_exists.relayd_id.is_set ?
						&relayd_id : NULL,
				msg.u.trace_chunk_exists.session_id,
				msg.u.trace_chunk_exists.chunk_id);
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_OPEN_CHANNEL_PACKETS:
	{
		const uint64_t key = msg.u.open_channel_packets.key;
		struct lttng_consumer_channel *found_channel =
				consumer_find_channel(key);

		if (found_channel) {
			pthread_mutex_lock(&found_channel->lock);
			ret_code = lttng_consumer_open_channel_packets(
					found_channel);
			pthread_mutex_unlock(&found_channel->lock);
		} else {
			/*
			 * The channel could have disappeared in per-pid
			 * buffering mode.
			 */
			DBG("Channel %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
		}

		health_code_update();
		goto end_msg_sessiond;
	}
	default:
		break;
	}

end_nosignal:
	/*
	 * Return 1 to indicate success since the 0 value can be a socket
	 * shutdown during the recv() or send() call.
	 */
	ret_func = 1;
	goto end;

end_msg_sessiond:
	/*
	 * The returned value here is not useful since either way we'll return 1 to
	 * the caller because the session daemon socket management is done
	 * elsewhere. Returning a negative code or 0 will shutdown the consumer.
	 */
	{
		int ret_send_status;

		ret_send_status = consumer_send_status_msg(sock, ret_code);
		if (ret_send_status < 0) {
			goto error_fatal;
		}
	}

	ret_func = 1;
	goto end;

end_channel_error:
	if (channel) {
		consumer_del_channel(channel);
	}
	/* We have to send a status channel message indicating an error. */
	{
		int ret_send_status;

		ret_send_status = consumer_send_status_channel(sock, NULL);
		if (ret_send_status < 0) {
			/* Stop everything if session daemon can not be notified. */
			goto error_fatal;
		}
	}

	ret_func = 1;
	goto end;

error_fatal:
	/* This will issue a consumer stop. */
	lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
	ret_func = -1;
	goto end;

end:
	rcu_read_unlock();
	health_code_update();
	return ret_func;
}
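
/*
 * Flush the ring buffer of a stream; thin wrapper over
 * lttng_ust_ctl_flush_buffer(). The producer_active flag is passed through
 * as-is.
 */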
int lttng_ust_flush_buffer(struct lttng_consumer_stream *stream,
		int producer_active)
{
	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(stream->ustream);

	return lttng_ust_ctl_flush_buffer(stream->ustream, producer_active);
}
/*
 * Take a snapshot for a specific stream.
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_ustconsumer_take_snapshot(struct lttng_consumer_stream *stream)
{
	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(stream->ustream);

	return lttng_ust_ctl_snapshot(stream->ustream);
}
/*
 * Sample consumed and produced positions for a specific stream.
 *
 * Returns 0 on success, < 0 on error.
 */
int lttng_ustconsumer_sample_snapshot_positions(
		struct lttng_consumer_stream *stream)
{
	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(stream->ustream);

	return lttng_ust_ctl_snapshot_sample_positions(stream->ustream);
}
/*
 * Get the produced position.
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_ustconsumer_get_produced_snapshot(
		struct lttng_consumer_stream *stream, unsigned long *pos)
{
	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(stream->ustream);

	return lttng_ust_ctl_snapshot_get_produced(stream->ustream, pos);
}
/*
 * Get the consumed position.
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_ustconsumer_get_consumed_snapshot(
		struct lttng_consumer_stream *stream, unsigned long *pos)
{
	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(stream->ustream);

	return lttng_ust_ctl_snapshot_get_consumed(stream->ustream, pos);
}
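
/*
 * Flush the stream's ring buffer; the producer flag is forwarded verbatim to
 * lttng_ust_ctl_flush_buffer().
 */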
int lttng_ustconsumer_flush_buffer(struct lttng_consumer_stream *stream,
		int producer)
{
	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(stream->ustream);

	return lttng_ust_ctl_flush_buffer(stream->ustream, producer);
}
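
/*
 * Clear the stream's ring buffer content through lttng_ust_ctl_clear_buffer().
 */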
int lttng_ustconsumer_clear_buffer(struct lttng_consumer_stream *stream)
{
	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(stream->ustream);

	return lttng_ust_ctl_clear_buffer(stream->ustream);
}
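
/*
 * Sample the current timestamp of the stream through
 * lttng_ust_ctl_get_current_timestamp().
 */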
int lttng_ustconsumer_get_current_timestamp(
		struct lttng_consumer_stream *stream, uint64_t *ts)
{
	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(stream->ustream);

	return lttng_ust_ctl_get_current_timestamp(stream->ustream, ts);
}
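
/*
 * Get the packet sequence number of the stream through
 * lttng_ust_ctl_get_sequence_number().
 */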
int lttng_ustconsumer_get_sequence_number(
		struct lttng_consumer_stream *stream, uint64_t *seq)
{
	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(stream->ustream);

	return lttng_ust_ctl_get_sequence_number(stream->ustream, seq);
}
/*
 * Called when the stream signals the consumer that it has hung up.
 */
void lttng_ustconsumer_on_stream_hangup(struct lttng_consumer_stream *stream)
{
	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(stream->ustream);

	pthread_mutex_lock(&stream->lock);
	if (!stream->quiescent) {
		if (lttng_ust_ctl_flush_buffer(stream->ustream, 0) < 0) {
			ERR("Failed to flush buffer on stream hang-up");
		} else {
			stream->quiescent = true;
		}
	}

	stream->hangup_flush_done = 1;
	pthread_mutex_unlock(&stream->lock);
}
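
/*
 * Release the per-channel resources held on the consumer side: stop the
 * switch timer if it is enabled, close the per-stream file descriptors and
 * unlink the per-stream shared memory files, if any, using the channel's
 * buffer credentials.
 */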
void lttng_ustconsumer_del_channel(struct lttng_consumer_channel *chan)
{
	int i;

	LTTNG_ASSERT(chan);
	LTTNG_ASSERT(chan->uchan);
	LTTNG_ASSERT(chan->buffer_credentials.is_set);

	if (chan->switch_timer_enabled == 1) {
		consumer_timer_switch_stop(chan);
	}
	for (i = 0; i < chan->nr_stream_fds; i++) {
		int ret;

		ret = close(chan->stream_fds[i]);
		if (ret) {
			PERROR("close");
		}
		if (chan->shm_path[0]) {
			char shm_path[PATH_MAX];

			ret = get_stream_shm_path(shm_path, chan->shm_path, i);
			if (ret) {
				ERR("Cannot get stream shm path");
			}
			ret = run_as_unlink(shm_path,
					lttng_credentials_get_uid(LTTNG_OPTIONAL_GET_PTR(
							chan->buffer_credentials)),
					lttng_credentials_get_gid(LTTNG_OPTIONAL_GET_PTR(
							chan->buffer_credentials)));
			if (ret) {
				PERROR("unlink %s", shm_path);
			}
		}
	}
}
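
/*
 * Free a channel's UST resources: destroy the metadata cache and the
 * lttng-ust channel object, attempt to remove the shared memory directory
 * tree, and free the stream fd array.
 */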
void lttng_ustconsumer_free_channel(struct lttng_consumer_channel *chan)
{
	LTTNG_ASSERT(chan->uchan);
	LTTNG_ASSERT(chan->buffer_credentials.is_set);

	consumer_metadata_cache_destroy(chan);
	lttng_ust_ctl_destroy_channel(chan->uchan);
	/* Try to rmdir all directories under shm_path root. */
	if (chan->root_shm_path[0]) {
		(void) run_as_rmdir_recursive(chan->root_shm_path,
				lttng_credentials_get_uid(LTTNG_OPTIONAL_GET_PTR(
						chan->buffer_credentials)),
				lttng_credentials_get_gid(LTTNG_OPTIONAL_GET_PTR(
						chan->buffer_credentials)),
				LTTNG_DIRECTORY_HANDLE_SKIP_NON_EMPTY_FLAG);
	}
	free(chan->stream_fds);
}
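
/*
 * Delete the lttng-ust stream object after stopping the channel's switch
 * timer, if needed.
 */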
void lttng_ustconsumer_del_stream(struct lttng_consumer_stream *stream)
{
	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(stream->ustream);

	if (stream->chan->switch_timer_enabled == 1) {
		consumer_timer_switch_stop(stream->chan);
	}
	lttng_ust_ctl_destroy_stream(stream->ustream);
}
int lttng_ustconsumer_get_wakeup_fd(struct lttng_consumer_stream *stream)
{
	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(stream->ustream);

	return lttng_ust_ctl_stream_get_wakeup_fd(stream->ustream);
}
int lttng_ustconsumer_close_wakeup_fd(struct lttng_consumer_stream *stream)
{
	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(stream->ustream);

	return lttng_ust_ctl_stream_close_wakeup_fd(stream->ustream);
}
/*
 * Write up to one packet from the metadata cache to the channel.
 *
 * Returns the number of bytes pushed from the cache into the ring buffer, or a
 * negative value on error.
 */
static
int commit_one_metadata_packet(struct lttng_consumer_stream *stream)
{
	ssize_t write_len;
	int ret;

	pthread_mutex_lock(&stream->chan->metadata_cache->lock);
	if (stream->chan->metadata_cache->contents.size ==
			stream->ust_metadata_pushed) {
		/*
		 * In the context of a user space metadata channel, a
		 * change in version can be detected in two ways:
		 *   1) During the pre-consume of the `read_subbuffer` loop,
		 *   2) When populating the metadata ring buffer (i.e. here).
		 *
		 * This function is invoked when there is no metadata
		 * available in the ring-buffer. If all data was consumed
		 * up to the size of the metadata cache, there is no metadata
		 * to insert in the ring-buffer.
		 *
		 * However, the metadata version could still have changed (a
		 * regeneration without any new data will yield the same cache
		 * size).
		 *
		 * The cache's version is checked for a version change and the
		 * consumed position is reset if one occurred.
		 *
		 * This check is only necessary for the user space domain as
		 * it has to manage the cache explicitly. If this reset was not
		 * performed, no metadata would be consumed (and no reset would
		 * occur as part of the pre-consume) until the metadata size
		 * exceeded the cache size.
		 */
		if (stream->metadata_version !=
				stream->chan->metadata_cache->version) {
			metadata_stream_reset_cache_consumed_position(stream);
			consumer_stream_metadata_set_version(stream,
					stream->chan->metadata_cache->version);
		} else {
			ret = 0;
			goto end;
		}
	}

	write_len = lttng_ust_ctl_write_one_packet_to_channel(stream->chan->uchan,
			&stream->chan->metadata_cache->contents.data[stream->ust_metadata_pushed],
			stream->chan->metadata_cache->contents.size -
					stream->ust_metadata_pushed);
	LTTNG_ASSERT(write_len != 0);
	if (write_len < 0) {
		ERR("Writing one metadata packet");
		ret = write_len;
		goto end;
	}
	stream->ust_metadata_pushed += write_len;

	LTTNG_ASSERT(stream->chan->metadata_cache->contents.size >=
			stream->ust_metadata_pushed);
	ret = write_len;

	/*
	 * Switch packet (but don't open the next one) on every commit of
	 * a metadata packet. Since the subbuffer is fully filled (with padding,
	 * if needed), the stream is "quiescent" after this commit.
	 */
	if (lttng_ust_ctl_flush_buffer(stream->ustream, 1)) {
		ERR("Failed to flush buffer while committing one metadata packet");
		ret = -EIO;
	} else {
		stream->quiescent = true;
	}

end:
	pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
	return ret;
}
/*
 * Sync metadata, meaning request it from the session daemon and snapshot the
 * ring-buffer positions so the metadata thread can consume it.
 *
 * The metadata stream lock is held here, but we need to release it when
 * interacting with sessiond, else we cause a deadlock with live
 * awaiting on metadata to be pushed out.
 *
 * The RCU read side lock must be held by the caller.
 */
enum sync_metadata_status lttng_ustconsumer_sync_metadata(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *metadata_stream)
{
	int ret;
	enum sync_metadata_status status;
	struct lttng_consumer_channel *metadata_channel;

	LTTNG_ASSERT(metadata_stream);
	ASSERT_RCU_READ_LOCKED();

	metadata_channel = metadata_stream->chan;
	pthread_mutex_unlock(&metadata_stream->lock);
	/*
	 * Request metadata from the sessiond, but don't wait for the flush
	 * because we locked the metadata thread.
	 */
	ret = lttng_ustconsumer_request_metadata(ctx, metadata_channel, 0, 0);
	pthread_mutex_lock(&metadata_stream->lock);
	if (ret < 0) {
		status = SYNC_METADATA_STATUS_ERROR;
		goto end;
	}

	/*
	 * The metadata stream and channel can be deleted while the
	 * metadata stream lock was released. The stream is checked
	 * for deletion before we use it further.
	 *
	 * Note that it is safe to access a logically-deleted stream since its
	 * existence is still guaranteed by the RCU read side lock. However,
	 * it should no longer be used. The close/deletion of the metadata
	 * channel and stream already guarantees that all metadata has been
	 * consumed. Therefore, there is nothing left to do in this function.
	 */
	if (consumer_stream_is_deleted(metadata_stream)) {
		DBG("Metadata stream %" PRIu64 " was deleted during the metadata synchronization",
				metadata_stream->key);
		status = SYNC_METADATA_STATUS_NO_DATA;
		goto end;
	}

	ret = commit_one_metadata_packet(metadata_stream);
	if (ret < 0) {
		status = SYNC_METADATA_STATUS_ERROR;
		goto end;
	} else if (ret > 0) {
		status = SYNC_METADATA_STATUS_NEW_DATA;
	} else /* ret == 0 */ {
		status = SYNC_METADATA_STATUS_NO_DATA;
		goto end;
	}

	ret = lttng_ust_ctl_snapshot(metadata_stream->ustream);
	if (ret < 0) {
		ERR("Failed to take a snapshot of the metadata ring-buffer positions, ret = %d", ret);
		status = SYNC_METADATA_STATUS_ERROR;
		goto end;
	}

end:
	return status;
}
/*
 * Return 0 on success else a negative value.
 */
static int notify_if_more_data(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	int ret;
	struct lttng_ust_ctl_consumer_stream *ustream;

	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(ctx);

	ustream = stream->ustream;

	/*
	 * First, we are going to check if there is a new subbuffer available
	 * before reading the stream wait_fd.
	 */
	/* Get the next subbuffer */
	ret = lttng_ust_ctl_get_next_subbuf(ustream);
	if (ret) {
		/* No more data found, flag the stream. */
		stream->has_data = 0;
		ret = 0;
		goto end;
	}

	ret = lttng_ust_ctl_put_subbuf(ustream);
	LTTNG_ASSERT(!ret);

	/* This stream still has data. Flag it and wake up the data thread. */
	stream->has_data = 1;

	if (stream->monitor && !stream->hangup_flush_done && !ctx->has_wakeup) {
		ssize_t writelen;

		writelen = lttng_pipe_write(ctx->consumer_wakeup_pipe, "!", 1);
		if (writelen < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
			ret = writelen;
			goto end;
		}

		/* The wake up pipe has been notified. */
		ctx->has_wakeup = 1;
	}
	ret = 0;

end:
	return ret;
}
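
/*
 * on_wake_up operation of the read_subbuffer loop for UST streams: drain the
 * single wake-up byte written by the tracer into the stream's wait_fd, when
 * applicable.
 */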
static int consumer_stream_ust_on_wake_up(struct lttng_consumer_stream *stream)
{
	int ret = 0;

	/*
	 * We can consume the 1 byte written into the wait_fd by
	 * UST. Don't trigger error if we cannot read this one byte
	 * (read returns 0), or if the error is EAGAIN or EWOULDBLOCK.
	 *
	 * This is only done when the stream is monitored by a thread,
	 * before the flush is done after a hangup and if the stream
	 * is not flagged with data since there might be nothing to
	 * consume in the wait fd but still have data available
	 * flagged by the consumer wake up pipe.
	 */
	if (stream->monitor && !stream->hangup_flush_done && !stream->has_data) {
		char dummy;
		ssize_t readlen;

		readlen = lttng_read(stream->wait_fd, &dummy, 1);
		if (readlen < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
			ret = readlen;
		}
	}

	return ret;
}
static int extract_common_subbuffer_info(struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuf)
{
	int ret;

	ret = lttng_ust_ctl_get_subbuf_size(
			stream->ustream, &subbuf->info.data.subbuf_size);
	if (ret) {
		goto end;
	}

	ret = lttng_ust_ctl_get_padded_subbuf_size(
			stream->ustream, &subbuf->info.data.padded_subbuf_size);

end:
	return ret;
}
static int extract_metadata_subbuffer_info(struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuf)
{
	int ret;

	ret = extract_common_subbuffer_info(stream, subbuf);
	if (ret) {
		goto end;
	}

	subbuf->info.metadata.version = stream->metadata_version;

end:
	return ret;
}
static int extract_data_subbuffer_info(struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuf)
{
	int ret;

	ret = extract_common_subbuffer_info(stream, subbuf);
	if (ret) {
		goto end;
	}

	ret = lttng_ust_ctl_get_packet_size(
			stream->ustream, &subbuf->info.data.packet_size);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer packet size");
		goto end;
	}

	ret = lttng_ust_ctl_get_content_size(
			stream->ustream, &subbuf->info.data.content_size);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer content size");
		goto end;
	}

	ret = lttng_ust_ctl_get_timestamp_begin(
			stream->ustream, &subbuf->info.data.timestamp_begin);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer begin timestamp");
		goto end;
	}

	ret = lttng_ust_ctl_get_timestamp_end(
			stream->ustream, &subbuf->info.data.timestamp_end);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer end timestamp");
		goto end;
	}

	ret = lttng_ust_ctl_get_events_discarded(
			stream->ustream, &subbuf->info.data.events_discarded);
	if (ret) {
		PERROR("Failed to get sub-buffer events discarded count");
		goto end;
	}

	ret = lttng_ust_ctl_get_sequence_number(stream->ustream,
			&subbuf->info.data.sequence_number.value);
	if (ret) {
		/* May not be supported by older LTTng-UST. */
		if (ret != -ENOTTY) {
			PERROR("Failed to get sub-buffer sequence number");
			goto end;
		}
	} else {
		subbuf->info.data.sequence_number.is_set = true;
	}

	ret = lttng_ust_ctl_get_stream_id(
			stream->ustream, &subbuf->info.data.stream_id);
	if (ret < 0) {
		PERROR("Failed to get stream id");
		goto end;
	}

	ret = lttng_ust_ctl_get_instance_id(stream->ustream,
			&subbuf->info.data.stream_instance_id.value);
	if (ret) {
		/* May not be supported by older LTTng-UST. */
		if (ret != -ENOTTY) {
			PERROR("Failed to get stream instance id");
			goto end;
		}
	} else {
		subbuf->info.data.stream_instance_id.is_set = true;
	}

end:
	return ret;
}
static int get_next_subbuffer_common(struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuffer)
{
	int ret;
	const char *addr;

	ret = stream->read_subbuffer_ops.extract_subbuffer_info(
			stream, subbuffer);
	if (ret) {
		goto end;
	}

	ret = get_current_subbuf_addr(stream, &addr);
	if (ret) {
		goto end;
	}

	subbuffer->buffer.buffer = lttng_buffer_view_init(
			addr, 0, subbuffer->info.data.padded_subbuf_size);
	LTTNG_ASSERT(subbuffer->buffer.buffer.data != NULL);

end:
	return ret;
}
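
/*
 * get_next_subbuffer operation for data streams: acquire the next sub-buffer
 * from lttng-ust and populate the sub-buffer information and buffer view.
 */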
static enum get_next_subbuffer_status get_next_subbuffer(
		struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuffer)
{
	int ret;
	enum get_next_subbuffer_status status;

	ret = lttng_ust_ctl_get_next_subbuf(stream->ustream);
	switch (ret) {
	case 0:
		status = GET_NEXT_SUBBUFFER_STATUS_OK;
		break;
	case -ENODATA:
	case -EAGAIN:
		/*
		 * The caller only expects -ENODATA when there is no data to
		 * read, but the tracer returns -EAGAIN when there is
		 * currently no data for a non-finalized stream, and -ENODATA
		 * when there is no data for a finalized stream. Those can be
		 * combined into a -ENODATA return value.
		 */
		status = GET_NEXT_SUBBUFFER_STATUS_NO_DATA;
		goto end;
	default:
		status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
		goto end;
	}

	ret = get_next_subbuffer_common(stream, subbuffer);
	if (ret) {
		status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
		goto end;
	}

end:
	return status;
}
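
/*
 * get_next_subbuffer operation for metadata streams. Metadata is first
 * accumulated in the channel's cache; packets are committed from the cache
 * to the ring buffer until a sub-buffer can be acquired. The coherence of
 * the returned sub-buffer is then derived from the cache and ring-buffer
 * occupancy.
 */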
static enum get_next_subbuffer_status get_next_subbuffer_metadata(
		struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuffer)
{
	int ret;
	bool cache_empty;
	bool got_subbuffer;
	bool coherent;
	bool buffer_empty;
	unsigned long consumed_pos, produced_pos;
	enum get_next_subbuffer_status status;

	do {
		ret = lttng_ust_ctl_get_next_subbuf(stream->ustream);
		if (ret == 0) {
			got_subbuffer = true;
		} else {
			got_subbuffer = false;
			if (ret != -EAGAIN) {
				status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
				goto end;
			}
		}

		/*
		 * Determine if the cache is empty and ensure that a sub-buffer
		 * is made available if the cache is not empty.
		 */
		if (!got_subbuffer) {
			ret = commit_one_metadata_packet(stream);
			if (ret < 0 && ret != -ENOBUFS) {
				status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
				goto end;
			} else if (ret == 0) {
				/* Not an error, the cache is empty. */
				cache_empty = true;
				status = GET_NEXT_SUBBUFFER_STATUS_NO_DATA;
				goto end;
			} else {
				cache_empty = false;
			}
		} else {
			pthread_mutex_lock(&stream->chan->metadata_cache->lock);
			cache_empty = stream->chan->metadata_cache->contents.size ==
					stream->ust_metadata_pushed;
			pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
		}
	} while (!got_subbuffer);

	/* Populate sub-buffer infos and view. */
	ret = get_next_subbuffer_common(stream, subbuffer);
	if (ret) {
		status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
		goto end;
	}

	ret = lttng_ustconsumer_sample_snapshot_positions(stream);
	if (ret) {
		/*
		 * -EAGAIN is not expected since we got a sub-buffer and haven't
		 * pushed the consumption position yet (on put_next).
		 */
		PERROR("Failed to take a snapshot of metadata buffer positions");
		status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
		goto end;
	}

	ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos);
	if (ret) {
		PERROR("Failed to get metadata consumed position");
		status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
		goto end;
	}

	ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos);
	if (ret) {
		PERROR("Failed to get metadata produced position");
		status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
		goto end;
	}

	/* Last sub-buffer of the ring buffer? */
	buffer_empty = (consumed_pos + stream->max_sb_size) == produced_pos;

	/*
	 * The sessiond registry lock ensures that coherent units of metadata
	 * are pushed to the consumer daemon at once. Hence, if a sub-buffer is
	 * acquired, the cache is empty, and it is the only sub-buffer
	 * available, it is safe to assume that it is "coherent".
	 */
	coherent = got_subbuffer && cache_empty && buffer_empty;

	LTTNG_OPTIONAL_SET(&subbuffer->info.metadata.coherent, coherent);
	status = GET_NEXT_SUBBUFFER_STATUS_OK;

end:
	return status;
}
static int put_next_subbuffer(struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuffer __attribute__((unused)))
{
	const int ret = lttng_ust_ctl_put_next_subbuf(stream->ustream);

	LTTNG_ASSERT(ret == 0);
	return ret;
}
static int signal_metadata(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx __attribute__((unused)))
{
	ASSERT_LOCKED(stream->metadata_rdv_lock);
	return pthread_cond_broadcast(&stream->metadata_rdv) ? -errno : 0;
}
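
/*
 * Wire the read_subbuffer_ops callbacks of a stream according to its type
 * (metadata or data) and to whether its channel is part of a live session.
 */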
static int lttng_ustconsumer_set_stream_ops(
		struct lttng_consumer_stream *stream)
{
	int ret = 0;

	stream->read_subbuffer_ops.on_wake_up = consumer_stream_ust_on_wake_up;
	if (stream->metadata_flag) {
		stream->read_subbuffer_ops.get_next_subbuffer =
				get_next_subbuffer_metadata;
		stream->read_subbuffer_ops.extract_subbuffer_info =
				extract_metadata_subbuffer_info;
		stream->read_subbuffer_ops.reset_metadata =
				metadata_stream_reset_cache_consumed_position;
		if (stream->chan->is_live) {
			stream->read_subbuffer_ops.on_sleep = signal_metadata;
			ret = consumer_stream_enable_metadata_bucketization(
					stream);
			if (ret) {
				goto end;
			}
		}
	} else {
		stream->read_subbuffer_ops.get_next_subbuffer =
				get_next_subbuffer;
		stream->read_subbuffer_ops.extract_subbuffer_info =
				extract_data_subbuffer_info;
		stream->read_subbuffer_ops.on_sleep = notify_if_more_data;
		if (stream->chan->is_live) {
			stream->read_subbuffer_ops.send_live_beacon =
					consumer_flush_ust_index;
		}
	}

	stream->read_subbuffer_ops.put_next_subbuffer = put_next_subbuffer;

end:
	return ret;
}
/*
 * Called when a stream is created.
 *
 * Return 0 on success or else a negative value.
 */
int lttng_ustconsumer_on_recv_stream(struct lttng_consumer_stream *stream)
{
	int ret;

	LTTNG_ASSERT(stream);

	/*
	 * Don't create anything if this is set for streaming or if there is
	 * no current trace chunk on the parent channel.
	 */
	if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor &&
			stream->chan->trace_chunk) {
		ret = consumer_stream_create_output_files(stream, true);
		if (ret) {
			goto error;
		}
	}

	lttng_ustconsumer_set_stream_ops(stream);
	ret = 0;

error:
	return ret;
}
/*
 * Check if data is still being extracted from the buffers for a specific
 * stream. The consumer data lock and the stream lock MUST be acquired before
 * calling this function.
 *
 * Return 1 if the traced data is still being extracted, else 0, meaning the
 * data is available for trace viewer reading.
 */
int lttng_ustconsumer_data_pending(struct lttng_consumer_stream *stream)
{
	int ret;

	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(stream->ustream);
	ASSERT_LOCKED(stream->lock);

	DBG("UST consumer checking data pending");

	if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) {
		ret = 0;
		goto end;
	}

	if (stream->chan->type == CONSUMER_CHANNEL_TYPE_METADATA) {
		uint64_t contiguous, pushed;

		/* Ease our life a bit. */
		pthread_mutex_lock(&stream->chan->metadata_cache->lock);
		contiguous = stream->chan->metadata_cache->contents.size;
		pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
		pushed = stream->ust_metadata_pushed;

		/*
		 * We can simply check whether all contiguously available data
		 * has been pushed to the ring buffer, since the push operation
		 * is performed within get_next_subbuf(), and because both
		 * get_next_subbuf() and put_next_subbuf() are issued atomically
		 * thanks to the stream lock within
		 * lttng_ustconsumer_read_subbuffer(). This basically means that
		 * whenever ust_metadata_pushed is incremented, the associated
		 * metadata has been consumed from the metadata stream.
		 */
		DBG("UST consumer metadata pending check: contiguous %" PRIu64
				" vs pushed %" PRIu64,
				contiguous, pushed);
		LTTNG_ASSERT(((int64_t) (contiguous - pushed)) >= 0);
		if ((contiguous != pushed) ||
				(((int64_t) contiguous - pushed) > 0 || contiguous == 0)) {
			ret = 1;	/* Data is pending */
			goto end;
		}
	} else {
		ret = lttng_ust_ctl_get_next_subbuf(stream->ustream);
		if (ret == 0) {
			/*
			 * There is still data so let's put back this
			 * subbuffer.
			 */
			ret = lttng_ust_ctl_put_subbuf(stream->ustream);
			LTTNG_ASSERT(ret == 0);
			ret = 1;	/* Data is pending */
			goto end;
		}
	}

	/* Data is NOT pending so ready to be read. */
	ret = 0;

end:
	return ret;
}
/*
 * Stop a given metadata channel timer if enabled and close the wait fd which
 * is the poll pipe of the metadata stream.
 *
 * This MUST be called with the metadata channel lock acquired.
 */
void lttng_ustconsumer_close_metadata(struct lttng_consumer_channel *metadata)
{
	int ret;

	LTTNG_ASSERT(metadata);
	LTTNG_ASSERT(metadata->type == CONSUMER_CHANNEL_TYPE_METADATA);

	DBG("Closing metadata channel key %" PRIu64, metadata->key);

	if (metadata->switch_timer_enabled == 1) {
		consumer_timer_switch_stop(metadata);
	}

	if (!metadata->metadata_stream) {
		goto end;
	}

	/*
	 * Closing write side so the thread monitoring the stream wakes up if any
	 * and clean the metadata stream.
	 */
	if (metadata->metadata_stream->ust_metadata_poll_pipe[1] >= 0) {
		ret = close(metadata->metadata_stream->ust_metadata_poll_pipe[1]);
		if (ret < 0) {
			PERROR("closing metadata pipe write side");
		}
		metadata->metadata_stream->ust_metadata_poll_pipe[1] = -1;
	}

end:
	return;
}
/*
 * Close every metadata stream wait fd of the metadata hash table. This
 * function MUST be used very carefully so as not to run into a race between
 * the metadata thread handling streams and this function closing their wait
 * fd.
 *
 * For UST, this is used when the session daemon hangs up. It's the metadata
 * producer, so calling this is safe because we are assured that no state
 * change can occur in the metadata thread for the streams in the hash table.
 */
void lttng_ustconsumer_close_all_metadata(struct lttng_ht *metadata_ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	LTTNG_ASSERT(metadata_ht);
	LTTNG_ASSERT(metadata_ht->ht);

	DBG("UST consumer closing all metadata streams");

	rcu_read_lock();
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream,
			node.node) {
		health_code_update();

		pthread_mutex_lock(&stream->chan->lock);
		lttng_ustconsumer_close_metadata(stream->chan);
		pthread_mutex_unlock(&stream->chan->lock);
	}
	rcu_read_unlock();
}
void lttng_ustconsumer_close_stream_wakeup(struct lttng_consumer_stream *stream)
{
	int ret;

	ret = lttng_ust_ctl_stream_close_wakeup_fd(stream->ustream);
	if (ret < 0) {
		ERR("Unable to close wakeup fd");
	}
}
/*
 * Please refer to consumer-timer.c before adding any lock within this
 * function or any of its callees. Timers have a very strict locking
 * semantic with respect to teardown. Failure to respect this semantic
 * introduces deadlocks.
 *
 * DON'T hold the metadata lock when calling this function, else this
 * can cause deadlock involving consumer awaiting for metadata to be
 * pushed out due to concurrent interaction with the session daemon.
 */
int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel *channel, int timer, int wait)
{
	struct lttcomm_metadata_request_msg request;
	struct lttcomm_consumer_msg msg;
	enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	uint64_t len, key, offset, version;
	int ret;

	LTTNG_ASSERT(channel);
	LTTNG_ASSERT(channel->metadata_cache);

	memset(&request, 0, sizeof(request));

	/* send the metadata request to sessiond */
	switch (the_consumer_data.type) {
	case LTTNG_CONSUMER64_UST:
		request.bits_per_long = 64;
		break;
	case LTTNG_CONSUMER32_UST:
		request.bits_per_long = 32;
		break;
	default:
		request.bits_per_long = 0;
		break;
	}

	request.session_id = channel->session_id;
	request.session_id_per_pid = channel->session_id_per_pid;
	/*
	 * Request the application UID here so the metadata of that application can
	 * be sent back. The channel UID corresponds to the user UID of the session
	 * used for the rights on the stream file(s).
	 */
	request.uid = channel->ust_app_uid;
	request.key = channel->key;

	DBG("Sending metadata request to sessiond, session id %" PRIu64
			", per-pid %" PRIu64 ", app UID %u and channel key %" PRIu64,
			request.session_id, request.session_id_per_pid,
			request.uid, request.key);

	pthread_mutex_lock(&ctx->metadata_socket_lock);

	health_code_update();

	ret = lttcomm_send_unix_sock(ctx->consumer_metadata_socket, &request,
			sizeof(request));
	if (ret < 0) {
		ERR("Asking metadata to sessiond");
		goto end;
	}

	health_code_update();

	/* Receive the metadata from sessiond */
	ret = lttcomm_recv_unix_sock(ctx->consumer_metadata_socket, &msg,
			sizeof(msg));
	if (ret != sizeof(msg)) {
		DBG("Consumer received unexpected message size %d (expects %zu)",
				ret, sizeof(msg));
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
		/*
		 * The ret value might be 0, meaning an orderly shutdown, but this is
		 * ok since the caller handles this.
		 */
		goto end;
	}

	health_code_update();

	if (msg.cmd_type == LTTNG_ERR_UND) {
		/* No registry found */
		(void) consumer_send_status_msg(ctx->consumer_metadata_socket,
				ret_code);
		ret = 0;
		goto end;
	} else if (msg.cmd_type != LTTNG_CONSUMER_PUSH_METADATA) {
		ERR("Unexpected cmd_type received %d", msg.cmd_type);
		ret = -1;
		goto end;
	}

	len = msg.u.push_metadata.len;
	key = msg.u.push_metadata.key;
	offset = msg.u.push_metadata.target_offset;
	version = msg.u.push_metadata.version;

	LTTNG_ASSERT(key == channel->key);
	if (len == 0) {
		DBG("No new metadata to receive for key %" PRIu64, key);
	}

	health_code_update();

	/* Tell session daemon we are ready to receive the metadata. */
	ret = consumer_send_status_msg(ctx->consumer_metadata_socket,
			LTTCOMM_CONSUMERD_SUCCESS);
	if (ret < 0 || len == 0) {
		/*
		 * Somehow, the session daemon is not responding anymore or there is
		 * nothing to receive.
		 */
		goto end;
	}

	health_code_update();

	ret = lttng_ustconsumer_recv_metadata(ctx->consumer_metadata_socket,
			key, offset, len, version, channel, timer, wait);
	if (ret >= 0) {
		/*
		 * Only send the status msg if the sessiond is alive, meaning a
		 * positive ret code.
		 */
		(void) consumer_send_status_msg(ctx->consumer_metadata_socket, ret);
	}
	ret = 0;

end:
	health_code_update();

	pthread_mutex_unlock(&ctx->metadata_socket_lock);
	return ret;
}
/*
 * Return the result of the lttng_ust_ctl call to get the stream id.
 */
int lttng_ustconsumer_get_stream_id(struct lttng_consumer_stream *stream,
		uint64_t *stream_id)
{
	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(stream_id);

	return lttng_ust_ctl_get_stream_id(stream->ustream, stream_id);
}
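
/*
 * Delegate SIGBUS handling for the faulting address to lttng-ust-ctl.
 */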
void lttng_ustconsumer_sigbus_handle(void *addr)
{
	lttng_ust_ctl_sigbus_handle(addr);
}