/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>

#include <bin/lttng-consumerd/health-consumerd.h>
#include <common/common.h>
#include <common/kernel-ctl/kernel-ctl.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/sessiond-comm/relayd.h>
#include <common/compat/fcntl.h>
#include <common/compat/endian.h>
#include <common/pipe.h>
#include <common/relayd/relayd.h>
#include <common/utils.h>
#include <common/consumer/consumer-stream.h>
#include <common/index/index.h>
#include <common/consumer/consumer-timer.h>
#include <common/optional.h>
#include <common/buffer-view.h>
#include <common/consumer/consumer.h>
#include <common/consumer/metadata-bucket.h>

#include "kernel-consumer.h"
/*
 * Globals shared with the rest of the consumer daemon; defined in the
 * common consumer code (see common/consumer/consumer.c).
 */
extern struct lttng_consumer_global_data consumer_data;
extern int consumer_poll_timeout;
extern volatile int consumer_quit;
58 * Take a snapshot for a specific fd
60 * Returns 0 on success, < 0 on error
62 int lttng_kconsumer_take_snapshot(struct lttng_consumer_stream
*stream
)
65 int infd
= stream
->wait_fd
;
67 ret
= kernctl_snapshot(infd
);
69 * -EAGAIN is not an error, it just means that there is no data to
72 if (ret
!= 0 && ret
!= -EAGAIN
) {
73 PERROR("Getting sub-buffer snapshot.");
80 * Get the produced position
82 * Returns 0 on success, < 0 on error
84 int lttng_kconsumer_get_produced_snapshot(struct lttng_consumer_stream
*stream
,
88 int infd
= stream
->wait_fd
;
90 ret
= kernctl_snapshot_get_produced(infd
, pos
);
92 PERROR("kernctl_snapshot_get_produced");
99 * Get the consumerd position
101 * Returns 0 on success, < 0 on error
103 int lttng_kconsumer_get_consumed_snapshot(struct lttng_consumer_stream
*stream
,
107 int infd
= stream
->wait_fd
;
109 ret
= kernctl_snapshot_get_consumed(infd
, pos
);
111 PERROR("kernctl_snapshot_get_consumed");
118 int get_current_subbuf_addr(struct lttng_consumer_stream
*stream
,
122 unsigned long mmap_offset
;
123 const char *mmap_base
= stream
->mmap_base
;
125 ret
= kernctl_get_mmap_read_offset(stream
->wait_fd
, &mmap_offset
);
127 PERROR("Failed to get mmap read offset");
131 *addr
= mmap_base
+ mmap_offset
;
137 * Take a snapshot of all the stream of a channel
139 * Returns 0 on success, < 0 on error
141 int lttng_kconsumer_snapshot_channel(uint64_t key
, char *path
,
142 uint64_t relayd_id
, uint64_t nb_packets_per_stream
,
143 struct lttng_consumer_local_data
*ctx
)
146 struct lttng_consumer_channel
*channel
;
147 struct lttng_consumer_stream
*stream
;
149 DBG("Kernel consumer snapshot channel %" PRIu64
, key
);
153 channel
= consumer_find_channel(key
);
155 ERR("No channel found for key %" PRIu64
, key
);
160 /* Splice is not supported yet for channel snapshot. */
161 if (channel
->output
!= CONSUMER_CHANNEL_MMAP
) {
162 ERR("Unsupported output %d", channel
->output
);
167 cds_list_for_each_entry(stream
, &channel
->streams
.head
, send_node
) {
168 unsigned long consumed_pos
, produced_pos
;
170 health_code_update();
173 * Lock stream because we are about to change its state.
175 pthread_mutex_lock(&stream
->lock
);
178 * Assign the received relayd ID so we can use it for streaming. The streams
179 * are not visible to anyone so this is OK to change it.
181 stream
->relayd_id
= relayd_id
;
182 channel
->relayd_id
= relayd_id
;
183 if (relayd_id
!= (uint64_t) -1ULL) {
184 ret
= consumer_send_relayd_stream(stream
, path
);
186 ERR("sending stream to relayd");
190 ret
= utils_create_stream_file(path
, stream
->name
,
191 stream
->chan
->tracefile_size
,
192 stream
->tracefile_count_current
,
193 stream
->uid
, stream
->gid
, NULL
);
195 ERR("utils_create_stream_file");
199 stream
->out_fd
= ret
;
200 stream
->tracefile_size_current
= 0;
202 DBG("Kernel consumer snapshot stream %s/%s (%" PRIu64
")",
203 path
, stream
->name
, stream
->key
);
205 if (relayd_id
!= -1ULL) {
206 ret
= consumer_send_relayd_streams_sent(relayd_id
);
208 ERR("sending streams sent to relayd");
211 channel
->streams_sent_to_relayd
= true;
214 ret
= kernctl_buffer_flush_empty(stream
->wait_fd
);
217 * Doing a buffer flush which does not take into
218 * account empty packets. This is not perfect
219 * for stream intersection, but required as a
220 * fall-back when "flush_empty" is not
221 * implemented by lttng-modules.
223 ret
= kernctl_buffer_flush(stream
->wait_fd
);
225 ERR("Failed to flush kernel stream");
231 ret
= lttng_kconsumer_take_snapshot(stream
);
233 ERR("Taking kernel snapshot");
237 ret
= lttng_kconsumer_get_produced_snapshot(stream
, &produced_pos
);
239 ERR("Produced kernel snapshot position");
243 ret
= lttng_kconsumer_get_consumed_snapshot(stream
, &consumed_pos
);
245 ERR("Consumerd kernel snapshot position");
249 if (stream
->max_sb_size
== 0) {
250 ret
= kernctl_get_max_subbuf_size(stream
->wait_fd
,
251 &stream
->max_sb_size
);
253 ERR("Getting kernel max_sb_size");
258 consumed_pos
= consumer_get_consume_start_pos(consumed_pos
,
259 produced_pos
, nb_packets_per_stream
,
260 stream
->max_sb_size
);
262 while (consumed_pos
< produced_pos
) {
264 unsigned long len
, padded_len
;
265 const char *subbuf_addr
;
266 struct lttng_buffer_view subbuf_view
;
268 health_code_update();
269 DBG("Kernel consumer taking snapshot at pos %lu", consumed_pos
);
271 ret
= kernctl_get_subbuf(stream
->wait_fd
, &consumed_pos
);
273 if (ret
!= -EAGAIN
) {
274 PERROR("kernctl_get_subbuf snapshot");
277 DBG("Kernel consumer get subbuf failed. Skipping it.");
278 consumed_pos
+= stream
->max_sb_size
;
279 stream
->chan
->lost_packets
++;
283 ret
= kernctl_get_subbuf_size(stream
->wait_fd
, &len
);
285 ERR("Snapshot kernctl_get_subbuf_size");
286 goto error_put_subbuf
;
289 ret
= kernctl_get_padded_subbuf_size(stream
->wait_fd
, &padded_len
);
291 ERR("Snapshot kernctl_get_padded_subbuf_size");
292 goto error_put_subbuf
;
295 ret
= get_current_subbuf_addr(stream
, &subbuf_addr
);
297 goto error_put_subbuf
;
300 subbuf_view
= lttng_buffer_view_init(
301 subbuf_addr
, 0, padded_len
);
302 read_len
= lttng_consumer_on_read_subbuffer_mmap(
303 stream
, &subbuf_view
,
306 * We write the padded len in local tracefiles but the data len
307 * when using a relay. Display the error but continue processing
308 * to try to release the subbuffer.
310 if (relayd_id
!= (uint64_t) -1ULL) {
311 if (read_len
!= len
) {
312 ERR("Error sending to the relay (ret: %zd != len: %lu)",
316 if (read_len
!= padded_len
) {
317 ERR("Error writing to tracefile (ret: %zd != len: %lu)",
318 read_len
, padded_len
);
322 ret
= kernctl_put_subbuf(stream
->wait_fd
);
324 ERR("Snapshot kernctl_put_subbuf");
327 consumed_pos
+= stream
->max_sb_size
;
330 if (relayd_id
== (uint64_t) -1ULL) {
331 if (stream
->out_fd
>= 0) {
332 ret
= close(stream
->out_fd
);
334 PERROR("Kernel consumer snapshot close out_fd");
340 close_relayd_stream(stream
);
341 stream
->relayd_id
= (uint64_t) -1ULL;
343 pthread_mutex_unlock(&stream
->lock
);
351 ret
= kernctl_put_subbuf(stream
->wait_fd
);
353 ERR("Snapshot kernctl_put_subbuf error path");
356 pthread_mutex_unlock(&stream
->lock
);
363 * Read the whole metadata available for a snapshot.
365 * Returns 0 on success, < 0 on error
367 static int lttng_kconsumer_snapshot_metadata(uint64_t key
, char *path
,
368 uint64_t relayd_id
, struct lttng_consumer_local_data
*ctx
)
370 int ret
, use_relayd
= 0;
372 struct lttng_consumer_channel
*metadata_channel
;
373 struct lttng_consumer_stream
*metadata_stream
;
377 DBG("Kernel consumer snapshot metadata with key %" PRIu64
" at path %s",
382 metadata_channel
= consumer_find_channel(key
);
383 if (!metadata_channel
) {
384 ERR("Kernel snapshot metadata not found for key %" PRIu64
, key
);
386 goto error_no_channel
;
389 metadata_stream
= metadata_channel
->metadata_stream
;
390 assert(metadata_stream
);
391 pthread_mutex_lock(&metadata_stream
->lock
);
393 /* Flag once that we have a valid relayd for the stream. */
394 if (relayd_id
!= (uint64_t) -1ULL) {
399 ret
= consumer_send_relayd_stream(metadata_stream
, path
);
404 ret
= utils_create_stream_file(path
, metadata_stream
->name
,
405 metadata_stream
->chan
->tracefile_size
,
406 metadata_stream
->tracefile_count_current
,
407 metadata_stream
->uid
, metadata_stream
->gid
, NULL
);
411 metadata_stream
->out_fd
= ret
;
415 health_code_update();
417 ret_read
= lttng_consumer_read_subbuffer(metadata_stream
, ctx
, true);
419 if (ret_read
!= -EAGAIN
) {
420 ERR("Kernel snapshot reading metadata subbuffer (ret: %zd)",
425 /* ret_read is negative at this point so we will exit the loop. */
428 } while (ret_read
>= 0);
431 close_relayd_stream(metadata_stream
);
432 metadata_stream
->relayd_id
= (uint64_t) -1ULL;
434 if (metadata_stream
->out_fd
>= 0) {
435 ret
= close(metadata_stream
->out_fd
);
437 PERROR("Kernel consumer snapshot metadata close out_fd");
439 * Don't go on error here since the snapshot was successful at this
440 * point but somehow the close failed.
443 metadata_stream
->out_fd
= -1;
449 pthread_mutex_unlock(&metadata_stream
->lock
);
450 cds_list_del(&metadata_stream
->send_node
);
451 consumer_stream_destroy(metadata_stream
, NULL
);
452 metadata_channel
->metadata_stream
= NULL
;
459 * Receive command from session daemon and process it.
461 * Return 1 on success else a negative value or 0.
463 int lttng_kconsumer_recv_cmd(struct lttng_consumer_local_data
*ctx
,
464 int sock
, struct pollfd
*consumer_sockpoll
)
467 enum lttcomm_return_code ret_code
= LTTCOMM_CONSUMERD_SUCCESS
;
468 struct lttcomm_consumer_msg msg
;
470 health_code_update();
472 ret
= lttcomm_recv_unix_sock(sock
, &msg
, sizeof(msg
));
473 if (ret
!= sizeof(msg
)) {
475 lttng_consumer_send_error(ctx
, LTTCOMM_CONSUMERD_ERROR_RECV_CMD
);
481 health_code_update();
483 /* Deprecated command */
484 assert(msg
.cmd_type
!= LTTNG_CONSUMER_STOP
);
486 health_code_update();
488 /* relayd needs RCU read-side protection */
491 switch (msg
.cmd_type
) {
492 case LTTNG_CONSUMER_ADD_RELAYD_SOCKET
:
494 /* Session daemon status message are handled in the following call. */
495 consumer_add_relayd_socket(msg
.u
.relayd_sock
.net_index
,
496 msg
.u
.relayd_sock
.type
, ctx
, sock
, consumer_sockpoll
,
497 &msg
.u
.relayd_sock
.sock
, msg
.u
.relayd_sock
.session_id
,
498 msg
.u
.relayd_sock
.relayd_session_id
);
501 case LTTNG_CONSUMER_ADD_CHANNEL
:
503 struct lttng_consumer_channel
*new_channel
;
506 health_code_update();
508 /* First send a status message before receiving the fds. */
509 ret
= consumer_send_status_msg(sock
, ret_code
);
511 /* Somehow, the session daemon is not responding anymore. */
515 health_code_update();
517 DBG("consumer_add_channel %" PRIu64
, msg
.u
.channel
.channel_key
);
518 new_channel
= consumer_allocate_channel(msg
.u
.channel
.channel_key
,
519 msg
.u
.channel
.session_id
, msg
.u
.channel
.pathname
,
520 msg
.u
.channel
.name
, msg
.u
.channel
.uid
, msg
.u
.channel
.gid
,
521 msg
.u
.channel
.relayd_id
, msg
.u
.channel
.output
,
522 msg
.u
.channel
.tracefile_size
,
523 msg
.u
.channel
.tracefile_count
, 0,
524 msg
.u
.channel
.monitor
,
525 msg
.u
.channel
.live_timer_interval
,
526 msg
.u
.channel
.is_live
,
528 if (new_channel
== NULL
) {
529 lttng_consumer_send_error(ctx
, LTTCOMM_CONSUMERD_OUTFD_ERROR
);
532 new_channel
->nb_init_stream_left
= msg
.u
.channel
.nb_init_streams
;
533 switch (msg
.u
.channel
.output
) {
534 case LTTNG_EVENT_SPLICE
:
535 new_channel
->output
= CONSUMER_CHANNEL_SPLICE
;
537 case LTTNG_EVENT_MMAP
:
538 new_channel
->output
= CONSUMER_CHANNEL_MMAP
;
541 ERR("Channel output unknown %d", msg
.u
.channel
.output
);
545 /* Translate and save channel type. */
546 switch (msg
.u
.channel
.type
) {
547 case CONSUMER_CHANNEL_TYPE_DATA
:
548 case CONSUMER_CHANNEL_TYPE_METADATA
:
549 new_channel
->type
= msg
.u
.channel
.type
;
556 health_code_update();
558 if (ctx
->on_recv_channel
!= NULL
) {
559 ret_recv
= ctx
->on_recv_channel(new_channel
);
561 ret
= consumer_add_channel(new_channel
, ctx
);
562 } else if (ret_recv
< 0) {
566 ret
= consumer_add_channel(new_channel
, ctx
);
568 if (CONSUMER_CHANNEL_TYPE_DATA
) {
569 consumer_timer_live_start(new_channel
,
570 msg
.u
.channel
.live_timer_interval
);
573 health_code_update();
575 /* If we received an error in add_channel, we need to report it. */
577 ret
= consumer_send_status_msg(sock
, ret
);
586 case LTTNG_CONSUMER_ADD_STREAM
:
589 struct lttng_pipe
*stream_pipe
;
590 struct lttng_consumer_stream
*new_stream
;
591 struct lttng_consumer_channel
*channel
;
595 * Get stream's channel reference. Needed when adding the stream to the
598 channel
= consumer_find_channel(msg
.u
.stream
.channel_key
);
601 * We could not find the channel. Can happen if cpu hotplug
602 * happens while tearing down.
604 ERR("Unable to find channel key %" PRIu64
, msg
.u
.stream
.channel_key
);
605 ret_code
= LTTCOMM_CONSUMERD_CHAN_NOT_FOUND
;
608 health_code_update();
610 /* First send a status message before receiving the fds. */
611 ret
= consumer_send_status_msg(sock
, ret_code
);
613 /* Somehow, the session daemon is not responding anymore. */
617 health_code_update();
619 if (ret_code
!= LTTCOMM_CONSUMERD_SUCCESS
) {
620 /* Channel was not found. */
626 ret
= lttng_consumer_poll_socket(consumer_sockpoll
);
632 health_code_update();
634 /* Get stream file descriptor from socket */
635 ret
= lttcomm_recv_fds_unix_sock(sock
, &fd
, 1);
636 if (ret
!= sizeof(fd
)) {
637 lttng_consumer_send_error(ctx
, LTTCOMM_CONSUMERD_ERROR_RECV_FD
);
642 health_code_update();
645 * Send status code to session daemon only if the recv works. If the
646 * above recv() failed, the session daemon is notified through the
647 * error socket and the teardown is eventually done.
649 ret
= consumer_send_status_msg(sock
, ret_code
);
651 /* Somehow, the session daemon is not responding anymore. */
655 health_code_update();
657 pthread_mutex_lock(&channel
->lock
);
658 new_stream
= consumer_stream_create(
662 LTTNG_CONSUMER_ACTIVE_STREAM
,
672 if (new_stream
== NULL
) {
677 lttng_consumer_send_error(ctx
, LTTCOMM_CONSUMERD_OUTFD_ERROR
);
680 pthread_mutex_unlock(&channel
->lock
);
684 new_stream
->wait_fd
= fd
;
685 ret
= kernctl_get_max_subbuf_size(new_stream
->wait_fd
,
686 &new_stream
->max_sb_size
);
688 pthread_mutex_unlock(&channel
->lock
);
689 ERR("Failed to get kernel maximal subbuffer size");
694 * We've just assigned the channel to the stream so increment the
695 * refcount right now. We don't need to increment the refcount for
696 * streams in no monitor because we handle manually the cleanup of
697 * those. It is very important to make sure there is NO prior
698 * consumer_del_stream() calls or else the refcount will be unbalanced.
700 if (channel
->monitor
) {
701 uatomic_inc(&new_stream
->chan
->refcount
);
705 * The buffer flush is done on the session daemon side for the kernel
706 * so no need for the stream "hangup_flush_done" variable to be
707 * tracked. This is important for a kernel stream since we don't rely
708 * on the flush state of the stream to read data. It's not the case for
709 * user space tracing.
711 new_stream
->hangup_flush_done
= 0;
713 health_code_update();
715 if (ctx
->on_recv_stream
) {
716 ret
= ctx
->on_recv_stream(new_stream
);
718 consumer_stream_free(new_stream
);
723 health_code_update();
725 if (new_stream
->metadata_flag
) {
726 channel
->metadata_stream
= new_stream
;
729 /* Do not monitor this stream. */
730 if (!channel
->monitor
) {
731 DBG("Kernel consumer add stream %s in no monitor mode with "
732 "relayd id %" PRIu64
, new_stream
->name
,
733 new_stream
->relayd_id
);
734 cds_list_add(&new_stream
->send_node
, &channel
->streams
.head
);
735 pthread_mutex_unlock(&channel
->lock
);
739 /* Send stream to relayd if the stream has an ID. */
740 if (new_stream
->relayd_id
!= (uint64_t) -1ULL) {
741 ret
= consumer_send_relayd_stream(new_stream
,
742 new_stream
->chan
->pathname
);
744 pthread_mutex_unlock(&channel
->lock
);
745 consumer_stream_free(new_stream
);
750 * If adding an extra stream to an already
751 * existing channel (e.g. cpu hotplug), we need
752 * to send the "streams_sent" command to relayd.
754 if (channel
->streams_sent_to_relayd
) {
755 ret
= consumer_send_relayd_streams_sent(
756 new_stream
->relayd_id
);
758 pthread_mutex_unlock(&channel
->lock
);
763 pthread_mutex_unlock(&channel
->lock
);
765 /* Get the right pipe where the stream will be sent. */
766 if (new_stream
->metadata_flag
) {
767 ret
= consumer_add_metadata_stream(new_stream
);
769 ERR("Consumer add metadata stream %" PRIu64
" failed. Continuing",
771 consumer_stream_free(new_stream
);
774 stream_pipe
= ctx
->consumer_metadata_pipe
;
776 ret
= consumer_add_data_stream(new_stream
);
778 ERR("Consumer add stream %" PRIu64
" failed. Continuing",
780 consumer_stream_free(new_stream
);
783 stream_pipe
= ctx
->consumer_data_pipe
;
786 /* Vitible to other threads */
787 new_stream
->globally_visible
= 1;
789 health_code_update();
791 ret
= lttng_pipe_write(stream_pipe
, &new_stream
, sizeof(new_stream
));
793 ERR("Consumer write %s stream to pipe %d",
794 new_stream
->metadata_flag
? "metadata" : "data",
795 lttng_pipe_get_writefd(stream_pipe
));
796 if (new_stream
->metadata_flag
) {
797 consumer_del_stream_for_metadata(new_stream
);
799 consumer_del_stream_for_data(new_stream
);
804 DBG("Kernel consumer ADD_STREAM %s (fd: %d) with relayd id %" PRIu64
,
805 new_stream
->name
, fd
, new_stream
->relayd_stream_id
);
808 case LTTNG_CONSUMER_STREAMS_SENT
:
810 struct lttng_consumer_channel
*channel
;
813 * Get stream's channel reference. Needed when adding the stream to the
816 channel
= consumer_find_channel(msg
.u
.sent_streams
.channel_key
);
819 * We could not find the channel. Can happen if cpu hotplug
820 * happens while tearing down.
822 ERR("Unable to find channel key %" PRIu64
,
823 msg
.u
.sent_streams
.channel_key
);
824 ret_code
= LTTCOMM_CONSUMERD_CHAN_NOT_FOUND
;
827 health_code_update();
830 * Send status code to session daemon.
832 ret
= consumer_send_status_msg(sock
, ret_code
);
833 if (ret
< 0 || ret_code
!= LTTCOMM_CONSUMERD_SUCCESS
) {
834 /* Somehow, the session daemon is not responding anymore. */
838 health_code_update();
841 * We should not send this message if we don't monitor the
842 * streams in this channel.
844 if (!channel
->monitor
) {
848 health_code_update();
849 /* Send stream to relayd if the stream has an ID. */
850 if (msg
.u
.sent_streams
.net_seq_idx
!= (uint64_t) -1ULL) {
851 ret
= consumer_send_relayd_streams_sent(
852 msg
.u
.sent_streams
.net_seq_idx
);
856 channel
->streams_sent_to_relayd
= true;
860 case LTTNG_CONSUMER_UPDATE_STREAM
:
865 case LTTNG_CONSUMER_DESTROY_RELAYD
:
867 uint64_t index
= msg
.u
.destroy_relayd
.net_seq_idx
;
868 struct consumer_relayd_sock_pair
*relayd
;
870 DBG("Kernel consumer destroying relayd %" PRIu64
, index
);
872 /* Get relayd reference if exists. */
873 relayd
= consumer_find_relayd(index
);
874 if (relayd
== NULL
) {
875 DBG("Unable to find relayd %" PRIu64
, index
);
876 ret_code
= LTTCOMM_CONSUMERD_RELAYD_FAIL
;
880 * Each relayd socket pair has a refcount of stream attached to it
881 * which tells if the relayd is still active or not depending on the
884 * This will set the destroy flag of the relayd object and destroy it
885 * if the refcount reaches zero when called.
887 * The destroy can happen either here or when a stream fd hangs up.
890 consumer_flag_relayd_for_destroy(relayd
);
893 health_code_update();
895 ret
= consumer_send_status_msg(sock
, ret_code
);
897 /* Somehow, the session daemon is not responding anymore. */
903 case LTTNG_CONSUMER_DATA_PENDING
:
906 uint64_t id
= msg
.u
.data_pending
.session_id
;
908 DBG("Kernel consumer data pending command for id %" PRIu64
, id
);
910 ret
= consumer_data_pending(id
);
912 health_code_update();
914 /* Send back returned value to session daemon */
915 ret
= lttcomm_send_unix_sock(sock
, &ret
, sizeof(ret
));
917 PERROR("send data pending ret code");
922 * No need to send back a status message since the data pending
923 * returned value is the response.
927 case LTTNG_CONSUMER_SNAPSHOT_CHANNEL
:
929 if (msg
.u
.snapshot_channel
.metadata
== 1) {
930 ret
= lttng_kconsumer_snapshot_metadata(msg
.u
.snapshot_channel
.key
,
931 msg
.u
.snapshot_channel
.pathname
,
932 msg
.u
.snapshot_channel
.relayd_id
, ctx
);
934 ERR("Snapshot metadata failed");
935 ret_code
= LTTCOMM_CONSUMERD_ERROR_METADATA
;
938 ret
= lttng_kconsumer_snapshot_channel(msg
.u
.snapshot_channel
.key
,
939 msg
.u
.snapshot_channel
.pathname
,
940 msg
.u
.snapshot_channel
.relayd_id
,
941 msg
.u
.snapshot_channel
.nb_packets_per_stream
,
944 ERR("Snapshot channel failed");
945 ret_code
= LTTCOMM_CONSUMERD_CHAN_NOT_FOUND
;
949 health_code_update();
951 ret
= consumer_send_status_msg(sock
, ret_code
);
953 /* Somehow, the session daemon is not responding anymore. */
958 case LTTNG_CONSUMER_DESTROY_CHANNEL
:
960 uint64_t key
= msg
.u
.destroy_channel
.key
;
961 struct lttng_consumer_channel
*channel
;
963 channel
= consumer_find_channel(key
);
965 ERR("Kernel consumer destroy channel %" PRIu64
" not found", key
);
966 ret_code
= LTTCOMM_CONSUMERD_CHAN_NOT_FOUND
;
969 health_code_update();
971 ret
= consumer_send_status_msg(sock
, ret_code
);
973 /* Somehow, the session daemon is not responding anymore. */
977 health_code_update();
979 /* Stop right now if no channel was found. */
985 * This command should ONLY be issued for channel with streams set in
988 assert(!channel
->monitor
);
991 * The refcount should ALWAYS be 0 in the case of a channel in no
994 assert(!uatomic_sub_return(&channel
->refcount
, 1));
996 consumer_del_channel(channel
);
1000 case LTTNG_CONSUMER_DISCARDED_EVENTS
:
1003 struct lttng_consumer_channel
*channel
;
1004 uint64_t id
= msg
.u
.discarded_events
.session_id
;
1005 uint64_t key
= msg
.u
.discarded_events
.channel_key
;
1007 DBG("Kernel consumer discarded events command for session id %"
1008 PRIu64
", channel key %" PRIu64
, id
, key
);
1010 channel
= consumer_find_channel(key
);
1012 ERR("Kernel consumer discarded events channel %"
1013 PRIu64
" not found", key
);
1016 ret
= channel
->discarded_events
;
1019 health_code_update();
1021 /* Send back returned value to session daemon */
1022 ret
= lttcomm_send_unix_sock(sock
, &ret
, sizeof(ret
));
1024 PERROR("send discarded events");
1030 case LTTNG_CONSUMER_LOST_PACKETS
:
1033 struct lttng_consumer_channel
*channel
;
1034 uint64_t id
= msg
.u
.lost_packets
.session_id
;
1035 uint64_t key
= msg
.u
.lost_packets
.channel_key
;
1037 DBG("Kernel consumer lost packets command for session id %"
1038 PRIu64
", channel key %" PRIu64
, id
, key
);
1040 channel
= consumer_find_channel(key
);
1042 ERR("Kernel consumer lost packets channel %"
1043 PRIu64
" not found", key
);
1046 ret
= channel
->lost_packets
;
1049 health_code_update();
1051 /* Send back returned value to session daemon */
1052 ret
= lttcomm_send_unix_sock(sock
, &ret
, sizeof(ret
));
1054 PERROR("send lost packets");
1068 * Return 1 to indicate success since the 0 value can be a socket
1069 * shutdown during the recv() or send() call.
1071 health_code_update();
1076 /* This will issue a consumer stop. */
1081 * Sync metadata meaning request them to the session daemon and snapshot to the
1082 * metadata thread can consumer them.
1084 * Metadata stream lock MUST be acquired.
1086 * Return 0 if new metadatda is available, EAGAIN if the metadata stream
1087 * is empty or a negative value on error.
1089 int lttng_kconsumer_sync_metadata(struct lttng_consumer_stream
*metadata
)
1095 ret
= kernctl_buffer_flush(metadata
->wait_fd
);
1097 ERR("Failed to flush kernel stream");
1101 ret
= kernctl_snapshot(metadata
->wait_fd
);
1103 if (ret
!= -EAGAIN
) {
1104 ERR("Sync metadata, taking kernel snapshot failed.");
1107 DBG("Sync metadata, no new kernel metadata");
1108 /* No new metadata, exit. */
1118 int extract_common_subbuffer_info(struct lttng_consumer_stream
*stream
,
1119 struct stream_subbuffer
*subbuf
)
1123 ret
= kernctl_get_subbuf_size(
1124 stream
->wait_fd
, &subbuf
->info
.data
.subbuf_size
);
1129 ret
= kernctl_get_padded_subbuf_size(
1130 stream
->wait_fd
, &subbuf
->info
.data
.padded_subbuf_size
);
1140 int extract_metadata_subbuffer_info(struct lttng_consumer_stream
*stream
,
1141 struct stream_subbuffer
*subbuf
)
1145 ret
= extract_common_subbuffer_info(stream
, subbuf
);
1150 ret
= kernctl_get_metadata_version(
1151 stream
->wait_fd
, &subbuf
->info
.metadata
.version
);
1161 int extract_data_subbuffer_info(struct lttng_consumer_stream
*stream
,
1162 struct stream_subbuffer
*subbuf
)
1166 ret
= extract_common_subbuffer_info(stream
, subbuf
);
1171 ret
= kernctl_get_packet_size(
1172 stream
->wait_fd
, &subbuf
->info
.data
.packet_size
);
1174 PERROR("Failed to get sub-buffer packet size");
1178 ret
= kernctl_get_content_size(
1179 stream
->wait_fd
, &subbuf
->info
.data
.content_size
);
1181 PERROR("Failed to get sub-buffer content size");
1185 ret
= kernctl_get_timestamp_begin(
1186 stream
->wait_fd
, &subbuf
->info
.data
.timestamp_begin
);
1188 PERROR("Failed to get sub-buffer begin timestamp");
1192 ret
= kernctl_get_timestamp_end(
1193 stream
->wait_fd
, &subbuf
->info
.data
.timestamp_end
);
1195 PERROR("Failed to get sub-buffer end timestamp");
1199 ret
= kernctl_get_events_discarded(
1200 stream
->wait_fd
, &subbuf
->info
.data
.events_discarded
);
1202 PERROR("Failed to get sub-buffer events discarded count");
1206 ret
= kernctl_get_sequence_number(stream
->wait_fd
,
1207 &subbuf
->info
.data
.sequence_number
.value
);
1209 /* May not be supported by older LTTng-modules. */
1210 if (ret
!= -ENOTTY
) {
1211 PERROR("Failed to get sub-buffer sequence number");
1215 subbuf
->info
.data
.sequence_number
.is_set
= true;
1218 ret
= kernctl_get_stream_id(
1219 stream
->wait_fd
, &subbuf
->info
.data
.stream_id
);
1221 PERROR("Failed to get stream id");
1225 ret
= kernctl_get_instance_id(stream
->wait_fd
,
1226 &subbuf
->info
.data
.stream_instance_id
.value
);
1228 /* May not be supported by older LTTng-modules. */
1229 if (ret
!= -ENOTTY
) {
1230 PERROR("Failed to get stream instance id");
1234 subbuf
->info
.data
.stream_instance_id
.is_set
= true;
1241 int get_subbuffer_common(struct lttng_consumer_stream
*stream
,
1242 struct stream_subbuffer
*subbuffer
)
1246 ret
= kernctl_get_next_subbuf(stream
->wait_fd
);
1251 ret
= stream
->read_subbuffer_ops
.extract_subbuffer_info(
1258 int get_next_subbuffer_splice(struct lttng_consumer_stream
*stream
,
1259 struct stream_subbuffer
*subbuffer
)
1263 ret
= get_subbuffer_common(stream
, subbuffer
);
1268 subbuffer
->buffer
.fd
= stream
->wait_fd
;
1274 int get_next_subbuffer_mmap(struct lttng_consumer_stream
*stream
,
1275 struct stream_subbuffer
*subbuffer
)
1280 ret
= get_subbuffer_common(stream
, subbuffer
);
1285 ret
= get_current_subbuf_addr(stream
, &addr
);
1290 subbuffer
->buffer
.buffer
= lttng_buffer_view_init(
1291 addr
, 0, subbuffer
->info
.data
.padded_subbuf_size
);
1297 int get_next_subbuffer_metadata_check(struct lttng_consumer_stream
*stream
,
1298 struct stream_subbuffer
*subbuffer
)
1304 ret
= kernctl_get_next_subbuf_metadata_check(stream
->wait_fd
,
1310 ret
= stream
->read_subbuffer_ops
.extract_subbuffer_info(
1316 LTTNG_OPTIONAL_SET(&subbuffer
->info
.metadata
.coherent
, coherent
);
1318 ret
= get_current_subbuf_addr(stream
, &addr
);
1323 subbuffer
->buffer
.buffer
= lttng_buffer_view_init(
1324 addr
, 0, subbuffer
->info
.data
.padded_subbuf_size
);
1325 DBG("Got metadata packet with padded_subbuf_size = %lu, coherent = %s",
1326 subbuffer
->info
.metadata
.padded_subbuf_size
,
1327 coherent
? "true" : "false");
1333 int put_next_subbuffer(struct lttng_consumer_stream
*stream
,
1334 struct stream_subbuffer
*subbuffer
)
1336 const int ret
= kernctl_put_next_subbuf(stream
->wait_fd
);
1339 if (ret
== -EFAULT
) {
1340 PERROR("Error in unreserving sub buffer");
1341 } else if (ret
== -EIO
) {
1342 /* Should never happen with newer LTTng versions */
1343 PERROR("Reader has been pushed by the writer, last sub-buffer corrupted");
1351 bool is_get_next_check_metadata_available(int tracer_fd
)
1353 return kernctl_get_next_subbuf_metadata_check(tracer_fd
, NULL
) !=
1358 int lttng_kconsumer_set_stream_ops(
1359 struct lttng_consumer_stream
*stream
)
1363 if (stream
->metadata_flag
&& stream
->chan
->is_live
) {
1364 DBG("Attempting to enable metadata bucketization for live consumers");
1365 if (is_get_next_check_metadata_available(stream
->wait_fd
)) {
1366 DBG("Kernel tracer supports get_next_subbuffer_metadata_check, metadata will be accumulated until a coherent state is reached");
1367 stream
->read_subbuffer_ops
.get_next_subbuffer
=
1368 get_next_subbuffer_metadata_check
;
1369 ret
= consumer_stream_enable_metadata_bucketization(
1376 * The kernel tracer version is too old to indicate
1377 * when the metadata stream has reached a "coherent"
1378 * (parseable) point.
1380 * This means that a live viewer may see an incoherent
1381 * sequence of metadata and fail to parse it.
1383 WARN("Kernel tracer does not support get_next_subbuffer_metadata_check which may cause live clients to fail to parse the metadata stream");
1384 metadata_bucket_destroy(stream
->metadata_bucket
);
1385 stream
->metadata_bucket
= NULL
;
1389 if (!stream
->read_subbuffer_ops
.get_next_subbuffer
) {
1390 if (stream
->chan
->output
== CONSUMER_CHANNEL_MMAP
) {
1391 stream
->read_subbuffer_ops
.get_next_subbuffer
=
1392 get_next_subbuffer_mmap
;
1394 stream
->read_subbuffer_ops
.get_next_subbuffer
=
1395 get_next_subbuffer_splice
;
1399 if (stream
->metadata_flag
) {
1400 stream
->read_subbuffer_ops
.extract_subbuffer_info
=
1401 extract_metadata_subbuffer_info
;
1403 stream
->read_subbuffer_ops
.extract_subbuffer_info
=
1404 extract_data_subbuffer_info
;
1405 if (stream
->chan
->is_live
) {
1406 stream
->read_subbuffer_ops
.send_live_beacon
=
1407 consumer_flush_kernel_index
;
1411 stream
->read_subbuffer_ops
.put_next_subbuffer
= put_next_subbuffer
;
1416 int lttng_kconsumer_on_recv_stream(struct lttng_consumer_stream
*stream
)
1423 * Don't create anything if this is set for streaming or should not be
1426 if (stream
->relayd_id
== (uint64_t) -1ULL && stream
->chan
->monitor
) {
1427 ret
= utils_create_stream_file(stream
->chan
->pathname
, stream
->name
,
1428 stream
->chan
->tracefile_size
, stream
->tracefile_count_current
,
1429 stream
->uid
, stream
->gid
, NULL
);
1433 stream
->out_fd
= ret
;
1434 stream
->tracefile_size_current
= 0;
1436 if (!stream
->metadata_flag
) {
1437 struct lttng_index_file
*index_file
;
1439 index_file
= lttng_index_file_create(stream
->chan
->pathname
,
1440 stream
->name
, stream
->uid
, stream
->gid
,
1441 stream
->chan
->tracefile_size
,
1442 stream
->tracefile_count_current
,
1443 CTF_INDEX_MAJOR
, CTF_INDEX_MINOR
);
1447 stream
->index_file
= index_file
;
1451 if (stream
->output
== LTTNG_EVENT_MMAP
) {
1452 /* get the len of the mmap region */
1453 unsigned long mmap_len
;
1455 ret
= kernctl_get_mmap_len(stream
->wait_fd
, &mmap_len
);
1457 PERROR("kernctl_get_mmap_len");
1458 goto error_close_fd
;
1460 stream
->mmap_len
= (size_t) mmap_len
;
1462 stream
->mmap_base
= mmap(NULL
, stream
->mmap_len
, PROT_READ
,
1463 MAP_PRIVATE
, stream
->wait_fd
, 0);
1464 if (stream
->mmap_base
== MAP_FAILED
) {
1465 PERROR("Error mmaping");
1467 goto error_close_fd
;
1471 ret
= lttng_kconsumer_set_stream_ops(stream
);
1473 goto error_close_fd
;
1476 /* we return 0 to let the library handle the FD internally */
1480 if (stream
->out_fd
>= 0) {
1483 err
= close(stream
->out_fd
);
1485 stream
->out_fd
= -1;
1492 * Check if data is still being extracted from the buffers for a specific
1493 * stream. Consumer data lock MUST be acquired before calling this function
1494 * and the stream lock.
1496 * Return 1 if the traced data are still getting read else 0 meaning that the
1497 * data is available for trace viewer reading.
1499 int lttng_kconsumer_data_pending(struct lttng_consumer_stream
*stream
)
1505 if (stream
->endpoint_status
!= CONSUMER_ENDPOINT_ACTIVE
) {
1510 ret
= kernctl_get_next_subbuf(stream
->wait_fd
);
1512 /* There is still data so let's put back this subbuffer. */
1513 ret
= kernctl_put_subbuf(stream
->wait_fd
);
1515 ret
= 1; /* Data is pending */
1519 /* Data is NOT pending and ready to be read. */