2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 * 2013 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
32 #include <sys/mount.h>
33 #include <sys/resource.h>
34 #include <sys/socket.h>
36 #include <sys/types.h>
38 #include <urcu/uatomic.h>
42 #include <common/common.h>
43 #include <common/compat/socket.h>
44 #include <common/compat/getenv.h>
45 #include <common/defaults.h>
46 #include <common/kernel-consumer/kernel-consumer.h>
47 #include <common/futex.h>
48 #include <common/relayd/relayd.h>
49 #include <common/utils.h>
50 #include <common/daemonize.h>
51 #include <common/config/session-config.h>
53 #include "lttng-sessiond.h"
54 #include "buffer-registry.h"
61 #include "kernel-consumer.h"
65 #include "ust-consumer.h"
68 #include "health-sessiond.h"
69 #include "testpoint.h"
70 #include "ust-thread.h"
71 #include "agent-thread.h"
73 #include "load-session-thread.h"
74 #include "notification-thread.h"
75 #include "notification-thread-commands.h"
78 #include "ht-cleanup.h"
79 #include "sessiond-config.h"
82 static const char *help_msg
=
83 #ifdef LTTNG_EMBED_HELP
84 #include <lttng-sessiond.8.h>
91 static pid_t ppid
; /* Parent PID for --sig-parent option */
92 static pid_t child_ppid
; /* Internal parent PID use with daemonize. */
93 static int lockfile_fd
= -1;
95 /* Set to 1 when a SIGUSR1 signal is received. */
96 static int recv_child_signal
;
99 * Consumer daemon specific control data. Every value not initialized here is
100 * set to 0 by the static definition.
102 static struct consumer_data kconsumer_data
= {
103 .type
= LTTNG_CONSUMER_KERNEL
,
106 .channel_monitor_pipe
= -1,
107 .pid_mutex
= PTHREAD_MUTEX_INITIALIZER
,
108 .lock
= PTHREAD_MUTEX_INITIALIZER
,
109 .cond
= PTHREAD_COND_INITIALIZER
,
110 .cond_mutex
= PTHREAD_MUTEX_INITIALIZER
,
112 static struct consumer_data ustconsumer64_data
= {
113 .type
= LTTNG_CONSUMER64_UST
,
116 .channel_monitor_pipe
= -1,
117 .pid_mutex
= PTHREAD_MUTEX_INITIALIZER
,
118 .lock
= PTHREAD_MUTEX_INITIALIZER
,
119 .cond
= PTHREAD_COND_INITIALIZER
,
120 .cond_mutex
= PTHREAD_MUTEX_INITIALIZER
,
122 static struct consumer_data ustconsumer32_data
= {
123 .type
= LTTNG_CONSUMER32_UST
,
126 .channel_monitor_pipe
= -1,
127 .pid_mutex
= PTHREAD_MUTEX_INITIALIZER
,
128 .lock
= PTHREAD_MUTEX_INITIALIZER
,
129 .cond
= PTHREAD_COND_INITIALIZER
,
130 .cond_mutex
= PTHREAD_MUTEX_INITIALIZER
,
133 /* Command line options */
134 static const struct option long_options
[] = {
135 { "client-sock", required_argument
, 0, 'c' },
136 { "apps-sock", required_argument
, 0, 'a' },
137 { "kconsumerd-cmd-sock", required_argument
, 0, '\0' },
138 { "kconsumerd-err-sock", required_argument
, 0, '\0' },
139 { "ustconsumerd32-cmd-sock", required_argument
, 0, '\0' },
140 { "ustconsumerd32-err-sock", required_argument
, 0, '\0' },
141 { "ustconsumerd64-cmd-sock", required_argument
, 0, '\0' },
142 { "ustconsumerd64-err-sock", required_argument
, 0, '\0' },
143 { "consumerd32-path", required_argument
, 0, '\0' },
144 { "consumerd32-libdir", required_argument
, 0, '\0' },
145 { "consumerd64-path", required_argument
, 0, '\0' },
146 { "consumerd64-libdir", required_argument
, 0, '\0' },
147 { "daemonize", no_argument
, 0, 'd' },
148 { "background", no_argument
, 0, 'b' },
149 { "sig-parent", no_argument
, 0, 'S' },
150 { "help", no_argument
, 0, 'h' },
151 { "group", required_argument
, 0, 'g' },
152 { "version", no_argument
, 0, 'V' },
153 { "quiet", no_argument
, 0, 'q' },
154 { "verbose", no_argument
, 0, 'v' },
155 { "verbose-consumer", no_argument
, 0, '\0' },
156 { "no-kernel", no_argument
, 0, '\0' },
157 { "pidfile", required_argument
, 0, 'p' },
158 { "agent-tcp-port", required_argument
, 0, '\0' },
159 { "config", required_argument
, 0, 'f' },
160 { "load", required_argument
, 0, 'l' },
161 { "kmod-probes", required_argument
, 0, '\0' },
162 { "extra-kmod-probes", required_argument
, 0, '\0' },
166 struct sessiond_config config
;
168 /* Command line options to ignore from configuration file */
169 static const char *config_ignore_options
[] = { "help", "version", "config" };
171 /* Shared between threads */
172 static int dispatch_thread_exit
;
174 /* Sockets and FDs */
175 static int client_sock
= -1;
176 static int apps_sock
= -1;
177 int kernel_tracer_fd
= -1;
178 static int kernel_poll_pipe
[2] = { -1, -1 };
181 * Quit pipe for all threads. This permits a single cancellation point
182 * for all threads when receiving an event on the pipe.
184 static int thread_quit_pipe
[2] = { -1, -1 };
187 * This pipe is used to inform the thread managing application communication
188 * that a command is queued and ready to be processed.
190 static int apps_cmd_pipe
[2] = { -1, -1 };
192 int apps_cmd_notify_pipe
[2] = { -1, -1 };
194 /* Pthread, Mutexes and Semaphores */
195 static pthread_t apps_thread
;
196 static pthread_t apps_notify_thread
;
197 static pthread_t reg_apps_thread
;
198 static pthread_t client_thread
;
199 static pthread_t kernel_thread
;
200 static pthread_t dispatch_thread
;
201 static pthread_t health_thread
;
202 static pthread_t ht_cleanup_thread
;
203 static pthread_t agent_reg_thread
;
204 static pthread_t load_session_thread
;
205 static pthread_t notification_thread
;
208 * UST registration command queue. This queue is tied with a futex and uses a N
209 * wakers / 1 waiter implemented and detailed in futex.c/.h
211 * The thread_registration_apps and thread_dispatch_ust_registration uses this
212 * queue along with the wait/wake scheme. The thread_manage_apps receives down
213 * the line new application socket and monitors it for any I/O error or clean
214 * close that triggers an unregistration of the application.
216 static struct ust_cmd_queue ust_cmd_queue
;
219 * Pointer initialized before thread creation.
221 * This points to the tracing session list containing the session count and a
222 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
223 * MUST NOT be taken if you call a public function in session.c.
225 * The lock is nested inside the structure: session_list_ptr->lock. Please use
226 * session_lock_list and session_unlock_list for lock acquisition.
228 static struct ltt_session_list
*session_list_ptr
;
230 int ust_consumerd64_fd
= -1;
231 int ust_consumerd32_fd
= -1;
233 static const char *module_proc_lttng
= "/proc/lttng";
236 * Consumer daemon state which is changed when spawning it, killing it or in
237 * case of a fatal error.
239 enum consumerd_state
{
240 CONSUMER_STARTED
= 1,
241 CONSUMER_STOPPED
= 2,
246 * This consumer daemon state is used to validate if a client command will be
247 * able to reach the consumer. If not, the client is informed. For instance,
248 * doing a "lttng start" when the consumer state is set to ERROR will return an
249 * error to the client.
251 * The following example shows a possible race condition of this scheme:
253 * consumer thread error happens
255 * client cmd checks state -> still OK
256 * consumer thread exit, sets error
257 * client cmd try to talk to consumer
260 * However, since the consumer is a different daemon, we have no way of making
261 * sure the command will reach it safely even with this state flag. This is why
262 * we consider that up to the state validation during command processing, the
263 * command is safe. After that, we can not guarantee the correctness of the
264 * client request vis-a-vis the consumer.
266 static enum consumerd_state ust_consumerd_state
;
267 static enum consumerd_state kernel_consumerd_state
;
269 /* Set in main() with the current page size. */
272 /* Application health monitoring */
273 struct health_app
*health_sessiond
;
275 /* Am I root or not. */
276 int is_root
; /* Set to 1 if the daemon is running as root */
278 const char * const config_section_name
= "sessiond";
280 /* Load session thread information to operate. */
281 struct load_session_thread_data
*load_info
;
283 /* Notification thread handle. */
284 struct notification_thread_handle
*notification_thread_handle
;
286 /* Global hash tables */
287 struct lttng_ht
*agent_apps_ht_by_sock
= NULL
;
290 * The initialization of the session daemon is done in multiple phases.
292 * While all threads are launched near-simultaneously, only some of them
293 * are needed to ensure the session daemon can start to respond to client
296 * There are two important guarantees that we wish to offer with respect
297 * to the initialisation of the session daemon:
298 * - When the daemonize/background launcher process exits, the sessiond
299 * is fully able to respond to client requests,
300 * - Auto-loaded sessions are visible to clients.
302 * In order to achieve this, a number of support threads have to be launched
303 * to allow the "client" thread to function properly. Moreover, since the
304 * "load session" thread needs the client thread, we must provide a way
305 * for the "load session" thread to know that the "client" thread is up
308 * Hence, the support threads decrement the lttng_sessiond_ready counter
309 * while the "client" threads waits for it to reach 0. Once the "client" thread
310 * unblocks, it posts the message_thread_ready semaphore which allows the
311 * "load session" thread to progress.
313 * This implies that the "load session" thread is the last to be initialized
314 * and will explicitly call sessiond_signal_parents(), which signals the parents
315 * that the session daemon is fully initialized.
317 * The three (3) support threads are:
319 * - notification_thread
322 int lttng_sessiond_ready
= 3;
324 int sessiond_check_thread_quit_pipe(int fd
, uint32_t events
)
326 return (fd
== thread_quit_pipe
[0] && (events
& LPOLLIN
)) ? 1 : 0;
329 /* Notify parents that we are ready for cmd and health check */
331 void sessiond_signal_parents(void)
334 * Notify parent pid that we are ready to accept command
335 * for client side. This ppid is the one from the
336 * external process that spawned us.
338 if (config
.sig_parent
) {
343 * Notify the parent of the fork() process that we are
346 if (config
.daemonize
|| config
.background
) {
347 kill(child_ppid
, SIGUSR1
);
352 void sessiond_notify_ready(void)
355 * The _return variant is used since the implied memory barriers are
358 (void) uatomic_sub_return(<tng_sessiond_ready
, 1);
362 int __sessiond_set_thread_pollset(struct lttng_poll_event
*events
, size_t size
,
369 ret
= lttng_poll_create(events
, size
, LTTNG_CLOEXEC
);
375 ret
= lttng_poll_add(events
, a_pipe
[0], LPOLLIN
| LPOLLERR
);
387 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
389 int sessiond_set_thread_pollset(struct lttng_poll_event
*events
, size_t size
)
391 return __sessiond_set_thread_pollset(events
, size
, thread_quit_pipe
);
395 * Init thread quit pipe.
397 * Return -1 on error or 0 if all pipes are created.
399 static int __init_thread_quit_pipe(int *a_pipe
)
405 PERROR("thread quit pipe");
409 for (i
= 0; i
< 2; i
++) {
410 ret
= fcntl(a_pipe
[i
], F_SETFD
, FD_CLOEXEC
);
421 static int init_thread_quit_pipe(void)
423 return __init_thread_quit_pipe(thread_quit_pipe
);
427 * Stop all threads by closing the thread quit pipe.
429 static void stop_threads(void)
433 /* Stopping all threads */
434 DBG("Terminating all threads");
435 ret
= notify_thread_pipe(thread_quit_pipe
[1]);
437 ERR("write error on thread quit pipe");
440 /* Dispatch thread */
441 CMM_STORE_SHARED(dispatch_thread_exit
, 1);
442 futex_nto1_wake(&ust_cmd_queue
.futex
);
446 * Close every consumer sockets.
448 static void close_consumer_sockets(void)
452 if (kconsumer_data
.err_sock
>= 0) {
453 ret
= close(kconsumer_data
.err_sock
);
455 PERROR("kernel consumer err_sock close");
458 if (ustconsumer32_data
.err_sock
>= 0) {
459 ret
= close(ustconsumer32_data
.err_sock
);
461 PERROR("UST consumerd32 err_sock close");
464 if (ustconsumer64_data
.err_sock
>= 0) {
465 ret
= close(ustconsumer64_data
.err_sock
);
467 PERROR("UST consumerd64 err_sock close");
470 if (kconsumer_data
.cmd_sock
>= 0) {
471 ret
= close(kconsumer_data
.cmd_sock
);
473 PERROR("kernel consumer cmd_sock close");
476 if (ustconsumer32_data
.cmd_sock
>= 0) {
477 ret
= close(ustconsumer32_data
.cmd_sock
);
479 PERROR("UST consumerd32 cmd_sock close");
482 if (ustconsumer64_data
.cmd_sock
>= 0) {
483 ret
= close(ustconsumer64_data
.cmd_sock
);
485 PERROR("UST consumerd64 cmd_sock close");
488 if (kconsumer_data
.channel_monitor_pipe
>= 0) {
489 ret
= close(kconsumer_data
.channel_monitor_pipe
);
491 PERROR("kernel consumer channel monitor pipe close");
494 if (ustconsumer32_data
.channel_monitor_pipe
>= 0) {
495 ret
= close(ustconsumer32_data
.channel_monitor_pipe
);
497 PERROR("UST consumerd32 channel monitor pipe close");
500 if (ustconsumer64_data
.channel_monitor_pipe
>= 0) {
501 ret
= close(ustconsumer64_data
.channel_monitor_pipe
);
503 PERROR("UST consumerd64 channel monitor pipe close");
509 * Wait on consumer process termination.
511 * Need to be called with the consumer data lock held or from a context
512 * ensuring no concurrent access to data (e.g: cleanup).
514 static void wait_consumer(struct consumer_data
*consumer_data
)
519 if (consumer_data
->pid
<= 0) {
523 DBG("Waiting for complete teardown of consumerd (PID: %d)",
525 ret
= waitpid(consumer_data
->pid
, &status
, 0);
527 PERROR("consumerd waitpid pid: %d", consumer_data
->pid
)
528 } else if (!WIFEXITED(status
)) {
529 ERR("consumerd termination with error: %d",
532 consumer_data
->pid
= 0;
536 * Cleanup the session daemon's data structures.
538 static void sessiond_cleanup(void)
541 struct ltt_session
*sess
, *stmp
;
543 DBG("Cleanup sessiond");
546 * Close the thread quit pipe. It has already done its job,
547 * since we are now called.
549 utils_close_pipe(thread_quit_pipe
);
551 ret
= remove(config
.pid_file_path
.value
);
553 PERROR("remove pidfile %s", config
.pid_file_path
.value
);
556 DBG("Removing sessiond and consumerd content of directory %s",
557 config
.rundir
.value
);
560 DBG("Removing %s", config
.pid_file_path
.value
);
561 (void) unlink(config
.pid_file_path
.value
);
563 DBG("Removing %s", config
.agent_port_file_path
.value
);
564 (void) unlink(config
.agent_port_file_path
.value
);
567 DBG("Removing %s", kconsumer_data
.err_unix_sock_path
);
568 (void) unlink(kconsumer_data
.err_unix_sock_path
);
570 DBG("Removing directory %s", config
.kconsumerd_path
.value
);
571 (void) rmdir(config
.kconsumerd_path
.value
);
573 /* ust consumerd 32 */
574 DBG("Removing %s", config
.consumerd32_err_unix_sock_path
.value
);
575 (void) unlink(config
.consumerd32_err_unix_sock_path
.value
);
577 DBG("Removing directory %s", config
.consumerd32_path
.value
);
578 (void) rmdir(config
.consumerd32_path
.value
);
580 /* ust consumerd 64 */
581 DBG("Removing %s", config
.consumerd64_err_unix_sock_path
.value
);
582 (void) unlink(config
.consumerd64_err_unix_sock_path
.value
);
584 DBG("Removing directory %s", config
.consumerd64_path
.value
);
585 (void) rmdir(config
.consumerd64_path
.value
);
587 DBG("Cleaning up all sessions");
589 /* Destroy session list mutex */
590 if (session_list_ptr
!= NULL
) {
591 pthread_mutex_destroy(&session_list_ptr
->lock
);
593 /* Cleanup ALL session */
594 cds_list_for_each_entry_safe(sess
, stmp
,
595 &session_list_ptr
->head
, list
) {
596 cmd_destroy_session(sess
, kernel_poll_pipe
[1]);
600 wait_consumer(&kconsumer_data
);
601 wait_consumer(&ustconsumer64_data
);
602 wait_consumer(&ustconsumer32_data
);
604 DBG("Cleaning up all agent apps");
605 agent_app_ht_clean();
607 DBG("Closing all UST sockets");
608 ust_app_clean_list();
609 buffer_reg_destroy_registries();
611 if (is_root
&& !config
.no_kernel
) {
612 DBG2("Closing kernel fd");
613 if (kernel_tracer_fd
>= 0) {
614 ret
= close(kernel_tracer_fd
);
619 DBG("Unloading kernel modules");
620 modprobe_remove_lttng_all();
624 close_consumer_sockets();
627 load_session_destroy_data(load_info
);
632 * We do NOT rmdir rundir because there are other processes
633 * using it, for instance lttng-relayd, which can start in
634 * parallel with this teardown.
639 * Cleanup the daemon's option data structures.
641 static void sessiond_cleanup_options(void)
643 DBG("Cleaning up options");
645 sessiond_config_fini(&config
);
647 run_as_destroy_worker();
651 * Send data on a unix socket using the liblttsessiondcomm API.
653 * Return lttcomm error code.
655 static int send_unix_sock(int sock
, void *buf
, size_t len
)
657 /* Check valid length */
662 return lttcomm_send_unix_sock(sock
, buf
, len
);
666 * Free memory of a command context structure.
668 static void clean_command_ctx(struct command_ctx
**cmd_ctx
)
670 DBG("Clean command context structure");
672 if ((*cmd_ctx
)->llm
) {
673 free((*cmd_ctx
)->llm
);
675 if ((*cmd_ctx
)->lsm
) {
676 free((*cmd_ctx
)->lsm
);
684 * Notify UST applications using the shm mmap futex.
686 static int notify_ust_apps(int active
)
690 DBG("Notifying applications of session daemon state: %d", active
);
692 /* See shm.c for this call implying mmap, shm and futex calls */
693 wait_shm_mmap
= shm_ust_get_mmap(config
.wait_shm_path
.value
, is_root
);
694 if (wait_shm_mmap
== NULL
) {
698 /* Wake waiting process */
699 futex_wait_update((int32_t *) wait_shm_mmap
, active
);
701 /* Apps notified successfully */
709 * Setup the outgoing data buffer for the response (llm) by allocating the
710 * right amount of memory and copying the original information from the lsm
713 * Return 0 on success, negative value on error.
715 static int setup_lttng_msg(struct command_ctx
*cmd_ctx
,
716 const void *payload_buf
, size_t payload_len
,
717 const void *cmd_header_buf
, size_t cmd_header_len
)
720 const size_t header_len
= sizeof(struct lttcomm_lttng_msg
);
721 const size_t cmd_header_offset
= header_len
;
722 const size_t payload_offset
= cmd_header_offset
+ cmd_header_len
;
723 const size_t total_msg_size
= header_len
+ cmd_header_len
+ payload_len
;
725 cmd_ctx
->llm
= zmalloc(total_msg_size
);
727 if (cmd_ctx
->llm
== NULL
) {
733 /* Copy common data */
734 cmd_ctx
->llm
->cmd_type
= cmd_ctx
->lsm
->cmd_type
;
735 cmd_ctx
->llm
->pid
= cmd_ctx
->lsm
->domain
.attr
.pid
;
736 cmd_ctx
->llm
->cmd_header_size
= cmd_header_len
;
737 cmd_ctx
->llm
->data_size
= payload_len
;
738 cmd_ctx
->lttng_msg_size
= total_msg_size
;
740 /* Copy command header */
741 if (cmd_header_len
) {
742 memcpy(((uint8_t *) cmd_ctx
->llm
) + cmd_header_offset
, cmd_header_buf
,
748 memcpy(((uint8_t *) cmd_ctx
->llm
) + payload_offset
, payload_buf
,
757 * Version of setup_lttng_msg() without command header.
759 static int setup_lttng_msg_no_cmd_header(struct command_ctx
*cmd_ctx
,
760 void *payload_buf
, size_t payload_len
)
762 return setup_lttng_msg(cmd_ctx
, payload_buf
, payload_len
, NULL
, 0);
765 * Update the kernel poll set of all channel fd available over all tracing
766 * session. Add the wakeup pipe at the end of the set.
768 static int update_kernel_poll(struct lttng_poll_event
*events
)
771 struct ltt_session
*session
;
772 struct ltt_kernel_channel
*channel
;
774 DBG("Updating kernel poll set");
777 cds_list_for_each_entry(session
, &session_list_ptr
->head
, list
) {
778 session_lock(session
);
779 if (session
->kernel_session
== NULL
) {
780 session_unlock(session
);
784 cds_list_for_each_entry(channel
,
785 &session
->kernel_session
->channel_list
.head
, list
) {
786 /* Add channel fd to the kernel poll set */
787 ret
= lttng_poll_add(events
, channel
->fd
, LPOLLIN
| LPOLLRDNORM
);
789 session_unlock(session
);
792 DBG("Channel fd %d added to kernel set", channel
->fd
);
794 session_unlock(session
);
796 session_unlock_list();
801 session_unlock_list();
806 * Find the channel fd from 'fd' over all tracing session. When found, check
807 * for new channel stream and send those stream fds to the kernel consumer.
809 * Useful for CPU hotplug feature.
811 static int update_kernel_stream(struct consumer_data
*consumer_data
, int fd
)
814 struct ltt_session
*session
;
815 struct ltt_kernel_session
*ksess
;
816 struct ltt_kernel_channel
*channel
;
818 DBG("Updating kernel streams for channel fd %d", fd
);
821 cds_list_for_each_entry(session
, &session_list_ptr
->head
, list
) {
822 session_lock(session
);
823 if (session
->kernel_session
== NULL
) {
824 session_unlock(session
);
827 ksess
= session
->kernel_session
;
829 cds_list_for_each_entry(channel
,
830 &ksess
->channel_list
.head
, list
) {
831 struct lttng_ht_iter iter
;
832 struct consumer_socket
*socket
;
834 if (channel
->fd
!= fd
) {
837 DBG("Channel found, updating kernel streams");
838 ret
= kernel_open_channel_stream(channel
);
842 /* Update the stream global counter */
843 ksess
->stream_count_global
+= ret
;
846 * Have we already sent fds to the consumer? If yes, it
847 * means that tracing is started so it is safe to send
848 * our updated stream fds.
850 if (ksess
->consumer_fds_sent
!= 1
851 || ksess
->consumer
== NULL
) {
857 cds_lfht_for_each_entry(ksess
->consumer
->socks
->ht
,
858 &iter
.iter
, socket
, node
.node
) {
859 pthread_mutex_lock(socket
->lock
);
860 ret
= kernel_consumer_send_channel_stream(socket
,
862 session
->output_traces
? 1 : 0);
863 pthread_mutex_unlock(socket
->lock
);
871 session_unlock(session
);
873 session_unlock_list();
877 session_unlock(session
);
878 session_unlock_list();
883 * For each tracing session, update newly registered apps. The session list
884 * lock MUST be acquired before calling this.
886 static void update_ust_app(int app_sock
)
888 struct ltt_session
*sess
, *stmp
;
890 /* Consumer is in an ERROR state. Stop any application update. */
891 if (uatomic_read(&ust_consumerd_state
) == CONSUMER_ERROR
) {
892 /* Stop the update process since the consumer is dead. */
896 /* For all tracing session(s) */
897 cds_list_for_each_entry_safe(sess
, stmp
, &session_list_ptr
->head
, list
) {
901 if (!sess
->ust_session
) {
906 assert(app_sock
>= 0);
907 app
= ust_app_find_by_sock(app_sock
);
910 * Application can be unregistered before so
911 * this is possible hence simply stopping the
914 DBG3("UST app update failed to find app sock %d",
918 ust_app_global_update(sess
->ust_session
, app
);
922 session_unlock(sess
);
927 * This thread manage event coming from the kernel.
929 * Features supported in this thread:
932 static void *thread_manage_kernel(void *data
)
934 int ret
, i
, pollfd
, update_poll_flag
= 1, err
= -1;
935 uint32_t revents
, nb_fd
;
937 struct lttng_poll_event events
;
939 DBG("[thread] Thread manage kernel started");
941 health_register(health_sessiond
, HEALTH_SESSIOND_TYPE_KERNEL
);
944 * This first step of the while is to clean this structure which could free
945 * non NULL pointers so initialize it before the loop.
947 lttng_poll_init(&events
);
949 if (testpoint(sessiond_thread_manage_kernel
)) {
950 goto error_testpoint
;
953 health_code_update();
955 if (testpoint(sessiond_thread_manage_kernel_before_loop
)) {
956 goto error_testpoint
;
960 health_code_update();
962 if (update_poll_flag
== 1) {
963 /* Clean events object. We are about to populate it again. */
964 lttng_poll_clean(&events
);
966 ret
= sessiond_set_thread_pollset(&events
, 2);
968 goto error_poll_create
;
971 ret
= lttng_poll_add(&events
, kernel_poll_pipe
[0], LPOLLIN
);
976 /* This will add the available kernel channel if any. */
977 ret
= update_kernel_poll(&events
);
981 update_poll_flag
= 0;
984 DBG("Thread kernel polling");
986 /* Poll infinite value of time */
989 ret
= lttng_poll_wait(&events
, -1);
990 DBG("Thread kernel return from poll on %d fds",
991 LTTNG_POLL_GETNB(&events
));
995 * Restart interrupted system call.
997 if (errno
== EINTR
) {
1001 } else if (ret
== 0) {
1002 /* Should not happen since timeout is infinite */
1003 ERR("Return value of poll is 0 with an infinite timeout.\n"
1004 "This should not have happened! Continuing...");
1010 for (i
= 0; i
< nb_fd
; i
++) {
1011 /* Fetch once the poll data */
1012 revents
= LTTNG_POLL_GETEV(&events
, i
);
1013 pollfd
= LTTNG_POLL_GETFD(&events
, i
);
1015 health_code_update();
1018 /* No activity for this FD (poll implementation). */
1022 /* Thread quit pipe has been closed. Killing thread. */
1023 ret
= sessiond_check_thread_quit_pipe(pollfd
, revents
);
1029 /* Check for data on kernel pipe */
1030 if (revents
& LPOLLIN
) {
1031 if (pollfd
== kernel_poll_pipe
[0]) {
1032 (void) lttng_read(kernel_poll_pipe
[0],
1035 * Ret value is useless here, if this pipe gets any actions an
1036 * update is required anyway.
1038 update_poll_flag
= 1;
1042 * New CPU detected by the kernel. Adding kernel stream to
1043 * kernel session and updating the kernel consumer
1045 ret
= update_kernel_stream(&kconsumer_data
, pollfd
);
1051 } else if (revents
& (LPOLLERR
| LPOLLHUP
| LPOLLRDHUP
)) {
1052 update_poll_flag
= 1;
1055 ERR("Unexpected poll events %u for sock %d", revents
, pollfd
);
1063 lttng_poll_clean(&events
);
1066 utils_close_pipe(kernel_poll_pipe
);
1067 kernel_poll_pipe
[0] = kernel_poll_pipe
[1] = -1;
1070 ERR("Health error occurred in %s", __func__
);
1071 WARN("Kernel thread died unexpectedly. "
1072 "Kernel tracing can continue but CPU hotplug is disabled.");
1074 health_unregister(health_sessiond
);
1075 DBG("Kernel thread dying");
1080 * Signal pthread condition of the consumer data that the thread.
1082 static void signal_consumer_condition(struct consumer_data
*data
, int state
)
1084 pthread_mutex_lock(&data
->cond_mutex
);
1087 * The state is set before signaling. It can be any value, it's the waiter
1088 * job to correctly interpret this condition variable associated to the
1089 * consumer pthread_cond.
1091 * A value of 0 means that the corresponding thread of the consumer data
1092 * was not started. 1 indicates that the thread has started and is ready
1093 * for action. A negative value means that there was an error during the
1096 data
->consumer_thread_is_ready
= state
;
1097 (void) pthread_cond_signal(&data
->cond
);
1099 pthread_mutex_unlock(&data
->cond_mutex
);
1103 * This thread manage the consumer error sent back to the session daemon.
1105 static void *thread_manage_consumer(void *data
)
1107 int sock
= -1, i
, ret
, pollfd
, err
= -1, should_quit
= 0;
1108 uint32_t revents
, nb_fd
;
1109 enum lttcomm_return_code code
;
1110 struct lttng_poll_event events
;
1111 struct consumer_data
*consumer_data
= data
;
1112 struct consumer_socket
*cmd_socket_wrapper
= NULL
;
1114 DBG("[thread] Manage consumer started");
1116 rcu_register_thread();
1117 rcu_thread_online();
1119 health_register(health_sessiond
, HEALTH_SESSIOND_TYPE_CONSUMER
);
1121 health_code_update();
1124 * Pass 3 as size here for the thread quit pipe, consumerd_err_sock and the
1125 * metadata_sock. Nothing more will be added to this poll set.
1127 ret
= sessiond_set_thread_pollset(&events
, 3);
1133 * The error socket here is already in a listening state which was done
1134 * just before spawning this thread to avoid a race between the consumer
1135 * daemon exec trying to connect and the listen() call.
1137 ret
= lttng_poll_add(&events
, consumer_data
->err_sock
, LPOLLIN
| LPOLLRDHUP
);
1142 health_code_update();
1144 /* Infinite blocking call, waiting for transmission */
1146 health_poll_entry();
1148 if (testpoint(sessiond_thread_manage_consumer
)) {
1152 ret
= lttng_poll_wait(&events
, -1);
1156 * Restart interrupted system call.
1158 if (errno
== EINTR
) {
1166 for (i
= 0; i
< nb_fd
; i
++) {
1167 /* Fetch once the poll data */
1168 revents
= LTTNG_POLL_GETEV(&events
, i
);
1169 pollfd
= LTTNG_POLL_GETFD(&events
, i
);
1171 health_code_update();
1174 /* No activity for this FD (poll implementation). */
1178 /* Thread quit pipe has been closed. Killing thread. */
1179 ret
= sessiond_check_thread_quit_pipe(pollfd
, revents
);
1185 /* Event on the registration socket */
1186 if (pollfd
== consumer_data
->err_sock
) {
1187 if (revents
& LPOLLIN
) {
1189 } else if (revents
& (LPOLLERR
| LPOLLHUP
| LPOLLRDHUP
)) {
1190 ERR("consumer err socket poll error");
1193 ERR("Unexpected poll events %u for sock %d", revents
, pollfd
);
1199 sock
= lttcomm_accept_unix_sock(consumer_data
->err_sock
);
1205 * Set the CLOEXEC flag. Return code is useless because either way, the
1208 (void) utils_set_fd_cloexec(sock
);
1210 health_code_update();
1212 DBG2("Receiving code from consumer err_sock");
1214 /* Getting status code from kconsumerd */
1215 ret
= lttcomm_recv_unix_sock(sock
, &code
,
1216 sizeof(enum lttcomm_return_code
));
1221 health_code_update();
1222 if (code
!= LTTCOMM_CONSUMERD_COMMAND_SOCK_READY
) {
1223 ERR("consumer error when waiting for SOCK_READY : %s",
1224 lttcomm_get_readable_code(-code
));
1228 /* Connect both command and metadata sockets. */
1229 consumer_data
->cmd_sock
=
1230 lttcomm_connect_unix_sock(
1231 consumer_data
->cmd_unix_sock_path
);
1232 consumer_data
->metadata_fd
=
1233 lttcomm_connect_unix_sock(
1234 consumer_data
->cmd_unix_sock_path
);
1235 if (consumer_data
->cmd_sock
< 0 || consumer_data
->metadata_fd
< 0) {
1236 PERROR("consumer connect cmd socket");
1237 /* On error, signal condition and quit. */
1238 signal_consumer_condition(consumer_data
, -1);
1242 consumer_data
->metadata_sock
.fd_ptr
= &consumer_data
->metadata_fd
;
1244 /* Create metadata socket lock. */
1245 consumer_data
->metadata_sock
.lock
= zmalloc(sizeof(pthread_mutex_t
));
1246 if (consumer_data
->metadata_sock
.lock
== NULL
) {
1247 PERROR("zmalloc pthread mutex");
1250 pthread_mutex_init(consumer_data
->metadata_sock
.lock
, NULL
);
1252 DBG("Consumer command socket ready (fd: %d", consumer_data
->cmd_sock
);
1253 DBG("Consumer metadata socket ready (fd: %d)",
1254 consumer_data
->metadata_fd
);
1257 * Remove the consumerd error sock since we've established a connection.
1259 ret
= lttng_poll_del(&events
, consumer_data
->err_sock
);
1264 /* Add new accepted error socket. */
1265 ret
= lttng_poll_add(&events
, sock
, LPOLLIN
| LPOLLRDHUP
);
1270 /* Add metadata socket that is successfully connected. */
1271 ret
= lttng_poll_add(&events
, consumer_data
->metadata_fd
,
1272 LPOLLIN
| LPOLLRDHUP
);
1277 health_code_update();
1280 * Transfer the write-end of the channel monitoring pipe to the
1281 * by issuing a SET_CHANNEL_MONITOR_PIPE command.
1283 cmd_socket_wrapper
= consumer_allocate_socket(&consumer_data
->cmd_sock
);
1284 if (!cmd_socket_wrapper
) {
1287 cmd_socket_wrapper
->lock
= &consumer_data
->lock
;
1289 ret
= consumer_send_channel_monitor_pipe(cmd_socket_wrapper
,
1290 consumer_data
->channel_monitor_pipe
);
1294 /* Discard the socket wrapper as it is no longer needed. */
1295 consumer_destroy_socket(cmd_socket_wrapper
);
1296 cmd_socket_wrapper
= NULL
;
1298 /* The thread is completely initialized, signal that it is ready. */
1299 signal_consumer_condition(consumer_data
, 1);
1301 /* Infinite blocking call, waiting for transmission */
1304 health_code_update();
1306 /* Exit the thread because the thread quit pipe has been triggered. */
1308 /* Not a health error. */
1313 health_poll_entry();
1314 ret
= lttng_poll_wait(&events
, -1);
1318 * Restart interrupted system call.
1320 if (errno
== EINTR
) {
1328 for (i
= 0; i
< nb_fd
; i
++) {
1329 /* Fetch once the poll data */
1330 revents
= LTTNG_POLL_GETEV(&events
, i
);
1331 pollfd
= LTTNG_POLL_GETFD(&events
, i
);
1333 health_code_update();
1336 /* No activity for this FD (poll implementation). */
1341 * Thread quit pipe has been triggered, flag that we should stop
1342 * but continue the current loop to handle potential data from
1345 should_quit
= sessiond_check_thread_quit_pipe(pollfd
, revents
);
1347 if (pollfd
== sock
) {
1348 /* Event on the consumerd socket */
1349 if (revents
& (LPOLLERR
| LPOLLHUP
| LPOLLRDHUP
)
1350 && !(revents
& LPOLLIN
)) {
1351 ERR("consumer err socket second poll error");
1354 health_code_update();
1355 /* Wait for any kconsumerd error */
1356 ret
= lttcomm_recv_unix_sock(sock
, &code
,
1357 sizeof(enum lttcomm_return_code
));
1359 ERR("consumer closed the command socket");
1363 ERR("consumer return code : %s",
1364 lttcomm_get_readable_code(-code
));
1367 } else if (pollfd
== consumer_data
->metadata_fd
) {
1368 if (revents
& (LPOLLERR
| LPOLLHUP
| LPOLLRDHUP
)
1369 && !(revents
& LPOLLIN
)) {
1370 ERR("consumer err metadata socket second poll error");
1373 /* UST metadata requests */
1374 ret
= ust_consumer_metadata_request(
1375 &consumer_data
->metadata_sock
);
1377 ERR("Handling metadata request");
1381 /* No need for an else branch all FDs are tested prior. */
1383 health_code_update();
1389 * We lock here because we are about to close the sockets and some other
1390 * thread might be using them so get exclusive access which will abort all
1391 * other consumer command by other threads.
1393 pthread_mutex_lock(&consumer_data
->lock
);
1395 /* Immediately set the consumerd state to stopped */
1396 if (consumer_data
->type
== LTTNG_CONSUMER_KERNEL
) {
1397 uatomic_set(&kernel_consumerd_state
, CONSUMER_ERROR
);
1398 } else if (consumer_data
->type
== LTTNG_CONSUMER64_UST
||
1399 consumer_data
->type
== LTTNG_CONSUMER32_UST
) {
1400 uatomic_set(&ust_consumerd_state
, CONSUMER_ERROR
);
1402 /* Code flow error... */
1406 if (consumer_data
->err_sock
>= 0) {
1407 ret
= close(consumer_data
->err_sock
);
1411 consumer_data
->err_sock
= -1;
1413 if (consumer_data
->cmd_sock
>= 0) {
1414 ret
= close(consumer_data
->cmd_sock
);
1418 consumer_data
->cmd_sock
= -1;
1420 if (consumer_data
->metadata_sock
.fd_ptr
&&
1421 *consumer_data
->metadata_sock
.fd_ptr
>= 0) {
1422 ret
= close(*consumer_data
->metadata_sock
.fd_ptr
);
1434 unlink(consumer_data
->err_unix_sock_path
);
1435 unlink(consumer_data
->cmd_unix_sock_path
);
1436 pthread_mutex_unlock(&consumer_data
->lock
);
1438 /* Cleanup metadata socket mutex. */
1439 if (consumer_data
->metadata_sock
.lock
) {
1440 pthread_mutex_destroy(consumer_data
->metadata_sock
.lock
);
1441 free(consumer_data
->metadata_sock
.lock
);
1443 lttng_poll_clean(&events
);
1445 if (cmd_socket_wrapper
) {
1446 consumer_destroy_socket(cmd_socket_wrapper
);
1451 ERR("Health error occurred in %s", __func__
);
1453 health_unregister(health_sessiond
);
1454 DBG("consumer thread cleanup completed");
1456 rcu_thread_offline();
1457 rcu_unregister_thread();
1463 * This thread manage application communication.
1465 static void *thread_manage_apps(void *data
)
1467 int i
, ret
, pollfd
, err
= -1;
1469 uint32_t revents
, nb_fd
;
1470 struct lttng_poll_event events
;
1472 DBG("[thread] Manage application started");
1474 rcu_register_thread();
1475 rcu_thread_online();
1477 health_register(health_sessiond
, HEALTH_SESSIOND_TYPE_APP_MANAGE
);
1479 if (testpoint(sessiond_thread_manage_apps
)) {
1480 goto error_testpoint
;
1483 health_code_update();
1485 ret
= sessiond_set_thread_pollset(&events
, 2);
1487 goto error_poll_create
;
1490 ret
= lttng_poll_add(&events
, apps_cmd_pipe
[0], LPOLLIN
| LPOLLRDHUP
);
1495 if (testpoint(sessiond_thread_manage_apps_before_loop
)) {
1499 health_code_update();
1502 DBG("Apps thread polling");
1504 /* Inifinite blocking call, waiting for transmission */
1506 health_poll_entry();
1507 ret
= lttng_poll_wait(&events
, -1);
1508 DBG("Apps thread return from poll on %d fds",
1509 LTTNG_POLL_GETNB(&events
));
1513 * Restart interrupted system call.
1515 if (errno
== EINTR
) {
1523 for (i
= 0; i
< nb_fd
; i
++) {
1524 /* Fetch once the poll data */
1525 revents
= LTTNG_POLL_GETEV(&events
, i
);
1526 pollfd
= LTTNG_POLL_GETFD(&events
, i
);
1528 health_code_update();
1531 /* No activity for this FD (poll implementation). */
1535 /* Thread quit pipe has been closed. Killing thread. */
1536 ret
= sessiond_check_thread_quit_pipe(pollfd
, revents
);
1542 /* Inspect the apps cmd pipe */
1543 if (pollfd
== apps_cmd_pipe
[0]) {
1544 if (revents
& LPOLLIN
) {
1548 size_ret
= lttng_read(apps_cmd_pipe
[0], &sock
, sizeof(sock
));
1549 if (size_ret
< sizeof(sock
)) {
1550 PERROR("read apps cmd pipe");
1554 health_code_update();
1557 * Since this is a command socket (write then read),
1558 * we only monitor the error events of the socket.
1560 ret
= lttng_poll_add(&events
, sock
,
1561 LPOLLERR
| LPOLLHUP
| LPOLLRDHUP
);
1566 DBG("Apps with sock %d added to poll set", sock
);
1567 } else if (revents
& (LPOLLERR
| LPOLLHUP
| LPOLLRDHUP
)) {
1568 ERR("Apps command pipe error");
1571 ERR("Unknown poll events %u for sock %d", revents
, pollfd
);
1576 * At this point, we know that a registered application made
1577 * the event at poll_wait.
1579 if (revents
& (LPOLLERR
| LPOLLHUP
| LPOLLRDHUP
)) {
1580 /* Removing from the poll set */
1581 ret
= lttng_poll_del(&events
, pollfd
);
1586 /* Socket closed on remote end. */
1587 ust_app_unregister(pollfd
);
1589 ERR("Unexpected poll events %u for sock %d", revents
, pollfd
);
1594 health_code_update();
1600 lttng_poll_clean(&events
);
1603 utils_close_pipe(apps_cmd_pipe
);
1604 apps_cmd_pipe
[0] = apps_cmd_pipe
[1] = -1;
1607 * We don't clean the UST app hash table here since already registered
1608 * applications can still be controlled so let them be until the session
1609 * daemon dies or the applications stop.
1614 ERR("Health error occurred in %s", __func__
);
1616 health_unregister(health_sessiond
);
1617 DBG("Application communication apps thread cleanup complete");
1618 rcu_thread_offline();
1619 rcu_unregister_thread();
1624 * Send a socket to a thread This is called from the dispatch UST registration
1625 * thread once all sockets are set for the application.
1627 * The sock value can be invalid, we don't really care, the thread will handle
1628 * it and make the necessary cleanup if so.
1630 * On success, return 0 else a negative value being the errno message of the
1633 static int send_socket_to_thread(int fd
, int sock
)
1638 * It's possible that the FD is set as invalid with -1 concurrently just
1639 * before calling this function being a shutdown state of the thread.
1646 ret
= lttng_write(fd
, &sock
, sizeof(sock
));
1647 if (ret
< sizeof(sock
)) {
1648 PERROR("write apps pipe %d", fd
);
1655 /* All good. Don't send back the write positive ret value. */
1662 * Sanitize the wait queue of the dispatch registration thread meaning removing
1663 * invalid nodes from it. This is to avoid memory leaks for the case the UST
1664 * notify socket is never received.
1666 static void sanitize_wait_queue(struct ust_reg_wait_queue
*wait_queue
)
1668 int ret
, nb_fd
= 0, i
;
1669 unsigned int fd_added
= 0;
1670 struct lttng_poll_event events
;
1671 struct ust_reg_wait_node
*wait_node
= NULL
, *tmp_wait_node
;
1675 lttng_poll_init(&events
);
1677 /* Just skip everything for an empty queue. */
1678 if (!wait_queue
->count
) {
1682 ret
= lttng_poll_create(&events
, wait_queue
->count
, LTTNG_CLOEXEC
);
1687 cds_list_for_each_entry_safe(wait_node
, tmp_wait_node
,
1688 &wait_queue
->head
, head
) {
1689 assert(wait_node
->app
);
1690 ret
= lttng_poll_add(&events
, wait_node
->app
->sock
,
1691 LPOLLHUP
| LPOLLERR
);
1704 * Poll but don't block so we can quickly identify the faulty events and
1705 * clean them afterwards from the wait queue.
1707 ret
= lttng_poll_wait(&events
, 0);
1713 for (i
= 0; i
< nb_fd
; i
++) {
1714 /* Get faulty FD. */
1715 uint32_t revents
= LTTNG_POLL_GETEV(&events
, i
);
1716 int pollfd
= LTTNG_POLL_GETFD(&events
, i
);
1719 /* No activity for this FD (poll implementation). */
1723 cds_list_for_each_entry_safe(wait_node
, tmp_wait_node
,
1724 &wait_queue
->head
, head
) {
1725 if (pollfd
== wait_node
->app
->sock
&&
1726 (revents
& (LPOLLHUP
| LPOLLERR
))) {
1727 cds_list_del(&wait_node
->head
);
1728 wait_queue
->count
--;
1729 ust_app_destroy(wait_node
->app
);
1732 * Silence warning of use-after-free in
1733 * cds_list_for_each_entry_safe which uses
1734 * __typeof__(*wait_node).
1739 ERR("Unexpected poll events %u for sock %d", revents
, pollfd
);
1746 DBG("Wait queue sanitized, %d node were cleaned up", nb_fd
);
1750 lttng_poll_clean(&events
);
1754 lttng_poll_clean(&events
);
1756 ERR("Unable to sanitize wait queue");
1761 * Dispatch request from the registration threads to the application
1762 * communication thread.
1764 static void *thread_dispatch_ust_registration(void *data
)
1767 struct cds_wfcq_node
*node
;
1768 struct ust_command
*ust_cmd
= NULL
;
1769 struct ust_reg_wait_node
*wait_node
= NULL
, *tmp_wait_node
;
1770 struct ust_reg_wait_queue wait_queue
= {
1774 rcu_register_thread();
1776 health_register(health_sessiond
, HEALTH_SESSIOND_TYPE_APP_REG_DISPATCH
);
1778 if (testpoint(sessiond_thread_app_reg_dispatch
)) {
1779 goto error_testpoint
;
1782 health_code_update();
1784 CDS_INIT_LIST_HEAD(&wait_queue
.head
);
1786 DBG("[thread] Dispatch UST command started");
1789 health_code_update();
1791 /* Atomically prepare the queue futex */
1792 futex_nto1_prepare(&ust_cmd_queue
.futex
);
1794 if (CMM_LOAD_SHARED(dispatch_thread_exit
)) {
1799 struct ust_app
*app
= NULL
;
1803 * Make sure we don't have node(s) that have hung up before receiving
1804 * the notify socket. This is to clean the list in order to avoid
1805 * memory leaks from notify socket that are never seen.
1807 sanitize_wait_queue(&wait_queue
);
1809 health_code_update();
1810 /* Dequeue command for registration */
1811 node
= cds_wfcq_dequeue_blocking(&ust_cmd_queue
.head
, &ust_cmd_queue
.tail
);
1813 DBG("Woken up but nothing in the UST command queue");
1814 /* Continue thread execution */
1818 ust_cmd
= caa_container_of(node
, struct ust_command
, node
);
1820 DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
1821 " gid:%d sock:%d name:%s (version %d.%d)",
1822 ust_cmd
->reg_msg
.pid
, ust_cmd
->reg_msg
.ppid
,
1823 ust_cmd
->reg_msg
.uid
, ust_cmd
->reg_msg
.gid
,
1824 ust_cmd
->sock
, ust_cmd
->reg_msg
.name
,
1825 ust_cmd
->reg_msg
.major
, ust_cmd
->reg_msg
.minor
);
1827 if (ust_cmd
->reg_msg
.type
== USTCTL_SOCKET_CMD
) {
1828 wait_node
= zmalloc(sizeof(*wait_node
));
1830 PERROR("zmalloc wait_node dispatch");
1831 ret
= close(ust_cmd
->sock
);
1833 PERROR("close ust sock dispatch %d", ust_cmd
->sock
);
1835 lttng_fd_put(LTTNG_FD_APPS
, 1);
1839 CDS_INIT_LIST_HEAD(&wait_node
->head
);
1841 /* Create application object if socket is CMD. */
1842 wait_node
->app
= ust_app_create(&ust_cmd
->reg_msg
,
1844 if (!wait_node
->app
) {
1845 ret
= close(ust_cmd
->sock
);
1847 PERROR("close ust sock dispatch %d", ust_cmd
->sock
);
1849 lttng_fd_put(LTTNG_FD_APPS
, 1);
1855 * Add application to the wait queue so we can set the notify
1856 * socket before putting this object in the global ht.
1858 cds_list_add(&wait_node
->head
, &wait_queue
.head
);
1863 * We have to continue here since we don't have the notify
1864 * socket and the application MUST be added to the hash table
1865 * only at that moment.
1870 * Look for the application in the local wait queue and set the
1871 * notify socket if found.
1873 cds_list_for_each_entry_safe(wait_node
, tmp_wait_node
,
1874 &wait_queue
.head
, head
) {
1875 health_code_update();
1876 if (wait_node
->app
->pid
== ust_cmd
->reg_msg
.pid
) {
1877 wait_node
->app
->notify_sock
= ust_cmd
->sock
;
1878 cds_list_del(&wait_node
->head
);
1880 app
= wait_node
->app
;
1882 DBG3("UST app notify socket %d is set", ust_cmd
->sock
);
1888 * With no application at this stage the received socket is
1889 * basically useless so close it before we free the cmd data
1890 * structure for good.
1893 ret
= close(ust_cmd
->sock
);
1895 PERROR("close ust sock dispatch %d", ust_cmd
->sock
);
1897 lttng_fd_put(LTTNG_FD_APPS
, 1);
1904 * @session_lock_list
1906 * Lock the global session list so from the register up to the
1907 * registration done message, no thread can see the application
1908 * and change its state.
1910 session_lock_list();
1914 * Add application to the global hash table. This needs to be
1915 * done before the update to the UST registry can locate the
1920 /* Set app version. This call will print an error if needed. */
1921 (void) ust_app_version(app
);
1923 /* Send notify socket through the notify pipe. */
1924 ret
= send_socket_to_thread(apps_cmd_notify_pipe
[1],
1928 session_unlock_list();
1930 * No notify thread, stop the UST tracing. However, this is
1931 * not an internal error of the this thread thus setting
1932 * the health error code to a normal exit.
1939 * Update newly registered application with the tracing
1940 * registry info already enabled information.
1942 update_ust_app(app
->sock
);
1945 * Don't care about return value. Let the manage apps threads
1946 * handle app unregistration upon socket close.
1948 (void) ust_app_register_done(app
);
1951 * Even if the application socket has been closed, send the app
1952 * to the thread and unregistration will take place at that
1955 ret
= send_socket_to_thread(apps_cmd_pipe
[1], app
->sock
);
1958 session_unlock_list();
1960 * No apps. thread, stop the UST tracing. However, this is
1961 * not an internal error of the this thread thus setting
1962 * the health error code to a normal exit.
1969 session_unlock_list();
1971 } while (node
!= NULL
);
1973 health_poll_entry();
1974 /* Futex wait on queue. Blocking call on futex() */
1975 futex_nto1_wait(&ust_cmd_queue
.futex
);
1978 /* Normal exit, no error */
1982 /* Clean up wait queue. */
1983 cds_list_for_each_entry_safe(wait_node
, tmp_wait_node
,
1984 &wait_queue
.head
, head
) {
1985 cds_list_del(&wait_node
->head
);
1990 /* Empty command queue. */
1992 /* Dequeue command for registration */
1993 node
= cds_wfcq_dequeue_blocking(&ust_cmd_queue
.head
, &ust_cmd_queue
.tail
);
1997 ust_cmd
= caa_container_of(node
, struct ust_command
, node
);
1998 ret
= close(ust_cmd
->sock
);
2000 PERROR("close ust sock exit dispatch %d", ust_cmd
->sock
);
2002 lttng_fd_put(LTTNG_FD_APPS
, 1);
2007 DBG("Dispatch thread dying");
2010 ERR("Health error occurred in %s", __func__
);
2012 health_unregister(health_sessiond
);
2013 rcu_unregister_thread();
2018 * This thread manage application registration.
2020 static void *thread_registration_apps(void *data
)
2022 int sock
= -1, i
, ret
, pollfd
, err
= -1;
2023 uint32_t revents
, nb_fd
;
2024 struct lttng_poll_event events
;
2026 * Get allocated in this thread, enqueued to a global queue, dequeued and
2027 * freed in the manage apps thread.
2029 struct ust_command
*ust_cmd
= NULL
;
2031 DBG("[thread] Manage application registration started");
2033 health_register(health_sessiond
, HEALTH_SESSIOND_TYPE_APP_REG
);
2035 if (testpoint(sessiond_thread_registration_apps
)) {
2036 goto error_testpoint
;
2039 ret
= lttcomm_listen_unix_sock(apps_sock
);
2045 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
2046 * more will be added to this poll set.
2048 ret
= sessiond_set_thread_pollset(&events
, 2);
2050 goto error_create_poll
;
2053 /* Add the application registration socket */
2054 ret
= lttng_poll_add(&events
, apps_sock
, LPOLLIN
| LPOLLRDHUP
);
2056 goto error_poll_add
;
2059 /* Notify all applications to register */
2060 ret
= notify_ust_apps(1);
2062 ERR("Failed to notify applications or create the wait shared memory.\n"
2063 "Execution continues but there might be problem for already\n"
2064 "running applications that wishes to register.");
2068 DBG("Accepting application registration");
2070 /* Inifinite blocking call, waiting for transmission */
2072 health_poll_entry();
2073 ret
= lttng_poll_wait(&events
, -1);
2077 * Restart interrupted system call.
2079 if (errno
== EINTR
) {
2087 for (i
= 0; i
< nb_fd
; i
++) {
2088 health_code_update();
2090 /* Fetch once the poll data */
2091 revents
= LTTNG_POLL_GETEV(&events
, i
);
2092 pollfd
= LTTNG_POLL_GETFD(&events
, i
);
2095 /* No activity for this FD (poll implementation). */
2099 /* Thread quit pipe has been closed. Killing thread. */
2100 ret
= sessiond_check_thread_quit_pipe(pollfd
, revents
);
2106 /* Event on the registration socket */
2107 if (pollfd
== apps_sock
) {
2108 if (revents
& LPOLLIN
) {
2109 sock
= lttcomm_accept_unix_sock(apps_sock
);
2115 * Set socket timeout for both receiving and ending.
2116 * app_socket_timeout is in seconds, whereas
2117 * lttcomm_setsockopt_rcv_timeout and
2118 * lttcomm_setsockopt_snd_timeout expect msec as
2121 if (config
.app_socket_timeout
>= 0) {
2122 (void) lttcomm_setsockopt_rcv_timeout(sock
,
2123 config
.app_socket_timeout
* 1000);
2124 (void) lttcomm_setsockopt_snd_timeout(sock
,
2125 config
.app_socket_timeout
* 1000);
2129 * Set the CLOEXEC flag. Return code is useless because
2130 * either way, the show must go on.
2132 (void) utils_set_fd_cloexec(sock
);
2134 /* Create UST registration command for enqueuing */
2135 ust_cmd
= zmalloc(sizeof(struct ust_command
));
2136 if (ust_cmd
== NULL
) {
2137 PERROR("ust command zmalloc");
2146 * Using message-based transmissions to ensure we don't
2147 * have to deal with partially received messages.
2149 ret
= lttng_fd_get(LTTNG_FD_APPS
, 1);
2151 ERR("Exhausted file descriptors allowed for applications.");
2161 health_code_update();
2162 ret
= ust_app_recv_registration(sock
, &ust_cmd
->reg_msg
);
2165 /* Close socket of the application. */
2170 lttng_fd_put(LTTNG_FD_APPS
, 1);
2174 health_code_update();
2176 ust_cmd
->sock
= sock
;
2179 DBG("UST registration received with pid:%d ppid:%d uid:%d"
2180 " gid:%d sock:%d name:%s (version %d.%d)",
2181 ust_cmd
->reg_msg
.pid
, ust_cmd
->reg_msg
.ppid
,
2182 ust_cmd
->reg_msg
.uid
, ust_cmd
->reg_msg
.gid
,
2183 ust_cmd
->sock
, ust_cmd
->reg_msg
.name
,
2184 ust_cmd
->reg_msg
.major
, ust_cmd
->reg_msg
.minor
);
2187 * Lock free enqueue the registration request. The red pill
2188 * has been taken! This apps will be part of the *system*.
2190 cds_wfcq_enqueue(&ust_cmd_queue
.head
, &ust_cmd_queue
.tail
, &ust_cmd
->node
);
2193 * Wake the registration queue futex. Implicit memory
2194 * barrier with the exchange in cds_wfcq_enqueue.
2196 futex_nto1_wake(&ust_cmd_queue
.futex
);
2197 } else if (revents
& (LPOLLERR
| LPOLLHUP
| LPOLLRDHUP
)) {
2198 ERR("Register apps socket poll error");
2201 ERR("Unexpected poll events %u for sock %d", revents
, pollfd
);
2210 /* Notify that the registration thread is gone */
2213 if (apps_sock
>= 0) {
2214 ret
= close(apps_sock
);
2224 lttng_fd_put(LTTNG_FD_APPS
, 1);
2226 unlink(config
.apps_unix_sock_path
.value
);
2229 lttng_poll_clean(&events
);
2233 DBG("UST Registration thread cleanup complete");
2236 ERR("Health error occurred in %s", __func__
);
2238 health_unregister(health_sessiond
);
2244 * Start the thread_manage_consumer. This must be done after a lttng-consumerd
2245 * exec or it will fails.
2247 static int spawn_consumer_thread(struct consumer_data
*consumer_data
)
2250 struct timespec timeout
;
2253 * Make sure we set the readiness flag to 0 because we are NOT ready.
2254 * This access to consumer_thread_is_ready does not need to be
2255 * protected by consumer_data.cond_mutex (yet) since the consumer
2256 * management thread has not been started at this point.
2258 consumer_data
->consumer_thread_is_ready
= 0;
2260 /* Setup pthread condition */
2261 ret
= pthread_condattr_init(&consumer_data
->condattr
);
2264 PERROR("pthread_condattr_init consumer data");
2269 * Set the monotonic clock in order to make sure we DO NOT jump in time
2270 * between the clock_gettime() call and the timedwait call. See bug #324
2271 * for a more details and how we noticed it.
2273 ret
= pthread_condattr_setclock(&consumer_data
->condattr
, CLOCK_MONOTONIC
);
2276 PERROR("pthread_condattr_setclock consumer data");
2280 ret
= pthread_cond_init(&consumer_data
->cond
, &consumer_data
->condattr
);
2283 PERROR("pthread_cond_init consumer data");
2287 ret
= pthread_create(&consumer_data
->thread
, default_pthread_attr(),
2288 thread_manage_consumer
, consumer_data
);
2291 PERROR("pthread_create consumer");
2296 /* We are about to wait on a pthread condition */
2297 pthread_mutex_lock(&consumer_data
->cond_mutex
);
2299 /* Get time for sem_timedwait absolute timeout */
2300 clock_ret
= lttng_clock_gettime(CLOCK_MONOTONIC
, &timeout
);
2302 * Set the timeout for the condition timed wait even if the clock gettime
2303 * call fails since we might loop on that call and we want to avoid to
2304 * increment the timeout too many times.
2306 timeout
.tv_sec
+= DEFAULT_SEM_WAIT_TIMEOUT
;
2309 * The following loop COULD be skipped in some conditions so this is why we
2310 * set ret to 0 in order to make sure at least one round of the loop is
2316 * Loop until the condition is reached or when a timeout is reached. Note
2317 * that the pthread_cond_timedwait(P) man page specifies that EINTR can NOT
2318 * be returned but the pthread_cond(3), from the glibc-doc, says that it is
2319 * possible. This loop does not take any chances and works with both of
2322 while (!consumer_data
->consumer_thread_is_ready
&& ret
!= ETIMEDOUT
) {
2323 if (clock_ret
< 0) {
2324 PERROR("clock_gettime spawn consumer");
2325 /* Infinite wait for the consumerd thread to be ready */
2326 ret
= pthread_cond_wait(&consumer_data
->cond
,
2327 &consumer_data
->cond_mutex
);
2329 ret
= pthread_cond_timedwait(&consumer_data
->cond
,
2330 &consumer_data
->cond_mutex
, &timeout
);
2334 /* Release the pthread condition */
2335 pthread_mutex_unlock(&consumer_data
->cond_mutex
);
2339 if (ret
== ETIMEDOUT
) {
2343 * Call has timed out so we kill the kconsumerd_thread and return
2346 ERR("Condition timed out. The consumer thread was never ready."
2348 pth_ret
= pthread_cancel(consumer_data
->thread
);
2350 PERROR("pthread_cancel consumer thread");
2353 PERROR("pthread_cond_wait failed consumer thread");
2355 /* Caller is expecting a negative value on failure. */
2360 pthread_mutex_lock(&consumer_data
->pid_mutex
);
2361 if (consumer_data
->pid
== 0) {
2362 ERR("Consumerd did not start");
2363 pthread_mutex_unlock(&consumer_data
->pid_mutex
);
2366 pthread_mutex_unlock(&consumer_data
->pid_mutex
);
2375 * Join consumer thread
2377 static int join_consumer_thread(struct consumer_data
*consumer_data
)
2381 /* Consumer pid must be a real one. */
2382 if (consumer_data
->pid
> 0) {
2384 ret
= kill(consumer_data
->pid
, SIGTERM
);
2386 PERROR("Error killing consumer daemon");
2389 return pthread_join(consumer_data
->thread
, &status
);
2396 * Fork and exec a consumer daemon (consumerd).
2398 * Return pid if successful else -1.
2400 static pid_t
spawn_consumerd(struct consumer_data
*consumer_data
)
2404 const char *consumer_to_use
;
2405 const char *verbosity
;
2408 DBG("Spawning consumerd");
2415 if (config
.verbose_consumer
) {
2416 verbosity
= "--verbose";
2417 } else if (lttng_opt_quiet
) {
2418 verbosity
= "--quiet";
2423 switch (consumer_data
->type
) {
2424 case LTTNG_CONSUMER_KERNEL
:
2426 * Find out which consumerd to execute. We will first try the
2427 * 64-bit path, then the sessiond's installation directory, and
2428 * fallback on the 32-bit one,
2430 DBG3("Looking for a kernel consumer at these locations:");
2431 DBG3(" 1) %s", config
.consumerd64_bin_path
.value
? : "NULL");
2432 DBG3(" 2) %s/%s", INSTALL_BIN_PATH
, DEFAULT_CONSUMERD_FILE
);
2433 DBG3(" 3) %s", config
.consumerd32_bin_path
.value
? : "NULL");
2434 if (stat(config
.consumerd64_bin_path
.value
, &st
) == 0) {
2435 DBG3("Found location #1");
2436 consumer_to_use
= config
.consumerd64_bin_path
.value
;
2437 } else if (stat(INSTALL_BIN_PATH
"/" DEFAULT_CONSUMERD_FILE
, &st
) == 0) {
2438 DBG3("Found location #2");
2439 consumer_to_use
= INSTALL_BIN_PATH
"/" DEFAULT_CONSUMERD_FILE
;
2440 } else if (config
.consumerd32_bin_path
.value
&&
2441 stat(config
.consumerd32_bin_path
.value
, &st
) == 0) {
2442 DBG3("Found location #3");
2443 consumer_to_use
= config
.consumerd32_bin_path
.value
;
2445 DBG("Could not find any valid consumerd executable");
2449 DBG("Using kernel consumer at: %s", consumer_to_use
);
2450 (void) execl(consumer_to_use
,
2451 "lttng-consumerd", verbosity
, "-k",
2452 "--consumerd-cmd-sock", consumer_data
->cmd_unix_sock_path
,
2453 "--consumerd-err-sock", consumer_data
->err_unix_sock_path
,
2454 "--group", config
.tracing_group_name
.value
,
        case LTTNG_CONSUMER64_UST:
        {
            char *tmpnew = NULL;

            if (config.consumerd64_lib_dir.value) {
                char *tmp;
                size_t tmplen;

                tmp = lttng_secure_getenv("LD_LIBRARY_PATH");
                if (!tmp) {
                    tmp = "";
                }
                tmplen = strlen(config.consumerd64_lib_dir.value) +
                        1 /* : */ + strlen(tmp);
                tmpnew = zmalloc(tmplen + 1 /* \0 */);
                if (tmpnew == NULL) {
                    ret = -ENOMEM;
                    goto error;
                }
                strcat(tmpnew, config.consumerd64_lib_dir.value);
                if (tmp[0] != '\0') {
                    strcat(tmpnew, ":");
                    strcat(tmpnew, tmp);
                }
                ret = setenv("LD_LIBRARY_PATH", tmpnew, 1);
                free(tmpnew);
                if (ret) {
                    ret = -errno;
                    goto error;
                }
            }
            DBG("Using 64-bit UST consumer at: %s",
                    config.consumerd64_bin_path.value);
            (void) execl(config.consumerd64_bin_path.value,
                    "lttng-consumerd", verbosity, "-u",
                    "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
                    "--consumerd-err-sock", consumer_data->err_unix_sock_path,
                    "--group", config.tracing_group_name.value,
                    NULL);
            break;
        }
        case LTTNG_CONSUMER32_UST:
        {
            char *tmpnew = NULL;

            if (config.consumerd32_lib_dir.value) {
                char *tmp;
                size_t tmplen;

                tmp = lttng_secure_getenv("LD_LIBRARY_PATH");
                if (!tmp) {
                    tmp = "";
                }
                tmplen = strlen(config.consumerd32_lib_dir.value) +
                        1 /* : */ + strlen(tmp);
                tmpnew = zmalloc(tmplen + 1 /* \0 */);
                if (tmpnew == NULL) {
                    ret = -ENOMEM;
                    goto error;
                }
                strcat(tmpnew, config.consumerd32_lib_dir.value);
                if (tmp[0] != '\0') {
                    strcat(tmpnew, ":");
                    strcat(tmpnew, tmp);
                }
                ret = setenv("LD_LIBRARY_PATH", tmpnew, 1);
                free(tmpnew);
                if (ret) {
                    ret = -errno;
                    goto error;
                }
            }
            DBG("Using 32-bit UST consumer at: %s",
                    config.consumerd32_bin_path.value);
            (void) execl(config.consumerd32_bin_path.value,
                    "lttng-consumerd", verbosity, "-u",
                    "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
                    "--consumerd-err-sock", consumer_data->err_unix_sock_path,
                    "--group", config.tracing_group_name.value,
                    NULL);
            break;
        }
        default:
            ERR("unknown consumer type");
            errno = 0;
            break;
        }
        if (errno != 0) {
            PERROR("Consumer execl()");
        }
        /* Reaching this point, we got a failure on our execl(). */
        exit(EXIT_FAILURE);
    } else if (pid > 0) {
        ret = pid;
    } else {
        PERROR("start consumer fork");
        ret = -errno;
    }

error:
    return ret;
}
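/*
 * Illustrative sketch, not part of the daemon: the fork()/execl() skeleton
 * that spawn_consumerd() above is built on.  The helper name and argument
 * list are hypothetical; the point is that execl() only returns on failure,
 * so any code after it runs in a child whose exec failed and that must exit.
 * Assumes <unistd.h> and <stdlib.h>.
 */
static pid_t example_spawn(const char *binary_path, const char *cmd_sock_path)
{
    pid_t pid;

    pid = fork();
    if (pid == 0) {
        /* Child: replace the process image; on success this never returns. */
        (void) execl(binary_path, "lttng-consumerd",
                "--consumerd-cmd-sock", cmd_sock_path,
                (char *) NULL);
        /* Only reached when execl() failed. */
        PERROR("execl");
        _exit(EXIT_FAILURE);
    } else if (pid > 0) {
        /* Parent: the pid is the handle later used to signal or reap the child. */
        return pid;
    }
    /* fork() itself failed. */
    PERROR("fork");
    return -1;
}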
/*
 * Spawn the consumerd daemon and session daemon thread.
 */
static int start_consumerd(struct consumer_data *consumer_data)
{
    int ret;

    /*
     * Set the listen() state on the socket since there is a possible race
     * between the exec() of the consumer daemon and this call if placed in
     * the consumer thread. See bug #366 for more details.
     */
    ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
    if (ret < 0) {
        goto error;
    }

    pthread_mutex_lock(&consumer_data->pid_mutex);
    if (consumer_data->pid != 0) {
        pthread_mutex_unlock(&consumer_data->pid_mutex);
        goto end;
    }

    ret = spawn_consumerd(consumer_data);
    if (ret < 0) {
        ERR("Spawning consumerd failed");
        pthread_mutex_unlock(&consumer_data->pid_mutex);
        goto error;
    }

    /* Setting up the consumer_data pid */
    consumer_data->pid = ret;
    DBG2("Consumer pid %d", consumer_data->pid);
    pthread_mutex_unlock(&consumer_data->pid_mutex);

    DBG2("Spawning consumer control thread");
    ret = spawn_consumer_thread(consumer_data);
    if (ret < 0) {
        ERR("Fatal error spawning consumer control thread");
        goto error;
    }

end:
    return 0;

error:
    /* Cleanup already created sockets on error. */
    if (consumer_data->err_sock >= 0) {
        int err;

        err = close(consumer_data->err_sock);
        if (err < 0) {
            PERROR("close consumer data error socket");
        }
    }
    return ret;
}
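/*
 * Illustrative sketch, not part of the daemon: why start_consumerd() puts the
 * error socket in the listen() state before spawning the consumer.  Once a
 * socket is listening, a peer's connect() is queued in the backlog even if
 * accept() has not been called yet, which closes the race described in the
 * comment above (bug #366).  Names are hypothetical; assumes <sys/socket.h>,
 * <sys/un.h>, <string.h> and <unistd.h>.
 */
static int example_listen_before_spawn(const char *sock_path)
{
    int fd;
    struct sockaddr_un addr;

    fd = socket(AF_UNIX, SOCK_STREAM, 0);
    if (fd < 0) {
        return -1;
    }

    memset(&addr, 0, sizeof(addr));
    addr.sun_family = AF_UNIX;
    strncpy(addr.sun_path, sock_path, sizeof(addr.sun_path) - 1);

    if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0 ||
            listen(fd, 10) < 0) {
        close(fd);
        return -1;
    }

    /*
     * Only now is it safe to fork()/exec() the peer: its connect() will
     * succeed (or queue) regardless of when this process reaches accept().
     */
    return fd;
}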
/*
 * Setup necessary data for kernel tracer action.
 */
static int init_kernel_tracer(void)
{
    int ret;

    /* Modprobe lttng kernel modules */
    ret = modprobe_lttng_control();
    if (ret < 0) {
        goto error;
    }

    /* Open debugfs lttng */
    kernel_tracer_fd = open(module_proc_lttng, O_RDWR);
    if (kernel_tracer_fd < 0) {
        DBG("Failed to open %s", module_proc_lttng);
        goto error_open;
    }

    /* Validate kernel version */
    ret = kernel_validate_version(kernel_tracer_fd);
    if (ret < 0) {
        goto error_version;
    }

    ret = modprobe_lttng_data();
    if (ret < 0) {
        goto error_modules;
    }

    ret = kernel_supports_ring_buffer_snapshot_sample_positions(
            kernel_tracer_fd);
    if (ret < 0) {
        goto error_modules;
    }
    if (ret < 1) {
        WARN("Kernel tracer does not support buffer monitoring. "
                "The monitoring timer of channels in the kernel domain "
                "will be set to 0 (disabled).");
    }

    DBG("Kernel tracer fd %d", kernel_tracer_fd);
    return 0;

error_version:
    modprobe_remove_lttng_control();
    ret = close(kernel_tracer_fd);
    kernel_tracer_fd = -1;
    return LTTNG_ERR_KERN_VERSION;

error_modules:
    ret = close(kernel_tracer_fd);

error_open:
    modprobe_remove_lttng_control();

error:
    WARN("No kernel tracer available");
    kernel_tracer_fd = -1;
    if (!is_root) {
        return LTTNG_ERR_NEED_ROOT_SESSIOND;
    } else {
        return LTTNG_ERR_KERN_NA;
    }
}
/*
 * Copy consumer output from the tracing session to the domain session. The
 * function also applies the right modification on a per domain basis for the
 * trace files destination directory.
 *
 * Should *NOT* be called with RCU read-side lock held.
 */
static int copy_session_consumer(int domain, struct ltt_session *session)
{
    int ret;
    const char *dir_name;
    struct consumer_output *consumer;

    assert(session->consumer);

    switch (domain) {
    case LTTNG_DOMAIN_KERNEL:
        DBG3("Copying tracing session consumer output in kernel session");
        /*
         * XXX: We should audit the session creation and what this function
         * does "extra" in order to avoid a destroy since this function is used
         * in the domain session creation (kernel and ust) only. Same for UST
         * domain.
         */
        if (session->kernel_session->consumer) {
            consumer_output_put(session->kernel_session->consumer);
        }
        session->kernel_session->consumer =
                consumer_copy_output(session->consumer);
        /* Ease our life a bit for the next part */
        consumer = session->kernel_session->consumer;
        dir_name = DEFAULT_KERNEL_TRACE_DIR;
        break;
    case LTTNG_DOMAIN_JUL:
    case LTTNG_DOMAIN_LOG4J:
    case LTTNG_DOMAIN_PYTHON:
    case LTTNG_DOMAIN_UST:
        DBG3("Copying tracing session consumer output in UST session");
        if (session->ust_session->consumer) {
            consumer_output_put(session->ust_session->consumer);
        }
        session->ust_session->consumer =
                consumer_copy_output(session->consumer);
        /* Ease our life a bit for the next part */
        consumer = session->ust_session->consumer;
        dir_name = DEFAULT_UST_TRACE_DIR;
        break;
    default:
        ret = LTTNG_ERR_UNKNOWN_DOMAIN;
        goto error;
    }

    /* Append correct directory to subdir */
    strncat(consumer->subdir, dir_name,
            sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
    DBG3("Copy session consumer subdir %s", consumer->subdir);

    ret = LTTNG_OK;

error:
    return ret;
}
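/*
 * Illustrative sketch, not part of the daemon: the bounded-append idiom used
 * just above when extending consumer->subdir.  strncat()'s size argument is
 * the remaining room in the destination, not its total size, hence the
 * "sizeof(dst) - strlen(dst) - 1" computation that always preserves the
 * terminating NUL.  The helper below is hypothetical.
 */
static void example_bounded_append(char *dst, size_t dst_size, const char *src)
{
    size_t used = strlen(dst);

    if (used + 1 >= dst_size) {
        /* Destination already full: nothing can be appended. */
        return;
    }
    /* Remaining room excludes the existing content and the trailing '\0'. */
    strncat(dst, src, dst_size - used - 1);
}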
/*
 * Create an UST session and add it to the session ust list.
 *
 * Should *NOT* be called with RCU read-side lock held.
 */
static int create_ust_session(struct ltt_session *session,
        struct lttng_domain *domain)
{
    int ret;
    struct ltt_ust_session *lus = NULL;

    assert(session->consumer);

    switch (domain->type) {
    case LTTNG_DOMAIN_JUL:
    case LTTNG_DOMAIN_LOG4J:
    case LTTNG_DOMAIN_PYTHON:
    case LTTNG_DOMAIN_UST:
        break;
    default:
        ERR("Unknown UST domain on create session %d", domain->type);
        ret = LTTNG_ERR_UNKNOWN_DOMAIN;
        goto error;
    }

    DBG("Creating UST session");

    lus = trace_ust_create_session(session->id);
    if (lus == NULL) {
        ret = LTTNG_ERR_UST_SESS_FAIL;
        goto error;
    }

    lus->uid = session->uid;
    lus->gid = session->gid;
    lus->output_traces = session->output_traces;
    lus->snapshot_mode = session->snapshot_mode;
    lus->live_timer_interval = session->live_timer;
    session->ust_session = lus;
    if (session->shm_path[0]) {
        strncpy(lus->root_shm_path, session->shm_path,
                sizeof(lus->root_shm_path));
        lus->root_shm_path[sizeof(lus->root_shm_path) - 1] = '\0';
        strncpy(lus->shm_path, session->shm_path,
                sizeof(lus->shm_path));
        lus->shm_path[sizeof(lus->shm_path) - 1] = '\0';
        strncat(lus->shm_path, "/ust",
                sizeof(lus->shm_path) - strlen(lus->shm_path) - 1);
    }
    /* Copy session output to the newly created UST session */
    ret = copy_session_consumer(domain->type, session);
    if (ret != LTTNG_OK) {
        goto error;
    }

    return LTTNG_OK;

error:
    free(lus);
    session->ust_session = NULL;
    return ret;
}
/*
 * Create a kernel tracer session then create the default channel.
 */
static int create_kernel_session(struct ltt_session *session)
{
    int ret;

    DBG("Creating kernel session");

    ret = kernel_create_session(session, kernel_tracer_fd);
    if (ret < 0) {
        ret = LTTNG_ERR_KERN_SESS_FAIL;
        goto error;
    }

    /* Code flow safety */
    assert(session->kernel_session);

    /* Copy session output to the newly created Kernel session */
    ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
    if (ret != LTTNG_OK) {
        goto error;
    }

    /* Create directory(ies) on local filesystem. */
    if (session->kernel_session->consumer->type == CONSUMER_DST_LOCAL &&
            strlen(session->kernel_session->consumer->dst.trace_path) > 0) {
        ret = run_as_mkdir_recursive(
                session->kernel_session->consumer->dst.trace_path,
                S_IRWXU | S_IRWXG, session->uid, session->gid);
        if (ret < 0) {
            if (errno != EEXIST) {
                ERR("Trace directory creation error");
                goto error;
            }
        }
    }

    session->kernel_session->uid = session->uid;
    session->kernel_session->gid = session->gid;
    session->kernel_session->output_traces = session->output_traces;
    session->kernel_session->snapshot_mode = session->snapshot_mode;

    return LTTNG_OK;

error:
    trace_kernel_destroy_session(session->kernel_session);
    session->kernel_session = NULL;
    return ret;
}
/*
 * Count number of sessions permitted by uid/gid.
 */
static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
{
    unsigned int i = 0;
    struct ltt_session *session;

    DBG("Counting number of available session for UID %d GID %d",
            uid, gid);
    cds_list_for_each_entry(session, &session_list_ptr->head, list) {
        /*
         * Only list the sessions the user can control.
         */
        if (!session_access_ok(session, uid, gid)) {
            continue;
        }
        i++;
    }
    return i;
}
/*
 * Process the command requested by the lttng client within the command
 * context structure. This function makes sure that the return structure (llm)
 * is set and ready for transmission before returning.
 *
 * Return any error encountered or 0 for success.
 *
 * "sock" is only used for special-case var. len data.
 *
 * Should *NOT* be called with RCU read-side lock held.
 */
static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
        int *sock_error)
{
    int ret = LTTNG_OK;
    int need_tracing_session = 1;
    int need_domain;
2907 DBG("Processing client command %d", cmd_ctx
->lsm
->cmd_type
);
2909 assert(!rcu_read_ongoing());
2913 switch (cmd_ctx
->lsm
->cmd_type
) {
2914 case LTTNG_CREATE_SESSION
:
2915 case LTTNG_CREATE_SESSION_SNAPSHOT
:
2916 case LTTNG_CREATE_SESSION_LIVE
:
2917 case LTTNG_DESTROY_SESSION
:
2918 case LTTNG_LIST_SESSIONS
:
2919 case LTTNG_LIST_DOMAINS
:
2920 case LTTNG_START_TRACE
:
2921 case LTTNG_STOP_TRACE
:
2922 case LTTNG_DATA_PENDING
:
2923 case LTTNG_SNAPSHOT_ADD_OUTPUT
:
2924 case LTTNG_SNAPSHOT_DEL_OUTPUT
:
2925 case LTTNG_SNAPSHOT_LIST_OUTPUT
:
2926 case LTTNG_SNAPSHOT_RECORD
:
2927 case LTTNG_SAVE_SESSION
:
2928 case LTTNG_SET_SESSION_SHM_PATH
:
2929 case LTTNG_REGENERATE_METADATA
:
2930 case LTTNG_REGENERATE_STATEDUMP
:
2931 case LTTNG_REGISTER_TRIGGER
:
2932 case LTTNG_UNREGISTER_TRIGGER
:
2933 case LTTNG_CLEAR_SESSION
:
2940 if (config
.no_kernel
&& need_domain
2941 && cmd_ctx
->lsm
->domain
.type
== LTTNG_DOMAIN_KERNEL
) {
2943 ret
= LTTNG_ERR_NEED_ROOT_SESSIOND
;
2945 ret
= LTTNG_ERR_KERN_NA
;
2950 /* Deny register consumer if we already have a spawned consumer. */
2951 if (cmd_ctx
->lsm
->cmd_type
== LTTNG_REGISTER_CONSUMER
) {
2952 pthread_mutex_lock(&kconsumer_data
.pid_mutex
);
2953 if (kconsumer_data
.pid
> 0) {
2954 ret
= LTTNG_ERR_KERN_CONSUMER_FAIL
;
2955 pthread_mutex_unlock(&kconsumer_data
.pid_mutex
);
2958 pthread_mutex_unlock(&kconsumer_data
.pid_mutex
);
    /*
     * Check for commands that do not need to allocate a returned payload.
     * We do this here so we don't have to make the call for no payload at
     * each command.
     */
    switch(cmd_ctx->lsm->cmd_type) {
2967 case LTTNG_LIST_SESSIONS
:
2968 case LTTNG_LIST_TRACEPOINTS
:
2969 case LTTNG_LIST_TRACEPOINT_FIELDS
:
2970 case LTTNG_LIST_DOMAINS
:
2971 case LTTNG_LIST_CHANNELS
:
2972 case LTTNG_LIST_EVENTS
:
2973 case LTTNG_LIST_SYSCALLS
:
2974 case LTTNG_LIST_TRACKER_PIDS
:
2975 case LTTNG_DATA_PENDING
:
2978 /* Setup lttng message with no payload */
2979 ret
= setup_lttng_msg_no_cmd_header(cmd_ctx
, NULL
, 0);
2981 /* This label does not try to unlock the session */
2982 goto init_setup_error
;
2986 /* Commands that DO NOT need a session. */
2987 switch (cmd_ctx
->lsm
->cmd_type
) {
2988 case LTTNG_CREATE_SESSION
:
2989 case LTTNG_CREATE_SESSION_SNAPSHOT
:
2990 case LTTNG_CREATE_SESSION_LIVE
:
2991 case LTTNG_LIST_SESSIONS
:
2992 case LTTNG_LIST_TRACEPOINTS
:
2993 case LTTNG_LIST_SYSCALLS
:
2994 case LTTNG_LIST_TRACEPOINT_FIELDS
:
2995 case LTTNG_SAVE_SESSION
:
2996 case LTTNG_REGISTER_TRIGGER
:
2997 case LTTNG_UNREGISTER_TRIGGER
:
2998 need_tracing_session
= 0;
3001 DBG("Getting session %s by name", cmd_ctx
->lsm
->session
.name
);
3003 * We keep the session list lock across _all_ commands
3004 * for now, because the per-session lock does not
3005 * handle teardown properly.
3007 session_lock_list();
3008 cmd_ctx
->session
= session_find_by_name(cmd_ctx
->lsm
->session
.name
);
3009 if (cmd_ctx
->session
== NULL
) {
3010 ret
= LTTNG_ERR_SESS_NOT_FOUND
;
3013 /* Acquire lock for the session */
3014 session_lock(cmd_ctx
->session
);
    /*
     * Commands that need a valid session but should NOT create one if none
     * exists. Instead of creating one and destroying it when the command is
     * handled, process that right before so we save some round trips in
     * useless code paths.
     */
    switch (cmd_ctx->lsm->cmd_type) {
3026 case LTTNG_DISABLE_CHANNEL
:
3027 case LTTNG_DISABLE_EVENT
:
3028 switch (cmd_ctx
->lsm
->domain
.type
) {
3029 case LTTNG_DOMAIN_KERNEL
:
3030 if (!cmd_ctx
->session
->kernel_session
) {
3031 ret
= LTTNG_ERR_NO_CHANNEL
;
3035 case LTTNG_DOMAIN_JUL
:
3036 case LTTNG_DOMAIN_LOG4J
:
3037 case LTTNG_DOMAIN_PYTHON
:
3038 case LTTNG_DOMAIN_UST
:
3039 if (!cmd_ctx
->session
->ust_session
) {
3040 ret
= LTTNG_ERR_NO_CHANNEL
;
3045 ret
= LTTNG_ERR_UNKNOWN_DOMAIN
;
3057 * Check domain type for specific "pre-action".
3059 switch (cmd_ctx
->lsm
->domain
.type
) {
3060 case LTTNG_DOMAIN_KERNEL
:
3062 ret
= LTTNG_ERR_NEED_ROOT_SESSIOND
;
3066 /* Kernel tracer check */
3067 if (kernel_tracer_fd
== -1) {
3068 /* Basically, load kernel tracer modules */
3069 ret
= init_kernel_tracer();
3075 /* Consumer is in an ERROR state. Report back to client */
3076 if (uatomic_read(&kernel_consumerd_state
) == CONSUMER_ERROR
) {
3077 ret
= LTTNG_ERR_NO_KERNCONSUMERD
;
3081 /* Need a session for kernel command */
3082 if (need_tracing_session
) {
3083 if (cmd_ctx
->session
->kernel_session
== NULL
) {
3084 ret
= create_kernel_session(cmd_ctx
->session
);
3086 ret
= LTTNG_ERR_KERN_SESS_FAIL
;
3091 /* Start the kernel consumer daemon */
3092 pthread_mutex_lock(&kconsumer_data
.pid_mutex
);
3093 if (kconsumer_data
.pid
== 0 &&
3094 cmd_ctx
->lsm
->cmd_type
!= LTTNG_REGISTER_CONSUMER
) {
3095 pthread_mutex_unlock(&kconsumer_data
.pid_mutex
);
3096 ret
= start_consumerd(&kconsumer_data
);
3098 ret
= LTTNG_ERR_KERN_CONSUMER_FAIL
;
3101 uatomic_set(&kernel_consumerd_state
, CONSUMER_STARTED
);
3103 pthread_mutex_unlock(&kconsumer_data
.pid_mutex
);
            /*
             * The consumer was just spawned, so we need to add the socket to
             * the consumer output of the session if it exists.
             */
3110 ret
= consumer_create_socket(&kconsumer_data
,
3111 cmd_ctx
->session
->kernel_session
->consumer
);
3118 case LTTNG_DOMAIN_JUL
:
3119 case LTTNG_DOMAIN_LOG4J
:
3120 case LTTNG_DOMAIN_PYTHON
:
3121 case LTTNG_DOMAIN_UST
:
3123 if (!ust_app_supported()) {
3124 ret
= LTTNG_ERR_NO_UST
;
3127 /* Consumer is in an ERROR state. Report back to client */
3128 if (uatomic_read(&ust_consumerd_state
) == CONSUMER_ERROR
) {
3129 ret
= LTTNG_ERR_NO_USTCONSUMERD
;
3133 if (need_tracing_session
) {
3134 /* Create UST session if none exist. */
3135 if (cmd_ctx
->session
->ust_session
== NULL
) {
3136 ret
= create_ust_session(cmd_ctx
->session
,
3137 &cmd_ctx
->lsm
->domain
);
3138 if (ret
!= LTTNG_OK
) {
3143 /* Start the UST consumer daemons */
3145 pthread_mutex_lock(&ustconsumer64_data
.pid_mutex
);
3146 if (config
.consumerd64_bin_path
.value
&&
3147 ustconsumer64_data
.pid
== 0 &&
3148 cmd_ctx
->lsm
->cmd_type
!= LTTNG_REGISTER_CONSUMER
) {
3149 pthread_mutex_unlock(&ustconsumer64_data
.pid_mutex
);
3150 ret
= start_consumerd(&ustconsumer64_data
);
3152 ret
= LTTNG_ERR_UST_CONSUMER64_FAIL
;
3153 uatomic_set(&ust_consumerd64_fd
, -EINVAL
);
3157 uatomic_set(&ust_consumerd64_fd
, ustconsumer64_data
.cmd_sock
);
3158 uatomic_set(&ust_consumerd_state
, CONSUMER_STARTED
);
3160 pthread_mutex_unlock(&ustconsumer64_data
.pid_mutex
);
3164 * Setup socket for consumer 64 bit. No need for atomic access
3165 * since it was set above and can ONLY be set in this thread.
3167 ret
= consumer_create_socket(&ustconsumer64_data
,
3168 cmd_ctx
->session
->ust_session
->consumer
);
3174 pthread_mutex_lock(&ustconsumer32_data
.pid_mutex
);
3175 if (config
.consumerd32_bin_path
.value
&&
3176 ustconsumer32_data
.pid
== 0 &&
3177 cmd_ctx
->lsm
->cmd_type
!= LTTNG_REGISTER_CONSUMER
) {
3178 pthread_mutex_unlock(&ustconsumer32_data
.pid_mutex
);
3179 ret
= start_consumerd(&ustconsumer32_data
);
3181 ret
= LTTNG_ERR_UST_CONSUMER32_FAIL
;
3182 uatomic_set(&ust_consumerd32_fd
, -EINVAL
);
3186 uatomic_set(&ust_consumerd32_fd
, ustconsumer32_data
.cmd_sock
);
3187 uatomic_set(&ust_consumerd_state
, CONSUMER_STARTED
);
3189 pthread_mutex_unlock(&ustconsumer32_data
.pid_mutex
);
            /*
             * Setup socket for consumer 32 bit. No need for atomic access
             * since it was set above and can ONLY be set in this thread.
             */
3196 ret
= consumer_create_socket(&ustconsumer32_data
,
3197 cmd_ctx
->session
->ust_session
->consumer
);
3209 /* Validate consumer daemon state when start/stop trace command */
3210 if (cmd_ctx
->lsm
->cmd_type
== LTTNG_START_TRACE
||
3211 cmd_ctx
->lsm
->cmd_type
== LTTNG_STOP_TRACE
) {
3212 switch (cmd_ctx
->lsm
->domain
.type
) {
3213 case LTTNG_DOMAIN_NONE
:
3215 case LTTNG_DOMAIN_JUL
:
3216 case LTTNG_DOMAIN_LOG4J
:
3217 case LTTNG_DOMAIN_PYTHON
:
3218 case LTTNG_DOMAIN_UST
:
3219 if (uatomic_read(&ust_consumerd_state
) != CONSUMER_STARTED
) {
3220 ret
= LTTNG_ERR_NO_USTCONSUMERD
;
3224 case LTTNG_DOMAIN_KERNEL
:
3225 if (uatomic_read(&kernel_consumerd_state
) != CONSUMER_STARTED
) {
3226 ret
= LTTNG_ERR_NO_KERNCONSUMERD
;
3231 ret
= LTTNG_ERR_UNKNOWN_DOMAIN
;
3237 * Check that the UID or GID match that of the tracing session.
3238 * The root user can interact with all sessions.
3240 if (need_tracing_session
) {
3241 if (!session_access_ok(cmd_ctx
->session
,
3242 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx
->creds
),
3243 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx
->creds
))) {
3244 ret
= LTTNG_ERR_EPERM
;
3250 * Send relayd information to consumer as soon as we have a domain and a
3253 if (cmd_ctx
->session
&& need_domain
) {
3255 * Setup relayd if not done yet. If the relayd information was already
3256 * sent to the consumer, this call will gracefully return.
3258 ret
= cmd_setup_relayd(cmd_ctx
->session
);
3259 if (ret
!= LTTNG_OK
) {
3264 /* Process by command type */
3265 switch (cmd_ctx
->lsm
->cmd_type
) {
3266 case LTTNG_ADD_CONTEXT
:
3269 * An LTTNG_ADD_CONTEXT command might have a supplementary
3270 * payload if the context being added is an application context.
3272 if (cmd_ctx
->lsm
->u
.context
.ctx
.ctx
==
3273 LTTNG_EVENT_CONTEXT_APP_CONTEXT
) {
3274 char *provider_name
= NULL
, *context_name
= NULL
;
3275 size_t provider_name_len
=
3276 cmd_ctx
->lsm
->u
.context
.provider_name_len
;
3277 size_t context_name_len
=
3278 cmd_ctx
->lsm
->u
.context
.context_name_len
;
3280 if (provider_name_len
== 0 || context_name_len
== 0) {
3282 * Application provider and context names MUST
3285 ret
= -LTTNG_ERR_INVALID
;
3289 provider_name
= zmalloc(provider_name_len
+ 1);
3290 if (!provider_name
) {
3291 ret
= -LTTNG_ERR_NOMEM
;
3294 cmd_ctx
->lsm
->u
.context
.ctx
.u
.app_ctx
.provider_name
=
3297 context_name
= zmalloc(context_name_len
+ 1);
3298 if (!context_name
) {
3299 ret
= -LTTNG_ERR_NOMEM
;
3300 goto error_add_context
;
3302 cmd_ctx
->lsm
->u
.context
.ctx
.u
.app_ctx
.ctx_name
=
3305 ret
= lttcomm_recv_unix_sock(sock
, provider_name
,
3308 goto error_add_context
;
3311 ret
= lttcomm_recv_unix_sock(sock
, context_name
,
3314 goto error_add_context
;
3319 * cmd_add_context assumes ownership of the provider and context
3322 ret
= cmd_add_context(cmd_ctx
->session
,
3323 cmd_ctx
->lsm
->domain
.type
,
3324 cmd_ctx
->lsm
->u
.context
.channel_name
,
3325 &cmd_ctx
->lsm
->u
.context
.ctx
,
3326 kernel_poll_pipe
[1]);
3328 cmd_ctx
->lsm
->u
.context
.ctx
.u
.app_ctx
.provider_name
= NULL
;
3329 cmd_ctx
->lsm
->u
.context
.ctx
.u
.app_ctx
.ctx_name
= NULL
;
3331 free(cmd_ctx
->lsm
->u
.context
.ctx
.u
.app_ctx
.provider_name
);
3332 free(cmd_ctx
->lsm
->u
.context
.ctx
.u
.app_ctx
.ctx_name
);
3338 case LTTNG_DISABLE_CHANNEL
:
3340 ret
= cmd_disable_channel(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
3341 cmd_ctx
->lsm
->u
.disable
.channel_name
);
3344 case LTTNG_DISABLE_EVENT
:
3348 * FIXME: handle filter; for now we just receive the filter's
3349 * bytecode along with the filter expression which are sent by
3350 * liblttng-ctl and discard them.
3352 * This fixes an issue where the client may block while sending
3353 * the filter payload and encounter an error because the session
3354 * daemon closes the socket without ever handling this data.
3356 size_t count
= cmd_ctx
->lsm
->u
.disable
.expression_len
+
3357 cmd_ctx
->lsm
->u
.disable
.bytecode_len
;
3360 char data
[LTTNG_FILTER_MAX_LEN
];
3362 DBG("Discarding disable event command payload of size %zu", count
);
3364 ret
= lttcomm_recv_unix_sock(sock
, data
,
3365 count
> sizeof(data
) ? sizeof(data
) : count
);
3370 count
-= (size_t) ret
;
3373 /* FIXME: passing packed structure to non-packed pointer */
3374 ret
= cmd_disable_event(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
3375 cmd_ctx
->lsm
->u
.disable
.channel_name
,
3376 &cmd_ctx
->lsm
->u
.disable
.event
);
3379 case LTTNG_ENABLE_CHANNEL
:
3381 cmd_ctx
->lsm
->u
.channel
.chan
.attr
.extended
.ptr
=
3382 (struct lttng_channel_extended
*) &cmd_ctx
->lsm
->u
.channel
.extended
;
3383 ret
= cmd_enable_channel(cmd_ctx
->session
, &cmd_ctx
->lsm
->domain
,
3384 &cmd_ctx
->lsm
->u
.channel
.chan
,
3385 kernel_poll_pipe
[1]);
3388 case LTTNG_TRACK_PID
:
3390 ret
= cmd_track_pid(cmd_ctx
->session
,
3391 cmd_ctx
->lsm
->domain
.type
,
3392 cmd_ctx
->lsm
->u
.pid_tracker
.pid
);
3395 case LTTNG_UNTRACK_PID
:
3397 ret
= cmd_untrack_pid(cmd_ctx
->session
,
3398 cmd_ctx
->lsm
->domain
.type
,
3399 cmd_ctx
->lsm
->u
.pid_tracker
.pid
);
3402 case LTTNG_ENABLE_EVENT
:
3404 struct lttng_event_exclusion
*exclusion
= NULL
;
3405 struct lttng_filter_bytecode
*bytecode
= NULL
;
3406 char *filter_expression
= NULL
;
3408 /* Handle exclusion events and receive it from the client. */
3409 if (cmd_ctx
->lsm
->u
.enable
.exclusion_count
> 0) {
3410 size_t count
= cmd_ctx
->lsm
->u
.enable
.exclusion_count
;
3412 exclusion
= zmalloc(sizeof(struct lttng_event_exclusion
) +
3413 (count
* LTTNG_SYMBOL_NAME_LEN
));
3415 ret
= LTTNG_ERR_EXCLUSION_NOMEM
;
3419 DBG("Receiving var len exclusion event list from client ...");
3420 exclusion
->count
= count
;
3421 ret
= lttcomm_recv_unix_sock(sock
, exclusion
->names
,
3422 count
* LTTNG_SYMBOL_NAME_LEN
);
3424 DBG("Nothing recv() from client var len data... continuing");
3427 ret
= LTTNG_ERR_EXCLUSION_INVAL
;
3432 /* Get filter expression from client. */
3433 if (cmd_ctx
->lsm
->u
.enable
.expression_len
> 0) {
3434 size_t expression_len
=
3435 cmd_ctx
->lsm
->u
.enable
.expression_len
;
3437 if (expression_len
> LTTNG_FILTER_MAX_LEN
) {
3438 ret
= LTTNG_ERR_FILTER_INVAL
;
3443 filter_expression
= zmalloc(expression_len
);
3444 if (!filter_expression
) {
3446 ret
= LTTNG_ERR_FILTER_NOMEM
;
3450 /* Receive var. len. data */
3451 DBG("Receiving var len filter's expression from client ...");
3452 ret
= lttcomm_recv_unix_sock(sock
, filter_expression
,
3455 DBG("Nothing recv() from client var len data... continuing");
3457 free(filter_expression
);
3459 ret
= LTTNG_ERR_FILTER_INVAL
;
3464 /* Handle filter and get bytecode from client. */
3465 if (cmd_ctx
->lsm
->u
.enable
.bytecode_len
> 0) {
3466 size_t bytecode_len
= cmd_ctx
->lsm
->u
.enable
.bytecode_len
;
3468 if (bytecode_len
> LTTNG_FILTER_MAX_LEN
) {
3469 ret
= LTTNG_ERR_FILTER_INVAL
;
3470 free(filter_expression
);
3475 bytecode
= zmalloc(bytecode_len
);
3477 free(filter_expression
);
3479 ret
= LTTNG_ERR_FILTER_NOMEM
;
3483 /* Receive var. len. data */
3484 DBG("Receiving var len filter's bytecode from client ...");
3485 ret
= lttcomm_recv_unix_sock(sock
, bytecode
, bytecode_len
);
3487 DBG("Nothing recv() from client var len data... continuing");
3489 free(filter_expression
);
3492 ret
= LTTNG_ERR_FILTER_INVAL
;
3496 if ((bytecode
->len
+ sizeof(*bytecode
)) != bytecode_len
) {
3497 free(filter_expression
);
3500 ret
= LTTNG_ERR_FILTER_INVAL
;
3505 ret
= cmd_enable_event(cmd_ctx
->session
, &cmd_ctx
->lsm
->domain
,
3506 cmd_ctx
->lsm
->u
.enable
.channel_name
,
3507 &cmd_ctx
->lsm
->u
.enable
.event
,
3508 filter_expression
, bytecode
, exclusion
,
3509 kernel_poll_pipe
[1]);
3512 case LTTNG_LIST_TRACEPOINTS
:
3514 struct lttng_event
*events
;
3517 session_lock_list();
3518 nb_events
= cmd_list_tracepoints(cmd_ctx
->lsm
->domain
.type
, &events
);
3519 session_unlock_list();
3520 if (nb_events
< 0) {
3521 /* Return value is a negative lttng_error_code. */
3527 * Setup lttng message with payload size set to the event list size in
3528 * bytes and then copy list into the llm payload.
3530 ret
= setup_lttng_msg_no_cmd_header(cmd_ctx
, events
,
3531 sizeof(struct lttng_event
) * nb_events
);
3541 case LTTNG_LIST_TRACEPOINT_FIELDS
:
3543 struct lttng_event_field
*fields
;
3546 session_lock_list();
3547 nb_fields
= cmd_list_tracepoint_fields(cmd_ctx
->lsm
->domain
.type
,
3549 session_unlock_list();
3550 if (nb_fields
< 0) {
3551 /* Return value is a negative lttng_error_code. */
3557 * Setup lttng message with payload size set to the event list size in
3558 * bytes and then copy list into the llm payload.
3560 ret
= setup_lttng_msg_no_cmd_header(cmd_ctx
, fields
,
3561 sizeof(struct lttng_event_field
) * nb_fields
);
3571 case LTTNG_LIST_SYSCALLS
:
3573 struct lttng_event
*events
;
3576 nb_events
= cmd_list_syscalls(&events
);
3577 if (nb_events
< 0) {
3578 /* Return value is a negative lttng_error_code. */
3584 * Setup lttng message with payload size set to the event list size in
3585 * bytes and then copy list into the llm payload.
3587 ret
= setup_lttng_msg_no_cmd_header(cmd_ctx
, events
,
3588 sizeof(struct lttng_event
) * nb_events
);
3598 case LTTNG_LIST_TRACKER_PIDS
:
3600 int32_t *pids
= NULL
;
3603 nr_pids
= cmd_list_tracker_pids(cmd_ctx
->session
,
3604 cmd_ctx
->lsm
->domain
.type
, &pids
);
3606 /* Return value is a negative lttng_error_code. */
3612 * Setup lttng message with payload size set to the event list size in
3613 * bytes and then copy list into the llm payload.
3615 ret
= setup_lttng_msg_no_cmd_header(cmd_ctx
, pids
,
3616 sizeof(int32_t) * nr_pids
);
3626 case LTTNG_SET_CONSUMER_URI
:
3629 struct lttng_uri
*uris
;
3631 nb_uri
= cmd_ctx
->lsm
->u
.uri
.size
;
3632 len
= nb_uri
* sizeof(struct lttng_uri
);
3635 ret
= LTTNG_ERR_INVALID
;
3639 uris
= zmalloc(len
);
3641 ret
= LTTNG_ERR_FATAL
;
3645 /* Receive variable len data */
3646 DBG("Receiving %zu URI(s) from client ...", nb_uri
);
3647 ret
= lttcomm_recv_unix_sock(sock
, uris
, len
);
3649 DBG("No URIs received from client... continuing");
3651 ret
= LTTNG_ERR_SESSION_FAIL
;
3656 ret
= cmd_set_consumer_uri(cmd_ctx
->session
, nb_uri
, uris
);
3658 if (ret
!= LTTNG_OK
) {
3665 case LTTNG_START_TRACE
:
3667 ret
= cmd_start_trace(cmd_ctx
->session
);
3670 case LTTNG_STOP_TRACE
:
3672 ret
= cmd_stop_trace(cmd_ctx
->session
);
3675 case LTTNG_CREATE_SESSION
:
3678 struct lttng_uri
*uris
= NULL
;
3680 nb_uri
= cmd_ctx
->lsm
->u
.uri
.size
;
3681 len
= nb_uri
* sizeof(struct lttng_uri
);
3684 uris
= zmalloc(len
);
3686 ret
= LTTNG_ERR_FATAL
;
3690 /* Receive variable len data */
3691 DBG("Waiting for %zu URIs from client ...", nb_uri
);
3692 ret
= lttcomm_recv_unix_sock(sock
, uris
, len
);
3694 DBG("No URIs received from client... continuing");
3696 ret
= LTTNG_ERR_SESSION_FAIL
;
3701 if (nb_uri
== 1 && uris
[0].dtype
!= LTTNG_DST_PATH
) {
3702 DBG("Creating session with ONE network URI is a bad call");
3703 ret
= LTTNG_ERR_SESSION_FAIL
;
3709 ret
= cmd_create_session_uri(cmd_ctx
->lsm
->session
.name
, uris
, nb_uri
,
3710 &cmd_ctx
->creds
, 0);
3716 case LTTNG_DESTROY_SESSION
:
3718 ret
= cmd_destroy_session(cmd_ctx
->session
, kernel_poll_pipe
[1]);
3720 /* Set session to NULL so we do not unlock it after free. */
3721 cmd_ctx
->session
= NULL
;
3724 case LTTNG_LIST_DOMAINS
:
3727 struct lttng_domain
*domains
= NULL
;
3729 nb_dom
= cmd_list_domains(cmd_ctx
->session
, &domains
);
3731 /* Return value is a negative lttng_error_code. */
3736 ret
= setup_lttng_msg_no_cmd_header(cmd_ctx
, domains
,
3737 nb_dom
* sizeof(struct lttng_domain
));
3747 case LTTNG_LIST_CHANNELS
:
3749 ssize_t payload_size
;
3750 struct lttng_channel
*channels
= NULL
;
3752 payload_size
= cmd_list_channels(cmd_ctx
->lsm
->domain
.type
,
3753 cmd_ctx
->session
, &channels
);
3754 if (payload_size
< 0) {
3755 /* Return value is a negative lttng_error_code. */
3756 ret
= -payload_size
;
3760 ret
= setup_lttng_msg_no_cmd_header(cmd_ctx
, channels
,
3771 case LTTNG_LIST_EVENTS
:
3774 struct lttng_event
*events
= NULL
;
3775 struct lttcomm_event_command_header cmd_header
;
3778 memset(&cmd_header
, 0, sizeof(cmd_header
));
3779 /* Extended infos are included at the end of events */
3780 nb_event
= cmd_list_events(cmd_ctx
->lsm
->domain
.type
,
3781 cmd_ctx
->session
, cmd_ctx
->lsm
->u
.list
.channel_name
,
3782 &events
, &total_size
);
3785 /* Return value is a negative lttng_error_code. */
3790 cmd_header
.nb_events
= nb_event
;
3791 ret
= setup_lttng_msg(cmd_ctx
, events
, total_size
,
3792 &cmd_header
, sizeof(cmd_header
));
3802 case LTTNG_LIST_SESSIONS
:
3804 unsigned int nr_sessions
;
3805 void *sessions_payload
;
3808 session_lock_list();
3809 nr_sessions
= lttng_sessions_count(
3810 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx
->creds
),
3811 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx
->creds
));
3812 payload_len
= sizeof(struct lttng_session
) * nr_sessions
;
3813 sessions_payload
= zmalloc(payload_len
);
3815 if (!sessions_payload
) {
3816 session_unlock_list();
3821 cmd_list_lttng_sessions(sessions_payload
,
3822 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx
->creds
),
3823 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx
->creds
));
3824 session_unlock_list();
3826 ret
= setup_lttng_msg_no_cmd_header(cmd_ctx
, sessions_payload
,
3828 free(sessions_payload
);
3837 case LTTNG_REGISTER_CONSUMER
:
3839 struct consumer_data
*cdata
;
3841 switch (cmd_ctx
->lsm
->domain
.type
) {
3842 case LTTNG_DOMAIN_KERNEL
:
3843 cdata
= &kconsumer_data
;
3846 ret
= LTTNG_ERR_UND
;
3850 ret
= cmd_register_consumer(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
3851 cmd_ctx
->lsm
->u
.reg
.path
, cdata
);
3854 case LTTNG_DATA_PENDING
:
3857 uint8_t pending_ret_byte
;
3859 pending_ret
= cmd_data_pending(cmd_ctx
->session
);
        /*
         * This function may return 0 or 1 to indicate whether or not
         * there is data pending. In case of error, it should return an
         * LTTNG_ERR code. However, some code paths may still return
         * a nondescript error code, which we handle by returning
         * LTTNG_ERR_UNK.
         */
3870 if (pending_ret
== 0 || pending_ret
== 1) {
3872 * ret will be set to LTTNG_OK at the end of
3875 } else if (pending_ret
< 0) {
3876 ret
= LTTNG_ERR_UNK
;
3883 pending_ret_byte
= (uint8_t) pending_ret
;
3885 /* 1 byte to return whether or not data is pending */
3886 ret
= setup_lttng_msg_no_cmd_header(cmd_ctx
,
3887 &pending_ret_byte
, 1);
3896 case LTTNG_SNAPSHOT_ADD_OUTPUT
:
3898 struct lttcomm_lttng_output_id reply
;
3900 ret
= cmd_snapshot_add_output(cmd_ctx
->session
,
3901 &cmd_ctx
->lsm
->u
.snapshot_output
.output
, &reply
.id
);
3902 if (ret
!= LTTNG_OK
) {
3906 ret
= setup_lttng_msg_no_cmd_header(cmd_ctx
, &reply
,
3912 /* Copy output list into message payload */
3916 case LTTNG_SNAPSHOT_DEL_OUTPUT
:
3918 ret
= cmd_snapshot_del_output(cmd_ctx
->session
,
3919 &cmd_ctx
->lsm
->u
.snapshot_output
.output
);
3922 case LTTNG_SNAPSHOT_LIST_OUTPUT
:
3925 struct lttng_snapshot_output
*outputs
= NULL
;
3927 nb_output
= cmd_snapshot_list_outputs(cmd_ctx
->session
, &outputs
);
3928 if (nb_output
< 0) {
3933 assert((nb_output
> 0 && outputs
) || nb_output
== 0);
3934 ret
= setup_lttng_msg_no_cmd_header(cmd_ctx
, outputs
,
3935 nb_output
* sizeof(struct lttng_snapshot_output
));
3945 case LTTNG_SNAPSHOT_RECORD
:
3947 ret
= cmd_snapshot_record(cmd_ctx
->session
,
3948 &cmd_ctx
->lsm
->u
.snapshot_record
.output
,
3949 cmd_ctx
->lsm
->u
.snapshot_record
.wait
);
3952 case LTTNG_CREATE_SESSION_SNAPSHOT
:
3955 struct lttng_uri
*uris
= NULL
;
3957 nb_uri
= cmd_ctx
->lsm
->u
.uri
.size
;
3958 len
= nb_uri
* sizeof(struct lttng_uri
);
3961 uris
= zmalloc(len
);
3963 ret
= LTTNG_ERR_FATAL
;
3967 /* Receive variable len data */
3968 DBG("Waiting for %zu URIs from client ...", nb_uri
);
3969 ret
= lttcomm_recv_unix_sock(sock
, uris
, len
);
3971 DBG("No URIs received from client... continuing");
3973 ret
= LTTNG_ERR_SESSION_FAIL
;
3978 if (nb_uri
== 1 && uris
[0].dtype
!= LTTNG_DST_PATH
) {
3979 DBG("Creating session with ONE network URI is a bad call");
3980 ret
= LTTNG_ERR_SESSION_FAIL
;
3986 ret
= cmd_create_session_snapshot(cmd_ctx
->lsm
->session
.name
, uris
,
3987 nb_uri
, &cmd_ctx
->creds
);
3991 case LTTNG_CREATE_SESSION_LIVE
:
3994 struct lttng_uri
*uris
= NULL
;
3996 nb_uri
= cmd_ctx
->lsm
->u
.uri
.size
;
3997 len
= nb_uri
* sizeof(struct lttng_uri
);
4000 uris
= zmalloc(len
);
4002 ret
= LTTNG_ERR_FATAL
;
4006 /* Receive variable len data */
4007 DBG("Waiting for %zu URIs from client ...", nb_uri
);
4008 ret
= lttcomm_recv_unix_sock(sock
, uris
, len
);
4010 DBG("No URIs received from client... continuing");
4012 ret
= LTTNG_ERR_SESSION_FAIL
;
4017 if (nb_uri
== 1 && uris
[0].dtype
!= LTTNG_DST_PATH
) {
4018 DBG("Creating session with ONE network URI is a bad call");
4019 ret
= LTTNG_ERR_SESSION_FAIL
;
4025 ret
= cmd_create_session_uri(cmd_ctx
->lsm
->session
.name
, uris
,
4026 nb_uri
, &cmd_ctx
->creds
, cmd_ctx
->lsm
->u
.session_live
.timer_interval
);
4030 case LTTNG_SAVE_SESSION
:
4032 ret
= cmd_save_sessions(&cmd_ctx
->lsm
->u
.save_session
.attr
,
4036 case LTTNG_SET_SESSION_SHM_PATH
:
4038 ret
= cmd_set_session_shm_path(cmd_ctx
->session
,
4039 cmd_ctx
->lsm
->u
.set_shm_path
.shm_path
);
4042 case LTTNG_REGENERATE_METADATA
:
4044 ret
= cmd_regenerate_metadata(cmd_ctx
->session
);
4047 case LTTNG_REGENERATE_STATEDUMP
:
4049 ret
= cmd_regenerate_statedump(cmd_ctx
->session
);
4052 case LTTNG_REGISTER_TRIGGER
:
4054 ret
= cmd_register_trigger(cmd_ctx
, sock
,
4055 notification_thread_handle
);
4058 case LTTNG_UNREGISTER_TRIGGER
:
4060 ret
= cmd_unregister_trigger(cmd_ctx
, sock
,
4061 notification_thread_handle
);
4064 case LTTNG_CLEAR_SESSION
:
4066 ret
= cmd_clear_session(cmd_ctx
->session
);
4067 if (ret
!= LTTNG_OK
) {
4073 ret
= LTTNG_ERR_UND
;
4078 if (cmd_ctx
->llm
== NULL
) {
4079 DBG("Missing llm structure. Allocating one.");
4080 if (setup_lttng_msg_no_cmd_header(cmd_ctx
, NULL
, 0) < 0) {
4084 /* Set return code */
4085 cmd_ctx
->llm
->ret_code
= ret
;
    if (cmd_ctx->session) {
        session_unlock(cmd_ctx->session);
    }
    if (need_tracing_session) {
        session_unlock_list();
    }

    assert(!rcu_read_ongoing());
    return ret;
}
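/*
 * Illustrative sketch, not part of the daemon: the variable-length payload
 * pattern used repeatedly by process_client_msg() above.  A fixed-size header
 * announces a length, the length is validated against a maximum, a buffer is
 * allocated, and only then is the payload read from the socket.  Plain recv()
 * stands in for lttcomm_recv_unix_sock(); the helper name and the 64 KiB cap
 * are hypothetical.  Assumes <stdlib.h> and <sys/socket.h>.
 */
static void *example_recv_payload(int sock, size_t announced_len)
{
    const size_t max_len = 65536;
    char *buf;
    size_t off = 0;

    if (announced_len == 0 || announced_len > max_len) {
        /* Never trust a client-provided length blindly. */
        return NULL;
    }
    buf = calloc(1, announced_len + 1);    /* +1 keeps string payloads terminated */
    if (!buf) {
        return NULL;
    }
    while (off < announced_len) {
        ssize_t r = recv(sock, buf + off, announced_len - off, 0);

        if (r <= 0) {
            free(buf);
            return NULL;
        }
        off += (size_t) r;
    }
    return buf;
}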
4099 * Thread managing health check socket.
4101 static void *thread_manage_health(void *data
)
4103 int sock
= -1, new_sock
= -1, ret
, i
, pollfd
, err
= -1;
4104 uint32_t revents
, nb_fd
;
4105 struct lttng_poll_event events
;
4106 struct health_comm_msg msg
;
4107 struct health_comm_reply reply
;
4109 DBG("[thread] Manage health check started");
4111 rcu_register_thread();
4113 /* We might hit an error path before this is created. */
4114 lttng_poll_init(&events
);
4116 /* Create unix socket */
4117 sock
= lttcomm_create_unix_sock(config
.health_unix_sock_path
.value
);
4119 ERR("Unable to create health check Unix socket");
4124 /* lttng health client socket path permissions */
4125 ret
= chown(config
.health_unix_sock_path
.value
, 0,
4126 utils_get_group_id(config
.tracing_group_name
.value
));
4128 ERR("Unable to set group on %s", config
.health_unix_sock_path
.value
);
4133 ret
= chmod(config
.health_unix_sock_path
.value
,
4134 S_IRUSR
| S_IWUSR
| S_IRGRP
| S_IWGRP
);
4136 ERR("Unable to set permissions on %s", config
.health_unix_sock_path
.value
);
4143 * Set the CLOEXEC flag. Return code is useless because either way, the
4146 (void) utils_set_fd_cloexec(sock
);
4148 ret
= lttcomm_listen_unix_sock(sock
);
4154 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
4155 * more will be added to this poll set.
4157 ret
= sessiond_set_thread_pollset(&events
, 2);
4162 /* Add the application registration socket */
4163 ret
= lttng_poll_add(&events
, sock
, LPOLLIN
| LPOLLPRI
);
4168 sessiond_notify_ready();
4171 DBG("Health check ready");
4173 /* Infinite blocking call, waiting for transmission */
4175 ret
= lttng_poll_wait(&events
, -1);
4178 * Restart interrupted system call.
4180 if (errno
== EINTR
) {
4188 for (i
= 0; i
< nb_fd
; i
++) {
4189 /* Fetch once the poll data */
4190 revents
= LTTNG_POLL_GETEV(&events
, i
);
4191 pollfd
= LTTNG_POLL_GETFD(&events
, i
);
4194 /* No activity for this FD (poll implementation). */
4198 /* Thread quit pipe has been closed. Killing thread. */
4199 ret
= sessiond_check_thread_quit_pipe(pollfd
, revents
);
4205 /* Event on the registration socket */
4206 if (pollfd
== sock
) {
4207 if (revents
& LPOLLIN
) {
4209 } else if (revents
& (LPOLLERR
| LPOLLHUP
| LPOLLRDHUP
)) {
4210 ERR("Health socket poll error");
4213 ERR("Unexpected poll events %u for sock %d", revents
, pollfd
);
4219 new_sock
= lttcomm_accept_unix_sock(sock
);
4225 * Set the CLOEXEC flag. Return code is useless because either way, the
4228 (void) utils_set_fd_cloexec(new_sock
);
4230 DBG("Receiving data from client for health...");
4231 ret
= lttcomm_recv_unix_sock(new_sock
, (void *)&msg
, sizeof(msg
));
4233 DBG("Nothing recv() from client... continuing");
4234 ret
= close(new_sock
);
4241 rcu_thread_online();
4243 memset(&reply
, 0, sizeof(reply
));
4244 for (i
= 0; i
< NR_HEALTH_SESSIOND_TYPES
; i
++) {
4246 * health_check_state returns 0 if health is
4249 if (!health_check_state(health_sessiond
, i
)) {
4250 reply
.ret_code
|= 1ULL << i
;
4254 DBG2("Health check return value %" PRIx64
, reply
.ret_code
);
4256 ret
= send_unix_sock(new_sock
, (void *) &reply
, sizeof(reply
));
4258 ERR("Failed to send health data back to client");
4261 /* End of transmission */
4262 ret
= close(new_sock
);
4271 ERR("Health error occurred in %s", __func__
);
4273 DBG("Health check thread dying");
4274 unlink(config
.health_unix_sock_path
.value
);
    lttng_poll_clean(&events);

    rcu_unregister_thread();
    return NULL;
}
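/*
 * Illustrative sketch, not part of the daemon: decoding the reply built by
 * thread_manage_health() above, where bit i of ret_code is set when component
 * i failed its liveness check (health_check_state() returned 0).  The helper
 * name and the caller-provided component count are hypothetical.  Assumes
 * <stdint.h>.
 */
static unsigned int example_count_unhealthy(uint64_t ret_code,
        unsigned int nr_components)
{
    unsigned int i, bad = 0;

    for (i = 0; i < nr_components; i++) {
        if (ret_code & (1ULL << i)) {
            /* Bit set: component i did not report progress in time. */
            bad++;
        }
    }
    return bad;
}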
/*
 * This thread manages all client requests using the unix client socket for
 * communication.
 */
static void *thread_manage_clients(void *data)
{
4294 int sock
= -1, ret
, i
, pollfd
, err
= -1;
4296 uint32_t revents
, nb_fd
;
4297 struct command_ctx
*cmd_ctx
= NULL
;
4298 struct lttng_poll_event events
;
4300 DBG("[thread] Manage client started");
4302 rcu_register_thread();
4304 health_register(health_sessiond
, HEALTH_SESSIOND_TYPE_CMD
);
4306 health_code_update();
4308 ret
= lttcomm_listen_unix_sock(client_sock
);
4314 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
4315 * more will be added to this poll set.
4317 ret
= sessiond_set_thread_pollset(&events
, 2);
4319 goto error_create_poll
;
4322 /* Add the application registration socket */
4323 ret
= lttng_poll_add(&events
, client_sock
, LPOLLIN
| LPOLLPRI
);
4328 ret
= sem_post(&load_info
->message_thread_ready
);
4330 PERROR("sem_post message_thread_ready");
4335 * Wait until all support threads are initialized before accepting
4338 while (uatomic_read(<tng_sessiond_ready
) != 0) {
4340 struct timeval timeout
;
4343 FD_SET(thread_quit_pipe
[0], &read_fds
);
4344 memset(&timeout
, 0, sizeof(timeout
));
4345 timeout
.tv_usec
= 1000;
4348 * If a support thread failed to launch, it may signal that
4349 * we must exit and the sessiond would never be marked as
4352 * The timeout is set to 1ms, which serves as a way to
4353 * pace down this check.
4355 ret
= select(thread_quit_pipe
[0] + 1, &read_fds
, NULL
, NULL
,
4357 if (ret
> 0 || (ret
< 0 && errno
!= EINTR
)) {
4362 * This barrier is paired with the one in sessiond_notify_ready() to
4363 * ensure that loads accessing data initialized by the other threads,
4364 * on which this thread was waiting, are not performed before this point.
4366 * Note that this could be a 'read' memory barrier, but a full barrier
4367 * is used in case the code changes. The performance implications of
4368 * this choice are minimal since this is a slow path.
4372 /* This testpoint is after we signal readiness to the parent. */
4373 if (testpoint(sessiond_thread_manage_clients
)) {
4377 if (testpoint(sessiond_thread_manage_clients_before_loop
)) {
4381 health_code_update();
4384 DBG("Accepting client command ...");
4386 /* Infinite blocking call, waiting for transmission */
4388 health_poll_entry();
4389 ret
= lttng_poll_wait(&events
, -1);
4393 * Restart interrupted system call.
4395 if (errno
== EINTR
) {
4403 for (i
= 0; i
< nb_fd
; i
++) {
4404 /* Fetch once the poll data */
4405 revents
= LTTNG_POLL_GETEV(&events
, i
);
4406 pollfd
= LTTNG_POLL_GETFD(&events
, i
);
4408 health_code_update();
4411 /* No activity for this FD (poll implementation). */
4415 /* Thread quit pipe has been closed. Killing thread. */
4416 ret
= sessiond_check_thread_quit_pipe(pollfd
, revents
);
4422 /* Event on the registration socket */
4423 if (pollfd
== client_sock
) {
4424 if (revents
& LPOLLIN
) {
4426 } else if (revents
& (LPOLLERR
| LPOLLHUP
| LPOLLRDHUP
)) {
4427 ERR("Client socket poll error");
4430 ERR("Unexpected poll events %u for sock %d", revents
, pollfd
);
4436 DBG("Wait for client response");
4438 health_code_update();
4440 sock
= lttcomm_accept_unix_sock(client_sock
);
4446 * Set the CLOEXEC flag. Return code is useless because either way, the
4449 (void) utils_set_fd_cloexec(sock
);
4451 /* Set socket option for credentials retrieval */
4452 ret
= lttcomm_setsockopt_creds_unix_sock(sock
);
4457 /* Allocate context command to process the client request */
4458 cmd_ctx
= zmalloc(sizeof(struct command_ctx
));
4459 if (cmd_ctx
== NULL
) {
4460 PERROR("zmalloc cmd_ctx");
4464 /* Allocate data buffer for reception */
4465 cmd_ctx
->lsm
= zmalloc(sizeof(struct lttcomm_session_msg
));
4466 if (cmd_ctx
->lsm
== NULL
) {
4467 PERROR("zmalloc cmd_ctx->lsm");
4471 cmd_ctx
->llm
= NULL
;
4472 cmd_ctx
->session
= NULL
;
4474 health_code_update();
4477 * Data is received from the lttng client. The struct
4478 * lttcomm_session_msg (lsm) contains the command and data request of
4481 DBG("Receiving data from client ...");
4482 ret
= lttcomm_recv_creds_unix_sock(sock
, cmd_ctx
->lsm
,
4483 sizeof(struct lttcomm_session_msg
), &cmd_ctx
->creds
);
4485 DBG("Nothing recv() from client... continuing");
4491 clean_command_ctx(&cmd_ctx
);
4495 health_code_update();
4497 // TODO: Validate cmd_ctx including sanity check for
4498 // security purpose.
4500 rcu_thread_online();
            /*
             * This function dispatches the work to the kernel or userspace
             * tracer libs and fills the lttcomm_lttng_msg data structure with
             * all the information needed by the client. The command context
             * struct contains everything this function may need.
             */
4507 ret
= process_client_msg(cmd_ctx
, sock
, &sock_error
);
4508 rcu_thread_offline();
4516 * TODO: Inform client somehow of the fatal error. At
4517 * this point, ret < 0 means that a zmalloc failed
4518 * (ENOMEM). Error detected but still accept
4519 * command, unless a socket error has been
4522 clean_command_ctx(&cmd_ctx
);
4526 health_code_update();
4528 DBG("Sending response (size: %d, retcode: %s (%d))",
4529 cmd_ctx
->lttng_msg_size
,
4530 lttng_strerror(-cmd_ctx
->llm
->ret_code
),
4531 cmd_ctx
->llm
->ret_code
);
4532 ret
= send_unix_sock(sock
, cmd_ctx
->llm
, cmd_ctx
->lttng_msg_size
);
4534 ERR("Failed to send data back to client");
4537 /* End of transmission */
4544 clean_command_ctx(&cmd_ctx
);
4546 health_code_update();
4558 lttng_poll_clean(&events
);
4559 clean_command_ctx(&cmd_ctx
);
4563 unlink(config
.client_unix_sock_path
.value
);
4564 if (client_sock
>= 0) {
4565 ret
= close(client_sock
);
4573 ERR("Health error occurred in %s", __func__
);
4576 health_unregister(health_sessiond
);
4578 DBG("Client thread dying");
4580 rcu_unregister_thread();
4583 * Since we are creating the consumer threads, we own them, so we need
4584 * to join them before our thread exits.
4586 ret
= join_consumer_thread(&kconsumer_data
);
4589 PERROR("join_consumer");
4592 ret
= join_consumer_thread(&ustconsumer32_data
);
4595 PERROR("join_consumer ust32");
4598 ret
= join_consumer_thread(&ustconsumer64_data
);
4601 PERROR("join_consumer ust64");
static int string_match(const char *str1, const char *str2)
{
    return (str1 && str2) && !strcmp(str1, str2);
}
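/*
 * Illustrative usage, not part of the daemon: string_match() above treats a
 * NULL on either side as a non-match, so set_option() below can compare
 * getopt's long-option name without a separate NULL guard.  The helper name
 * is hypothetical; assumes <assert.h>.
 */
static void example_string_match_usage(void)
{
    assert(string_match("client-sock", "client-sock"));   /* equal, non-NULL */
    assert(!string_match("client-sock", "apps-sock"));    /* different names */
    assert(!string_match(NULL, "client-sock"));           /* NULL never matches */
}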
4612 * Take an option from the getopt output and set it in the right variable to be
4615 * Return 0 on success else a negative value.
4617 static int set_option(int opt
, const char *arg
, const char *optname
)
4621 if (string_match(optname
, "client-sock") || opt
== 'c') {
4622 if (!arg
|| *arg
== '\0') {
4626 if (lttng_is_setuid_setgid()) {
4627 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
4628 "-c, --client-sock");
4630 config_string_set(&config
.client_unix_sock_path
,
4632 if (!config
.client_unix_sock_path
.value
) {
4637 } else if (string_match(optname
, "apps-sock") || opt
== 'a') {
4638 if (!arg
|| *arg
== '\0') {
4642 if (lttng_is_setuid_setgid()) {
4643 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
4646 config_string_set(&config
.apps_unix_sock_path
,
4648 if (!config
.apps_unix_sock_path
.value
) {
4653 } else if (string_match(optname
, "daemonize") || opt
== 'd') {
4654 config
.daemonize
= true;
4655 } else if (string_match(optname
, "background") || opt
== 'b') {
4656 config
.background
= true;
4657 } else if (string_match(optname
, "group") || opt
== 'g') {
4658 if (!arg
|| *arg
== '\0') {
4662 if (lttng_is_setuid_setgid()) {
4663 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
4666 config_string_set(&config
.tracing_group_name
,
4668 if (!config
.tracing_group_name
.value
) {
4673 } else if (string_match(optname
, "help") || opt
== 'h') {
4674 ret
= utils_show_help(8, "lttng-sessiond", help_msg
);
4676 ERR("Cannot show --help for `lttng-sessiond`");
4679 exit(ret
? EXIT_FAILURE
: EXIT_SUCCESS
);
4680 } else if (string_match(optname
, "version") || opt
== 'V') {
4681 fprintf(stdout
, "%s\n", VERSION
);
4683 } else if (string_match(optname
, "sig-parent") || opt
== 'S') {
4684 config
.sig_parent
= true;
4685 } else if (string_match(optname
, "kconsumerd-err-sock")) {
4686 if (!arg
|| *arg
== '\0') {
4690 if (lttng_is_setuid_setgid()) {
4691 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
4692 "--kconsumerd-err-sock");
4694 config_string_set(&config
.kconsumerd_err_unix_sock_path
,
4696 if (!config
.kconsumerd_err_unix_sock_path
.value
) {
4701 } else if (string_match(optname
, "kconsumerd-cmd-sock")) {
4702 if (!arg
|| *arg
== '\0') {
4706 if (lttng_is_setuid_setgid()) {
4707 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
4708 "--kconsumerd-cmd-sock");
4710 config_string_set(&config
.kconsumerd_cmd_unix_sock_path
,
4712 if (!config
.kconsumerd_cmd_unix_sock_path
.value
) {
4717 } else if (string_match(optname
, "ustconsumerd64-err-sock")) {
4718 if (!arg
|| *arg
== '\0') {
4722 if (lttng_is_setuid_setgid()) {
4723 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
4724 "--ustconsumerd64-err-sock");
4726 config_string_set(&config
.consumerd64_err_unix_sock_path
,
4728 if (!config
.consumerd64_err_unix_sock_path
.value
) {
4733 } else if (string_match(optname
, "ustconsumerd64-cmd-sock")) {
4734 if (!arg
|| *arg
== '\0') {
4738 if (lttng_is_setuid_setgid()) {
4739 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
4740 "--ustconsumerd64-cmd-sock");
4742 config_string_set(&config
.consumerd64_cmd_unix_sock_path
,
4744 if (!config
.consumerd64_cmd_unix_sock_path
.value
) {
4749 } else if (string_match(optname
, "ustconsumerd32-err-sock")) {
4750 if (!arg
|| *arg
== '\0') {
4754 if (lttng_is_setuid_setgid()) {
4755 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
4756 "--ustconsumerd32-err-sock");
4758 config_string_set(&config
.consumerd32_err_unix_sock_path
,
4760 if (!config
.consumerd32_err_unix_sock_path
.value
) {
4765 } else if (string_match(optname
, "ustconsumerd32-cmd-sock")) {
4766 if (!arg
|| *arg
== '\0') {
4770 if (lttng_is_setuid_setgid()) {
4771 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
4772 "--ustconsumerd32-cmd-sock");
4774 config_string_set(&config
.consumerd32_cmd_unix_sock_path
,
4776 if (!config
.consumerd32_cmd_unix_sock_path
.value
) {
4781 } else if (string_match(optname
, "no-kernel")) {
4782 config
.no_kernel
= true;
4783 } else if (string_match(optname
, "quiet") || opt
== 'q') {
4784 config
.quiet
= true;
4785 } else if (string_match(optname
, "verbose") || opt
== 'v') {
4786 /* Verbose level can increase using multiple -v */
4788 /* Value obtained from config file */
4789 config
.verbose
= config_parse_value(arg
);
4791 /* -v used on command line */
4794 /* Clamp value to [0, 3] */
4795 config
.verbose
= config
.verbose
< 0 ? 0 :
4796 (config
.verbose
<= 3 ? config
.verbose
: 3);
4797 } else if (string_match(optname
, "verbose-consumer")) {
4799 config
.verbose_consumer
= config_parse_value(arg
);
4801 config
.verbose_consumer
++;
4803 } else if (string_match(optname
, "consumerd32-path")) {
4804 if (!arg
|| *arg
== '\0') {
4808 if (lttng_is_setuid_setgid()) {
4809 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
4810 "--consumerd32-path");
4812 config_string_set(&config
.consumerd32_bin_path
,
4814 if (!config
.consumerd32_bin_path
.value
) {
4819 } else if (string_match(optname
, "consumerd32-libdir")) {
4820 if (!arg
|| *arg
== '\0') {
4824 if (lttng_is_setuid_setgid()) {
4825 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
4826 "--consumerd32-libdir");
4828 config_string_set(&config
.consumerd32_lib_dir
,
4830 if (!config
.consumerd32_lib_dir
.value
) {
4835 } else if (string_match(optname
, "consumerd64-path")) {
4836 if (!arg
|| *arg
== '\0') {
4840 if (lttng_is_setuid_setgid()) {
4841 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
4842 "--consumerd64-path");
4844 config_string_set(&config
.consumerd64_bin_path
,
4846 if (!config
.consumerd64_bin_path
.value
) {
4851 } else if (string_match(optname
, "consumerd64-libdir")) {
4852 if (!arg
|| *arg
== '\0') {
4856 if (lttng_is_setuid_setgid()) {
4857 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
4858 "--consumerd64-libdir");
4860 config_string_set(&config
.consumerd64_lib_dir
,
4862 if (!config
.consumerd64_lib_dir
.value
) {
4867 } else if (string_match(optname
, "pidfile") || opt
== 'p') {
4868 if (!arg
|| *arg
== '\0') {
4872 if (lttng_is_setuid_setgid()) {
4873 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
4876 config_string_set(&config
.pid_file_path
, strdup(arg
));
4877 if (!config
.pid_file_path
.value
) {
4882 } else if (string_match(optname
, "agent-tcp-port")) {
4883 if (!arg
|| *arg
== '\0') {
4887 if (lttng_is_setuid_setgid()) {
4888 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
4889 "--agent-tcp-port");
4894 v
= strtoul(arg
, NULL
, 0);
4895 if (errno
!= 0 || !isdigit(arg
[0])) {
4896 ERR("Wrong value in --agent-tcp-port parameter: %s", arg
);
4899 if (v
== 0 || v
>= 65535) {
4900 ERR("Port overflow in --agent-tcp-port parameter: %s", arg
);
4903 config
.agent_tcp_port
.begin
= config
.agent_tcp_port
.end
= (int) v
;
4904 DBG3("Agent TCP port set to non default: %i", (int) v
);
4906 } else if (string_match(optname
, "load") || opt
== 'l') {
4907 if (!arg
|| *arg
== '\0') {
4911 if (lttng_is_setuid_setgid()) {
4912 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
4915 config_string_set(&config
.load_session_path
, strdup(arg
));
4916 if (!config
.load_session_path
.value
) {
4921 } else if (string_match(optname
, "kmod-probes")) {
4922 if (!arg
|| *arg
== '\0') {
4926 if (lttng_is_setuid_setgid()) {
4927 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
4930 config_string_set(&config
.kmod_probes_list
, strdup(arg
));
4931 if (!config
.kmod_probes_list
.value
) {
4936 } else if (string_match(optname
, "extra-kmod-probes")) {
4937 if (!arg
|| *arg
== '\0') {
4941 if (lttng_is_setuid_setgid()) {
4942 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
4943 "--extra-kmod-probes");
4945 config_string_set(&config
.kmod_extra_probes_list
,
4947 if (!config
.kmod_extra_probes_list
.value
) {
4952 } else if (string_match(optname
, "config") || opt
== 'f') {
4953 /* This is handled in set_options() thus silent skip. */
4956 /* Unknown option or other error.
4957 * Error is printed by getopt, just return */
4962 if (ret
== -EINVAL
) {
4963 const char *opt_name
= "unknown";
4966 for (i
= 0; i
< sizeof(long_options
) / sizeof(struct option
);
4968 if (opt
== long_options
[i
].val
) {
4969 opt_name
= long_options
[i
].name
;
4974 WARN("Invalid argument provided for option \"%s\", using default value.",
4982 * config_entry_handler_cb used to handle options read from a config file.
4983 * See config_entry_handler_cb comment in common/config/session-config.h for the
4984 * return value conventions.
4986 static int config_entry_handler(const struct config_entry
*entry
, void *unused
)
4990 if (!entry
|| !entry
->name
|| !entry
->value
) {
4995 /* Check if the option is to be ignored */
4996 for (i
= 0; i
< sizeof(config_ignore_options
) / sizeof(char *); i
++) {
4997 if (!strcmp(entry
->name
, config_ignore_options
[i
])) {
5002 for (i
= 0; i
< (sizeof(long_options
) / sizeof(struct option
)) - 1;
5005 /* Ignore if not fully matched. */
5006 if (strcmp(entry
->name
, long_options
[i
].name
)) {
5011 * If the option takes no argument on the command line, we have to
5012 * check if the value is "true". We support non-zero numeric values,
5015 if (!long_options
[i
].has_arg
) {
5016 ret
= config_parse_value(entry
->value
);
5019 WARN("Invalid configuration value \"%s\" for option %s",
5020 entry
->value
, entry
->name
);
5022 /* False, skip boolean config option. */
5027 ret
= set_option(long_options
[i
].val
, entry
->value
, entry
->name
);
5031 WARN("Unrecognized option \"%s\" in daemon configuration file.", entry
->name
);
5038 * daemon configuration loading and argument parsing
5040 static int set_options(int argc
, char **argv
)
5042 int ret
= 0, c
= 0, option_index
= 0;
5043 int orig_optopt
= optopt
, orig_optind
= optind
;
5045 const char *config_path
= NULL
;
5047 optstring
= utils_generate_optstring(long_options
,
5048 sizeof(long_options
) / sizeof(struct option
));
5054 /* Check for the --config option */
5055 while ((c
= getopt_long(argc
, argv
, optstring
, long_options
,
5056 &option_index
)) != -1) {
5060 } else if (c
!= 'f') {
5061 /* if not equal to --config option. */
5065 if (lttng_is_setuid_setgid()) {
5066 WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
5069 config_path
= utils_expand_path(optarg
);
5071 ERR("Failed to resolve path: %s", optarg
);
5076 ret
= config_get_section_entries(config_path
, config_section_name
,
5077 config_entry_handler
, NULL
);
5080 ERR("Invalid configuration option at line %i", ret
);
5086 /* Reset getopt's global state */
5087 optopt
= orig_optopt
;
5088 optind
= orig_optind
;
5092 * getopt_long() will not set option_index if it encounters a
5095 c
= getopt_long(argc
, argv
, optstring
, long_options
,
5102 * Pass NULL as the long option name if popt left the index
5105 ret
= set_option(c
, optarg
,
5106 option_index
< 0 ? NULL
:
5107 long_options
[option_index
].name
);
 * Creates the two sockets needed by the daemon.
 * apps_sock - The communication socket for all UST apps.
 * client_sock - The communication socket of the CLI tool (lttng).
 */
5123 static int init_daemon_socket(void)
5128 old_umask
= umask(0);
5130 /* Create client tool unix socket */
5131 client_sock
= lttcomm_create_unix_sock(config
.client_unix_sock_path
.value
);
5132 if (client_sock
< 0) {
5133 ERR("Create unix sock failed: %s", config
.client_unix_sock_path
.value
);
5138 /* Set the cloexec flag */
5139 ret
= utils_set_fd_cloexec(client_sock
);
5141 ERR("Unable to set CLOEXEC flag to the client Unix socket (fd: %d). "
5142 "Continuing but note that the consumer daemon will have a "
5143 "reference to this socket on exec()", client_sock
);
5146 /* File permission MUST be 660 */
5147 ret
= chmod(config
.client_unix_sock_path
.value
, S_IRUSR
| S_IWUSR
| S_IRGRP
| S_IWGRP
);
5149 ERR("Set file permissions failed: %s", config
.client_unix_sock_path
.value
);
5154 /* Create the application unix socket */
5155 apps_sock
= lttcomm_create_unix_sock(config
.apps_unix_sock_path
.value
);
5156 if (apps_sock
< 0) {
5157 ERR("Create unix sock failed: %s", config
.apps_unix_sock_path
.value
);
5162 /* Set the cloexec flag */
5163 ret
= utils_set_fd_cloexec(apps_sock
);
5165 ERR("Unable to set CLOEXEC flag to the app Unix socket (fd: %d). "
5166 "Continuing but note that the consumer daemon will have a "
5167 "reference to this socket on exec()", apps_sock
);
5170 /* File permission MUST be 666 */
5171 ret
= chmod(config
.apps_unix_sock_path
.value
,
5172 S_IRUSR
| S_IWUSR
| S_IRGRP
| S_IWGRP
| S_IROTH
| S_IWOTH
);
5174 ERR("Set file permissions failed: %s", config
.apps_unix_sock_path
.value
);
5179 DBG3("Session daemon client socket %d and application socket %d created",
5180 client_sock
, apps_sock
);
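/*
 * The umask(0)/chmod() pairing above makes the socket permissions
 * deterministic: clearing the umask keeps the socket files from being created
 * with masked-off permission bits, and the explicit chmod() calls then pin the
 * final modes before the original mask is restored. For reference, the
 * symbolic modes used correspond to:
 *
 *     S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP                       == 0660
 *     S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH   == 0666
 */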
/*
 * Create lockfile using the rundir and return its fd.
 */
static int create_lockfile(void)
{
	return utils_create_lock_file(config.lock_file_path.value);
}
/*
 * Check if the global socket is available, and if a daemon is answering at the
 * other side. If yes, an error is returned.
 *
 * Also attempts to create and hold the lock file.
 */
static int check_existing_daemon(void)
{
	/* Is there anybody out there? */
	if (lttng_session_daemon_alive()) {
		return -EEXIST;
	}

	lockfile_fd = create_lockfile();
	if (lockfile_fd < 0) {
		return -EEXIST;
	}

	return 0;
}
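/*
 * The lock file is a second line of defence against concurrent session
 * daemons: even if the liveness check above races with another starting
 * daemon, only one process can hold the file lock. A generic POSIX sketch of
 * the idea (not necessarily how utils_create_lock_file() is implemented):
 *
 *     int fd = open(path, O_CREAT | O_WRONLY, 0600);
 *     if (fd >= 0 && flock(fd, LOCK_EX | LOCK_NB) < 0) {
 *             close(fd);      // another daemon already holds the lock
 *             fd = -1;
 *     }
 */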
static void sessiond_cleanup_lock_file(void)
{
	int ret;

	/*
	 * Cleanup lock file by deleting it and finally closing it, which
	 * releases the file system lock.
	 */
	if (lockfile_fd >= 0) {
		ret = remove(config.lock_file_path.value);
		if (ret < 0) {
			PERROR("remove lock file");
		}
		ret = close(lockfile_fd);
		if (ret < 0) {
			PERROR("close lock file");
		}
	}
}
/*
 * Set the tracing group gid onto the client socket.
 *
 * Race window between mkdir and chown is OK because we are going from more
 * permissive (root.root) to less permissive (root.tracing).
 */
static int set_permissions(char *rundir)
{
	int ret;
	gid_t gid;

	gid = utils_get_group_id(config.tracing_group_name.value);

	/* Set lttng run dir */
	ret = chown(rundir, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", rundir);
	}

	/*
	 * Ensure all applications and tracing group can search the run
	 * dir. Allow everyone to read the directory, since it does not
	 * buy us anything to hide its content.
	 */
	ret = chmod(rundir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
	if (ret < 0) {
		ERR("Unable to set permissions on %s", rundir);
	}

	/* lttng client socket path */
	ret = chown(config.client_unix_sock_path.value, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", config.client_unix_sock_path.value);
	}

	/* kconsumer error socket path */
	ret = chown(kconsumer_data.err_unix_sock_path, 0, 0);
	if (ret < 0) {
		ERR("Unable to set group on %s", kconsumer_data.err_unix_sock_path);
	}

	/* 64-bit ustconsumer error socket path */
	ret = chown(ustconsumer64_data.err_unix_sock_path, 0, 0);
	if (ret < 0) {
		ERR("Unable to set group on %s", ustconsumer64_data.err_unix_sock_path);
	}

	/* 32-bit ustconsumer compat32 error socket path */
	ret = chown(ustconsumer32_data.err_unix_sock_path, 0, 0);
	if (ret < 0) {
		ERR("Unable to set group on %s", ustconsumer32_data.err_unix_sock_path);
	}

	DBG("All permissions are set");

	return ret;
}
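/*
 * For reference, chown(path, 0, gid) sets the owner to root (uid 0) and the
 * group to the tracing group, while the rundir mode used above,
 * S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH, is octal 0755:
 * read/search for everyone, write for root only.
 */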
/*
 * Create the lttng run directory needed for all global sockets and pipe.
 */
static int create_lttng_rundir(void)
{
	int ret;

	DBG3("Creating LTTng run directory: %s", config.rundir.value);

	ret = mkdir(config.rundir.value, S_IRWXU);
	if (ret < 0) {
		if (errno != EEXIST) {
			ERR("Unable to create %s", config.rundir.value);
			goto error;
		} else {
			ret = 0;
		}
	}

error:
	return ret;
}
/*
 * Setup sockets and directory needed by the consumerds' communication with the
 * session daemon.
 */
static int set_consumer_sockets(struct consumer_data *consumer_data)
{
	int ret;
	char *path = NULL;

	switch (consumer_data->type) {
	case LTTNG_CONSUMER_KERNEL:
		path = config.kconsumerd_path.value;
		break;
	case LTTNG_CONSUMER64_UST:
		path = config.consumerd64_path.value;
		break;
	case LTTNG_CONSUMER32_UST:
		path = config.consumerd32_path.value;
		break;
	default:
		ERR("Consumer type unknown");
		ret = -EINVAL;
		goto error;
	}

	DBG2("Creating consumer directory: %s", path);

	ret = mkdir(path, S_IRWXU | S_IRGRP | S_IXGRP);
	if (ret < 0 && errno != EEXIST) {
		PERROR("mkdir");
		ERR("Failed to create %s", path);
		goto error;
	}

	ret = chown(path, 0, utils_get_group_id(config.tracing_group_name.value));
	if (ret < 0) {
		ERR("Unable to set group on %s", path);
		ret = -1;
		goto error;
	}

	/* Create the consumerd error unix socket */
	consumer_data->err_sock =
		lttcomm_create_unix_sock(consumer_data->err_unix_sock_path);
	if (consumer_data->err_sock < 0) {
		ERR("Create unix sock failed: %s", consumer_data->err_unix_sock_path);
		ret = -1;
		goto error;
	}

	/*
	 * Set the CLOEXEC flag. The return code is not checked because, either
	 * way, the show must go on.
	 */
	ret = utils_set_fd_cloexec(consumer_data->err_sock);
	if (ret < 0) {
		PERROR("utils_set_fd_cloexec");
		/* continue anyway */
	}

	/* File permission MUST be 660 */
	ret = chmod(consumer_data->err_unix_sock_path,
			S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", consumer_data->err_unix_sock_path);
		goto error;
	}

error:
	return ret;
}
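/*
 * Note on the mkdir() calls above and in create_lttng_rundir(): EEXIST is
 * deliberately not treated as an error so that restarting the session daemon
 * simply reuses the directories left behind by a previous run.
 */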
/*
 * Signal handler for the daemon
 *
 * Simply stop all worker threads, leaving main() return gracefully after
 * joining all threads and calling cleanup().
 */
static void sighandler(int sig)
{
	switch (sig) {
	case SIGINT:
		DBG("SIGINT caught");
		stop_threads();
		break;
	case SIGTERM:
		DBG("SIGTERM caught");
		stop_threads();
		break;
	case SIGUSR1:
		CMM_STORE_SHARED(recv_child_signal, 1);
		break;
	default:
		break;
	}
}
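/*
 * The handler deliberately does very little work: for SIGUSR1 it only
 * publishes a flag through CMM_STORE_SHARED() (consumed by the daemonize
 * path), and for SIGINT/SIGTERM it merely asks the worker threads to stop.
 * The actual teardown happens in main() once the threads are joined, since
 * only async-signal-safe operations can be relied upon inside a handler.
 */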
/*
 * Setup signal handlers for SIGINT, SIGTERM, SIGUSR1 and SIGPIPE.
 */
static int set_signal_handler(void)
{
	int ret = 0;
	struct sigaction sa;
	sigset_t sigset;

	if ((ret = sigemptyset(&sigset)) < 0) {
		PERROR("sigemptyset");
		return ret;
	}

	sa.sa_mask = sigset;
	sa.sa_flags = 0;

	sa.sa_handler = sighandler;
	if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGUSR1, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	sa.sa_handler = SIG_IGN;
	if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	DBG("Signal handler set for SIGTERM, SIGUSR1, SIGPIPE and SIGINT");

	return ret;
}
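/*
 * SIGPIPE is set to SIG_IGN so that writing to a socket or pipe whose peer
 * has gone away fails with EPIPE instead of killing the whole daemon; every
 * write path can then handle the error locally.
 */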
/*
 * Set the open files limit as high as practical. This daemon can open a large
 * number of file descriptors in order to consume multiple kernel traces.
 */
static void set_ulimit(void)
{
	int ret;
	struct rlimit lim;

	/* The kernel does not allow an infinite limit for open files */
	lim.rlim_cur = 65535;
	lim.rlim_max = 65535;

	ret = setrlimit(RLIMIT_NOFILE, &lim);
	if (ret < 0) {
		PERROR("failed to set open files limit");
	}
}
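/*
 * Illustrative alternative (not what the code above does): instead of a
 * hard-coded value, the soft limit could be raised to whatever hard limit the
 * process already has:
 *
 *     struct rlimit lim;
 *     if (getrlimit(RLIMIT_NOFILE, &lim) == 0) {
 *             lim.rlim_cur = lim.rlim_max;   // raise soft limit to the hard limit
 *             (void) setrlimit(RLIMIT_NOFILE, &lim);
 *     }
 */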
static int write_pidfile(void)
{
	return utils_create_pid_file(getpid(), config.pid_file_path.value);
}
static int set_clock_plugin_env(void)
{
	int ret = 0;
	char *env_value = NULL;

	if (!config.lttng_ust_clock_plugin.value) {
		goto end;
	}

	ret = asprintf(&env_value, "LTTNG_UST_CLOCK_PLUGIN=%s",
			config.lttng_ust_clock_plugin.value);
	if (ret < 0) {
		PERROR("asprintf");
		goto end;
	}

	ret = putenv(env_value);
	if (ret) {
		free(env_value);
		PERROR("putenv of LTTNG_UST_CLOCK_PLUGIN");
		goto end;
	}

	DBG("Updated LTTNG_UST_CLOCK_PLUGIN environment variable to \"%s\"",
			config.lttng_ust_clock_plugin.value);
end:
	return ret;
}
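/*
 * putenv() keeps the caller's string in the environment instead of copying
 * it, which is why env_value is only freed on the error path above; freeing
 * it after a successful putenv() would corrupt the environment later
 * inherited by processes spawned by the session daemon.
 */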
int main(int argc, char **argv)
{
	int ret = 0, retval = 0;
	void *status;
	const char *env_app_timeout;
	struct lttng_pipe *ust32_channel_monitor_pipe = NULL,
			*ust64_channel_monitor_pipe = NULL,
			*kernel_channel_monitor_pipe = NULL;
	bool notification_thread_running = false;

	init_kernel_workarounds();

	rcu_register_thread();

	if (set_signal_handler()) {
		retval = -1;
		goto exit_set_signal_handler;
	}

	page_size = sysconf(_SC_PAGESIZE);
	if (page_size < 0) {
		PERROR("sysconf _SC_PAGESIZE");
		page_size = LONG_MAX;
		WARN("Fallback page size to %ld", page_size);
	}

	ret = sessiond_config_init(&config);
	if (ret) {
		retval = -1;
		goto exit_set_signal_handler;
	}

	/*
	 * Init config from environment variables.
	 * Command line options override the environment configuration, as
	 * documented, so the environment is applied first.
	 */
	sessiond_config_apply_env_config(&config);

	/*
	 * Parse arguments and load the daemon configuration file.
	 *
	 * We have an exit_options exit path to free memory reserved by
	 * set_options. This is needed because the rest of sessiond_cleanup()
	 * depends on ht_cleanup_thread, which depends on lttng_daemonize, which
	 * depends on set_options.
	 */
	if (set_options(argc, argv)) {
		retval = -1;
		goto exit_options;
	}
	/*
	 * Resolve all paths received as arguments, configuration option, or
	 * through environment variable as absolute paths. This is necessary
	 * since daemonizing changes the sessiond's current working directory
	 * to '/'.
	 */
	ret = sessiond_config_resolve_paths(&config);
	if (ret) {
		goto exit_options;
	}

	/* Apply the parsed configuration. */
	lttng_opt_verbose = config.verbose;
	lttng_opt_quiet = config.quiet;
	kconsumer_data.err_unix_sock_path =
			config.kconsumerd_err_unix_sock_path.value;
	kconsumer_data.cmd_unix_sock_path =
			config.kconsumerd_cmd_unix_sock_path.value;
	ustconsumer32_data.err_unix_sock_path =
			config.consumerd32_err_unix_sock_path.value;
	ustconsumer32_data.cmd_unix_sock_path =
			config.consumerd32_cmd_unix_sock_path.value;
	ustconsumer64_data.err_unix_sock_path =
			config.consumerd64_err_unix_sock_path.value;
	ustconsumer64_data.cmd_unix_sock_path =
			config.consumerd64_cmd_unix_sock_path.value;
	set_clock_plugin_env();

	sessiond_config_log(&config);

	if (create_lttng_rundir()) {
		retval = -1;
		goto exit_options;
	}

	/* Abort launch if a session daemon is already running. */
	if (check_existing_daemon()) {
		ERR("A session daemon is already running.");
		retval = -1;
		goto exit_options;
	}
	if (config.daemonize || config.background) {
		int i;

		ret = lttng_daemonize(&child_ppid, &recv_child_signal,
			!config.background);
		if (ret < 0) {
			retval = -1;
			goto exit_options;
		}

		/*
		 * We are in the child. Make sure all other file descriptors are
		 * closed, in case we are called with more opened file
		 * descriptors than the standard ones and the lock file.
		 */
		for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) {
			if (i == lockfile_fd) {
				continue;
			}
			(void) close(i);
		}
	}
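	/*
	 * Closing every descriptor above 2 after daemonizing keeps stray
	 * descriptors inherited from the launching process (terminals, pipes,
	 * sockets) from living for the whole lifetime of the daemon; the lock
	 * file descriptor is the one exception since dropping it would release
	 * the single-instance lock taken earlier.
	 */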
	if (run_as_create_worker(argv[0]) < 0) {
		goto exit_create_run_as_worker_cleanup;
	}
	/*
	 * Starting from here, we can create threads. This needs to be after
	 * lttng_daemonize due to RCU.
	 */

	/*
	 * Initialize the health check subsystem. This call should set the
	 * appropriate time values.
	 */
	health_sessiond = health_app_create(NR_HEALTH_SESSIOND_TYPES);
	if (!health_sessiond) {
		PERROR("health_app_create error");
		retval = -1;
		goto exit_health_sessiond_cleanup;
	}

	/* Create thread to clean up RCU hash tables */
	if (init_ht_cleanup_thread(&ht_cleanup_thread)) {
		retval = -1;
		goto exit_ht_cleanup;
	}
	/* Create thread quit pipe */
	if (init_thread_quit_pipe()) {
		retval = -1;
		goto exit_init_data;
	}

	/* Check if daemon is UID = 0 */
	is_root = !getuid();
	if (is_root) {
		/* Create global run dir with root access */

		kernel_channel_monitor_pipe = lttng_pipe_open(0);
		if (!kernel_channel_monitor_pipe) {
			ERR("Failed to create kernel consumer channel monitor pipe");
			retval = -1;
			goto exit_init_data;
		}
		kconsumer_data.channel_monitor_pipe =
				lttng_pipe_release_writefd(
					kernel_channel_monitor_pipe);
		if (kconsumer_data.channel_monitor_pipe < 0) {
			retval = -1;
			goto exit_init_data;
		}
	}
	/* Set consumer initial state */
	kernel_consumerd_state = CONSUMER_STOPPED;
	ust_consumerd_state = CONSUMER_STOPPED;

	ust32_channel_monitor_pipe = lttng_pipe_open(0);
	if (!ust32_channel_monitor_pipe) {
		ERR("Failed to create 32-bit user space consumer channel monitor pipe");
		retval = -1;
		goto exit_init_data;
	}
	ustconsumer32_data.channel_monitor_pipe = lttng_pipe_release_writefd(
			ust32_channel_monitor_pipe);
	if (ustconsumer32_data.channel_monitor_pipe < 0) {
		retval = -1;
		goto exit_init_data;
	}

	ust64_channel_monitor_pipe = lttng_pipe_open(0);
	if (!ust64_channel_monitor_pipe) {
		ERR("Failed to create 64-bit user space consumer channel monitor pipe");
		retval = -1;
		goto exit_init_data;
	}
	ustconsumer64_data.channel_monitor_pipe = lttng_pipe_release_writefd(
			ust64_channel_monitor_pipe);
	if (ustconsumer64_data.channel_monitor_pipe < 0) {
		retval = -1;
		goto exit_init_data;
	}
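	/*
	 * Each consumer_data structure only keeps the write end of its channel
	 * monitor pipe (lttng_pipe_release_writefd()); the read ends stay
	 * inside the lttng_pipe objects and are handed to the notification
	 * thread when its handle is created below, which is also why the pipes
	 * themselves are only destroyed during the final teardown.
	 */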
	/*
	 * Init UST app hash table. Alloc hash table before this point since
	 * cleanup() can get called after that point.
	 */
	if (ust_app_ht_alloc()) {
		ERR("Failed to allocate UST app hash table");
		retval = -1;
		goto exit_init_data;
	}

	/*
	 * Initialize agent app hash table. We allocate the hash table here
	 * since cleanup() can get called after this point.
	 */
	if (agent_app_ht_alloc()) {
		ERR("Failed to allocate Agent app hash table");
		retval = -1;
		goto exit_init_data;
	}

	/*
	 * These actions must be executed as root. We do that *after* setting up
	 * the sockets path because we MUST make the check for another daemon
	 * using those paths *before* trying to set the kernel consumer sockets
	 * and init the kernel tracer.
	 */
	if (is_root) {
		if (set_consumer_sockets(&kconsumer_data)) {
			retval = -1;
			goto exit_init_data;
		}

		/* Setup kernel tracer */
		if (!config.no_kernel) {
			init_kernel_tracer();
			if (kernel_tracer_fd >= 0) {
				ret = syscall_init_table();
				if (ret < 0) {
					ERR("Unable to populate syscall table. "
						"Syscall tracing won't work "
						"for this session daemon.");
				}
			}
		}

		/* Set ulimit for open files */
		set_ulimit();
	}

	/* init lttng_fd tracking must be done after set_ulimit. */
	lttng_fd_init();

	if (set_consumer_sockets(&ustconsumer64_data)) {
		retval = -1;
		goto exit_init_data;
	}

	if (set_consumer_sockets(&ustconsumer32_data)) {
		retval = -1;
		goto exit_init_data;
	}

	/* Setup the needed unix socket */
	if (init_daemon_socket()) {
		retval = -1;
		goto exit_init_data;
	}
	/* Set credentials to socket */
	if (is_root && set_permissions(config.rundir.value)) {
		retval = -1;
		goto exit_init_data;
	}

	/* Get parent pid if -S, --sig-parent is specified. */
	if (config.sig_parent) {
		ppid = getppid();
	}

	/* Setup the kernel pipe for waking up the kernel thread */
	if (is_root && !config.no_kernel) {
		if (utils_create_pipe_cloexec(kernel_poll_pipe)) {
			retval = -1;
			goto exit_init_data;
		}
	}

	/* Setup the thread apps communication pipe. */
	if (utils_create_pipe_cloexec(apps_cmd_pipe)) {
		retval = -1;
		goto exit_init_data;
	}

	/* Setup the thread apps notify communication pipe. */
	if (utils_create_pipe_cloexec(apps_cmd_notify_pipe)) {
		retval = -1;
		goto exit_init_data;
	}

	/* Initialize global buffer per UID and PID registry. */
	buffer_reg_init_uid_registry();
	buffer_reg_init_pid_registry();

	/* Init UST command queue. */
	cds_wfcq_init(&ust_cmd_queue.head, &ust_cmd_queue.tail);
	/*
	 * Get session list pointer. This pointer MUST NOT be free'd. This list
	 * is statically declared in session.c
	 */
	session_list_ptr = session_get_list();

	/* Check for the application socket timeout env variable. */
	env_app_timeout = getenv(DEFAULT_APP_SOCKET_TIMEOUT_ENV);
	if (env_app_timeout) {
		config.app_socket_timeout = atoi(env_app_timeout);
	} else {
		config.app_socket_timeout = DEFAULT_APP_SOCKET_RW_TIMEOUT;
	}

	ret = write_pidfile();
	if (ret) {
		ERR("Error in write_pidfile");
		retval = -1;
		goto exit_init_data;
	}
	/* Initialize communication library */
	lttcomm_init();
	/* Initialize TCP timeout values */
	lttcomm_inet_init();

	if (load_session_init_data(&load_info) < 0) {
		retval = -1;
		goto exit_init_data;
	}
	load_info->path = config.load_session_path.value;

	/* Create health-check thread. */
	ret = pthread_create(&health_thread, default_pthread_attr(),
			thread_manage_health, (void *) NULL);
	if (ret) {
		PERROR("pthread_create health");
		retval = -1;
		goto exit_health;
	}
	/* The notification thread handle acquires the pipes' read side. */
	notification_thread_handle = notification_thread_handle_create(
			ust32_channel_monitor_pipe,
			ust64_channel_monitor_pipe,
			kernel_channel_monitor_pipe);
	if (!notification_thread_handle) {
		retval = -1;
		ERR("Failed to create notification thread shared data");
		goto exit_notification;
	}

	/* Create notification thread. */
	ret = pthread_create(&notification_thread, default_pthread_attr(),
			thread_notification, notification_thread_handle);
	if (ret) {
		PERROR("pthread_create notification");
		retval = -1;
		goto exit_notification;
	}
	notification_thread_running = true;
	/* Create thread to manage the client socket */
	ret = pthread_create(&client_thread, default_pthread_attr(),
			thread_manage_clients, (void *) NULL);
	if (ret) {
		PERROR("pthread_create clients");
		retval = -1;
		goto exit_client;
	}

	/* Create thread to dispatch registration */
	ret = pthread_create(&dispatch_thread, default_pthread_attr(),
			thread_dispatch_ust_registration, (void *) NULL);
	if (ret) {
		PERROR("pthread_create dispatch");
		retval = -1;
		goto exit_dispatch;
	}

	/* Create thread to manage application registration. */
	ret = pthread_create(&reg_apps_thread, default_pthread_attr(),
			thread_registration_apps, (void *) NULL);
	if (ret) {
		PERROR("pthread_create registration");
		retval = -1;
		goto exit_reg_apps;
	}

	/* Create thread to manage application socket */
	ret = pthread_create(&apps_thread, default_pthread_attr(),
			thread_manage_apps, (void *) NULL);
	if (ret) {
		PERROR("pthread_create apps");
		retval = -1;
		goto exit_apps;
	}

	/* Create thread to manage application notify socket */
	ret = pthread_create(&apps_notify_thread, default_pthread_attr(),
			ust_thread_manage_notify, (void *) NULL);
	if (ret) {
		PERROR("pthread_create notify");
		retval = -1;
		goto exit_apps_notify;
	}

	/* Create agent registration thread. */
	ret = pthread_create(&agent_reg_thread, default_pthread_attr(),
			agent_thread_manage_registration, (void *) NULL);
	if (ret) {
		PERROR("pthread_create agent");
		retval = -1;
		goto exit_agent_reg;
	}

	/* Only start the kernel thread if kernel tracing is requested and we are root. */
	if (is_root && !config.no_kernel) {
		/* Create kernel thread to manage kernel event */
		ret = pthread_create(&kernel_thread, default_pthread_attr(),
				thread_manage_kernel, (void *) NULL);
		if (ret) {
			PERROR("pthread_create kernel");
			retval = -1;
			goto exit_kernel;
		}
	}

	/* Create session loading thread. */
	ret = pthread_create(&load_session_thread, default_pthread_attr(),
			thread_load_session, load_info);
	if (ret) {
		PERROR("pthread_create load_session_thread");
		retval = -1;
		goto exit_load_session;
	}
	/*
	 * This is where we start awaiting program completion (e.g. through
	 * signal that asks threads to teardown).
	 */

	ret = pthread_join(load_session_thread, &status);
	if (ret) {
		PERROR("pthread_join load_session_thread");
		retval = -1;
	}
exit_load_session:

	if (is_root && !config.no_kernel) {
		ret = pthread_join(kernel_thread, &status);
		if (ret) {
			PERROR("pthread_join kernel");
			retval = -1;
		}
	}
exit_kernel:

	ret = pthread_join(agent_reg_thread, &status);
	if (ret) {
		PERROR("pthread_join agent");
		retval = -1;
	}
exit_agent_reg:

	ret = pthread_join(apps_notify_thread, &status);
	if (ret) {
		PERROR("pthread_join apps notify");
		retval = -1;
	}
exit_apps_notify:

	ret = pthread_join(apps_thread, &status);
	if (ret) {
		PERROR("pthread_join apps");
		retval = -1;
	}
exit_apps:

	ret = pthread_join(reg_apps_thread, &status);
	if (ret) {
		PERROR("pthread_join registration");
		retval = -1;
	}
exit_reg_apps:

	/*
	 * Join dispatch thread after joining reg_apps_thread to ensure
	 * we don't leak applications in the queue.
	 */
	ret = pthread_join(dispatch_thread, &status);
	if (ret) {
		PERROR("pthread_join dispatch");
		retval = -1;
	}
exit_dispatch:

	ret = pthread_join(client_thread, &status);
	if (ret) {
		PERROR("pthread_join client");
		retval = -1;
	}
exit_client:
exit_notification:

	ret = pthread_join(health_thread, &status);
	if (ret) {
		PERROR("pthread_join health thread");
		retval = -1;
	}
exit_health:
exit_init_data:
	/*
	 * Wait for all pending call_rcu work to complete before tearing
	 * down data structures. call_rcu worker may be trying to
	 * perform lookups in those structures.
	 */
	rcu_barrier();

	/*
	 * sessiond_cleanup() is called when no other thread is running, except
	 * the ht_cleanup thread, which is needed to destroy the hash tables.
	 */
	rcu_thread_online();
	sessiond_cleanup();

	/*
	 * Ensure all prior call_rcu are done. call_rcu callbacks may push
	 * hash tables to the ht_cleanup thread. Therefore, we ensure that
	 * the queue is empty before shutting down the clean-up thread.
	 */
	rcu_barrier();

	/*
	 * The teardown of the notification system is performed after the
	 * session daemon's teardown in order to allow it to be notified
	 * of the active session and channels at the moment of the teardown.
	 */
	if (notification_thread_handle) {
		if (notification_thread_running) {
			notification_thread_command_quit(
					notification_thread_handle);
			ret = pthread_join(notification_thread, &status);
			if (ret) {
				PERROR("pthread_join notification thread");
				retval = -1;
			}
		}
		notification_thread_handle_destroy(notification_thread_handle);
	}

	rcu_thread_offline();
	rcu_unregister_thread();

	ret = fini_ht_cleanup_thread(&ht_cleanup_thread);
	if (ret) {
		retval = -1;
	}
	lttng_pipe_destroy(ust32_channel_monitor_pipe);
	lttng_pipe_destroy(ust64_channel_monitor_pipe);
	lttng_pipe_destroy(kernel_channel_monitor_pipe);
exit_ht_cleanup:

	health_app_destroy(health_sessiond);
exit_health_sessiond_cleanup:
exit_create_run_as_worker_cleanup:

exit_options:
	sessiond_cleanup_lock_file();
	sessiond_cleanup_options();

exit_set_signal_handler: