/*
 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <sys/mount.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>

#include <urcu/uatomic.h>

#include <common/common.h>
#include <common/compat/socket.h>
#include <common/defaults.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/futex.h>
#include <common/relayd/relayd.h>
#include <common/utils.h>

#include "lttng-sessiond.h"
#include "buffer-registry.h"
#include "kernel-consumer.h"
#include "ust-consumer.h"
#include "health-sessiond.h"
#include "testpoint.h"
#include "ust-thread.h"
#define CONSUMERD_FILE	"lttng-consumerd"
static const char *tracing_group_name = DEFAULT_TRACING_GROUP;
static const char *opt_pidfile;
static int opt_sig_parent;
static int opt_verbose_consumer;
static int opt_daemon;
static int opt_no_kernel;
static int is_root;		/* Set to 1 if the daemon is running as root */
static pid_t ppid;		/* Parent PID for --sig-parent option */
/*
 * Consumer daemon specific control data. Every value not initialized here is
 * set to 0 by the static definition.
 */
static struct consumer_data kconsumer_data = {
	.type = LTTNG_CONSUMER_KERNEL,
	.err_unix_sock_path = DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_KCONSUMERD_CMD_SOCK_PATH,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
static struct consumer_data ustconsumer64_data = {
	.type = LTTNG_CONSUMER64_UST,
	.err_unix_sock_path = DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
static struct consumer_data ustconsumer32_data = {
	.type = LTTNG_CONSUMER32_UST,
	.err_unix_sock_path = DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
/* Shared between threads */
static int dispatch_thread_exit;

/* Global application Unix socket path */
static char apps_unix_sock_path[PATH_MAX];
/* Global client Unix socket path */
static char client_unix_sock_path[PATH_MAX];
/* Global wait shm path for UST */
static char wait_shm_path[PATH_MAX];
/* Global health check unix path */
static char health_unix_sock_path[PATH_MAX];

/* Sockets and FDs */
static int client_sock = -1;
static int apps_sock = -1;
int kernel_tracer_fd = -1;
static int kernel_poll_pipe[2] = { -1, -1 };
/*
 * Quit pipe for all threads. This permits a single cancellation point
 * for all threads when receiving an event on the pipe.
 */
static int thread_quit_pipe[2] = { -1, -1 };

/*
 * This pipe is used to inform the thread managing application communication
 * that a command is queued and ready to be processed.
 */
static int apps_cmd_pipe[2] = { -1, -1 };

int apps_cmd_notify_pipe[2] = { -1, -1 };
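/*
 * How these pipes cooperate, drawn from the code below (orientation only):
 * every thread registers thread_quit_pipe[0] in its poll set through
 * sessiond_set_thread_pollset(), and stop_threads() wakes them all with a
 * single notify_thread_pipe(thread_quit_pipe[1]). The apps_cmd_pipe carries
 * application socket fds, sizeof(int) at a time, from the dispatch thread
 * (send_socket_to_thread()) to thread_manage_apps().
 */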
/* Pthread, Mutexes and Semaphores */
static pthread_t apps_thread;
static pthread_t apps_notify_thread;
static pthread_t reg_apps_thread;
static pthread_t client_thread;
static pthread_t kernel_thread;
static pthread_t dispatch_thread;
static pthread_t health_thread;
static pthread_t ht_cleanup_thread;
/*
 * UST registration command queue. This queue is tied to a futex and uses an
 * N-wakers / 1-waiter scheme implemented and detailed in futex.c/.h
 *
 * The thread_manage_apps and thread_dispatch_ust_registration threads
 * interact with this queue and the wait/wake scheme.
 */
static struct ust_cmd_queue ust_cmd_queue;
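/*
 * Interaction sketch, based on the uses found in this file (not an
 * additional API):
 *
 *   wakers, N threads (e.g. thread_registration_apps):
 *	cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);
 *	futex_nto1_wake(&ust_cmd_queue.futex);
 *
 *   waiter, single thread (thread_dispatch_ust_registration):
 *	futex_nto1_prepare(&ust_cmd_queue.futex);
 *	... drain the queue with cds_wfq_dequeue_blocking() ...
 *	futex_nto1_wait(&ust_cmd_queue.futex);
 */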
/*
 * Pointer initialized before thread creation.
 *
 * This points to the tracing session list containing the session count and a
 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
 * MUST NOT be taken if you call a public function in session.c.
 *
 * The lock is nested inside the structure: session_list_ptr->lock. Please use
 * session_lock_list and session_unlock_list for lock acquisition.
 */
static struct ltt_session_list *session_list_ptr;

int ust_consumerd64_fd = -1;
int ust_consumerd32_fd = -1;
static const char *consumerd32_bin = CONFIG_CONSUMERD32_BIN;
static const char *consumerd64_bin = CONFIG_CONSUMERD64_BIN;
static const char *consumerd32_libdir = CONFIG_CONSUMERD32_LIBDIR;
static const char *consumerd64_libdir = CONFIG_CONSUMERD64_LIBDIR;

static const char *module_proc_lttng = "/proc/lttng";
/*
 * Consumer daemon state which is changed when spawning it, killing it or in
 * case of a fatal error.
 */
enum consumerd_state {
	CONSUMER_STARTED = 1,
	CONSUMER_STOPPED = 2,
	CONSUMER_ERROR   = 3,
};
/*
 * This consumer daemon state is used to validate if a client command will be
 * able to reach the consumer. If not, the client is informed. For instance,
 * doing a "lttng start" when the consumer state is set to ERROR will return an
 * error to the client.
 *
 * The following example shows a possible race condition of this scheme:
 *
 * consumer thread error happens
 *                                    client cmd checks state -> still OK
 * consumer thread exit, sets error
 *                                    client cmd try to talk to consumer
 *                                    ...
 *
 * However, since the consumer is a different daemon, we have no way of making
 * sure the command will reach it safely even with this state flag. This is why
 * we consider that up to the state validation during command processing, the
 * command is safe. After that, we can not guarantee the correctness of the
 * client request vis-a-vis the consumer.
 */
static enum consumerd_state ust_consumerd_state;
static enum consumerd_state kernel_consumerd_state;
/*
 * Socket timeout for receiving and sending in seconds.
 */
static int app_socket_timeout;

/* Set in main() with the current page size. */
long page_size;

/* Application health monitoring */
struct health_app *health_sessiond;
static void setup_consumerd_path(void)
{
	const char *bin, *libdir;

	/*
	 * Allow INSTALL_BIN_PATH to be used as a target path for the
	 * native architecture size consumer if CONFIG_CONSUMER*_PATH
	 * has not been defined.
	 */
#if (CAA_BITS_PER_LONG == 32)
	if (!consumerd32_bin[0]) {
		consumerd32_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
	}
	if (!consumerd32_libdir[0]) {
		consumerd32_libdir = INSTALL_LIB_PATH;
	}
#elif (CAA_BITS_PER_LONG == 64)
	if (!consumerd64_bin[0]) {
		consumerd64_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
	}
	if (!consumerd64_libdir[0]) {
		consumerd64_libdir = INSTALL_LIB_PATH;
	}
#else
#error "Unknown bitness"
#endif

	/*
	 * Runtime env. var. overrides the build default.
	 */
	bin = getenv("LTTNG_CONSUMERD32_BIN");
	if (bin) {
		consumerd32_bin = bin;
	}
	bin = getenv("LTTNG_CONSUMERD64_BIN");
	if (bin) {
		consumerd64_bin = bin;
	}
	libdir = getenv("LTTNG_CONSUMERD32_LIBDIR");
	if (libdir) {
		consumerd32_libdir = libdir;
	}
	libdir = getenv("LTTNG_CONSUMERD64_LIBDIR");
	if (libdir) {
		consumerd64_libdir = libdir;
	}
}
/*
 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
 */
int sessiond_set_thread_pollset(struct lttng_poll_event *events, size_t size)
{
	int ret;

	ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error;
	}

	/* Add quit pipe */
	ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
	if (ret < 0) {
		goto error;
	}

	return 0;

error:
	return ret;
}
/*
 * Check if the thread quit pipe was triggered.
 *
 * Return 1 if it was triggered else 0.
 */
int sessiond_check_thread_quit_pipe(int fd, uint32_t events)
{
	if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
		return 1;
	}

	return 0;
}
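/*
 * Typical per-thread use of the two helpers above, repeated in the thread
 * functions below (sketch):
 *
 *	ret = sessiond_set_thread_pollset(&events, 2);
 *	...
 *	ret = lttng_poll_wait(&events, -1);
 *	for (i = 0; i < nb_fd; i++) {
 *		revents = LTTNG_POLL_GETEV(&events, i);
 *		pollfd = LTTNG_POLL_GETFD(&events, i);
 *		if (sessiond_check_thread_quit_pipe(pollfd, revents)) {
 *			goto exit;
 *		}
 *	}
 */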
/*
 * Init thread quit pipe.
 *
 * Return -1 on error or 0 if all pipes are created.
 */
static int init_thread_quit_pipe(void)
{
	int ret, i;

	ret = pipe(thread_quit_pipe);
	if (ret < 0) {
		PERROR("thread quit pipe");
		goto error;
	}

	for (i = 0; i < 2; i++) {
		ret = fcntl(thread_quit_pipe[i], F_SETFD, FD_CLOEXEC);
		if (ret < 0) {
			PERROR("fcntl thread quit pipe");
			goto error;
		}
	}

error:
	return ret;
}
/*
 * Stop all threads by closing the thread quit pipe.
 */
static void stop_threads(void)
{
	int ret;

	/* Stopping all threads */
	DBG("Terminating all threads");
	ret = notify_thread_pipe(thread_quit_pipe[1]);
	if (ret < 0) {
		ERR("write error on thread quit pipe");
	}

	/* Dispatch thread */
	CMM_STORE_SHARED(dispatch_thread_exit, 1);
	futex_nto1_wake(&ust_cmd_queue.futex);
}
/*
 * Close every consumer socket.
 */
static void close_consumer_sockets(void)
{
	int ret;

	if (kconsumer_data.err_sock >= 0) {
		ret = close(kconsumer_data.err_sock);
		if (ret < 0) {
			PERROR("kernel consumer err_sock close");
		}
	}
	if (ustconsumer32_data.err_sock >= 0) {
		ret = close(ustconsumer32_data.err_sock);
		if (ret < 0) {
			PERROR("UST consumerd32 err_sock close");
		}
	}
	if (ustconsumer64_data.err_sock >= 0) {
		ret = close(ustconsumer64_data.err_sock);
		if (ret < 0) {
			PERROR("UST consumerd64 err_sock close");
		}
	}
	if (kconsumer_data.cmd_sock >= 0) {
		ret = close(kconsumer_data.cmd_sock);
		if (ret < 0) {
			PERROR("kernel consumer cmd_sock close");
		}
	}
	if (ustconsumer32_data.cmd_sock >= 0) {
		ret = close(ustconsumer32_data.cmd_sock);
		if (ret < 0) {
			PERROR("UST consumerd32 cmd_sock close");
		}
	}
	if (ustconsumer64_data.cmd_sock >= 0) {
		ret = close(ustconsumer64_data.cmd_sock);
		if (ret < 0) {
			PERROR("UST consumerd64 cmd_sock close");
		}
	}
}
static void cleanup(void)
{
	int ret;
	char path[PATH_MAX];
	struct ltt_session *sess, *stmp;

	DBG("Cleaning up");

	/*
	 * Close the thread quit pipe. It has already done its job,
	 * since we are now called.
	 */
	utils_close_pipe(thread_quit_pipe);

	/*
	 * If opt_pidfile is undefined, the default file will be wiped when
	 * removing the rundir.
	 */
	if (opt_pidfile) {
		ret = remove(opt_pidfile);
		if (ret < 0) {
			PERROR("remove pidfile %s", opt_pidfile);
		}
	}

	DBG("Removing sessiond and consumerd content of directory %s", rundir);

	/* sessiond pidfile */
	snprintf(path, PATH_MAX, "%s/%s",
			rundir, DEFAULT_LTTNG_SESSIOND_PIDFILE);
	DBG("Removing %s", path);
	(void) unlink(path);

	/* kernel consumerd */
	snprintf(path, PATH_MAX, DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
			rundir);
	DBG("Removing %s", path);
	(void) unlink(path);

	snprintf(path, PATH_MAX, DEFAULT_KCONSUMERD_PATH,
			rundir);
	DBG("Removing directory %s", path);
	(void) rmdir(path);

	/* ust consumerd 32 */
	snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
			rundir);
	DBG("Removing %s", path);
	(void) unlink(path);

	snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD32_PATH,
			rundir);
	DBG("Removing directory %s", path);
	(void) rmdir(path);

	/* ust consumerd 64 */
	snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
			rundir);
	DBG("Removing %s", path);
	(void) unlink(path);

	snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD64_PATH,
			rundir);
	DBG("Removing directory %s", path);
	(void) rmdir(path);

	DBG("Cleaning up all sessions");

	/* Destroy session list mutex */
	if (session_list_ptr != NULL) {
		pthread_mutex_destroy(&session_list_ptr->lock);

		/* Cleanup ALL session */
		cds_list_for_each_entry_safe(sess, stmp,
				&session_list_ptr->head, list) {
			cmd_destroy_session(sess, kernel_poll_pipe[1]);
		}
	}

	DBG("Closing all UST sockets");
	ust_app_clean_list();
	buffer_reg_destroy_registries();

	if (is_root && !opt_no_kernel) {
		DBG2("Closing kernel fd");
		if (kernel_tracer_fd >= 0) {
			ret = close(kernel_tracer_fd);
			if (ret) {
				PERROR("close");
			}
		}
		DBG("Unloading kernel modules");
		modprobe_remove_lttng_all();
	}

	close_consumer_sockets();

	DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
		"Matthew, BEET driven development works!%c[%dm",
		27, 1, 31, 27, 0, 27, 1, 33, 27, 0);
}
/*
 * Send data on a unix socket using the liblttsessiondcomm API.
 *
 * Return lttcomm error code.
 */
static int send_unix_sock(int sock, void *buf, size_t len)
{
	/* Check valid length */
	if (len == 0) {
		return -1;
	}

	return lttcomm_send_unix_sock(sock, buf, len);
}
/*
 * Free memory of a command context structure.
 */
static void clean_command_ctx(struct command_ctx **cmd_ctx)
{
	DBG("Clean command context structure");
	if (*cmd_ctx) {
		if ((*cmd_ctx)->llm) {
			free((*cmd_ctx)->llm);
		}
		if ((*cmd_ctx)->lsm) {
			free((*cmd_ctx)->lsm);
		}
		free(*cmd_ctx);
		*cmd_ctx = NULL;
	}
}
/*
 * Notify UST applications using the shm mmap futex.
 */
static int notify_ust_apps(int active)
{
	char *wait_shm_mmap;

	DBG("Notifying applications of session daemon state: %d", active);

	/* See shm.c for this call implying mmap, shm and futex calls */
	wait_shm_mmap = shm_ust_get_mmap(wait_shm_path, is_root);
	if (wait_shm_mmap == NULL) {
		goto error;
	}

	/* Wake waiting process */
	futex_wait_update((int32_t *) wait_shm_mmap, active);

	/* Apps notified successfully */
	return 0;

error:
	return -1;
}
/*
 * Setup the outgoing data buffer for the response (llm) by allocating the
 * right amount of memory and copying the original information from the lsm
 * structure.
 *
 * Return total size of the buffer pointed to by buf.
 */
static int setup_lttng_msg(struct command_ctx *cmd_ctx, size_t size)
{
	int ret, buf_size;

	buf_size = size;

	cmd_ctx->llm = zmalloc(sizeof(struct lttcomm_lttng_msg) + buf_size);
	if (cmd_ctx->llm == NULL) {
		PERROR("zmalloc");
		ret = -ENOMEM;
		goto error;
	}

	/* Copy common data */
	cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
	cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;

	cmd_ctx->llm->data_size = size;
	cmd_ctx->lttng_msg_size = sizeof(struct lttcomm_lttng_msg) + buf_size;

	return buf_size;

error:
	return ret;
}
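/*
 * Resulting llm buffer layout (a sketch of what the code above allocates):
 *
 *   cmd_ctx->llm --> +---------------------------+
 *                    | struct lttcomm_lttng_msg  |  cmd_type, pid, data_size
 *                    +---------------------------+
 *                    | payload (buf_size bytes)  |
 *                    +---------------------------+
 *
 *   cmd_ctx->lttng_msg_size == sizeof(struct lttcomm_lttng_msg) + buf_size
 */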
/*
 * Update the kernel poll set of all channel fds available over all tracing
 * sessions. Add the wakeup pipe at the end of the set.
 */
static int update_kernel_poll(struct lttng_poll_event *events)
{
	int ret;
	struct ltt_session *session;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel poll set");

	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}

		cds_list_for_each_entry(channel,
				&session->kernel_session->channel_list.head, list) {
			/* Add channel fd to the kernel poll set */
			ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
			if (ret < 0) {
				session_unlock(session);
				goto error;
			}
			DBG("Channel fd %d added to kernel set", channel->fd);
		}
		session_unlock(session);
	}
	session_unlock_list();

	return 0;

error:
	session_unlock_list();
	return -1;
}
/*
 * Find the channel fd from 'fd' over all tracing sessions. When found, check
 * for new channel streams and send those stream fds to the kernel consumer.
 *
 * Useful for the CPU hotplug feature.
 */
static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
{
	int ret = 0;
	struct ltt_session *session;
	struct ltt_kernel_session *ksess;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel streams for channel fd %d", fd);

	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}
		ksess = session->kernel_session;

		cds_list_for_each_entry(channel, &ksess->channel_list.head, list) {
			if (channel->fd == fd) {
				DBG("Channel found, updating kernel streams");
				ret = kernel_open_channel_stream(channel);
				if (ret < 0) {
					goto error;
				}
				/* Update the stream global counter */
				ksess->stream_count_global += ret;

				/*
				 * Have we already sent fds to the consumer? If yes, it means
				 * that tracing is started so it is safe to send our updated
				 * stream fds.
				 */
				if (ksess->consumer_fds_sent == 1 && ksess->consumer != NULL) {
					struct lttng_ht_iter iter;
					struct consumer_socket *socket;

					rcu_read_lock();
					cds_lfht_for_each_entry(ksess->consumer->socks->ht,
							&iter.iter, socket, node.node) {
						pthread_mutex_lock(socket->lock);
						ret = kernel_consumer_send_channel_stream(socket,
								channel, ksess,
								session->output_traces ? 1 : 0);
						pthread_mutex_unlock(socket->lock);
						if (ret < 0) {
							rcu_read_unlock();
							goto error;
						}
					}
					rcu_read_unlock();
				}
				goto error;
			}
		}
		session_unlock(session);
	}
	session_unlock_list();
	return ret;

error:
	session_unlock(session);
	session_unlock_list();
	return ret;
}
/*
 * For each tracing session, update newly registered apps. The session list
 * lock MUST be acquired before calling this.
 */
static void update_ust_app(int app_sock)
{
	struct ltt_session *sess, *stmp;

	/* Consumer is in an ERROR state. Stop any application update. */
	if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
		/* Stop the update process since the consumer is dead. */
		return;
	}

	/* For all tracing session(s) */
	cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
		session_lock(sess);
		if (sess->ust_session) {
			ust_app_global_update(sess->ust_session, app_sock);
		}
		session_unlock(sess);
	}
}
/*
 * This thread manages events coming from the kernel.
 *
 * Features supported in this thread:
 *    -) CPU hotplug
 */
static void *thread_manage_kernel(void *data)
{
	int ret, i, pollfd, update_poll_flag = 1, err = -1;
	uint32_t revents, nb_fd;
	char tmp;
	struct lttng_poll_event events;

	DBG("[thread] Thread manage kernel started");

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_KERNEL);

	/*
	 * This first step of the while is to clean this structure which could free
	 * non NULL pointers so initialize it before the loop.
	 */
	lttng_poll_init(&events);

	if (testpoint(thread_manage_kernel)) {
		goto error_testpoint;
	}

	health_code_update();

	if (testpoint(thread_manage_kernel_before_loop)) {
		goto error_testpoint;
	}

	while (1) {
		health_code_update();

		if (update_poll_flag == 1) {
			/* Clean events object. We are about to populate it again. */
			lttng_poll_clean(&events);

			ret = sessiond_set_thread_pollset(&events, 2);
			if (ret < 0) {
				goto error_poll_create;
			}

			ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);
			if (ret < 0) {
				goto error;
			}

			/* This will add the available kernel channel if any. */
			ret = update_kernel_poll(&events);
			if (ret < 0) {
				goto error;
			}
			update_poll_flag = 0;
		}

		DBG("Thread kernel polling on %d fds", LTTNG_POLL_GETNB(&events));

		/* Poll infinite value of time */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		} else if (ret == 0) {
			/* Should not happen since timeout is infinite */
			ERR("Return value of poll is 0 with an infinite timeout.\n"
				"This should not have happened! Continuing...");
			continue;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Check for data on kernel pipe */
			if (pollfd == kernel_poll_pipe[0] && (revents & LPOLLIN)) {
				do {
					ret = read(kernel_poll_pipe[0], &tmp, 1);
				} while (ret < 0 && errno == EINTR);
				/*
				 * Ret value is useless here, if this pipe gets any actions an
				 * update is required anyway.
				 */
				update_poll_flag = 1;
				continue;
			} else {
				/*
				 * New CPU detected by the kernel. Adding kernel stream to
				 * kernel session and updating the kernel consumer
				 */
				if (revents & LPOLLIN) {
					ret = update_kernel_stream(&kconsumer_data, pollfd);
					if (ret < 0) {
						continue;
					}
					break;
				}
				/*
				 * TODO: We might want to handle the LPOLLERR | LPOLLHUP
				 * and unregister kernel stream at this point.
				 */
			}
		}
	}

exit:
error:
	lttng_poll_clean(&events);

	utils_close_pipe(kernel_poll_pipe);
	kernel_poll_pipe[0] = kernel_poll_pipe[1] = -1;

error_testpoint:
error_poll_create:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
		WARN("Kernel thread died unexpectedly. "
			"Kernel tracing can continue but CPU hotplug is disabled.");
	}
	health_unregister(health_sessiond);
	DBG("Kernel thread dying");
	return NULL;
}
/*
 * Signal the pthread condition of the consumer data so the thread waiting on
 * it is woken up.
 */
static void signal_consumer_condition(struct consumer_data *data, int state)
{
	pthread_mutex_lock(&data->cond_mutex);

	/*
	 * The state is set before signaling. It can be any value, it's the waiter
	 * job to correctly interpret this condition variable associated to the
	 * consumer pthread_cond.
	 *
	 * A value of 0 means that the corresponding thread of the consumer data
	 * was not started. 1 indicates that the thread has started and is ready
	 * for action. A negative value means that there was an error during the
	 * thread startup.
	 */
	data->consumer_thread_is_ready = state;
	(void) pthread_cond_signal(&data->cond);

	pthread_mutex_unlock(&data->cond_mutex);
}
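/*
 * The matching waiter is in spawn_consumer_thread() below: it holds
 * cond_mutex and loops in pthread_cond_timedwait() until
 * consumer_thread_is_ready changes, so the state written above is always
 * observed under the mutex.
 */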
/*
 * This thread manages the consumer error messages sent back to the session
 * daemon.
 */
static void *thread_manage_consumer(void *data)
{
	int sock = -1, i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	enum lttcomm_return_code code;
	struct lttng_poll_event events;
	struct consumer_data *consumer_data = data;

	DBG("[thread] Manage consumer started");

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_CONSUMER);

	health_code_update();

	/*
	 * Pass 3 as size here for the thread quit pipe, consumerd_err_sock and the
	 * metadata_sock. Nothing more will be added to this poll set.
	 */
	ret = sessiond_set_thread_pollset(&events, 3);
	if (ret < 0) {
		goto error_poll;
	}

	/*
	 * The error socket here is already in a listening state which was done
	 * just before spawning this thread to avoid a race between the consumer
	 * daemon exec trying to connect and the listen() call.
	 */
	ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Infinite blocking call, waiting for transmission */
restart:
	health_poll_entry();

	if (testpoint(thread_manage_consumer)) {
		goto error;
	}

	ret = lttng_poll_wait(&events, -1);
	health_poll_exit();
	if (ret < 0) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		goto error;
	}

	nb_fd = ret;

	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);

		health_code_update();

		/* Thread quit pipe has been closed. Killing thread. */
		ret = sessiond_check_thread_quit_pipe(pollfd, revents);
		if (ret) {
			err = 0;
			goto exit;
		}

		/* Event on the registration socket */
		if (pollfd == consumer_data->err_sock) {
			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("consumer err socket poll error");
				goto error;
			}
		}
	}

	sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
	if (sock < 0) {
		goto error;
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	health_code_update();

	DBG2("Receiving code from consumer err_sock");

	/* Getting status code from kconsumerd */
	ret = lttcomm_recv_unix_sock(sock, &code,
			sizeof(enum lttcomm_return_code));
	if (ret <= 0) {
		goto error;
	}

	health_code_update();

	if (code == LTTCOMM_CONSUMERD_COMMAND_SOCK_READY) {
		/* Connect both socket, command and metadata. */
		consumer_data->cmd_sock =
			lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
		consumer_data->metadata_fd =
			lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
		if (consumer_data->cmd_sock < 0
				|| consumer_data->metadata_fd < 0) {
			PERROR("consumer connect cmd socket");
			/* On error, signal condition and quit. */
			signal_consumer_condition(consumer_data, -1);
			goto error;
		}
		consumer_data->metadata_sock.fd_ptr = &consumer_data->metadata_fd;
		/* Create metadata socket lock. */
		consumer_data->metadata_sock.lock = zmalloc(sizeof(pthread_mutex_t));
		if (consumer_data->metadata_sock.lock == NULL) {
			PERROR("zmalloc pthread mutex");
			ret = -1;
			goto error;
		}
		pthread_mutex_init(consumer_data->metadata_sock.lock, NULL);

		signal_consumer_condition(consumer_data, 1);
		DBG("Consumer command socket ready (fd: %d)", consumer_data->cmd_sock);
		DBG("Consumer metadata socket ready (fd: %d)",
				consumer_data->metadata_fd);
	} else {
		ERR("consumer error when waiting for SOCK_READY : %s",
				lttcomm_get_readable_code(-code));
		goto error;
	}

	/* Remove the consumerd error sock since we've established a connection */
	ret = lttng_poll_del(&events, consumer_data->err_sock);
	if (ret < 0) {
		goto error;
	}

	/* Add new accepted error socket. */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	/* Add metadata socket that is successfully connected. */
	ret = lttng_poll_add(&events, consumer_data->metadata_fd,
			LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	while (1) {
		/* Infinite blocking call, waiting for transmission */
	restart_poll:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart_poll;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			if (pollfd == sock) {
				/* Event on the consumerd socket */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("consumer err socket second poll error");
					goto error;
				}
				health_code_update();
				/* Wait for any kconsumerd error */
				ret = lttcomm_recv_unix_sock(sock, &code,
						sizeof(enum lttcomm_return_code));
				if (ret <= 0) {
					ERR("consumer closed the command socket");
					goto error;
				}

				ERR("consumer return code : %s",
						lttcomm_get_readable_code(-code));

				goto exit;
			} else if (pollfd == consumer_data->metadata_fd) {
				/* UST metadata requests */
				ret = ust_consumer_metadata_request(
						&consumer_data->metadata_sock);
				if (ret < 0) {
					ERR("Handling metadata request");
					goto error;
				}
			} else {
				ERR("Unknown pollfd");
				goto error;
			}
		}
		health_code_update();
	}

exit:
error:
	/*
	 * We lock here because we are about to close the sockets and some other
	 * thread might be using them so get exclusive access which will abort all
	 * other consumer command by other threads.
	 */
	pthread_mutex_lock(&consumer_data->lock);

	/* Immediately set the consumerd state to stopped */
	if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
		uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR);
	} else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
			consumer_data->type == LTTNG_CONSUMER32_UST) {
		uatomic_set(&ust_consumerd_state, CONSUMER_ERROR);
	} else {
		/* Code flow error... */
		assert(0);
	}

	if (consumer_data->err_sock >= 0) {
		ret = close(consumer_data->err_sock);
		if (ret) {
			PERROR("close");
		}
		consumer_data->err_sock = -1;
	}
	if (consumer_data->cmd_sock >= 0) {
		ret = close(consumer_data->cmd_sock);
		if (ret) {
			PERROR("close");
		}
		consumer_data->cmd_sock = -1;
	}
	if (*consumer_data->metadata_sock.fd_ptr >= 0) {
		ret = close(*consumer_data->metadata_sock.fd_ptr);
		if (ret) {
			PERROR("close");
		}
	}
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	unlink(consumer_data->err_unix_sock_path);
	unlink(consumer_data->cmd_unix_sock_path);
	consumer_data->pid = 0;
	pthread_mutex_unlock(&consumer_data->lock);

	/* Cleanup metadata socket mutex. */
	pthread_mutex_destroy(consumer_data->metadata_sock.lock);
	free(consumer_data->metadata_sock.lock);

	lttng_poll_clean(&events);
error_poll:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	DBG("consumer thread cleanup completed");

	return NULL;
}
/*
 * This thread manages application communication.
 */
static void *thread_manage_apps(void *data)
{
	int i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;

	DBG("[thread] Manage application started");

	rcu_register_thread();
	rcu_thread_online();

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_MANAGE);

	if (testpoint(thread_manage_apps)) {
		goto error_testpoint;
	}

	health_code_update();

	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, apps_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	if (testpoint(thread_manage_apps_before_loop)) {
		goto error;
	}

	health_code_update();

	while (1) {
		DBG("Apps thread polling on %d fds", LTTNG_POLL_GETNB(&events));

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Inspect the apps cmd pipe */
			if (pollfd == apps_cmd_pipe[0]) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Apps command pipe error");
					goto error;
				} else if (revents & LPOLLIN) {
					int sock;

					do {
						ret = read(apps_cmd_pipe[0], &sock, sizeof(sock));
					} while (ret < 0 && errno == EINTR);
					if (ret < 0 || ret < sizeof(sock)) {
						PERROR("read apps cmd pipe");
						goto error;
					}

					health_code_update();

					/*
					 * We only monitor the error events of the socket. This
					 * thread does not handle any incoming data from UST
					 * (yet).
					 */
					ret = lttng_poll_add(&events, sock,
							LPOLLERR | LPOLLHUP | LPOLLRDHUP);
					if (ret < 0) {
						goto error;
					}

					DBG("Apps with sock %d added to poll set", sock);
				}
			} else {
				/*
				 * At this point, we know that a registered application made
				 * the event at poll_wait.
				 */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					/* Removing from the poll set */
					ret = lttng_poll_del(&events, pollfd);
					if (ret < 0) {
						goto error;
					}

					/* Socket closed on remote end. */
					ust_app_unregister(pollfd);
				}
			}

			health_code_update();
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
error_testpoint:
	utils_close_pipe(apps_cmd_pipe);
	apps_cmd_pipe[0] = apps_cmd_pipe[1] = -1;

	/*
	 * We don't clean the UST app hash table here since already registered
	 * applications can still be controlled so let them be until the session
	 * daemon dies or the applications stop.
	 */

	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	DBG("Application communication apps thread cleanup complete");
	rcu_thread_offline();
	rcu_unregister_thread();
	return NULL;
}
/*
 * Send a socket to a thread. This is called from the dispatch UST registration
 * thread once all sockets are set for the application.
 *
 * The sock value can be invalid, we don't really care, the thread will handle
 * it and make the necessary cleanup if so.
 *
 * On success, return 0, else a negative value carrying the errno of the
 * failed write().
 */
static int send_socket_to_thread(int fd, int sock)
{
	int ret;

	/*
	 * It's possible that the FD is set as invalid with -1 concurrently just
	 * before calling this function being a shutdown state of the thread.
	 */
	if (fd < 0) {
		ret = -EBADF;
		goto error;
	}

	do {
		ret = write(fd, &sock, sizeof(sock));
	} while (ret < 0 && errno == EINTR);
	if (ret < 0 || ret != sizeof(sock)) {
		PERROR("write apps pipe %d", fd);
		if (ret < 0) {
			ret = -errno;
		}
		goto error;
	}

	/* All good. Don't send back the write positive ret value. */
	ret = 0;
error:
	return ret;
}
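/*
 * Note on the size check above: POSIX guarantees that writes of at most
 * PIPE_BUF bytes to a pipe are atomic, so a short write of sizeof(int)
 * should not occur in practice; the ret != sizeof(sock) test is defensive.
 */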
/*
 * Sanitize the wait queue of the dispatch registration thread, meaning remove
 * invalid nodes from it. This is to avoid memory leaks in the case the UST
 * notify socket is never received.
 */
static void sanitize_wait_queue(struct ust_reg_wait_queue *wait_queue)
{
	int ret, nb_fd = 0, i;
	unsigned int fd_added = 0;
	struct lttng_poll_event events;
	struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;

	assert(wait_queue);

	lttng_poll_init(&events);

	/* Just skip everything for an empty queue. */
	if (!wait_queue->count) {
		goto end;
	}

	ret = lttng_poll_create(&events, wait_queue->count, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error_create;
	}

	cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
			&wait_queue->head, head) {
		assert(wait_node->app);
		ret = lttng_poll_add(&events, wait_node->app->sock,
				LPOLLHUP | LPOLLERR);
		if (ret < 0) {
			goto error;
		}

		fd_added = 1;
	}

	if (!fd_added) {
		goto end;
	}

	/*
	 * Poll but don't block so we can quickly identify the faulty events and
	 * clean them afterwards from the wait queue.
	 */
	ret = lttng_poll_wait(&events, 0);
	if (ret < 0) {
		goto error;
	}
	nb_fd = ret;

	for (i = 0; i < nb_fd; i++) {
		/* Get faulty FD. */
		uint32_t revents = LTTNG_POLL_GETEV(&events, i);
		int pollfd = LTTNG_POLL_GETFD(&events, i);

		cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
				&wait_queue->head, head) {
			if (pollfd == wait_node->app->sock &&
					(revents & (LPOLLHUP | LPOLLERR))) {
				cds_list_del(&wait_node->head);
				wait_queue->count--;
				ust_app_destroy(wait_node->app);
				free(wait_node);
				break;
			}
		}
	}

	if (nb_fd > 0) {
		DBG("Wait queue sanitized, %d nodes were cleaned up", nb_fd);
	}

end:
	lttng_poll_clean(&events);
	return;

error:
	lttng_poll_clean(&events);
error_create:
	ERR("Unable to sanitize wait queue");
	return;
}
/*
 * Dispatch request from the registration threads to the application
 * communication thread.
 */
static void *thread_dispatch_ust_registration(void *data)
{
	int ret, err = -1;
	struct cds_wfq_node *node;
	struct ust_command *ust_cmd = NULL;
	struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
	struct ust_reg_wait_queue wait_queue = {
		.count = 0,
	};

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG_DISPATCH);

	health_code_update();

	CDS_INIT_LIST_HEAD(&wait_queue.head);

	DBG("[thread] Dispatch UST command started");

	while (!CMM_LOAD_SHARED(dispatch_thread_exit)) {
		health_code_update();

		/* Atomically prepare the queue futex */
		futex_nto1_prepare(&ust_cmd_queue.futex);

		do {
			struct ust_app *app = NULL;
			ust_cmd = NULL;

			/*
			 * Make sure we don't have node(s) that have hung up before receiving
			 * the notify socket. This is to clean the list in order to avoid
			 * memory leaks from notify socket that are never seen.
			 */
			sanitize_wait_queue(&wait_queue);

			health_code_update();
			/* Dequeue command for registration */
			node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue);
			if (node == NULL) {
				DBG("Woken up but nothing in the UST command queue");
				/* Continue thread execution */
				break;
			}

			ust_cmd = caa_container_of(node, struct ust_command, node);

			DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
					" gid:%d sock:%d name:%s (version %d.%d)",
					ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
					ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
					ust_cmd->sock, ust_cmd->reg_msg.name,
					ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

			if (ust_cmd->reg_msg.type == USTCTL_SOCKET_CMD) {
				wait_node = zmalloc(sizeof(*wait_node));
				if (!wait_node) {
					PERROR("zmalloc wait_node dispatch");
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(1, LTTNG_FD_APPS);
					free(ust_cmd);
					goto error;
				}
				CDS_INIT_LIST_HEAD(&wait_node->head);

				/* Create application object if socket is CMD. */
				wait_node->app = ust_app_create(&ust_cmd->reg_msg,
						ust_cmd->sock);
				if (!wait_node->app) {
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(1, LTTNG_FD_APPS);
					free(wait_node);
					free(ust_cmd);
					continue;
				}
				/*
				 * Add application to the wait queue so we can set the notify
				 * socket before putting this object in the global ht.
				 */
				cds_list_add(&wait_node->head, &wait_queue.head);
				wait_queue.count++;

				free(ust_cmd);
				/*
				 * We have to continue here since we don't have the notify
				 * socket and the application MUST be added to the hash table
				 * only at that moment.
				 */
				continue;
			} else {
				/*
				 * Look for the application in the local wait queue and set the
				 * notify socket if found.
				 */
				cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
						&wait_queue.head, head) {
					health_code_update();
					if (wait_node->app->pid == ust_cmd->reg_msg.pid) {
						wait_node->app->notify_sock = ust_cmd->sock;
						cds_list_del(&wait_node->head);
						wait_queue.count--;
						app = wait_node->app;
						free(wait_node);
						DBG3("UST app notify socket %d is set", ust_cmd->sock);
						break;
					}
				}

				/*
				 * With no application at this stage the received socket is
				 * basically useless so close it before we free the cmd data
				 * structure for good.
				 */
				if (!app) {
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(1, LTTNG_FD_APPS);
				}
				free(ust_cmd);
			}

			if (app) {
				/*
				 * @session_lock_list
				 *
				 * Lock the global session list so from the register up to the
				 * registration done message, no thread can see the application
				 * and change its state.
				 */
				session_lock_list();

				/*
				 * Add application to the global hash table. This needs to be
				 * done before the update to the UST registry can locate the
				 * application.
				 */
				ust_app_add(app);

				/* Set app version. This call will print an error if needed. */
				(void) ust_app_version(app);

				/* Send notify socket through the notify pipe. */
				ret = send_socket_to_thread(apps_cmd_notify_pipe[1],
						app->notify_sock);
				if (ret < 0) {
					session_unlock_list();
					/*
					 * No notify thread, stop the UST tracing. However, this is
					 * not an internal error of this thread thus setting
					 * the health error code to a normal exit.
					 */
					err = 0;
					goto error;
				}

				/*
				 * Update newly registered application with the tracing
				 * registry info already enabled information.
				 */
				update_ust_app(app->sock);

				/*
				 * Don't care about return value. Let the manage apps threads
				 * handle app unregistration upon socket close.
				 */
				(void) ust_app_register_done(app->sock);

				/*
				 * Even if the application socket has been closed, send the app
				 * to the thread and unregistration will take place at that
				 * point.
				 */
				ret = send_socket_to_thread(apps_cmd_pipe[1], app->sock);
				if (ret < 0) {
					session_unlock_list();
					/*
					 * No apps. thread, stop the UST tracing. However, this is
					 * not an internal error of this thread thus setting
					 * the health error code to a normal exit.
					 */
					err = 0;
					goto error;
				}

				session_unlock_list();
			}
		} while (node != NULL);

		health_poll_entry();
		/* Futex wait on queue. Blocking call on futex() */
		futex_nto1_wait(&ust_cmd_queue.futex);
		health_poll_exit();
	}
	/* Normal exit, no error */
	err = 0;

error:
	/* Clean up wait queue. */
	cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
			&wait_queue.head, head) {
		cds_list_del(&wait_node->head);
		wait_queue.count--;
		free(wait_node);
	}

	DBG("Dispatch thread dying");
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	return NULL;
}
/*
 * This thread manages application registration.
 */
static void *thread_registration_apps(void *data)
{
	int sock = -1, i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	/*
	 * Gets allocated in this thread, enqueued to a global queue, dequeued and
	 * freed in the manage apps thread.
	 */
	struct ust_command *ust_cmd = NULL;

	DBG("[thread] Manage application registration started");

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG);

	if (testpoint(thread_registration_apps)) {
		goto error_testpoint;
	}

	ret = lttcomm_listen_unix_sock(apps_sock);
	if (ret < 0) {
		goto error_listen;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
	 * more will be added to this poll set.
	 */
	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error_create_poll;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, apps_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error_poll_add;
	}

	/* Notify all applications to register */
	ret = notify_ust_apps(1);
	if (ret < 0) {
		ERR("Failed to notify applications or create the wait shared memory.\n"
			"Execution continues but there might be problems for already\n"
			"running applications that wish to register.");
	}

	while (1) {
		DBG("Accepting application registration");

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == apps_sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Register apps socket poll error");
					goto error;
				} else if (revents & LPOLLIN) {
					sock = lttcomm_accept_unix_sock(apps_sock);
					if (sock < 0) {
						goto error;
					}

					/*
					 * Set socket timeout for both receiving and sending.
					 * app_socket_timeout is in seconds, whereas
					 * lttcomm_setsockopt_rcv_timeout and
					 * lttcomm_setsockopt_snd_timeout expect msec as
					 * parameter.
					 */
					(void) lttcomm_setsockopt_rcv_timeout(sock,
							app_socket_timeout * 1000);
					(void) lttcomm_setsockopt_snd_timeout(sock,
							app_socket_timeout * 1000);

					/*
					 * Set the CLOEXEC flag. Return code is useless because
					 * either way, the show must go on.
					 */
					(void) utils_set_fd_cloexec(sock);

					/* Create UST registration command for enqueuing */
					ust_cmd = zmalloc(sizeof(struct ust_command));
					if (ust_cmd == NULL) {
						PERROR("ust command zmalloc");
						goto error;
					}

					/*
					 * Using message-based transmissions to ensure we don't
					 * have to deal with partially received messages.
					 */
					ret = lttng_fd_get(LTTNG_FD_APPS, 1);
					if (ret < 0) {
						ERR("Exhausted file descriptors allowed for applications.");
						free(ust_cmd);
						ret = close(sock);
						if (ret) {
							PERROR("close");
						}
						sock = -1;
						continue;
					}

					health_code_update();
					ret = ust_app_recv_registration(sock, &ust_cmd->reg_msg);
					if (ret < 0) {
						free(ust_cmd);
						/* Close socket of the application. */
						ret = close(sock);
						if (ret) {
							PERROR("close");
						}
						lttng_fd_put(LTTNG_FD_APPS, 1);
						sock = -1;
						continue;
					}
					health_code_update();

					ust_cmd->sock = sock;
					sock = -1;

					DBG("UST registration received with pid:%d ppid:%d uid:%d"
							" gid:%d sock:%d name:%s (version %d.%d)",
							ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
							ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
							ust_cmd->sock, ust_cmd->reg_msg.name,
							ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

					/*
					 * Lock free enqueue the registration request. The red pill
					 * has been taken! This apps will be part of the *system*.
					 */
					cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);

					/*
					 * Wake the registration queue futex. Implicit memory
					 * barrier with the exchange in cds_wfq_enqueue.
					 */
					futex_nto1_wake(&ust_cmd_queue.futex);
				}
			}
		}
	}

exit:
error:
	/* Notify that the registration thread is gone */
	notify_ust_apps(0);

	if (apps_sock >= 0) {
		ret = close(apps_sock);
		if (ret) {
			PERROR("close");
		}
	}
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
	}
	unlink(apps_unix_sock_path);

error_poll_add:
	lttng_poll_clean(&events);
error_listen:
error_create_poll:
error_testpoint:
	DBG("UST Registration thread cleanup complete");
	health_unregister(health_sessiond);

	return NULL;
}
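/*
 * Registration flow recap, as implemented by this thread and the dispatch
 * and manage threads above (orientation comment only):
 *
 *   app connects -> thread_registration_apps(): accept + receive reg_msg,
 *                   enqueue a ust_command on ust_cmd_queue, futex wake
 *                -> thread_dispatch_ust_registration(): dequeue, pair the
 *                   command and notify sockets, push the fds through
 *                   apps_cmd_notify_pipe and apps_cmd_pipe
 *                -> thread_manage_apps(): poll the app socket for errors.
 */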
/*
 * Start the thread_manage_consumer. This must be done after a lttng-consumerd
 * exec or it will fail.
 */
static int spawn_consumer_thread(struct consumer_data *consumer_data)
{
	int ret, clock_ret;
	struct timespec timeout;

	/* Make sure we set the readiness flag to 0 because we are NOT ready */
	consumer_data->consumer_thread_is_ready = 0;

	/* Setup pthread condition */
	ret = pthread_condattr_init(&consumer_data->condattr);
	if (ret != 0) {
		PERROR("pthread_condattr_init consumer data");
		goto error;
	}

	/*
	 * Set the monotonic clock in order to make sure we DO NOT jump in time
	 * between the clock_gettime() call and the timedwait call. See bug #324
	 * for more details and how we noticed it.
	 */
	ret = pthread_condattr_setclock(&consumer_data->condattr, CLOCK_MONOTONIC);
	if (ret != 0) {
		PERROR("pthread_condattr_setclock consumer data");
		goto error;
	}

	ret = pthread_cond_init(&consumer_data->cond, &consumer_data->condattr);
	if (ret != 0) {
		PERROR("pthread_cond_init consumer data");
		goto error;
	}

	ret = pthread_create(&consumer_data->thread, NULL, thread_manage_consumer,
			consumer_data);
	if (ret != 0) {
		PERROR("pthread_create consumer");
		ret = -1;
		goto error;
	}

	/* We are about to wait on a pthread condition */
	pthread_mutex_lock(&consumer_data->cond_mutex);

	/* Get time for sem_timedwait absolute timeout */
	clock_ret = clock_gettime(CLOCK_MONOTONIC, &timeout);
	/*
	 * Set the timeout for the condition timed wait even if the clock gettime
	 * call fails since we might loop on that call and we want to avoid to
	 * increment the timeout too many times.
	 */
	timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;

	/*
	 * The following loop COULD be skipped in some conditions so this is why we
	 * set ret to 0 in order to make sure at least one round of the loop is
	 * done.
	 */
	ret = 0;

	/*
	 * Loop until the condition is reached or when a timeout is reached. Note
	 * that the pthread_cond_timedwait(P) man page specifies that EINTR can NOT
	 * be returned but the pthread_cond(3), from the glibc-doc, says that it is
	 * possible. This loop does not take any chances and works with both of
	 * them.
	 */
	while (!consumer_data->consumer_thread_is_ready && ret != ETIMEDOUT) {
		if (clock_ret < 0) {
			PERROR("clock_gettime spawn consumer");
			/* Infinite wait for the consumerd thread to be ready */
			ret = pthread_cond_wait(&consumer_data->cond,
					&consumer_data->cond_mutex);
		} else {
			ret = pthread_cond_timedwait(&consumer_data->cond,
					&consumer_data->cond_mutex, &timeout);
		}
	}

	/* Release the pthread condition */
	pthread_mutex_unlock(&consumer_data->cond_mutex);

	/* Handle timeout */
	if (ret == ETIMEDOUT) {
		/*
		 * Call has timed out so we kill the kconsumerd_thread and return
		 * an error.
		 */
		ERR("Condition timed out. The consumer thread was never ready."
				" Killing it");
		ret = pthread_cancel(consumer_data->thread);
		if (ret < 0) {
			PERROR("pthread_cancel consumer thread");
		}
		ret = -1;
		goto error;
	} else if (ret != 0) {
		PERROR("pthread_cond_wait failed consumer thread");
		ret = -1;
		goto error;
	}

	pthread_mutex_lock(&consumer_data->pid_mutex);
	if (consumer_data->pid == 0) {
		ERR("Consumerd did not start");
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto error;
	}
	pthread_mutex_unlock(&consumer_data->pid_mutex);

	return 0;

error:
	return ret;
}

/*
 * Join consumer thread
 */
static int join_consumer_thread(struct consumer_data *consumer_data)
{
	void *status;

	/* Consumer pid must be a real one. */
	if (consumer_data->pid > 0) {
		int ret;

		ret = kill(consumer_data->pid, SIGTERM);
		if (ret) {
			ERR("Error killing consumer daemon");
			return ret;
		}
	}
	return pthread_join(consumer_data->thread, &status);
}
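/*
 * Shutdown pairing, as the code above suggests: join_consumer_thread() sends
 * SIGTERM to the consumerd process; once that process closes its end of the
 * error socket, thread_manage_consumer() falls out of its poll loop and
 * pthread_join() can return. (Orientation comment, not a guarantee.)
 */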
/*
 * Fork and exec a consumer daemon (consumerd).
 *
 * Return pid if successful else -1.
 */
static pid_t spawn_consumerd(struct consumer_data *consumer_data)
{
	int ret;
	pid_t pid;
	const char *consumer_to_use;
	const char *verbosity;
	struct stat st;

	DBG("Spawning consumerd");

	pid = fork();
	if (pid == 0) {
		/*
		 * Exec consumerd.
		 */
		if (opt_verbose_consumer) {
			verbosity = "--verbose";
		} else {
			verbosity = "--quiet";
		}
		switch (consumer_data->type) {
		case LTTNG_CONSUMER_KERNEL:
			/*
			 * Find out which consumerd to execute. We will first try the
			 * 64-bit path, then the sessiond's installation directory, and
			 * fallback on the 32-bit one.
			 */
			DBG3("Looking for a kernel consumer at these locations:");
			DBG3("	1) %s", consumerd64_bin);
			DBG3("	2) %s/%s", INSTALL_BIN_PATH, CONSUMERD_FILE);
			DBG3("	3) %s", consumerd32_bin);
			if (stat(consumerd64_bin, &st) == 0) {
				DBG3("Found location #1");
				consumer_to_use = consumerd64_bin;
			} else if (stat(INSTALL_BIN_PATH "/" CONSUMERD_FILE, &st) == 0) {
				DBG3("Found location #2");
				consumer_to_use = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
			} else if (stat(consumerd32_bin, &st) == 0) {
				DBG3("Found location #3");
				consumer_to_use = consumerd32_bin;
			} else {
				DBG("Could not find any valid consumerd executable");
				ret = -EINVAL;
				break;
			}
			DBG("Using kernel consumer at: %s", consumer_to_use);
			execl(consumer_to_use,
					"lttng-consumerd", verbosity, "-k",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					"--group", tracing_group_name,
					NULL);
			break;
		case LTTNG_CONSUMER64_UST:
		{
			char *tmpnew = NULL;

			if (consumerd64_libdir[0] != '\0') {
				char *tmp;
				size_t tmplen;

				tmp = getenv("LD_LIBRARY_PATH");
				if (!tmp) {
					tmp = "";
				}
				tmplen = strlen("LD_LIBRARY_PATH=")
					+ strlen(consumerd64_libdir) + 1 /* : */ + strlen(tmp);
				tmpnew = zmalloc(tmplen + 1 /* \0 */);
				if (!tmpnew) {
					ret = -ENOMEM;
					goto error;
				}
				strcpy(tmpnew, "LD_LIBRARY_PATH=");
				strcat(tmpnew, consumerd64_libdir);
				if (tmp[0] != '\0') {
					strcat(tmpnew, ":");
					strcat(tmpnew, tmp);
				}
				ret = putenv(tmpnew);
				if (ret) {
					ret = -errno;
					free(tmpnew);
					goto error;
				}
			}
			DBG("Using 64-bit UST consumer at: %s", consumerd64_bin);
			ret = execl(consumerd64_bin, "lttng-consumerd", verbosity, "-u",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					"--group", tracing_group_name,
					NULL);
			if (consumerd64_libdir[0] != '\0') {
				free(tmpnew);
			}
			break;
		}
		case LTTNG_CONSUMER32_UST:
		{
			char *tmpnew = NULL;

			if (consumerd32_libdir[0] != '\0') {
				char *tmp;
				size_t tmplen;

				tmp = getenv("LD_LIBRARY_PATH");
				if (!tmp) {
					tmp = "";
				}
				tmplen = strlen("LD_LIBRARY_PATH=")
					+ strlen(consumerd32_libdir) + 1 /* : */ + strlen(tmp);
				tmpnew = zmalloc(tmplen + 1 /* \0 */);
				if (!tmpnew) {
					ret = -ENOMEM;
					goto error;
				}
				strcpy(tmpnew, "LD_LIBRARY_PATH=");
				strcat(tmpnew, consumerd32_libdir);
				if (tmp[0] != '\0') {
					strcat(tmpnew, ":");
					strcat(tmpnew, tmp);
				}
				ret = putenv(tmpnew);
				if (ret) {
					ret = -errno;
					free(tmpnew);
					goto error;
				}
			}
			DBG("Using 32-bit UST consumer at: %s", consumerd32_bin);
			ret = execl(consumerd32_bin, "lttng-consumerd", verbosity, "-u",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					"--group", tracing_group_name,
					NULL);
			if (consumerd32_libdir[0] != '\0') {
				free(tmpnew);
			}
			break;
		}
		default:
			PERROR("unknown consumer type");
			exit(EXIT_FAILURE);
		}
		if (errno != 0) {
			PERROR("kernel start consumer exec");
		}
		exit(EXIT_FAILURE);
	} else if (pid > 0) {
		ret = pid;
	} else {
		PERROR("start consumer fork");
		ret = -errno;
	}
error:
	return ret;
}
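/*
 * Illustration of the environment variable built by the UST branches above
 * before execl() (values are placeholders):
 *
 *   LD_LIBRARY_PATH=<consumerdNN_libdir>:<previous LD_LIBRARY_PATH>
 *
 * The previous value is appended after a ':' only when it was non-empty.
 */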
/*
 * Spawn the consumerd daemon and session daemon thread.
 */
static int start_consumerd(struct consumer_data *consumer_data)
{
	int ret;

	/*
	 * Set the listen() state on the socket since there is a possible race
	 * between the exec() of the consumer daemon and this call if placed in the
	 * consumer thread. See bug #366 for more details.
	 */
	ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
	if (ret < 0) {
		goto error;
	}

	pthread_mutex_lock(&consumer_data->pid_mutex);
	if (consumer_data->pid != 0) {
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto end;
	}

	ret = spawn_consumerd(consumer_data);
	if (ret < 0) {
		ERR("Spawning consumerd failed");
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto error;
	}

	/* Setting up the consumer_data pid */
	consumer_data->pid = ret;
	DBG2("Consumer pid %d", consumer_data->pid);
	pthread_mutex_unlock(&consumer_data->pid_mutex);

	DBG2("Spawning consumer control thread");
	ret = spawn_consumer_thread(consumer_data);
	if (ret < 0) {
		ERR("Fatal error spawning consumer control thread");
		goto error;
	}

end:
	return 0;

error:
	/* Cleanup already created sockets on error. */
	if (consumer_data->err_sock >= 0) {
		int err;

		err = close(consumer_data->err_sock);
		if (err) {
			PERROR("close consumer data error socket");
		}
	}
	return ret;
}
/*
 * Setup necessary data for kernel tracer action.
 */
static int init_kernel_tracer(void)
{
	int ret;

	/* Modprobe lttng kernel modules */
	ret = modprobe_lttng_control();
	if (ret < 0) {
		goto error;
	}

	/* Open debugfs lttng */
	kernel_tracer_fd = open(module_proc_lttng, O_RDWR);
	if (kernel_tracer_fd < 0) {
		DBG("Failed to open %s", module_proc_lttng);
		ret = -1;
		goto error_open;
	}

	/* Validate kernel version */
	ret = kernel_validate_version(kernel_tracer_fd);
	if (ret < 0) {
		goto error_version;
	}

	ret = modprobe_lttng_data();
	if (ret < 0) {
		goto error_modules;
	}

	DBG("Kernel tracer fd %d", kernel_tracer_fd);
	return 0;

error_version:
	modprobe_remove_lttng_control();
	ret = close(kernel_tracer_fd);
	if (ret) {
		PERROR("close");
	}
	kernel_tracer_fd = -1;
	return LTTNG_ERR_KERN_VERSION;

error_modules:
	ret = close(kernel_tracer_fd);
	if (ret) {
		PERROR("close");
	}

error_open:
	modprobe_remove_lttng_control();

error:
	WARN("No kernel tracer available");
	kernel_tracer_fd = -1;
	if (!is_root) {
		return LTTNG_ERR_NEED_ROOT_SESSIOND;
	} else {
		return LTTNG_ERR_KERN_NA;
	}
}
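/*
 * Ordering note for the function above (a reading aid derived from the code
 * itself): the control modules are probed first so that module_proc_lttng
 * exists, the ABI version is then validated through the freshly opened fd,
 * and only afterwards are the data (probe) modules loaded. On a version
 * mismatch everything is rolled back and LTTNG_ERR_KERN_VERSION is returned.
 */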
/*
 * Copy consumer output from the tracing session to the domain session. The
 * function also applies the right modification on a per domain basis for the
 * trace files destination directory.
 *
 * Should *NOT* be called with RCU read-side lock held.
 */
static int copy_session_consumer(int domain, struct ltt_session *session)
{
	int ret;
	const char *dir_name;
	struct consumer_output *consumer;

	assert(session);
	assert(session->consumer);

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		DBG3("Copying tracing session consumer output in kernel session");
		/*
		 * XXX: We should audit the session creation and what this function
		 * does "extra" in order to avoid a destroy since this function is used
		 * in the domain session creation (kernel and ust) only. Same for UST
		 * domain.
		 */
		if (session->kernel_session->consumer) {
			consumer_destroy_output(session->kernel_session->consumer);
		}
		session->kernel_session->consumer =
			consumer_copy_output(session->consumer);
		/* Ease our life a bit for the next part */
		consumer = session->kernel_session->consumer;
		dir_name = DEFAULT_KERNEL_TRACE_DIR;
		break;
	case LTTNG_DOMAIN_UST:
		DBG3("Copying tracing session consumer output in UST session");
		if (session->ust_session->consumer) {
			consumer_destroy_output(session->ust_session->consumer);
		}
		session->ust_session->consumer =
			consumer_copy_output(session->consumer);
		/* Ease our life a bit for the next part */
		consumer = session->ust_session->consumer;
		dir_name = DEFAULT_UST_TRACE_DIR;
		break;
	default:
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	/* Append correct directory to subdir */
	strncat(consumer->subdir, dir_name,
			sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
	DBG3("Copy session consumer subdir %s", consumer->subdir);

	ret = LTTNG_OK;

error:
	return ret;
}
/*
 * Create an UST session and add it to the session ust list.
 *
 * Should *NOT* be called with RCU read-side lock held.
 */
static int create_ust_session(struct ltt_session *session,
		struct lttng_domain *domain)
{
	int ret;
	struct ltt_ust_session *lus = NULL;

	assert(session);
	assert(domain);
	assert(session->consumer);

	switch (domain->type) {
	case LTTNG_DOMAIN_UST:
		break;
	default:
		ERR("Unknown UST domain on create session %d", domain->type);
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	DBG("Creating UST session");

	lus = trace_ust_create_session(session->id);
	if (lus == NULL) {
		ret = LTTNG_ERR_UST_SESS_FAIL;
		goto error;
	}

	lus->uid = session->uid;
	lus->gid = session->gid;
	lus->output_traces = session->output_traces;
	lus->snapshot_mode = session->snapshot_mode;
	lus->live_timer_interval = session->live_timer;
	session->ust_session = lus;

	/* Copy session output to the newly created UST session */
	ret = copy_session_consumer(domain->type, session);
	if (ret != LTTNG_OK) {
		goto error;
	}

	return LTTNG_OK;

error:
	free(lus);
	session->ust_session = NULL;
	return ret;
}
/*
 * Create a kernel tracer session then create the default channel.
 */
static int create_kernel_session(struct ltt_session *session)
{
	int ret;

	DBG("Creating kernel session");

	ret = kernel_create_session(session, kernel_tracer_fd);
	if (ret < 0) {
		ret = LTTNG_ERR_KERN_SESS_FAIL;
		goto error;
	}

	/* Code flow safety */
	assert(session->kernel_session);

	/* Copy session output to the newly created Kernel session */
	ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
	if (ret != LTTNG_OK) {
		goto error;
	}

	/* Create directory(ies) on local filesystem. */
	if (session->kernel_session->consumer->type == CONSUMER_DST_LOCAL &&
			strlen(session->kernel_session->consumer->dst.trace_path) > 0) {
		ret = run_as_mkdir_recursive(
				session->kernel_session->consumer->dst.trace_path,
				S_IRWXU | S_IRWXG, session->uid, session->gid);
		if (ret < 0) {
			if (ret != -EEXIST) {
				ERR("Trace directory creation error");
				goto error;
			}
		}
	}

	session->kernel_session->uid = session->uid;
	session->kernel_session->gid = session->gid;
	session->kernel_session->output_traces = session->output_traces;
	session->kernel_session->snapshot_mode = session->snapshot_mode;

	return LTTNG_OK;

error:
	trace_kernel_destroy_session(session->kernel_session);
	session->kernel_session = NULL;
	return ret;
}
/*
 * Count the number of sessions permitted by uid/gid.
 */
static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
{
	unsigned int i = 0;
	struct ltt_session *session;

	DBG("Counting number of available session for UID %d GID %d",
			uid, gid);
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		/*
		 * Only list the sessions the user can control.
		 */
		if (!session_access_ok(session, uid, gid)) {
			continue;
		}
		i++;
	}
	return i;
}
/*
 * Process the command requested by the lttng client within the command
 * context structure. This function makes sure that the return structure (llm)
 * is set and ready for transmission before returning.
 *
 * Return any error encountered or 0 for success.
 *
 * "sock" is only used for special-case var. len data.
 *
 * Should *NOT* be called with RCU read-side lock held.
 */
static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
		int *sock_error)
{
	int ret = LTTNG_OK;
	int need_tracing_session = 1;
	int need_domain;

	DBG("Processing client command %d", cmd_ctx->lsm->cmd_type);

	*sock_error = 0;

	switch (cmd_ctx->lsm->cmd_type) {
	case LTTNG_CREATE_SESSION:
	case LTTNG_CREATE_SESSION_SNAPSHOT:
	case LTTNG_CREATE_SESSION_LIVE:
	case LTTNG_DESTROY_SESSION:
	case LTTNG_LIST_SESSIONS:
	case LTTNG_LIST_DOMAINS:
	case LTTNG_START_TRACE:
	case LTTNG_STOP_TRACE:
	case LTTNG_DATA_PENDING:
	case LTTNG_SNAPSHOT_ADD_OUTPUT:
	case LTTNG_SNAPSHOT_DEL_OUTPUT:
	case LTTNG_SNAPSHOT_LIST_OUTPUT:
	case LTTNG_SNAPSHOT_RECORD:
		need_domain = 0;
		break;
	default:
		need_domain = 1;
	}

	if (opt_no_kernel && need_domain
			&& cmd_ctx->lsm->domain.type == LTTNG_DOMAIN_KERNEL) {
		if (!is_root) {
			ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
		} else {
			ret = LTTNG_ERR_KERN_NA;
		}
		goto error;
	}

	/* Deny register consumer if we already have a spawned consumer. */
	if (cmd_ctx->lsm->cmd_type == LTTNG_REGISTER_CONSUMER) {
		pthread_mutex_lock(&kconsumer_data.pid_mutex);
		if (kconsumer_data.pid > 0) {
			ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
			pthread_mutex_unlock(&kconsumer_data.pid_mutex);
			goto error;
		}
		pthread_mutex_unlock(&kconsumer_data.pid_mutex);
	}
	/*
	 * Check for commands that don't need to allocate a returned payload.
	 * We do this here so we don't have to make the call for no payload at
	 * each command.
	 */
	switch (cmd_ctx->lsm->cmd_type) {
	case LTTNG_LIST_SESSIONS:
	case LTTNG_LIST_TRACEPOINTS:
	case LTTNG_LIST_TRACEPOINT_FIELDS:
	case LTTNG_LIST_DOMAINS:
	case LTTNG_LIST_CHANNELS:
	case LTTNG_LIST_EVENTS:
		break;
	default:
		/* Setup lttng message with no payload */
		ret = setup_lttng_msg(cmd_ctx, 0);
		if (ret < 0) {
			/* This label does not try to unlock the session */
			goto init_setup_error;
		}
	}
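
	/*
	 * For the list commands excluded above, the payload size is only
	 * known once the listing has run (e.g. cmd_list_events() further
	 * down returns the event count), so their reply buffer is
	 * allocated later, per command.
	 */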
	/* Commands that DO NOT need a session. */
	switch (cmd_ctx->lsm->cmd_type) {
	case LTTNG_CREATE_SESSION:
	case LTTNG_CREATE_SESSION_SNAPSHOT:
	case LTTNG_CREATE_SESSION_LIVE:
	case LTTNG_CALIBRATE:
	case LTTNG_LIST_SESSIONS:
	case LTTNG_LIST_TRACEPOINTS:
	case LTTNG_LIST_TRACEPOINT_FIELDS:
		need_tracing_session = 0;
		break;
	default:
		DBG("Getting session %s by name", cmd_ctx->lsm->session.name);
		/*
		 * We keep the session list lock across _all_ commands
		 * for now, because the per-session lock does not
		 * handle teardown properly.
		 */
		session_lock_list();
		cmd_ctx->session = session_find_by_name(cmd_ctx->lsm->session.name);
		if (cmd_ctx->session == NULL) {
			ret = LTTNG_ERR_SESS_NOT_FOUND;
			goto error;
		}
		/* Acquire lock for the session */
		session_lock(cmd_ctx->session);
		break;
	}
	if (!need_domain) {
		goto skip_domain;
	}

	/*
	 * Check domain type for specific "pre-action".
	 */
	switch (cmd_ctx->lsm->domain.type) {
	case LTTNG_DOMAIN_KERNEL:
		if (!is_root) {
			ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
			goto error;
		}

		/* Kernel tracer check */
		if (kernel_tracer_fd == -1) {
			/* Basically, load kernel tracer modules */
			ret = init_kernel_tracer();
			if (ret != 0) {
				goto error;
			}
		}

		/* Consumer is in an ERROR state. Report back to client */
		if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
			ret = LTTNG_ERR_NO_KERNCONSUMERD;
			goto error;
		}

		/* Need a session for kernel command */
		if (need_tracing_session) {
			if (cmd_ctx->session->kernel_session == NULL) {
				ret = create_kernel_session(cmd_ctx->session);
				if (ret < 0) {
					ret = LTTNG_ERR_KERN_SESS_FAIL;
					goto error;
				}
			}

			/* Start the kernel consumer daemon */
			pthread_mutex_lock(&kconsumer_data.pid_mutex);
			if (kconsumer_data.pid == 0 &&
					cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
				pthread_mutex_unlock(&kconsumer_data.pid_mutex);
				ret = start_consumerd(&kconsumer_data);
				if (ret < 0) {
					ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
					goto error;
				}
				uatomic_set(&kernel_consumerd_state, CONSUMER_STARTED);
			} else {
				pthread_mutex_unlock(&kconsumer_data.pid_mutex);
			}

			/*
			 * The consumer was just spawned so we need to add the socket to
			 * the consumer output of the session if it exists.
			 */
			ret = consumer_create_socket(&kconsumer_data,
					cmd_ctx->session->kernel_session->consumer);
			if (ret < 0) {
				goto error;
			}
		}

		break;
	case LTTNG_DOMAIN_JUL:
		/* JUL tracing is carried by the UST domain. */
		if (!ust_app_supported()) {
			ret = LTTNG_ERR_UNKNOWN_DOMAIN;
			goto error;
		}
		/* Fall through. */
	case LTTNG_DOMAIN_UST:
	{
		if (!ust_app_supported()) {
			ret = LTTNG_ERR_NO_UST;
			goto error;
		}

		/* Consumer is in an ERROR state. Report back to client */
		if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
			ret = LTTNG_ERR_NO_USTCONSUMERD;
			goto error;
		}

		if (need_tracing_session) {
			/* Create UST session if none exist. */
			if (cmd_ctx->session->ust_session == NULL) {
				ret = create_ust_session(cmd_ctx->session,
						&cmd_ctx->lsm->domain);
				if (ret != LTTNG_OK) {
					goto error;
				}
			}

			/* Start the UST consumer daemons */
			/* 64-bit */
			pthread_mutex_lock(&ustconsumer64_data.pid_mutex);
			if (consumerd64_bin[0] != '\0' &&
					ustconsumer64_data.pid == 0 &&
					cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
				pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
				ret = start_consumerd(&ustconsumer64_data);
				if (ret < 0) {
					ret = LTTNG_ERR_UST_CONSUMER64_FAIL;
					uatomic_set(&ust_consumerd64_fd, -EINVAL);
					goto error;
				}

				uatomic_set(&ust_consumerd64_fd, ustconsumer64_data.cmd_sock);
				uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
			} else {
				pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
			}

			/*
			 * Setup socket for consumer 64 bit. No need for atomic access
			 * since it was set above and can ONLY be set in this thread.
			 */
			ret = consumer_create_socket(&ustconsumer64_data,
					cmd_ctx->session->ust_session->consumer);
			if (ret < 0) {
				goto error;
			}

			/* 32-bit */
			pthread_mutex_lock(&ustconsumer32_data.pid_mutex);
			if (consumerd32_bin[0] != '\0' &&
					ustconsumer32_data.pid == 0 &&
					cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
				pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
				ret = start_consumerd(&ustconsumer32_data);
				if (ret < 0) {
					ret = LTTNG_ERR_UST_CONSUMER32_FAIL;
					uatomic_set(&ust_consumerd32_fd, -EINVAL);
					goto error;
				}

				uatomic_set(&ust_consumerd32_fd, ustconsumer32_data.cmd_sock);
				uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
			} else {
				pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
			}

			/*
			 * Setup socket for consumer 32 bit. No need for atomic access
			 * since it was set above and can ONLY be set in this thread.
			 */
			ret = consumer_create_socket(&ustconsumer32_data,
					cmd_ctx->session->ust_session->consumer);
			if (ret < 0) {
				goto error;
			}
		}
		break;
	}
	default:
		break;
	}
skip_domain:
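
	/*
	 * The 64-bit and 32-bit UST consumer branches above are symmetric:
	 * each daemon is spawned at most once, guarded by its pid_mutex, and
	 * its command socket is published through an ust_consumerd*_fd
	 * atomic so other threads observe a consistent value.
	 */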
	/* Validate consumer daemon state when start/stop trace command */
	if (cmd_ctx->lsm->cmd_type == LTTNG_START_TRACE ||
			cmd_ctx->lsm->cmd_type == LTTNG_STOP_TRACE) {
		switch (cmd_ctx->lsm->domain.type) {
		case LTTNG_DOMAIN_JUL:
		case LTTNG_DOMAIN_UST:
			if (uatomic_read(&ust_consumerd_state) != CONSUMER_STARTED) {
				ret = LTTNG_ERR_NO_USTCONSUMERD;
				goto error;
			}
			break;
		case LTTNG_DOMAIN_KERNEL:
			if (uatomic_read(&kernel_consumerd_state) != CONSUMER_STARTED) {
				ret = LTTNG_ERR_NO_KERNCONSUMERD;
				goto error;
			}
			break;
		default:
			break;
		}
	}

	/*
	 * Check that the UID or GID match that of the tracing session.
	 * The root user can interact with all sessions.
	 */
	if (need_tracing_session) {
		if (!session_access_ok(cmd_ctx->session,
				LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
				LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds))) {
			ret = LTTNG_ERR_EPERM;
			goto error;
		}
	}
	/*
	 * Send relayd information to consumer as soon as we have a domain and a
	 * session defined.
	 */
	if (cmd_ctx->session && need_domain) {
		/*
		 * Setup relayd if not done yet. If the relayd information was already
		 * sent to the consumer, this call will gracefully return.
		 */
		ret = cmd_setup_relayd(cmd_ctx->session);
		if (ret != LTTNG_OK) {
			goto error;
		}
	}
	/* Process by command type */
	switch (cmd_ctx->lsm->cmd_type) {
	case LTTNG_ADD_CONTEXT:
	{
		ret = cmd_add_context(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.context.channel_name,
				&cmd_ctx->lsm->u.context.ctx, kernel_poll_pipe[1]);
		break;
	}
	case LTTNG_DISABLE_CHANNEL:
	{
		ret = cmd_disable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.disable.channel_name);
		break;
	}
	case LTTNG_DISABLE_EVENT:
	{
		ret = cmd_disable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.disable.channel_name,
				cmd_ctx->lsm->u.disable.name);
		break;
	}
	case LTTNG_DISABLE_ALL_EVENT:
	{
		DBG("Disabling all events");

		ret = cmd_disable_event_all(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.disable.channel_name);
		break;
	}
	case LTTNG_ENABLE_CHANNEL:
	{
		ret = cmd_enable_channel(cmd_ctx->session, &cmd_ctx->lsm->domain,
				&cmd_ctx->lsm->u.channel.chan, kernel_poll_pipe[1]);
		break;
	}
	case LTTNG_ENABLE_EVENT:
	{
		ret = cmd_enable_event(cmd_ctx->session, &cmd_ctx->lsm->domain,
				cmd_ctx->lsm->u.enable.channel_name,
				&cmd_ctx->lsm->u.enable.event, NULL, kernel_poll_pipe[1]);
		break;
	}
	case LTTNG_ENABLE_ALL_EVENT:
	{
		DBG("Enabling all events");

		ret = cmd_enable_event_all(cmd_ctx->session, &cmd_ctx->lsm->domain,
				cmd_ctx->lsm->u.enable.channel_name,
				cmd_ctx->lsm->u.enable.event.type, NULL, kernel_poll_pipe[1]);
		break;
	}
	case LTTNG_LIST_TRACEPOINTS:
	{
		struct lttng_event *events;
		ssize_t nb_events;

		nb_events = cmd_list_tracepoints(cmd_ctx->lsm->domain.type, &events);
		if (nb_events < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_events;
			goto error;
		}

		/*
		 * Setup lttng message with payload size set to the event list size in
		 * bytes and then copy list into the llm payload.
		 */
		ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event) * nb_events);
		if (ret < 0) {
			free(events);
			goto setup_error;
		}

		/* Copy event list into message payload */
		memcpy(cmd_ctx->llm->payload, events,
				sizeof(struct lttng_event) * nb_events);

		free(events);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_TRACEPOINT_FIELDS:
	{
		struct lttng_event_field *fields;
		ssize_t nb_fields;

		nb_fields = cmd_list_tracepoint_fields(cmd_ctx->lsm->domain.type,
				&fields);
		if (nb_fields < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_fields;
			goto error;
		}

		/*
		 * Setup lttng message with payload size set to the event list size in
		 * bytes and then copy list into the llm payload.
		 */
		ret = setup_lttng_msg(cmd_ctx,
				sizeof(struct lttng_event_field) * nb_fields);
		if (ret < 0) {
			free(fields);
			goto setup_error;
		}

		/* Copy event list into message payload */
		memcpy(cmd_ctx->llm->payload, fields,
				sizeof(struct lttng_event_field) * nb_fields);

		free(fields);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_SET_CONSUMER_URI:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri == 0) {
			ret = LTTNG_ERR_INVALID;
			goto error;
		}

		uris = zmalloc(len);
		if (uris == NULL) {
			ret = LTTNG_ERR_FATAL;
			goto error;
		}

		/* Receive variable len data */
		DBG("Receiving %zu URI(s) from client ...", nb_uri);
		ret = lttcomm_recv_unix_sock(sock, uris, len);
		if (ret <= 0) {
			DBG("No URIs received from client... continuing");
			*sock_error = 1;
			ret = LTTNG_ERR_SESSION_FAIL;
			free(uris);
			goto error;
		}

		ret = cmd_set_consumer_uri(cmd_ctx->lsm->domain.type, cmd_ctx->session,
				nb_uri, uris);
		if (ret != LTTNG_OK) {
			free(uris);
			goto error;
		}

		/*
		 * XXX: 0 means that this URI should be applied on the session. Should
		 * be a DOMAIN enum.
		 */
		if (cmd_ctx->lsm->domain.type == 0) {
			/* Add the URI for the UST session if a consumer is present. */
			if (cmd_ctx->session->ust_session &&
					cmd_ctx->session->ust_session->consumer) {
				ret = cmd_set_consumer_uri(LTTNG_DOMAIN_UST, cmd_ctx->session,
						nb_uri, uris);
			} else if (cmd_ctx->session->kernel_session &&
					cmd_ctx->session->kernel_session->consumer) {
				ret = cmd_set_consumer_uri(LTTNG_DOMAIN_KERNEL,
						cmd_ctx->session, nb_uri, uris);
			}
		}

		free(uris);

		break;
	}
	case LTTNG_START_TRACE:
	{
		ret = cmd_start_trace(cmd_ctx->session);
		break;
	}
	case LTTNG_STOP_TRACE:
	{
		ret = cmd_stop_trace(cmd_ctx->session);
		break;
	}
	case LTTNG_CREATE_SESSION:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris = NULL;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri > 0) {
			uris = zmalloc(len);
			if (uris == NULL) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			/* Receive variable len data */
			DBG("Waiting for %zu URIs from client ...", nb_uri);
			ret = lttcomm_recv_unix_sock(sock, uris, len);
			if (ret <= 0) {
				DBG("No URIs received from client... continuing");
				*sock_error = 1;
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}

			if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
				DBG("Creating session with ONE network URI is a bad call");
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}
		}

		ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris, nb_uri,
				&cmd_ctx->creds, 0);

		free(uris);

		break;
	}
	case LTTNG_DESTROY_SESSION:
	{
		ret = cmd_destroy_session(cmd_ctx->session, kernel_poll_pipe[1]);

		/* Set session to NULL so we do not unlock it after free. */
		cmd_ctx->session = NULL;
		break;
	}
	case LTTNG_LIST_DOMAINS:
	{
		ssize_t nb_dom;
		struct lttng_domain *domains;

		nb_dom = cmd_list_domains(cmd_ctx->session, &domains);
		if (nb_dom < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_dom;
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx, nb_dom * sizeof(struct lttng_domain));
		if (ret < 0) {
			free(domains);
			goto setup_error;
		}

		/* Copy event list into message payload */
		memcpy(cmd_ctx->llm->payload, domains,
				nb_dom * sizeof(struct lttng_domain));

		free(domains);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_CHANNELS:
	{
		int nb_chan;
		struct lttng_channel *channels;

		nb_chan = cmd_list_channels(cmd_ctx->lsm->domain.type,
				cmd_ctx->session, &channels);
		if (nb_chan < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_chan;
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx, nb_chan * sizeof(struct lttng_channel));
		if (ret < 0) {
			free(channels);
			goto setup_error;
		}

		/* Copy event list into message payload */
		memcpy(cmd_ctx->llm->payload, channels,
				nb_chan * sizeof(struct lttng_channel));

		free(channels);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_EVENTS:
	{
		ssize_t nb_event;
		struct lttng_event *events = NULL;

		nb_event = cmd_list_events(cmd_ctx->lsm->domain.type, cmd_ctx->session,
				cmd_ctx->lsm->u.list.channel_name, &events);
		if (nb_event < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_event;
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx, nb_event * sizeof(struct lttng_event));
		if (ret < 0) {
			free(events);
			goto setup_error;
		}

		/* Copy event list into message payload */
		memcpy(cmd_ctx->llm->payload, events,
				nb_event * sizeof(struct lttng_event));

		free(events);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_SESSIONS:
	{
		unsigned int nr_sessions;

		session_lock_list();
		nr_sessions = lttng_sessions_count(
				LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
				LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));

		ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_session) * nr_sessions);
		if (ret < 0) {
			session_unlock_list();
			goto setup_error;
		}

		/* Filled the session array */
		cmd_list_lttng_sessions((struct lttng_session *)(cmd_ctx->llm->payload),
				LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
				LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));

		session_unlock_list();

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_CALIBRATE:
	{
		ret = cmd_calibrate(cmd_ctx->lsm->domain.type,
				&cmd_ctx->lsm->u.calibrate);
		break;
	}
	case LTTNG_REGISTER_CONSUMER:
	{
		struct consumer_data *cdata;

		switch (cmd_ctx->lsm->domain.type) {
		case LTTNG_DOMAIN_KERNEL:
			cdata = &kconsumer_data;
			break;
		default:
			ret = LTTNG_ERR_UND;
			goto error;
		}

		ret = cmd_register_consumer(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.reg.path, cdata);
		break;
	}
	case LTTNG_ENABLE_EVENT_WITH_FILTER:
	{
		struct lttng_filter_bytecode *bytecode;

		if (cmd_ctx->lsm->u.enable.bytecode_len > LTTNG_FILTER_MAX_LEN) {
			ret = LTTNG_ERR_FILTER_INVAL;
			goto error;
		}
		if (cmd_ctx->lsm->u.enable.bytecode_len == 0) {
			ret = LTTNG_ERR_FILTER_INVAL;
			goto error;
		}
		bytecode = zmalloc(cmd_ctx->lsm->u.enable.bytecode_len);
		if (!bytecode) {
			ret = LTTNG_ERR_FILTER_NOMEM;
			goto error;
		}
		/* Receive var. len. data */
		DBG("Receiving var len data from client ...");
		ret = lttcomm_recv_unix_sock(sock, bytecode,
				cmd_ctx->lsm->u.enable.bytecode_len);
		if (ret <= 0) {
			DBG("Nothing recv() from client var len data... continuing");
			*sock_error = 1;
			free(bytecode);
			ret = LTTNG_ERR_FILTER_INVAL;
			goto error;
		}

		if (bytecode->len + sizeof(*bytecode)
				!= cmd_ctx->lsm->u.enable.bytecode_len) {
			free(bytecode);
			ret = LTTNG_ERR_FILTER_INVAL;
			goto error;
		}

		ret = cmd_enable_event(cmd_ctx->session, &cmd_ctx->lsm->domain,
				cmd_ctx->lsm->u.enable.channel_name,
				&cmd_ctx->lsm->u.enable.event, bytecode, kernel_poll_pipe[1]);
		break;
	}
	case LTTNG_DATA_PENDING:
	{
		ret = cmd_data_pending(cmd_ctx->session);
		break;
	}
	case LTTNG_SNAPSHOT_ADD_OUTPUT:
	{
		struct lttcomm_lttng_output_id reply;

		ret = cmd_snapshot_add_output(cmd_ctx->session,
				&cmd_ctx->lsm->u.snapshot_output.output, &reply.id);
		if (ret != LTTNG_OK) {
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx, sizeof(reply));
		if (ret < 0) {
			goto setup_error;
		}

		/* Copy output list into message payload */
		memcpy(cmd_ctx->llm->payload, &reply, sizeof(reply));
		ret = LTTNG_OK;
		break;
	}
	case LTTNG_SNAPSHOT_DEL_OUTPUT:
	{
		ret = cmd_snapshot_del_output(cmd_ctx->session,
				&cmd_ctx->lsm->u.snapshot_output.output);
		break;
	}
	case LTTNG_SNAPSHOT_LIST_OUTPUT:
	{
		ssize_t nb_output;
		struct lttng_snapshot_output *outputs = NULL;

		nb_output = cmd_snapshot_list_outputs(cmd_ctx->session, &outputs);
		if (nb_output < 0) {
			ret = -nb_output;
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx,
				nb_output * sizeof(struct lttng_snapshot_output));
		if (ret < 0) {
			free(outputs);
			goto setup_error;
		}

		if (outputs) {
			/* Copy output list into message payload */
			memcpy(cmd_ctx->llm->payload, outputs,
					nb_output * sizeof(struct lttng_snapshot_output));
			free(outputs);
		}

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_SNAPSHOT_RECORD:
	{
		ret = cmd_snapshot_record(cmd_ctx->session,
				&cmd_ctx->lsm->u.snapshot_record.output,
				cmd_ctx->lsm->u.snapshot_record.wait);
		break;
	}
	case LTTNG_CREATE_SESSION_SNAPSHOT:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris = NULL;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri > 0) {
			uris = zmalloc(len);
			if (uris == NULL) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			/* Receive variable len data */
			DBG("Waiting for %zu URIs from client ...", nb_uri);
			ret = lttcomm_recv_unix_sock(sock, uris, len);
			if (ret <= 0) {
				DBG("No URIs received from client... continuing");
				*sock_error = 1;
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}

			if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
				DBG("Creating session with ONE network URI is a bad call");
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}
		}

		ret = cmd_create_session_snapshot(cmd_ctx->lsm->session.name, uris,
				nb_uri, &cmd_ctx->creds);

		free(uris);

		break;
	}
	case LTTNG_CREATE_SESSION_LIVE:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris = NULL;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri > 0) {
			uris = zmalloc(len);
			if (uris == NULL) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			/* Receive variable len data */
			DBG("Waiting for %zu URIs from client ...", nb_uri);
			ret = lttcomm_recv_unix_sock(sock, uris, len);
			if (ret <= 0) {
				DBG("No URIs received from client... continuing");
				*sock_error = 1;
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}

			if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
				DBG("Creating session with ONE network URI is a bad call");
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}
		}

		ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris,
				nb_uri, &cmd_ctx->creds,
				cmd_ctx->lsm->u.session_live.timer_interval);

		free(uris);

		break;
	}
	default:
		ret = LTTNG_ERR_UND;
		break;
	}
error:
	if (cmd_ctx->llm == NULL) {
		DBG("Missing llm structure. Allocating one.");
		if (setup_lttng_msg(cmd_ctx, 0) < 0) {
			goto setup_error;
		}
	}
	/* Set return code */
	cmd_ctx->llm->ret_code = ret;
setup_error:
	if (cmd_ctx->session) {
		session_unlock(cmd_ctx->session);
	}
	if (need_tracing_session) {
		session_unlock_list();
	}
init_setup_error:
	return ret;
}
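
/*
 * Example flow, for illustration: an "lttng enable-event -u" request
 * enters process_client_msg() as LTTNG_ENABLE_EVENT, looks up and locks
 * the target session, runs the UST domain pre-action (creating the UST
 * session and spawning a consumer on first use), then dispatches to
 * cmd_enable_event() before the client thread sends the reply back.
 */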
/*
 * Thread managing health check socket.
 */
static void *thread_manage_health(void *data)
{
	int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	struct health_comm_msg msg;
	struct health_comm_reply reply;

	DBG("[thread] Manage health check started");

	rcu_register_thread();

	/* We might hit an error path before this is created. */
	lttng_poll_init(&events);

	/* Create unix socket */
	sock = lttcomm_create_unix_sock(health_unix_sock_path);
	if (sock < 0) {
		ERR("Unable to create health check Unix socket");
		ret = -1;
		goto error;
	}

	if (is_root) {
		/* lttng health client socket path permissions */
		ret = chown(health_unix_sock_path, 0,
				utils_get_group_id(tracing_group_name));
		if (ret < 0) {
			ERR("Unable to set group on %s", health_unix_sock_path);
			PERROR("chown");
			ret = -1;
			goto error;
		}

		ret = chmod(health_unix_sock_path,
				S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
		if (ret < 0) {
			ERR("Unable to set permissions on %s", health_unix_sock_path);
			PERROR("chmod");
			ret = -1;
			goto error;
		}
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	ret = lttcomm_listen_unix_sock(sock);
	if (ret < 0) {
		goto error;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
	 * more will be added to this poll set.
	 */
	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
	if (ret < 0) {
		goto error;
	}

	DBG("Health check ready");

	while (1) {
		/* Infinite blocking call, waiting for transmission */
restart:
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Health socket poll error");
					goto error;
				}
			}
		}

		new_sock = lttcomm_accept_unix_sock(sock);
		if (new_sock < 0) {
			goto error;
		}

		/*
		 * Set the CLOEXEC flag. Return code is useless because either way, the
		 * show must go on.
		 */
		(void) utils_set_fd_cloexec(new_sock);

		DBG("Receiving data from client for health...");
		ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
		if (ret <= 0) {
			DBG("Nothing recv() from client... continuing");
			ret = close(new_sock);
			if (ret) {
				PERROR("close");
			}
			new_sock = -1;
			continue;
		}

		rcu_thread_online();

		memset(&reply, 0, sizeof(reply));
		for (i = 0; i < NR_HEALTH_SESSIOND_TYPES; i++) {
			/*
			 * health_check_state returns 0 if health is
			 * bad.
			 */
			if (!health_check_state(health_sessiond, i)) {
				reply.ret_code |= 1ULL << i;
			}
		}

		rcu_thread_offline();

		DBG2("Health check return value %" PRIx64, reply.ret_code);

		ret = send_unix_sock(new_sock, (void *) &reply, sizeof(reply));
		if (ret < 0) {
			ERR("Failed to send health data back to client");
		}

		/* End of transmission */
		ret = close(new_sock);
		if (ret) {
			PERROR("close");
		}
		new_sock = -1;
	}

exit:
error:
	if (err) {
		ERR("Health error occurred in %s", __func__);
	}
	DBG("Health check thread dying");
	unlink(health_unix_sock_path);
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	lttng_poll_clean(&events);

	rcu_unregister_thread();
	return NULL;
}
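
/*
 * Health protocol, as implemented above: a client connects to
 * health_unix_sock_path, writes a struct health_comm_msg, and reads back
 * a struct health_comm_reply in which bit i of ret_code is set when
 * component i failed its health check. A reply of 0 therefore means
 * every component is healthy.
 */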
/*
 * This thread manages all client requests using the unix client socket for
 * communication.
 */
static void *thread_manage_clients(void *data)
{
	int sock = -1, ret, i, pollfd, err = -1;
	int sock_error;
	uint32_t revents, nb_fd;
	struct command_ctx *cmd_ctx = NULL;
	struct lttng_poll_event events;

	DBG("[thread] Manage client started");

	rcu_register_thread();

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_CMD);

	if (testpoint(thread_manage_clients)) {
		goto error_testpoint;
	}

	health_code_update();

	ret = lttcomm_listen_unix_sock(client_sock);
	if (ret < 0) {
		goto error_listen;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
	 * more will be added to this poll set.
	 */
	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error_create_poll;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, client_sock, LPOLLIN | LPOLLPRI);
	if (ret < 0) {
		goto error;
	}

	/*
	 * Notify parent pid that we are ready to accept command for client side.
	 */
	if (opt_sig_parent) {
		kill(ppid, SIGUSR1);
	}

	if (testpoint(thread_manage_clients_before_loop)) {
		goto error;
	}

	health_code_update();

	while (1) {
		DBG("Accepting client command ...");

		/* Infinite blocking call, waiting for transmission */
restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == client_sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Client socket poll error");
					goto error;
				}
			}
		}

		DBG("Wait for client response");

		health_code_update();

		sock = lttcomm_accept_unix_sock(client_sock);
		if (sock < 0) {
			goto error;
		}

		/*
		 * Set the CLOEXEC flag. Return code is useless because either way, the
		 * show must go on.
		 */
		(void) utils_set_fd_cloexec(sock);

		/* Set socket option for credentials retrieval */
		ret = lttcomm_setsockopt_creds_unix_sock(sock);
		if (ret < 0) {
			goto error;
		}

		/* Allocate context command to process the client request */
		cmd_ctx = zmalloc(sizeof(struct command_ctx));
		if (cmd_ctx == NULL) {
			PERROR("zmalloc cmd_ctx");
			goto error;
		}

		/* Allocate data buffer for reception */
		cmd_ctx->lsm = zmalloc(sizeof(struct lttcomm_session_msg));
		if (cmd_ctx->lsm == NULL) {
			PERROR("zmalloc cmd_ctx->lsm");
			goto error;
		}

		cmd_ctx->llm = NULL;
		cmd_ctx->session = NULL;

		health_code_update();

		/*
		 * Data is received from the lttng client. The struct
		 * lttcomm_session_msg (lsm) contains the command and data request of
		 * the client.
		 */
		DBG("Receiving data from client ...");
		ret = lttcomm_recv_creds_unix_sock(sock, cmd_ctx->lsm,
				sizeof(struct lttcomm_session_msg), &cmd_ctx->creds);
		if (ret <= 0) {
			DBG("Nothing recv() from client... continuing");
			ret = close(sock);
			if (ret) {
				PERROR("close");
			}
			sock = -1;
			clean_command_ctx(&cmd_ctx);
			continue;
		}

		health_code_update();

		// TODO: Validate cmd_ctx including sanity check for
		// security purpose.

		rcu_thread_online();
		/*
		 * This function dispatches the work to the kernel or userspace tracer
		 * libs and fills the lttcomm_lttng_msg data structure with all the
		 * information needed for the client. The command context struct
		 * contains everything this function may need.
		 */
		ret = process_client_msg(cmd_ctx, sock, &sock_error);
		rcu_thread_offline();
		if (ret < 0) {
			if (sock_error) {
				ret = close(sock);
				if (ret) {
					PERROR("close");
				}
				sock = -1;
			}
			/*
			 * TODO: Inform client somehow of the fatal error. At
			 * this point, ret < 0 means that a zmalloc failed
			 * (ENOMEM). Error detected but still accept
			 * command, unless a socket error has been
			 * detected.
			 */
			clean_command_ctx(&cmd_ctx);
			continue;
		}

		health_code_update();

		DBG("Sending response (size: %d, retcode: %s)",
				cmd_ctx->lttng_msg_size,
				lttng_strerror(-cmd_ctx->llm->ret_code));
		ret = send_unix_sock(sock, cmd_ctx->llm, cmd_ctx->lttng_msg_size);
		if (ret < 0) {
			ERR("Failed to send data back to client");
		}

		/* End of transmission */
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
		sock = -1;

		clean_command_ctx(&cmd_ctx);

		health_code_update();
	}

exit:
error:
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	lttng_poll_clean(&events);
	clean_command_ctx(&cmd_ctx);

error_listen:
error_create_poll:
error_testpoint:
	unlink(client_unix_sock_path);
	if (client_sock >= 0) {
		ret = close(client_sock);
		if (ret) {
			PERROR("close");
		}
	}

	if (err) {
		ERR("Health error occurred in %s", __func__);
	}

	health_unregister(health_sessiond);

	DBG("Client thread dying");

	rcu_unregister_thread();
	return NULL;
}
/*
 * usage function on stderr
 */
static void usage(void)
{
	fprintf(stderr, "Usage: %s OPTIONS\n\nOptions:\n", progname);
	fprintf(stderr, "  -h, --help                         Display this usage.\n");
	fprintf(stderr, "  -c, --client-sock PATH             Specify path for the client unix socket\n");
	fprintf(stderr, "  -a, --apps-sock PATH               Specify path for apps unix socket\n");
	fprintf(stderr, "      --kconsumerd-err-sock PATH     Specify path for the kernel consumer error socket\n");
	fprintf(stderr, "      --kconsumerd-cmd-sock PATH     Specify path for the kernel consumer command socket\n");
	fprintf(stderr, "      --ustconsumerd32-err-sock PATH Specify path for the 32-bit UST consumer error socket\n");
	fprintf(stderr, "      --ustconsumerd64-err-sock PATH Specify path for the 64-bit UST consumer error socket\n");
	fprintf(stderr, "      --ustconsumerd32-cmd-sock PATH Specify path for the 32-bit UST consumer command socket\n");
	fprintf(stderr, "      --ustconsumerd64-cmd-sock PATH Specify path for the 64-bit UST consumer command socket\n");
	fprintf(stderr, "      --consumerd32-path PATH        Specify path for the 32-bit UST consumer daemon binary\n");
	fprintf(stderr, "      --consumerd32-libdir PATH      Specify path for the 32-bit UST consumer daemon libraries\n");
	fprintf(stderr, "      --consumerd64-path PATH        Specify path for the 64-bit UST consumer daemon binary\n");
	fprintf(stderr, "      --consumerd64-libdir PATH      Specify path for the 64-bit UST consumer daemon libraries\n");
	fprintf(stderr, "  -d, --daemonize                    Start as a daemon.\n");
	fprintf(stderr, "  -g, --group NAME                   Specify the tracing group name. (default: tracing)\n");
	fprintf(stderr, "  -V, --version                      Show version number.\n");
	fprintf(stderr, "  -S, --sig-parent                   Send SIGCHLD to parent pid to notify readiness.\n");
	fprintf(stderr, "  -q, --quiet                        No output at all.\n");
	fprintf(stderr, "  -v, --verbose                      Verbose mode. Activate DBG() macro.\n");
	fprintf(stderr, "  -p, --pidfile FILE                 Write a pid to FILE name overriding the default value.\n");
	fprintf(stderr, "      --verbose-consumer             Verbose mode for consumer. Activate DBG() macro.\n");
	fprintf(stderr, "      --no-kernel                    Disable kernel tracer\n");
}
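
/*
 * A hypothetical invocation, for illustration: "lttng-sessiond
 * --daemonize --group=tracing -vv" backgrounds the daemon for the
 * "tracing" group with two levels of verbosity; repeated -v flags
 * accumulate, as handled in parse_args() below.
 */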
/*
 * daemon argument parsing
 */
static int parse_args(int argc, char **argv)
{
	int c;

	static struct option long_options[] = {
		{ "client-sock", 1, 0, 'c' },
		{ "apps-sock", 1, 0, 'a' },
		{ "kconsumerd-cmd-sock", 1, 0, 'C' },
		{ "kconsumerd-err-sock", 1, 0, 'E' },
		{ "ustconsumerd32-cmd-sock", 1, 0, 'G' },
		{ "ustconsumerd32-err-sock", 1, 0, 'H' },
		{ "ustconsumerd64-cmd-sock", 1, 0, 'D' },
		{ "ustconsumerd64-err-sock", 1, 0, 'F' },
		{ "consumerd32-path", 1, 0, 'u' },
		{ "consumerd32-libdir", 1, 0, 'U' },
		{ "consumerd64-path", 1, 0, 't' },
		{ "consumerd64-libdir", 1, 0, 'T' },
		{ "daemonize", 0, 0, 'd' },
		{ "sig-parent", 0, 0, 'S' },
		{ "help", 0, 0, 'h' },
		{ "group", 1, 0, 'g' },
		{ "version", 0, 0, 'V' },
		{ "quiet", 0, 0, 'q' },
		{ "verbose", 0, 0, 'v' },
		{ "verbose-consumer", 0, 0, 'Z' },
		{ "no-kernel", 0, 0, 'N' },
		{ "pidfile", 1, 0, 'p' },
		{ NULL, 0, 0, 0 }
	};

	while (1) {
		int option_index = 0;
		c = getopt_long(argc, argv, "dhqvVSN" "a:c:g:s:C:E:D:F:Z:u:t:p:",
				long_options, &option_index);
		if (c == -1) {
			break;
		}

		switch (c) {
		case 0:
			fprintf(stderr, "option %s", long_options[option_index].name);
			if (optarg) {
				fprintf(stderr, " with arg %s\n", optarg);
			}
			break;
		case 'c':
			snprintf(client_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'a':
			snprintf(apps_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'd':
			opt_daemon = 1;
			break;
		case 'g':
			tracing_group_name = optarg;
			break;
		case 'h':
			usage();
			exit(EXIT_FAILURE);
		case 'V':
			fprintf(stdout, "%s\n", VERSION);
			exit(EXIT_SUCCESS);
		case 'S':
			opt_sig_parent = 1;
			break;
		case 'E':
			snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'C':
			snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'F':
			snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'D':
			snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'H':
			snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'G':
			snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'N':
			opt_no_kernel = 1;
			break;
		case 'q':
			lttng_opt_quiet = 1;
			break;
		case 'v':
			/* Verbose level can increase using multiple -v */
			lttng_opt_verbose += 1;
			break;
		case 'Z':
			opt_verbose_consumer += 1;
			break;
		case 'u':
			consumerd32_bin = optarg;
			break;
		case 'U':
			consumerd32_libdir = optarg;
			break;
		case 't':
			consumerd64_bin = optarg;
			break;
		case 'T':
			consumerd64_libdir = optarg;
			break;
		case 'p':
			opt_pidfile = optarg;
			break;
		default:
			/* Unknown option or other error.
			 * Error is printed by getopt, just return */
			return -1;
		}
	}

	return 0;
}
/*
 * Creates the two sockets needed by the daemon.
 *	    apps_sock - The communication socket for all UST apps.
 *	    client_sock - The communication socket of the cli tool (lttng).
 */
static int init_daemon_socket(void)
{
	int ret = 0;
	mode_t old_umask;

	old_umask = umask(0);

	/* Create client tool unix socket */
	client_sock = lttcomm_create_unix_sock(client_unix_sock_path);
	if (client_sock < 0) {
		ERR("Create unix sock failed: %s", client_unix_sock_path);
		ret = -1;
		goto end;
	}

	/* Set the cloexec flag */
	ret = utils_set_fd_cloexec(client_sock);
	if (ret < 0) {
		ERR("Unable to set CLOEXEC flag to the client Unix socket (fd: %d). "
				"Continuing but note that the consumer daemon will have a "
				"reference to this socket on exec()", client_sock);
		goto end;
	}

	/* File permission MUST be 660 */
	ret = chmod(client_unix_sock_path, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", client_unix_sock_path);
		PERROR("chmod");
		goto end;
	}

	/* Create the application unix socket */
	apps_sock = lttcomm_create_unix_sock(apps_unix_sock_path);
	if (apps_sock < 0) {
		ERR("Create unix sock failed: %s", apps_unix_sock_path);
		ret = -1;
		goto end;
	}

	/* Set the cloexec flag */
	ret = utils_set_fd_cloexec(apps_sock);
	if (ret < 0) {
		ERR("Unable to set CLOEXEC flag to the app Unix socket (fd: %d). "
				"Continuing but note that the consumer daemon will have a "
				"reference to this socket on exec()", apps_sock);
		goto end;
	}

	/* File permission MUST be 666 */
	ret = chmod(apps_unix_sock_path,
			S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", apps_unix_sock_path);
		PERROR("chmod");
		goto end;
	}

	DBG3("Session daemon client socket %d and application socket %d created",
			client_sock, apps_sock);

end:
	umask(old_umask);
	return ret;
}
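
/*
 * The split permissions above are deliberate: the client socket stays at
 * 660 so only root and the tracing group (applied in set_permissions()
 * below) can issue commands, while the application socket is 666 so any
 * instrumented process, whatever its user, can register.
 */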
/*
 * Check if the global socket is available, and if a daemon is answering at the
 * other side. If yes, error is returned.
 */
static int check_existing_daemon(void)
{
	/* Is there anybody out there ? */
	if (lttng_session_daemon_alive()) {
		return -EEXIST;
	}

	return 0;
}
/*
 * Set the tracing group gid onto the client socket.
 *
 * Race window between mkdir and chown is OK because we are going from more
 * permissive (root.root) to less permissive (root.tracing).
 */
static int set_permissions(char *rundir)
{
	int ret;
	gid_t gid;

	gid = utils_get_group_id(tracing_group_name);

	/* Set lttng run dir */
	ret = chown(rundir, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", rundir);
		PERROR("chown");
	}

	/*
	 * Ensure all applications and tracing group can search the run
	 * dir. Allow everyone to read the directory, since it does not
	 * buy us anything to hide its content.
	 */
	ret = chmod(rundir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
	if (ret < 0) {
		ERR("Unable to set permissions on %s", rundir);
		PERROR("chmod");
	}

	/* lttng client socket path */
	ret = chown(client_unix_sock_path, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", client_unix_sock_path);
		PERROR("chown");
	}

	/* kconsumer error socket path */
	ret = chown(kconsumer_data.err_unix_sock_path, 0, 0);
	if (ret < 0) {
		ERR("Unable to set group on %s", kconsumer_data.err_unix_sock_path);
		PERROR("chown");
	}

	/* 64-bit ustconsumer error socket path */
	ret = chown(ustconsumer64_data.err_unix_sock_path, 0, 0);
	if (ret < 0) {
		ERR("Unable to set group on %s", ustconsumer64_data.err_unix_sock_path);
		PERROR("chown");
	}

	/* 32-bit ustconsumer compat32 error socket path */
	ret = chown(ustconsumer32_data.err_unix_sock_path, 0, 0);
	if (ret < 0) {
		ERR("Unable to set group on %s", ustconsumer32_data.err_unix_sock_path);
		PERROR("chown");
	}

	DBG("All permissions are set");

	return ret;
}
/*
 * Create the lttng run directory needed for all global sockets and pipe.
 */
static int create_lttng_rundir(const char *rundir)
{
	int ret;

	DBG3("Creating LTTng run directory: %s", rundir);

	ret = mkdir(rundir, S_IRWXU);
	if (ret < 0) {
		if (errno != EEXIST) {
			ERR("Unable to create %s", rundir);
			goto error;
		} else {
			ret = 0;
		}
	}

error:
	return ret;
}
/*
 * Setup sockets and directory needed by the kconsumerd communication with the
 * session daemon.
 */
static int set_consumer_sockets(struct consumer_data *consumer_data,
		const char *rundir)
{
	int ret;
	char path[PATH_MAX];

	switch (consumer_data->type) {
	case LTTNG_CONSUMER_KERNEL:
		snprintf(path, PATH_MAX, DEFAULT_KCONSUMERD_PATH, rundir);
		break;
	case LTTNG_CONSUMER64_UST:
		snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD64_PATH, rundir);
		break;
	case LTTNG_CONSUMER32_UST:
		snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD32_PATH, rundir);
		break;
	default:
		ERR("Consumer type unknown");
		ret = -EINVAL;
		goto error;
	}

	DBG2("Creating consumer directory: %s", path);

	ret = mkdir(path, S_IRWXU | S_IRGRP | S_IXGRP);
	if (ret < 0) {
		if (errno != EEXIST) {
			PERROR("mkdir");
			ERR("Failed to create %s", path);
			goto error;
		}
		ret = 0;
	}

	if (is_root) {
		ret = chown(path, 0, utils_get_group_id(tracing_group_name));
		if (ret < 0) {
			ERR("Unable to set group on %s", path);
			PERROR("chown");
			goto error;
		}
	}

	/* Create the kconsumerd error unix socket */
	consumer_data->err_sock =
		lttcomm_create_unix_sock(consumer_data->err_unix_sock_path);
	if (consumer_data->err_sock < 0) {
		ERR("Create unix sock failed: %s", consumer_data->err_unix_sock_path);
		ret = -1;
		goto error;
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	ret = utils_set_fd_cloexec(consumer_data->err_sock);
	if (ret < 0) {
		PERROR("utils_set_fd_cloexec");
		/* continue anyway */
	}

	/* File permission MUST be 660 */
	ret = chmod(consumer_data->err_unix_sock_path,
			S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", consumer_data->err_unix_sock_path);
		PERROR("chmod");
		goto error;
	}

error:
	return ret;
}
/*
 * Signal handler for the daemon
 *
 * Simply stop all worker threads, leaving main() return gracefully after
 * joining all threads and calling cleanup().
 */
static void sighandler(int sig)
{
	switch (sig) {
	case SIGPIPE:
		DBG("SIGPIPE caught");
		return;
	case SIGINT:
		DBG("SIGINT caught");
		stop_threads();
		break;
	case SIGTERM:
		DBG("SIGTERM caught");
		stop_threads();
		break;
	default:
		break;
	}
}
/*
 * Setup signal handler for :
 *		SIGINT, SIGTERM, SIGPIPE
 */
static int set_signal_handler(void)
{
	int ret = 0;
	struct sigaction sa;
	sigset_t sigset;

	if ((ret = sigemptyset(&sigset)) < 0) {
		PERROR("sigemptyset");
		return ret;
	}

	sa.sa_handler = sighandler;
	sa.sa_mask = sigset;
	sa.sa_flags = 0;
	if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	DBG("Signal handler set for SIGTERM, SIGPIPE and SIGINT");

	return ret;
}
/*
 * Set open files limit to unlimited. This daemon can open a large number of
 * file descriptors in order to consume multiple kernel traces.
 */
static void set_ulimit(void)
{
	int ret;
	struct rlimit lim;

	/* The kernel does not allow an infinite limit for open files */
	lim.rlim_cur = 65535;
	lim.rlim_max = 65535;

	ret = setrlimit(RLIMIT_NOFILE, &lim);
	if (ret < 0) {
		PERROR("failed to set open files limit");
	}
}
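
/*
 * Roughly the shell equivalent of "ulimit -n 65535" before starting the
 * daemon. Raising the hard limit generally requires privilege, which
 * matches the call site: main() below only calls set_ulimit() from its
 * root-only setup path.
 */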
/*
 * Write pidfile using the rundir and opt_pidfile.
 */
static void write_pidfile(void)
{
	int ret;
	char pidfile_path[PATH_MAX];

	assert(rundir);

	if (opt_pidfile) {
		strncpy(pidfile_path, opt_pidfile, sizeof(pidfile_path));
	} else {
		/* Build pidfile path from rundir and opt_pidfile. */
		ret = snprintf(pidfile_path, sizeof(pidfile_path), "%s/"
				DEFAULT_LTTNG_SESSIOND_PIDFILE, rundir);
		if (ret < 0) {
			PERROR("snprintf pidfile path");
			goto error;
		}
	}

	/*
	 * Create pid file in rundir. Return value is of no importance. The
	 * execution will continue even though we are not able to write the file.
	 */
	(void) utils_create_pid_file(getpid(), pidfile_path);

error:
	return;
}
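
/*
 * By default the pid ends up in <rundir>/DEFAULT_LTTNG_SESSIOND_PIDFILE;
 * passing "-p FILE" on the command line (opt_pidfile) overrides that
 * location entirely, as parsed in parse_args() above.
 */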
int main(int argc, char **argv)
{
	int ret = 0;
	void *status;
	const char *home_path, *env_app_timeout;

	init_kernel_workarounds();

	rcu_register_thread();

	setup_consumerd_path();

	page_size = sysconf(_SC_PAGESIZE);
	if (page_size < 0) {
		PERROR("sysconf _SC_PAGESIZE");
		page_size = LONG_MAX;
		WARN("Fallback page size to %ld", page_size);
	}

	/* Parse arguments */
	progname = argv[0];
	if ((ret = parse_args(argc, argv)) < 0) {
		goto error;
	}

	/* Daemonize */
	if (opt_daemon) {
		int i;

		/*
		 * fork
		 * child: setsid, close FD 0, 1, 2, chdir /
		 * parent: exit (if fork is successful)
		 */
		ret = daemon(0, 0);
		if (ret < 0) {
			PERROR("daemon");
			goto error;
		}
		/*
		 * We are in the child. Make sure all other file
		 * descriptors are closed, in case we are called with
		 * more opened file descriptors than the standard ones.
		 */
		for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) {
			(void) close(i);
		}
	}

	/* Create thread quit pipe */
	if ((ret = init_thread_quit_pipe()) < 0) {
		goto error;
	}

	/* Check if daemon is UID = 0 */
	is_root = !getuid();

	if (is_root) {
		rundir = strdup(DEFAULT_LTTNG_RUNDIR);

		/* Create global run dir with root access */
		ret = create_lttng_rundir(rundir);
		if (ret < 0) {
			goto error;
		}

		if (strlen(apps_unix_sock_path) == 0) {
			snprintf(apps_unix_sock_path, PATH_MAX,
					DEFAULT_GLOBAL_APPS_UNIX_SOCK);
		}

		if (strlen(client_unix_sock_path) == 0) {
			snprintf(client_unix_sock_path, PATH_MAX,
					DEFAULT_GLOBAL_CLIENT_UNIX_SOCK);
		}

		/* Set global SHM for ust */
		if (strlen(wait_shm_path) == 0) {
			snprintf(wait_shm_path, PATH_MAX,
					DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH);
		}

		if (strlen(health_unix_sock_path) == 0) {
			snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
					DEFAULT_GLOBAL_HEALTH_UNIX_SOCK);
		}

		/* Setup kernel consumerd path */
		snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX,
				DEFAULT_KCONSUMERD_ERR_SOCK_PATH, rundir);
		snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX,
				DEFAULT_KCONSUMERD_CMD_SOCK_PATH, rundir);

		DBG2("Kernel consumer err path: %s",
				kconsumer_data.err_unix_sock_path);
		DBG2("Kernel consumer cmd path: %s",
				kconsumer_data.cmd_unix_sock_path);
	} else {
		home_path = utils_get_home_dir();
		if (home_path == NULL) {
			/* TODO: Add --socket PATH option */
			ERR("Can't get HOME directory for sockets creation.");
			ret = -EPERM;
			goto error;
		}

		/*
		 * Create rundir from home path. This will create something like
		 * $HOME/.lttng
		 */
		ret = asprintf(&rundir, DEFAULT_LTTNG_HOME_RUNDIR, home_path);
		if (ret < 0) {
			ret = -ENOMEM;
			goto error;
		}

		ret = create_lttng_rundir(rundir);
		if (ret < 0) {
			goto error;
		}

		if (strlen(apps_unix_sock_path) == 0) {
			snprintf(apps_unix_sock_path, PATH_MAX,
					DEFAULT_HOME_APPS_UNIX_SOCK, home_path);
		}

		/* Set the cli tool unix socket path */
		if (strlen(client_unix_sock_path) == 0) {
			snprintf(client_unix_sock_path, PATH_MAX,
					DEFAULT_HOME_CLIENT_UNIX_SOCK, home_path);
		}

		/* Set global SHM for ust */
		if (strlen(wait_shm_path) == 0) {
			snprintf(wait_shm_path, PATH_MAX,
					DEFAULT_HOME_APPS_WAIT_SHM_PATH, getuid());
		}

		/* Set health check Unix path */
		if (strlen(health_unix_sock_path) == 0) {
			snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
					DEFAULT_HOME_HEALTH_UNIX_SOCK, home_path);
		}
	}

	/* Set consumer initial state */
	kernel_consumerd_state = CONSUMER_STOPPED;
	ust_consumerd_state = CONSUMER_STOPPED;

	DBG("Client socket path %s", client_unix_sock_path);
	DBG("Application socket path %s", apps_unix_sock_path);
	DBG("Application wait path %s", wait_shm_path);
	DBG("LTTng run directory path: %s", rundir);

	/* 32 bits consumerd path setup */
	snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH, rundir);
	snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH, rundir);

	DBG2("UST consumer 32 bits err path: %s",
			ustconsumer32_data.err_unix_sock_path);
	DBG2("UST consumer 32 bits cmd path: %s",
			ustconsumer32_data.cmd_unix_sock_path);

	/* 64 bits consumerd path setup */
	snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH, rundir);
	snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH, rundir);

	DBG2("UST consumer 64 bits err path: %s",
			ustconsumer64_data.err_unix_sock_path);
	DBG2("UST consumer 64 bits cmd path: %s",
			ustconsumer64_data.cmd_unix_sock_path);

	/*
	 * See if daemon already exists.
	 */
	if ((ret = check_existing_daemon()) < 0) {
		ERR("Already running daemon.\n");
		/*
		 * We do not goto exit because we must not cleanup()
		 * because a daemon is already running.
		 */
		goto error;
	}

	/*
	 * Init UST app hash table. Alloc hash table before this point since
	 * cleanup() can get called after that point.
	 */
	ust_app_ht_alloc();

	/* After this point, we can safely call cleanup() with "goto exit" */

	/*
	 * These actions must be executed as root. We do that *after* setting up
	 * the sockets path because we MUST make the check for another daemon using
	 * those paths *before* trying to set the kernel consumer sockets and init
	 * kernel tracer.
	 */
	if (is_root) {
		ret = set_consumer_sockets(&kconsumer_data, rundir);
		if (ret < 0) {
			goto exit;
		}

		/* Setup kernel tracer */
		if (!opt_no_kernel) {
			init_kernel_tracer();
		}

		/* Set ulimit for open files */
		set_ulimit();
	}
	/* init lttng_fd tracking must be done after set_ulimit. */
	lttng_fd_init();

	ret = set_consumer_sockets(&ustconsumer64_data, rundir);
	if (ret < 0) {
		goto exit;
	}

	ret = set_consumer_sockets(&ustconsumer32_data, rundir);
	if (ret < 0) {
		goto exit;
	}

	if ((ret = set_signal_handler()) < 0) {
		goto exit;
	}

	/* Setup the needed unix socket */
	if ((ret = init_daemon_socket()) < 0) {
		goto exit;
	}

	/* Set credentials to socket */
	if (is_root && ((ret = set_permissions(rundir)) < 0)) {
		goto exit;
	}

	/* Get parent pid if -S, --sig-parent is specified. */
	if (opt_sig_parent) {
		ppid = getppid();
	}

	/* Setup the kernel pipe for waking up the kernel thread */
	if (is_root && !opt_no_kernel) {
		if ((ret = utils_create_pipe_cloexec(kernel_poll_pipe)) < 0) {
			goto exit;
		}
	}

	/* Setup the thread ht_cleanup communication pipe. */
	if (utils_create_pipe_cloexec(ht_cleanup_pipe) < 0) {
		goto exit;
	}

	/* Setup the thread apps communication pipe. */
	if ((ret = utils_create_pipe_cloexec(apps_cmd_pipe)) < 0) {
		goto exit;
	}

	/* Setup the thread apps notify communication pipe. */
	if (utils_create_pipe_cloexec(apps_cmd_notify_pipe) < 0) {
		goto exit;
	}

	/* Initialize global buffer per UID and PID registry. */
	buffer_reg_init_uid_registry();
	buffer_reg_init_pid_registry();

	/* Init UST command queue. */
	cds_wfq_init(&ust_cmd_queue.queue);

	/*
	 * Get session list pointer. This pointer MUST NOT be free(). This list is
	 * statically declared in session.c
	 */
	session_list_ptr = session_get_list();

	/* Set up max poll set size */
	lttng_poll_set_max_size();

	cmd_init();

	/* Check for the application socket timeout env variable. */
	env_app_timeout = getenv(DEFAULT_APP_SOCKET_TIMEOUT_ENV);
	if (env_app_timeout) {
		app_socket_timeout = atoi(env_app_timeout);
	} else {
		app_socket_timeout = DEFAULT_APP_SOCKET_RW_TIMEOUT;
	}

	write_pidfile();

	/* Initialize communication library */
	lttcomm_init();
	/* This is to get the TCP timeout value. */
	lttcomm_inet_init();

	/*
	 * Initialize the health check subsystem. This call should set the
	 * appropriate time values.
	 */
	health_sessiond = health_app_create(NR_HEALTH_SESSIOND_TYPES);
	if (!health_sessiond) {
		PERROR("health_app_create error");
		goto exit_health_sessiond_cleanup;
	}

	/* Create thread to clean up RCU hash tables */
	ret = pthread_create(&ht_cleanup_thread, NULL,
			thread_ht_cleanup, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create ht_cleanup");
		goto exit_ht_cleanup;
	}

	/* Create thread to manage the health check socket */
	ret = pthread_create(&health_thread, NULL,
			thread_manage_health, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create health");
		goto exit_health;
	}

	/* Create thread to manage the client socket */
	ret = pthread_create(&client_thread, NULL,
			thread_manage_clients, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create clients");
		goto exit_client;
	}

	/* Create thread to dispatch registration */
	ret = pthread_create(&dispatch_thread, NULL,
			thread_dispatch_ust_registration, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create dispatch");
		goto exit_dispatch;
	}

	/* Create thread to manage application registration. */
	ret = pthread_create(&reg_apps_thread, NULL,
			thread_registration_apps, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create registration");
		goto exit_reg_apps;
	}

	/* Create thread to manage application socket */
	ret = pthread_create(&apps_thread, NULL,
			thread_manage_apps, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create apps");
		goto exit_apps;
	}

	/* Create thread to manage application notify socket */
	ret = pthread_create(&apps_notify_thread, NULL,
			ust_thread_manage_notify, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create notify");
		goto exit_apps_notify;
	}

	/* Don't start this thread if kernel tracing is not requested nor root */
	if (is_root && !opt_no_kernel) {
		/* Create kernel thread to manage kernel event */
		ret = pthread_create(&kernel_thread, NULL,
				thread_manage_kernel, (void *) NULL);
		if (ret != 0) {
			PERROR("pthread_create kernel");
			goto exit_kernel;
		}

		ret = pthread_join(kernel_thread, &status);
		if (ret != 0) {
			PERROR("pthread_join");
			goto error;	/* join error, exit without cleanup */
		}
	}

exit_kernel:
	ret = pthread_join(apps_notify_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join apps notify");
		goto error;	/* join error, exit without cleanup */
	}

exit_apps_notify:
	ret = pthread_join(apps_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join apps");
		goto error;	/* join error, exit without cleanup */
	}

exit_apps:
	ret = pthread_join(reg_apps_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join");
		goto error;	/* join error, exit without cleanup */
	}

exit_reg_apps:
	ret = pthread_join(dispatch_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join");
		goto error;	/* join error, exit without cleanup */
	}

exit_dispatch:
	ret = pthread_join(client_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join");
		goto error;	/* join error, exit without cleanup */
	}

	ret = join_consumer_thread(&kconsumer_data);
	if (ret != 0) {
		PERROR("join_consumer");
		goto error;	/* join error, exit without cleanup */
	}

	ret = join_consumer_thread(&ustconsumer32_data);
	if (ret != 0) {
		PERROR("join_consumer ust32");
		goto error;	/* join error, exit without cleanup */
	}

	ret = join_consumer_thread(&ustconsumer64_data);
	if (ret != 0) {
		PERROR("join_consumer ust64");
		goto error;	/* join error, exit without cleanup */
	}

exit_client:
	ret = pthread_join(health_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join health thread");
		goto error;	/* join error, exit without cleanup */
	}

exit_health:
	ret = pthread_join(ht_cleanup_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join ht cleanup thread");
		goto error;	/* join error, exit without cleanup */
	}

exit_ht_cleanup:
	health_app_destroy(health_sessiond);
exit_health_sessiond_cleanup:
exit:
	/*
	 * cleanup() is called when no other thread is running.
	 */
	rcu_thread_online();
	cleanup();
	rcu_thread_offline();
	rcu_unregister_thread();

	if (!ret) {
		exit(EXIT_SUCCESS);
	}
error:
	exit(EXIT_FAILURE);
}