2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the Free
7 * Software Foundation; only version 2 of the License.
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307, USA.
25 #include <semaphore.h>
31 #include <sys/mount.h>
32 #include <sys/resource.h>
33 #include <sys/socket.h>
35 #include <sys/types.h>
37 #include <urcu/futex.h>
41 #include <common/common.h>
42 #include <common/compat/poll.h>
43 #include <common/defaults.h>
44 #include <common/kernel-consumer/kernel-consumer.h>
45 #include <common/ust-consumer/ust-consumer.h>
47 #include "lttng-sessiond.h"
58 #define CONSUMERD_FILE "lttng-consumerd"
60 struct consumer_data
{
61 enum lttng_consumer_type type
;
63 pthread_t thread
; /* Worker thread interacting with the consumer */
66 /* Mutex to control consumerd pid assignation */
67 pthread_mutex_t pid_mutex
;
73 /* consumer error and command Unix socket path */
74 char err_unix_sock_path
[PATH_MAX
];
75 char cmd_unix_sock_path
[PATH_MAX
];
79 const char default_home_dir
[] = DEFAULT_HOME_DIR
;
80 const char default_tracing_group
[] = LTTNG_DEFAULT_TRACING_GROUP
;
81 const char default_ust_sock_dir
[] = DEFAULT_UST_SOCK_DIR
;
82 const char default_global_apps_pipe
[] = DEFAULT_GLOBAL_APPS_PIPE
;
85 int opt_verbose
; /* Not static for lttngerr.h */
86 int opt_verbose_consumer
; /* Not static for lttngerr.h */
87 int opt_quiet
; /* Not static for lttngerr.h */
90 const char *opt_tracing_group
;
91 static int opt_sig_parent
;
92 static int opt_daemon
;
93 static int opt_no_kernel
;
94 static int is_root
; /* Set to 1 if the daemon is running as root */
95 static pid_t ppid
; /* Parent PID for --sig-parent option */
98 /* Consumer daemon specific control data */
99 static struct consumer_data kconsumer_data
= {
100 .type
= LTTNG_CONSUMER_KERNEL
,
101 .err_unix_sock_path
= DEFAULT_KCONSUMERD_ERR_SOCK_PATH
,
102 .cmd_unix_sock_path
= DEFAULT_KCONSUMERD_CMD_SOCK_PATH
,
104 static struct consumer_data ustconsumer64_data
= {
105 .type
= LTTNG_CONSUMER64_UST
,
106 .err_unix_sock_path
= DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH
,
107 .cmd_unix_sock_path
= DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH
,
109 static struct consumer_data ustconsumer32_data
= {
110 .type
= LTTNG_CONSUMER32_UST
,
111 .err_unix_sock_path
= DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH
,
112 .cmd_unix_sock_path
= DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH
,
115 static int dispatch_thread_exit
;
117 /* Global application Unix socket path */
118 static char apps_unix_sock_path
[PATH_MAX
];
119 /* Global client Unix socket path */
120 static char client_unix_sock_path
[PATH_MAX
];
121 /* global wait shm path for UST */
122 static char wait_shm_path
[PATH_MAX
];
124 /* Sockets and FDs */
125 static int client_sock
;
126 static int apps_sock
;
127 static int kernel_tracer_fd
;
128 static int kernel_poll_pipe
[2];
131 * Quit pipe for all threads. This permits a single cancellation point
132 * for all threads when receiving an event on the pipe.
134 static int thread_quit_pipe
[2];
137 * This pipe is used to inform the thread managing application communication
138 * that a command is queued and ready to be processed.
140 static int apps_cmd_pipe
[2];
142 /* Pthread, Mutexes and Semaphores */
143 static pthread_t apps_thread
;
144 static pthread_t reg_apps_thread
;
145 static pthread_t client_thread
;
146 static pthread_t kernel_thread
;
147 static pthread_t dispatch_thread
;
151 * UST registration command queue. This queue is tied with a futex and uses a N
152 * wakers / 1 waiter implemented and detailed in futex.c/.h
154 * The thread_manage_apps and thread_dispatch_ust_registration interact with
155 * this queue and the wait/wake scheme.
157 static struct ust_cmd_queue ust_cmd_queue
;
160 * Pointer initialized before thread creation.
162 * This points to the tracing session list containing the session count and a
163 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
164 * MUST NOT be taken if you call a public function in session.c.
166 * The lock is nested inside the structure: session_list_ptr->lock. Please use
167 * session_lock_list and session_unlock_list for lock acquisition.
169 static struct ltt_session_list
*session_list_ptr
;
171 int ust_consumerd64_fd
= -1;
172 int ust_consumerd32_fd
= -1;
174 static const char *consumerd32_bin
= CONFIG_CONSUMERD32_BIN
;
175 static const char *consumerd64_bin
= CONFIG_CONSUMERD64_BIN
;
176 static const char *consumerd32_libdir
= CONFIG_CONSUMERD32_LIBDIR
;
177 static const char *consumerd64_libdir
= CONFIG_CONSUMERD64_LIBDIR
;
180 void setup_consumerd_path(void)
182 const char *bin
, *libdir
;
185 * Allow INSTALL_BIN_PATH to be used as a target path for the
186 * native architecture size consumer if CONFIG_CONSUMER*_PATH
187 * has not been defined.
189 #if (CAA_BITS_PER_LONG == 32)
190 if (!consumerd32_bin
[0]) {
191 consumerd32_bin
= INSTALL_BIN_PATH
"/" CONSUMERD_FILE
;
193 if (!consumerd32_libdir
[0]) {
194 consumerd32_libdir
= INSTALL_LIB_PATH
;
196 #elif (CAA_BITS_PER_LONG == 64)
197 if (!consumerd64_bin
[0]) {
198 consumerd64_bin
= INSTALL_BIN_PATH
"/" CONSUMERD_FILE
;
200 if (!consumerd64_libdir
[0]) {
201 consumerd64_libdir
= INSTALL_LIB_PATH
;
204 #error "Unknown bitness"
208 * runtime env. var. overrides the build default.
210 bin
= getenv("LTTNG_CONSUMERD32_BIN");
212 consumerd32_bin
= bin
;
214 bin
= getenv("LTTNG_CONSUMERD64_BIN");
216 consumerd64_bin
= bin
;
218 libdir
= getenv("LTTNG_CONSUMERD32_LIBDIR");
220 consumerd32_libdir
= libdir
;
222 libdir
= getenv("LTTNG_CONSUMERD64_LIBDIR");
224 consumerd64_libdir
= libdir
;
229 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
231 static int create_thread_poll_set(struct lttng_poll_event
*events
,
236 if (events
== NULL
|| size
== 0) {
241 ret
= lttng_poll_create(events
, size
, LTTNG_CLOEXEC
);
247 ret
= lttng_poll_add(events
, thread_quit_pipe
[0], LPOLLIN
);
259 * Check if the thread quit pipe was triggered.
261 * Return 1 if it was triggered else 0;
263 static int check_thread_quit_pipe(int fd
, uint32_t events
)
265 if (fd
== thread_quit_pipe
[0] && (events
& LPOLLIN
)) {
273 * Return group ID of the tracing group or -1 if not found.
275 static gid_t
allowed_group(void)
279 if (opt_tracing_group
) {
280 grp
= getgrnam(opt_tracing_group
);
282 grp
= getgrnam(default_tracing_group
);
292 * Init thread quit pipe.
294 * Return -1 on error or 0 if all pipes are created.
296 static int init_thread_quit_pipe(void)
300 ret
= pipe2(thread_quit_pipe
, O_CLOEXEC
);
302 perror("thread quit pipe");
311 * Complete teardown of a kernel session. This free all data structure related
312 * to a kernel session and update counter.
314 static void teardown_kernel_session(struct ltt_session
*session
)
316 if (!session
->kernel_session
) {
317 DBG3("No kernel session when tearingdown session");
321 DBG("Tearing down kernel session");
324 * If a custom kernel consumer was registered, close the socket before
325 * tearing down the complete kernel session structure
327 if (session
->kernel_session
->consumer_fd
!= kconsumer_data
.cmd_sock
) {
328 lttcomm_close_unix_sock(session
->kernel_session
->consumer_fd
);
331 trace_kernel_destroy_session(session
->kernel_session
);
335 * Complete teardown of all UST sessions. This will free everything on his path
336 * and destroy the core essence of all ust sessions :)
338 static void teardown_ust_session(struct ltt_session
*session
)
342 if (!session
->ust_session
) {
343 DBG3("No UST session when tearingdown session");
347 DBG("Tearing down UST session(s)");
349 ret
= ust_app_destroy_trace_all(session
->ust_session
);
351 ERR("Error in ust_app_destroy_trace_all");
354 trace_ust_destroy_session(session
->ust_session
);
358 * Stop all threads by closing the thread quit pipe.
360 static void stop_threads(void)
364 /* Stopping all threads */
365 DBG("Terminating all threads");
366 ret
= notify_thread_pipe(thread_quit_pipe
[1]);
368 ERR("write error on thread quit pipe");
371 /* Dispatch thread */
372 dispatch_thread_exit
= 1;
373 futex_nto1_wake(&ust_cmd_queue
.futex
);
379 static void cleanup(void)
383 struct ltt_session
*sess
, *stmp
;
387 DBG("Removing %s directory", rundir
);
388 ret
= asprintf(&cmd
, "rm -rf %s", rundir
);
390 ERR("asprintf failed. Something is really wrong!");
393 /* Remove lttng run directory */
396 ERR("Unable to clean %s", rundir
);
400 DBG("Cleaning up all session");
402 /* Destroy session list mutex */
403 if (session_list_ptr
!= NULL
) {
404 pthread_mutex_destroy(&session_list_ptr
->lock
);
406 /* Cleanup ALL session */
407 cds_list_for_each_entry_safe(sess
, stmp
,
408 &session_list_ptr
->head
, list
) {
409 teardown_kernel_session(sess
);
410 teardown_ust_session(sess
);
415 DBG("Closing all UST sockets");
416 ust_app_clean_list();
418 pthread_mutex_destroy(&kconsumer_data
.pid_mutex
);
420 if (is_root
&& !opt_no_kernel
) {
421 DBG2("Closing kernel fd");
422 close(kernel_tracer_fd
);
423 DBG("Unloading kernel modules");
424 modprobe_remove_lttng_all();
427 close(thread_quit_pipe
[0]);
428 close(thread_quit_pipe
[1]);
431 DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
432 "Matthew, BEET driven development works!%c[%dm",
433 27, 1, 31, 27, 0, 27, 1, 33, 27, 0);
438 * Send data on a unix socket using the liblttsessiondcomm API.
440 * Return lttcomm error code.
442 static int send_unix_sock(int sock
, void *buf
, size_t len
)
444 /* Check valid length */
449 return lttcomm_send_unix_sock(sock
, buf
, len
);
453 * Free memory of a command context structure.
455 static void clean_command_ctx(struct command_ctx
**cmd_ctx
)
457 DBG("Clean command context structure");
459 if ((*cmd_ctx
)->llm
) {
460 free((*cmd_ctx
)->llm
);
462 if ((*cmd_ctx
)->lsm
) {
463 free((*cmd_ctx
)->lsm
);
471 * Send all stream fds of kernel channel to the consumer.
473 static int send_kconsumer_channel_streams(struct consumer_data
*consumer_data
,
474 int sock
, struct ltt_kernel_channel
*channel
,
475 uid_t uid
, gid_t gid
)
478 struct ltt_kernel_stream
*stream
;
479 struct lttcomm_consumer_msg lkm
;
481 DBG("Sending streams of channel %s to kernel consumer",
482 channel
->channel
->name
);
485 lkm
.cmd_type
= LTTNG_CONSUMER_ADD_CHANNEL
;
486 lkm
.u
.channel
.channel_key
= channel
->fd
;
487 lkm
.u
.channel
.max_sb_size
= channel
->channel
->attr
.subbuf_size
;
488 lkm
.u
.channel
.mmap_len
= 0; /* for kernel */
489 DBG("Sending channel %d to consumer", lkm
.u
.channel
.channel_key
);
490 ret
= lttcomm_send_unix_sock(sock
, &lkm
, sizeof(lkm
));
492 perror("send consumer channel");
497 cds_list_for_each_entry(stream
, &channel
->stream_list
.head
, list
) {
501 lkm
.cmd_type
= LTTNG_CONSUMER_ADD_STREAM
;
502 lkm
.u
.stream
.channel_key
= channel
->fd
;
503 lkm
.u
.stream
.stream_key
= stream
->fd
;
504 lkm
.u
.stream
.state
= stream
->state
;
505 lkm
.u
.stream
.output
= channel
->channel
->attr
.output
;
506 lkm
.u
.stream
.mmap_len
= 0; /* for kernel */
507 lkm
.u
.stream
.uid
= uid
;
508 lkm
.u
.stream
.gid
= gid
;
509 strncpy(lkm
.u
.stream
.path_name
, stream
->pathname
, PATH_MAX
- 1);
510 lkm
.u
.stream
.path_name
[PATH_MAX
- 1] = '\0';
511 DBG("Sending stream %d to consumer", lkm
.u
.stream
.stream_key
);
512 ret
= lttcomm_send_unix_sock(sock
, &lkm
, sizeof(lkm
));
514 perror("send consumer stream");
517 ret
= lttcomm_send_fds_unix_sock(sock
, &stream
->fd
, 1);
519 perror("send consumer stream ancillary data");
524 DBG("consumer channel streams sent");
533 * Send all stream fds of the kernel session to the consumer.
535 static int send_kconsumer_session_streams(struct consumer_data
*consumer_data
,
536 struct ltt_kernel_session
*session
)
539 struct ltt_kernel_channel
*chan
;
540 struct lttcomm_consumer_msg lkm
;
541 int sock
= session
->consumer_fd
;
543 DBG("Sending metadata stream fd");
545 /* Extra protection. It's NOT supposed to be set to 0 at this point */
546 if (session
->consumer_fd
== 0) {
547 session
->consumer_fd
= consumer_data
->cmd_sock
;
550 if (session
->metadata_stream_fd
!= 0) {
551 /* Send metadata channel fd */
552 lkm
.cmd_type
= LTTNG_CONSUMER_ADD_CHANNEL
;
553 lkm
.u
.channel
.channel_key
= session
->metadata
->fd
;
554 lkm
.u
.channel
.max_sb_size
= session
->metadata
->conf
->attr
.subbuf_size
;
555 lkm
.u
.channel
.mmap_len
= 0; /* for kernel */
556 DBG("Sending metadata channel %d to consumer", lkm
.u
.stream
.stream_key
);
557 ret
= lttcomm_send_unix_sock(sock
, &lkm
, sizeof(lkm
));
559 perror("send consumer channel");
563 /* Send metadata stream fd */
564 lkm
.cmd_type
= LTTNG_CONSUMER_ADD_STREAM
;
565 lkm
.u
.stream
.channel_key
= session
->metadata
->fd
;
566 lkm
.u
.stream
.stream_key
= session
->metadata_stream_fd
;
567 lkm
.u
.stream
.state
= LTTNG_CONSUMER_ACTIVE_STREAM
;
568 lkm
.u
.stream
.output
= DEFAULT_KERNEL_CHANNEL_OUTPUT
;
569 lkm
.u
.stream
.mmap_len
= 0; /* for kernel */
570 lkm
.u
.stream
.uid
= session
->uid
;
571 lkm
.u
.stream
.gid
= session
->gid
;
572 strncpy(lkm
.u
.stream
.path_name
, session
->metadata
->pathname
, PATH_MAX
- 1);
573 lkm
.u
.stream
.path_name
[PATH_MAX
- 1] = '\0';
574 DBG("Sending metadata stream %d to consumer", lkm
.u
.stream
.stream_key
);
575 ret
= lttcomm_send_unix_sock(sock
, &lkm
, sizeof(lkm
));
577 perror("send consumer stream");
580 ret
= lttcomm_send_fds_unix_sock(sock
, &session
->metadata_stream_fd
, 1);
582 perror("send consumer stream");
587 cds_list_for_each_entry(chan
, &session
->channel_list
.head
, list
) {
588 ret
= send_kconsumer_channel_streams(consumer_data
, sock
, chan
,
589 session
->uid
, session
->gid
);
595 DBG("consumer fds (metadata and channel streams) sent");
604 * Notify UST applications using the shm mmap futex.
606 static int notify_ust_apps(int active
)
610 DBG("Notifying applications of session daemon state: %d", active
);
612 /* See shm.c for this call implying mmap, shm and futex calls */
613 wait_shm_mmap
= shm_ust_get_mmap(wait_shm_path
, is_root
);
614 if (wait_shm_mmap
== NULL
) {
618 /* Wake waiting process */
619 futex_wait_update((int32_t *) wait_shm_mmap
, active
);
621 /* Apps notified successfully */
629 * Setup the outgoing data buffer for the response (llm) by allocating the
630 * right amount of memory and copying the original information from the lsm
633 * Return total size of the buffer pointed by buf.
635 static int setup_lttng_msg(struct command_ctx
*cmd_ctx
, size_t size
)
641 cmd_ctx
->llm
= zmalloc(sizeof(struct lttcomm_lttng_msg
) + buf_size
);
642 if (cmd_ctx
->llm
== NULL
) {
648 /* Copy common data */
649 cmd_ctx
->llm
->cmd_type
= cmd_ctx
->lsm
->cmd_type
;
650 cmd_ctx
->llm
->pid
= cmd_ctx
->lsm
->domain
.attr
.pid
;
652 cmd_ctx
->llm
->data_size
= size
;
653 cmd_ctx
->lttng_msg_size
= sizeof(struct lttcomm_lttng_msg
) + buf_size
;
662 * Update the kernel poll set of all channel fd available over all tracing
663 * session. Add the wakeup pipe at the end of the set.
665 static int update_kernel_poll(struct lttng_poll_event
*events
)
668 struct ltt_session
*session
;
669 struct ltt_kernel_channel
*channel
;
671 DBG("Updating kernel poll set");
674 cds_list_for_each_entry(session
, &session_list_ptr
->head
, list
) {
675 session_lock(session
);
676 if (session
->kernel_session
== NULL
) {
677 session_unlock(session
);
681 cds_list_for_each_entry(channel
,
682 &session
->kernel_session
->channel_list
.head
, list
) {
683 /* Add channel fd to the kernel poll set */
684 ret
= lttng_poll_add(events
, channel
->fd
, LPOLLIN
| LPOLLRDNORM
);
686 session_unlock(session
);
689 DBG("Channel fd %d added to kernel set", channel
->fd
);
691 session_unlock(session
);
693 session_unlock_list();
698 session_unlock_list();
703 * Find the channel fd from 'fd' over all tracing session. When found, check
704 * for new channel stream and send those stream fds to the kernel consumer.
706 * Useful for CPU hotplug feature.
708 static int update_kernel_stream(struct consumer_data
*consumer_data
, int fd
)
711 struct ltt_session
*session
;
712 struct ltt_kernel_channel
*channel
;
714 DBG("Updating kernel streams for channel fd %d", fd
);
717 cds_list_for_each_entry(session
, &session_list_ptr
->head
, list
) {
718 session_lock(session
);
719 if (session
->kernel_session
== NULL
) {
720 session_unlock(session
);
724 /* This is not suppose to be 0 but this is an extra security check */
725 if (session
->kernel_session
->consumer_fd
== 0) {
726 session
->kernel_session
->consumer_fd
= consumer_data
->cmd_sock
;
729 cds_list_for_each_entry(channel
,
730 &session
->kernel_session
->channel_list
.head
, list
) {
731 if (channel
->fd
== fd
) {
732 DBG("Channel found, updating kernel streams");
733 ret
= kernel_open_channel_stream(channel
);
739 * Have we already sent fds to the consumer? If yes, it means
740 * that tracing is started so it is safe to send our updated
743 if (session
->kernel_session
->consumer_fds_sent
== 1) {
744 ret
= send_kconsumer_channel_streams(consumer_data
,
745 session
->kernel_session
->consumer_fd
, channel
,
746 session
->uid
, session
->gid
);
754 session_unlock(session
);
756 session_unlock_list();
760 session_unlock(session
);
761 session_unlock_list();
766 * For each tracing session, update newly registered apps.
768 static void update_ust_app(int app_sock
)
770 struct ltt_session
*sess
, *stmp
;
772 /* For all tracing session(s) */
773 cds_list_for_each_entry_safe(sess
, stmp
, &session_list_ptr
->head
, list
) {
774 if (sess
->ust_session
) {
775 ust_app_global_update(sess
->ust_session
, app_sock
);
781 * This thread manage event coming from the kernel.
783 * Features supported in this thread:
786 static void *thread_manage_kernel(void *data
)
788 int ret
, i
, pollfd
, update_poll_flag
= 1;
789 uint32_t revents
, nb_fd
;
791 struct lttng_poll_event events
;
793 DBG("Thread manage kernel started");
795 ret
= create_thread_poll_set(&events
, 2);
800 ret
= lttng_poll_add(&events
, kernel_poll_pipe
[0], LPOLLIN
);
806 if (update_poll_flag
== 1) {
808 * Reset number of fd in the poll set. Always 2 since there is the thread
809 * quit pipe and the kernel pipe.
813 ret
= update_kernel_poll(&events
);
817 update_poll_flag
= 0;
820 nb_fd
= LTTNG_POLL_GETNB(&events
);
822 DBG("Thread kernel polling on %d fds", nb_fd
);
824 /* Zeroed the poll events */
825 lttng_poll_reset(&events
);
827 /* Poll infinite value of time */
828 ret
= lttng_poll_wait(&events
, -1);
831 } else if (ret
== 0) {
832 /* Should not happen since timeout is infinite */
833 ERR("Return value of poll is 0 with an infinite timeout.\n"
834 "This should not have happened! Continuing...");
838 for (i
= 0; i
< nb_fd
; i
++) {
839 /* Fetch once the poll data */
840 revents
= LTTNG_POLL_GETEV(&events
, i
);
841 pollfd
= LTTNG_POLL_GETFD(&events
, i
);
843 /* Thread quit pipe has been closed. Killing thread. */
844 ret
= check_thread_quit_pipe(pollfd
, revents
);
849 /* Check for data on kernel pipe */
850 if (pollfd
== kernel_poll_pipe
[0] && (revents
& LPOLLIN
)) {
851 ret
= read(kernel_poll_pipe
[0], &tmp
, 1);
852 update_poll_flag
= 1;
856 * New CPU detected by the kernel. Adding kernel stream to
857 * kernel session and updating the kernel consumer
859 if (revents
& LPOLLIN
) {
860 ret
= update_kernel_stream(&kconsumer_data
, pollfd
);
866 * TODO: We might want to handle the LPOLLERR | LPOLLHUP
867 * and unregister kernel stream at this point.
875 DBG("Kernel thread dying");
876 close(kernel_poll_pipe
[0]);
877 close(kernel_poll_pipe
[1]);
879 lttng_poll_clean(&events
);
885 * This thread manage the consumer error sent back to the session daemon.
887 static void *thread_manage_consumer(void *data
)
889 int sock
= 0, i
, ret
, pollfd
;
890 uint32_t revents
, nb_fd
;
891 enum lttcomm_return_code code
;
892 struct lttng_poll_event events
;
893 struct consumer_data
*consumer_data
= data
;
895 DBG("[thread] Manage consumer started");
897 ret
= lttcomm_listen_unix_sock(consumer_data
->err_sock
);
903 * Pass 2 as size here for the thread quit pipe and kconsumerd_err_sock.
904 * Nothing more will be added to this poll set.
906 ret
= create_thread_poll_set(&events
, 2);
911 ret
= lttng_poll_add(&events
, consumer_data
->err_sock
, LPOLLIN
| LPOLLRDHUP
);
916 nb_fd
= LTTNG_POLL_GETNB(&events
);
918 /* Inifinite blocking call, waiting for transmission */
919 ret
= lttng_poll_wait(&events
, -1);
924 for (i
= 0; i
< nb_fd
; i
++) {
925 /* Fetch once the poll data */
926 revents
= LTTNG_POLL_GETEV(&events
, i
);
927 pollfd
= LTTNG_POLL_GETFD(&events
, i
);
929 /* Thread quit pipe has been closed. Killing thread. */
930 ret
= check_thread_quit_pipe(pollfd
, revents
);
935 /* Event on the registration socket */
936 if (pollfd
== consumer_data
->err_sock
) {
937 if (revents
& (LPOLLERR
| LPOLLHUP
| LPOLLRDHUP
)) {
938 ERR("consumer err socket poll error");
944 sock
= lttcomm_accept_unix_sock(consumer_data
->err_sock
);
949 DBG2("Receiving code from consumer err_sock");
951 /* Getting status code from kconsumerd */
952 ret
= lttcomm_recv_unix_sock(sock
, &code
,
953 sizeof(enum lttcomm_return_code
));
958 if (code
== CONSUMERD_COMMAND_SOCK_READY
) {
959 consumer_data
->cmd_sock
=
960 lttcomm_connect_unix_sock(consumer_data
->cmd_unix_sock_path
);
961 if (consumer_data
->cmd_sock
< 0) {
962 sem_post(&consumer_data
->sem
);
963 PERROR("consumer connect");
966 /* Signal condition to tell that the kconsumerd is ready */
967 sem_post(&consumer_data
->sem
);
968 DBG("consumer command socket ready");
970 ERR("consumer error when waiting for SOCK_READY : %s",
971 lttcomm_get_readable_code(-code
));
975 /* Remove the kconsumerd error sock since we've established a connexion */
976 ret
= lttng_poll_del(&events
, consumer_data
->err_sock
);
981 ret
= lttng_poll_add(&events
, sock
, LPOLLIN
| LPOLLRDHUP
);
986 /* Update number of fd */
987 nb_fd
= LTTNG_POLL_GETNB(&events
);
989 /* Inifinite blocking call, waiting for transmission */
990 ret
= lttng_poll_wait(&events
, -1);
995 for (i
= 0; i
< nb_fd
; i
++) {
996 /* Fetch once the poll data */
997 revents
= LTTNG_POLL_GETEV(&events
, i
);
998 pollfd
= LTTNG_POLL_GETFD(&events
, i
);
1000 /* Thread quit pipe has been closed. Killing thread. */
1001 ret
= check_thread_quit_pipe(pollfd
, revents
);
1006 /* Event on the kconsumerd socket */
1007 if (pollfd
== sock
) {
1008 if (revents
& (LPOLLERR
| LPOLLHUP
| LPOLLRDHUP
)) {
1009 ERR("consumer err socket second poll error");
1015 /* Wait for any kconsumerd error */
1016 ret
= lttcomm_recv_unix_sock(sock
, &code
,
1017 sizeof(enum lttcomm_return_code
));
1019 ERR("consumer closed the command socket");
1023 ERR("consumer return code : %s", lttcomm_get_readable_code(-code
));
1026 DBG("consumer thread dying");
1027 close(consumer_data
->err_sock
);
1028 close(consumer_data
->cmd_sock
);
1031 unlink(consumer_data
->err_unix_sock_path
);
1032 unlink(consumer_data
->cmd_unix_sock_path
);
1033 consumer_data
->pid
= 0;
1035 lttng_poll_clean(&events
);
1041 * This thread manage application communication.
1043 static void *thread_manage_apps(void *data
)
1046 uint32_t revents
, nb_fd
;
1047 struct ust_command ust_cmd
;
1048 struct lttng_poll_event events
;
1050 DBG("[thread] Manage application started");
1052 rcu_register_thread();
1053 rcu_thread_online();
1055 ret
= create_thread_poll_set(&events
, 2);
1060 ret
= lttng_poll_add(&events
, apps_cmd_pipe
[0], LPOLLIN
| LPOLLRDHUP
);
1066 /* Zeroed the events structure */
1067 lttng_poll_reset(&events
);
1069 nb_fd
= LTTNG_POLL_GETNB(&events
);
1071 DBG("Apps thread polling on %d fds", nb_fd
);
1073 /* Inifinite blocking call, waiting for transmission */
1074 ret
= lttng_poll_wait(&events
, -1);
1079 for (i
= 0; i
< nb_fd
; i
++) {
1080 /* Fetch once the poll data */
1081 revents
= LTTNG_POLL_GETEV(&events
, i
);
1082 pollfd
= LTTNG_POLL_GETFD(&events
, i
);
1084 /* Thread quit pipe has been closed. Killing thread. */
1085 ret
= check_thread_quit_pipe(pollfd
, revents
);
1090 /* Inspect the apps cmd pipe */
1091 if (pollfd
== apps_cmd_pipe
[0]) {
1092 if (revents
& (LPOLLERR
| LPOLLHUP
| LPOLLRDHUP
)) {
1093 ERR("Apps command pipe error");
1095 } else if (revents
& LPOLLIN
) {
1097 ret
= read(apps_cmd_pipe
[0], &ust_cmd
, sizeof(ust_cmd
));
1098 if (ret
< 0 || ret
< sizeof(ust_cmd
)) {
1099 perror("read apps cmd pipe");
1103 /* Register applicaton to the session daemon */
1104 ret
= ust_app_register(&ust_cmd
.reg_msg
,
1106 if (ret
== -ENOMEM
) {
1108 } else if (ret
< 0) {
1113 * Validate UST version compatibility.
1115 ret
= ust_app_validate_version(ust_cmd
.sock
);
1118 * Add channel(s) and event(s) to newly registered apps
1119 * from lttng global UST domain.
1121 update_ust_app(ust_cmd
.sock
);
1124 ret
= ust_app_register_done(ust_cmd
.sock
);
1127 * If the registration is not possible, we simply
1128 * unregister the apps and continue
1130 ust_app_unregister(ust_cmd
.sock
);
1133 * We just need here to monitor the close of the UST
1134 * socket and poll set monitor those by default.
1136 ret
= lttng_poll_add(&events
, ust_cmd
.sock
, 0);
1141 DBG("Apps with sock %d added to poll set",
1149 * At this point, we know that a registered application made
1150 * the event at poll_wait.
1152 if (revents
& (LPOLLERR
| LPOLLHUP
| LPOLLRDHUP
)) {
1153 /* Removing from the poll set */
1154 ret
= lttng_poll_del(&events
, pollfd
);
1159 /* Socket closed on remote end. */
1160 ust_app_unregister(pollfd
);
1168 DBG("Application communication apps dying");
1169 close(apps_cmd_pipe
[0]);
1170 close(apps_cmd_pipe
[1]);
1172 lttng_poll_clean(&events
);
1174 rcu_thread_offline();
1175 rcu_unregister_thread();
1180 * Dispatch request from the registration threads to the application
1181 * communication thread.
1183 static void *thread_dispatch_ust_registration(void *data
)
1186 struct cds_wfq_node
*node
;
1187 struct ust_command
*ust_cmd
= NULL
;
1189 DBG("[thread] Dispatch UST command started");
1191 while (!dispatch_thread_exit
) {
1192 /* Atomically prepare the queue futex */
1193 futex_nto1_prepare(&ust_cmd_queue
.futex
);
1196 /* Dequeue command for registration */
1197 node
= cds_wfq_dequeue_blocking(&ust_cmd_queue
.queue
);
1199 DBG("Woken up but nothing in the UST command queue");
1200 /* Continue thread execution */
1204 ust_cmd
= caa_container_of(node
, struct ust_command
, node
);
1206 DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
1207 " gid:%d sock:%d name:%s (version %d.%d)",
1208 ust_cmd
->reg_msg
.pid
, ust_cmd
->reg_msg
.ppid
,
1209 ust_cmd
->reg_msg
.uid
, ust_cmd
->reg_msg
.gid
,
1210 ust_cmd
->sock
, ust_cmd
->reg_msg
.name
,
1211 ust_cmd
->reg_msg
.major
, ust_cmd
->reg_msg
.minor
);
1213 * Inform apps thread of the new application registration. This
1214 * call is blocking so we can be assured that the data will be read
1215 * at some point in time or wait to the end of the world :)
1217 ret
= write(apps_cmd_pipe
[1], ust_cmd
,
1218 sizeof(struct ust_command
));
1220 perror("write apps cmd pipe");
1221 if (errno
== EBADF
) {
1223 * We can't inform the application thread to process
1224 * registration. We will exit or else application
1225 * registration will not occur and tracing will never
1232 } while (node
!= NULL
);
1234 /* Futex wait on queue. Blocking call on futex() */
1235 futex_nto1_wait(&ust_cmd_queue
.futex
);
1239 DBG("Dispatch thread dying");
1244 * This thread manage application registration.
1246 static void *thread_registration_apps(void *data
)
1248 int sock
= 0, i
, ret
, pollfd
;
1249 uint32_t revents
, nb_fd
;
1250 struct lttng_poll_event events
;
1252 * Get allocated in this thread, enqueued to a global queue, dequeued and
1253 * freed in the manage apps thread.
1255 struct ust_command
*ust_cmd
= NULL
;
1257 DBG("[thread] Manage application registration started");
1259 ret
= lttcomm_listen_unix_sock(apps_sock
);
1265 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
1266 * more will be added to this poll set.
1268 ret
= create_thread_poll_set(&events
, 2);
1273 /* Add the application registration socket */
1274 ret
= lttng_poll_add(&events
, apps_sock
, LPOLLIN
| LPOLLRDHUP
);
1279 /* Notify all applications to register */
1280 ret
= notify_ust_apps(1);
1282 ERR("Failed to notify applications or create the wait shared memory.\n"
1283 "Execution continues but there might be problem for already\n"
1284 "running applications that wishes to register.");
1288 DBG("Accepting application registration");
1290 nb_fd
= LTTNG_POLL_GETNB(&events
);
1292 /* Inifinite blocking call, waiting for transmission */
1293 ret
= lttng_poll_wait(&events
, -1);
1298 for (i
= 0; i
< nb_fd
; i
++) {
1299 /* Fetch once the poll data */
1300 revents
= LTTNG_POLL_GETEV(&events
, i
);
1301 pollfd
= LTTNG_POLL_GETFD(&events
, i
);
1303 /* Thread quit pipe has been closed. Killing thread. */
1304 ret
= check_thread_quit_pipe(pollfd
, revents
);
1309 /* Event on the registration socket */
1310 if (pollfd
== apps_sock
) {
1311 if (revents
& (LPOLLERR
| LPOLLHUP
| LPOLLRDHUP
)) {
1312 ERR("Register apps socket poll error");
1314 } else if (revents
& LPOLLIN
) {
1315 sock
= lttcomm_accept_unix_sock(apps_sock
);
1320 /* Create UST registration command for enqueuing */
1321 ust_cmd
= zmalloc(sizeof(struct ust_command
));
1322 if (ust_cmd
== NULL
) {
1323 perror("ust command zmalloc");
1328 * Using message-based transmissions to ensure we don't
1329 * have to deal with partially received messages.
1331 ret
= lttcomm_recv_unix_sock(sock
, &ust_cmd
->reg_msg
,
1332 sizeof(struct ust_register_msg
));
1333 if (ret
< 0 || ret
< sizeof(struct ust_register_msg
)) {
1335 perror("lttcomm_recv_unix_sock register apps");
1337 ERR("Wrong size received on apps register");
1344 ust_cmd
->sock
= sock
;
1346 DBG("UST registration received with pid:%d ppid:%d uid:%d"
1347 " gid:%d sock:%d name:%s (version %d.%d)",
1348 ust_cmd
->reg_msg
.pid
, ust_cmd
->reg_msg
.ppid
,
1349 ust_cmd
->reg_msg
.uid
, ust_cmd
->reg_msg
.gid
,
1350 ust_cmd
->sock
, ust_cmd
->reg_msg
.name
,
1351 ust_cmd
->reg_msg
.major
, ust_cmd
->reg_msg
.minor
);
1354 * Lock free enqueue the registration request. The red pill
1355 * has been taken! This apps will be part of the *system*.
1357 cds_wfq_enqueue(&ust_cmd_queue
.queue
, &ust_cmd
->node
);
1360 * Wake the registration queue futex. Implicit memory
1361 * barrier with the exchange in cds_wfq_enqueue.
1363 futex_nto1_wake(&ust_cmd_queue
.futex
);
1370 DBG("UST Registration thread dying");
1372 /* Notify that the registration thread is gone */
1377 unlink(apps_unix_sock_path
);
1379 lttng_poll_clean(&events
);
1385 * Start the thread_manage_consumer. This must be done after a lttng-consumerd
1386 * exec or it will fails.
1388 static int spawn_consumer_thread(struct consumer_data
*consumer_data
)
1391 struct timespec timeout
;
1393 timeout
.tv_sec
= DEFAULT_SEM_WAIT_TIMEOUT
;
1394 timeout
.tv_nsec
= 0;
1396 /* Setup semaphore */
1397 ret
= sem_init(&consumer_data
->sem
, 0, 0);
1399 PERROR("sem_init consumer semaphore");
1403 ret
= pthread_create(&consumer_data
->thread
, NULL
,
1404 thread_manage_consumer
, consumer_data
);
1406 PERROR("pthread_create consumer");
1411 /* Get time for sem_timedwait absolute timeout */
1412 ret
= clock_gettime(CLOCK_REALTIME
, &timeout
);
1414 PERROR("clock_gettime spawn consumer");
1415 /* Infinite wait for the kconsumerd thread to be ready */
1416 ret
= sem_wait(&consumer_data
->sem
);
1418 /* Normal timeout if the gettime was successful */
1419 timeout
.tv_sec
+= DEFAULT_SEM_WAIT_TIMEOUT
;
1420 ret
= sem_timedwait(&consumer_data
->sem
, &timeout
);
1424 if (errno
== ETIMEDOUT
) {
1426 * Call has timed out so we kill the kconsumerd_thread and return
1429 ERR("The consumer thread was never ready. Killing it");
1430 ret
= pthread_cancel(consumer_data
->thread
);
1432 PERROR("pthread_cancel consumer thread");
1435 PERROR("semaphore wait failed consumer thread");
1440 pthread_mutex_lock(&consumer_data
->pid_mutex
);
1441 if (consumer_data
->pid
== 0) {
1442 ERR("Kconsumerd did not start");
1443 pthread_mutex_unlock(&consumer_data
->pid_mutex
);
1446 pthread_mutex_unlock(&consumer_data
->pid_mutex
);
1455 * Join consumer thread
1457 static int join_consumer_thread(struct consumer_data
*consumer_data
)
1462 if (consumer_data
->pid
!= 0) {
1463 ret
= kill(consumer_data
->pid
, SIGTERM
);
1465 ERR("Error killing consumer daemon");
1468 return pthread_join(consumer_data
->thread
, &status
);
1475 * Fork and exec a consumer daemon (consumerd).
1477 * Return pid if successful else -1.
1479 static pid_t
spawn_consumerd(struct consumer_data
*consumer_data
)
1483 const char *consumer_to_use
;
1484 const char *verbosity
;
1487 DBG("Spawning consumerd");
1494 if (opt_verbose_consumer
) {
1495 verbosity
= "--verbose";
1497 verbosity
= "--quiet";
1499 switch (consumer_data
->type
) {
1500 case LTTNG_CONSUMER_KERNEL
:
1502 * Find out which consumerd to execute. We will first try the
1503 * 64-bit path, then the sessiond's installation directory, and
1504 * fallback on the 32-bit one,
1506 DBG3("Looking for a kernel consumer at these locations:");
1507 DBG3(" 1) %s", consumerd64_bin
);
1508 DBG3(" 2) %s/%s", INSTALL_BIN_PATH
, CONSUMERD_FILE
);
1509 DBG3(" 3) %s", consumerd32_bin
);
1510 if (stat(consumerd64_bin
, &st
) == 0) {
1511 DBG3("Found location #1");
1512 consumer_to_use
= consumerd64_bin
;
1513 } else if (stat(INSTALL_BIN_PATH
"/" CONSUMERD_FILE
, &st
) == 0) {
1514 DBG3("Found location #2");
1515 consumer_to_use
= INSTALL_BIN_PATH
"/" CONSUMERD_FILE
;
1516 } else if (stat(consumerd32_bin
, &st
) == 0) {
1517 DBG3("Found location #3");
1518 consumer_to_use
= consumerd32_bin
;
1520 DBG("Could not find any valid consumerd executable");
1523 DBG("Using kernel consumer at: %s", consumer_to_use
);
1524 execl(consumer_to_use
,
1525 "lttng-consumerd", verbosity
, "-k",
1526 "--consumerd-cmd-sock", consumer_data
->cmd_unix_sock_path
,
1527 "--consumerd-err-sock", consumer_data
->err_unix_sock_path
,
1530 case LTTNG_CONSUMER64_UST
:
1532 char *tmpnew
= NULL
;
1534 if (consumerd64_libdir
[0] != '\0') {
1538 tmp
= getenv("LD_LIBRARY_PATH");
1542 tmplen
= strlen("LD_LIBRARY_PATH=")
1543 + strlen(consumerd64_libdir
) + 1 /* : */ + strlen(tmp
);
1544 tmpnew
= zmalloc(tmplen
+ 1 /* \0 */);
1549 strcpy(tmpnew
, "LD_LIBRARY_PATH=");
1550 strcat(tmpnew
, consumerd64_libdir
);
1551 if (tmp
[0] != '\0') {
1552 strcat(tmpnew
, ":");
1553 strcat(tmpnew
, tmp
);
1555 ret
= putenv(tmpnew
);
1561 DBG("Using 64-bit UST consumer at: %s", consumerd64_bin
);
1562 ret
= execl(consumerd64_bin
, "lttng-consumerd", verbosity
, "-u",
1563 "--consumerd-cmd-sock", consumer_data
->cmd_unix_sock_path
,
1564 "--consumerd-err-sock", consumer_data
->err_unix_sock_path
,
1566 if (consumerd64_libdir
[0] != '\0') {
1574 case LTTNG_CONSUMER32_UST
:
1576 char *tmpnew
= NULL
;
1578 if (consumerd32_libdir
[0] != '\0') {
1582 tmp
= getenv("LD_LIBRARY_PATH");
1586 tmplen
= strlen("LD_LIBRARY_PATH=")
1587 + strlen(consumerd32_libdir
) + 1 /* : */ + strlen(tmp
);
1588 tmpnew
= zmalloc(tmplen
+ 1 /* \0 */);
1593 strcpy(tmpnew
, "LD_LIBRARY_PATH=");
1594 strcat(tmpnew
, consumerd32_libdir
);
1595 if (tmp
[0] != '\0') {
1596 strcat(tmpnew
, ":");
1597 strcat(tmpnew
, tmp
);
1599 ret
= putenv(tmpnew
);
1605 DBG("Using 32-bit UST consumer at: %s", consumerd32_bin
);
1606 ret
= execl(consumerd32_bin
, "lttng-consumerd", verbosity
, "-u",
1607 "--consumerd-cmd-sock", consumer_data
->cmd_unix_sock_path
,
1608 "--consumerd-err-sock", consumer_data
->err_unix_sock_path
,
1610 if (consumerd32_libdir
[0] != '\0') {
1619 perror("unknown consumer type");
1623 perror("kernel start consumer exec");
1626 } else if (pid
> 0) {
1629 perror("start consumer fork");
1637 * Spawn the consumerd daemon and session daemon thread.
1639 static int start_consumerd(struct consumer_data
*consumer_data
)
1643 pthread_mutex_lock(&consumer_data
->pid_mutex
);
1644 if (consumer_data
->pid
!= 0) {
1645 pthread_mutex_unlock(&consumer_data
->pid_mutex
);
1649 ret
= spawn_consumerd(consumer_data
);
1651 ERR("Spawning consumerd failed");
1652 pthread_mutex_unlock(&consumer_data
->pid_mutex
);
1656 /* Setting up the consumer_data pid */
1657 consumer_data
->pid
= ret
;
1658 DBG2("Consumer pid %d", consumer_data
->pid
);
1659 pthread_mutex_unlock(&consumer_data
->pid_mutex
);
1661 DBG2("Spawning consumer control thread");
1662 ret
= spawn_consumer_thread(consumer_data
);
1664 ERR("Fatal error spawning consumer control thread");
1676 * Check version of the lttng-modules.
1678 static int validate_lttng_modules_version(void)
1680 return kernel_validate_version(kernel_tracer_fd
);
1684 * Setup necessary data for kernel tracer action.
1686 static int init_kernel_tracer(void)
1690 /* Modprobe lttng kernel modules */
1691 ret
= modprobe_lttng_control();
1696 /* Open debugfs lttng */
1697 kernel_tracer_fd
= open(module_proc_lttng
, O_RDWR
);
1698 if (kernel_tracer_fd
< 0) {
1699 DBG("Failed to open %s", module_proc_lttng
);
1704 /* Validate kernel version */
1705 ret
= validate_lttng_modules_version();
1710 ret
= modprobe_lttng_data();
1715 DBG("Kernel tracer fd %d", kernel_tracer_fd
);
1719 modprobe_remove_lttng_control();
1720 close(kernel_tracer_fd
);
1721 kernel_tracer_fd
= 0;
1722 return LTTCOMM_KERN_VERSION
;
1725 close(kernel_tracer_fd
);
1728 modprobe_remove_lttng_control();
1731 WARN("No kernel tracer available");
1732 kernel_tracer_fd
= 0;
1733 return LTTCOMM_KERN_NA
;
1737 * Init tracing by creating trace directory and sending fds kernel consumer.
1739 static int init_kernel_tracing(struct ltt_kernel_session
*session
)
1743 if (session
->consumer_fds_sent
== 0) {
1745 * Assign default kernel consumer socket if no consumer assigned to the
1746 * kernel session. At this point, it's NOT suppose to be 0 but this is
1747 * an extra security check.
1749 if (session
->consumer_fd
== 0) {
1750 session
->consumer_fd
= kconsumer_data
.cmd_sock
;
1753 ret
= send_kconsumer_session_streams(&kconsumer_data
, session
);
1755 ret
= LTTCOMM_KERN_CONSUMER_FAIL
;
1759 session
->consumer_fds_sent
= 1;
1767 * Create an UST session and add it to the session ust list.
1769 static int create_ust_session(struct ltt_session
*session
,
1770 struct lttng_domain
*domain
)
1772 struct ltt_ust_session
*lus
= NULL
;
1775 switch (domain
->type
) {
1776 case LTTNG_DOMAIN_UST
:
1779 ret
= LTTCOMM_UNKNOWN_DOMAIN
;
1783 DBG("Creating UST session");
1785 lus
= trace_ust_create_session(session
->path
, session
->id
, domain
);
1787 ret
= LTTCOMM_UST_SESS_FAIL
;
1791 ret
= run_as_mkdir_recursive(lus
->pathname
, S_IRWXU
| S_IRWXG
,
1792 session
->uid
, session
->gid
);
1794 if (ret
!= -EEXIST
) {
1795 ERR("Trace directory creation error");
1796 ret
= LTTCOMM_UST_SESS_FAIL
;
1801 /* The domain type dictate different actions on session creation */
1802 switch (domain
->type
) {
1803 case LTTNG_DOMAIN_UST
:
1804 /* No ustctl for the global UST domain */
1807 ERR("Unknown UST domain on create session %d", domain
->type
);
1810 lus
->uid
= session
->uid
;
1811 lus
->gid
= session
->gid
;
1812 session
->ust_session
= lus
;
1822 * Create a kernel tracer session then create the default channel.
1824 static int create_kernel_session(struct ltt_session
*session
)
1828 DBG("Creating kernel session");
1830 ret
= kernel_create_session(session
, kernel_tracer_fd
);
1832 ret
= LTTCOMM_KERN_SESS_FAIL
;
1836 /* Set kernel consumer socket fd */
1837 if (kconsumer_data
.cmd_sock
) {
1838 session
->kernel_session
->consumer_fd
= kconsumer_data
.cmd_sock
;
1841 ret
= run_as_mkdir_recursive(session
->kernel_session
->trace_path
,
1842 S_IRWXU
| S_IRWXG
, session
->uid
, session
->gid
);
1844 if (ret
!= -EEXIST
) {
1845 ERR("Trace directory creation error");
1849 session
->kernel_session
->uid
= session
->uid
;
1850 session
->kernel_session
->gid
= session
->gid
;
1857 * Check if the UID or GID match the session. Root user has access to all
1860 static int session_access_ok(struct ltt_session
*session
, uid_t uid
, gid_t gid
)
1862 if (uid
!= session
->uid
&& gid
!= session
->gid
&& uid
!= 0) {
1869 static unsigned int lttng_sessions_count(uid_t uid
, gid_t gid
)
1872 struct ltt_session
*session
;
1874 DBG("Counting number of available session for UID %d GID %d",
1876 cds_list_for_each_entry(session
, &session_list_ptr
->head
, list
) {
1878 * Only list the sessions the user can control.
1880 if (!session_access_ok(session
, uid
, gid
)) {
1889 * Using the session list, filled a lttng_session array to send back to the
1890 * client for session listing.
1892 * The session list lock MUST be acquired before calling this function. Use
1893 * session_lock_list() and session_unlock_list().
1895 static void list_lttng_sessions(struct lttng_session
*sessions
, uid_t uid
,
1899 struct ltt_session
*session
;
1901 DBG("Getting all available session for UID %d GID %d",
1904 * Iterate over session list and append data after the control struct in
1907 cds_list_for_each_entry(session
, &session_list_ptr
->head
, list
) {
1909 * Only list the sessions the user can control.
1911 if (!session_access_ok(session
, uid
, gid
)) {
1914 strncpy(sessions
[i
].path
, session
->path
, PATH_MAX
);
1915 sessions
[i
].path
[PATH_MAX
- 1] = '\0';
1916 strncpy(sessions
[i
].name
, session
->name
, NAME_MAX
);
1917 sessions
[i
].name
[NAME_MAX
- 1] = '\0';
1918 sessions
[i
].enabled
= session
->enabled
;
1924 * Fill lttng_channel array of all channels.
1926 static void list_lttng_channels(int domain
, struct ltt_session
*session
,
1927 struct lttng_channel
*channels
)
1930 struct ltt_kernel_channel
*kchan
;
1932 DBG("Listing channels for session %s", session
->name
);
1935 case LTTNG_DOMAIN_KERNEL
:
1936 /* Kernel channels */
1937 if (session
->kernel_session
!= NULL
) {
1938 cds_list_for_each_entry(kchan
,
1939 &session
->kernel_session
->channel_list
.head
, list
) {
1940 /* Copy lttng_channel struct to array */
1941 memcpy(&channels
[i
], kchan
->channel
, sizeof(struct lttng_channel
));
1942 channels
[i
].enabled
= kchan
->enabled
;
1947 case LTTNG_DOMAIN_UST
:
1949 struct lttng_ht_iter iter
;
1950 struct ltt_ust_channel
*uchan
;
1952 cds_lfht_for_each_entry(session
->ust_session
->domain_global
.channels
->ht
,
1953 &iter
.iter
, uchan
, node
.node
) {
1954 strncpy(channels
[i
].name
, uchan
->name
, LTTNG_SYMBOL_NAME_LEN
);
1955 channels
[i
].attr
.overwrite
= uchan
->attr
.overwrite
;
1956 channels
[i
].attr
.subbuf_size
= uchan
->attr
.subbuf_size
;
1957 channels
[i
].attr
.num_subbuf
= uchan
->attr
.num_subbuf
;
1958 channels
[i
].attr
.switch_timer_interval
=
1959 uchan
->attr
.switch_timer_interval
;
1960 channels
[i
].attr
.read_timer_interval
=
1961 uchan
->attr
.read_timer_interval
;
1962 channels
[i
].enabled
= uchan
->enabled
;
1963 switch (uchan
->attr
.output
) {
1964 case LTTNG_UST_MMAP
:
1966 channels
[i
].attr
.output
= LTTNG_EVENT_MMAP
;
1979 * Create a list of ust global domain events.
1981 static int list_lttng_ust_global_events(char *channel_name
,
1982 struct ltt_ust_domain_global
*ust_global
, struct lttng_event
**events
)
1985 unsigned int nb_event
= 0;
1986 struct lttng_ht_iter iter
;
1987 struct lttng_ht_node_str
*node
;
1988 struct ltt_ust_channel
*uchan
;
1989 struct ltt_ust_event
*uevent
;
1990 struct lttng_event
*tmp
;
1992 DBG("Listing UST global events for channel %s", channel_name
);
1996 lttng_ht_lookup(ust_global
->channels
, (void *)channel_name
, &iter
);
1997 node
= lttng_ht_iter_get_node_str(&iter
);
1999 ret
= -LTTCOMM_UST_CHAN_NOT_FOUND
;
2003 uchan
= caa_container_of(&node
->node
, struct ltt_ust_channel
, node
.node
);
2005 nb_event
+= lttng_ht_get_count(uchan
->events
);
2007 if (nb_event
== 0) {
2012 DBG3("Listing UST global %d events", nb_event
);
2014 tmp
= zmalloc(nb_event
* sizeof(struct lttng_event
));
2016 ret
= -LTTCOMM_FATAL
;
2020 cds_lfht_for_each_entry(uchan
->events
->ht
, &iter
.iter
, uevent
, node
.node
) {
2021 strncpy(tmp
[i
].name
, uevent
->attr
.name
, LTTNG_SYMBOL_NAME_LEN
);
2022 tmp
[i
].name
[LTTNG_SYMBOL_NAME_LEN
- 1] = '\0';
2023 tmp
[i
].enabled
= uevent
->enabled
;
2024 switch (uevent
->attr
.instrumentation
) {
2025 case LTTNG_UST_TRACEPOINT
:
2026 tmp
[i
].type
= LTTNG_EVENT_TRACEPOINT
;
2028 case LTTNG_UST_PROBE
:
2029 tmp
[i
].type
= LTTNG_EVENT_PROBE
;
2031 case LTTNG_UST_FUNCTION
:
2032 tmp
[i
].type
= LTTNG_EVENT_FUNCTION
;
2034 case LTTNG_UST_TRACEPOINT_LOGLEVEL
:
2035 tmp
[i
].type
= LTTNG_EVENT_TRACEPOINT_LOGLEVEL
;
2050 * Fill lttng_event array of all kernel events in the channel.
2052 static int list_lttng_kernel_events(char *channel_name
,
2053 struct ltt_kernel_session
*kernel_session
, struct lttng_event
**events
)
2056 unsigned int nb_event
;
2057 struct ltt_kernel_event
*event
;
2058 struct ltt_kernel_channel
*kchan
;
2060 kchan
= trace_kernel_get_channel_by_name(channel_name
, kernel_session
);
2061 if (kchan
== NULL
) {
2062 ret
= LTTCOMM_KERN_CHAN_NOT_FOUND
;
2066 nb_event
= kchan
->event_count
;
2068 DBG("Listing events for channel %s", kchan
->channel
->name
);
2070 if (nb_event
== 0) {
2075 *events
= zmalloc(nb_event
* sizeof(struct lttng_event
));
2076 if (*events
== NULL
) {
2077 ret
= LTTCOMM_FATAL
;
2081 /* Kernel channels */
2082 cds_list_for_each_entry(event
, &kchan
->events_list
.head
, list
) {
2083 strncpy((*events
)[i
].name
, event
->event
->name
, LTTNG_SYMBOL_NAME_LEN
);
2084 (*events
)[i
].name
[LTTNG_SYMBOL_NAME_LEN
- 1] = '\0';
2085 (*events
)[i
].enabled
= event
->enabled
;
2086 switch (event
->event
->instrumentation
) {
2087 case LTTNG_KERNEL_TRACEPOINT
:
2088 (*events
)[i
].type
= LTTNG_EVENT_TRACEPOINT
;
2090 case LTTNG_KERNEL_KPROBE
:
2091 case LTTNG_KERNEL_KRETPROBE
:
2092 (*events
)[i
].type
= LTTNG_EVENT_PROBE
;
2093 memcpy(&(*events
)[i
].attr
.probe
, &event
->event
->u
.kprobe
,
2094 sizeof(struct lttng_kernel_kprobe
));
2096 case LTTNG_KERNEL_FUNCTION
:
2097 (*events
)[i
].type
= LTTNG_EVENT_FUNCTION
;
2098 memcpy(&((*events
)[i
].attr
.ftrace
), &event
->event
->u
.ftrace
,
2099 sizeof(struct lttng_kernel_function
));
2101 case LTTNG_KERNEL_NOOP
:
2102 (*events
)[i
].type
= LTTNG_EVENT_NOOP
;
2104 case LTTNG_KERNEL_SYSCALL
:
2105 (*events
)[i
].type
= LTTNG_EVENT_SYSCALL
;
2107 case LTTNG_KERNEL_ALL
:
2121 * Command LTTNG_DISABLE_CHANNEL processed by the client thread.
2123 static int cmd_disable_channel(struct ltt_session
*session
,
2124 int domain
, char *channel_name
)
2127 struct ltt_ust_session
*usess
;
2129 usess
= session
->ust_session
;
2132 case LTTNG_DOMAIN_KERNEL
:
2134 ret
= channel_kernel_disable(session
->kernel_session
,
2136 if (ret
!= LTTCOMM_OK
) {
2140 kernel_wait_quiescent(kernel_tracer_fd
);
2143 case LTTNG_DOMAIN_UST
:
2145 struct ltt_ust_channel
*uchan
;
2146 struct lttng_ht
*chan_ht
;
2148 chan_ht
= usess
->domain_global
.channels
;
2150 uchan
= trace_ust_find_channel_by_name(chan_ht
, channel_name
);
2151 if (uchan
== NULL
) {
2152 ret
= LTTCOMM_UST_CHAN_NOT_FOUND
;
2156 ret
= channel_ust_disable(usess
, domain
, uchan
);
2157 if (ret
!= LTTCOMM_OK
) {
2163 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN
:
2164 case LTTNG_DOMAIN_UST_EXEC_NAME
:
2165 case LTTNG_DOMAIN_UST_PID
:
2168 ret
= LTTCOMM_UNKNOWN_DOMAIN
;
2179 * Command LTTNG_ENABLE_CHANNEL processed by the client thread.
2181 static int cmd_enable_channel(struct ltt_session
*session
,
2182 int domain
, struct lttng_channel
*attr
)
2185 struct ltt_ust_session
*usess
= session
->ust_session
;
2186 struct lttng_ht
*chan_ht
;
2188 DBG("Enabling channel %s for session %s", attr
->name
, session
->name
);
2191 case LTTNG_DOMAIN_KERNEL
:
2193 struct ltt_kernel_channel
*kchan
;
2195 kchan
= trace_kernel_get_channel_by_name(attr
->name
,
2196 session
->kernel_session
);
2197 if (kchan
== NULL
) {
2198 ret
= channel_kernel_create(session
->kernel_session
,
2199 attr
, kernel_poll_pipe
[1]);
2201 ret
= channel_kernel_enable(session
->kernel_session
, kchan
);
2204 if (ret
!= LTTCOMM_OK
) {
2208 kernel_wait_quiescent(kernel_tracer_fd
);
2211 case LTTNG_DOMAIN_UST
:
2213 struct ltt_ust_channel
*uchan
;
2215 chan_ht
= usess
->domain_global
.channels
;
2217 uchan
= trace_ust_find_channel_by_name(chan_ht
, attr
->name
);
2218 if (uchan
== NULL
) {
2219 ret
= channel_ust_create(usess
, domain
, attr
);
2221 ret
= channel_ust_enable(usess
, domain
, uchan
);
2226 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN
:
2227 case LTTNG_DOMAIN_UST_EXEC_NAME
:
2228 case LTTNG_DOMAIN_UST_PID
:
2231 ret
= LTTCOMM_UNKNOWN_DOMAIN
;
2240 * Command LTTNG_DISABLE_EVENT processed by the client thread.
2242 static int cmd_disable_event(struct ltt_session
*session
, int domain
,
2243 char *channel_name
, char *event_name
)
2248 case LTTNG_DOMAIN_KERNEL
:
2250 struct ltt_kernel_channel
*kchan
;
2251 struct ltt_kernel_session
*ksess
;
2253 ksess
= session
->kernel_session
;
2255 kchan
= trace_kernel_get_channel_by_name(channel_name
, ksess
);
2256 if (kchan
== NULL
) {
2257 ret
= LTTCOMM_KERN_CHAN_NOT_FOUND
;
2261 ret
= event_kernel_disable_tracepoint(ksess
, kchan
, event_name
);
2262 if (ret
!= LTTCOMM_OK
) {
2266 kernel_wait_quiescent(kernel_tracer_fd
);
2269 case LTTNG_DOMAIN_UST
:
2271 struct ltt_ust_channel
*uchan
;
2272 struct ltt_ust_session
*usess
;
2274 usess
= session
->ust_session
;
2276 uchan
= trace_ust_find_channel_by_name(usess
->domain_global
.channels
,
2278 if (uchan
== NULL
) {
2279 ret
= LTTCOMM_UST_CHAN_NOT_FOUND
;
2283 ret
= event_ust_disable_tracepoint(usess
, domain
, uchan
, event_name
);
2284 if (ret
!= LTTCOMM_OK
) {
2288 DBG3("Disable UST event %s in channel %s completed", event_name
,
2293 case LTTNG_DOMAIN_UST_EXEC_NAME
:
2294 case LTTNG_DOMAIN_UST_PID
:
2295 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN
:
2309 * Command LTTNG_DISABLE_ALL_EVENT processed by the client thread.
2311 static int cmd_disable_event_all(struct ltt_session
*session
, int domain
,
2317 case LTTNG_DOMAIN_KERNEL
:
2319 struct ltt_kernel_session
*ksess
;
2320 struct ltt_kernel_channel
*kchan
;
2322 ksess
= session
->kernel_session
;
2324 kchan
= trace_kernel_get_channel_by_name(channel_name
, ksess
);
2325 if (kchan
== NULL
) {
2326 ret
= LTTCOMM_KERN_CHAN_NOT_FOUND
;
2330 ret
= event_kernel_disable_all(ksess
, kchan
);
2331 if (ret
!= LTTCOMM_OK
) {
2335 kernel_wait_quiescent(kernel_tracer_fd
);
2338 case LTTNG_DOMAIN_UST
:
2340 struct ltt_ust_session
*usess
;
2341 struct ltt_ust_channel
*uchan
;
2343 usess
= session
->ust_session
;
2345 uchan
= trace_ust_find_channel_by_name(usess
->domain_global
.channels
,
2347 if (uchan
== NULL
) {
2348 ret
= LTTCOMM_UST_CHAN_NOT_FOUND
;
2352 ret
= event_ust_disable_all_tracepoints(usess
, domain
, uchan
);
2357 DBG3("Disable all UST events in channel %s completed", channel_name
);
2362 case LTTNG_DOMAIN_UST_EXEC_NAME
:
2363 case LTTNG_DOMAIN_UST_PID
:
2364 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN
:
2378 * Command LTTNG_ADD_CONTEXT processed by the client thread.
2380 static int cmd_add_context(struct ltt_session
*session
, int domain
,
2381 char *channel_name
, char *event_name
, struct lttng_event_context
*ctx
)
2386 case LTTNG_DOMAIN_KERNEL
:
2387 /* Add kernel context to kernel tracer */
2388 ret
= context_kernel_add(session
->kernel_session
, ctx
,
2389 event_name
, channel_name
);
2390 if (ret
!= LTTCOMM_OK
) {
2394 case LTTNG_DOMAIN_UST
:
2396 struct ltt_ust_session
*usess
= session
->ust_session
;
2398 ret
= context_ust_add(usess
, domain
, ctx
, event_name
, channel_name
);
2399 if (ret
!= LTTCOMM_OK
) {
2405 case LTTNG_DOMAIN_UST_EXEC_NAME
:
2406 case LTTNG_DOMAIN_UST_PID
:
2407 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN
:
2421 * Command LTTNG_ENABLE_EVENT processed by the client thread.
2423 * TODO: currently, both events and loglevels are kept within the same
2424 * namespace for UST global registry/app registery, so if an event
2425 * happen to have the same name as the loglevel (very unlikely though),
2426 * and an attempt is made to enable/disable both in the same session,
2427 * the first to be created will be the only one allowed to exist.
2429 static int cmd_enable_event(struct ltt_session
*session
, int domain
,
2430 char *channel_name
, struct lttng_event
*event
)
2433 struct lttng_channel
*attr
;
2434 struct ltt_ust_session
*usess
= session
->ust_session
;
2437 case LTTNG_DOMAIN_KERNEL
:
2439 struct ltt_kernel_channel
*kchan
;
2441 kchan
= trace_kernel_get_channel_by_name(channel_name
,
2442 session
->kernel_session
);
2443 if (kchan
== NULL
) {
2444 attr
= channel_new_default_attr(domain
);
2446 ret
= LTTCOMM_FATAL
;
2449 snprintf(attr
->name
, NAME_MAX
, "%s", channel_name
);
2451 /* This call will notify the kernel thread */
2452 ret
= channel_kernel_create(session
->kernel_session
,
2453 attr
, kernel_poll_pipe
[1]);
2454 if (ret
!= LTTCOMM_OK
) {
2461 /* Get the newly created kernel channel pointer */
2462 kchan
= trace_kernel_get_channel_by_name(channel_name
,
2463 session
->kernel_session
);
2464 if (kchan
== NULL
) {
2465 /* This sould not happen... */
2466 ret
= LTTCOMM_FATAL
;
2470 ret
= event_kernel_enable_tracepoint(session
->kernel_session
, kchan
,
2472 if (ret
!= LTTCOMM_OK
) {
2476 kernel_wait_quiescent(kernel_tracer_fd
);
2479 case LTTNG_DOMAIN_UST
:
2481 struct lttng_channel
*attr
;
2482 struct ltt_ust_channel
*uchan
;
2484 /* Get channel from global UST domain */
2485 uchan
= trace_ust_find_channel_by_name(usess
->domain_global
.channels
,
2487 if (uchan
== NULL
) {
2488 /* Create default channel */
2489 attr
= channel_new_default_attr(domain
);
2491 ret
= LTTCOMM_FATAL
;
2494 snprintf(attr
->name
, NAME_MAX
, "%s", channel_name
);
2495 attr
->name
[NAME_MAX
- 1] = '\0';
2497 ret
= channel_ust_create(usess
, domain
, attr
);
2498 if (ret
!= LTTCOMM_OK
) {
2504 /* Get the newly created channel reference back */
2505 uchan
= trace_ust_find_channel_by_name(
2506 usess
->domain_global
.channels
, channel_name
);
2507 if (uchan
== NULL
) {
2508 /* Something is really wrong */
2509 ret
= LTTCOMM_FATAL
;
2514 /* At this point, the session and channel exist on the tracer */
2515 ret
= event_ust_enable_tracepoint(usess
, domain
, uchan
, event
);
2516 if (ret
!= LTTCOMM_OK
) {
2522 case LTTNG_DOMAIN_UST_EXEC_NAME
:
2523 case LTTNG_DOMAIN_UST_PID
:
2524 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN
:
/*
 * Command LTTNG_ENABLE_ALL_EVENT processed by the client thread.
 */
static int cmd_enable_event_all(struct ltt_session *session, int domain,
		char *channel_name, int event_type)
{
	int ret;
	struct ltt_kernel_channel *kchan;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		kchan = trace_kernel_get_channel_by_name(channel_name,
				session->kernel_session);
		if (kchan == NULL) {
			/* This call will notify the kernel thread */
			ret = channel_kernel_create(session->kernel_session, NULL,
					kernel_poll_pipe[1]);
			if (ret != LTTCOMM_OK) {
				goto error;
			}

			/* Get the newly created kernel channel pointer */
			kchan = trace_kernel_get_channel_by_name(channel_name,
					session->kernel_session);
			if (kchan == NULL) {
				/* This should not happen... */
				ret = LTTCOMM_FATAL;
				goto error;
			}
		}

		switch (event_type) {
		case LTTNG_EVENT_SYSCALL:
			ret = event_kernel_enable_all_syscalls(session->kernel_session,
					kchan, kernel_tracer_fd);
			break;
		case LTTNG_EVENT_TRACEPOINT:
			/*
			 * This call enables all LTTNG_KERNEL_TRACEPOINTS and
			 * events already registered to the channel.
			 */
			ret = event_kernel_enable_all_tracepoints(session->kernel_session,
					kchan, kernel_tracer_fd);
			break;
		case LTTNG_EVENT_ALL:
			/* Enable syscalls and tracepoints */
			ret = event_kernel_enable_all(session->kernel_session,
					kchan, kernel_tracer_fd);
			break;
		default:
			ret = LTTCOMM_KERN_ENABLE_FAIL;
			goto error;
		}

		/* Manage return value */
		if (ret != LTTCOMM_OK) {
			goto error;
		}

		kernel_wait_quiescent(kernel_tracer_fd);
		break;
	case LTTNG_DOMAIN_UST:
	{
		struct lttng_channel *attr;
		struct ltt_ust_channel *uchan;
		struct ltt_ust_session *usess = session->ust_session;

		/* Get channel from global UST domain */
		uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
				channel_name);
		if (uchan == NULL) {
			/* Create default channel */
			attr = channel_new_default_attr(domain);
			if (attr == NULL) {
				ret = LTTCOMM_FATAL;
				goto error;
			}
			snprintf(attr->name, NAME_MAX, "%s", channel_name);
			attr->name[NAME_MAX - 1] = '\0';

			/* Use the internal command enable channel */
			ret = channel_ust_create(usess, domain, attr);
			if (ret != LTTCOMM_OK) {
				goto error;
			}

			/* Get the newly created channel reference back */
			uchan = trace_ust_find_channel_by_name(
					usess->domain_global.channels, channel_name);
			if (uchan == NULL) {
				/* Something is really wrong */
				ret = LTTCOMM_FATAL;
				goto error;
			}
		}

		/* At this point, the session and channel exist on the tracer */

		switch (event_type) {
		case LTTNG_EVENT_ALL:
		case LTTNG_EVENT_TRACEPOINT:
			ret = event_ust_enable_all_tracepoints(usess, domain, uchan);
			if (ret != LTTCOMM_OK) {
				goto error;
			}
			break;
		default:
			ret = LTTCOMM_UST_ENABLE_FAIL;
			goto error;
		}

		/* Manage return value */
		if (ret != LTTCOMM_OK) {
			goto error;
		}
		break;
	}
	case LTTNG_DOMAIN_UST_EXEC_NAME:
	case LTTNG_DOMAIN_UST_PID:
	case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
	default:
		ret = LTTCOMM_NOT_IMPLEMENTED;
		goto error;
	}

error:
	return ret;
}
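/*
 * Note: both enable-event commands above share the same channel handling:
 * when the requested channel does not exist yet, a channel with default
 * attributes is created on the fly (channel_new_default_attr() followed by
 * channel_kernel_create()/channel_ust_create()) before the events are
 * enabled on it, so enabling events does not require an explicit
 * enable-channel step from the client.
 */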
/*
 * Command LTTNG_LIST_TRACEPOINTS processed by the client thread.
 */
static ssize_t cmd_list_tracepoints(int domain, struct lttng_event **events)
{
	int ret;
	ssize_t nb_events = 0;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		nb_events = kernel_list_events(kernel_tracer_fd, events);
		if (nb_events < 0) {
			ret = LTTCOMM_KERN_LIST_FAIL;
			goto error;
		}
		break;
	case LTTNG_DOMAIN_UST:
		nb_events = ust_app_list_events(events);
		if (nb_events < 0) {
			ret = LTTCOMM_UST_LIST_FAIL;
			goto error;
		}
		break;
	default:
		ret = LTTCOMM_NOT_IMPLEMENTED;
		goto error;
	}

	return nb_events;

error:
	/* Return negative value to differentiate return code */
	return -ret;
}
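/*
 * The cmd_list_*() helpers follow a common convention: on success they
 * return the number of items written to the output array, and on failure
 * they return a negative LTTCOMM error code so that process_client_msg()
 * can tell a valid count apart from an error.
 */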
/*
 * Command LTTNG_START_TRACE processed by the client thread.
 */
static int cmd_start_trace(struct ltt_session *session)
{
	int ret;
	struct ltt_kernel_session *ksession;
	struct ltt_ust_session *usess;

	ksession = session->kernel_session;
	usess = session->ust_session;

	if (session->enabled) {
		ret = LTTCOMM_UST_START_FAIL;
		goto error;
	}

	session->enabled = 1;

	/* Kernel tracing */
	if (ksession != NULL) {
		struct ltt_kernel_channel *kchan;

		/* Open kernel metadata */
		if (ksession->metadata == NULL) {
			ret = kernel_open_metadata(ksession, ksession->trace_path);
			if (ret < 0) {
				ret = LTTCOMM_KERN_META_FAIL;
				goto error;
			}
		}

		/* Open kernel metadata stream */
		if (ksession->metadata_stream_fd == 0) {
			ret = kernel_open_metadata_stream(ksession);
			if (ret < 0) {
				ERR("Kernel create metadata stream failed");
				ret = LTTCOMM_KERN_STREAM_FAIL;
				goto error;
			}
		}

		/* For each channel */
		cds_list_for_each_entry(kchan, &ksession->channel_list.head, list) {
			if (kchan->stream_count == 0) {
				ret = kernel_open_channel_stream(kchan);
				if (ret < 0) {
					ret = LTTCOMM_KERN_STREAM_FAIL;
					goto error;
				}
				/* Update the stream global counter */
				ksession->stream_count_global += ret;
			}
		}

		/* Setup kernel consumer socket and send fds to it */
		ret = init_kernel_tracing(ksession);
		if (ret < 0) {
			ret = LTTCOMM_KERN_START_FAIL;
			goto error;
		}

		/* This starts the kernel tracing */
		ret = kernel_start_session(ksession);
		if (ret < 0) {
			ret = LTTCOMM_KERN_START_FAIL;
			goto error;
		}

		/* Quiescent wait after starting trace */
		kernel_wait_quiescent(kernel_tracer_fd);
	}

	/* Flag session that trace should start automatically */
	if (usess) {
		usess->start_trace = 1;

		ret = ust_app_start_trace_all(usess);
		if (ret < 0) {
			ret = LTTCOMM_UST_START_FAIL;
			goto error;
		}
	}

	ret = LTTCOMM_OK;

error:
	return ret;
}
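/*
 * cmd_start_trace() is guarded by session->enabled, so starting an already
 * started session fails early. On the kernel side, the metadata, the
 * metadata stream and the per-channel streams are opened lazily here, right
 * before the consumer is set up and the session is actually started.
 */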
/*
 * Command LTTNG_STOP_TRACE processed by the client thread.
 */
static int cmd_stop_trace(struct ltt_session *session)
{
	int ret;
	struct ltt_kernel_channel *kchan;
	struct ltt_kernel_session *ksession;
	struct ltt_ust_session *usess;

	ksession = session->kernel_session;
	usess = session->ust_session;

	if (!session->enabled) {
		ret = LTTCOMM_UST_STOP_FAIL;
		goto error;
	}

	session->enabled = 0;

	/* Kernel tracer */
	if (ksession != NULL) {
		DBG("Stop kernel tracing");

		/* Flush all buffers before stopping */
		ret = kernel_metadata_flush_buffer(ksession->metadata_stream_fd);
		if (ret < 0) {
			ERR("Kernel metadata flush failed");
		}

		cds_list_for_each_entry(kchan, &ksession->channel_list.head, list) {
			ret = kernel_flush_buffer(kchan);
			if (ret < 0) {
				ERR("Kernel flush buffer error");
			}
		}

		ret = kernel_stop_session(ksession);
		if (ret < 0) {
			ret = LTTCOMM_KERN_STOP_FAIL;
			goto error;
		}

		kernel_wait_quiescent(kernel_tracer_fd);
	}

	if (usess) {
		usess->start_trace = 0;

		ret = ust_app_stop_trace_all(usess);
		if (ret < 0) {
			ret = LTTCOMM_UST_STOP_FAIL;
			goto error;
		}
	}

	ret = LTTCOMM_OK;

error:
	return ret;
}
/*
 * Command LTTNG_CREATE_SESSION processed by the client thread.
 */
static int cmd_create_session(char *name, char *path, struct ucred *creds)
{
	int ret;

	ret = session_create(name, path, creds->uid, creds->gid);
	if (ret != LTTCOMM_OK) {
		goto error;
	}

	ret = LTTCOMM_OK;

error:
	return ret;
}
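/*
 * The client credentials (uid/gid received over the Unix socket) are passed
 * down to session_create() so the new session is owned by the requesting
 * user; subsequent commands on that session are checked against this
 * ownership with session_access_ok() in process_client_msg().
 */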
/*
 * Command LTTNG_DESTROY_SESSION processed by the client thread.
 */
static int cmd_destroy_session(struct ltt_session *session, char *name)
{
	int ret;

	/* Clean kernel session teardown */
	teardown_kernel_session(session);
	/* UST session teardown */
	teardown_ust_session(session);

	/*
	 * Must notify the kernel thread here to update its poll set in order
	 * to remove the channel(s)' fd just destroyed.
	 */
	ret = notify_thread_pipe(kernel_poll_pipe[1]);
	if (ret < 0) {
		perror("write kernel poll pipe");
	}

	ret = session_destroy(session);

	return ret;
}
/*
 * Command LTTNG_CALIBRATE processed by the client thread.
 */
static int cmd_calibrate(int domain, struct lttng_calibrate *calibrate)
{
	int ret;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
	{
		struct lttng_kernel_calibrate kcalibrate;

		kcalibrate.type = calibrate->type;
		ret = kernel_calibrate(kernel_tracer_fd, &kcalibrate);
		if (ret < 0) {
			ret = LTTCOMM_KERN_ENABLE_FAIL;
			goto error;
		}
		break;
	}
	case LTTNG_DOMAIN_UST:
	{
		struct lttng_ust_calibrate ucalibrate;

		ucalibrate.type = calibrate->type;
		ret = ust_app_calibrate_glb(&ucalibrate);
		if (ret < 0) {
			ret = LTTCOMM_UST_CALIBRATE_FAIL;
			goto error;
		}
		break;
	}
	default:
		ret = LTTCOMM_UND;
		goto error;
	}

	ret = LTTCOMM_OK;

error:
	return ret;
}
/*
 * Command LTTNG_REGISTER_CONSUMER processed by the client thread.
 */
static int cmd_register_consumer(struct ltt_session *session, int domain,
		char *sock_path)
{
	int ret, sock;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		/* Can't register a consumer if there is already one */
		if (session->kernel_session->consumer_fds_sent != 0) {
			ret = LTTCOMM_KERN_CONSUMER_FAIL;
			goto error;
		}

		sock = lttcomm_connect_unix_sock(sock_path);
		if (sock < 0) {
			ret = LTTCOMM_CONNECT_FAIL;
			goto error;
		}

		session->kernel_session->consumer_fd = sock;
		break;
	default:
		/* TODO: Userspace tracing */
		ret = LTTCOMM_NOT_IMPLEMENTED;
		goto error;
	}

	ret = LTTCOMM_OK;

error:
	return ret;
}
/*
 * Command LTTNG_LIST_DOMAINS processed by the client thread.
 */
static ssize_t cmd_list_domains(struct ltt_session *session,
		struct lttng_domain **domains)
{
	int ret, index = 0;
	ssize_t nb_dom = 0;

	if (session->kernel_session != NULL) {
		DBG3("Listing domains found kernel domain");
		nb_dom++;
	}

	if (session->ust_session != NULL) {
		DBG3("Listing domains found UST global domain");
		nb_dom++;
	}

	*domains = zmalloc(nb_dom * sizeof(struct lttng_domain));
	if (*domains == NULL) {
		ret = -LTTCOMM_FATAL;
		goto error;
	}

	if (session->kernel_session != NULL) {
		(*domains)[index].type = LTTNG_DOMAIN_KERNEL;
		index++;
	}

	if (session->ust_session != NULL) {
		(*domains)[index].type = LTTNG_DOMAIN_UST;
		index++;
	}

	return nb_dom;

error:
	return ret;
}
/*
 * Command LTTNG_LIST_CHANNELS processed by the client thread.
 */
static ssize_t cmd_list_channels(int domain, struct ltt_session *session,
		struct lttng_channel **channels)
{
	int ret;
	ssize_t nb_chan = 0;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		if (session->kernel_session != NULL) {
			nb_chan = session->kernel_session->channel_count;
		}
		DBG3("Number of kernel channels %zd", nb_chan);
		break;
	case LTTNG_DOMAIN_UST:
		if (session->ust_session != NULL) {
			nb_chan = lttng_ht_get_count(
					session->ust_session->domain_global.channels);
		}
		DBG3("Number of UST global channels %zd", nb_chan);
		break;
	default:
		ret = -LTTCOMM_NOT_IMPLEMENTED;
		goto error;
	}

	*channels = zmalloc(nb_chan * sizeof(struct lttng_channel));
	if (*channels == NULL) {
		ret = -LTTCOMM_FATAL;
		goto error;
	}

	list_lttng_channels(domain, session, *channels);

	return nb_chan;

error:
	return ret;
}
/*
 * Command LTTNG_LIST_EVENTS processed by the client thread.
 */
static ssize_t cmd_list_events(int domain, struct ltt_session *session,
		char *channel_name, struct lttng_event **events)
{
	int ret = 0;
	ssize_t nb_event = 0;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		if (session->kernel_session != NULL) {
			nb_event = list_lttng_kernel_events(channel_name,
					session->kernel_session, events);
		}
		break;
	case LTTNG_DOMAIN_UST:
	{
		if (session->ust_session != NULL) {
			nb_event = list_lttng_ust_global_events(channel_name,
					&session->ust_session->domain_global, events);
		}
		break;
	}
	default:
		ret = -LTTCOMM_NOT_IMPLEMENTED;
		goto error;
	}

	return nb_event;

error:
	return ret;
}
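/*
 * The counts returned by the cmd_list_*() helpers are used by
 * process_client_msg() to size the reply payload
 * (count * sizeof(struct lttng_*)) before copying the array into the
 * lttcomm_lttng_msg buffer sent back to the client.
 */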
/*
 * Process the command requested by the lttng client within the command
 * context structure. This function makes sure that the return structure (llm)
 * is set and ready for transmission before returning.
 *
 * Return any error encountered or 0 for success.
 */
static int process_client_msg(struct command_ctx *cmd_ctx)
{
	int ret = LTTCOMM_OK;
	int need_tracing_session = 1;

	DBG("Processing client command %d", cmd_ctx->lsm->cmd_type);

	if (opt_no_kernel && cmd_ctx->lsm->domain.type == LTTNG_DOMAIN_KERNEL) {
		ret = LTTCOMM_KERN_NA;
		goto error;
	}

	/*
	 * Check for commands that don't need to allocate a returned payload. We do
	 * this here so we don't have to make the call for no payload at each
	 * command.
	 */
	switch (cmd_ctx->lsm->cmd_type) {
	case LTTNG_LIST_SESSIONS:
	case LTTNG_LIST_TRACEPOINTS:
	case LTTNG_LIST_DOMAINS:
	case LTTNG_LIST_CHANNELS:
	case LTTNG_LIST_EVENTS:
		break;
	default:
		/* Setup lttng message with no payload */
		ret = setup_lttng_msg(cmd_ctx, 0);
		if (ret < 0) {
			/* This label does not try to unlock the session */
			goto init_setup_error;
		}
	}

	/* Commands that DO NOT need a session. */
	switch (cmd_ctx->lsm->cmd_type) {
	case LTTNG_CALIBRATE:
	case LTTNG_CREATE_SESSION:
	case LTTNG_LIST_SESSIONS:
	case LTTNG_LIST_TRACEPOINTS:
		need_tracing_session = 0;
		break;
	default:
		DBG("Getting session %s by name", cmd_ctx->lsm->session.name);
		session_lock_list();
		cmd_ctx->session = session_find_by_name(cmd_ctx->lsm->session.name);
		session_unlock_list();
		if (cmd_ctx->session == NULL) {
			if (cmd_ctx->lsm->session.name != NULL) {
				ret = LTTCOMM_SESS_NOT_FOUND;
			} else {
				/* If no session name specified */
				ret = LTTCOMM_SELECT_SESS;
			}
			goto error;
		} else {
			/* Acquire lock for the session */
			session_lock(cmd_ctx->session);
		}
		break;
	}

	/*
	 * Check domain type for specific "pre-action".
	 */
	switch (cmd_ctx->lsm->domain.type) {
	case LTTNG_DOMAIN_KERNEL:
		if (!is_root) {
			ret = LTTCOMM_KERN_NA;
			goto error;
		}

		/* Kernel tracer check */
		if (kernel_tracer_fd == 0) {
			/* Basically, load kernel tracer modules */
			ret = init_kernel_tracer();
			if (ret != 0) {
				goto error;
			}
		}

		/* Need a session for kernel command */
		if (need_tracing_session) {
			if (cmd_ctx->session->kernel_session == NULL) {
				ret = create_kernel_session(cmd_ctx->session);
				if (ret < 0) {
					ret = LTTCOMM_KERN_SESS_FAIL;
					goto error;
				}
			}

			/* Start the kernel consumer daemon */
			pthread_mutex_lock(&kconsumer_data.pid_mutex);
			if (kconsumer_data.pid == 0 &&
					cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
				pthread_mutex_unlock(&kconsumer_data.pid_mutex);
				ret = start_consumerd(&kconsumer_data);
				if (ret < 0) {
					ret = LTTCOMM_KERN_CONSUMER_FAIL;
					goto error;
				}
			} else {
				pthread_mutex_unlock(&kconsumer_data.pid_mutex);
			}
		}
		break;
	case LTTNG_DOMAIN_UST:
	{
		if (need_tracing_session) {
			if (cmd_ctx->session->ust_session == NULL) {
				ret = create_ust_session(cmd_ctx->session,
						&cmd_ctx->lsm->domain);
				if (ret != LTTCOMM_OK) {
					goto error;
				}
			}

			/* Start the UST consumer daemons */
			/* 64-bit */
			pthread_mutex_lock(&ustconsumer64_data.pid_mutex);
			if (consumerd64_bin[0] != '\0' &&
					ustconsumer64_data.pid == 0 &&
					cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
				pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
				ret = start_consumerd(&ustconsumer64_data);
				if (ret < 0) {
					ret = LTTCOMM_UST_CONSUMER64_FAIL;
					ust_consumerd64_fd = -EINVAL;
					goto error;
				}

				ust_consumerd64_fd = ustconsumer64_data.cmd_sock;
			} else {
				pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
			}

			/* 32-bit */
			pthread_mutex_lock(&ustconsumer32_data.pid_mutex);
			if (consumerd32_bin[0] != '\0' &&
					ustconsumer32_data.pid == 0 &&
					cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
				pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
				ret = start_consumerd(&ustconsumer32_data);
				if (ret < 0) {
					ret = LTTCOMM_UST_CONSUMER32_FAIL;
					ust_consumerd32_fd = -EINVAL;
					goto error;
				}

				ust_consumerd32_fd = ustconsumer32_data.cmd_sock;
			} else {
				pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
			}
		}
		break;
	}
	default:
		break;
	}

	/*
	 * Check that the UID or GID match that of the tracing session.
	 * The root user can interact with all sessions.
	 */
	if (need_tracing_session) {
		if (!session_access_ok(cmd_ctx->session,
				cmd_ctx->creds.uid, cmd_ctx->creds.gid)) {
			ret = LTTCOMM_EPERM;
			goto error;
		}
	}

	/* Process by command type */
	switch (cmd_ctx->lsm->cmd_type) {
	case LTTNG_ADD_CONTEXT:
	{
		ret = cmd_add_context(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.context.channel_name,
				cmd_ctx->lsm->u.context.event_name,
				&cmd_ctx->lsm->u.context.ctx);
		break;
	}
	case LTTNG_DISABLE_CHANNEL:
	{
		ret = cmd_disable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.disable.channel_name);
		break;
	}
	case LTTNG_DISABLE_EVENT:
	{
		ret = cmd_disable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.disable.channel_name,
				cmd_ctx->lsm->u.disable.name);
		break;
	}
	case LTTNG_DISABLE_ALL_EVENT:
	{
		DBG("Disabling all events");

		ret = cmd_disable_event_all(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.disable.channel_name);
		break;
	}
	case LTTNG_ENABLE_CHANNEL:
	{
		ret = cmd_enable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				&cmd_ctx->lsm->u.channel.chan);
		break;
	}
	case LTTNG_ENABLE_EVENT:
	{
		ret = cmd_enable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.enable.channel_name,
				&cmd_ctx->lsm->u.enable.event);
		break;
	}
	case LTTNG_ENABLE_ALL_EVENT:
	{
		DBG("Enabling all events");

		ret = cmd_enable_event_all(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.enable.channel_name,
				cmd_ctx->lsm->u.enable.event.type);
		break;
	}
	case LTTNG_LIST_TRACEPOINTS:
	{
		struct lttng_event *events;
		ssize_t nb_events;

		nb_events = cmd_list_tracepoints(cmd_ctx->lsm->domain.type, &events);
		if (nb_events < 0) {
			ret = -nb_events;
			goto error;
		}

		/*
		 * Setup lttng message with payload size set to the event list size in
		 * bytes and then copy list into the llm payload.
		 */
		ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event) * nb_events);
		if (ret < 0) {
			free(events);
			goto setup_error;
		}

		/* Copy event list into message payload */
		memcpy(cmd_ctx->llm->payload, events,
				sizeof(struct lttng_event) * nb_events);

		free(events);

		ret = LTTCOMM_OK;
		break;
	}
	case LTTNG_START_TRACE:
	{
		ret = cmd_start_trace(cmd_ctx->session);
		break;
	}
	case LTTNG_STOP_TRACE:
	{
		ret = cmd_stop_trace(cmd_ctx->session);
		break;
	}
	case LTTNG_CREATE_SESSION:
	{
		ret = cmd_create_session(cmd_ctx->lsm->session.name,
				cmd_ctx->lsm->session.path, &cmd_ctx->creds);
		break;
	}
	case LTTNG_DESTROY_SESSION:
	{
		ret = cmd_destroy_session(cmd_ctx->session,
				cmd_ctx->lsm->session.name);
		break;
	}
	case LTTNG_LIST_DOMAINS:
	{
		ssize_t nb_dom;
		struct lttng_domain *domains;

		nb_dom = cmd_list_domains(cmd_ctx->session, &domains);
		if (nb_dom < 0) {
			ret = -nb_dom;
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx, nb_dom * sizeof(struct lttng_domain));
		if (ret < 0) {
			goto setup_error;
		}

		/* Copy event list into message payload */
		memcpy(cmd_ctx->llm->payload, domains,
				nb_dom * sizeof(struct lttng_domain));

		free(domains);

		ret = LTTCOMM_OK;
		break;
	}
	case LTTNG_LIST_CHANNELS:
	{
		ssize_t nb_chan;
		struct lttng_channel *channels;

		nb_chan = cmd_list_channels(cmd_ctx->lsm->domain.type,
				cmd_ctx->session, &channels);
		if (nb_chan < 0) {
			ret = -nb_chan;
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx, nb_chan * sizeof(struct lttng_channel));
		if (ret < 0) {
			goto setup_error;
		}

		/* Copy event list into message payload */
		memcpy(cmd_ctx->llm->payload, channels,
				nb_chan * sizeof(struct lttng_channel));

		free(channels);

		ret = LTTCOMM_OK;
		break;
	}
	case LTTNG_LIST_EVENTS:
	{
		ssize_t nb_event;
		struct lttng_event *events = NULL;

		nb_event = cmd_list_events(cmd_ctx->lsm->domain.type, cmd_ctx->session,
				cmd_ctx->lsm->u.list.channel_name, &events);
		if (nb_event < 0) {
			ret = -nb_event;
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx, nb_event * sizeof(struct lttng_event));
		if (ret < 0) {
			goto setup_error;
		}

		/* Copy event list into message payload */
		memcpy(cmd_ctx->llm->payload, events,
				nb_event * sizeof(struct lttng_event));

		free(events);

		ret = LTTCOMM_OK;
		break;
	}
	case LTTNG_LIST_SESSIONS:
	{
		unsigned int nr_sessions;

		session_lock_list();
		nr_sessions = lttng_sessions_count(cmd_ctx->creds.uid, cmd_ctx->creds.gid);
		if (nr_sessions == 0) {
			ret = LTTCOMM_NO_SESSION;
			session_unlock_list();
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_session) * nr_sessions);
		if (ret < 0) {
			session_unlock_list();
			goto setup_error;
		}

		/* Filled the session array */
		list_lttng_sessions((struct lttng_session *)(cmd_ctx->llm->payload),
				cmd_ctx->creds.uid, cmd_ctx->creds.gid);

		session_unlock_list();

		ret = LTTCOMM_OK;
		break;
	}
	case LTTNG_CALIBRATE:
	{
		ret = cmd_calibrate(cmd_ctx->lsm->domain.type,
				&cmd_ctx->lsm->u.calibrate);
		break;
	}
	case LTTNG_REGISTER_CONSUMER:
	{
		ret = cmd_register_consumer(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.reg.path);
		break;
	}
	default:
		ret = LTTCOMM_UND;
		break;
	}

error:
	if (cmd_ctx->llm == NULL) {
		DBG("Missing llm structure. Allocating one.");
		if (setup_lttng_msg(cmd_ctx, 0) < 0) {
			goto setup_error;
		}
	}
	/* Set return code */
	cmd_ctx->llm->ret_code = ret;

setup_error:
	if (cmd_ctx->session) {
		session_unlock(cmd_ctx->session);
	}
init_setup_error:
	return ret;
}
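/*
 * Error label summary for process_client_msg():
 *   - error: a command failed; make sure an llm reply buffer exists and
 *     store the return code in it before unlocking the session.
 *   - setup_error: the reply buffer could not be allocated; only unlock the
 *     session, no return code can be stored.
 *   - init_setup_error: failure before any session lock was taken, so
 *     nothing is unlocked (see the comment at the early setup_lttng_msg()
 *     call).
 */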
/*
 * This thread manages all client requests using the Unix client socket for
 * communication.
 */
static void *thread_manage_clients(void *data)
{
	int sock = 0, ret, i, pollfd;
	uint32_t revents, nb_fd;
	struct command_ctx *cmd_ctx = NULL;
	struct lttng_poll_event events;

	DBG("[thread] Manage client started");

	rcu_register_thread();

	ret = lttcomm_listen_unix_sock(client_sock);
	if (ret < 0) {
		goto error;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
	 * more will be added to this poll set.
	 */
	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, client_sock, LPOLLIN | LPOLLPRI);
	if (ret < 0) {
		goto error;
	}

	/*
	 * Notify parent pid that we are ready to accept commands on the client side.
	 */
	if (opt_sig_parent) {
		kill(ppid, SIGUSR1);
	}

	while (1) {
		DBG("Accepting client command ...");

		nb_fd = LTTNG_POLL_GETNB(&events);

		/* Infinite blocking call, waiting for transmission */
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			goto error;
		}

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == client_sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Client socket poll error");
					goto error;
				}
			}
		}

		DBG("Wait for client response");

		sock = lttcomm_accept_unix_sock(client_sock);
		if (sock < 0) {
			goto error;
		}

		/* Set socket option for credentials retrieval */
		ret = lttcomm_setsockopt_creds_unix_sock(sock);
		if (ret < 0) {
			goto error;
		}

		/* Allocate context command to process the client request */
		cmd_ctx = zmalloc(sizeof(struct command_ctx));
		if (cmd_ctx == NULL) {
			perror("zmalloc cmd_ctx");
			goto error;
		}

		/* Allocate data buffer for reception */
		cmd_ctx->lsm = zmalloc(sizeof(struct lttcomm_session_msg));
		if (cmd_ctx->lsm == NULL) {
			perror("zmalloc cmd_ctx->lsm");
			goto error;
		}

		cmd_ctx->llm = NULL;
		cmd_ctx->session = NULL;

		/*
		 * Data is received from the lttng client. The struct
		 * lttcomm_session_msg (lsm) contains the command and data request of
		 * the client.
		 */
		DBG("Receiving data from client ...");
		ret = lttcomm_recv_creds_unix_sock(sock, cmd_ctx->lsm,
				sizeof(struct lttcomm_session_msg), &cmd_ctx->creds);
		if (ret <= 0) {
			DBG("Nothing recv() from client... continuing");
			close(sock);
			sock = -1;
			clean_command_ctx(&cmd_ctx);
			continue;
		}

		// TODO: Validate cmd_ctx including sanity check for
		// security purpose.

		rcu_thread_online();
		/*
		 * This function dispatches the work to the kernel or userspace tracer
		 * libs and fills the lttcomm_lttng_msg data structure with all the
		 * information needed for the client. The command context struct
		 * contains everything this function may need.
		 */
		ret = process_client_msg(cmd_ctx);
		rcu_thread_offline();
		if (ret < 0) {
			/*
			 * TODO: Inform client somehow of the fatal error. At
			 * this point, ret < 0 means that a zmalloc failed
			 * (ENOMEM). Error detected but still accept command.
			 */
			clean_command_ctx(&cmd_ctx);
			continue;
		}

		DBG("Sending response (size: %d, retcode: %s)",
				cmd_ctx->lttng_msg_size,
				lttng_strerror(-cmd_ctx->llm->ret_code));
		ret = send_unix_sock(sock, cmd_ctx->llm, cmd_ctx->lttng_msg_size);
		if (ret < 0) {
			ERR("Failed to send data back to client");
		}

		/* End of transmission */
		close(sock);
		sock = -1;

		clean_command_ctx(&cmd_ctx);
	}

exit:
error:
	DBG("Client thread dying");
	unlink(client_unix_sock_path);

	lttng_poll_clean(&events);
	clean_command_ctx(&cmd_ctx);

	rcu_unregister_thread();
	return NULL;
}
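/*
 * Each accepted client connection is handled synchronously: a command_ctx
 * is allocated, the lttcomm_session_msg is received together with the
 * sender's credentials (lttcomm_setsockopt_creds_unix_sock() enables
 * credential passing on the socket, presumably via SO_PASSCRED), the
 * command is processed, and the lttcomm_lttng_msg reply is sent back on the
 * same socket before the context is freed.
 */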
/*
 * usage function on stderr
 */
static void usage(void)
{
	fprintf(stderr, "Usage: %s OPTIONS\n\nOptions:\n", progname);
	fprintf(stderr, "  -h, --help                         Display this usage.\n");
	fprintf(stderr, "  -c, --client-sock PATH             Specify path for the client unix socket\n");
	fprintf(stderr, "  -a, --apps-sock PATH               Specify path for apps unix socket\n");
	fprintf(stderr, "      --kconsumerd-err-sock PATH     Specify path for the kernel consumer error socket\n");
	fprintf(stderr, "      --kconsumerd-cmd-sock PATH     Specify path for the kernel consumer command socket\n");
	fprintf(stderr, "      --ustconsumerd32-err-sock PATH Specify path for the 32-bit UST consumer error socket\n");
	fprintf(stderr, "      --ustconsumerd64-err-sock PATH Specify path for the 64-bit UST consumer error socket\n");
	fprintf(stderr, "      --ustconsumerd32-cmd-sock PATH Specify path for the 32-bit UST consumer command socket\n");
	fprintf(stderr, "      --ustconsumerd64-cmd-sock PATH Specify path for the 64-bit UST consumer command socket\n");
	fprintf(stderr, "      --consumerd32-path PATH        Specify path for the 32-bit UST consumer daemon binary\n");
	fprintf(stderr, "      --consumerd32-libdir PATH      Specify path for the 32-bit UST consumer daemon libraries\n");
	fprintf(stderr, "      --consumerd64-path PATH        Specify path for the 64-bit UST consumer daemon binary\n");
	fprintf(stderr, "      --consumerd64-libdir PATH      Specify path for the 64-bit UST consumer daemon libraries\n");
	fprintf(stderr, "  -d, --daemonize                    Start as a daemon.\n");
	fprintf(stderr, "  -g, --group NAME                   Specify the tracing group name. (default: tracing)\n");
	fprintf(stderr, "  -V, --version                      Show version number.\n");
	fprintf(stderr, "  -S, --sig-parent                   Send SIGCHLD to parent pid to notify readiness.\n");
	fprintf(stderr, "  -q, --quiet                        No output at all.\n");
	fprintf(stderr, "  -v, --verbose                      Verbose mode. Activate DBG() macro.\n");
	fprintf(stderr, "      --verbose-consumer             Verbose mode for consumer. Activate DBG() macro.\n");
	fprintf(stderr, "      --no-kernel                    Disable kernel tracer\n");
}
/*
 * daemon argument parsing
 */
static int parse_args(int argc, char **argv)
{
	int c;

	static struct option long_options[] = {
		{ "client-sock", 1, 0, 'c' },
		{ "apps-sock", 1, 0, 'a' },
		{ "kconsumerd-cmd-sock", 1, 0, 'C' },
		{ "kconsumerd-err-sock", 1, 0, 'E' },
		{ "ustconsumerd32-cmd-sock", 1, 0, 'G' },
		{ "ustconsumerd32-err-sock", 1, 0, 'H' },
		{ "ustconsumerd64-cmd-sock", 1, 0, 'D' },
		{ "ustconsumerd64-err-sock", 1, 0, 'F' },
		{ "consumerd32-path", 1, 0, 'u' },
		{ "consumerd32-libdir", 1, 0, 'U' },
		{ "consumerd64-path", 1, 0, 't' },
		{ "consumerd64-libdir", 1, 0, 'T' },
		{ "daemonize", 0, 0, 'd' },
		{ "sig-parent", 0, 0, 'S' },
		{ "help", 0, 0, 'h' },
		{ "group", 1, 0, 'g' },
		{ "version", 0, 0, 'V' },
		{ "quiet", 0, 0, 'q' },
		{ "verbose", 0, 0, 'v' },
		{ "verbose-consumer", 0, 0, 'Z' },
		{ "no-kernel", 0, 0, 'N' },
		{ NULL, 0, 0, 0 }
	};

	while (1) {
		int option_index = 0;
		c = getopt_long(argc, argv, "dhqvVSN" "a:c:g:s:C:E:D:F:Z:u:t",
				long_options, &option_index);
		if (c == -1) {
			break;
		}

		switch (c) {
		case 0:
			fprintf(stderr, "option %s", long_options[option_index].name);
			if (optarg) {
				fprintf(stderr, " with arg %s\n", optarg);
			}
			break;
		case 'c':
			snprintf(client_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'a':
			snprintf(apps_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'd':
			opt_daemon = 1;
			break;
		case 'g':
			opt_tracing_group = optarg;
			break;
		case 'h':
			usage();
			exit(EXIT_FAILURE);
		case 'V':
			fprintf(stdout, "%s\n", VERSION);
			exit(EXIT_SUCCESS);
		case 'S':
			opt_sig_parent = 1;
			break;
		case 'E':
			snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'C':
			snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'F':
			snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'D':
			snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'H':
			snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'G':
			snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'N':
			opt_no_kernel = 1;
			break;
		case 'q':
			opt_quiet = 1;
			break;
		case 'v':
			/* Verbose level can increase using multiple -v */
			opt_verbose += 1;
			break;
		case 'Z':
			opt_verbose_consumer += 1;
			break;
		case 'u':
			consumerd32_bin = optarg;
			break;
		case 'U':
			consumerd32_libdir = optarg;
			break;
		case 't':
			consumerd64_bin = optarg;
			break;
		case 'T':
			consumerd64_libdir = optarg;
			break;
		default:
			/* Unknown option or other error.
			 * Error is printed by getopt, just return */
			return -1;
		}
	}

	return 0;
}
/*
 * Creates the two sockets needed by the daemon:
 *   apps_sock   - The communication socket for all UST apps.
 *   client_sock - The communication socket for the cli tool (lttng).
 */
static int init_daemon_socket(void)
{
	int ret = 0;
	mode_t old_umask;

	old_umask = umask(0);

	/* Create client tool unix socket */
	client_sock = lttcomm_create_unix_sock(client_unix_sock_path);
	if (client_sock < 0) {
		ERR("Create unix sock failed: %s", client_unix_sock_path);
		ret = -1;
		goto end;
	}

	/* File permission MUST be 660 */
	ret = chmod(client_unix_sock_path, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", client_unix_sock_path);
		perror("chmod");
		goto end;
	}

	/* Create the application unix socket */
	apps_sock = lttcomm_create_unix_sock(apps_unix_sock_path);
	if (apps_sock < 0) {
		ERR("Create unix sock failed: %s", apps_unix_sock_path);
		ret = -1;
		goto end;
	}

	/* File permission MUST be 666 */
	ret = chmod(apps_unix_sock_path,
			S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", apps_unix_sock_path);
		perror("chmod");
		goto end;
	}

end:
	umask(old_umask);
	return ret;
}
/*
 * Check if the global socket is available, and if a daemon is answering at the
 * other side. If yes, error is returned.
 */
static int check_existing_daemon(void)
{
	if (access(client_unix_sock_path, F_OK) < 0 &&
			access(apps_unix_sock_path, F_OK) < 0) {
		return 0;
	}

	/* Is there anybody out there? */
	if (lttng_session_daemon_alive()) {
		return -EEXIST;
	}

	return 0;
}
/*
 * Set the tracing group gid onto the client socket.
 *
 * Race window between mkdir and chown is OK because we are going from more
 * permissive (root.root) to less permissive (root.tracing).
 */
static int set_permissions(char *rundir)
{
	int ret;
	int gid;

	gid = allowed_group();
	if (gid < 0) {
		WARN("No tracing group detected");
		ret = -1;
		goto end;
	}

	/* Set lttng run dir */
	ret = chown(rundir, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", rundir);
		perror("chown");
	}

	/* lttng client socket path */
	ret = chown(client_unix_sock_path, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", client_unix_sock_path);
		perror("chown");
	}

	/* kconsumer error socket path */
	ret = chown(kconsumer_data.err_unix_sock_path, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", kconsumer_data.err_unix_sock_path);
		perror("chown");
	}

	/* 64-bit ustconsumer error socket path */
	ret = chown(ustconsumer64_data.err_unix_sock_path, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", ustconsumer64_data.err_unix_sock_path);
		perror("chown");
	}

	/* 32-bit ustconsumer compat32 error socket path */
	ret = chown(ustconsumer32_data.err_unix_sock_path, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", ustconsumer32_data.err_unix_sock_path);
		perror("chown");
	}

	DBG("All permissions are set");

end:
	return ret;
}
/*
 * Create the pipe used to wake up the kernel thread.
 */
static int create_kernel_poll_pipe(void)
{
	return pipe2(kernel_poll_pipe, O_CLOEXEC);
}

/*
 * Create the application command pipe to wake thread_manage_apps.
 */
static int create_apps_cmd_pipe(void)
{
	return pipe2(apps_cmd_pipe, O_CLOEXEC);
}
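/*
 * Both pipes are created with O_CLOEXEC so their file descriptors are not
 * leaked into the consumer daemons that are spawned later with exec (see
 * start_consumerd() usage in process_client_msg()).
 */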
/*
 * Create the lttng run directory needed for all global sockets and pipe.
 */
static int create_lttng_rundir(const char *rundir)
{
	int ret;

	DBG3("Creating LTTng run directory: %s", rundir);

	ret = mkdir(rundir, S_IRWXU | S_IRWXG);
	if (ret < 0) {
		if (errno != EEXIST) {
			ERR("Unable to create %s", rundir);
			goto error;
		} else {
			ret = 0;
		}
	}

error:
	return ret;
}
/*
 * Setup sockets and directory needed by the kconsumerd communication with the
 * session daemon.
 */
static int set_consumer_sockets(struct consumer_data *consumer_data,
		const char *rundir)
{
	int ret;
	char path[PATH_MAX];

	switch (consumer_data->type) {
	case LTTNG_CONSUMER_KERNEL:
		snprintf(path, PATH_MAX, DEFAULT_KCONSUMERD_PATH, rundir);
		break;
	case LTTNG_CONSUMER64_UST:
		snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD64_PATH, rundir);
		break;
	case LTTNG_CONSUMER32_UST:
		snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD32_PATH, rundir);
		break;
	default:
		ERR("Consumer type unknown");
		ret = -EINVAL;
		goto error;
	}

	DBG2("Creating consumer directory: %s", path);

	ret = mkdir(path, S_IRWXU | S_IRWXG);
	if (ret < 0) {
		if (errno != EEXIST) {
			ERR("Failed to create %s", path);
			goto error;
		}
		ret = 0;
	}

	/* Create the kconsumerd error unix socket */
	consumer_data->err_sock =
		lttcomm_create_unix_sock(consumer_data->err_unix_sock_path);
	if (consumer_data->err_sock < 0) {
		ERR("Create unix sock failed: %s", consumer_data->err_unix_sock_path);
		ret = -1;
		goto error;
	}

	/* File permission MUST be 660 */
	ret = chmod(consumer_data->err_unix_sock_path,
			S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", consumer_data->err_unix_sock_path);
		goto error;
	}

error:
	return ret;
}
/*
 * Signal handler for the daemon
 *
 * Simply stop all worker threads, leaving main() return gracefully after
 * joining all threads and calling cleanup().
 */
static void sighandler(int sig)
{
	switch (sig) {
	case SIGPIPE:
		DBG("SIGPIPE caught");
		return;
	case SIGINT:
		DBG("SIGINT caught");
		stop_threads();
		break;
	case SIGTERM:
		DBG("SIGTERM caught");
		stop_threads();
		break;
	default:
		break;
	}
}
/*
 * Setup signal handler for:
 *     SIGINT, SIGTERM, SIGPIPE
 */
static int set_signal_handler(void)
{
	int ret = 0;
	struct sigaction sa;
	sigset_t sigset;

	if ((ret = sigemptyset(&sigset)) < 0) {
		perror("sigemptyset");
		return ret;
	}

	sa.sa_handler = sighandler;
	sa.sa_mask = sigset;
	sa.sa_flags = 0;
	if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
		perror("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
		perror("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
		perror("sigaction");
		return ret;
	}

	DBG("Signal handler set for SIGTERM, SIGPIPE and SIGINT");

	return ret;
}
/*
 * Set open files limit to unlimited. This daemon can open a large number of
 * file descriptors in order to consume multiple kernel traces.
 */
static void set_ulimit(void)
{
	int ret;
	struct rlimit lim;

	/* The kernel does not allow an infinite limit for open files */
	lim.rlim_cur = 65535;
	lim.rlim_max = 65535;

	ret = setrlimit(RLIMIT_NOFILE, &lim);
	if (ret < 0) {
		perror("failed to set open files limit");
	}
}
int main(int argc, char **argv)
{
	int ret = 0;
	void *status;
	const char *home_path;

	rcu_register_thread();

	/* Create thread quit pipe */
	if ((ret = init_thread_quit_pipe()) < 0) {
		goto error;
	}

	setup_consumerd_path();

	/* Parse arguments */
	progname = argv[0];
	if ((ret = parse_args(argc, argv)) < 0) {
		goto error;
	}

	/* Check if daemon is UID = 0 */
	is_root = !getuid();

	if (is_root) {
		rundir = strdup(DEFAULT_LTTNG_RUNDIR);

		/* Create global run dir with root access */
		ret = create_lttng_rundir(rundir);
		if (ret < 0) {
			goto error;
		}

		if (strlen(apps_unix_sock_path) == 0) {
			snprintf(apps_unix_sock_path, PATH_MAX,
					DEFAULT_GLOBAL_APPS_UNIX_SOCK);
		}

		if (strlen(client_unix_sock_path) == 0) {
			snprintf(client_unix_sock_path, PATH_MAX,
					DEFAULT_GLOBAL_CLIENT_UNIX_SOCK);
		}

		/* Set global SHM for ust */
		if (strlen(wait_shm_path) == 0) {
			snprintf(wait_shm_path, PATH_MAX,
					DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH);
		}

		/* Setup kernel consumerd path */
		snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX,
				DEFAULT_KCONSUMERD_ERR_SOCK_PATH, rundir);
		snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX,
				DEFAULT_KCONSUMERD_CMD_SOCK_PATH, rundir);

		DBG2("Kernel consumer err path: %s",
				kconsumer_data.err_unix_sock_path);
		DBG2("Kernel consumer cmd path: %s",
				kconsumer_data.cmd_unix_sock_path);
	} else {
		home_path = get_home_dir();
		if (home_path == NULL) {
			/* TODO: Add --socket PATH option */
			ERR("Can't get HOME directory for sockets creation.");
			ret = -EPERM;
			goto error;
		}

		/*
		 * Create rundir from home path. This will create something like
		 * $HOME/.lttng
		 */
		ret = asprintf(&rundir, DEFAULT_LTTNG_HOME_RUNDIR, home_path);
		if (ret < 0) {
			ret = -ENOMEM;
			goto error;
		}

		ret = create_lttng_rundir(rundir);
		if (ret < 0) {
			goto error;
		}

		if (strlen(apps_unix_sock_path) == 0) {
			snprintf(apps_unix_sock_path, PATH_MAX,
					DEFAULT_HOME_APPS_UNIX_SOCK, home_path);
		}

		/* Set the cli tool unix socket path */
		if (strlen(client_unix_sock_path) == 0) {
			snprintf(client_unix_sock_path, PATH_MAX,
					DEFAULT_HOME_CLIENT_UNIX_SOCK, home_path);
		}

		/* Set global SHM for ust */
		if (strlen(wait_shm_path) == 0) {
			snprintf(wait_shm_path, PATH_MAX,
					DEFAULT_HOME_APPS_WAIT_SHM_PATH, geteuid());
		}
	}

	DBG("Client socket path %s", client_unix_sock_path);
	DBG("Application socket path %s", apps_unix_sock_path);
	DBG("LTTng run directory path: %s", rundir);

	/* 32-bit consumerd path setup */
	snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH, rundir);
	snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH, rundir);

	DBG2("UST consumer 32 bits err path: %s",
			ustconsumer32_data.err_unix_sock_path);
	DBG2("UST consumer 32 bits cmd path: %s",
			ustconsumer32_data.cmd_unix_sock_path);

	/* 64-bit consumerd path setup */
	snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH, rundir);
	snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH, rundir);

	DBG2("UST consumer 64 bits err path: %s",
			ustconsumer64_data.err_unix_sock_path);
	DBG2("UST consumer 64 bits cmd path: %s",
			ustconsumer64_data.cmd_unix_sock_path);

	/*
	 * See if daemon already exists.
	 */
	if ((ret = check_existing_daemon()) < 0) {
		ERR("Already running daemon.\n");
		/*
		 * We do not goto exit because we must not cleanup()
		 * because a daemon is already running.
		 */
		goto error;
	}

	/* After this point, we can safely call cleanup() with "goto exit" */

	/*
	 * These actions must be executed as root. We do that *after* setting up
	 * the sockets path because we MUST make the check for another daemon using
	 * those paths *before* trying to set the kernel consumer sockets and init
	 * kernel tracer.
	 */
	if (is_root) {
		ret = set_consumer_sockets(&kconsumer_data, rundir);
		if (ret < 0) {
			goto exit;
		}

		/* Setup kernel tracer */
		if (!opt_no_kernel) {
			init_kernel_tracer();
		}

		/* Set ulimit for open files */
		set_ulimit();
	}

	ret = set_consumer_sockets(&ustconsumer64_data, rundir);
	if (ret < 0) {
		goto exit;
	}

	ret = set_consumer_sockets(&ustconsumer32_data, rundir);
	if (ret < 0) {
		goto exit;
	}

	if ((ret = set_signal_handler()) < 0) {
		goto exit;
	}

	/* Setup the needed unix socket */
	if ((ret = init_daemon_socket()) < 0) {
		goto exit;
	}

	/* Set credentials to socket */
	if (is_root && ((ret = set_permissions(rundir)) < 0)) {
		goto exit;
	}

	/* Get parent pid if -S, --sig-parent is specified. */
	if (opt_sig_parent) {
		ppid = getppid();
	}

	/* Setup the kernel pipe for waking up the kernel thread */
	if ((ret = create_kernel_poll_pipe()) < 0) {
		goto exit;
	}

	/* Setup the thread apps communication pipe. */
	if ((ret = create_apps_cmd_pipe()) < 0) {
		goto exit;
	}

	/* Init UST command queue. */
	cds_wfq_init(&ust_cmd_queue.queue);

	/* Init UST app hash table */
	ust_app_ht_alloc();

	/*
	 * Get session list pointer. This pointer MUST NOT be free(). This list is
	 * statically declared in session.c
	 */
	session_list_ptr = session_get_list();

	/* Set up max poll set size */
	lttng_poll_set_max_size();

	/* Create thread to manage the client socket */
	ret = pthread_create(&client_thread, NULL,
			thread_manage_clients, (void *) NULL);
	if (ret != 0) {
		perror("pthread_create clients");
		goto exit_client;
	}

	/* Create thread to dispatch registration */
	ret = pthread_create(&dispatch_thread, NULL,
			thread_dispatch_ust_registration, (void *) NULL);
	if (ret != 0) {
		perror("pthread_create dispatch");
		goto exit_dispatch;
	}

	/* Create thread to manage application registration. */
	ret = pthread_create(&reg_apps_thread, NULL,
			thread_registration_apps, (void *) NULL);
	if (ret != 0) {
		perror("pthread_create registration");
		goto exit_reg_apps;
	}

	/* Create thread to manage application socket */
	ret = pthread_create(&apps_thread, NULL,
			thread_manage_apps, (void *) NULL);
	if (ret != 0) {
		perror("pthread_create apps");
		goto exit_apps;
	}

	/* Create kernel thread to manage kernel event */
	ret = pthread_create(&kernel_thread, NULL,
			thread_manage_kernel, (void *) NULL);
	if (ret != 0) {
		perror("pthread_create kernel");
		goto exit_kernel;
	}

	ret = pthread_join(kernel_thread, &status);
	if (ret != 0) {
		perror("pthread_join");
		goto error; /* join error, exit without cleanup */
	}

exit_kernel:
	ret = pthread_join(apps_thread, &status);
	if (ret != 0) {
		perror("pthread_join");
		goto error; /* join error, exit without cleanup */
	}

exit_apps:
	ret = pthread_join(reg_apps_thread, &status);
	if (ret != 0) {
		perror("pthread_join");
		goto error; /* join error, exit without cleanup */
	}

exit_reg_apps:
	ret = pthread_join(dispatch_thread, &status);
	if (ret != 0) {
		perror("pthread_join");
		goto error; /* join error, exit without cleanup */
	}

exit_dispatch:
	ret = pthread_join(client_thread, &status);
	if (ret != 0) {
		perror("pthread_join");
		goto error; /* join error, exit without cleanup */
	}

	ret = join_consumer_thread(&kconsumer_data);
	if (ret != 0) {
		perror("join_consumer");
		goto error; /* join error, exit without cleanup */
	}

exit_client:
exit:
	/*
	 * cleanup() is called when no other thread is running.
	 */
	rcu_thread_online();
	cleanup();
	rcu_thread_offline();
	rcu_unregister_thread();
	if (!ret) {
		exit(EXIT_SUCCESS);
	}

error:
	exit(EXIT_FAILURE);
}