Fix: improve error message when UST support is disabled
lttng-tools.git: src/bin/lttng-sessiond/main.c
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2 only,
7 * as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19 #define _GNU_SOURCE
20 #include <getopt.h>
21 #include <grp.h>
22 #include <limits.h>
23 #include <pthread.h>
24 #include <signal.h>
25 #include <stdio.h>
26 #include <stdlib.h>
27 #include <string.h>
28 #include <inttypes.h>
29 #include <sys/mman.h>
30 #include <sys/mount.h>
31 #include <sys/resource.h>
32 #include <sys/socket.h>
33 #include <sys/stat.h>
34 #include <sys/types.h>
35 #include <sys/wait.h>
36 #include <urcu/uatomic.h>
37 #include <unistd.h>
38 #include <config.h>
39
40 #include <common/common.h>
41 #include <common/compat/socket.h>
42 #include <common/defaults.h>
43 #include <common/kernel-consumer/kernel-consumer.h>
44 #include <common/futex.h>
45 #include <common/relayd/relayd.h>
46 #include <common/utils.h>
47
48 #include "lttng-sessiond.h"
49 #include "buffer-registry.h"
50 #include "channel.h"
51 #include "cmd.h"
52 #include "consumer.h"
53 #include "context.h"
54 #include "event.h"
55 #include "kernel.h"
56 #include "kernel-consumer.h"
57 #include "modprobe.h"
58 #include "shm.h"
59 #include "ust-ctl.h"
60 #include "ust-consumer.h"
61 #include "utils.h"
62 #include "fd-limit.h"
63 #include "health.h"
64 #include "testpoint.h"
65 #include "ust-thread.h"
66
67 #define CONSUMERD_FILE "lttng-consumerd"
68
69 /* Const values */
70 const char default_tracing_group[] = DEFAULT_TRACING_GROUP;
71
72 const char *progname;
73 const char *opt_tracing_group;
74 static const char *opt_pidfile;
75 static int opt_sig_parent;
76 static int opt_verbose_consumer;
77 static int opt_daemon;
78 static int opt_no_kernel;
79 static int is_root; /* Set to 1 if the daemon is running as root */
80 static pid_t ppid; /* Parent PID for --sig-parent option */
81 static char *rundir;
82
83 /*
84 * Consumer daemon specific control data. Every value not initialized here is
85 * set to 0 by the static definition.
86 */
87 static struct consumer_data kconsumer_data = {
88 .type = LTTNG_CONSUMER_KERNEL,
89 .err_unix_sock_path = DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
90 .cmd_unix_sock_path = DEFAULT_KCONSUMERD_CMD_SOCK_PATH,
91 .err_sock = -1,
92 .cmd_sock = -1,
93 .metadata_sock.fd = -1,
94 .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
95 .lock = PTHREAD_MUTEX_INITIALIZER,
96 .cond = PTHREAD_COND_INITIALIZER,
97 .cond_mutex = PTHREAD_MUTEX_INITIALIZER,
98 };
99 static struct consumer_data ustconsumer64_data = {
100 .type = LTTNG_CONSUMER64_UST,
101 .err_unix_sock_path = DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
102 .cmd_unix_sock_path = DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH,
103 .err_sock = -1,
104 .cmd_sock = -1,
105 .metadata_sock.fd = -1,
106 .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
107 .lock = PTHREAD_MUTEX_INITIALIZER,
108 .cond = PTHREAD_COND_INITIALIZER,
109 .cond_mutex = PTHREAD_MUTEX_INITIALIZER,
110 };
111 static struct consumer_data ustconsumer32_data = {
112 .type = LTTNG_CONSUMER32_UST,
113 .err_unix_sock_path = DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
114 .cmd_unix_sock_path = DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH,
115 .err_sock = -1,
116 .cmd_sock = -1,
117 .metadata_sock.fd = -1,
118 .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
119 .lock = PTHREAD_MUTEX_INITIALIZER,
120 .cond = PTHREAD_COND_INITIALIZER,
121 .cond_mutex = PTHREAD_MUTEX_INITIALIZER,
122 };
123
124 /* Shared between threads */
125 static int dispatch_thread_exit;
126
127 /* Global application Unix socket path */
128 static char apps_unix_sock_path[PATH_MAX];
129 /* Global client Unix socket path */
130 static char client_unix_sock_path[PATH_MAX];
/* Global wait shm path for UST */
132 static char wait_shm_path[PATH_MAX];
133 /* Global health check unix path */
134 static char health_unix_sock_path[PATH_MAX];
135
136 /* Sockets and FDs */
137 static int client_sock = -1;
138 static int apps_sock = -1;
139 int kernel_tracer_fd = -1;
140 static int kernel_poll_pipe[2] = { -1, -1 };
141
142 /*
143 * Quit pipe for all threads. This permits a single cancellation point
144 * for all threads when receiving an event on the pipe.
145 */
146 static int thread_quit_pipe[2] = { -1, -1 };
147
148 /*
149 * This pipe is used to inform the thread managing application communication
150 * that a command is queued and ready to be processed.
151 */
152 static int apps_cmd_pipe[2] = { -1, -1 };
153
154 int apps_cmd_notify_pipe[2] = { -1, -1 };
155
156 /* Pthread, Mutexes and Semaphores */
157 static pthread_t apps_thread;
158 static pthread_t apps_notify_thread;
159 static pthread_t reg_apps_thread;
160 static pthread_t client_thread;
161 static pthread_t kernel_thread;
162 static pthread_t dispatch_thread;
163 static pthread_t health_thread;
164 static pthread_t ht_cleanup_thread;
165
166 /*
 * UST registration command queue. This queue is tied to a futex and uses an
 * N-wakers / 1-waiter scheme implemented and detailed in futex.c/.h
169 *
170 * The thread_manage_apps and thread_dispatch_ust_registration interact with
171 * this queue and the wait/wake scheme.
172 */
173 static struct ust_cmd_queue ust_cmd_queue;
174
175 /*
176 * Pointer initialized before thread creation.
177 *
178 * This points to the tracing session list containing the session count and a
179 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
180 * MUST NOT be taken if you call a public function in session.c.
181 *
182 * The lock is nested inside the structure: session_list_ptr->lock. Please use
183 * session_lock_list and session_unlock_list for lock acquisition.
184 */
185 static struct ltt_session_list *session_list_ptr;
186
187 int ust_consumerd64_fd = -1;
188 int ust_consumerd32_fd = -1;
189
190 static const char *consumerd32_bin = CONFIG_CONSUMERD32_BIN;
191 static const char *consumerd64_bin = CONFIG_CONSUMERD64_BIN;
192 static const char *consumerd32_libdir = CONFIG_CONSUMERD32_LIBDIR;
193 static const char *consumerd64_libdir = CONFIG_CONSUMERD64_LIBDIR;
194
195 static const char *module_proc_lttng = "/proc/lttng";
196
197 /*
198 * Consumer daemon state which is changed when spawning it, killing it or in
199 * case of a fatal error.
200 */
201 enum consumerd_state {
202 CONSUMER_STARTED = 1,
203 CONSUMER_STOPPED = 2,
204 CONSUMER_ERROR = 3,
205 };
206
207 /*
208 * This consumer daemon state is used to validate if a client command will be
209 * able to reach the consumer. If not, the client is informed. For instance,
210 * doing a "lttng start" when the consumer state is set to ERROR will return an
211 * error to the client.
212 *
213 * The following example shows a possible race condition of this scheme:
214 *
215 * consumer thread error happens
216 * client cmd arrives
217 * client cmd checks state -> still OK
218 * consumer thread exit, sets error
219 * client cmd try to talk to consumer
220 * ...
221 *
222 * However, since the consumer is a different daemon, we have no way of making
223 * sure the command will reach it safely even with this state flag. This is why
224 * we consider that up to the state validation during command processing, the
 * command is safe. After that, we cannot guarantee the correctness of the
226 * client request vis-a-vis the consumer.
227 */
228 static enum consumerd_state ust_consumerd_state;
229 static enum consumerd_state kernel_consumerd_state;
230
231 /*
232 * Socket timeout for receiving and sending in seconds.
233 */
234 static int app_socket_timeout;
235
236 /* Set in main() with the current page size. */
237 long page_size;
238
239 static
240 void setup_consumerd_path(void)
241 {
242 const char *bin, *libdir;
243
244 /*
245 * Allow INSTALL_BIN_PATH to be used as a target path for the
246 * native architecture size consumer if CONFIG_CONSUMER*_PATH
247 * has not been defined.
248 */
249 #if (CAA_BITS_PER_LONG == 32)
250 if (!consumerd32_bin[0]) {
251 consumerd32_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
252 }
253 if (!consumerd32_libdir[0]) {
254 consumerd32_libdir = INSTALL_LIB_PATH;
255 }
256 #elif (CAA_BITS_PER_LONG == 64)
257 if (!consumerd64_bin[0]) {
258 consumerd64_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
259 }
260 if (!consumerd64_libdir[0]) {
261 consumerd64_libdir = INSTALL_LIB_PATH;
262 }
263 #else
264 #error "Unknown bitness"
265 #endif
266
267 /*
 * The runtime environment variables override the build defaults.
269 */
270 bin = getenv("LTTNG_CONSUMERD32_BIN");
271 if (bin) {
272 consumerd32_bin = bin;
273 }
274 bin = getenv("LTTNG_CONSUMERD64_BIN");
275 if (bin) {
276 consumerd64_bin = bin;
277 }
278 libdir = getenv("LTTNG_CONSUMERD32_LIBDIR");
279 if (libdir) {
280 consumerd32_libdir = libdir;
281 }
282 libdir = getenv("LTTNG_CONSUMERD64_LIBDIR");
283 if (libdir) {
284 consumerd64_libdir = libdir;
285 }
286 }
287
288 /*
289 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
290 */
291 int sessiond_set_thread_pollset(struct lttng_poll_event *events, size_t size)
292 {
293 int ret;
294
295 assert(events);
296
297 ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
298 if (ret < 0) {
299 goto error;
300 }
301
302 /* Add quit pipe */
303 ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
304 if (ret < 0) {
305 goto error;
306 }
307
308 return 0;
309
310 error:
311 return ret;
312 }
313
314 /*
315 * Check if the thread quit pipe was triggered.
316 *
 * Return 1 if it was triggered, else 0.
318 */
319 int sessiond_check_thread_quit_pipe(int fd, uint32_t events)
320 {
321 if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
322 return 1;
323 }
324
325 return 0;
326 }
327
328 /*
329 * Return group ID of the tracing group or -1 if not found.
330 */
331 static gid_t allowed_group(void)
332 {
333 struct group *grp;
334
335 if (opt_tracing_group) {
336 grp = getgrnam(opt_tracing_group);
337 } else {
338 grp = getgrnam(default_tracing_group);
339 }
340 if (!grp) {
341 return -1;
342 } else {
343 return grp->gr_gid;
344 }
345 }
346
347 /*
348 * Init thread quit pipe.
349 *
350 * Return -1 on error or 0 if all pipes are created.
351 */
352 static int init_thread_quit_pipe(void)
353 {
354 int ret, i;
355
356 ret = pipe(thread_quit_pipe);
357 if (ret < 0) {
358 PERROR("thread quit pipe");
359 goto error;
360 }
361
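/*
 * Mark both pipe ends close-on-exec so they are not inherited across
 * exec(), e.g. by the spawned consumer daemons.
 */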
362 for (i = 0; i < 2; i++) {
363 ret = fcntl(thread_quit_pipe[i], F_SETFD, FD_CLOEXEC);
364 if (ret < 0) {
365 PERROR("fcntl");
366 goto error;
367 }
368 }
369
370 error:
371 return ret;
372 }
373
374 /*
375 * Stop all threads by closing the thread quit pipe.
376 */
377 static void stop_threads(void)
378 {
379 int ret;
380
381 /* Stopping all threads */
382 DBG("Terminating all threads");
383 ret = notify_thread_pipe(thread_quit_pipe[1]);
384 if (ret < 0) {
385 ERR("write error on thread quit pipe");
386 }
387
388 /* Dispatch thread */
389 CMM_STORE_SHARED(dispatch_thread_exit, 1);
390 futex_nto1_wake(&ust_cmd_queue.futex);
391 }
392
393 /*
 * Close every consumer socket.
395 */
396 static void close_consumer_sockets(void)
397 {
398 int ret;
399
400 if (kconsumer_data.err_sock >= 0) {
401 ret = close(kconsumer_data.err_sock);
402 if (ret < 0) {
403 PERROR("kernel consumer err_sock close");
404 }
405 }
406 if (ustconsumer32_data.err_sock >= 0) {
407 ret = close(ustconsumer32_data.err_sock);
408 if (ret < 0) {
409 PERROR("UST consumerd32 err_sock close");
410 }
411 }
412 if (ustconsumer64_data.err_sock >= 0) {
413 ret = close(ustconsumer64_data.err_sock);
414 if (ret < 0) {
415 PERROR("UST consumerd64 err_sock close");
416 }
417 }
418 if (kconsumer_data.cmd_sock >= 0) {
419 ret = close(kconsumer_data.cmd_sock);
420 if (ret < 0) {
421 PERROR("kernel consumer cmd_sock close");
422 }
423 }
424 if (ustconsumer32_data.cmd_sock >= 0) {
425 ret = close(ustconsumer32_data.cmd_sock);
426 if (ret < 0) {
427 PERROR("UST consumerd32 cmd_sock close");
428 }
429 }
430 if (ustconsumer64_data.cmd_sock >= 0) {
431 ret = close(ustconsumer64_data.cmd_sock);
432 if (ret < 0) {
433 PERROR("UST consumerd64 cmd_sock close");
434 }
435 }
436 }
437
438 /*
439 * Cleanup the daemon
440 */
441 static void cleanup(void)
442 {
443 int ret;
444 char *cmd = NULL;
445 struct ltt_session *sess, *stmp;
446
447 DBG("Cleaning up");
448
449 /*
450 * Close the thread quit pipe. It has already done its job,
451 * since we are now called.
452 */
453 utils_close_pipe(thread_quit_pipe);
454
455 /*
456 * If opt_pidfile is undefined, the default file will be wiped when
457 * removing the rundir.
458 */
459 if (opt_pidfile) {
460 ret = remove(opt_pidfile);
461 if (ret < 0) {
462 PERROR("remove pidfile %s", opt_pidfile);
463 }
464 }
465
466 DBG("Removing %s directory", rundir);
467 ret = asprintf(&cmd, "rm -rf %s", rundir);
if (ret < 0) {
ERR("asprintf failed. Something is really wrong!");
cmd = NULL;
}

/* Remove lttng run directory, but only if the command was built. */
ret = cmd ? system(cmd) : -1;
474 if (ret < 0) {
475 ERR("Unable to clean %s", rundir);
476 }
477 free(cmd);
478 free(rundir);
479
480 DBG("Cleaning up all sessions");
481
482 /* Destroy session list mutex */
483 if (session_list_ptr != NULL) {
484 pthread_mutex_destroy(&session_list_ptr->lock);
485
486 /* Cleanup ALL session */
487 cds_list_for_each_entry_safe(sess, stmp,
488 &session_list_ptr->head, list) {
489 cmd_destroy_session(sess, kernel_poll_pipe[1]);
490 }
491 }
492
493 DBG("Closing all UST sockets");
494 ust_app_clean_list();
495 buffer_reg_destroy_registries();
496
497 if (is_root && !opt_no_kernel) {
498 DBG2("Closing kernel fd");
499 if (kernel_tracer_fd >= 0) {
500 ret = close(kernel_tracer_fd);
501 if (ret) {
502 PERROR("close");
503 }
504 }
505 DBG("Unloading kernel modules");
506 modprobe_remove_lttng_all();
507 }
508
509 close_consumer_sockets();
510
511 /* <fun> */
512 DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
513 "Matthew, BEET driven development works!%c[%dm",
514 27, 1, 31, 27, 0, 27, 1, 33, 27, 0);
515 /* </fun> */
516 }
517
518 /*
519 * Send data on a unix socket using the liblttsessiondcomm API.
520 *
521 * Return lttcomm error code.
522 */
523 static int send_unix_sock(int sock, void *buf, size_t len)
524 {
525 /* Check valid length */
526 if (len == 0) {
527 return -1;
528 }
529
530 return lttcomm_send_unix_sock(sock, buf, len);
531 }
532
533 /*
534 * Free memory of a command context structure.
535 */
536 static void clean_command_ctx(struct command_ctx **cmd_ctx)
537 {
538 DBG("Clean command context structure");
539 if (*cmd_ctx) {
540 if ((*cmd_ctx)->llm) {
541 free((*cmd_ctx)->llm);
542 }
543 if ((*cmd_ctx)->lsm) {
544 free((*cmd_ctx)->lsm);
545 }
546 free(*cmd_ctx);
547 *cmd_ctx = NULL;
548 }
549 }
550
551 /*
552 * Notify UST applications using the shm mmap futex.
553 */
554 static int notify_ust_apps(int active)
555 {
556 char *wait_shm_mmap;
557
558 DBG("Notifying applications of session daemon state: %d", active);
559
560 /* See shm.c for this call implying mmap, shm and futex calls */
561 wait_shm_mmap = shm_ust_get_mmap(wait_shm_path, is_root);
562 if (wait_shm_mmap == NULL) {
563 goto error;
564 }
565
566 /* Wake waiting process */
567 futex_wait_update((int32_t *) wait_shm_mmap, active);
568
569 /* Apps notified successfully */
570 return 0;
571
572 error:
573 return -1;
574 }
575
576 /*
577 * Setup the outgoing data buffer for the response (llm) by allocating the
578 * right amount of memory and copying the original information from the lsm
579 * structure.
580 *
 * Return the payload buffer size on success, or a negative errno value on error.
582 */
583 static int setup_lttng_msg(struct command_ctx *cmd_ctx, size_t size)
584 {
585 int ret, buf_size;
586
587 buf_size = size;
588
589 cmd_ctx->llm = zmalloc(sizeof(struct lttcomm_lttng_msg) + buf_size);
590 if (cmd_ctx->llm == NULL) {
591 PERROR("zmalloc");
592 ret = -ENOMEM;
593 goto error;
594 }
595
596 /* Copy common data */
597 cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
598 cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;
599
600 cmd_ctx->llm->data_size = size;
601 cmd_ctx->lttng_msg_size = sizeof(struct lttcomm_lttng_msg) + buf_size;
602
603 return buf_size;
604
605 error:
606 return ret;
607 }
608
609 /*
 * Update the kernel poll set with every channel fd available across all
 * tracing sessions. Add the wakeup pipe at the end of the set.
612 */
613 static int update_kernel_poll(struct lttng_poll_event *events)
614 {
615 int ret;
616 struct ltt_session *session;
617 struct ltt_kernel_channel *channel;
618
619 DBG("Updating kernel poll set");
620
621 session_lock_list();
622 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
623 session_lock(session);
624 if (session->kernel_session == NULL) {
625 session_unlock(session);
626 continue;
627 }
628
629 cds_list_for_each_entry(channel,
630 &session->kernel_session->channel_list.head, list) {
631 /* Add channel fd to the kernel poll set */
632 ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
633 if (ret < 0) {
634 session_unlock(session);
635 goto error;
636 }
637 DBG("Channel fd %d added to kernel set", channel->fd);
638 }
639 session_unlock(session);
640 }
641 session_unlock_list();
642
643 return 0;
644
645 error:
646 session_unlock_list();
647 return -1;
648 }
649
650 /*
 * Find the channel fd matching 'fd' across all tracing sessions. When found,
 * check for new channel streams and send those stream fds to the kernel
 * consumer.
 *
 * Useful for the CPU hotplug feature.
655 */
656 static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
657 {
658 int ret = 0;
659 struct ltt_session *session;
660 struct ltt_kernel_session *ksess;
661 struct ltt_kernel_channel *channel;
662
663 DBG("Updating kernel streams for channel fd %d", fd);
664
665 session_lock_list();
666 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
667 session_lock(session);
668 if (session->kernel_session == NULL) {
669 session_unlock(session);
670 continue;
671 }
672 ksess = session->kernel_session;
673
674 cds_list_for_each_entry(channel, &ksess->channel_list.head, list) {
675 if (channel->fd == fd) {
676 DBG("Channel found, updating kernel streams");
677 ret = kernel_open_channel_stream(channel);
678 if (ret < 0) {
679 goto error;
680 }
681 /* Update the stream global counter */
682 ksess->stream_count_global += ret;
683
684 /*
685 * Have we already sent fds to the consumer? If yes, it means
686 * that tracing is started so it is safe to send our updated
687 * stream fds.
688 */
689 if (ksess->consumer_fds_sent == 1 && ksess->consumer != NULL) {
690 struct lttng_ht_iter iter;
691 struct consumer_socket *socket;
692
693 rcu_read_lock();
694 cds_lfht_for_each_entry(ksess->consumer->socks->ht,
695 &iter.iter, socket, node.node) {
696 /* Code flow error */
697 assert(socket->fd >= 0);
698
699 pthread_mutex_lock(socket->lock);
700 ret = kernel_consumer_send_channel_stream(socket,
701 channel, ksess,
702 session->output_traces ? 1 : 0);
703 pthread_mutex_unlock(socket->lock);
704 if (ret < 0) {
705 rcu_read_unlock();
706 goto error;
707 }
708 }
709 rcu_read_unlock();
710 }
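/*
 * Channel was found and handled; the error label below doubles as
 * the common unlock-and-return path, so this is not necessarily a
 * failure.
 */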
711 goto error;
712 }
713 }
714 session_unlock(session);
715 }
716 session_unlock_list();
717 return ret;
718
719 error:
720 session_unlock(session);
721 session_unlock_list();
722 return ret;
723 }
724
725 /*
726 * For each tracing session, update newly registered apps. The session list
727 * lock MUST be acquired before calling this.
728 */
729 static void update_ust_app(int app_sock)
730 {
731 struct ltt_session *sess, *stmp;
732
733 /* For all tracing session(s) */
734 cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
735 session_lock(sess);
736 if (sess->ust_session) {
737 ust_app_global_update(sess->ust_session, app_sock);
738 }
739 session_unlock(sess);
740 }
741 }
742
743 /*
 * This thread manages events coming from the kernel.
745 *
746 * Features supported in this thread:
747 * -) CPU Hotplug
748 */
749 static void *thread_manage_kernel(void *data)
750 {
751 int ret, i, pollfd, update_poll_flag = 1, err = -1;
752 uint32_t revents, nb_fd;
753 char tmp;
754 struct lttng_poll_event events;
755
756 DBG("[thread] Thread manage kernel started");
757
758 health_register(HEALTH_TYPE_KERNEL);
759
760 /*
 * The first step of the while loop cleans this structure, which could free
 * non-NULL pointers, so initialize it before entering the loop.
763 */
764 lttng_poll_init(&events);
765
766 if (testpoint(thread_manage_kernel)) {
767 goto error_testpoint;
768 }
769
770 health_code_update();
771
772 if (testpoint(thread_manage_kernel_before_loop)) {
773 goto error_testpoint;
774 }
775
776 while (1) {
777 health_code_update();
778
779 if (update_poll_flag == 1) {
780 /* Clean events object. We are about to populate it again. */
781 lttng_poll_clean(&events);
782
783 ret = sessiond_set_thread_pollset(&events, 2);
784 if (ret < 0) {
785 goto error_poll_create;
786 }
787
788 ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);
789 if (ret < 0) {
790 goto error;
791 }
792
793 /* This will add the available kernel channel if any. */
794 ret = update_kernel_poll(&events);
795 if (ret < 0) {
796 goto error;
797 }
798 update_poll_flag = 0;
799 }
800
801 DBG("Thread kernel polling on %d fds", LTTNG_POLL_GETNB(&events));
802
803 /* Poll infinite value of time */
804 restart:
805 health_poll_entry();
806 ret = lttng_poll_wait(&events, -1);
807 health_poll_exit();
808 if (ret < 0) {
809 /*
810 * Restart interrupted system call.
811 */
812 if (errno == EINTR) {
813 goto restart;
814 }
815 goto error;
816 } else if (ret == 0) {
817 /* Should not happen since timeout is infinite */
818 ERR("Return value of poll is 0 with an infinite timeout.\n"
819 "This should not have happened! Continuing...");
820 continue;
821 }
822
823 nb_fd = ret;
824
825 for (i = 0; i < nb_fd; i++) {
826 /* Fetch once the poll data */
827 revents = LTTNG_POLL_GETEV(&events, i);
828 pollfd = LTTNG_POLL_GETFD(&events, i);
829
830 health_code_update();
831
832 /* Thread quit pipe has been closed. Killing thread. */
833 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
834 if (ret) {
835 err = 0;
836 goto exit;
837 }
838
839 /* Check for data on kernel pipe */
840 if (pollfd == kernel_poll_pipe[0] && (revents & LPOLLIN)) {
841 do {
842 ret = read(kernel_poll_pipe[0], &tmp, 1);
843 } while (ret < 0 && errno == EINTR);
844 /*
845 * Ret value is useless here, if this pipe gets any actions an
846 * update is required anyway.
847 */
848 update_poll_flag = 1;
849 continue;
850 } else {
851 /*
852 * New CPU detected by the kernel. Adding kernel stream to
853 * kernel session and updating the kernel consumer
854 */
855 if (revents & LPOLLIN) {
856 ret = update_kernel_stream(&kconsumer_data, pollfd);
857 if (ret < 0) {
858 continue;
859 }
860 break;
861 /*
862 * TODO: We might want to handle the LPOLLERR | LPOLLHUP
863 * and unregister kernel stream at this point.
864 */
865 }
866 }
867 }
868 }
869
870 exit:
871 error:
872 lttng_poll_clean(&events);
873 error_poll_create:
874 error_testpoint:
875 utils_close_pipe(kernel_poll_pipe);
876 kernel_poll_pipe[0] = kernel_poll_pipe[1] = -1;
877 if (err) {
878 health_error();
879 ERR("Health error occurred in %s", __func__);
880 WARN("Kernel thread died unexpectedly. "
881 "Kernel tracing can continue but CPU hotplug is disabled.");
882 }
883 health_unregister();
884 DBG("Kernel thread dying");
885 return NULL;
886 }
887
888 /*
 * Signal the consumer data's pthread condition to notify waiters that the
 * consumer thread state has changed.
890 */
891 static void signal_consumer_condition(struct consumer_data *data, int state)
892 {
893 pthread_mutex_lock(&data->cond_mutex);
894
895 /*
896 * The state is set before signaling. It can be any value, it's the waiter
897 * job to correctly interpret this condition variable associated to the
898 * consumer pthread_cond.
899 *
900 * A value of 0 means that the corresponding thread of the consumer data
901 * was not started. 1 indicates that the thread has started and is ready
902 * for action. A negative value means that there was an error during the
903 * thread bootstrap.
904 */
905 data->consumer_thread_is_ready = state;
906 (void) pthread_cond_signal(&data->cond);
907
908 pthread_mutex_unlock(&data->cond_mutex);
909 }
910
911 /*
 * This thread manages the consumer errors sent back to the session daemon.
913 */
914 static void *thread_manage_consumer(void *data)
915 {
916 int sock = -1, i, ret, pollfd, err = -1;
917 uint32_t revents, nb_fd;
918 enum lttcomm_return_code code;
919 struct lttng_poll_event events;
920 struct consumer_data *consumer_data = data;
921
922 DBG("[thread] Manage consumer started");
923
924 health_register(HEALTH_TYPE_CONSUMER);
925
926 health_code_update();
927
928 /*
929 * Pass 3 as size here for the thread quit pipe, consumerd_err_sock and the
930 * metadata_sock. Nothing more will be added to this poll set.
931 */
932 ret = sessiond_set_thread_pollset(&events, 3);
933 if (ret < 0) {
934 goto error_poll;
935 }
936
937 /*
938 * The error socket here is already in a listening state which was done
939 * just before spawning this thread to avoid a race between the consumer
940 * daemon exec trying to connect and the listen() call.
941 */
942 ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
943 if (ret < 0) {
944 goto error;
945 }
946
947 health_code_update();
948
949 /* Infinite blocking call, waiting for transmission */
950 restart:
951 health_poll_entry();
952
953 if (testpoint(thread_manage_consumer)) {
954 goto error;
955 }
956
957 ret = lttng_poll_wait(&events, -1);
958 health_poll_exit();
959 if (ret < 0) {
960 /*
961 * Restart interrupted system call.
962 */
963 if (errno == EINTR) {
964 goto restart;
965 }
966 goto error;
967 }
968
969 nb_fd = ret;
970
971 for (i = 0; i < nb_fd; i++) {
972 /* Fetch once the poll data */
973 revents = LTTNG_POLL_GETEV(&events, i);
974 pollfd = LTTNG_POLL_GETFD(&events, i);
975
976 health_code_update();
977
978 /* Thread quit pipe has been closed. Killing thread. */
979 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
980 if (ret) {
981 err = 0;
982 goto exit;
983 }
984
985 /* Event on the registration socket */
986 if (pollfd == consumer_data->err_sock) {
987 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
988 ERR("consumer err socket poll error");
989 goto error;
990 }
991 }
992 }
993
994 sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
995 if (sock < 0) {
996 goto error;
997 }
998
999 /*
1000 * Set the CLOEXEC flag. Return code is useless because either way, the
1001 * show must go on.
1002 */
1003 (void) utils_set_fd_cloexec(sock);
1004
1005 health_code_update();
1006
1007 DBG2("Receiving code from consumer err_sock");
1008
1009 /* Getting status code from kconsumerd */
1010 ret = lttcomm_recv_unix_sock(sock, &code,
1011 sizeof(enum lttcomm_return_code));
1012 if (ret <= 0) {
1013 goto error;
1014 }
1015
1016 health_code_update();
1017
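/*
 * Note: both the command and metadata sockets connect to the consumerd's
 * command socket path; the consumerd presumably distinguishes them by
 * connection order (an assumption based on this call site, not verified
 * here).
 */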
1018 if (code == LTTCOMM_CONSUMERD_COMMAND_SOCK_READY) {
1019 /* Connect both socket, command and metadata. */
1020 consumer_data->cmd_sock =
1021 lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
1022 consumer_data->metadata_sock.fd =
1023 lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
1024 if (consumer_data->cmd_sock < 0 ||
1025 consumer_data->metadata_sock.fd < 0) {
1026 PERROR("consumer connect cmd socket");
1027 /* On error, signal condition and quit. */
1028 signal_consumer_condition(consumer_data, -1);
1029 goto error;
1030 }
1031 /* Create metadata socket lock. */
1032 consumer_data->metadata_sock.lock = zmalloc(sizeof(pthread_mutex_t));
1033 if (consumer_data->metadata_sock.lock == NULL) {
1034 PERROR("zmalloc pthread mutex");
1035 ret = -1;
1036 goto error;
1037 }
1038 pthread_mutex_init(consumer_data->metadata_sock.lock, NULL);
1039
1040 signal_consumer_condition(consumer_data, 1);
1041 DBG("Consumer command socket ready (fd: %d", consumer_data->cmd_sock);
1042 DBG("Consumer metadata socket ready (fd: %d)",
1043 consumer_data->metadata_sock.fd);
1044 } else {
1045 ERR("consumer error when waiting for SOCK_READY : %s",
1046 lttcomm_get_readable_code(-code));
1047 goto error;
1048 }
1049
/* Remove the consumerd error sock since we've established a connection. */
1051 ret = lttng_poll_del(&events, consumer_data->err_sock);
1052 if (ret < 0) {
1053 goto error;
1054 }
1055
1056 /* Add new accepted error socket. */
1057 ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
1058 if (ret < 0) {
1059 goto error;
1060 }
1061
1062 /* Add metadata socket that is successfully connected. */
1063 ret = lttng_poll_add(&events, consumer_data->metadata_sock.fd,
1064 LPOLLIN | LPOLLRDHUP);
1065 if (ret < 0) {
1066 goto error;
1067 }
1068
1069 health_code_update();
1070
1071 /* Infinite blocking call, waiting for transmission */
1072 restart_poll:
1073 while (1) {
1074 health_poll_entry();
1075 ret = lttng_poll_wait(&events, -1);
1076 health_poll_exit();
1077 if (ret < 0) {
1078 /*
1079 * Restart interrupted system call.
1080 */
1081 if (errno == EINTR) {
1082 goto restart_poll;
1083 }
1084 goto error;
1085 }
1086
1087 nb_fd = ret;
1088
1089 for (i = 0; i < nb_fd; i++) {
1090 /* Fetch once the poll data */
1091 revents = LTTNG_POLL_GETEV(&events, i);
1092 pollfd = LTTNG_POLL_GETFD(&events, i);
1093
1094 health_code_update();
1095
1096 /* Thread quit pipe has been closed. Killing thread. */
1097 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
1098 if (ret) {
1099 err = 0;
1100 goto exit;
1101 }
1102
1103 if (pollfd == sock) {
1104 /* Event on the consumerd socket */
1105 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1106 ERR("consumer err socket second poll error");
1107 goto error;
1108 }
1109 health_code_update();
1110 /* Wait for any kconsumerd error */
1111 ret = lttcomm_recv_unix_sock(sock, &code,
1112 sizeof(enum lttcomm_return_code));
1113 if (ret <= 0) {
1114 ERR("consumer closed the command socket");
1115 goto error;
1116 }
1117
1118 ERR("consumer return code : %s",
1119 lttcomm_get_readable_code(-code));
1120
1121 goto exit;
1122 } else if (pollfd == consumer_data->metadata_sock.fd) {
1123 /* UST metadata requests */
1124 ret = ust_consumer_metadata_request(
1125 &consumer_data->metadata_sock);
1126 if (ret < 0) {
1127 ERR("Handling metadata request");
1128 goto error;
1129 }
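/* Metadata request served; poll again for more events. */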
1130 break;
1131 } else {
1132 ERR("Unknown pollfd");
1133 goto error;
1134 }
1135 }
1136 health_code_update();
1137 }
1138
1139 exit:
1140 error:
1141 /* Immediately set the consumerd state to stopped */
1142 if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
1143 uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR);
1144 } else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
1145 consumer_data->type == LTTNG_CONSUMER32_UST) {
1146 uatomic_set(&ust_consumerd_state, CONSUMER_ERROR);
1147 } else {
1148 /* Code flow error... */
1149 assert(0);
1150 }
1151
1152 if (consumer_data->err_sock >= 0) {
1153 ret = close(consumer_data->err_sock);
1154 if (ret) {
1155 PERROR("close");
1156 }
1157 consumer_data->err_sock = -1;
1158 }
1159 if (consumer_data->cmd_sock >= 0) {
1160 ret = close(consumer_data->cmd_sock);
1161 if (ret) {
1162 PERROR("close");
1163 }
1164 consumer_data->cmd_sock = -1;
1165 }
1166 if (consumer_data->metadata_sock.fd >= 0) {
1167 ret = close(consumer_data->metadata_sock.fd);
1168 if (ret) {
1169 PERROR("close");
1170 }
1171 }
1172 /* Cleanup metadata socket mutex. */
1173 pthread_mutex_destroy(consumer_data->metadata_sock.lock);
1174 free(consumer_data->metadata_sock.lock);
1175
1176 if (sock >= 0) {
1177 ret = close(sock);
1178 if (ret) {
1179 PERROR("close");
1180 }
1181 }
1182
1183 unlink(consumer_data->err_unix_sock_path);
1184 unlink(consumer_data->cmd_unix_sock_path);
1185 consumer_data->pid = 0;
1186
1187 lttng_poll_clean(&events);
1188 error_poll:
1189 if (err) {
1190 health_error();
1191 ERR("Health error occurred in %s", __func__);
1192 }
1193 health_unregister();
1194 DBG("consumer thread cleanup completed");
1195
1196 return NULL;
1197 }
1198
1199 /*
 * This thread manages application communication.
1201 */
1202 static void *thread_manage_apps(void *data)
1203 {
1204 int i, ret, pollfd, err = -1;
1205 uint32_t revents, nb_fd;
1206 struct lttng_poll_event events;
1207
1208 DBG("[thread] Manage application started");
1209
1210 rcu_register_thread();
1211 rcu_thread_online();
1212
1213 health_register(HEALTH_TYPE_APP_MANAGE);
1214
1215 if (testpoint(thread_manage_apps)) {
1216 goto error_testpoint;
1217 }
1218
1219 health_code_update();
1220
1221 ret = sessiond_set_thread_pollset(&events, 2);
1222 if (ret < 0) {
1223 goto error_poll_create;
1224 }
1225
1226 ret = lttng_poll_add(&events, apps_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);
1227 if (ret < 0) {
1228 goto error;
1229 }
1230
1231 if (testpoint(thread_manage_apps_before_loop)) {
1232 goto error;
1233 }
1234
1235 health_code_update();
1236
1237 while (1) {
1238 DBG("Apps thread polling on %d fds", LTTNG_POLL_GETNB(&events));
1239
/* Infinite blocking call, waiting for transmission */
1241 restart:
1242 health_poll_entry();
1243 ret = lttng_poll_wait(&events, -1);
1244 health_poll_exit();
1245 if (ret < 0) {
1246 /*
1247 * Restart interrupted system call.
1248 */
1249 if (errno == EINTR) {
1250 goto restart;
1251 }
1252 goto error;
1253 }
1254
1255 nb_fd = ret;
1256
1257 for (i = 0; i < nb_fd; i++) {
1258 /* Fetch once the poll data */
1259 revents = LTTNG_POLL_GETEV(&events, i);
1260 pollfd = LTTNG_POLL_GETFD(&events, i);
1261
1262 health_code_update();
1263
1264 /* Thread quit pipe has been closed. Killing thread. */
1265 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
1266 if (ret) {
1267 err = 0;
1268 goto exit;
1269 }
1270
1271 /* Inspect the apps cmd pipe */
1272 if (pollfd == apps_cmd_pipe[0]) {
1273 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1274 ERR("Apps command pipe error");
1275 goto error;
1276 } else if (revents & LPOLLIN) {
1277 int sock;
1278
1279 /* Empty pipe */
1280 do {
1281 ret = read(apps_cmd_pipe[0], &sock, sizeof(sock));
1282 } while (ret < 0 && errno == EINTR);
1283 if (ret < 0 || ret < sizeof(sock)) {
1284 PERROR("read apps cmd pipe");
1285 goto error;
1286 }
1287
1288 health_code_update();
1289
1290 /*
1291 * We only monitor the error events of the socket. This
1292 * thread does not handle any incoming data from UST
1293 * (POLLIN).
1294 */
1295 ret = lttng_poll_add(&events, sock,
1296 LPOLLERR | LPOLLHUP | LPOLLRDHUP);
1297 if (ret < 0) {
1298 goto error;
1299 }
1300
1301 /*
 * Set socket timeout for both receiving and sending.
1303 * app_socket_timeout is in seconds, whereas
1304 * lttcomm_setsockopt_rcv_timeout and
1305 * lttcomm_setsockopt_snd_timeout expect msec as
1306 * parameter.
1307 */
1308 (void) lttcomm_setsockopt_rcv_timeout(sock,
1309 app_socket_timeout * 1000);
1310 (void) lttcomm_setsockopt_snd_timeout(sock,
1311 app_socket_timeout * 1000);
1312
1313 DBG("Apps with sock %d added to poll set", sock);
1314
1315 health_code_update();
1316
1317 break;
1318 }
1319 } else {
1320 /*
1321 * At this point, we know that a registered application made
1322 * the event at poll_wait.
1323 */
1324 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1325 /* Removing from the poll set */
1326 ret = lttng_poll_del(&events, pollfd);
1327 if (ret < 0) {
1328 goto error;
1329 }
1330
1331 /* Socket closed on remote end. */
1332 ust_app_unregister(pollfd);
1333 break;
1334 }
1335 }
1336
1337 health_code_update();
1338 }
1339 }
1340
1341 exit:
1342 error:
1343 lttng_poll_clean(&events);
1344 error_poll_create:
1345 error_testpoint:
1346 utils_close_pipe(apps_cmd_pipe);
1347 apps_cmd_pipe[0] = apps_cmd_pipe[1] = -1;
1348
1349 /*
1350 * We don't clean the UST app hash table here since already registered
1351 * applications can still be controlled so let them be until the session
1352 * daemon dies or the applications stop.
1353 */
1354
1355 if (err) {
1356 health_error();
1357 ERR("Health error occurred in %s", __func__);
1358 }
1359 health_unregister();
1360 DBG("Application communication apps thread cleanup complete");
1361 rcu_thread_offline();
1362 rcu_unregister_thread();
1363 return NULL;
1364 }
1365
1366 /*
 * Send a socket to a thread. This is called from the dispatch UST registration
1368 * thread once all sockets are set for the application.
1369 *
 * On success, return 0; on failure, return the negative errno value from the
 * failed write().
1372 */
1373 static int send_socket_to_thread(int fd, int sock)
1374 {
1375 int ret;
1376
1377 /* Sockets MUST be set or else this should not have been called. */
1378 assert(fd >= 0);
1379 assert(sock >= 0);
1380
1381 do {
1382 ret = write(fd, &sock, sizeof(sock));
1383 } while (ret < 0 && errno == EINTR);
1384 if (ret < 0 || ret != sizeof(sock)) {
1385 PERROR("write apps pipe %d", fd);
1386 if (ret < 0) {
1387 ret = -errno;
1388 }
1389 goto error;
1390 }
1391
1392 /* All good. Don't send back the write positive ret value. */
1393 ret = 0;
1394 error:
1395 return ret;
1396 }
1397
1398 /*
 * Sanitize the wait queue of the dispatch registration thread by removing
 * invalid nodes from it. This avoids memory leaks when the UST notify socket
 * is never received.
1402 */
1403 static void sanitize_wait_queue(struct ust_reg_wait_queue *wait_queue)
1404 {
1405 int ret, nb_fd = 0, i;
1406 unsigned int fd_added = 0;
1407 struct lttng_poll_event events;
1408 struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
1409
1410 assert(wait_queue);
1411
1412 lttng_poll_init(&events);
1413
1414 /* Just skip everything for an empty queue. */
1415 if (!wait_queue->count) {
1416 goto end;
1417 }
1418
1419 ret = lttng_poll_create(&events, wait_queue->count, LTTNG_CLOEXEC);
1420 if (ret < 0) {
1421 goto error_create;
1422 }
1423
1424 cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
1425 &wait_queue->head, head) {
1426 assert(wait_node->app);
1427 ret = lttng_poll_add(&events, wait_node->app->sock,
1428 LPOLLHUP | LPOLLERR);
1429 if (ret < 0) {
1430 goto error;
1431 }
1432
1433 fd_added = 1;
1434 }
1435
1436 if (!fd_added) {
1437 goto end;
1438 }
1439
1440 /*
1441 * Poll but don't block so we can quickly identify the faulty events and
1442 * clean them afterwards from the wait queue.
1443 */
1444 ret = lttng_poll_wait(&events, 0);
1445 if (ret < 0) {
1446 goto error;
1447 }
1448 nb_fd = ret;
1449
1450 for (i = 0; i < nb_fd; i++) {
1451 /* Get faulty FD. */
1452 uint32_t revents = LTTNG_POLL_GETEV(&events, i);
1453 int pollfd = LTTNG_POLL_GETFD(&events, i);
1454
1455 cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
1456 &wait_queue->head, head) {
1457 if (pollfd == wait_node->app->sock &&
1458 (revents & (LPOLLHUP | LPOLLERR))) {
1459 cds_list_del(&wait_node->head);
1460 wait_queue->count--;
1461 ust_app_destroy(wait_node->app);
1462 free(wait_node);
1463 break;
1464 }
1465 }
1466 }
1467
1468 if (nb_fd > 0) {
1469 DBG("Wait queue sanitized, %d node were cleaned up", nb_fd);
1470 }
1471
1472 end:
1473 lttng_poll_clean(&events);
1474 return;
1475
1476 error:
1477 lttng_poll_clean(&events);
1478 error_create:
1479 ERR("Unable to sanitize wait queue");
1480 return;
1481 }
1482
1483 /*
1484 * Dispatch request from the registration threads to the application
1485 * communication thread.
1486 */
1487 static void *thread_dispatch_ust_registration(void *data)
1488 {
1489 int ret, err = -1;
1490 struct cds_wfq_node *node;
1491 struct ust_command *ust_cmd = NULL;
1492 struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
1493 struct ust_reg_wait_queue wait_queue = {
1494 .count = 0,
1495 };
1496
1497 health_register(HEALTH_TYPE_APP_REG_DISPATCH);
1498
1499 health_code_update();
1500
1501 CDS_INIT_LIST_HEAD(&wait_queue.head);
1502
1503 DBG("[thread] Dispatch UST command started");
1504
1505 while (!CMM_LOAD_SHARED(dispatch_thread_exit)) {
1506 health_code_update();
1507
1508 /* Atomically prepare the queue futex */
1509 futex_nto1_prepare(&ust_cmd_queue.futex);
1510
1511 do {
1512 struct ust_app *app = NULL;
1513 ust_cmd = NULL;
1514
1515 /*
1516 * Make sure we don't have node(s) that have hung up before receiving
 * the notify socket. This cleans the list in order to avoid
 * memory leaks from notify sockets that are never seen.
1519 */
1520 sanitize_wait_queue(&wait_queue);
1521
1522 health_code_update();
1523 /* Dequeue command for registration */
1524 node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue);
1525 if (node == NULL) {
1526 DBG("Woken up but nothing in the UST command queue");
1527 /* Continue thread execution */
1528 break;
1529 }
1530
1531 ust_cmd = caa_container_of(node, struct ust_command, node);
1532
1533 DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
1534 " gid:%d sock:%d name:%s (version %d.%d)",
1535 ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
1536 ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
1537 ust_cmd->sock, ust_cmd->reg_msg.name,
1538 ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
1539
1540 if (ust_cmd->reg_msg.type == USTCTL_SOCKET_CMD) {
1541 wait_node = zmalloc(sizeof(*wait_node));
1542 if (!wait_node) {
1543 PERROR("zmalloc wait_node dispatch");
1544 ret = close(ust_cmd->sock);
1545 if (ret < 0) {
1546 PERROR("close ust sock dispatch %d", ust_cmd->sock);
1547 }
1548 lttng_fd_put(1, LTTNG_FD_APPS);
1549 free(ust_cmd);
1550 goto error;
1551 }
1552 CDS_INIT_LIST_HEAD(&wait_node->head);
1553
1554 /* Create application object if socket is CMD. */
1555 wait_node->app = ust_app_create(&ust_cmd->reg_msg,
1556 ust_cmd->sock);
1557 if (!wait_node->app) {
1558 ret = close(ust_cmd->sock);
1559 if (ret < 0) {
1560 PERROR("close ust sock dispatch %d", ust_cmd->sock);
1561 }
1562 lttng_fd_put(1, LTTNG_FD_APPS);
1563 free(wait_node);
1564 free(ust_cmd);
1565 continue;
1566 }
1567 /*
1568 * Add application to the wait queue so we can set the notify
1569 * socket before putting this object in the global ht.
1570 */
1571 cds_list_add(&wait_node->head, &wait_queue.head);
1572 wait_queue.count++;
1573
1574 free(ust_cmd);
1575 /*
1576 * We have to continue here since we don't have the notify
1577 * socket and the application MUST be added to the hash table
1578 * only at that moment.
1579 */
1580 continue;
1581 } else {
1582 /*
1583 * Look for the application in the local wait queue and set the
1584 * notify socket if found.
1585 */
1586 cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
1587 &wait_queue.head, head) {
1588 health_code_update();
1589 if (wait_node->app->pid == ust_cmd->reg_msg.pid) {
1590 wait_node->app->notify_sock = ust_cmd->sock;
1591 cds_list_del(&wait_node->head);
1592 wait_queue.count--;
1593 app = wait_node->app;
1594 free(wait_node);
1595 DBG3("UST app notify socket %d is set", ust_cmd->sock);
1596 break;
1597 }
1598 }
1599
1600 /*
 * With no application found at this stage, the received socket is
 * basically useless, so close it before we free the cmd data
 * structure for good.
1604 */
1605 if (!app) {
1606 ret = close(ust_cmd->sock);
1607 if (ret < 0) {
1608 PERROR("close ust sock dispatch %d", ust_cmd->sock);
1609 }
1610 lttng_fd_put(1, LTTNG_FD_APPS);
1611 }
1612 free(ust_cmd);
1613 }
1614
1615 if (app) {
1616 /*
1617 * @session_lock_list
1618 *
1619 * Lock the global session list so from the register up to the
1620 * registration done message, no thread can see the application
1621 * and change its state.
1622 */
1623 session_lock_list();
1624 rcu_read_lock();
1625
1626 /*
1627 * Add application to the global hash table. This needs to be
1628 * done before the update to the UST registry can locate the
1629 * application.
1630 */
1631 ust_app_add(app);
1632
1633 /* Set app version. This call will print an error if needed. */
1634 (void) ust_app_version(app);
1635
1636 /* Send notify socket through the notify pipe. */
1637 ret = send_socket_to_thread(apps_cmd_notify_pipe[1],
1638 app->notify_sock);
1639 if (ret < 0) {
1640 rcu_read_unlock();
1641 session_unlock_list();
1642 /* No notify thread, stop the UST tracing. */
1643 goto error;
1644 }
1645
1646 /*
1647 * Update newly registered application with the tracing
1648 * registry info already enabled information.
1649 */
1650 update_ust_app(app->sock);
1651
1652 /*
1653 * Don't care about return value. Let the manage apps threads
1654 * handle app unregistration upon socket close.
1655 */
1656 (void) ust_app_register_done(app->sock);
1657
1658 /*
1659 * Even if the application socket has been closed, send the app
1660 * to the thread and unregistration will take place at that
1661 * place.
1662 */
1663 ret = send_socket_to_thread(apps_cmd_pipe[1], app->sock);
1664 if (ret < 0) {
1665 rcu_read_unlock();
1666 session_unlock_list();
1667 /* No apps. thread, stop the UST tracing. */
1668 goto error;
1669 }
1670
1671 rcu_read_unlock();
1672 session_unlock_list();
1673 }
1674 } while (node != NULL);
1675
1676 health_poll_entry();
1677 /* Futex wait on queue. Blocking call on futex() */
1678 futex_nto1_wait(&ust_cmd_queue.futex);
1679 health_poll_exit();
1680 }
1681 /* Normal exit, no error */
1682 err = 0;
1683
1684 error:
1685 /* Clean up wait queue. */
1686 cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
1687 &wait_queue.head, head) {
1688 cds_list_del(&wait_node->head);
1689 wait_queue.count--;
1690 free(wait_node);
1691 }
1692
1693 DBG("Dispatch thread dying");
1694 if (err) {
1695 health_error();
1696 ERR("Health error occurred in %s", __func__);
1697 }
1698 health_unregister();
1699 return NULL;
1700 }
1701
1702 /*
 * This thread manages application registration.
1704 */
1705 static void *thread_registration_apps(void *data)
1706 {
1707 int sock = -1, i, ret, pollfd, err = -1;
1708 uint32_t revents, nb_fd;
1709 struct lttng_poll_event events;
1710 /*
1711 * Get allocated in this thread, enqueued to a global queue, dequeued and
1712 * freed in the manage apps thread.
1713 */
1714 struct ust_command *ust_cmd = NULL;
1715
1716 DBG("[thread] Manage application registration started");
1717
1718 health_register(HEALTH_TYPE_APP_REG);
1719
1720 if (testpoint(thread_registration_apps)) {
1721 goto error_testpoint;
1722 }
1723
1724 ret = lttcomm_listen_unix_sock(apps_sock);
1725 if (ret < 0) {
1726 goto error_listen;
1727 }
1728
1729 /*
1730 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
1731 * more will be added to this poll set.
1732 */
1733 ret = sessiond_set_thread_pollset(&events, 2);
1734 if (ret < 0) {
1735 goto error_create_poll;
1736 }
1737
1738 /* Add the application registration socket */
1739 ret = lttng_poll_add(&events, apps_sock, LPOLLIN | LPOLLRDHUP);
1740 if (ret < 0) {
1741 goto error_poll_add;
1742 }
1743
1744 /* Notify all applications to register */
1745 ret = notify_ust_apps(1);
1746 if (ret < 0) {
1747 ERR("Failed to notify applications or create the wait shared memory.\n"
1748 "Execution continues but there might be problem for already\n"
1749 "running applications that wishes to register.");
1750 }
1751
1752 while (1) {
1753 DBG("Accepting application registration");
1754
/* Infinite blocking call, waiting for transmission */
1756 restart:
1757 health_poll_entry();
1758 ret = lttng_poll_wait(&events, -1);
1759 health_poll_exit();
1760 if (ret < 0) {
1761 /*
1762 * Restart interrupted system call.
1763 */
1764 if (errno == EINTR) {
1765 goto restart;
1766 }
1767 goto error;
1768 }
1769
1770 nb_fd = ret;
1771
1772 for (i = 0; i < nb_fd; i++) {
1773 health_code_update();
1774
1775 /* Fetch once the poll data */
1776 revents = LTTNG_POLL_GETEV(&events, i);
1777 pollfd = LTTNG_POLL_GETFD(&events, i);
1778
1779 /* Thread quit pipe has been closed. Killing thread. */
1780 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
1781 if (ret) {
1782 err = 0;
1783 goto exit;
1784 }
1785
1786 /* Event on the registration socket */
1787 if (pollfd == apps_sock) {
1788 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1789 ERR("Register apps socket poll error");
1790 goto error;
1791 } else if (revents & LPOLLIN) {
1792 sock = lttcomm_accept_unix_sock(apps_sock);
1793 if (sock < 0) {
1794 goto error;
1795 }
1796
1797 /*
1798 * Set the CLOEXEC flag. Return code is useless because
1799 * either way, the show must go on.
1800 */
1801 (void) utils_set_fd_cloexec(sock);
1802
1803 /* Create UST registration command for enqueuing */
1804 ust_cmd = zmalloc(sizeof(struct ust_command));
1805 if (ust_cmd == NULL) {
1806 PERROR("ust command zmalloc");
1807 goto error;
1808 }
1809
1810 /*
1811 * Using message-based transmissions to ensure we don't
1812 * have to deal with partially received messages.
1813 */
1814 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
1815 if (ret < 0) {
1816 ERR("Exhausted file descriptors allowed for applications.");
1817 free(ust_cmd);
1818 ret = close(sock);
1819 if (ret) {
1820 PERROR("close");
1821 }
1822 sock = -1;
1823 continue;
1824 }
1825
1826 health_code_update();
1827 ret = ust_app_recv_registration(sock, &ust_cmd->reg_msg);
1828 if (ret < 0) {
1829 free(ust_cmd);
1830 /* Close socket of the application. */
1831 ret = close(sock);
1832 if (ret) {
1833 PERROR("close");
1834 }
1835 lttng_fd_put(LTTNG_FD_APPS, 1);
1836 sock = -1;
1837 continue;
1838 }
1839 health_code_update();
1840
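/*
 * Hand the socket fd over to the command; reset the local variable so
 * the cleanup path does not close it a second time.
 */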
1841 ust_cmd->sock = sock;
1842 sock = -1;
1843
1844 DBG("UST registration received with pid:%d ppid:%d uid:%d"
1845 " gid:%d sock:%d name:%s (version %d.%d)",
1846 ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
1847 ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
1848 ust_cmd->sock, ust_cmd->reg_msg.name,
1849 ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
1850
1851 /*
 * Lock-free enqueue of the registration request. The red pill
 * has been taken! This app will be part of the *system*.
1854 */
1855 cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);
1856
1857 /*
1858 * Wake the registration queue futex. Implicit memory
1859 * barrier with the exchange in cds_wfq_enqueue.
1860 */
1861 futex_nto1_wake(&ust_cmd_queue.futex);
1862 }
1863 }
1864 }
1865 }
1866
1867 exit:
1868 error:
1869 if (err) {
1870 health_error();
1871 ERR("Health error occurred in %s", __func__);
1872 }
1873
1874 /* Notify that the registration thread is gone */
1875 notify_ust_apps(0);
1876
1877 if (apps_sock >= 0) {
1878 ret = close(apps_sock);
1879 if (ret) {
1880 PERROR("close");
1881 }
1882 }
1883 if (sock >= 0) {
1884 ret = close(sock);
1885 if (ret) {
1886 PERROR("close");
1887 }
1888 lttng_fd_put(LTTNG_FD_APPS, 1);
1889 }
1890 unlink(apps_unix_sock_path);
1891
1892 error_poll_add:
1893 lttng_poll_clean(&events);
1894 error_listen:
1895 error_create_poll:
1896 error_testpoint:
1897 DBG("UST Registration thread cleanup complete");
1898 health_unregister();
1899
1900 return NULL;
1901 }
1902
1903 /*
 * Start the thread_manage_consumer. This must be done after an lttng-consumerd
 * exec, or it will fail.
1906 */
1907 static int spawn_consumer_thread(struct consumer_data *consumer_data)
1908 {
1909 int ret, clock_ret;
1910 struct timespec timeout;
1911
1912 /* Make sure we set the readiness flag to 0 because we are NOT ready */
1913 consumer_data->consumer_thread_is_ready = 0;
1914
1915 /* Setup pthread condition */
1916 ret = pthread_condattr_init(&consumer_data->condattr);
1917 if (ret != 0) {
1918 errno = ret;
1919 PERROR("pthread_condattr_init consumer data");
1920 goto error;
1921 }
1922
1923 /*
1924 * Set the monotonic clock in order to make sure we DO NOT jump in time
1925 * between the clock_gettime() call and the timedwait call. See bug #324
1926 * for a more details and how we noticed it.
1927 */
1928 ret = pthread_condattr_setclock(&consumer_data->condattr, CLOCK_MONOTONIC);
1929 if (ret != 0) {
1930 errno = ret;
1931 PERROR("pthread_condattr_setclock consumer data");
1932 goto error;
1933 }
1934
1935 ret = pthread_cond_init(&consumer_data->cond, &consumer_data->condattr);
1936 if (ret != 0) {
1937 errno = ret;
1938 PERROR("pthread_cond_init consumer data");
1939 goto error;
1940 }
1941
1942 ret = pthread_create(&consumer_data->thread, NULL, thread_manage_consumer,
1943 consumer_data);
1944 if (ret != 0) {
1945 PERROR("pthread_create consumer");
1946 ret = -1;
1947 goto error;
1948 }
1949
1950 /* We are about to wait on a pthread condition */
1951 pthread_mutex_lock(&consumer_data->cond_mutex);
1952
/* Get time for the pthread_cond_timedwait absolute timeout */
1954 clock_ret = clock_gettime(CLOCK_MONOTONIC, &timeout);
1955 /*
1956 * Set the timeout for the condition timed wait even if the clock gettime
1957 * call fails since we might loop on that call and we want to avoid to
1958 * increment the timeout too many times.
1959 */
1960 timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;
1961
1962 /*
 * The following loop COULD be skipped in some conditions, which is why ret
 * is set to 0 here: to make sure at least one round of the loop is done.
1966 */
1967 ret = 0;
1968
1969 /*
1970 * Loop until the condition is reached or when a timeout is reached. Note
1971 * that the pthread_cond_timedwait(P) man page specifies that EINTR can NOT
1972 * be returned but the pthread_cond(3), from the glibc-doc, says that it is
1973 * possible. This loop does not take any chances and works with both of
1974 * them.
1975 */
1976 while (!consumer_data->consumer_thread_is_ready && ret != ETIMEDOUT) {
1977 if (clock_ret < 0) {
1978 PERROR("clock_gettime spawn consumer");
1979 /* Infinite wait for the consumerd thread to be ready */
1980 ret = pthread_cond_wait(&consumer_data->cond,
1981 &consumer_data->cond_mutex);
1982 } else {
1983 ret = pthread_cond_timedwait(&consumer_data->cond,
1984 &consumer_data->cond_mutex, &timeout);
1985 }
1986 }
1987
1988 /* Release the pthread condition */
1989 pthread_mutex_unlock(&consumer_data->cond_mutex);
1990
1991 if (ret != 0) {
1992 errno = ret;
1993 if (ret == ETIMEDOUT) {
1994 /*
1995 * Call has timed out so we kill the kconsumerd_thread and return
1996 * an error.
1997 */
1998 ERR("Condition timed out. The consumer thread was never ready."
1999 " Killing it");
2000 ret = pthread_cancel(consumer_data->thread);
/* pthread_cancel() returns a positive error number on failure. */
if (ret) {
2002 PERROR("pthread_cancel consumer thread");
2003 }
2004 } else {
2005 PERROR("pthread_cond_wait failed consumer thread");
2006 }
2007 goto error;
2008 }
2009
2010 pthread_mutex_lock(&consumer_data->pid_mutex);
2011 if (consumer_data->pid == 0) {
2012 ERR("Consumerd did not start");
2013 pthread_mutex_unlock(&consumer_data->pid_mutex);
2014 goto error;
2015 }
2016 pthread_mutex_unlock(&consumer_data->pid_mutex);
2017
2018 return 0;
2019
2020 error:
2021 return ret;
2022 }
2023
2024 /*
2025 * Join consumer thread
2026 */
2027 static int join_consumer_thread(struct consumer_data *consumer_data)
2028 {
2029 void *status;
2030
2031 /* Consumer pid must be a real one. */
2032 if (consumer_data->pid > 0) {
2033 int ret;
2034 ret = kill(consumer_data->pid, SIGTERM);
2035 if (ret) {
2036 ERR("Error killing consumer daemon");
2037 return ret;
2038 }
2039 return pthread_join(consumer_data->thread, &status);
2040 } else {
2041 return 0;
2042 }
2043 }
2044
2045 /*
2046 * Fork and exec a consumer daemon (consumerd).
2047 *
 * Return the child pid if successful, else a negative value.
2049 */
2050 static pid_t spawn_consumerd(struct consumer_data *consumer_data)
2051 {
2052 int ret;
2053 pid_t pid;
2054 const char *consumer_to_use;
2055 const char *verbosity;
2056 struct stat st;
2057
2058 DBG("Spawning consumerd");
2059
2060 pid = fork();
2061 if (pid == 0) {
2062 /*
2063 * Exec consumerd.
2064 */
2065 if (opt_verbose_consumer) {
2066 verbosity = "--verbose";
2067 } else {
2068 verbosity = "--quiet";
2069 }
2070 switch (consumer_data->type) {
2071 case LTTNG_CONSUMER_KERNEL:
2072 /*
2073 * Find out which consumerd to execute. We will first try the
2074 * 64-bit path, then the sessiond's installation directory, and
 * finally fall back on the 32-bit one.
2076 */
2077 DBG3("Looking for a kernel consumer at these locations:");
2078 DBG3(" 1) %s", consumerd64_bin);
2079 DBG3(" 2) %s/%s", INSTALL_BIN_PATH, CONSUMERD_FILE);
2080 DBG3(" 3) %s", consumerd32_bin);
2081 if (stat(consumerd64_bin, &st) == 0) {
2082 DBG3("Found location #1");
2083 consumer_to_use = consumerd64_bin;
2084 } else if (stat(INSTALL_BIN_PATH "/" CONSUMERD_FILE, &st) == 0) {
2085 DBG3("Found location #2");
2086 consumer_to_use = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
2087 } else if (stat(consumerd32_bin, &st) == 0) {
2088 DBG3("Found location #3");
2089 consumer_to_use = consumerd32_bin;
2090 } else {
2091 DBG("Could not find any valid consumerd executable");
2092 break;
2093 }
2094 DBG("Using kernel consumer at: %s", consumer_to_use);
2095 execl(consumer_to_use,
2096 "lttng-consumerd", verbosity, "-k",
2097 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2098 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
2099 NULL);
2100 break;
2101 case LTTNG_CONSUMER64_UST:
2102 {
2103 char *tmpnew = NULL;
2104
2105 if (consumerd64_libdir[0] != '\0') {
2106 char *tmp;
2107 size_t tmplen;
2108
2109 tmp = getenv("LD_LIBRARY_PATH");
2110 if (!tmp) {
2111 tmp = "";
2112 }
2113 tmplen = strlen("LD_LIBRARY_PATH=")
2114 + strlen(consumerd64_libdir) + 1 /* : */ + strlen(tmp);
2115 tmpnew = zmalloc(tmplen + 1 /* \0 */);
2116 if (!tmpnew) {
2117 ret = -ENOMEM;
2118 goto error;
2119 }
2120 strcpy(tmpnew, "LD_LIBRARY_PATH=");
2121 strcat(tmpnew, consumerd64_libdir);
2122 if (tmp[0] != '\0') {
2123 strcat(tmpnew, ":");
2124 strcat(tmpnew, tmp);
2125 }
2126 ret = putenv(tmpnew);
2127 if (ret) {
2128 ret = -errno;
2129 free(tmpnew);
2130 goto error;
2131 }
2132 }
2133 DBG("Using 64-bit UST consumer at: %s", consumerd64_bin);
2134 ret = execl(consumerd64_bin, "lttng-consumerd", verbosity, "-u",
2135 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2136 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
2137 NULL);
2138 if (consumerd64_libdir[0] != '\0') {
2139 free(tmpnew);
2140 }
2141 if (ret) {
2142 goto error;
2143 }
2144 break;
2145 }
2146 case LTTNG_CONSUMER32_UST:
2147 {
2148 char *tmpnew = NULL;
2149
2150 if (consumerd32_libdir[0] != '\0') {
2151 char *tmp;
2152 size_t tmplen;
2153
2154 tmp = getenv("LD_LIBRARY_PATH");
2155 if (!tmp) {
2156 tmp = "";
2157 }
2158 tmplen = strlen("LD_LIBRARY_PATH=")
2159 + strlen(consumerd32_libdir) + 1 /* : */ + strlen(tmp);
2160 tmpnew = zmalloc(tmplen + 1 /* \0 */);
2161 if (!tmpnew) {
2162 ret = -ENOMEM;
2163 goto error;
2164 }
2165 strcpy(tmpnew, "LD_LIBRARY_PATH=");
2166 strcat(tmpnew, consumerd32_libdir);
2167 if (tmp[0] != '\0') {
2168 strcat(tmpnew, ":");
2169 strcat(tmpnew, tmp);
2170 }
2171 ret = putenv(tmpnew);
2172 if (ret) {
2173 ret = -errno;
2174 free(tmpnew);
2175 goto error;
2176 }
2177 }
2178 DBG("Using 32-bit UST consumer at: %s", consumerd32_bin);
2179 ret = execl(consumerd32_bin, "lttng-consumerd", verbosity, "-u",
2180 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2181 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
2182 NULL);
2183 if (consumerd32_libdir[0] != '\0') {
2184 free(tmpnew);
2185 }
2186 if (ret) {
2187 goto error;
2188 }
2189 break;
2190 }
2191 default:
2192 ERR("Unknown consumer type");
2193 exit(EXIT_FAILURE);
2194 }
2195 if (errno != 0) {
2196 PERROR("kernel start consumer exec");
2197 }
2198 exit(EXIT_FAILURE);
2199 } else if (pid > 0) {
2200 ret = pid;
2201 } else {
2202 PERROR("start consumer fork");
2203 ret = -errno;
2204 }
2205 error:
2206 return ret;
2207 }
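
/*
 * Note that the two UST cases above duplicate the LD_LIBRARY_PATH prepend
 * logic. A possible factoring is sketched below; this helper does not exist
 * in the code base and is shown for illustration only (it assumes
 * asprintf(3), available since _GNU_SOURCE is defined):
 */
#if 0	/* illustrative sketch only, not compiled */
static int prepend_ld_library_path(const char *libdir)
{
	int ret;
	char *tmpnew;
	const char *tmp = getenv("LD_LIBRARY_PATH");

	if (!tmp) {
		tmp = "";
	}
	ret = asprintf(&tmpnew, "LD_LIBRARY_PATH=%s%s%s", libdir,
			tmp[0] != '\0' ? ":" : "", tmp);
	if (ret < 0) {
		return -ENOMEM;
	}
	/* putenv() keeps the string itself, so it must outlive the env. */
	ret = putenv(tmpnew);
	if (ret) {
		ret = -errno;
		free(tmpnew);
	}
	return ret;
}
#endif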
2208
2209 /*
2210 * Spawn the consumerd daemon and the consumer control thread.
2211 */
2212 static int start_consumerd(struct consumer_data *consumer_data)
2213 {
2214 int ret;
2215
2216 /*
2217 * Set the listen() state on the socket since there is a possible race
2218 * between the exec() of the consumer daemon and this call if placed in the
2219 * consumer thread. See bug #366 for more details.
2220 */
2221 ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
2222 if (ret < 0) {
2223 goto error;
2224 }
2225
2226 pthread_mutex_lock(&consumer_data->pid_mutex);
2227 if (consumer_data->pid != 0) {
2228 pthread_mutex_unlock(&consumer_data->pid_mutex);
2229 goto end;
2230 }
2231
2232 ret = spawn_consumerd(consumer_data);
2233 if (ret < 0) {
2234 ERR("Spawning consumerd failed");
2235 pthread_mutex_unlock(&consumer_data->pid_mutex);
2236 goto error;
2237 }
2238
2239 /* Setting up the consumer_data pid */
2240 consumer_data->pid = ret;
2241 DBG2("Consumer pid %d", consumer_data->pid);
2242 pthread_mutex_unlock(&consumer_data->pid_mutex);
2243
2244 DBG2("Spawning consumer control thread");
2245 ret = spawn_consumer_thread(consumer_data);
2246 if (ret < 0) {
2247 ERR("Fatal error spawning consumer control thread");
2248 goto error;
2249 }
2250
2251 end:
2252 return 0;
2253
2254 error:
2255 /* Cleanup already created sockets on error. */
2256 if (consumer_data->err_sock >= 0) {
2257 int err;
2258
2259 err = close(consumer_data->err_sock);
2260 if (err < 0) {
2261 PERROR("close consumer data error socket");
2262 }
2263 }
2264 return ret;
2265 }
2266
2267 /*
2268 * Compute the health status of the consumers. If one of them is in a bad
2269 * state (zero), we return 0.
2270 */
2271 static int check_consumer_health(void)
2272 {
2273 int ret;
2274
2275 ret = health_check_state(HEALTH_TYPE_CONSUMER);
2276
2277 DBG3("Health consumer check %d", ret);
2278
2279 return ret;
2280 }
2281
2282 /*
2283 * Setup necessary data for kernel tracer action.
2284 */
2285 static int init_kernel_tracer(void)
2286 {
2287 int ret;
2288
2289 /* Modprobe lttng kernel modules */
2290 ret = modprobe_lttng_control();
2291 if (ret < 0) {
2292 goto error;
2293 }
2294
2295 /* Open the LTTng kernel tracer proc file */
2296 kernel_tracer_fd = open(module_proc_lttng, O_RDWR);
2297 if (kernel_tracer_fd < 0) {
2298 DBG("Failed to open %s", module_proc_lttng);
2299 ret = -1;
2300 goto error_open;
2301 }
2302
2303 /* Validate kernel version */
2304 ret = kernel_validate_version(kernel_tracer_fd);
2305 if (ret < 0) {
2306 goto error_version;
2307 }
2308
2309 ret = modprobe_lttng_data();
2310 if (ret < 0) {
2311 goto error_modules;
2312 }
2313
2314 DBG("Kernel tracer fd %d", kernel_tracer_fd);
2315 return 0;
2316
2317 error_version:
2318 modprobe_remove_lttng_control();
2319 ret = close(kernel_tracer_fd);
2320 if (ret) {
2321 PERROR("close");
2322 }
2323 kernel_tracer_fd = -1;
2324 return LTTNG_ERR_KERN_VERSION;
2325
2326 error_modules:
2327 ret = close(kernel_tracer_fd);
2328 if (ret) {
2329 PERROR("close");
2330 }
2331
2332 error_open:
2333 modprobe_remove_lttng_control();
2334
2335 error:
2336 WARN("No kernel tracer available");
2337 kernel_tracer_fd = -1;
2338 if (!is_root) {
2339 return LTTNG_ERR_NEED_ROOT_SESSIOND;
2340 } else {
2341 return LTTNG_ERR_KERN_NA;
2342 }
2343 }
2344
2345
2346 /*
2347 * Copy consumer output from the tracing session to the domain session. The
2348 * function also applies the right modification on a per domain basis for the
2349 * trace files destination directory.
2350 *
2351 * Should *NOT* be called with RCU read-side lock held.
2352 */
2353 static int copy_session_consumer(int domain, struct ltt_session *session)
2354 {
2355 int ret;
2356 const char *dir_name;
2357 struct consumer_output *consumer;
2358
2359 assert(session);
2360 assert(session->consumer);
2361
2362 switch (domain) {
2363 case LTTNG_DOMAIN_KERNEL:
2364 DBG3("Copying tracing session consumer output in kernel session");
2365 /*
2366 * XXX: We should audit the session creation and what this function
2367 * does "extra" in order to avoid a destroy since this function is used
2368 * in the domain session creation (kernel and ust) only. Same for UST
2369 * domain.
2370 */
2371 if (session->kernel_session->consumer) {
2372 consumer_destroy_output(session->kernel_session->consumer);
2373 }
2374 session->kernel_session->consumer =
2375 consumer_copy_output(session->consumer);
2376 /* Ease our life a bit for the next part */
2377 consumer = session->kernel_session->consumer;
2378 dir_name = DEFAULT_KERNEL_TRACE_DIR;
2379 break;
2380 case LTTNG_DOMAIN_UST:
2381 DBG3("Copying tracing session consumer output in UST session");
2382 if (session->ust_session->consumer) {
2383 consumer_destroy_output(session->ust_session->consumer);
2384 }
2385 session->ust_session->consumer =
2386 consumer_copy_output(session->consumer);
2387 /* Ease our life a bit for the next part */
2388 consumer = session->ust_session->consumer;
2389 dir_name = DEFAULT_UST_TRACE_DIR;
2390 break;
2391 default:
2392 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
2393 goto error;
2394 }
2395
2396 /* Append correct directory to subdir */
2397 strncat(consumer->subdir, dir_name,
2398 sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
2399 DBG3("Copy session consumer subdir %s", consumer->subdir);
2400
2401 ret = LTTNG_OK;
2402
2403 error:
2404 return ret;
2405 }
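
/*
 * For example, assuming DEFAULT_KERNEL_TRACE_DIR expands to "/kernel" and
 * the session subdir was "/mysession-20130101-120000", the kernel domain
 * consumer subdir becomes "/mysession-20130101-120000/kernel" after the
 * strncat() above (values shown for illustration only).
 */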
2406
2407 /*
2408 * Create a UST session and add it to the session ust list.
2409 *
2410 * Should *NOT* be called with RCU read-side lock held.
2411 */
2412 static int create_ust_session(struct ltt_session *session,
2413 struct lttng_domain *domain)
2414 {
2415 int ret;
2416 struct ltt_ust_session *lus = NULL;
2417
2418 assert(session);
2419 assert(domain);
2420 assert(session->consumer);
2421
2422 switch (domain->type) {
2423 case LTTNG_DOMAIN_UST:
2424 break;
2425 default:
2426 ERR("Unknown UST domain on create session %d", domain->type);
2427 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
2428 goto error;
2429 }
2430
2431 DBG("Creating UST session");
2432
2433 lus = trace_ust_create_session(session->id);
2434 if (lus == NULL) {
2435 ret = LTTNG_ERR_UST_SESS_FAIL;
2436 goto error;
2437 }
2438
2439 lus->uid = session->uid;
2440 lus->gid = session->gid;
2441 lus->output_traces = session->output_traces;
2442 lus->snapshot_mode = session->snapshot_mode;
2443 session->ust_session = lus;
2444
2445 /* Copy session output to the newly created UST session */
2446 ret = copy_session_consumer(domain->type, session);
2447 if (ret != LTTNG_OK) {
2448 goto error;
2449 }
2450
2451 return LTTNG_OK;
2452
2453 error:
2454 free(lus);
2455 session->ust_session = NULL;
2456 return ret;
2457 }
2458
2459 /*
2460 * Create a kernel tracer session then create the default channel.
2461 */
2462 static int create_kernel_session(struct ltt_session *session)
2463 {
2464 int ret;
2465
2466 DBG("Creating kernel session");
2467
2468 ret = kernel_create_session(session, kernel_tracer_fd);
2469 if (ret < 0) {
2470 ret = LTTNG_ERR_KERN_SESS_FAIL;
2471 goto error;
2472 }
2473
2474 /* Code flow safety */
2475 assert(session->kernel_session);
2476
2477 /* Copy session output to the newly created Kernel session */
2478 ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
2479 if (ret != LTTNG_OK) {
2480 goto error;
2481 }
2482
2483 /* Create directory(ies) on local filesystem. */
2484 if (session->kernel_session->consumer->type == CONSUMER_DST_LOCAL &&
2485 strlen(session->kernel_session->consumer->dst.trace_path) > 0) {
2486 ret = run_as_mkdir_recursive(
2487 session->kernel_session->consumer->dst.trace_path,
2488 S_IRWXU | S_IRWXG, session->uid, session->gid);
2489 if (ret < 0) {
2490 if (ret != -EEXIST) {
2491 ERR("Trace directory creation error");
2492 goto error;
2493 }
2494 }
2495 }
2496
2497 session->kernel_session->uid = session->uid;
2498 session->kernel_session->gid = session->gid;
2499 session->kernel_session->output_traces = session->output_traces;
2500 session->kernel_session->snapshot_mode = session->snapshot_mode;
2501
2502 return LTTNG_OK;
2503
2504 error:
2505 trace_kernel_destroy_session(session->kernel_session);
2506 session->kernel_session = NULL;
2507 return ret;
2508 }
2509
2510 /*
2511 * Count the number of sessions accessible by the given uid/gid.
2512 */
2513 static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
2514 {
2515 unsigned int i = 0;
2516 struct ltt_session *session;
2517
2518 DBG("Counting number of available session for UID %d GID %d",
2519 uid, gid);
2520 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
2521 /*
2522 * Only list the sessions the user can control.
2523 */
2524 if (!session_access_ok(session, uid, gid)) {
2525 continue;
2526 }
2527 i++;
2528 }
2529 return i;
2530 }
2531
2532 /*
2533 * Process the command requested by the lttng client within the command
2534 * context structure. This function makes sure that the return structure (llm)
2535 * is set and ready for transmission before returning.
2536 *
2537 * Return any error encountered or 0 for success.
2538 *
2539 * "sock" is only used for special-case var. len data.
2540 *
2541 * Should *NOT* be called with RCU read-side lock held.
2542 */
2543 static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
2544 int *sock_error)
2545 {
2546 int ret = LTTNG_OK;
2547 int need_tracing_session = 1;
2548 int need_domain;
2549
2550 DBG("Processing client command %d", cmd_ctx->lsm->cmd_type);
2551
2552 *sock_error = 0;
2553
2554 switch (cmd_ctx->lsm->cmd_type) {
2555 case LTTNG_CREATE_SESSION:
2556 case LTTNG_CREATE_SESSION_SNAPSHOT:
2557 case LTTNG_DESTROY_SESSION:
2558 case LTTNG_LIST_SESSIONS:
2559 case LTTNG_LIST_DOMAINS:
2560 case LTTNG_START_TRACE:
2561 case LTTNG_STOP_TRACE:
2562 case LTTNG_DATA_PENDING:
2563 case LTTNG_SNAPSHOT_ADD_OUTPUT:
2564 case LTTNG_SNAPSHOT_DEL_OUTPUT:
2565 case LTTNG_SNAPSHOT_LIST_OUTPUT:
2566 case LTTNG_SNAPSHOT_RECORD:
2567 need_domain = 0;
2568 break;
2569 default:
2570 need_domain = 1;
2571 }
2572
2573 if (opt_no_kernel && need_domain
2574 && cmd_ctx->lsm->domain.type == LTTNG_DOMAIN_KERNEL) {
2575 if (!is_root) {
2576 ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
2577 } else {
2578 ret = LTTNG_ERR_KERN_NA;
2579 }
2580 goto error;
2581 }
2582
2583 /* Deny register consumer if we already have a spawned consumer. */
2584 if (cmd_ctx->lsm->cmd_type == LTTNG_REGISTER_CONSUMER) {
2585 pthread_mutex_lock(&kconsumer_data.pid_mutex);
2586 if (kconsumer_data.pid > 0) {
2587 ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
2588 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2589 goto error;
2590 }
2591 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2592 }
2593
2594 /*
2595 * Check for commands that don't need to allocate a return payload. We do
2596 * this here so we don't have to make the call for a zero-sized payload in
2597 * each command handler.
2598 */
2599 switch(cmd_ctx->lsm->cmd_type) {
2600 case LTTNG_LIST_SESSIONS:
2601 case LTTNG_LIST_TRACEPOINTS:
2602 case LTTNG_LIST_TRACEPOINT_FIELDS:
2603 case LTTNG_LIST_DOMAINS:
2604 case LTTNG_LIST_CHANNELS:
2605 case LTTNG_LIST_EVENTS:
2606 break;
2607 default:
2608 /* Setup lttng message with no payload */
2609 ret = setup_lttng_msg(cmd_ctx, 0);
2610 if (ret < 0) {
2611 /* This label does not try to unlock the session */
2612 goto init_setup_error;
2613 }
2614 }
2615
2616 /* Commands that DO NOT need a session. */
2617 switch (cmd_ctx->lsm->cmd_type) {
2618 case LTTNG_CREATE_SESSION:
2619 case LTTNG_CREATE_SESSION_SNAPSHOT:
2620 case LTTNG_CALIBRATE:
2621 case LTTNG_LIST_SESSIONS:
2622 case LTTNG_LIST_TRACEPOINTS:
2623 case LTTNG_LIST_TRACEPOINT_FIELDS:
2624 need_tracing_session = 0;
2625 break;
2626 default:
2627 DBG("Getting session %s by name", cmd_ctx->lsm->session.name);
2628 /*
2629 * We keep the session list lock across _all_ commands
2630 * for now, because the per-session lock does not
2631 * handle teardown properly.
2632 */
2633 session_lock_list();
2634 cmd_ctx->session = session_find_by_name(cmd_ctx->lsm->session.name);
2635 if (cmd_ctx->session == NULL) {
2636 ret = LTTNG_ERR_SESS_NOT_FOUND;
2637 goto error;
2638 } else {
2639 /* Acquire lock for the session */
2640 session_lock(cmd_ctx->session);
2641 }
2642 break;
2643 }
2644
2645 if (!need_domain) {
2646 goto skip_domain;
2647 }
2648
2649 /*
2650 * Check domain type for specific "pre-action".
2651 */
2652 switch (cmd_ctx->lsm->domain.type) {
2653 case LTTNG_DOMAIN_KERNEL:
2654 if (!is_root) {
2655 ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
2656 goto error;
2657 }
2658
2659 /* Kernel tracer check */
2660 if (kernel_tracer_fd == -1) {
2661 /* Basically, load kernel tracer modules */
2662 ret = init_kernel_tracer();
2663 if (ret != 0) {
2664 goto error;
2665 }
2666 }
2667
2668 /* Consumer is in an ERROR state. Report back to client */
2669 if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
2670 ret = LTTNG_ERR_NO_KERNCONSUMERD;
2671 goto error;
2672 }
2673
2674 /* Need a session for kernel command */
2675 if (need_tracing_session) {
2676 if (cmd_ctx->session->kernel_session == NULL) {
2677 ret = create_kernel_session(cmd_ctx->session);
2678 if (ret < 0) {
2679 ret = LTTNG_ERR_KERN_SESS_FAIL;
2680 goto error;
2681 }
2682 }
2683
2684 /* Start the kernel consumer daemon */
2685 pthread_mutex_lock(&kconsumer_data.pid_mutex);
2686 if (kconsumer_data.pid == 0 &&
2687 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
2688 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2689 ret = start_consumerd(&kconsumer_data);
2690 if (ret < 0) {
2691 ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
2692 goto error;
2693 }
2694 uatomic_set(&kernel_consumerd_state, CONSUMER_STARTED);
2695 } else {
2696 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2697 }
2698
2699 /*
2700 * The consumer was just spawned so we need to add the socket to
2701 * the consumer output of the session if one exists.
2702 */
2703 ret = consumer_create_socket(&kconsumer_data,
2704 cmd_ctx->session->kernel_session->consumer);
2705 if (ret < 0) {
2706 goto error;
2707 }
2708 }
2709
2710 break;
2711 case LTTNG_DOMAIN_UST:
2712 {
2713 if (!ust_app_supported()) {
2714 ret = LTTNG_ERR_NO_UST;
2715 goto error;
2716 }
2717 /* Consumer is in an ERROR state. Report back to client */
2718 if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
2719 ret = LTTNG_ERR_NO_USTCONSUMERD;
2720 goto error;
2721 }
2722
2723 if (need_tracing_session) {
2724 /* Create UST session if none exist. */
2725 if (cmd_ctx->session->ust_session == NULL) {
2726 ret = create_ust_session(cmd_ctx->session,
2727 &cmd_ctx->lsm->domain);
2728 if (ret != LTTNG_OK) {
2729 goto error;
2730 }
2731 }
2732
2733 /* Start the UST consumer daemons */
2734 /* 64-bit */
2735 pthread_mutex_lock(&ustconsumer64_data.pid_mutex);
2736 if (consumerd64_bin[0] != '\0' &&
2737 ustconsumer64_data.pid == 0 &&
2738 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
2739 pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
2740 ret = start_consumerd(&ustconsumer64_data);
2741 if (ret < 0) {
2742 ret = LTTNG_ERR_UST_CONSUMER64_FAIL;
2743 uatomic_set(&ust_consumerd64_fd, -EINVAL);
2744 goto error;
2745 }
2746
2747 uatomic_set(&ust_consumerd64_fd, ustconsumer64_data.cmd_sock);
2748 uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
2749 } else {
2750 pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
2751 }
2752
2753 /*
2754 * Setup socket for consumer 64 bit. No need for atomic access
2755 * since it was set above and can ONLY be set in this thread.
2756 */
2757 ret = consumer_create_socket(&ustconsumer64_data,
2758 cmd_ctx->session->ust_session->consumer);
2759 if (ret < 0) {
2760 goto error;
2761 }
2762
2763 /* 32-bit */
pthread_mutex_lock(&ustconsumer32_data.pid_mutex);
2764 if (consumerd32_bin[0] != '\0' &&
2765 ustconsumer32_data.pid == 0 &&
2766 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
2767 pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
2768 ret = start_consumerd(&ustconsumer32_data);
2769 if (ret < 0) {
2770 ret = LTTNG_ERR_UST_CONSUMER32_FAIL;
2771 uatomic_set(&ust_consumerd32_fd, -EINVAL);
2772 goto error;
2773 }
2774
2775 uatomic_set(&ust_consumerd32_fd, ustconsumer32_data.cmd_sock);
2776 uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
2777 } else {
2778 pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
2779 }
2780
2781 /*
2782 * Setup socket for consumer 32 bit. No need for atomic access
2783 * since it was set above and can ONLY be set in this thread.
2784 */
2785 ret = consumer_create_socket(&ustconsumer32_data,
2786 cmd_ctx->session->ust_session->consumer);
2787 if (ret < 0) {
2788 goto error;
2789 }
2790 }
2791 break;
2792 }
2793 default:
2794 break;
2795 }
2796 skip_domain:
2797
2798 /* Validate consumer daemon state when start/stop trace command */
2799 if (cmd_ctx->lsm->cmd_type == LTTNG_START_TRACE ||
2800 cmd_ctx->lsm->cmd_type == LTTNG_STOP_TRACE) {
2801 switch (cmd_ctx->lsm->domain.type) {
2802 case LTTNG_DOMAIN_UST:
2803 if (uatomic_read(&ust_consumerd_state) != CONSUMER_STARTED) {
2804 ret = LTTNG_ERR_NO_USTCONSUMERD;
2805 goto error;
2806 }
2807 break;
2808 case LTTNG_DOMAIN_KERNEL:
2809 if (uatomic_read(&kernel_consumerd_state) != CONSUMER_STARTED) {
2810 ret = LTTNG_ERR_NO_KERNCONSUMERD;
2811 goto error;
2812 }
2813 break;
2814 }
2815 }
2816
2817 /*
2818 * Check that the UID or GID match that of the tracing session.
2819 * The root user can interact with all sessions.
2820 */
2821 if (need_tracing_session) {
2822 if (!session_access_ok(cmd_ctx->session,
2823 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
2824 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds))) {
2825 ret = LTTNG_ERR_EPERM;
2826 goto error;
2827 }
2828 }
2829
2830 /*
2831 * Send relayd information to consumer as soon as we have a domain and a
2832 * session defined.
2833 */
2834 if (cmd_ctx->session && need_domain) {
2835 /*
2836 * Setup relayd if not done yet. If the relayd information was already
2837 * sent to the consumer, this call will gracefully return.
2838 */
2839 ret = cmd_setup_relayd(cmd_ctx->session);
2840 if (ret != LTTNG_OK) {
2841 goto error;
2842 }
2843 }
2844
2845 /* Process by command type */
2846 switch (cmd_ctx->lsm->cmd_type) {
2847 case LTTNG_ADD_CONTEXT:
2848 {
2849 ret = cmd_add_context(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2850 cmd_ctx->lsm->u.context.channel_name,
2851 &cmd_ctx->lsm->u.context.ctx, kernel_poll_pipe[1]);
2852 break;
2853 }
2854 case LTTNG_DISABLE_CHANNEL:
2855 {
2856 ret = cmd_disable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2857 cmd_ctx->lsm->u.disable.channel_name);
2858 break;
2859 }
2860 case LTTNG_DISABLE_EVENT:
2861 {
2862 ret = cmd_disable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2863 cmd_ctx->lsm->u.disable.channel_name,
2864 cmd_ctx->lsm->u.disable.name);
2865 break;
2866 }
2867 case LTTNG_DISABLE_ALL_EVENT:
2868 {
2869 DBG("Disabling all events");
2870
2871 ret = cmd_disable_event_all(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2872 cmd_ctx->lsm->u.disable.channel_name);
2873 break;
2874 }
2875 case LTTNG_ENABLE_CHANNEL:
2876 {
2877 ret = cmd_enable_channel(cmd_ctx->session, &cmd_ctx->lsm->domain,
2878 &cmd_ctx->lsm->u.channel.chan, kernel_poll_pipe[1]);
2879 break;
2880 }
2881 case LTTNG_ENABLE_EVENT:
2882 {
2883 ret = cmd_enable_event(cmd_ctx->session, &cmd_ctx->lsm->domain,
2884 cmd_ctx->lsm->u.enable.channel_name,
2885 &cmd_ctx->lsm->u.enable.event, NULL, kernel_poll_pipe[1]);
2886 break;
2887 }
2888 case LTTNG_ENABLE_ALL_EVENT:
2889 {
2890 DBG("Enabling all events");
2891
2892 ret = cmd_enable_event_all(cmd_ctx->session, &cmd_ctx->lsm->domain,
2893 cmd_ctx->lsm->u.enable.channel_name,
2894 cmd_ctx->lsm->u.enable.event.type, NULL, kernel_poll_pipe[1]);
2895 break;
2896 }
2897 case LTTNG_LIST_TRACEPOINTS:
2898 {
2899 struct lttng_event *events;
2900 ssize_t nb_events;
2901
2902 nb_events = cmd_list_tracepoints(cmd_ctx->lsm->domain.type, &events);
2903 if (nb_events < 0) {
2904 /* Return value is a negative lttng_error_code. */
2905 ret = -nb_events;
2906 goto error;
2907 }
2908
2909 /*
2910 * Setup lttng message with payload size set to the event list size in
2911 * bytes and then copy list into the llm payload.
2912 */
2913 ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event) * nb_events);
2914 if (ret < 0) {
2915 free(events);
2916 goto setup_error;
2917 }
2918
2919 /* Copy event list into message payload */
2920 memcpy(cmd_ctx->llm->payload, events,
2921 sizeof(struct lttng_event) * nb_events);
2922
2923 free(events);
2924
2925 ret = LTTNG_OK;
2926 break;
2927 }
2928 case LTTNG_LIST_TRACEPOINT_FIELDS:
2929 {
2930 struct lttng_event_field *fields;
2931 ssize_t nb_fields;
2932
2933 nb_fields = cmd_list_tracepoint_fields(cmd_ctx->lsm->domain.type,
2934 &fields);
2935 if (nb_fields < 0) {
2936 /* Return value is a negative lttng_error_code. */
2937 ret = -nb_fields;
2938 goto error;
2939 }
2940
2941 /*
2942 * Setup lttng message with payload size set to the field list size in
2943 * bytes and then copy list into the llm payload.
2944 */
2945 ret = setup_lttng_msg(cmd_ctx,
2946 sizeof(struct lttng_event_field) * nb_fields);
2947 if (ret < 0) {
2948 free(fields);
2949 goto setup_error;
2950 }
2951
2952 /* Copy field list into message payload */
2953 memcpy(cmd_ctx->llm->payload, fields,
2954 sizeof(struct lttng_event_field) * nb_fields);
2955
2956 free(fields);
2957
2958 ret = LTTNG_OK;
2959 break;
2960 }
2961 case LTTNG_SET_CONSUMER_URI:
2962 {
2963 size_t nb_uri, len;
2964 struct lttng_uri *uris;
2965
2966 nb_uri = cmd_ctx->lsm->u.uri.size;
2967 len = nb_uri * sizeof(struct lttng_uri);
2968
2969 if (nb_uri == 0) {
2970 ret = LTTNG_ERR_INVALID;
2971 goto error;
2972 }
2973
2974 uris = zmalloc(len);
2975 if (uris == NULL) {
2976 ret = LTTNG_ERR_FATAL;
2977 goto error;
2978 }
2979
2980 /* Receive variable len data */
2981 DBG("Receiving %zu URI(s) from client ...", nb_uri);
2982 ret = lttcomm_recv_unix_sock(sock, uris, len);
2983 if (ret <= 0) {
2984 DBG("No URIs received from client... continuing");
2985 *sock_error = 1;
2986 ret = LTTNG_ERR_SESSION_FAIL;
2987 free(uris);
2988 goto error;
2989 }
2990
2991 ret = cmd_set_consumer_uri(cmd_ctx->lsm->domain.type, cmd_ctx->session,
2992 nb_uri, uris);
2993 if (ret != LTTNG_OK) {
2994 free(uris);
2995 goto error;
2996 }
2997
2998 /*
2999 * XXX: 0 means that this URI should be applied on the session. Should
3000 * be a DOMAIN enum.
3001 */
3002 if (cmd_ctx->lsm->domain.type == 0) {
3003 /* Add the URI for the UST session if a consumer is present. */
3004 if (cmd_ctx->session->ust_session &&
3005 cmd_ctx->session->ust_session->consumer) {
3006 ret = cmd_set_consumer_uri(LTTNG_DOMAIN_UST, cmd_ctx->session,
3007 nb_uri, uris);
3008 } else if (cmd_ctx->session->kernel_session &&
3009 cmd_ctx->session->kernel_session->consumer) {
3010 ret = cmd_set_consumer_uri(LTTNG_DOMAIN_KERNEL,
3011 cmd_ctx->session, nb_uri, uris);
3012 }
3013 }
3014
3015 free(uris);
3016
3017 break;
3018 }
3019 case LTTNG_START_TRACE:
3020 {
3021 ret = cmd_start_trace(cmd_ctx->session);
3022 break;
3023 }
3024 case LTTNG_STOP_TRACE:
3025 {
3026 ret = cmd_stop_trace(cmd_ctx->session);
3027 break;
3028 }
3029 case LTTNG_CREATE_SESSION:
3030 {
3031 size_t nb_uri, len;
3032 struct lttng_uri *uris = NULL;
3033
3034 nb_uri = cmd_ctx->lsm->u.uri.size;
3035 len = nb_uri * sizeof(struct lttng_uri);
3036
3037 if (nb_uri > 0) {
3038 uris = zmalloc(len);
3039 if (uris == NULL) {
3040 ret = LTTNG_ERR_FATAL;
3041 goto error;
3042 }
3043
3044 /* Receive variable len data */
3045 DBG("Waiting for %zu URIs from client ...", nb_uri);
3046 ret = lttcomm_recv_unix_sock(sock, uris, len);
3047 if (ret <= 0) {
3048 DBG("No URIs received from client... continuing");
3049 *sock_error = 1;
3050 ret = LTTNG_ERR_SESSION_FAIL;
3051 free(uris);
3052 goto error;
3053 }
3054
3055 if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
3056 DBG("Creating session with ONE network URI is a bad call");
3057 ret = LTTNG_ERR_SESSION_FAIL;
3058 free(uris);
3059 goto error;
3060 }
3061 }
3062
3063 ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris, nb_uri,
3064 &cmd_ctx->creds);
3065
3066 free(uris);
3067
3068 break;
3069 }
3070 case LTTNG_DESTROY_SESSION:
3071 {
3072 ret = cmd_destroy_session(cmd_ctx->session, kernel_poll_pipe[1]);
3073
3074 /* Set session to NULL so we do not unlock it after free. */
3075 cmd_ctx->session = NULL;
3076 break;
3077 }
3078 case LTTNG_LIST_DOMAINS:
3079 {
3080 ssize_t nb_dom;
3081 struct lttng_domain *domains;
3082
3083 nb_dom = cmd_list_domains(cmd_ctx->session, &domains);
3084 if (nb_dom < 0) {
3085 /* Return value is a negative lttng_error_code. */
3086 ret = -nb_dom;
3087 goto error;
3088 }
3089
3090 ret = setup_lttng_msg(cmd_ctx, nb_dom * sizeof(struct lttng_domain));
3091 if (ret < 0) {
3092 free(domains);
3093 goto setup_error;
3094 }
3095
3096 /* Copy domain list into message payload */
3097 memcpy(cmd_ctx->llm->payload, domains,
3098 nb_dom * sizeof(struct lttng_domain));
3099
3100 free(domains);
3101
3102 ret = LTTNG_OK;
3103 break;
3104 }
3105 case LTTNG_LIST_CHANNELS:
3106 {
3107 int nb_chan;
3108 struct lttng_channel *channels;
3109
3110 nb_chan = cmd_list_channels(cmd_ctx->lsm->domain.type,
3111 cmd_ctx->session, &channels);
3112 if (nb_chan < 0) {
3113 /* Return value is a negative lttng_error_code. */
3114 ret = -nb_chan;
3115 goto error;
3116 }
3117
3118 ret = setup_lttng_msg(cmd_ctx, nb_chan * sizeof(struct lttng_channel));
3119 if (ret < 0) {
3120 free(channels);
3121 goto setup_error;
3122 }
3123
3124 /* Copy channel list into message payload */
3125 memcpy(cmd_ctx->llm->payload, channels,
3126 nb_chan * sizeof(struct lttng_channel));
3127
3128 free(channels);
3129
3130 ret = LTTNG_OK;
3131 break;
3132 }
3133 case LTTNG_LIST_EVENTS:
3134 {
3135 ssize_t nb_event;
3136 struct lttng_event *events = NULL;
3137
3138 nb_event = cmd_list_events(cmd_ctx->lsm->domain.type, cmd_ctx->session,
3139 cmd_ctx->lsm->u.list.channel_name, &events);
3140 if (nb_event < 0) {
3141 /* Return value is a negative lttng_error_code. */
3142 ret = -nb_event;
3143 goto error;
3144 }
3145
3146 ret = setup_lttng_msg(cmd_ctx, nb_event * sizeof(struct lttng_event));
3147 if (ret < 0) {
3148 free(events);
3149 goto setup_error;
3150 }
3151
3152 /* Copy event list into message payload */
3153 memcpy(cmd_ctx->llm->payload, events,
3154 nb_event * sizeof(struct lttng_event));
3155
3156 free(events);
3157
3158 ret = LTTNG_OK;
3159 break;
3160 }
3161 case LTTNG_LIST_SESSIONS:
3162 {
3163 unsigned int nr_sessions;
3164
3165 session_lock_list();
3166 nr_sessions = lttng_sessions_count(
3167 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
3168 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
3169
3170 ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_session) * nr_sessions);
3171 if (ret < 0) {
3172 session_unlock_list();
3173 goto setup_error;
3174 }
3175
3176 /* Fill the session array */
3177 cmd_list_lttng_sessions((struct lttng_session *)(cmd_ctx->llm->payload),
3178 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
3179 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
3180
3181 session_unlock_list();
3182
3183 ret = LTTNG_OK;
3184 break;
3185 }
3186 case LTTNG_CALIBRATE:
3187 {
3188 ret = cmd_calibrate(cmd_ctx->lsm->domain.type,
3189 &cmd_ctx->lsm->u.calibrate);
3190 break;
3191 }
3192 case LTTNG_REGISTER_CONSUMER:
3193 {
3194 struct consumer_data *cdata;
3195
3196 switch (cmd_ctx->lsm->domain.type) {
3197 case LTTNG_DOMAIN_KERNEL:
3198 cdata = &kconsumer_data;
3199 break;
3200 default:
3201 ret = LTTNG_ERR_UND;
3202 goto error;
3203 }
3204
3205 ret = cmd_register_consumer(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3206 cmd_ctx->lsm->u.reg.path, cdata);
3207 break;
3208 }
3209 case LTTNG_ENABLE_EVENT_WITH_FILTER:
3210 {
3211 struct lttng_filter_bytecode *bytecode;
3212
3213 if (cmd_ctx->lsm->u.enable.bytecode_len > LTTNG_FILTER_MAX_LEN) {
3214 ret = LTTNG_ERR_FILTER_INVAL;
3215 goto error;
3216 }
3217 if (cmd_ctx->lsm->u.enable.bytecode_len == 0) {
3218 ret = LTTNG_ERR_FILTER_INVAL;
3219 goto error;
3220 }
3221 bytecode = zmalloc(cmd_ctx->lsm->u.enable.bytecode_len);
3222 if (!bytecode) {
3223 ret = LTTNG_ERR_FILTER_NOMEM;
3224 goto error;
3225 }
3226 /* Receive var. len. data */
3227 DBG("Receiving var len data from client ...");
3228 ret = lttcomm_recv_unix_sock(sock, bytecode,
3229 cmd_ctx->lsm->u.enable.bytecode_len);
3230 if (ret <= 0) {
3231 DBG("Nothing recv() from client var len data... continuing");
3232 *sock_error = 1;
3233 ret = LTTNG_ERR_FILTER_INVAL;
3234 goto error;
3235 }
3236
3237 if (bytecode->len + sizeof(*bytecode)
3238 != cmd_ctx->lsm->u.enable.bytecode_len) {
3239 free(bytecode);
3240 ret = LTTNG_ERR_FILTER_INVAL;
3241 goto error;
3242 }
3243
3244 ret = cmd_enable_event(cmd_ctx->session, &cmd_ctx->lsm->domain,
3245 cmd_ctx->lsm->u.enable.channel_name,
3246 &cmd_ctx->lsm->u.enable.event, bytecode, kernel_poll_pipe[1]);
3247 break;
3248 }
3249 case LTTNG_DATA_PENDING:
3250 {
3251 ret = cmd_data_pending(cmd_ctx->session);
3252 break;
3253 }
3254 case LTTNG_SNAPSHOT_ADD_OUTPUT:
3255 {
3256 struct lttcomm_lttng_output_id reply;
3257
3258 ret = cmd_snapshot_add_output(cmd_ctx->session,
3259 &cmd_ctx->lsm->u.snapshot_output.output, &reply.id);
3260 if (ret != LTTNG_OK) {
3261 goto error;
3262 }
3263
3264 ret = setup_lttng_msg(cmd_ctx, sizeof(reply));
3265 if (ret < 0) {
3266 goto setup_error;
3267 }
3268
3269 /* Copy the snapshot output id reply into the message payload */
3270 memcpy(cmd_ctx->llm->payload, &reply, sizeof(reply));
3271 ret = LTTNG_OK;
3272 break;
3273 }
3274 case LTTNG_SNAPSHOT_DEL_OUTPUT:
3275 {
3276 ret = cmd_snapshot_del_output(cmd_ctx->session,
3277 &cmd_ctx->lsm->u.snapshot_output.output);
3278 break;
3279 }
3280 case LTTNG_SNAPSHOT_LIST_OUTPUT:
3281 {
3282 ssize_t nb_output;
3283 struct lttng_snapshot_output *outputs = NULL;
3284
3285 nb_output = cmd_snapshot_list_outputs(cmd_ctx->session, &outputs);
3286 if (nb_output < 0) {
3287 ret = -nb_output;
3288 goto error;
3289 }
3290
3291 ret = setup_lttng_msg(cmd_ctx,
3292 nb_output * sizeof(struct lttng_snapshot_output));
3293 if (ret < 0) {
3294 free(outputs);
3295 goto setup_error;
3296 }
3297
3298 if (outputs) {
3299 /* Copy output list into message payload */
3300 memcpy(cmd_ctx->llm->payload, outputs,
3301 nb_output * sizeof(struct lttng_snapshot_output));
3302 free(outputs);
3303 }
3304
3305 ret = LTTNG_OK;
3306 break;
3307 }
3308 case LTTNG_SNAPSHOT_RECORD:
3309 {
3310 ret = cmd_snapshot_record(cmd_ctx->session,
3311 &cmd_ctx->lsm->u.snapshot_record.output,
3312 cmd_ctx->lsm->u.snapshot_record.wait);
3313 break;
3314 }
3315 case LTTNG_CREATE_SESSION_SNAPSHOT:
3316 {
3317 size_t nb_uri, len;
3318 struct lttng_uri *uris = NULL;
3319
3320 nb_uri = cmd_ctx->lsm->u.uri.size;
3321 len = nb_uri * sizeof(struct lttng_uri);
3322
3323 if (nb_uri > 0) {
3324 uris = zmalloc(len);
3325 if (uris == NULL) {
3326 ret = LTTNG_ERR_FATAL;
3327 goto error;
3328 }
3329
3330 /* Receive variable len data */
3331 DBG("Waiting for %zu URIs from client ...", nb_uri);
3332 ret = lttcomm_recv_unix_sock(sock, uris, len);
3333 if (ret <= 0) {
3334 DBG("No URIs received from client... continuing");
3335 *sock_error = 1;
3336 ret = LTTNG_ERR_SESSION_FAIL;
3337 free(uris);
3338 goto error;
3339 }
3340
3341 if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
3342 DBG("Creating session with ONE network URI is a bad call");
3343 ret = LTTNG_ERR_SESSION_FAIL;
3344 free(uris);
3345 goto error;
3346 }
3347 }
3348
3349 ret = cmd_create_session_snapshot(cmd_ctx->lsm->session.name, uris,
3350 nb_uri, &cmd_ctx->creds);
3351 free(uris);
3352 break;
3353 }
3354 default:
3355 ret = LTTNG_ERR_UND;
3356 break;
3357 }
3358
3359 error:
3360 if (cmd_ctx->llm == NULL) {
3361 DBG("Missing llm structure. Allocating one.");
3362 if (setup_lttng_msg(cmd_ctx, 0) < 0) {
3363 goto setup_error;
3364 }
3365 }
3366 /* Set return code */
3367 cmd_ctx->llm->ret_code = ret;
3368 setup_error:
3369 if (cmd_ctx->session) {
3370 session_unlock(cmd_ctx->session);
3371 }
3372 if (need_tracing_session) {
3373 session_unlock_list();
3374 }
3375 init_setup_error:
3376 return ret;
3377 }
3378
3379 /*
3380 * Thread managing health check socket.
3381 */
3382 static void *thread_manage_health(void *data)
3383 {
3384 int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
3385 uint32_t revents, nb_fd;
3386 struct lttng_poll_event events;
3387 struct lttcomm_health_msg msg;
3388 struct lttcomm_health_data reply;
3389
3390 DBG("[thread] Manage health check started");
3391
3392 rcu_register_thread();
3393
3394 /* We might hit an error path before this is created. */
3395 lttng_poll_init(&events);
3396
3397 /* Create unix socket */
3398 sock = lttcomm_create_unix_sock(health_unix_sock_path);
3399 if (sock < 0) {
3400 ERR("Unable to create health check Unix socket");
3401 ret = -1;
3402 goto error;
3403 }
3404
3405 /*
3406 * Set the CLOEXEC flag. Return code is useless because either way, the
3407 * show must go on.
3408 */
3409 (void) utils_set_fd_cloexec(sock);
3410
3411 ret = lttcomm_listen_unix_sock(sock);
3412 if (ret < 0) {
3413 goto error;
3414 }
3415
3416 /*
3417 * Pass 2 as size here for the thread quit pipe and the health socket. Nothing
3418 * more will be added to this poll set.
3419 */
3420 ret = sessiond_set_thread_pollset(&events, 2);
3421 if (ret < 0) {
3422 goto error;
3423 }
3424
3425 /* Add the health socket to the poll set */
3426 ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
3427 if (ret < 0) {
3428 goto error;
3429 }
3430
3431 while (1) {
3432 DBG("Health check ready");
3433
3434 /* Infinite blocking call, waiting for transmission */
3435 restart:
3436 ret = lttng_poll_wait(&events, -1);
3437 if (ret < 0) {
3438 /*
3439 * Restart interrupted system call.
3440 */
3441 if (errno == EINTR) {
3442 goto restart;
3443 }
3444 goto error;
3445 }
3446
3447 nb_fd = ret;
3448
3449 for (i = 0; i < nb_fd; i++) {
3450 /* Fetch once the poll data */
3451 revents = LTTNG_POLL_GETEV(&events, i);
3452 pollfd = LTTNG_POLL_GETFD(&events, i);
3453
3454 /* Thread quit pipe has been closed. Killing thread. */
3455 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
3456 if (ret) {
3457 err = 0;
3458 goto exit;
3459 }
3460
3461 /* Event on the health socket */
3462 if (pollfd == sock) {
3463 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3464 ERR("Health socket poll error");
3465 goto error;
3466 }
3467 }
3468 }
3469
3470 new_sock = lttcomm_accept_unix_sock(sock);
3471 if (new_sock < 0) {
3472 goto error;
3473 }
3474
3475 /*
3476 * Set the CLOEXEC flag. Return code is useless because either way, the
3477 * show must go on.
3478 */
3479 (void) utils_set_fd_cloexec(new_sock);
3480
3481 DBG("Receiving data from client for health...");
3482 ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
3483 if (ret <= 0) {
3484 DBG("Nothing recv() from client... continuing");
3485 ret = close(new_sock);
3486 if (ret) {
3487 PERROR("close");
3488 }
3489 new_sock = -1;
3490 continue;
3491 }
3492
3493 rcu_thread_online();
3494
3495 switch (msg.component) {
3496 case LTTNG_HEALTH_CMD:
3497 reply.ret_code = health_check_state(HEALTH_TYPE_CMD);
3498 break;
3499 case LTTNG_HEALTH_APP_MANAGE:
3500 reply.ret_code = health_check_state(HEALTH_TYPE_APP_MANAGE);
3501 break;
3502 case LTTNG_HEALTH_APP_REG:
3503 reply.ret_code = health_check_state(HEALTH_TYPE_APP_REG);
3504 break;
3505 case LTTNG_HEALTH_KERNEL:
3506 reply.ret_code = health_check_state(HEALTH_TYPE_KERNEL);
3507 break;
3508 case LTTNG_HEALTH_CONSUMER:
3509 reply.ret_code = check_consumer_health();
3510 break;
3511 case LTTNG_HEALTH_HT_CLEANUP:
3512 reply.ret_code = health_check_state(HEALTH_TYPE_HT_CLEANUP);
3513 break;
3514 case LTTNG_HEALTH_APP_MANAGE_NOTIFY:
3515 reply.ret_code = health_check_state(HEALTH_TYPE_APP_MANAGE_NOTIFY);
3516 break;
3517 case LTTNG_HEALTH_APP_REG_DISPATCH:
3518 reply.ret_code = health_check_state(HEALTH_TYPE_APP_REG_DISPATCH);
3519 break;
3520 case LTTNG_HEALTH_ALL:
3521 reply.ret_code =
3522 health_check_state(HEALTH_TYPE_APP_MANAGE) &&
3523 health_check_state(HEALTH_TYPE_APP_REG) &&
3524 health_check_state(HEALTH_TYPE_CMD) &&
3525 health_check_state(HEALTH_TYPE_KERNEL) &&
3526 check_consumer_health() &&
3527 health_check_state(HEALTH_TYPE_HT_CLEANUP) &&
3528 health_check_state(HEALTH_TYPE_APP_MANAGE_NOTIFY) &&
3529 health_check_state(HEALTH_TYPE_APP_REG_DISPATCH);
3530 break;
3531 default:
3532 reply.ret_code = LTTNG_ERR_UND;
3533 break;
3534 }
3535
3536 /*
3537 * Flip the return value since, for the client, 0 means success and 1
3538 * indicates bad health, while inside the sessiond it is the opposite.
3539 * This simply keeps the client-side convention consistent and easy to
3540 * check.
3541 */
3542 if (reply.ret_code == 0 || reply.ret_code == 1) {
3543 reply.ret_code = !reply.ret_code;
3544 }
3545
3546 DBG2("Health check return value %d", reply.ret_code);
3547
3548 ret = send_unix_sock(new_sock, (void *) &reply, sizeof(reply));
3549 if (ret < 0) {
3550 ERR("Failed to send health data back to client");
3551 }
3552
3553 /* End of transmission */
3554 ret = close(new_sock);
3555 if (ret) {
3556 PERROR("close");
3557 }
3558 new_sock = -1;
3559 }
3560
3561 exit:
3562 error:
3563 if (err) {
3564 ERR("Health error occurred in %s", __func__);
3565 }
3566 DBG("Health check thread dying");
3567 unlink(health_unix_sock_path);
3568 if (sock >= 0) {
3569 ret = close(sock);
3570 if (ret) {
3571 PERROR("close");
3572 }
3573 }
3574
3575 lttng_poll_clean(&events);
3576
3577 rcu_unregister_thread();
3578 return NULL;
3579 }
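
/*
 * For reference, a minimal sketch of the client side of the health protocol
 * handled above, using the generic lttcomm unix socket helpers. This
 * function does not exist in the code base (the real client lives in
 * liblttng-ctl) and is shown for illustration only:
 */
#if 0	/* illustrative sketch only, not compiled */
static int query_health(const char *sock_path, int component)
{
	int sock, ret;
	struct lttcomm_health_msg msg;
	struct lttcomm_health_data reply;

	sock = lttcomm_connect_unix_sock(sock_path);
	if (sock < 0) {
		return -1;
	}
	/* e.g. component = LTTNG_HEALTH_CMD */
	msg.component = component;
	ret = lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
	if (ret < 0) {
		goto end;
	}
	ret = lttcomm_recv_unix_sock(sock, &reply, sizeof(reply));
	if (ret <= 0) {
		ret = -1;
		goto end;
	}
	/* Thanks to the flip above, 0 means healthy on the client side. */
	ret = reply.ret_code;
end:
	(void) close(sock);
	return ret;
}
#endif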
3580
3581 /*
3582 * This thread manages all client requests using the client unix socket for
3583 * communication.
3584 */
3585 static void *thread_manage_clients(void *data)
3586 {
3587 int sock = -1, ret, i, pollfd, err = -1;
3588 int sock_error;
3589 uint32_t revents, nb_fd;
3590 struct command_ctx *cmd_ctx = NULL;
3591 struct lttng_poll_event events;
3592
3593 DBG("[thread] Manage client started");
3594
3595 rcu_register_thread();
3596
3597 health_register(HEALTH_TYPE_CMD);
3598
3599 if (testpoint(thread_manage_clients)) {
3600 goto error_testpoint;
3601 }
3602
3603 health_code_update();
3604
3605 ret = lttcomm_listen_unix_sock(client_sock);
3606 if (ret < 0) {
3607 goto error_listen;
3608 }
3609
3610 /*
3611 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
3612 * more will be added to this poll set.
3613 */
3614 ret = sessiond_set_thread_pollset(&events, 2);
3615 if (ret < 0) {
3616 goto error_create_poll;
3617 }
3618
3619 /* Add the client socket to the poll set */
3620 ret = lttng_poll_add(&events, client_sock, LPOLLIN | LPOLLPRI);
3621 if (ret < 0) {
3622 goto error;
3623 }
3624
3625 /*
3626 * Notify the parent pid that we are ready to accept client commands.
3627 */
3628 if (opt_sig_parent) {
3629 kill(ppid, SIGUSR1);
3630 }
3631
3632 if (testpoint(thread_manage_clients_before_loop)) {
3633 goto error;
3634 }
3635
3636 health_code_update();
3637
3638 while (1) {
3639 DBG("Accepting client command ...");
3640
3641 /* Infinite blocking call, waiting for transmission */
3642 restart:
3643 health_poll_entry();
3644 ret = lttng_poll_wait(&events, -1);
3645 health_poll_exit();
3646 if (ret < 0) {
3647 /*
3648 * Restart interrupted system call.
3649 */
3650 if (errno == EINTR) {
3651 goto restart;
3652 }
3653 goto error;
3654 }
3655
3656 nb_fd = ret;
3657
3658 for (i = 0; i < nb_fd; i++) {
3659 /* Fetch once the poll data */
3660 revents = LTTNG_POLL_GETEV(&events, i);
3661 pollfd = LTTNG_POLL_GETFD(&events, i);
3662
3663 health_code_update();
3664
3665 /* Thread quit pipe has been closed. Killing thread. */
3666 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
3667 if (ret) {
3668 err = 0;
3669 goto exit;
3670 }
3671
3672 /* Event on the client socket */
3673 if (pollfd == client_sock) {
3674 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3675 ERR("Client socket poll error");
3676 goto error;
3677 }
3678 }
3679 }
3680
3681 DBG("Wait for client response");
3682
3683 health_code_update();
3684
3685 sock = lttcomm_accept_unix_sock(client_sock);
3686 if (sock < 0) {
3687 goto error;
3688 }
3689
3690 /*
3691 * Set the CLOEXEC flag. Return code is useless because either way, the
3692 * show must go on.
3693 */
3694 (void) utils_set_fd_cloexec(sock);
3695
3696 /* Set socket option for credentials retrieval */
3697 ret = lttcomm_setsockopt_creds_unix_sock(sock);
3698 if (ret < 0) {
3699 goto error;
3700 }
3701
3702 /* Allocate context command to process the client request */
3703 cmd_ctx = zmalloc(sizeof(struct command_ctx));
3704 if (cmd_ctx == NULL) {
3705 PERROR("zmalloc cmd_ctx");
3706 goto error;
3707 }
3708
3709 /* Allocate data buffer for reception */
3710 cmd_ctx->lsm = zmalloc(sizeof(struct lttcomm_session_msg));
3711 if (cmd_ctx->lsm == NULL) {
3712 PERROR("zmalloc cmd_ctx->lsm");
3713 goto error;
3714 }
3715
3716 cmd_ctx->llm = NULL;
3717 cmd_ctx->session = NULL;
3718
3719 health_code_update();
3720
3721 /*
3722 * Data is received from the lttng client. The struct
3723 * lttcomm_session_msg (lsm) contains the command and data request of
3724 * the client.
3725 */
3726 DBG("Receiving data from client ...");
3727 ret = lttcomm_recv_creds_unix_sock(sock, cmd_ctx->lsm,
3728 sizeof(struct lttcomm_session_msg), &cmd_ctx->creds);
3729 if (ret <= 0) {
3730 DBG("Nothing recv() from client... continuing");
3731 ret = close(sock);
3732 if (ret) {
3733 PERROR("close");
3734 }
3735 sock = -1;
3736 clean_command_ctx(&cmd_ctx);
3737 continue;
3738 }
3739
3740 health_code_update();
3741
3742 // TODO: Validate cmd_ctx including sanity check for
3743 // security purposes.
3744
3745 rcu_thread_online();
3746 /*
3747 * This function dispatches the work to the kernel or userspace tracer
3748 * libs and fills the lttcomm_lttng_msg data structure with all the
3749 * information needed by the client. The command context struct contains
3750 * everything this function may need.
3751 */
3752 ret = process_client_msg(cmd_ctx, sock, &sock_error);
3753 rcu_thread_offline();
3754 if (ret < 0) {
3755 ret = close(sock);
3756 if (ret) {
3757 PERROR("close");
3758 }
3759 sock = -1;
3760 /*
3761 * TODO: Inform client somehow of the fatal error. At
3762 * this point, ret < 0 means that a zmalloc failed
3763 * (ENOMEM). Error detected but still accept
3764 * command, unless a socket error has been
3765 * detected.
3766 */
3767 clean_command_ctx(&cmd_ctx);
3768 continue;
3769 }
3770
3771 health_code_update();
3772
3773 DBG("Sending response (size: %d, retcode: %s)",
3774 cmd_ctx->lttng_msg_size,
3775 lttng_strerror(-cmd_ctx->llm->ret_code));
3776 ret = send_unix_sock(sock, cmd_ctx->llm, cmd_ctx->lttng_msg_size);
3777 if (ret < 0) {
3778 ERR("Failed to send data back to client");
3779 }
3780
3781 /* End of transmission */
3782 ret = close(sock);
3783 if (ret) {
3784 PERROR("close");
3785 }
3786 sock = -1;
3787
3788 clean_command_ctx(&cmd_ctx);
3789
3790 health_code_update();
3791 }
3792
3793 exit:
3794 error:
3795 if (sock >= 0) {
3796 ret = close(sock);
3797 if (ret) {
3798 PERROR("close");
3799 }
3800 }
3801
3802 lttng_poll_clean(&events);
3803 clean_command_ctx(&cmd_ctx);
3804
3805 error_listen:
3806 error_create_poll:
3807 error_testpoint:
3808 unlink(client_unix_sock_path);
3809 if (client_sock >= 0) {
3810 ret = close(client_sock);
3811 if (ret) {
3812 PERROR("close");
3813 }
3814 }
3815
3816 if (err) {
3817 health_error();
3818 ERR("Health error occurred in %s", __func__);
3819 }
3820
3821 health_unregister();
3822
3823 DBG("Client thread dying");
3824
3825 rcu_unregister_thread();
3826 return NULL;
3827 }
3828
3829
3830 /*
3831 * usage function on stderr
3832 */
3833 static void usage(void)
3834 {
3835 fprintf(stderr, "Usage: %s OPTIONS\n\nOptions:\n", progname);
3836 fprintf(stderr, " -h, --help Display this usage.\n");
3837 fprintf(stderr, " -c, --client-sock PATH Specify path for the client unix socket\n");
3838 fprintf(stderr, " -a, --apps-sock PATH Specify path for apps unix socket\n");
3839 fprintf(stderr, " --kconsumerd-err-sock PATH Specify path for the kernel consumer error socket\n");
3840 fprintf(stderr, " --kconsumerd-cmd-sock PATH Specify path for the kernel consumer command socket\n");
3841 fprintf(stderr, " --ustconsumerd32-err-sock PATH Specify path for the 32-bit UST consumer error socket\n");
3842 fprintf(stderr, " --ustconsumerd64-err-sock PATH Specify path for the 64-bit UST consumer error socket\n");
3843 fprintf(stderr, " --ustconsumerd32-cmd-sock PATH Specify path for the 32-bit UST consumer command socket\n");
3844 fprintf(stderr, " --ustconsumerd64-cmd-sock PATH Specify path for the 64-bit UST consumer command socket\n");
3845 fprintf(stderr, " --consumerd32-path PATH Specify path for the 32-bit UST consumer daemon binary\n");
3846 fprintf(stderr, " --consumerd32-libdir PATH Specify path for the 32-bit UST consumer daemon libraries\n");
3847 fprintf(stderr, " --consumerd64-path PATH Specify path for the 64-bit UST consumer daemon binary\n");
3848 fprintf(stderr, " --consumerd64-libdir PATH Specify path for the 64-bit UST consumer daemon libraries\n");
3849 fprintf(stderr, " -d, --daemonize Start as a daemon.\n");
3850 fprintf(stderr, " -g, --group NAME Specify the tracing group name. (default: tracing)\n");
3851 fprintf(stderr, " -V, --version Show version number.\n");
3852 fprintf(stderr, " -S, --sig-parent Send SIGCHLD to parent pid to notify readiness.\n");
3853 fprintf(stderr, " -q, --quiet No output at all.\n");
3854 fprintf(stderr, " -v, --verbose Verbose mode. Activate DBG() macro.\n");
3855 fprintf(stderr, " -p, --pidfile FILE Write a pid to FILE name overriding the default value.\n");
3856 fprintf(stderr, " --verbose-consumer Verbose mode for consumer. Activate DBG() macro.\n");
3857 fprintf(stderr, " --no-kernel Disable kernel tracer\n");
3858 }
3859
3860 /*
3861 * daemon argument parsing
3862 */
3863 static int parse_args(int argc, char **argv)
3864 {
3865 int c;
3866
3867 static struct option long_options[] = {
3868 { "client-sock", 1, 0, 'c' },
3869 { "apps-sock", 1, 0, 'a' },
3870 { "kconsumerd-cmd-sock", 1, 0, 'C' },
3871 { "kconsumerd-err-sock", 1, 0, 'E' },
3872 { "ustconsumerd32-cmd-sock", 1, 0, 'G' },
3873 { "ustconsumerd32-err-sock", 1, 0, 'H' },
3874 { "ustconsumerd64-cmd-sock", 1, 0, 'D' },
3875 { "ustconsumerd64-err-sock", 1, 0, 'F' },
3876 { "consumerd32-path", 1, 0, 'u' },
3877 { "consumerd32-libdir", 1, 0, 'U' },
3878 { "consumerd64-path", 1, 0, 't' },
3879 { "consumerd64-libdir", 1, 0, 'T' },
3880 { "daemonize", 0, 0, 'd' },
3881 { "sig-parent", 0, 0, 'S' },
3882 { "help", 0, 0, 'h' },
3883 { "group", 1, 0, 'g' },
3884 { "version", 0, 0, 'V' },
3885 { "quiet", 0, 0, 'q' },
3886 { "verbose", 0, 0, 'v' },
3887 { "verbose-consumer", 0, 0, 'Z' },
3888 { "no-kernel", 0, 0, 'N' },
3889 { "pidfile", 1, 0, 'p' },
3890 { NULL, 0, 0, 0 }
3891 };
3892
3893 while (1) {
3894 int option_index = 0;
3895 c = getopt_long(argc, argv, "dhqvVSN" "a:c:g:s:C:E:D:F:Z:u:t:p:",
3896 long_options, &option_index);
3897 if (c == -1) {
3898 break;
3899 }
3900
3901 switch (c) {
3902 case 0:
3903 fprintf(stderr, "option %s", long_options[option_index].name);
3904 if (optarg) {
3905 fprintf(stderr, " with arg %s\n", optarg);
3906 }
3907 break;
3908 case 'c':
3909 snprintf(client_unix_sock_path, PATH_MAX, "%s", optarg);
3910 break;
3911 case 'a':
3912 snprintf(apps_unix_sock_path, PATH_MAX, "%s", optarg);
3913 break;
3914 case 'd':
3915 opt_daemon = 1;
3916 break;
3917 case 'g':
3918 opt_tracing_group = optarg;
3919 break;
3920 case 'h':
3921 usage();
3922 exit(EXIT_FAILURE);
3923 case 'V':
3924 fprintf(stdout, "%s\n", VERSION);
3925 exit(EXIT_SUCCESS);
3926 case 'S':
3927 opt_sig_parent = 1;
3928 break;
3929 case 'E':
3930 snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
3931 break;
3932 case 'C':
3933 snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
3934 break;
3935 case 'F':
3936 snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
3937 break;
3938 case 'D':
3939 snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
3940 break;
3941 case 'H':
3942 snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
3943 break;
3944 case 'G':
3945 snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
3946 break;
3947 case 'N':
3948 opt_no_kernel = 1;
3949 break;
3950 case 'q':
3951 lttng_opt_quiet = 1;
3952 break;
3953 case 'v':
3954 /* Verbose level can increase using multiple -v */
3955 lttng_opt_verbose += 1;
3956 break;
3957 case 'Z':
3958 opt_verbose_consumer += 1;
3959 break;
3960 case 'u':
3961 consumerd32_bin = optarg;
3962 break;
3963 case 'U':
3964 consumerd32_libdir = optarg;
3965 break;
3966 case 't':
3967 consumerd64_bin = optarg;
3968 break;
3969 case 'T':
3970 consumerd64_libdir = optarg;
3971 break;
3972 case 'p':
3973 opt_pidfile = optarg;
3974 break;
3975 default:
3976 /* Unknown option or other error.
3977 * Error is printed by getopt, just return */
3978 return -1;
3979 }
3980 }
3981
3982 return 0;
3983 }
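
/*
 * Example invocations (paths shown are illustrative):
 *
 *   lttng-sessiond --daemonize
 *   lttng-sessiond -vvv --verbose-consumer --no-kernel
 *   lttng-sessiond --consumerd64-path /usr/local/lib/lttng/libexec/lttng-consumerd
 */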
3984
3985 /*
3986 * Create the two sockets needed by the daemon.
3987 * apps_sock - The communication socket for all UST apps.
3988 * client_sock - The communication socket for the cli tool (lttng).
3989 */
3990 static int init_daemon_socket(void)
3991 {
3992 int ret = 0;
3993 mode_t old_umask;
3994
3995 old_umask = umask(0);
3996
3997 /* Create client tool unix socket */
3998 client_sock = lttcomm_create_unix_sock(client_unix_sock_path);
3999 if (client_sock < 0) {
4000 ERR("Create unix sock failed: %s", client_unix_sock_path);
4001 ret = -1;
4002 goto end;
4003 }
4004
4005 /* Set the cloexec flag */
4006 ret = utils_set_fd_cloexec(client_sock);
4007 if (ret < 0) {
4008 ERR("Unable to set CLOEXEC flag to the client Unix socket (fd: %d). "
4009 "Continuing but note that the consumer daemon will have a "
4010 "reference to this socket on exec()", client_sock);
4011 }
4012
4013 /* File permission MUST be 660 */
4014 ret = chmod(client_unix_sock_path, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
4015 if (ret < 0) {
4016 ERR("Set file permissions failed: %s", client_unix_sock_path);
4017 PERROR("chmod");
4018 goto end;
4019 }
4020
4021 /* Create the application unix socket */
4022 apps_sock = lttcomm_create_unix_sock(apps_unix_sock_path);
4023 if (apps_sock < 0) {
4024 ERR("Create unix sock failed: %s", apps_unix_sock_path);
4025 ret = -1;
4026 goto end;
4027 }
4028
4029 /* Set the cloexec flag */
4030 ret = utils_set_fd_cloexec(apps_sock);
4031 if (ret < 0) {
4032 ERR("Unable to set CLOEXEC flag to the app Unix socket (fd: %d). "
4033 "Continuing but note that the consumer daemon will have a "
4034 "reference to this socket on exec()", apps_sock);
4035 }
4036
4037 /* File permission MUST be 666 */
4038 ret = chmod(apps_unix_sock_path,
4039 S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
4040 if (ret < 0) {
4041 ERR("Set file permissions failed: %s", apps_unix_sock_path);
4042 PERROR("chmod");
4043 goto end;
4044 }
4045
4046 DBG3("Session daemon client socket %d and application socket %d created",
4047 client_sock, apps_sock);
4048
4049 end:
4050 umask(old_umask);
4051 return ret;
4052 }
4053
4054 /*
4055 * Check if the global socket is available, and if a daemon is answering at the
4056 * other side. If so, an error is returned.
4057 */
4058 static int check_existing_daemon(void)
4059 {
4060 /* Is there anybody out there ? */
4061 if (lttng_session_daemon_alive()) {
4062 return -EEXIST;
4063 }
4064
4065 return 0;
4066 }
4067
4068 /*
4069 * Set the tracing group gid on the run directory and the various sockets.
4070 *
4071 * Race window between mkdir and chown is OK because we are going from more
4072 * permissive (root.root) to less permissive (root.tracing).
4073 */
4074 static int set_permissions(char *rundir)
4075 {
4076 int ret;
4077 gid_t gid;
4078
4079 ret = allowed_group();
4080 if (ret < 0) {
4081 WARN("No tracing group detected");
4082 /* Setting gid to 0 if no tracing group is found */
4083 gid = 0;
4084 } else {
4085 gid = ret;
4086 }
4087
4088 /* Set lttng run dir */
4089 ret = chown(rundir, 0, gid);
4090 if (ret < 0) {
4091 ERR("Unable to set group on %s", rundir);
4092 PERROR("chown");
4093 }
4094
4095 /* Ensure all applications and tracing group can search the run dir */
4096 ret = chmod(rundir, S_IRWXU | S_IXGRP | S_IXOTH);
4097 if (ret < 0) {
4098 ERR("Unable to set permissions on %s", rundir);
4099 PERROR("chmod");
4100 }
4101
4102 /* lttng client socket path */
4103 ret = chown(client_unix_sock_path, 0, gid);
4104 if (ret < 0) {
4105 ERR("Unable to set group on %s", client_unix_sock_path);
4106 PERROR("chown");
4107 }
4108
4109 /* kconsumer error socket path */
4110 ret = chown(kconsumer_data.err_unix_sock_path, 0, gid);
4111 if (ret < 0) {
4112 ERR("Unable to set group on %s", kconsumer_data.err_unix_sock_path);
4113 PERROR("chown");
4114 }
4115
4116 /* 64-bit ustconsumer error socket path */
4117 ret = chown(ustconsumer64_data.err_unix_sock_path, 0, gid);
4118 if (ret < 0) {
4119 ERR("Unable to set group on %s", ustconsumer64_data.err_unix_sock_path);
4120 PERROR("chown");
4121 }
4122
4123 /* 32-bit ustconsumer compat32 error socket path */
4124 ret = chown(ustconsumer32_data.err_unix_sock_path, 0, gid);
4125 if (ret < 0) {
4126 ERR("Unable to set group on %s", ustconsumer32_data.err_unix_sock_path);
4127 PERROR("chown");
4128 }
4129
4130 DBG("All permissions are set");
4131
4132 return ret;
4133 }
4134
4135 /*
4136 * Create the lttng run directory needed for all global sockets and pipes.
4137 */
4138 static int create_lttng_rundir(const char *rundir)
4139 {
4140 int ret;
4141
4142 DBG3("Creating LTTng run directory: %s", rundir);
4143
4144 ret = mkdir(rundir, S_IRWXU);
4145 if (ret < 0) {
4146 if (errno != EEXIST) {
4147 ERR("Unable to create %s", rundir);
4148 goto error;
4149 } else {
4150 ret = 0;
4151 }
4152 }
4153
4154 error:
4155 return ret;
4156 }
4157
4158 /*
4159 * Setup the sockets and directory needed for consumerd communication with the
4160 * session daemon.
4161 */
4162 static int set_consumer_sockets(struct consumer_data *consumer_data,
4163 const char *rundir)
4164 {
4165 int ret;
4166 char path[PATH_MAX];
4167
4168 switch (consumer_data->type) {
4169 case LTTNG_CONSUMER_KERNEL:
4170 snprintf(path, PATH_MAX, DEFAULT_KCONSUMERD_PATH, rundir);
4171 break;
4172 case LTTNG_CONSUMER64_UST:
4173 snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD64_PATH, rundir);
4174 break;
4175 case LTTNG_CONSUMER32_UST:
4176 snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD32_PATH, rundir);
4177 break;
4178 default:
4179 ERR("Consumer type unknown");
4180 ret = -EINVAL;
4181 goto error;
4182 }
4183
4184 DBG2("Creating consumer directory: %s", path);
4185
4186 ret = mkdir(path, S_IRWXU);
4187 if (ret < 0) {
4188 if (errno != EEXIST) {
4189 PERROR("mkdir");
4190 ERR("Failed to create %s", path);
4191 goto error;
4192 }
4193 ret = 0; /* The consumer directory already exists, which is fine. */
4194 }
4195
4196 /* Create the consumerd error unix socket */
4197 consumer_data->err_sock =
4198 lttcomm_create_unix_sock(consumer_data->err_unix_sock_path);
4199 if (consumer_data->err_sock < 0) {
4200 ERR("Create unix sock failed: %s", consumer_data->err_unix_sock_path);
4201 ret = -1;
4202 goto error;
4203 }
4204
4205 /*
4206 * Set the CLOEXEC flag. Return code is useless because either way, the
4207 * show must go on.
4208 */
4209 ret = utils_set_fd_cloexec(consumer_data->err_sock);
4210 if (ret < 0) {
4211 PERROR("utils_set_fd_cloexec");
4212 /* continue anyway */
4213 }
4214
4215 /* File permission MUST be 660 */
4216 ret = chmod(consumer_data->err_unix_sock_path,
4217 S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
4218 if (ret < 0) {
4219 ERR("Set file permissions failed: %s", consumer_data->err_unix_sock_path);
4220 PERROR("chmod");
4221 goto error;
4222 }
4223
4224 error:
4225 return ret;
4226 }
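
/*
 * lttcomm_create_unix_sock() is implemented in common/sessiond-comm; its
 * details may differ, but a typical listening AF_UNIX socket setup is
 * sketched below for reference (not compiled):
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

static int create_listening_unix_sock(const char *path)
{
	struct sockaddr_un sun;
	int fd;

	fd = socket(AF_UNIX, SOCK_STREAM, 0);
	if (fd < 0) {
		return -1;
	}
	memset(&sun, 0, sizeof(sun));
	sun.sun_family = AF_UNIX;
	strncpy(sun.sun_path, path, sizeof(sun.sun_path) - 1);
	(void) unlink(path); /* remove a stale socket file, if any */
	if (bind(fd, (struct sockaddr *) &sun, sizeof(sun)) < 0 ||
			listen(fd, 10) < 0) {
		(void) close(fd);
		return -1;
	}
	return fd;
}
#endif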
4227
4228 /*
4229 * Signal handler for the daemon
4230 *
4231  * Simply stop all worker threads, letting main() return gracefully after
4232  * joining all threads and calling cleanup().
4233 */
4234 static void sighandler(int sig)
4235 {
4236 switch (sig) {
4237 case SIGPIPE:
4238 DBG("SIGPIPE caught");
4239 return;
4240 case SIGINT:
4241 DBG("SIGINT caught");
4242 stop_threads();
4243 break;
4244 case SIGTERM:
4245 DBG("SIGTERM caught");
4246 stop_threads();
4247 break;
4248 default:
4249 break;
4250 }
4251 }
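
/*
 * stop_threads() is defined earlier in this file. The usual pattern,
 * sketched here under that assumption, is to write one byte to a "quit
 * pipe" that every worker thread watches in its poll set; write(2) is
 * async-signal-safe, which makes it usable from a handler (not compiled):
 */
#if 0
#include <errno.h>
#include <unistd.h>

static void notify_quit_pipe(int quit_pipe_write_fd)
{
	ssize_t ret;

	/* Pollers see the pipe become readable and break out of their loop. */
	do {
		ret = write(quit_pipe_write_fd, "!", 1);
	} while (ret < 0 && errno == EINTR);
}
#endif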
4252
4253 /*
4254  * Set up the signal handlers for:
4255  * SIGINT, SIGTERM and SIGPIPE
4256 */
4257 static int set_signal_handler(void)
4258 {
4259 int ret = 0;
4260 struct sigaction sa;
4261 sigset_t sigset;
4262
4263 if ((ret = sigemptyset(&sigset)) < 0) {
4264 PERROR("sigemptyset");
4265 return ret;
4266 }
4267
4268 sa.sa_handler = sighandler;
4269 sa.sa_mask = sigset;
4270 sa.sa_flags = 0;
4271 if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
4272 PERROR("sigaction");
4273 return ret;
4274 }
4275
4276 if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
4277 PERROR("sigaction");
4278 return ret;
4279 }
4280
4281 if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
4282 PERROR("sigaction");
4283 return ret;
4284 }
4285
4286 DBG("Signal handler set for SIGTERM, SIGPIPE and SIGINT");
4287
4288 return ret;
4289 }
4290
4291 /*
4292  * Raise the open files limit. This daemon can open a large number of
4293  * file descriptors in order to consume multiple kernel traces.
4294 */
4295 static void set_ulimit(void)
4296 {
4297 int ret;
4298 struct rlimit lim;
4299
4300 /* The kernel does not allow an infinite limit for open files */
4301 lim.rlim_cur = 65535;
4302 lim.rlim_max = 65535;
4303
4304 ret = setrlimit(RLIMIT_NOFILE, &lim);
4305 if (ret < 0) {
4306 PERROR("failed to set open files limit");
4307 }
4308 }
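
/*
 * A hedged variant that raises the limit only when the current soft limit
 * is lower, querying it first with getrlimit(2) (not compiled):
 */
#if 0
#include <sys/resource.h>

static void raise_nofile_limit(rlim_t wanted)
{
	struct rlimit lim;

	if (getrlimit(RLIMIT_NOFILE, &lim) < 0) {
		return;
	}
	if (lim.rlim_cur >= wanted) {
		return; /* already high enough */
	}
	lim.rlim_cur = wanted;
	if (lim.rlim_max < wanted) {
		lim.rlim_max = wanted; /* raising the hard limit needs root */
	}
	(void) setrlimit(RLIMIT_NOFILE, &lim);
}
#endif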
4309
4310 /*
4311  * Write the pidfile, using opt_pidfile when given, otherwise the default
4311  * path under rundir.
4312 */
4313 static void write_pidfile(void)
4314 {
4315 int ret;
4316 char pidfile_path[PATH_MAX];
4317
4318 assert(rundir);
4319
4320 if (opt_pidfile) {
4321 strncpy(pidfile_path, opt_pidfile, sizeof(pidfile_path));
pidfile_path[sizeof(pidfile_path) - 1] = '\0'; /* ensure NUL termination */
4322 } else {
4323 /* Build the pidfile path from rundir and the default pidfile name. */
4324 ret = snprintf(pidfile_path, sizeof(pidfile_path), "%s/"
4325 DEFAULT_LTTNG_SESSIOND_PIDFILE, rundir);
4326 if (ret < 0) {
4327 PERROR("snprintf pidfile path");
4328 goto error;
4329 }
4330 }
4331
4332 /*
4333	 * Create the pid file in rundir. The return value is of no importance:
4334	 * execution continues even if we are unable to write the file.
4335 */
4336 (void) utils_create_pid_file(getpid(), pidfile_path);
4337
4338 error:
4339 return;
4340 }
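
/*
 * utils_create_pid_file() lives in common/utils.c; its exact behaviour is
 * not shown here. A minimal sketch of the typical approach, assuming a
 * plain "pid + newline" file format (not compiled):
 */
#if 0
#include <stdio.h>
#include <sys/types.h>

static int write_pid_to_file(pid_t pid, const char *path)
{
	FILE *fp;
	int ret;

	fp = fopen(path, "w");
	if (fp == NULL) {
		return -1;
	}
	ret = fprintf(fp, "%d\n", (int) pid);
	if (fclose(fp) != 0 || ret < 0) {
		return -1;
	}
	return 0;
}
#endif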
4341
4342 /*
4343 * main
4344 */
4345 int main(int argc, char **argv)
4346 {
4347 int ret = 0;
4348 void *status;
4349 const char *home_path, *env_app_timeout;
4350
4351 init_kernel_workarounds();
4352
4353 rcu_register_thread();
4354
4355 setup_consumerd_path();
4356
4357 page_size = sysconf(_SC_PAGESIZE);
4358 if (page_size < 0) {
4359 PERROR("sysconf _SC_PAGESIZE");
4360 page_size = LONG_MAX;
4361 WARN("Fallback page size to %ld", page_size);
4362 }
4363
4364 /* Parse arguments */
4365 progname = argv[0];
4366 if ((ret = parse_args(argc, argv)) < 0) {
4367 goto error;
4368 }
4369
4370 /* Daemonize */
4371 if (opt_daemon) {
4372 int i;
4373
4374 /*
4375 * fork
4376 * child: setsid, close FD 0, 1, 2, chdir /
4377 * parent: exit (if fork is successful)
4378 */
4379 ret = daemon(0, 0);
4380 if (ret < 0) {
4381 PERROR("daemon");
4382 goto error;
4383 }
4384 /*
4385 * We are in the child. Make sure all other file
4386 * descriptors are closed, in case we are called with
4387 * more opened file descriptors than the standard ones.
4388 */
4389 for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) {
4390 (void) close(i);
4391 }
4392 }
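
/*
 * daemon(3) is a non-standard (BSD/glibc) convenience. For reference, a
 * minimal sketch of the equivalent manual sequence the comment above
 * describes (not compiled):
 */
#if 0
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

static int daemonize_manually(void)
{
	int fd;
	pid_t pid;

	pid = fork();
	if (pid < 0) {
		return -1;
	}
	if (pid > 0) {
		_exit(EXIT_SUCCESS); /* parent exits */
	}
	if (setsid() < 0) { /* child: new session, no controlling tty */
		return -1;
	}
	if (chdir("/") < 0) {
		return -1;
	}
	/* Redirect stdin/stdout/stderr to /dev/null. */
	fd = open("/dev/null", O_RDWR);
	if (fd >= 0) {
		(void) dup2(fd, STDIN_FILENO);
		(void) dup2(fd, STDOUT_FILENO);
		(void) dup2(fd, STDERR_FILENO);
		if (fd > STDERR_FILENO) {
			(void) close(fd);
		}
	}
	return 0;
}
#endif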
4393
4394 /* Create thread quit pipe */
4395 if ((ret = init_thread_quit_pipe()) < 0) {
4396 goto error;
4397 }
4398
4399 /* Check if daemon is UID = 0 */
4400 is_root = !getuid();
4401
4402 if (is_root) {
4403 rundir = strdup(DEFAULT_LTTNG_RUNDIR);
4404
4405 /* Create global run dir with root access */
4406 ret = create_lttng_rundir(rundir);
4407 if (ret < 0) {
4408 goto error;
4409 }
4410
4411 if (strlen(apps_unix_sock_path) == 0) {
4412 snprintf(apps_unix_sock_path, PATH_MAX,
4413 DEFAULT_GLOBAL_APPS_UNIX_SOCK);
4414 }
4415
4416 if (strlen(client_unix_sock_path) == 0) {
4417 snprintf(client_unix_sock_path, PATH_MAX,
4418 DEFAULT_GLOBAL_CLIENT_UNIX_SOCK);
4419 }
4420
4421 /* Set global SHM for ust */
4422 if (strlen(wait_shm_path) == 0) {
4423 snprintf(wait_shm_path, PATH_MAX,
4424 DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH);
4425 }
4426
4427 if (strlen(health_unix_sock_path) == 0) {
4428 snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
4429 DEFAULT_GLOBAL_HEALTH_UNIX_SOCK);
4430 }
4431
4432 /* Setup kernel consumerd path */
4433 snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX,
4434 DEFAULT_KCONSUMERD_ERR_SOCK_PATH, rundir);
4435 snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX,
4436 DEFAULT_KCONSUMERD_CMD_SOCK_PATH, rundir);
4437
4438 DBG2("Kernel consumer err path: %s",
4439 kconsumer_data.err_unix_sock_path);
4440 DBG2("Kernel consumer cmd path: %s",
4441 kconsumer_data.cmd_unix_sock_path);
4442 } else {
4443 home_path = utils_get_home_dir();
4444 if (home_path == NULL) {
4445 /* TODO: Add --socket PATH option */
4446 ERR("Can't get HOME directory for sockets creation.");
4447 ret = -EPERM;
4448 goto error;
4449 }
4450
4451 /*
4452 * Create rundir from home path. This will create something like
4453 * $HOME/.lttng
4454 */
4455 ret = asprintf(&rundir, DEFAULT_LTTNG_HOME_RUNDIR, home_path);
4456 if (ret < 0) {
4457 ret = -ENOMEM;
4458 goto error;
4459 }
4460
4461 ret = create_lttng_rundir(rundir);
4462 if (ret < 0) {
4463 goto error;
4464 }
4465
4466 if (strlen(apps_unix_sock_path) == 0) {
4467 snprintf(apps_unix_sock_path, PATH_MAX,
4468 DEFAULT_HOME_APPS_UNIX_SOCK, home_path);
4469 }
4470
4471 /* Set the cli tool unix socket path */
4472 if (strlen(client_unix_sock_path) == 0) {
4473 snprintf(client_unix_sock_path, PATH_MAX,
4474 DEFAULT_HOME_CLIENT_UNIX_SOCK, home_path);
4475 }
4476
4477 /* Set global SHM for ust */
4478 if (strlen(wait_shm_path) == 0) {
4479 snprintf(wait_shm_path, PATH_MAX,
4480 DEFAULT_HOME_APPS_WAIT_SHM_PATH, getuid());
4481 }
4482
4483 /* Set health check Unix path */
4484 if (strlen(health_unix_sock_path) == 0) {
4485 snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
4486 DEFAULT_HOME_HEALTH_UNIX_SOCK, home_path);
4487 }
4488 }
4489
4490 /* Set consumer initial state */
4491 kernel_consumerd_state = CONSUMER_STOPPED;
4492 ust_consumerd_state = CONSUMER_STOPPED;
4493
4494 DBG("Client socket path %s", client_unix_sock_path);
4495 DBG("Application socket path %s", apps_unix_sock_path);
4496 DBG("Application wait path %s", wait_shm_path);
4497 DBG("LTTng run directory path: %s", rundir);
4498
4499 /* 32-bit consumerd path setup */
4500 snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX,
4501 DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH, rundir);
4502 snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX,
4503 DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH, rundir);
4504
4505 DBG2("UST consumer 32 bits err path: %s",
4506 ustconsumer32_data.err_unix_sock_path);
4507 DBG2("UST consumer 32 bits cmd path: %s",
4508 ustconsumer32_data.cmd_unix_sock_path);
4509
4510 /* 64-bit consumerd path setup */
4511 snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX,
4512 DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH, rundir);
4513 snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX,
4514 DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH, rundir);
4515
4516 DBG2("UST consumer 64 bits err path: %s",
4517 ustconsumer64_data.err_unix_sock_path);
4518 DBG2("UST consumer 64 bits cmd path: %s",
4519 ustconsumer64_data.cmd_unix_sock_path);
4520
4521 /*
4522	 * See if a daemon already exists.
4523 */
4524 if ((ret = check_existing_daemon()) < 0) {
4525 ERR("Already running daemon.\n");
4526 /*
4527		 * We do not goto exit because we must not call cleanup()
4528		 * while another daemon is running.
4529 */
4530 goto error;
4531 }
4532
4533 /*
4534	 * Init the UST app hash table. It must be allocated before this point
4535	 * since cleanup() can be called after it.
4536 */
4537 ust_app_ht_alloc();
4538
4539 /* After this point, we can safely call cleanup() with "goto exit" */
4540
4541 /*
4542	 * These actions must be executed as root. We do them *after* setting up
4543	 * the socket paths because we MUST check for another daemon using those
4544	 * paths *before* trying to set up the kernel consumer sockets and init
4545	 * the kernel tracer.
4546 */
4547 if (is_root) {
4548 ret = set_consumer_sockets(&kconsumer_data, rundir);
4549 if (ret < 0) {
4550 goto exit;
4551 }
4552
4553 /* Setup kernel tracer */
4554 if (!opt_no_kernel) {
4555 init_kernel_tracer();
4556 }
4557
4558 /* Set ulimit for open files */
4559 set_ulimit();
4560 }
4561 /* init lttng_fd tracking must be done after set_ulimit. */
4562 lttng_fd_init();
4563
4564 ret = set_consumer_sockets(&ustconsumer64_data, rundir);
4565 if (ret < 0) {
4566 goto exit;
4567 }
4568
4569 ret = set_consumer_sockets(&ustconsumer32_data, rundir);
4570 if (ret < 0) {
4571 goto exit;
4572 }
4573
4574 if ((ret = set_signal_handler()) < 0) {
4575 goto exit;
4576 }
4577
4578 /* Set up the needed unix sockets */
4579 if ((ret = init_daemon_socket()) < 0) {
4580 goto exit;
4581 }
4582
4583 /* Set group and permissions on the run directory and sockets */
4584 if (is_root && ((ret = set_permissions(rundir)) < 0)) {
4585 goto exit;
4586 }
4587
4588 /* Get parent pid if -S, --sig-parent is specified. */
4589 if (opt_sig_parent) {
4590 ppid = getppid();
4591 }
4592
4593 /* Setup the kernel pipe for waking up the kernel thread */
4594 if (is_root && !opt_no_kernel) {
4595 if ((ret = utils_create_pipe_cloexec(kernel_poll_pipe)) < 0) {
4596 goto exit;
4597 }
4598 }
4599
4600 /* Setup the thread ht_cleanup communication pipe. */
4601 if (utils_create_pipe_cloexec(ht_cleanup_pipe) < 0) {
4602 goto exit;
4603 }
4604
4605 /* Setup the thread apps communication pipe. */
4606 if ((ret = utils_create_pipe_cloexec(apps_cmd_pipe)) < 0) {
4607 goto exit;
4608 }
4609
4610 /* Setup the thread apps notify communication pipe. */
4611 if (utils_create_pipe_cloexec(apps_cmd_notify_pipe) < 0) {
4612 goto exit;
4613 }
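
/*
 * utils_create_pipe_cloexec() is implemented in common/utils.c; a hedged
 * sketch using pipe(2) plus fcntl(2). On Linux, pipe2(2) with O_CLOEXEC
 * would be the race-free alternative (not compiled):
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int create_pipe_cloexec(int fds[2])
{
	int i;

	if (pipe(fds) < 0) {
		return -1;
	}
	for (i = 0; i < 2; i++) {
		/* Mark both ends close-on-exec so children do not inherit them. */
		if (fcntl(fds[i], F_SETFD, FD_CLOEXEC) < 0) {
			(void) close(fds[0]);
			(void) close(fds[1]);
			return -1;
		}
	}
	return 0;
}
#endif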
4614
4615 /* Initialize global buffer per UID and PID registry. */
4616 buffer_reg_init_uid_registry();
4617 buffer_reg_init_pid_registry();
4618
4619 /* Init UST command queue. */
4620 cds_wfq_init(&ust_cmd_queue.queue);
4621
4622 /*
4623	 * Get the session list pointer. This pointer MUST NOT be free'd. The
4624	 * list is statically declared in session.c.
4625 */
4626 session_list_ptr = session_get_list();
4627
4628 /* Set up max poll set size */
4629 lttng_poll_set_max_size();
4630
4631 cmd_init();
4632
4633 /* Check for the application socket timeout env variable. */
4634 env_app_timeout = getenv(DEFAULT_APP_SOCKET_TIMEOUT_ENV);
4635 if (env_app_timeout) {
4636 app_socket_timeout = atoi(env_app_timeout);
4637 } else {
4638 app_socket_timeout = DEFAULT_APP_SOCKET_RW_TIMEOUT;
4639 }
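
/*
 * atoi(3) silently yields 0 on garbage input. Should this parsing ever
 * need hardening, a hypothetical stricter variant with strtol(3) could
 * look like this (not compiled):
 */
#if 0
#include <errno.h>
#include <limits.h>
#include <stdlib.h>

static int parse_timeout(const char *str, int fallback)
{
	char *end;
	long val;

	errno = 0;
	val = strtol(str, &end, 10);
	if (errno != 0 || end == str || *end != '\0' ||
			val < -1 || val > INT_MAX) {
		return fallback; /* reject garbage, trailing junk, overflow */
	}
	return (int) val; /* -1 is kept: it may mean "no timeout" */
}
#endif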
4640
4641 write_pidfile();
4642
4643 /* Initialize communication library */
4644 lttcomm_init();
4645
4646 /* Create thread to clean up the hash tables. */
4647 ret = pthread_create(&ht_cleanup_thread, NULL,
4648 thread_ht_cleanup, (void *) NULL);
4649 if (ret != 0) {
4650 PERROR("pthread_create ht_cleanup");
4651 goto exit_ht_cleanup;
4652 }
4653
4654 /* Create thread to manage the health check socket. */
4655 ret = pthread_create(&health_thread, NULL,
4656 thread_manage_health, (void *) NULL);
4657 if (ret != 0) {
4658 PERROR("pthread_create health");
4659 goto exit_health;
4660 }
4661
4662 /* Create thread to manage the client socket */
4663 ret = pthread_create(&client_thread, NULL,
4664 thread_manage_clients, (void *) NULL);
4665 if (ret != 0) {
4666 PERROR("pthread_create clients");
4667 goto exit_client;
4668 }
4669
4670 /* Create thread to dispatch registration */
4671 ret = pthread_create(&dispatch_thread, NULL,
4672 thread_dispatch_ust_registration, (void *) NULL);
4673 if (ret != 0) {
4674 PERROR("pthread_create dispatch");
4675 goto exit_dispatch;
4676 }
4677
4678 /* Create thread to manage application registration. */
4679 ret = pthread_create(&reg_apps_thread, NULL,
4680 thread_registration_apps, (void *) NULL);
4681 if (ret != 0) {
4682 PERROR("pthread_create registration");
4683 goto exit_reg_apps;
4684 }
4685
4686 /* Create thread to manage application socket */
4687 ret = pthread_create(&apps_thread, NULL,
4688 thread_manage_apps, (void *) NULL);
4689 if (ret != 0) {
4690 PERROR("pthread_create apps");
4691 goto exit_apps;
4692 }
4693
4694 /* Create thread to manage application notify socket */
4695 ret = pthread_create(&apps_notify_thread, NULL,
4696 ust_thread_manage_notify, (void *) NULL);
4697 if (ret != 0) {
4698 PERROR("pthread_create apps");
4699 goto exit_apps;
4700 }
4701
4702 /* Don't start this thread unless we are root and kernel tracing is requested */
4703 if (is_root && !opt_no_kernel) {
4704 /* Create kernel thread to manage kernel event */
4705 ret = pthread_create(&kernel_thread, NULL,
4706 thread_manage_kernel, (void *) NULL);
4707 if (ret != 0) {
4708 PERROR("pthread_create kernel");
4709 goto exit_kernel;
4710 }
4711
4712 ret = pthread_join(kernel_thread, &status);
4713 if (ret != 0) {
4714 PERROR("pthread_join");
4715 goto error; /* join error, exit without cleanup */
4716 }
4717 }
4718
4719 exit_kernel:
4720 ret = pthread_join(apps_thread, &status);
4721 if (ret != 0) {
4722 PERROR("pthread_join");
4723 goto error; /* join error, exit without cleanup */
4724 }
4725
4726 exit_apps:
4727 ret = pthread_join(reg_apps_thread, &status);
4728 if (ret != 0) {
4729 PERROR("pthread_join");
4730 goto error; /* join error, exit without cleanup */
4731 }
4732
4733 exit_reg_apps:
4734 ret = pthread_join(dispatch_thread, &status);
4735 if (ret != 0) {
4736 PERROR("pthread_join");
4737 goto error; /* join error, exit without cleanup */
4738 }
4739
4740 exit_dispatch:
4741 ret = pthread_join(client_thread, &status);
4742 if (ret != 0) {
4743 PERROR("pthread_join");
4744 goto error; /* join error, exit without cleanup */
4745 }
4746
4747 ret = join_consumer_thread(&kconsumer_data);
4748 if (ret != 0) {
4749 PERROR("join_consumer");
4750 goto error; /* join error, exit without cleanup */
4751 }
4752
4753 ret = join_consumer_thread(&ustconsumer32_data);
4754 if (ret != 0) {
4755 PERROR("join_consumer ust32");
4756 goto error; /* join error, exit without cleanup */
4757 }
4758
4759 ret = join_consumer_thread(&ustconsumer64_data);
4760 if (ret != 0) {
4761 PERROR("join_consumer ust64");
4762 goto error; /* join error, exit without cleanup */
4763 }
4764
4765 exit_client:
4766 ret = pthread_join(health_thread, &status);
4767 if (ret != 0) {
4768 PERROR("pthread_join health thread");
4769 goto error; /* join error, exit without cleanup */
4770 }
4771
4772 exit_health:
4773 ret = pthread_join(ht_cleanup_thread, &status);
4774 if (ret != 0) {
4775 PERROR("pthread_join ht cleanup thread");
4776 goto error; /* join error, exit without cleanup */
4777 }
4778 exit_ht_cleanup:
4779 exit:
4780 /*
4781 * cleanup() is called when no other thread is running.
4782 */
4783 rcu_thread_online();
4784 cleanup();
4785 rcu_thread_offline();
4786 rcu_unregister_thread();
4787 if (!ret) {
4788 exit(EXIT_SUCCESS);
4789 }
4790 error:
4791 exit(EXIT_FAILURE);
4792 }