a54c0bf8df0a7c76d1bd39f4a4b99273501cb214
[lttng-tools.git] / src / bin / lttng-sessiond / main.c
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2 only,
7 * as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19 #define _GNU_SOURCE
20 #include <getopt.h>
21 #include <grp.h>
22 #include <limits.h>
23 #include <pthread.h>
24 #include <signal.h>
25 #include <stdio.h>
26 #include <stdlib.h>
27 #include <string.h>
28 #include <inttypes.h>
29 #include <sys/mman.h>
30 #include <sys/mount.h>
31 #include <sys/resource.h>
32 #include <sys/socket.h>
33 #include <sys/stat.h>
34 #include <sys/types.h>
35 #include <sys/wait.h>
36 #include <urcu/uatomic.h>
37 #include <unistd.h>
38 #include <config.h>
39
40 #include <common/common.h>
41 #include <common/compat/socket.h>
42 #include <common/defaults.h>
43 #include <common/kernel-consumer/kernel-consumer.h>
44 #include <common/futex.h>
45 #include <common/relayd/relayd.h>
46 #include <common/utils.h>
47
48 #include "lttng-sessiond.h"
49 #include "buffer-registry.h"
50 #include "channel.h"
51 #include "cmd.h"
52 #include "consumer.h"
53 #include "context.h"
54 #include "event.h"
55 #include "kernel.h"
56 #include "kernel-consumer.h"
57 #include "modprobe.h"
58 #include "shm.h"
59 #include "ust-ctl.h"
60 #include "ust-consumer.h"
61 #include "utils.h"
62 #include "fd-limit.h"
63 #include "health-sessiond.h"
64 #include "testpoint.h"
65 #include "ust-thread.h"
66
67 #define CONSUMERD_FILE "lttng-consumerd"
68
69 /* Const values */
70 const char default_tracing_group[] = DEFAULT_TRACING_GROUP;
71
72 const char *progname;
73 const char *opt_tracing_group;
74 static const char *opt_pidfile;
75 static int opt_sig_parent;
76 static int opt_verbose_consumer;
77 static int opt_daemon;
78 static int opt_no_kernel;
79 static int is_root; /* Set to 1 if the daemon is running as root */
80 static pid_t ppid; /* Parent PID for --sig-parent option */
81 static char *rundir;
82
83 /*
84 * Consumer daemon specific control data. Every value not initialized here is
85 * set to 0 by the static definition.
86 */
87 static struct consumer_data kconsumer_data = {
88 .type = LTTNG_CONSUMER_KERNEL,
89 .err_unix_sock_path = DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
90 .cmd_unix_sock_path = DEFAULT_KCONSUMERD_CMD_SOCK_PATH,
91 .err_sock = -1,
92 .cmd_sock = -1,
93 .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
94 .lock = PTHREAD_MUTEX_INITIALIZER,
95 .cond = PTHREAD_COND_INITIALIZER,
96 .cond_mutex = PTHREAD_MUTEX_INITIALIZER,
97 };
98 static struct consumer_data ustconsumer64_data = {
99 .type = LTTNG_CONSUMER64_UST,
100 .err_unix_sock_path = DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
101 .cmd_unix_sock_path = DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH,
102 .err_sock = -1,
103 .cmd_sock = -1,
104 .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
105 .lock = PTHREAD_MUTEX_INITIALIZER,
106 .cond = PTHREAD_COND_INITIALIZER,
107 .cond_mutex = PTHREAD_MUTEX_INITIALIZER,
108 };
109 static struct consumer_data ustconsumer32_data = {
110 .type = LTTNG_CONSUMER32_UST,
111 .err_unix_sock_path = DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
112 .cmd_unix_sock_path = DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH,
113 .err_sock = -1,
114 .cmd_sock = -1,
115 .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
116 .lock = PTHREAD_MUTEX_INITIALIZER,
117 .cond = PTHREAD_COND_INITIALIZER,
118 .cond_mutex = PTHREAD_MUTEX_INITIALIZER,
119 };
120
121 /* Shared between threads */
122 static int dispatch_thread_exit;
123
124 /* Global application Unix socket path */
125 static char apps_unix_sock_path[PATH_MAX];
126 /* Global client Unix socket path */
127 static char client_unix_sock_path[PATH_MAX];
128 /* global wait shm path for UST */
129 static char wait_shm_path[PATH_MAX];
130 /* Global health check unix path */
131 static char health_unix_sock_path[PATH_MAX];
132
133 /* Sockets and FDs */
134 static int client_sock = -1;
135 static int apps_sock = -1;
136 int kernel_tracer_fd = -1;
137 static int kernel_poll_pipe[2] = { -1, -1 };
138
139 /*
140 * Quit pipe for all threads. This permits a single cancellation point
141 * for all threads when receiving an event on the pipe.
142 */
143 static int thread_quit_pipe[2] = { -1, -1 };
144
145 /*
146 * This pipe is used to inform the thread managing application communication
147 * that a command is queued and ready to be processed.
148 */
149 static int apps_cmd_pipe[2] = { -1, -1 };
150
151 int apps_cmd_notify_pipe[2] = { -1, -1 };
152
153 /* Pthread, Mutexes and Semaphores */
154 static pthread_t apps_thread;
155 static pthread_t apps_notify_thread;
156 static pthread_t reg_apps_thread;
157 static pthread_t client_thread;
158 static pthread_t kernel_thread;
159 static pthread_t dispatch_thread;
160 static pthread_t health_thread;
161 static pthread_t ht_cleanup_thread;
162
163 /*
164 * UST registration command queue. This queue is tied with a futex and uses a N
165 * wakers / 1 waiter implemented and detailed in futex.c/.h
166 *
167 * The thread_manage_apps and thread_dispatch_ust_registration interact with
168 * this queue and the wait/wake scheme.
169 */
170 static struct ust_cmd_queue ust_cmd_queue;
171
172 /*
173 * Pointer initialized before thread creation.
174 *
175 * This points to the tracing session list containing the session count and a
176 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
177 * MUST NOT be taken if you call a public function in session.c.
178 *
179 * The lock is nested inside the structure: session_list_ptr->lock. Please use
180 * session_lock_list and session_unlock_list for lock acquisition.
181 */
182 static struct ltt_session_list *session_list_ptr;
183
184 int ust_consumerd64_fd = -1;
185 int ust_consumerd32_fd = -1;
186
187 static const char *consumerd32_bin = CONFIG_CONSUMERD32_BIN;
188 static const char *consumerd64_bin = CONFIG_CONSUMERD64_BIN;
189 static const char *consumerd32_libdir = CONFIG_CONSUMERD32_LIBDIR;
190 static const char *consumerd64_libdir = CONFIG_CONSUMERD64_LIBDIR;
191
192 static const char *module_proc_lttng = "/proc/lttng";
193
194 /*
195 * Consumer daemon state which is changed when spawning it, killing it or in
196 * case of a fatal error.
197 */
198 enum consumerd_state {
199 CONSUMER_STARTED = 1,
200 CONSUMER_STOPPED = 2,
201 CONSUMER_ERROR = 3,
202 };
203
204 /*
205 * This consumer daemon state is used to validate if a client command will be
206 * able to reach the consumer. If not, the client is informed. For instance,
207 * doing a "lttng start" when the consumer state is set to ERROR will return an
208 * error to the client.
209 *
210 * The following example shows a possible race condition of this scheme:
211 *
212 * consumer thread error happens
213 * client cmd arrives
214 * client cmd checks state -> still OK
215 * consumer thread exit, sets error
216 * client cmd try to talk to consumer
217 * ...
218 *
219 * However, since the consumer is a different daemon, we have no way of making
220 * sure the command will reach it safely even with this state flag. This is why
221 * we consider that up to the state validation during command processing, the
222 * command is safe. After that, we can not guarantee the correctness of the
223 * client request vis-a-vis the consumer.
224 */
225 static enum consumerd_state ust_consumerd_state;
226 static enum consumerd_state kernel_consumerd_state;
227
228 /*
229 * Socket timeout for receiving and sending in seconds.
230 */
231 static int app_socket_timeout;
232
233 /* Set in main() with the current page size. */
234 long page_size;
235
236 /* Application health monitoring */
237 struct health_app *health_sessiond;
238
239 static
240 void setup_consumerd_path(void)
241 {
242 const char *bin, *libdir;
243
244 /*
245 * Allow INSTALL_BIN_PATH to be used as a target path for the
246 * native architecture size consumer if CONFIG_CONSUMER*_PATH
247 * has not been defined.
248 */
249 #if (CAA_BITS_PER_LONG == 32)
250 if (!consumerd32_bin[0]) {
251 consumerd32_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
252 }
253 if (!consumerd32_libdir[0]) {
254 consumerd32_libdir = INSTALL_LIB_PATH;
255 }
256 #elif (CAA_BITS_PER_LONG == 64)
257 if (!consumerd64_bin[0]) {
258 consumerd64_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
259 }
260 if (!consumerd64_libdir[0]) {
261 consumerd64_libdir = INSTALL_LIB_PATH;
262 }
263 #else
264 #error "Unknown bitness"
265 #endif
266
267 /*
268 * runtime env. var. overrides the build default.
269 */
270 bin = getenv("LTTNG_CONSUMERD32_BIN");
271 if (bin) {
272 consumerd32_bin = bin;
273 }
274 bin = getenv("LTTNG_CONSUMERD64_BIN");
275 if (bin) {
276 consumerd64_bin = bin;
277 }
278 libdir = getenv("LTTNG_CONSUMERD32_LIBDIR");
279 if (libdir) {
280 consumerd32_libdir = libdir;
281 }
282 libdir = getenv("LTTNG_CONSUMERD64_LIBDIR");
283 if (libdir) {
284 consumerd64_libdir = libdir;
285 }
286 }
287
288 /*
289 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
290 */
291 int sessiond_set_thread_pollset(struct lttng_poll_event *events, size_t size)
292 {
293 int ret;
294
295 assert(events);
296
297 ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
298 if (ret < 0) {
299 goto error;
300 }
301
302 /* Add quit pipe */
303 ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
304 if (ret < 0) {
305 goto error;
306 }
307
308 return 0;
309
310 error:
311 return ret;
312 }
313
314 /*
315 * Check if the thread quit pipe was triggered.
316 *
317 * Return 1 if it was triggered else 0;
318 */
319 int sessiond_check_thread_quit_pipe(int fd, uint32_t events)
320 {
321 if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
322 return 1;
323 }
324
325 return 0;
326 }
327
328 /*
329 * Return group ID of the tracing group or -1 if not found.
330 */
331 static gid_t allowed_group(void)
332 {
333 struct group *grp;
334
335 if (opt_tracing_group) {
336 grp = getgrnam(opt_tracing_group);
337 } else {
338 grp = getgrnam(default_tracing_group);
339 }
340 if (!grp) {
341 return -1;
342 } else {
343 return grp->gr_gid;
344 }
345 }
346
347 /*
348 * Init thread quit pipe.
349 *
350 * Return -1 on error or 0 if all pipes are created.
351 */
352 static int init_thread_quit_pipe(void)
353 {
354 int ret, i;
355
356 ret = pipe(thread_quit_pipe);
357 if (ret < 0) {
358 PERROR("thread quit pipe");
359 goto error;
360 }
361
362 for (i = 0; i < 2; i++) {
363 ret = fcntl(thread_quit_pipe[i], F_SETFD, FD_CLOEXEC);
364 if (ret < 0) {
365 PERROR("fcntl");
366 goto error;
367 }
368 }
369
370 error:
371 return ret;
372 }
373
374 /*
375 * Stop all threads by closing the thread quit pipe.
376 */
377 static void stop_threads(void)
378 {
379 int ret;
380
381 /* Stopping all threads */
382 DBG("Terminating all threads");
383 ret = notify_thread_pipe(thread_quit_pipe[1]);
384 if (ret < 0) {
385 ERR("write error on thread quit pipe");
386 }
387
388 /* Dispatch thread */
389 CMM_STORE_SHARED(dispatch_thread_exit, 1);
390 futex_nto1_wake(&ust_cmd_queue.futex);
391 }
392
393 /*
394 * Close every consumer sockets.
395 */
396 static void close_consumer_sockets(void)
397 {
398 int ret;
399
400 if (kconsumer_data.err_sock >= 0) {
401 ret = close(kconsumer_data.err_sock);
402 if (ret < 0) {
403 PERROR("kernel consumer err_sock close");
404 }
405 }
406 if (ustconsumer32_data.err_sock >= 0) {
407 ret = close(ustconsumer32_data.err_sock);
408 if (ret < 0) {
409 PERROR("UST consumerd32 err_sock close");
410 }
411 }
412 if (ustconsumer64_data.err_sock >= 0) {
413 ret = close(ustconsumer64_data.err_sock);
414 if (ret < 0) {
415 PERROR("UST consumerd64 err_sock close");
416 }
417 }
418 if (kconsumer_data.cmd_sock >= 0) {
419 ret = close(kconsumer_data.cmd_sock);
420 if (ret < 0) {
421 PERROR("kernel consumer cmd_sock close");
422 }
423 }
424 if (ustconsumer32_data.cmd_sock >= 0) {
425 ret = close(ustconsumer32_data.cmd_sock);
426 if (ret < 0) {
427 PERROR("UST consumerd32 cmd_sock close");
428 }
429 }
430 if (ustconsumer64_data.cmd_sock >= 0) {
431 ret = close(ustconsumer64_data.cmd_sock);
432 if (ret < 0) {
433 PERROR("UST consumerd64 cmd_sock close");
434 }
435 }
436 }
437
438 /*
439 * Cleanup the daemon
440 */
441 static void cleanup(void)
442 {
443 int ret;
444 char *cmd = NULL;
445 struct ltt_session *sess, *stmp;
446
447 DBG("Cleaning up");
448
449 /*
450 * Close the thread quit pipe. It has already done its job,
451 * since we are now called.
452 */
453 utils_close_pipe(thread_quit_pipe);
454
455 /*
456 * If opt_pidfile is undefined, the default file will be wiped when
457 * removing the rundir.
458 */
459 if (opt_pidfile) {
460 ret = remove(opt_pidfile);
461 if (ret < 0) {
462 PERROR("remove pidfile %s", opt_pidfile);
463 }
464 }
465
466 DBG("Removing %s directory", rundir);
467 ret = asprintf(&cmd, "rm -rf %s", rundir);
468 if (ret < 0) {
469 ERR("asprintf failed. Something is really wrong!");
470 }
471
472 /* Remove lttng run directory */
473 ret = system(cmd);
474 if (ret < 0) {
475 ERR("Unable to clean %s", rundir);
476 }
477 free(cmd);
478 free(rundir);
479
480 DBG("Cleaning up all sessions");
481
482 /* Destroy session list mutex */
483 if (session_list_ptr != NULL) {
484 pthread_mutex_destroy(&session_list_ptr->lock);
485
486 /* Cleanup ALL session */
487 cds_list_for_each_entry_safe(sess, stmp,
488 &session_list_ptr->head, list) {
489 cmd_destroy_session(sess, kernel_poll_pipe[1]);
490 }
491 }
492
493 DBG("Closing all UST sockets");
494 ust_app_clean_list();
495 buffer_reg_destroy_registries();
496
497 if (is_root && !opt_no_kernel) {
498 DBG2("Closing kernel fd");
499 if (kernel_tracer_fd >= 0) {
500 ret = close(kernel_tracer_fd);
501 if (ret) {
502 PERROR("close");
503 }
504 }
505 DBG("Unloading kernel modules");
506 modprobe_remove_lttng_all();
507 }
508
509 close_consumer_sockets();
510
511 /* <fun> */
512 DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
513 "Matthew, BEET driven development works!%c[%dm",
514 27, 1, 31, 27, 0, 27, 1, 33, 27, 0);
515 /* </fun> */
516 }
517
518 /*
519 * Send data on a unix socket using the liblttsessiondcomm API.
520 *
521 * Return lttcomm error code.
522 */
523 static int send_unix_sock(int sock, void *buf, size_t len)
524 {
525 /* Check valid length */
526 if (len == 0) {
527 return -1;
528 }
529
530 return lttcomm_send_unix_sock(sock, buf, len);
531 }
532
533 /*
534 * Free memory of a command context structure.
535 */
536 static void clean_command_ctx(struct command_ctx **cmd_ctx)
537 {
538 DBG("Clean command context structure");
539 if (*cmd_ctx) {
540 if ((*cmd_ctx)->llm) {
541 free((*cmd_ctx)->llm);
542 }
543 if ((*cmd_ctx)->lsm) {
544 free((*cmd_ctx)->lsm);
545 }
546 free(*cmd_ctx);
547 *cmd_ctx = NULL;
548 }
549 }
550
551 /*
552 * Notify UST applications using the shm mmap futex.
553 */
554 static int notify_ust_apps(int active)
555 {
556 char *wait_shm_mmap;
557
558 DBG("Notifying applications of session daemon state: %d", active);
559
560 /* See shm.c for this call implying mmap, shm and futex calls */
561 wait_shm_mmap = shm_ust_get_mmap(wait_shm_path, is_root);
562 if (wait_shm_mmap == NULL) {
563 goto error;
564 }
565
566 /* Wake waiting process */
567 futex_wait_update((int32_t *) wait_shm_mmap, active);
568
569 /* Apps notified successfully */
570 return 0;
571
572 error:
573 return -1;
574 }
575
576 /*
577 * Setup the outgoing data buffer for the response (llm) by allocating the
578 * right amount of memory and copying the original information from the lsm
579 * structure.
580 *
581 * Return total size of the buffer pointed by buf.
582 */
583 static int setup_lttng_msg(struct command_ctx *cmd_ctx, size_t size)
584 {
585 int ret, buf_size;
586
587 buf_size = size;
588
589 cmd_ctx->llm = zmalloc(sizeof(struct lttcomm_lttng_msg) + buf_size);
590 if (cmd_ctx->llm == NULL) {
591 PERROR("zmalloc");
592 ret = -ENOMEM;
593 goto error;
594 }
595
596 /* Copy common data */
597 cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
598 cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;
599
600 cmd_ctx->llm->data_size = size;
601 cmd_ctx->lttng_msg_size = sizeof(struct lttcomm_lttng_msg) + buf_size;
602
603 return buf_size;
604
605 error:
606 return ret;
607 }
608
609 /*
610 * Update the kernel poll set of all channel fd available over all tracing
611 * session. Add the wakeup pipe at the end of the set.
612 */
613 static int update_kernel_poll(struct lttng_poll_event *events)
614 {
615 int ret;
616 struct ltt_session *session;
617 struct ltt_kernel_channel *channel;
618
619 DBG("Updating kernel poll set");
620
621 session_lock_list();
622 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
623 session_lock(session);
624 if (session->kernel_session == NULL) {
625 session_unlock(session);
626 continue;
627 }
628
629 cds_list_for_each_entry(channel,
630 &session->kernel_session->channel_list.head, list) {
631 /* Add channel fd to the kernel poll set */
632 ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
633 if (ret < 0) {
634 session_unlock(session);
635 goto error;
636 }
637 DBG("Channel fd %d added to kernel set", channel->fd);
638 }
639 session_unlock(session);
640 }
641 session_unlock_list();
642
643 return 0;
644
645 error:
646 session_unlock_list();
647 return -1;
648 }
649
650 /*
651 * Find the channel fd from 'fd' over all tracing session. When found, check
652 * for new channel stream and send those stream fds to the kernel consumer.
653 *
654 * Useful for CPU hotplug feature.
655 */
656 static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
657 {
658 int ret = 0;
659 struct ltt_session *session;
660 struct ltt_kernel_session *ksess;
661 struct ltt_kernel_channel *channel;
662
663 DBG("Updating kernel streams for channel fd %d", fd);
664
665 session_lock_list();
666 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
667 session_lock(session);
668 if (session->kernel_session == NULL) {
669 session_unlock(session);
670 continue;
671 }
672 ksess = session->kernel_session;
673
674 cds_list_for_each_entry(channel, &ksess->channel_list.head, list) {
675 if (channel->fd == fd) {
676 DBG("Channel found, updating kernel streams");
677 ret = kernel_open_channel_stream(channel);
678 if (ret < 0) {
679 goto error;
680 }
681 /* Update the stream global counter */
682 ksess->stream_count_global += ret;
683
684 /*
685 * Have we already sent fds to the consumer? If yes, it means
686 * that tracing is started so it is safe to send our updated
687 * stream fds.
688 */
689 if (ksess->consumer_fds_sent == 1 && ksess->consumer != NULL) {
690 struct lttng_ht_iter iter;
691 struct consumer_socket *socket;
692
693 rcu_read_lock();
694 cds_lfht_for_each_entry(ksess->consumer->socks->ht,
695 &iter.iter, socket, node.node) {
696 pthread_mutex_lock(socket->lock);
697 ret = kernel_consumer_send_channel_stream(socket,
698 channel, ksess,
699 session->output_traces ? 1 : 0);
700 pthread_mutex_unlock(socket->lock);
701 if (ret < 0) {
702 rcu_read_unlock();
703 goto error;
704 }
705 }
706 rcu_read_unlock();
707 }
708 goto error;
709 }
710 }
711 session_unlock(session);
712 }
713 session_unlock_list();
714 return ret;
715
716 error:
717 session_unlock(session);
718 session_unlock_list();
719 return ret;
720 }
721
722 /*
723 * For each tracing session, update newly registered apps. The session list
724 * lock MUST be acquired before calling this.
725 */
726 static void update_ust_app(int app_sock)
727 {
728 struct ltt_session *sess, *stmp;
729
730 /* Consumer is in an ERROR state. Stop any application update. */
731 if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
732 /* Stop the update process since the consumer is dead. */
733 return;
734 }
735
736 /* For all tracing session(s) */
737 cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
738 session_lock(sess);
739 if (sess->ust_session) {
740 ust_app_global_update(sess->ust_session, app_sock);
741 }
742 session_unlock(sess);
743 }
744 }
745
746 /*
747 * This thread manage event coming from the kernel.
748 *
749 * Features supported in this thread:
750 * -) CPU Hotplug
751 */
752 static void *thread_manage_kernel(void *data)
753 {
754 int ret, i, pollfd, update_poll_flag = 1, err = -1;
755 uint32_t revents, nb_fd;
756 char tmp;
757 struct lttng_poll_event events;
758
759 DBG("[thread] Thread manage kernel started");
760
761 health_register(health_sessiond, HEALTH_TYPE_KERNEL);
762
763 /*
764 * This first step of the while is to clean this structure which could free
765 * non NULL pointers so initialize it before the loop.
766 */
767 lttng_poll_init(&events);
768
769 if (testpoint(thread_manage_kernel)) {
770 goto error_testpoint;
771 }
772
773 health_code_update();
774
775 if (testpoint(thread_manage_kernel_before_loop)) {
776 goto error_testpoint;
777 }
778
779 while (1) {
780 health_code_update();
781
782 if (update_poll_flag == 1) {
783 /* Clean events object. We are about to populate it again. */
784 lttng_poll_clean(&events);
785
786 ret = sessiond_set_thread_pollset(&events, 2);
787 if (ret < 0) {
788 goto error_poll_create;
789 }
790
791 ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);
792 if (ret < 0) {
793 goto error;
794 }
795
796 /* This will add the available kernel channel if any. */
797 ret = update_kernel_poll(&events);
798 if (ret < 0) {
799 goto error;
800 }
801 update_poll_flag = 0;
802 }
803
804 DBG("Thread kernel polling on %d fds", LTTNG_POLL_GETNB(&events));
805
806 /* Poll infinite value of time */
807 restart:
808 health_poll_entry();
809 ret = lttng_poll_wait(&events, -1);
810 health_poll_exit();
811 if (ret < 0) {
812 /*
813 * Restart interrupted system call.
814 */
815 if (errno == EINTR) {
816 goto restart;
817 }
818 goto error;
819 } else if (ret == 0) {
820 /* Should not happen since timeout is infinite */
821 ERR("Return value of poll is 0 with an infinite timeout.\n"
822 "This should not have happened! Continuing...");
823 continue;
824 }
825
826 nb_fd = ret;
827
828 for (i = 0; i < nb_fd; i++) {
829 /* Fetch once the poll data */
830 revents = LTTNG_POLL_GETEV(&events, i);
831 pollfd = LTTNG_POLL_GETFD(&events, i);
832
833 health_code_update();
834
835 /* Thread quit pipe has been closed. Killing thread. */
836 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
837 if (ret) {
838 err = 0;
839 goto exit;
840 }
841
842 /* Check for data on kernel pipe */
843 if (pollfd == kernel_poll_pipe[0] && (revents & LPOLLIN)) {
844 do {
845 ret = read(kernel_poll_pipe[0], &tmp, 1);
846 } while (ret < 0 && errno == EINTR);
847 /*
848 * Ret value is useless here, if this pipe gets any actions an
849 * update is required anyway.
850 */
851 update_poll_flag = 1;
852 continue;
853 } else {
854 /*
855 * New CPU detected by the kernel. Adding kernel stream to
856 * kernel session and updating the kernel consumer
857 */
858 if (revents & LPOLLIN) {
859 ret = update_kernel_stream(&kconsumer_data, pollfd);
860 if (ret < 0) {
861 continue;
862 }
863 break;
864 /*
865 * TODO: We might want to handle the LPOLLERR | LPOLLHUP
866 * and unregister kernel stream at this point.
867 */
868 }
869 }
870 }
871 }
872
873 exit:
874 error:
875 lttng_poll_clean(&events);
876 error_poll_create:
877 error_testpoint:
878 utils_close_pipe(kernel_poll_pipe);
879 kernel_poll_pipe[0] = kernel_poll_pipe[1] = -1;
880 if (err) {
881 health_error();
882 ERR("Health error occurred in %s", __func__);
883 WARN("Kernel thread died unexpectedly. "
884 "Kernel tracing can continue but CPU hotplug is disabled.");
885 }
886 health_unregister(health_sessiond);
887 DBG("Kernel thread dying");
888 return NULL;
889 }
890
891 /*
892 * Signal pthread condition of the consumer data that the thread.
893 */
894 static void signal_consumer_condition(struct consumer_data *data, int state)
895 {
896 pthread_mutex_lock(&data->cond_mutex);
897
898 /*
899 * The state is set before signaling. It can be any value, it's the waiter
900 * job to correctly interpret this condition variable associated to the
901 * consumer pthread_cond.
902 *
903 * A value of 0 means that the corresponding thread of the consumer data
904 * was not started. 1 indicates that the thread has started and is ready
905 * for action. A negative value means that there was an error during the
906 * thread bootstrap.
907 */
908 data->consumer_thread_is_ready = state;
909 (void) pthread_cond_signal(&data->cond);
910
911 pthread_mutex_unlock(&data->cond_mutex);
912 }
913
914 /*
915 * This thread manage the consumer error sent back to the session daemon.
916 */
917 static void *thread_manage_consumer(void *data)
918 {
919 int sock = -1, i, ret, pollfd, err = -1;
920 uint32_t revents, nb_fd;
921 enum lttcomm_return_code code;
922 struct lttng_poll_event events;
923 struct consumer_data *consumer_data = data;
924
925 DBG("[thread] Manage consumer started");
926
927 health_register(health_sessiond, HEALTH_TYPE_CONSUMER);
928
929 health_code_update();
930
931 /*
932 * Pass 3 as size here for the thread quit pipe, consumerd_err_sock and the
933 * metadata_sock. Nothing more will be added to this poll set.
934 */
935 ret = sessiond_set_thread_pollset(&events, 3);
936 if (ret < 0) {
937 goto error_poll;
938 }
939
940 /*
941 * The error socket here is already in a listening state which was done
942 * just before spawning this thread to avoid a race between the consumer
943 * daemon exec trying to connect and the listen() call.
944 */
945 ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
946 if (ret < 0) {
947 goto error;
948 }
949
950 health_code_update();
951
952 /* Infinite blocking call, waiting for transmission */
953 restart:
954 health_poll_entry();
955
956 if (testpoint(thread_manage_consumer)) {
957 goto error;
958 }
959
960 ret = lttng_poll_wait(&events, -1);
961 health_poll_exit();
962 if (ret < 0) {
963 /*
964 * Restart interrupted system call.
965 */
966 if (errno == EINTR) {
967 goto restart;
968 }
969 goto error;
970 }
971
972 nb_fd = ret;
973
974 for (i = 0; i < nb_fd; i++) {
975 /* Fetch once the poll data */
976 revents = LTTNG_POLL_GETEV(&events, i);
977 pollfd = LTTNG_POLL_GETFD(&events, i);
978
979 health_code_update();
980
981 /* Thread quit pipe has been closed. Killing thread. */
982 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
983 if (ret) {
984 err = 0;
985 goto exit;
986 }
987
988 /* Event on the registration socket */
989 if (pollfd == consumer_data->err_sock) {
990 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
991 ERR("consumer err socket poll error");
992 goto error;
993 }
994 }
995 }
996
997 sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
998 if (sock < 0) {
999 goto error;
1000 }
1001
1002 /*
1003 * Set the CLOEXEC flag. Return code is useless because either way, the
1004 * show must go on.
1005 */
1006 (void) utils_set_fd_cloexec(sock);
1007
1008 health_code_update();
1009
1010 DBG2("Receiving code from consumer err_sock");
1011
1012 /* Getting status code from kconsumerd */
1013 ret = lttcomm_recv_unix_sock(sock, &code,
1014 sizeof(enum lttcomm_return_code));
1015 if (ret <= 0) {
1016 goto error;
1017 }
1018
1019 health_code_update();
1020
1021 if (code == LTTCOMM_CONSUMERD_COMMAND_SOCK_READY) {
1022 /* Connect both socket, command and metadata. */
1023 consumer_data->cmd_sock =
1024 lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
1025 consumer_data->metadata_fd =
1026 lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
1027 if (consumer_data->cmd_sock < 0
1028 || consumer_data->metadata_fd < 0) {
1029 PERROR("consumer connect cmd socket");
1030 /* On error, signal condition and quit. */
1031 signal_consumer_condition(consumer_data, -1);
1032 goto error;
1033 }
1034 consumer_data->metadata_sock.fd_ptr = &consumer_data->metadata_fd;
1035 /* Create metadata socket lock. */
1036 consumer_data->metadata_sock.lock = zmalloc(sizeof(pthread_mutex_t));
1037 if (consumer_data->metadata_sock.lock == NULL) {
1038 PERROR("zmalloc pthread mutex");
1039 ret = -1;
1040 goto error;
1041 }
1042 pthread_mutex_init(consumer_data->metadata_sock.lock, NULL);
1043
1044 signal_consumer_condition(consumer_data, 1);
1045 DBG("Consumer command socket ready (fd: %d", consumer_data->cmd_sock);
1046 DBG("Consumer metadata socket ready (fd: %d)",
1047 consumer_data->metadata_fd);
1048 } else {
1049 ERR("consumer error when waiting for SOCK_READY : %s",
1050 lttcomm_get_readable_code(-code));
1051 goto error;
1052 }
1053
1054 /* Remove the consumerd error sock since we've established a connexion */
1055 ret = lttng_poll_del(&events, consumer_data->err_sock);
1056 if (ret < 0) {
1057 goto error;
1058 }
1059
1060 /* Add new accepted error socket. */
1061 ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
1062 if (ret < 0) {
1063 goto error;
1064 }
1065
1066 /* Add metadata socket that is successfully connected. */
1067 ret = lttng_poll_add(&events, consumer_data->metadata_fd,
1068 LPOLLIN | LPOLLRDHUP);
1069 if (ret < 0) {
1070 goto error;
1071 }
1072
1073 health_code_update();
1074
1075 /* Infinite blocking call, waiting for transmission */
1076 restart_poll:
1077 while (1) {
1078 health_poll_entry();
1079 ret = lttng_poll_wait(&events, -1);
1080 health_poll_exit();
1081 if (ret < 0) {
1082 /*
1083 * Restart interrupted system call.
1084 */
1085 if (errno == EINTR) {
1086 goto restart_poll;
1087 }
1088 goto error;
1089 }
1090
1091 nb_fd = ret;
1092
1093 for (i = 0; i < nb_fd; i++) {
1094 /* Fetch once the poll data */
1095 revents = LTTNG_POLL_GETEV(&events, i);
1096 pollfd = LTTNG_POLL_GETFD(&events, i);
1097
1098 health_code_update();
1099
1100 /* Thread quit pipe has been closed. Killing thread. */
1101 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
1102 if (ret) {
1103 err = 0;
1104 goto exit;
1105 }
1106
1107 if (pollfd == sock) {
1108 /* Event on the consumerd socket */
1109 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1110 ERR("consumer err socket second poll error");
1111 goto error;
1112 }
1113 health_code_update();
1114 /* Wait for any kconsumerd error */
1115 ret = lttcomm_recv_unix_sock(sock, &code,
1116 sizeof(enum lttcomm_return_code));
1117 if (ret <= 0) {
1118 ERR("consumer closed the command socket");
1119 goto error;
1120 }
1121
1122 ERR("consumer return code : %s",
1123 lttcomm_get_readable_code(-code));
1124
1125 goto exit;
1126 } else if (pollfd == consumer_data->metadata_fd) {
1127 /* UST metadata requests */
1128 ret = ust_consumer_metadata_request(
1129 &consumer_data->metadata_sock);
1130 if (ret < 0) {
1131 ERR("Handling metadata request");
1132 goto error;
1133 }
1134 break;
1135 } else {
1136 ERR("Unknown pollfd");
1137 goto error;
1138 }
1139 }
1140 health_code_update();
1141 }
1142
1143 exit:
1144 error:
1145 /*
1146 * We lock here because we are about to close the sockets and some other
1147 * thread might be using them so get exclusive access which will abort all
1148 * other consumer command by other threads.
1149 */
1150 pthread_mutex_lock(&consumer_data->lock);
1151
1152 /* Immediately set the consumerd state to stopped */
1153 if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
1154 uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR);
1155 } else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
1156 consumer_data->type == LTTNG_CONSUMER32_UST) {
1157 uatomic_set(&ust_consumerd_state, CONSUMER_ERROR);
1158 } else {
1159 /* Code flow error... */
1160 assert(0);
1161 }
1162
1163 if (consumer_data->err_sock >= 0) {
1164 ret = close(consumer_data->err_sock);
1165 if (ret) {
1166 PERROR("close");
1167 }
1168 consumer_data->err_sock = -1;
1169 }
1170 if (consumer_data->cmd_sock >= 0) {
1171 ret = close(consumer_data->cmd_sock);
1172 if (ret) {
1173 PERROR("close");
1174 }
1175 consumer_data->cmd_sock = -1;
1176 }
1177 if (*consumer_data->metadata_sock.fd_ptr >= 0) {
1178 ret = close(*consumer_data->metadata_sock.fd_ptr);
1179 if (ret) {
1180 PERROR("close");
1181 }
1182 }
1183
1184 if (sock >= 0) {
1185 ret = close(sock);
1186 if (ret) {
1187 PERROR("close");
1188 }
1189 }
1190
1191 unlink(consumer_data->err_unix_sock_path);
1192 unlink(consumer_data->cmd_unix_sock_path);
1193 consumer_data->pid = 0;
1194 pthread_mutex_unlock(&consumer_data->lock);
1195
1196 /* Cleanup metadata socket mutex. */
1197 pthread_mutex_destroy(consumer_data->metadata_sock.lock);
1198 free(consumer_data->metadata_sock.lock);
1199
1200 lttng_poll_clean(&events);
1201 error_poll:
1202 if (err) {
1203 health_error();
1204 ERR("Health error occurred in %s", __func__);
1205 }
1206 health_unregister(health_sessiond);
1207 DBG("consumer thread cleanup completed");
1208
1209 return NULL;
1210 }
1211
1212 /*
1213 * This thread manage application communication.
1214 */
1215 static void *thread_manage_apps(void *data)
1216 {
1217 int i, ret, pollfd, err = -1;
1218 uint32_t revents, nb_fd;
1219 struct lttng_poll_event events;
1220
1221 DBG("[thread] Manage application started");
1222
1223 rcu_register_thread();
1224 rcu_thread_online();
1225
1226 health_register(health_sessiond, HEALTH_TYPE_APP_MANAGE);
1227
1228 if (testpoint(thread_manage_apps)) {
1229 goto error_testpoint;
1230 }
1231
1232 health_code_update();
1233
1234 ret = sessiond_set_thread_pollset(&events, 2);
1235 if (ret < 0) {
1236 goto error_poll_create;
1237 }
1238
1239 ret = lttng_poll_add(&events, apps_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);
1240 if (ret < 0) {
1241 goto error;
1242 }
1243
1244 if (testpoint(thread_manage_apps_before_loop)) {
1245 goto error;
1246 }
1247
1248 health_code_update();
1249
1250 while (1) {
1251 DBG("Apps thread polling on %d fds", LTTNG_POLL_GETNB(&events));
1252
1253 /* Inifinite blocking call, waiting for transmission */
1254 restart:
1255 health_poll_entry();
1256 ret = lttng_poll_wait(&events, -1);
1257 health_poll_exit();
1258 if (ret < 0) {
1259 /*
1260 * Restart interrupted system call.
1261 */
1262 if (errno == EINTR) {
1263 goto restart;
1264 }
1265 goto error;
1266 }
1267
1268 nb_fd = ret;
1269
1270 for (i = 0; i < nb_fd; i++) {
1271 /* Fetch once the poll data */
1272 revents = LTTNG_POLL_GETEV(&events, i);
1273 pollfd = LTTNG_POLL_GETFD(&events, i);
1274
1275 health_code_update();
1276
1277 /* Thread quit pipe has been closed. Killing thread. */
1278 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
1279 if (ret) {
1280 err = 0;
1281 goto exit;
1282 }
1283
1284 /* Inspect the apps cmd pipe */
1285 if (pollfd == apps_cmd_pipe[0]) {
1286 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1287 ERR("Apps command pipe error");
1288 goto error;
1289 } else if (revents & LPOLLIN) {
1290 int sock;
1291
1292 /* Empty pipe */
1293 do {
1294 ret = read(apps_cmd_pipe[0], &sock, sizeof(sock));
1295 } while (ret < 0 && errno == EINTR);
1296 if (ret < 0 || ret < sizeof(sock)) {
1297 PERROR("read apps cmd pipe");
1298 goto error;
1299 }
1300
1301 health_code_update();
1302
1303 /*
1304 * We only monitor the error events of the socket. This
1305 * thread does not handle any incoming data from UST
1306 * (POLLIN).
1307 */
1308 ret = lttng_poll_add(&events, sock,
1309 LPOLLERR | LPOLLHUP | LPOLLRDHUP);
1310 if (ret < 0) {
1311 goto error;
1312 }
1313
1314 /*
1315 * Set socket timeout for both receiving and ending.
1316 * app_socket_timeout is in seconds, whereas
1317 * lttcomm_setsockopt_rcv_timeout and
1318 * lttcomm_setsockopt_snd_timeout expect msec as
1319 * parameter.
1320 */
1321 (void) lttcomm_setsockopt_rcv_timeout(sock,
1322 app_socket_timeout * 1000);
1323 (void) lttcomm_setsockopt_snd_timeout(sock,
1324 app_socket_timeout * 1000);
1325
1326 DBG("Apps with sock %d added to poll set", sock);
1327
1328 health_code_update();
1329
1330 break;
1331 }
1332 } else {
1333 /*
1334 * At this point, we know that a registered application made
1335 * the event at poll_wait.
1336 */
1337 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1338 /* Removing from the poll set */
1339 ret = lttng_poll_del(&events, pollfd);
1340 if (ret < 0) {
1341 goto error;
1342 }
1343
1344 /* Socket closed on remote end. */
1345 ust_app_unregister(pollfd);
1346 break;
1347 }
1348 }
1349
1350 health_code_update();
1351 }
1352 }
1353
1354 exit:
1355 error:
1356 lttng_poll_clean(&events);
1357 error_poll_create:
1358 error_testpoint:
1359 utils_close_pipe(apps_cmd_pipe);
1360 apps_cmd_pipe[0] = apps_cmd_pipe[1] = -1;
1361
1362 /*
1363 * We don't clean the UST app hash table here since already registered
1364 * applications can still be controlled so let them be until the session
1365 * daemon dies or the applications stop.
1366 */
1367
1368 if (err) {
1369 health_error();
1370 ERR("Health error occurred in %s", __func__);
1371 }
1372 health_unregister(health_sessiond);
1373 DBG("Application communication apps thread cleanup complete");
1374 rcu_thread_offline();
1375 rcu_unregister_thread();
1376 return NULL;
1377 }
1378
1379 /*
1380 * Send a socket to a thread This is called from the dispatch UST registration
1381 * thread once all sockets are set for the application.
1382 *
1383 * The sock value can be invalid, we don't really care, the thread will handle
1384 * it and make the necessary cleanup if so.
1385 *
1386 * On success, return 0 else a negative value being the errno message of the
1387 * write().
1388 */
1389 static int send_socket_to_thread(int fd, int sock)
1390 {
1391 int ret;
1392
1393 /*
1394 * It's possible that the FD is set as invalid with -1 concurrently just
1395 * before calling this function being a shutdown state of the thread.
1396 */
1397 if (fd < 0) {
1398 ret = -EBADF;
1399 goto error;
1400 }
1401
1402 do {
1403 ret = write(fd, &sock, sizeof(sock));
1404 } while (ret < 0 && errno == EINTR);
1405 if (ret < 0 || ret != sizeof(sock)) {
1406 PERROR("write apps pipe %d", fd);
1407 if (ret < 0) {
1408 ret = -errno;
1409 }
1410 goto error;
1411 }
1412
1413 /* All good. Don't send back the write positive ret value. */
1414 ret = 0;
1415 error:
1416 return ret;
1417 }
1418
1419 /*
1420 * Sanitize the wait queue of the dispatch registration thread meaning removing
1421 * invalid nodes from it. This is to avoid memory leaks for the case the UST
1422 * notify socket is never received.
1423 */
1424 static void sanitize_wait_queue(struct ust_reg_wait_queue *wait_queue)
1425 {
1426 int ret, nb_fd = 0, i;
1427 unsigned int fd_added = 0;
1428 struct lttng_poll_event events;
1429 struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
1430
1431 assert(wait_queue);
1432
1433 lttng_poll_init(&events);
1434
1435 /* Just skip everything for an empty queue. */
1436 if (!wait_queue->count) {
1437 goto end;
1438 }
1439
1440 ret = lttng_poll_create(&events, wait_queue->count, LTTNG_CLOEXEC);
1441 if (ret < 0) {
1442 goto error_create;
1443 }
1444
1445 cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
1446 &wait_queue->head, head) {
1447 assert(wait_node->app);
1448 ret = lttng_poll_add(&events, wait_node->app->sock,
1449 LPOLLHUP | LPOLLERR);
1450 if (ret < 0) {
1451 goto error;
1452 }
1453
1454 fd_added = 1;
1455 }
1456
1457 if (!fd_added) {
1458 goto end;
1459 }
1460
1461 /*
1462 * Poll but don't block so we can quickly identify the faulty events and
1463 * clean them afterwards from the wait queue.
1464 */
1465 ret = lttng_poll_wait(&events, 0);
1466 if (ret < 0) {
1467 goto error;
1468 }
1469 nb_fd = ret;
1470
1471 for (i = 0; i < nb_fd; i++) {
1472 /* Get faulty FD. */
1473 uint32_t revents = LTTNG_POLL_GETEV(&events, i);
1474 int pollfd = LTTNG_POLL_GETFD(&events, i);
1475
1476 cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
1477 &wait_queue->head, head) {
1478 if (pollfd == wait_node->app->sock &&
1479 (revents & (LPOLLHUP | LPOLLERR))) {
1480 cds_list_del(&wait_node->head);
1481 wait_queue->count--;
1482 ust_app_destroy(wait_node->app);
1483 free(wait_node);
1484 break;
1485 }
1486 }
1487 }
1488
1489 if (nb_fd > 0) {
1490 DBG("Wait queue sanitized, %d node were cleaned up", nb_fd);
1491 }
1492
1493 end:
1494 lttng_poll_clean(&events);
1495 return;
1496
1497 error:
1498 lttng_poll_clean(&events);
1499 error_create:
1500 ERR("Unable to sanitize wait queue");
1501 return;
1502 }
1503
1504 /*
1505 * Dispatch request from the registration threads to the application
1506 * communication thread.
1507 */
1508 static void *thread_dispatch_ust_registration(void *data)
1509 {
1510 int ret, err = -1;
1511 struct cds_wfq_node *node;
1512 struct ust_command *ust_cmd = NULL;
1513 struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
1514 struct ust_reg_wait_queue wait_queue = {
1515 .count = 0,
1516 };
1517
1518 health_register(health_sessiond, HEALTH_TYPE_APP_REG_DISPATCH);
1519
1520 health_code_update();
1521
1522 CDS_INIT_LIST_HEAD(&wait_queue.head);
1523
1524 DBG("[thread] Dispatch UST command started");
1525
1526 while (!CMM_LOAD_SHARED(dispatch_thread_exit)) {
1527 health_code_update();
1528
1529 /* Atomically prepare the queue futex */
1530 futex_nto1_prepare(&ust_cmd_queue.futex);
1531
1532 do {
1533 struct ust_app *app = NULL;
1534 ust_cmd = NULL;
1535
1536 /*
1537 * Make sure we don't have node(s) that have hung up before receiving
1538 * the notify socket. This is to clean the list in order to avoid
1539 * memory leaks from notify socket that are never seen.
1540 */
1541 sanitize_wait_queue(&wait_queue);
1542
1543 health_code_update();
1544 /* Dequeue command for registration */
1545 node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue);
1546 if (node == NULL) {
1547 DBG("Woken up but nothing in the UST command queue");
1548 /* Continue thread execution */
1549 break;
1550 }
1551
1552 ust_cmd = caa_container_of(node, struct ust_command, node);
1553
1554 DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
1555 " gid:%d sock:%d name:%s (version %d.%d)",
1556 ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
1557 ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
1558 ust_cmd->sock, ust_cmd->reg_msg.name,
1559 ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
1560
1561 if (ust_cmd->reg_msg.type == USTCTL_SOCKET_CMD) {
1562 wait_node = zmalloc(sizeof(*wait_node));
1563 if (!wait_node) {
1564 PERROR("zmalloc wait_node dispatch");
1565 ret = close(ust_cmd->sock);
1566 if (ret < 0) {
1567 PERROR("close ust sock dispatch %d", ust_cmd->sock);
1568 }
1569 lttng_fd_put(1, LTTNG_FD_APPS);
1570 free(ust_cmd);
1571 goto error;
1572 }
1573 CDS_INIT_LIST_HEAD(&wait_node->head);
1574
1575 /* Create application object if socket is CMD. */
1576 wait_node->app = ust_app_create(&ust_cmd->reg_msg,
1577 ust_cmd->sock);
1578 if (!wait_node->app) {
1579 ret = close(ust_cmd->sock);
1580 if (ret < 0) {
1581 PERROR("close ust sock dispatch %d", ust_cmd->sock);
1582 }
1583 lttng_fd_put(1, LTTNG_FD_APPS);
1584 free(wait_node);
1585 free(ust_cmd);
1586 continue;
1587 }
1588 /*
1589 * Add application to the wait queue so we can set the notify
1590 * socket before putting this object in the global ht.
1591 */
1592 cds_list_add(&wait_node->head, &wait_queue.head);
1593 wait_queue.count++;
1594
1595 free(ust_cmd);
1596 /*
1597 * We have to continue here since we don't have the notify
1598 * socket and the application MUST be added to the hash table
1599 * only at that moment.
1600 */
1601 continue;
1602 } else {
1603 /*
1604 * Look for the application in the local wait queue and set the
1605 * notify socket if found.
1606 */
1607 cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
1608 &wait_queue.head, head) {
1609 health_code_update();
1610 if (wait_node->app->pid == ust_cmd->reg_msg.pid) {
1611 wait_node->app->notify_sock = ust_cmd->sock;
1612 cds_list_del(&wait_node->head);
1613 wait_queue.count--;
1614 app = wait_node->app;
1615 free(wait_node);
1616 DBG3("UST app notify socket %d is set", ust_cmd->sock);
1617 break;
1618 }
1619 }
1620
1621 /*
1622 * With no application at this stage the received socket is
1623 * basically useless so close it before we free the cmd data
1624 * structure for good.
1625 */
1626 if (!app) {
1627 ret = close(ust_cmd->sock);
1628 if (ret < 0) {
1629 PERROR("close ust sock dispatch %d", ust_cmd->sock);
1630 }
1631 lttng_fd_put(1, LTTNG_FD_APPS);
1632 }
1633 free(ust_cmd);
1634 }
1635
1636 if (app) {
1637 /*
1638 * @session_lock_list
1639 *
1640 * Lock the global session list so from the register up to the
1641 * registration done message, no thread can see the application
1642 * and change its state.
1643 */
1644 session_lock_list();
1645 rcu_read_lock();
1646
1647 /*
1648 * Add application to the global hash table. This needs to be
1649 * done before the update to the UST registry can locate the
1650 * application.
1651 */
1652 ust_app_add(app);
1653
1654 /* Set app version. This call will print an error if needed. */
1655 (void) ust_app_version(app);
1656
1657 /* Send notify socket through the notify pipe. */
1658 ret = send_socket_to_thread(apps_cmd_notify_pipe[1],
1659 app->notify_sock);
1660 if (ret < 0) {
1661 rcu_read_unlock();
1662 session_unlock_list();
1663 /*
1664 * No notify thread, stop the UST tracing. However, this is
1665 * not an internal error of the this thread thus setting
1666 * the health error code to a normal exit.
1667 */
1668 err = 0;
1669 goto error;
1670 }
1671
1672 /*
1673 * Update newly registered application with the tracing
1674 * registry info already enabled information.
1675 */
1676 update_ust_app(app->sock);
1677
1678 /*
1679 * Don't care about return value. Let the manage apps threads
1680 * handle app unregistration upon socket close.
1681 */
1682 (void) ust_app_register_done(app->sock);
1683
1684 /*
1685 * Even if the application socket has been closed, send the app
1686 * to the thread and unregistration will take place at that
1687 * place.
1688 */
1689 ret = send_socket_to_thread(apps_cmd_pipe[1], app->sock);
1690 if (ret < 0) {
1691 rcu_read_unlock();
1692 session_unlock_list();
1693 /*
1694 * No apps. thread, stop the UST tracing. However, this is
1695 * not an internal error of the this thread thus setting
1696 * the health error code to a normal exit.
1697 */
1698 err = 0;
1699 goto error;
1700 }
1701
1702 rcu_read_unlock();
1703 session_unlock_list();
1704 }
1705 } while (node != NULL);
1706
1707 health_poll_entry();
1708 /* Futex wait on queue. Blocking call on futex() */
1709 futex_nto1_wait(&ust_cmd_queue.futex);
1710 health_poll_exit();
1711 }
1712 /* Normal exit, no error */
1713 err = 0;
1714
1715 error:
1716 /* Clean up wait queue. */
1717 cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
1718 &wait_queue.head, head) {
1719 cds_list_del(&wait_node->head);
1720 wait_queue.count--;
1721 free(wait_node);
1722 }
1723
1724 DBG("Dispatch thread dying");
1725 if (err) {
1726 health_error();
1727 ERR("Health error occurred in %s", __func__);
1728 }
1729 health_unregister(health_sessiond);
1730 return NULL;
1731 }
1732
1733 /*
1734 * This thread manage application registration.
1735 */
1736 static void *thread_registration_apps(void *data)
1737 {
1738 int sock = -1, i, ret, pollfd, err = -1;
1739 uint32_t revents, nb_fd;
1740 struct lttng_poll_event events;
1741 /*
1742 * Get allocated in this thread, enqueued to a global queue, dequeued and
1743 * freed in the manage apps thread.
1744 */
1745 struct ust_command *ust_cmd = NULL;
1746
1747 DBG("[thread] Manage application registration started");
1748
1749 health_register(health_sessiond, HEALTH_TYPE_APP_REG);
1750
1751 if (testpoint(thread_registration_apps)) {
1752 goto error_testpoint;
1753 }
1754
1755 ret = lttcomm_listen_unix_sock(apps_sock);
1756 if (ret < 0) {
1757 goto error_listen;
1758 }
1759
1760 /*
1761 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
1762 * more will be added to this poll set.
1763 */
1764 ret = sessiond_set_thread_pollset(&events, 2);
1765 if (ret < 0) {
1766 goto error_create_poll;
1767 }
1768
1769 /* Add the application registration socket */
1770 ret = lttng_poll_add(&events, apps_sock, LPOLLIN | LPOLLRDHUP);
1771 if (ret < 0) {
1772 goto error_poll_add;
1773 }
1774
1775 /* Notify all applications to register */
1776 ret = notify_ust_apps(1);
1777 if (ret < 0) {
1778 ERR("Failed to notify applications or create the wait shared memory.\n"
1779 "Execution continues but there might be problem for already\n"
1780 "running applications that wishes to register.");
1781 }
1782
1783 while (1) {
1784 DBG("Accepting application registration");
1785
1786 /* Inifinite blocking call, waiting for transmission */
1787 restart:
1788 health_poll_entry();
1789 ret = lttng_poll_wait(&events, -1);
1790 health_poll_exit();
1791 if (ret < 0) {
1792 /*
1793 * Restart interrupted system call.
1794 */
1795 if (errno == EINTR) {
1796 goto restart;
1797 }
1798 goto error;
1799 }
1800
1801 nb_fd = ret;
1802
1803 for (i = 0; i < nb_fd; i++) {
1804 health_code_update();
1805
1806 /* Fetch once the poll data */
1807 revents = LTTNG_POLL_GETEV(&events, i);
1808 pollfd = LTTNG_POLL_GETFD(&events, i);
1809
1810 /* Thread quit pipe has been closed. Killing thread. */
1811 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
1812 if (ret) {
1813 err = 0;
1814 goto exit;
1815 }
1816
1817 /* Event on the registration socket */
1818 if (pollfd == apps_sock) {
1819 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1820 ERR("Register apps socket poll error");
1821 goto error;
1822 } else if (revents & LPOLLIN) {
1823 sock = lttcomm_accept_unix_sock(apps_sock);
1824 if (sock < 0) {
1825 goto error;
1826 }
1827
1828 /*
1829 * Set the CLOEXEC flag. Return code is useless because
1830 * either way, the show must go on.
1831 */
1832 (void) utils_set_fd_cloexec(sock);
1833
1834 /* Create UST registration command for enqueuing */
1835 ust_cmd = zmalloc(sizeof(struct ust_command));
1836 if (ust_cmd == NULL) {
1837 PERROR("ust command zmalloc");
1838 goto error;
1839 }
1840
1841 /*
1842 * Using message-based transmissions to ensure we don't
1843 * have to deal with partially received messages.
1844 */
1845 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
1846 if (ret < 0) {
1847 ERR("Exhausted file descriptors allowed for applications.");
1848 free(ust_cmd);
1849 ret = close(sock);
1850 if (ret) {
1851 PERROR("close");
1852 }
1853 sock = -1;
1854 continue;
1855 }
1856
1857 health_code_update();
1858 ret = ust_app_recv_registration(sock, &ust_cmd->reg_msg);
1859 if (ret < 0) {
1860 free(ust_cmd);
1861 /* Close socket of the application. */
1862 ret = close(sock);
1863 if (ret) {
1864 PERROR("close");
1865 }
1866 lttng_fd_put(LTTNG_FD_APPS, 1);
1867 sock = -1;
1868 continue;
1869 }
1870 health_code_update();
1871
1872 ust_cmd->sock = sock;
1873 sock = -1;
1874
1875 DBG("UST registration received with pid:%d ppid:%d uid:%d"
1876 " gid:%d sock:%d name:%s (version %d.%d)",
1877 ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
1878 ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
1879 ust_cmd->sock, ust_cmd->reg_msg.name,
1880 ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
1881
1882 /*
1883 * Lock free enqueue the registration request. The red pill
1884 * has been taken! This apps will be part of the *system*.
1885 */
1886 cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);
1887
1888 /*
1889 * Wake the registration queue futex. Implicit memory
1890 * barrier with the exchange in cds_wfq_enqueue.
1891 */
1892 futex_nto1_wake(&ust_cmd_queue.futex);
1893 }
1894 }
1895 }
1896 }
1897
1898 exit:
1899 error:
1900 if (err) {
1901 health_error();
1902 ERR("Health error occurred in %s", __func__);
1903 }
1904
1905 /* Notify that the registration thread is gone */
1906 notify_ust_apps(0);
1907
1908 if (apps_sock >= 0) {
1909 ret = close(apps_sock);
1910 if (ret) {
1911 PERROR("close");
1912 }
1913 }
1914 if (sock >= 0) {
1915 ret = close(sock);
1916 if (ret) {
1917 PERROR("close");
1918 }
1919 lttng_fd_put(LTTNG_FD_APPS, 1);
1920 }
1921 unlink(apps_unix_sock_path);
1922
1923 error_poll_add:
1924 lttng_poll_clean(&events);
1925 error_listen:
1926 error_create_poll:
1927 error_testpoint:
1928 DBG("UST Registration thread cleanup complete");
1929 health_unregister(health_sessiond);
1930
1931 return NULL;
1932 }
1933
1934 /*
1935 * Start the thread_manage_consumer. This must be done after a lttng-consumerd
1936 * exec or it will fails.
1937 */
1938 static int spawn_consumer_thread(struct consumer_data *consumer_data)
1939 {
1940 int ret, clock_ret;
1941 struct timespec timeout;
1942
1943 /* Make sure we set the readiness flag to 0 because we are NOT ready */
1944 consumer_data->consumer_thread_is_ready = 0;
1945
1946 /* Setup pthread condition */
1947 ret = pthread_condattr_init(&consumer_data->condattr);
1948 if (ret != 0) {
1949 errno = ret;
1950 PERROR("pthread_condattr_init consumer data");
1951 goto error;
1952 }
1953
1954 /*
1955 * Set the monotonic clock in order to make sure we DO NOT jump in time
1956 * between the clock_gettime() call and the timedwait call. See bug #324
1957 * for a more details and how we noticed it.
1958 */
1959 ret = pthread_condattr_setclock(&consumer_data->condattr, CLOCK_MONOTONIC);
1960 if (ret != 0) {
1961 errno = ret;
1962 PERROR("pthread_condattr_setclock consumer data");
1963 goto error;
1964 }
1965
1966 ret = pthread_cond_init(&consumer_data->cond, &consumer_data->condattr);
1967 if (ret != 0) {
1968 errno = ret;
1969 PERROR("pthread_cond_init consumer data");
1970 goto error;
1971 }
1972
1973 ret = pthread_create(&consumer_data->thread, NULL, thread_manage_consumer,
1974 consumer_data);
1975 if (ret != 0) {
1976 PERROR("pthread_create consumer");
1977 ret = -1;
1978 goto error;
1979 }
1980
1981 /* We are about to wait on a pthread condition */
1982 pthread_mutex_lock(&consumer_data->cond_mutex);
1983
1984 /* Get time for sem_timedwait absolute timeout */
1985 clock_ret = clock_gettime(CLOCK_MONOTONIC, &timeout);
1986 /*
1987 * Set the timeout for the condition timed wait even if the clock gettime
1988 * call fails since we might loop on that call and we want to avoid to
1989 * increment the timeout too many times.
1990 */
1991 timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;
1992
1993 /*
1994 * The following loop COULD be skipped in some conditions so this is why we
1995 * set ret to 0 in order to make sure at least one round of the loop is
1996 * done.
1997 */
1998 ret = 0;
1999
2000 /*
2001 * Loop until the condition is reached or when a timeout is reached. Note
2002 * that the pthread_cond_timedwait(P) man page specifies that EINTR can NOT
2003 * be returned but the pthread_cond(3), from the glibc-doc, says that it is
2004 * possible. This loop does not take any chances and works with both of
2005 * them.
2006 */
2007 while (!consumer_data->consumer_thread_is_ready && ret != ETIMEDOUT) {
2008 if (clock_ret < 0) {
2009 PERROR("clock_gettime spawn consumer");
2010 /* Infinite wait for the consumerd thread to be ready */
2011 ret = pthread_cond_wait(&consumer_data->cond,
2012 &consumer_data->cond_mutex);
2013 } else {
2014 ret = pthread_cond_timedwait(&consumer_data->cond,
2015 &consumer_data->cond_mutex, &timeout);
2016 }
2017 }
2018
2019 /* Release the pthread condition */
2020 pthread_mutex_unlock(&consumer_data->cond_mutex);
2021
2022 if (ret != 0) {
2023 errno = ret;
2024 if (ret == ETIMEDOUT) {
2025 /*
2026 * Call has timed out so we kill the kconsumerd_thread and return
2027 * an error.
2028 */
2029 ERR("Condition timed out. The consumer thread was never ready."
2030 " Killing it");
2031 ret = pthread_cancel(consumer_data->thread);
2032 if (ret < 0) {
2033 PERROR("pthread_cancel consumer thread");
2034 }
2035 } else {
2036 PERROR("pthread_cond_wait failed consumer thread");
2037 }
2038 goto error;
2039 }
2040
2041 pthread_mutex_lock(&consumer_data->pid_mutex);
2042 if (consumer_data->pid == 0) {
2043 ERR("Consumerd did not start");
2044 pthread_mutex_unlock(&consumer_data->pid_mutex);
2045 goto error;
2046 }
2047 pthread_mutex_unlock(&consumer_data->pid_mutex);
2048
2049 return 0;
2050
2051 error:
2052 return ret;
2053 }
2054
2055 /*
2056 * Join consumer thread
2057 */
2058 static int join_consumer_thread(struct consumer_data *consumer_data)
2059 {
2060 void *status;
2061
2062 /* Consumer pid must be a real one. */
2063 if (consumer_data->pid > 0) {
2064 int ret;
2065 ret = kill(consumer_data->pid, SIGTERM);
2066 if (ret) {
2067 ERR("Error killing consumer daemon");
2068 return ret;
2069 }
2070 return pthread_join(consumer_data->thread, &status);
2071 } else {
2072 return 0;
2073 }
2074 }
2075
2076 /*
2077 * Fork and exec a consumer daemon (consumerd).
2078 *
2079 * Return pid if successful else -1.
2080 */
2081 static pid_t spawn_consumerd(struct consumer_data *consumer_data)
2082 {
2083 int ret;
2084 pid_t pid;
2085 const char *consumer_to_use;
2086 const char *verbosity;
2087 struct stat st;
2088
2089 DBG("Spawning consumerd");
2090
2091 pid = fork();
2092 if (pid == 0) {
2093 /*
2094 * Exec consumerd.
2095 */
2096 if (opt_verbose_consumer) {
2097 verbosity = "--verbose";
2098 } else {
2099 verbosity = "--quiet";
2100 }
2101 switch (consumer_data->type) {
2102 case LTTNG_CONSUMER_KERNEL:
2103 /*
2104 * Find out which consumerd to execute. We will first try the
2105 * 64-bit path, then the sessiond's installation directory, and
2106 * fallback on the 32-bit one,
2107 */
2108 DBG3("Looking for a kernel consumer at these locations:");
2109 DBG3(" 1) %s", consumerd64_bin);
2110 DBG3(" 2) %s/%s", INSTALL_BIN_PATH, CONSUMERD_FILE);
2111 DBG3(" 3) %s", consumerd32_bin);
2112 if (stat(consumerd64_bin, &st) == 0) {
2113 DBG3("Found location #1");
2114 consumer_to_use = consumerd64_bin;
2115 } else if (stat(INSTALL_BIN_PATH "/" CONSUMERD_FILE, &st) == 0) {
2116 DBG3("Found location #2");
2117 consumer_to_use = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
2118 } else if (stat(consumerd32_bin, &st) == 0) {
2119 DBG3("Found location #3");
2120 consumer_to_use = consumerd32_bin;
2121 } else {
2122 DBG("Could not find any valid consumerd executable");
2123 break;
2124 }
2125 DBG("Using kernel consumer at: %s", consumer_to_use);
2126 execl(consumer_to_use,
2127 "lttng-consumerd", verbosity, "-k",
2128 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2129 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
2130 NULL);
2131 break;
2132 case LTTNG_CONSUMER64_UST:
2133 {
2134 char *tmpnew = NULL;
2135
2136 if (consumerd64_libdir[0] != '\0') {
2137 char *tmp;
2138 size_t tmplen;
2139
2140 tmp = getenv("LD_LIBRARY_PATH");
2141 if (!tmp) {
2142 tmp = "";
2143 }
2144 tmplen = strlen("LD_LIBRARY_PATH=")
2145 + strlen(consumerd64_libdir) + 1 /* : */ + strlen(tmp);
2146 tmpnew = zmalloc(tmplen + 1 /* \0 */);
2147 if (!tmpnew) {
2148 ret = -ENOMEM;
2149 goto error;
2150 }
2151 strcpy(tmpnew, "LD_LIBRARY_PATH=");
2152 strcat(tmpnew, consumerd64_libdir);
2153 if (tmp[0] != '\0') {
2154 strcat(tmpnew, ":");
2155 strcat(tmpnew, tmp);
2156 }
2157 ret = putenv(tmpnew);
2158 if (ret) {
2159 ret = -errno;
2160 free(tmpnew);
2161 goto error;
2162 }
2163 }
2164 DBG("Using 64-bit UST consumer at: %s", consumerd64_bin);
2165 ret = execl(consumerd64_bin, "lttng-consumerd", verbosity, "-u",
2166 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2167 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
2168 NULL);
2169 if (consumerd64_libdir[0] != '\0') {
2170 free(tmpnew);
2171 }
2172 if (ret) {
2173 goto error;
2174 }
2175 break;
2176 }
2177 case LTTNG_CONSUMER32_UST:
2178 {
2179 char *tmpnew = NULL;
2180
2181 if (consumerd32_libdir[0] != '\0') {
2182 char *tmp;
2183 size_t tmplen;
2184
2185 tmp = getenv("LD_LIBRARY_PATH");
2186 if (!tmp) {
2187 tmp = "";
2188 }
2189 tmplen = strlen("LD_LIBRARY_PATH=")
2190 + strlen(consumerd32_libdir) + 1 /* : */ + strlen(tmp);
2191 tmpnew = zmalloc(tmplen + 1 /* \0 */);
2192 if (!tmpnew) {
2193 ret = -ENOMEM;
2194 goto error;
2195 }
2196 strcpy(tmpnew, "LD_LIBRARY_PATH=");
2197 strcat(tmpnew, consumerd32_libdir);
2198 if (tmp[0] != '\0') {
2199 strcat(tmpnew, ":");
2200 strcat(tmpnew, tmp);
2201 }
2202 ret = putenv(tmpnew);
2203 if (ret) {
2204 ret = -errno;
2205 free(tmpnew);
2206 goto error;
2207 }
2208 }
2209 DBG("Using 32-bit UST consumer at: %s", consumerd32_bin);
2210 ret = execl(consumerd32_bin, "lttng-consumerd", verbosity, "-u",
2211 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2212 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
2213 NULL);
2214 if (consumerd32_libdir[0] != '\0') {
2215 free(tmpnew);
2216 }
2217 if (ret) {
2218 goto error;
2219 }
2220 break;
2221 }
2222 default:
2223 PERROR("unknown consumer type");
2224 exit(EXIT_FAILURE);
2225 }
2226 if (errno != 0) {
2227 PERROR("kernel start consumer exec");
2228 }
2229 exit(EXIT_FAILURE);
2230 } else if (pid > 0) {
2231 ret = pid;
2232 } else {
2233 PERROR("start consumer fork");
2234 ret = -errno;
2235 }
2236 error:
2237 return ret;
2238 }
2239
2240 /*
2241 * Spawn the consumerd daemon and session daemon thread.
2242 */
2243 static int start_consumerd(struct consumer_data *consumer_data)
2244 {
2245 int ret;
2246
2247 /*
2248 * Set the listen() state on the socket since there is a possible race
2249 * between the exec() of the consumer daemon and this call if place in the
2250 * consumer thread. See bug #366 for more details.
2251 */
2252 ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
2253 if (ret < 0) {
2254 goto error;
2255 }
2256
2257 pthread_mutex_lock(&consumer_data->pid_mutex);
2258 if (consumer_data->pid != 0) {
2259 pthread_mutex_unlock(&consumer_data->pid_mutex);
2260 goto end;
2261 }
2262
2263 ret = spawn_consumerd(consumer_data);
2264 if (ret < 0) {
2265 ERR("Spawning consumerd failed");
2266 pthread_mutex_unlock(&consumer_data->pid_mutex);
2267 goto error;
2268 }
2269
2270 /* Setting up the consumer_data pid */
2271 consumer_data->pid = ret;
2272 DBG2("Consumer pid %d", consumer_data->pid);
2273 pthread_mutex_unlock(&consumer_data->pid_mutex);
2274
2275 DBG2("Spawning consumer control thread");
2276 ret = spawn_consumer_thread(consumer_data);
2277 if (ret < 0) {
2278 ERR("Fatal error spawning consumer control thread");
2279 goto error;
2280 }
2281
2282 end:
2283 return 0;
2284
2285 error:
2286 /* Cleanup already created sockets on error. */
2287 if (consumer_data->err_sock >= 0) {
2288 int err;
2289
2290 err = close(consumer_data->err_sock);
2291 if (err < 0) {
2292 PERROR("close consumer data error socket");
2293 }
2294 }
2295 return ret;
2296 }
2297
2298 /*
2299 * Compute health status of each consumer. If one of them is zero (bad
2300 * state), we return 0.
2301 */
2302 static int check_consumer_health(void)
2303 {
2304 int ret;
2305
2306 ret = health_check_state(health_sessiond, HEALTH_TYPE_CONSUMER);
2307
2308 DBG3("Health consumer check %d", ret);
2309
2310 return ret;
2311 }
2312
2313 /*
2314 * Setup necessary data for kernel tracer action.
2315 */
2316 static int init_kernel_tracer(void)
2317 {
2318 int ret;
2319
2320 /* Modprobe lttng kernel modules */
2321 ret = modprobe_lttng_control();
2322 if (ret < 0) {
2323 goto error;
2324 }
2325
2326 /* Open debugfs lttng */
2327 kernel_tracer_fd = open(module_proc_lttng, O_RDWR);
2328 if (kernel_tracer_fd < 0) {
2329 DBG("Failed to open %s", module_proc_lttng);
2330 ret = -1;
2331 goto error_open;
2332 }
2333
2334 /* Validate kernel version */
2335 ret = kernel_validate_version(kernel_tracer_fd);
2336 if (ret < 0) {
2337 goto error_version;
2338 }
2339
2340 ret = modprobe_lttng_data();
2341 if (ret < 0) {
2342 goto error_modules;
2343 }
2344
2345 DBG("Kernel tracer fd %d", kernel_tracer_fd);
2346 return 0;
2347
2348 error_version:
2349 modprobe_remove_lttng_control();
2350 ret = close(kernel_tracer_fd);
2351 if (ret) {
2352 PERROR("close");
2353 }
2354 kernel_tracer_fd = -1;
2355 return LTTNG_ERR_KERN_VERSION;
2356
2357 error_modules:
2358 ret = close(kernel_tracer_fd);
2359 if (ret) {
2360 PERROR("close");
2361 }
2362
2363 error_open:
2364 modprobe_remove_lttng_control();
2365
2366 error:
2367 WARN("No kernel tracer available");
2368 kernel_tracer_fd = -1;
2369 if (!is_root) {
2370 return LTTNG_ERR_NEED_ROOT_SESSIOND;
2371 } else {
2372 return LTTNG_ERR_KERN_NA;
2373 }
2374 }
2375
2376
2377 /*
2378 * Copy consumer output from the tracing session to the domain session. The
2379 * function also applies the right modification on a per domain basis for the
2380 * trace files destination directory.
2381 *
2382 * Should *NOT* be called with RCU read-side lock held.
2383 */
2384 static int copy_session_consumer(int domain, struct ltt_session *session)
2385 {
2386 int ret;
2387 const char *dir_name;
2388 struct consumer_output *consumer;
2389
2390 assert(session);
2391 assert(session->consumer);
2392
2393 switch (domain) {
2394 case LTTNG_DOMAIN_KERNEL:
2395 DBG3("Copying tracing session consumer output in kernel session");
2396 /*
2397 * XXX: We should audit the session creation and what this function
2398 * does "extra" in order to avoid a destroy since this function is used
2399 * in the domain session creation (kernel and ust) only. Same for UST
2400 * domain.
2401 */
2402 if (session->kernel_session->consumer) {
2403 consumer_destroy_output(session->kernel_session->consumer);
2404 }
2405 session->kernel_session->consumer =
2406 consumer_copy_output(session->consumer);
2407 /* Ease our life a bit for the next part */
2408 consumer = session->kernel_session->consumer;
2409 dir_name = DEFAULT_KERNEL_TRACE_DIR;
2410 break;
2411 case LTTNG_DOMAIN_UST:
2412 DBG3("Copying tracing session consumer output in UST session");
2413 if (session->ust_session->consumer) {
2414 consumer_destroy_output(session->ust_session->consumer);
2415 }
2416 session->ust_session->consumer =
2417 consumer_copy_output(session->consumer);
2418 /* Ease our life a bit for the next part */
2419 consumer = session->ust_session->consumer;
2420 dir_name = DEFAULT_UST_TRACE_DIR;
2421 break;
2422 default:
2423 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
2424 goto error;
2425 }
2426
2427 /* Append correct directory to subdir */
2428 strncat(consumer->subdir, dir_name,
2429 sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
2430 DBG3("Copy session consumer subdir %s", consumer->subdir);
2431
2432 ret = LTTNG_OK;
2433
2434 error:
2435 return ret;
2436 }
2437
2438 /*
2439 * Create an UST session and add it to the session ust list.
2440 *
2441 * Should *NOT* be called with RCU read-side lock held.
2442 */
2443 static int create_ust_session(struct ltt_session *session,
2444 struct lttng_domain *domain)
2445 {
2446 int ret;
2447 struct ltt_ust_session *lus = NULL;
2448
2449 assert(session);
2450 assert(domain);
2451 assert(session->consumer);
2452
2453 switch (domain->type) {
2454 case LTTNG_DOMAIN_UST:
2455 break;
2456 default:
2457 ERR("Unknown UST domain on create session %d", domain->type);
2458 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
2459 goto error;
2460 }
2461
2462 DBG("Creating UST session");
2463
2464 lus = trace_ust_create_session(session->id);
2465 if (lus == NULL) {
2466 ret = LTTNG_ERR_UST_SESS_FAIL;
2467 goto error;
2468 }
2469
2470 lus->uid = session->uid;
2471 lus->gid = session->gid;
2472 lus->output_traces = session->output_traces;
2473 lus->snapshot_mode = session->snapshot_mode;
2474 lus->live_timer_interval = session->live_timer;
2475 session->ust_session = lus;
2476
2477 /* Copy session output to the newly created UST session */
2478 ret = copy_session_consumer(domain->type, session);
2479 if (ret != LTTNG_OK) {
2480 goto error;
2481 }
2482
2483 return LTTNG_OK;
2484
2485 error:
2486 free(lus);
2487 session->ust_session = NULL;
2488 return ret;
2489 }
2490
2491 /*
2492 * Create a kernel tracer session then create the default channel.
2493 */
2494 static int create_kernel_session(struct ltt_session *session)
2495 {
2496 int ret;
2497
2498 DBG("Creating kernel session");
2499
2500 ret = kernel_create_session(session, kernel_tracer_fd);
2501 if (ret < 0) {
2502 ret = LTTNG_ERR_KERN_SESS_FAIL;
2503 goto error;
2504 }
2505
2506 /* Code flow safety */
2507 assert(session->kernel_session);
2508
2509 /* Copy session output to the newly created Kernel session */
2510 ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
2511 if (ret != LTTNG_OK) {
2512 goto error;
2513 }
2514
2515 /* Create directory(ies) on local filesystem. */
2516 if (session->kernel_session->consumer->type == CONSUMER_DST_LOCAL &&
2517 strlen(session->kernel_session->consumer->dst.trace_path) > 0) {
2518 ret = run_as_mkdir_recursive(
2519 session->kernel_session->consumer->dst.trace_path,
2520 S_IRWXU | S_IRWXG, session->uid, session->gid);
2521 if (ret < 0) {
2522 if (ret != -EEXIST) {
2523 ERR("Trace directory creation error");
2524 goto error;
2525 }
2526 }
2527 }
2528
2529 session->kernel_session->uid = session->uid;
2530 session->kernel_session->gid = session->gid;
2531 session->kernel_session->output_traces = session->output_traces;
2532 session->kernel_session->snapshot_mode = session->snapshot_mode;
2533
2534 return LTTNG_OK;
2535
2536 error:
2537 trace_kernel_destroy_session(session->kernel_session);
2538 session->kernel_session = NULL;
2539 return ret;
2540 }
2541
2542 /*
2543 * Count number of session permitted by uid/gid.
2544 */
2545 static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
2546 {
2547 unsigned int i = 0;
2548 struct ltt_session *session;
2549
2550 DBG("Counting number of available session for UID %d GID %d",
2551 uid, gid);
2552 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
2553 /*
2554 * Only list the sessions the user can control.
2555 */
2556 if (!session_access_ok(session, uid, gid)) {
2557 continue;
2558 }
2559 i++;
2560 }
2561 return i;
2562 }
2563
2564 /*
2565 * Process the command requested by the lttng client within the command
2566 * context structure. This function make sure that the return structure (llm)
2567 * is set and ready for transmission before returning.
2568 *
2569 * Return any error encountered or 0 for success.
2570 *
2571 * "sock" is only used for special-case var. len data.
2572 *
2573 * Should *NOT* be called with RCU read-side lock held.
2574 */
2575 static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
2576 int *sock_error)
2577 {
2578 int ret = LTTNG_OK;
2579 int need_tracing_session = 1;
2580 int need_domain;
2581
2582 DBG("Processing client command %d", cmd_ctx->lsm->cmd_type);
2583
2584 *sock_error = 0;
2585
2586 switch (cmd_ctx->lsm->cmd_type) {
2587 case LTTNG_CREATE_SESSION:
2588 case LTTNG_CREATE_SESSION_SNAPSHOT:
2589 case LTTNG_CREATE_SESSION_LIVE:
2590 case LTTNG_DESTROY_SESSION:
2591 case LTTNG_LIST_SESSIONS:
2592 case LTTNG_LIST_DOMAINS:
2593 case LTTNG_START_TRACE:
2594 case LTTNG_STOP_TRACE:
2595 case LTTNG_DATA_PENDING:
2596 case LTTNG_SNAPSHOT_ADD_OUTPUT:
2597 case LTTNG_SNAPSHOT_DEL_OUTPUT:
2598 case LTTNG_SNAPSHOT_LIST_OUTPUT:
2599 case LTTNG_SNAPSHOT_RECORD:
2600 need_domain = 0;
2601 break;
2602 default:
2603 need_domain = 1;
2604 }
2605
2606 if (opt_no_kernel && need_domain
2607 && cmd_ctx->lsm->domain.type == LTTNG_DOMAIN_KERNEL) {
2608 if (!is_root) {
2609 ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
2610 } else {
2611 ret = LTTNG_ERR_KERN_NA;
2612 }
2613 goto error;
2614 }
2615
2616 /* Deny register consumer if we already have a spawned consumer. */
2617 if (cmd_ctx->lsm->cmd_type == LTTNG_REGISTER_CONSUMER) {
2618 pthread_mutex_lock(&kconsumer_data.pid_mutex);
2619 if (kconsumer_data.pid > 0) {
2620 ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
2621 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2622 goto error;
2623 }
2624 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2625 }
2626
2627 /*
2628 * Check for command that don't needs to allocate a returned payload. We do
2629 * this here so we don't have to make the call for no payload at each
2630 * command.
2631 */
2632 switch(cmd_ctx->lsm->cmd_type) {
2633 case LTTNG_LIST_SESSIONS:
2634 case LTTNG_LIST_TRACEPOINTS:
2635 case LTTNG_LIST_TRACEPOINT_FIELDS:
2636 case LTTNG_LIST_DOMAINS:
2637 case LTTNG_LIST_CHANNELS:
2638 case LTTNG_LIST_EVENTS:
2639 break;
2640 default:
2641 /* Setup lttng message with no payload */
2642 ret = setup_lttng_msg(cmd_ctx, 0);
2643 if (ret < 0) {
2644 /* This label does not try to unlock the session */
2645 goto init_setup_error;
2646 }
2647 }
2648
2649 /* Commands that DO NOT need a session. */
2650 switch (cmd_ctx->lsm->cmd_type) {
2651 case LTTNG_CREATE_SESSION:
2652 case LTTNG_CREATE_SESSION_SNAPSHOT:
2653 case LTTNG_CREATE_SESSION_LIVE:
2654 case LTTNG_CALIBRATE:
2655 case LTTNG_LIST_SESSIONS:
2656 case LTTNG_LIST_TRACEPOINTS:
2657 case LTTNG_LIST_TRACEPOINT_FIELDS:
2658 need_tracing_session = 0;
2659 break;
2660 default:
2661 DBG("Getting session %s by name", cmd_ctx->lsm->session.name);
2662 /*
2663 * We keep the session list lock across _all_ commands
2664 * for now, because the per-session lock does not
2665 * handle teardown properly.
2666 */
2667 session_lock_list();
2668 cmd_ctx->session = session_find_by_name(cmd_ctx->lsm->session.name);
2669 if (cmd_ctx->session == NULL) {
2670 ret = LTTNG_ERR_SESS_NOT_FOUND;
2671 goto error;
2672 } else {
2673 /* Acquire lock for the session */
2674 session_lock(cmd_ctx->session);
2675 }
2676 break;
2677 }
2678
2679 if (!need_domain) {
2680 goto skip_domain;
2681 }
2682
2683 /*
2684 * Check domain type for specific "pre-action".
2685 */
2686 switch (cmd_ctx->lsm->domain.type) {
2687 case LTTNG_DOMAIN_KERNEL:
2688 if (!is_root) {
2689 ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
2690 goto error;
2691 }
2692
2693 /* Kernel tracer check */
2694 if (kernel_tracer_fd == -1) {
2695 /* Basically, load kernel tracer modules */
2696 ret = init_kernel_tracer();
2697 if (ret != 0) {
2698 goto error;
2699 }
2700 }
2701
2702 /* Consumer is in an ERROR state. Report back to client */
2703 if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
2704 ret = LTTNG_ERR_NO_KERNCONSUMERD;
2705 goto error;
2706 }
2707
2708 /* Need a session for kernel command */
2709 if (need_tracing_session) {
2710 if (cmd_ctx->session->kernel_session == NULL) {
2711 ret = create_kernel_session(cmd_ctx->session);
2712 if (ret < 0) {
2713 ret = LTTNG_ERR_KERN_SESS_FAIL;
2714 goto error;
2715 }
2716 }
2717
2718 /* Start the kernel consumer daemon */
2719 pthread_mutex_lock(&kconsumer_data.pid_mutex);
2720 if (kconsumer_data.pid == 0 &&
2721 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
2722 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2723 ret = start_consumerd(&kconsumer_data);
2724 if (ret < 0) {
2725 ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
2726 goto error;
2727 }
2728 uatomic_set(&kernel_consumerd_state, CONSUMER_STARTED);
2729 } else {
2730 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2731 }
2732
2733 /*
2734 * The consumer was just spawned so we need to add the socket to
2735 * the consumer output of the session if exist.
2736 */
2737 ret = consumer_create_socket(&kconsumer_data,
2738 cmd_ctx->session->kernel_session->consumer);
2739 if (ret < 0) {
2740 goto error;
2741 }
2742 }
2743
2744 break;
2745 case LTTNG_DOMAIN_UST:
2746 {
2747 if (!ust_app_supported()) {
2748 ret = LTTNG_ERR_NO_UST;
2749 goto error;
2750 }
2751 /* Consumer is in an ERROR state. Report back to client */
2752 if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
2753 ret = LTTNG_ERR_NO_USTCONSUMERD;
2754 goto error;
2755 }
2756
2757 if (need_tracing_session) {
2758 /* Create UST session if none exist. */
2759 if (cmd_ctx->session->ust_session == NULL) {
2760 ret = create_ust_session(cmd_ctx->session,
2761 &cmd_ctx->lsm->domain);
2762 if (ret != LTTNG_OK) {
2763 goto error;
2764 }
2765 }
2766
2767 /* Start the UST consumer daemons */
2768 /* 64-bit */
2769 pthread_mutex_lock(&ustconsumer64_data.pid_mutex);
2770 if (consumerd64_bin[0] != '\0' &&
2771 ustconsumer64_data.pid == 0 &&
2772 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
2773 pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
2774 ret = start_consumerd(&ustconsumer64_data);
2775 if (ret < 0) {
2776 ret = LTTNG_ERR_UST_CONSUMER64_FAIL;
2777 uatomic_set(&ust_consumerd64_fd, -EINVAL);
2778 goto error;
2779 }
2780
2781 uatomic_set(&ust_consumerd64_fd, ustconsumer64_data.cmd_sock);
2782 uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
2783 } else {
2784 pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
2785 }
2786
2787 /*
2788 * Setup socket for consumer 64 bit. No need for atomic access
2789 * since it was set above and can ONLY be set in this thread.
2790 */
2791 ret = consumer_create_socket(&ustconsumer64_data,
2792 cmd_ctx->session->ust_session->consumer);
2793 if (ret < 0) {
2794 goto error;
2795 }
2796
2797 /* 32-bit */
2798 if (consumerd32_bin[0] != '\0' &&
2799 ustconsumer32_data.pid == 0 &&
2800 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
2801 pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
2802 ret = start_consumerd(&ustconsumer32_data);
2803 if (ret < 0) {
2804 ret = LTTNG_ERR_UST_CONSUMER32_FAIL;
2805 uatomic_set(&ust_consumerd32_fd, -EINVAL);
2806 goto error;
2807 }
2808
2809 uatomic_set(&ust_consumerd32_fd, ustconsumer32_data.cmd_sock);
2810 uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
2811 } else {
2812 pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
2813 }
2814
2815 /*
2816 * Setup socket for consumer 64 bit. No need for atomic access
2817 * since it was set above and can ONLY be set in this thread.
2818 */
2819 ret = consumer_create_socket(&ustconsumer32_data,
2820 cmd_ctx->session->ust_session->consumer);
2821 if (ret < 0) {
2822 goto error;
2823 }
2824 }
2825 break;
2826 }
2827 default:
2828 break;
2829 }
2830 skip_domain:
2831
2832 /* Validate consumer daemon state when start/stop trace command */
2833 if (cmd_ctx->lsm->cmd_type == LTTNG_START_TRACE ||
2834 cmd_ctx->lsm->cmd_type == LTTNG_STOP_TRACE) {
2835 switch (cmd_ctx->lsm->domain.type) {
2836 case LTTNG_DOMAIN_UST:
2837 if (uatomic_read(&ust_consumerd_state) != CONSUMER_STARTED) {
2838 ret = LTTNG_ERR_NO_USTCONSUMERD;
2839 goto error;
2840 }
2841 break;
2842 case LTTNG_DOMAIN_KERNEL:
2843 if (uatomic_read(&kernel_consumerd_state) != CONSUMER_STARTED) {
2844 ret = LTTNG_ERR_NO_KERNCONSUMERD;
2845 goto error;
2846 }
2847 break;
2848 }
2849 }
2850
2851 /*
2852 * Check that the UID or GID match that of the tracing session.
2853 * The root user can interact with all sessions.
2854 */
2855 if (need_tracing_session) {
2856 if (!session_access_ok(cmd_ctx->session,
2857 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
2858 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds))) {
2859 ret = LTTNG_ERR_EPERM;
2860 goto error;
2861 }
2862 }
2863
2864 /*
2865 * Send relayd information to consumer as soon as we have a domain and a
2866 * session defined.
2867 */
2868 if (cmd_ctx->session && need_domain) {
2869 /*
2870 * Setup relayd if not done yet. If the relayd information was already
2871 * sent to the consumer, this call will gracefully return.
2872 */
2873 ret = cmd_setup_relayd(cmd_ctx->session);
2874 if (ret != LTTNG_OK) {
2875 goto error;
2876 }
2877 }
2878
2879 /* Process by command type */
2880 switch (cmd_ctx->lsm->cmd_type) {
2881 case LTTNG_ADD_CONTEXT:
2882 {
2883 ret = cmd_add_context(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2884 cmd_ctx->lsm->u.context.channel_name,
2885 &cmd_ctx->lsm->u.context.ctx, kernel_poll_pipe[1]);
2886 break;
2887 }
2888 case LTTNG_DISABLE_CHANNEL:
2889 {
2890 ret = cmd_disable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2891 cmd_ctx->lsm->u.disable.channel_name);
2892 break;
2893 }
2894 case LTTNG_DISABLE_EVENT:
2895 {
2896 ret = cmd_disable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2897 cmd_ctx->lsm->u.disable.channel_name,
2898 cmd_ctx->lsm->u.disable.name);
2899 break;
2900 }
2901 case LTTNG_DISABLE_ALL_EVENT:
2902 {
2903 DBG("Disabling all events");
2904
2905 ret = cmd_disable_event_all(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2906 cmd_ctx->lsm->u.disable.channel_name);
2907 break;
2908 }
2909 case LTTNG_ENABLE_CHANNEL:
2910 {
2911 ret = cmd_enable_channel(cmd_ctx->session, &cmd_ctx->lsm->domain,
2912 &cmd_ctx->lsm->u.channel.chan, kernel_poll_pipe[1]);
2913 break;
2914 }
2915 case LTTNG_ENABLE_EVENT:
2916 {
2917 ret = cmd_enable_event(cmd_ctx->session, &cmd_ctx->lsm->domain,
2918 cmd_ctx->lsm->u.enable.channel_name,
2919 &cmd_ctx->lsm->u.enable.event, NULL, kernel_poll_pipe[1]);
2920 break;
2921 }
2922 case LTTNG_ENABLE_ALL_EVENT:
2923 {
2924 DBG("Enabling all events");
2925
2926 ret = cmd_enable_event_all(cmd_ctx->session, &cmd_ctx->lsm->domain,
2927 cmd_ctx->lsm->u.enable.channel_name,
2928 cmd_ctx->lsm->u.enable.event.type, NULL, kernel_poll_pipe[1]);
2929 break;
2930 }
2931 case LTTNG_LIST_TRACEPOINTS:
2932 {
2933 struct lttng_event *events;
2934 ssize_t nb_events;
2935
2936 nb_events = cmd_list_tracepoints(cmd_ctx->lsm->domain.type, &events);
2937 if (nb_events < 0) {
2938 /* Return value is a negative lttng_error_code. */
2939 ret = -nb_events;
2940 goto error;
2941 }
2942
2943 /*
2944 * Setup lttng message with payload size set to the event list size in
2945 * bytes and then copy list into the llm payload.
2946 */
2947 ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event) * nb_events);
2948 if (ret < 0) {
2949 free(events);
2950 goto setup_error;
2951 }
2952
2953 /* Copy event list into message payload */
2954 memcpy(cmd_ctx->llm->payload, events,
2955 sizeof(struct lttng_event) * nb_events);
2956
2957 free(events);
2958
2959 ret = LTTNG_OK;
2960 break;
2961 }
2962 case LTTNG_LIST_TRACEPOINT_FIELDS:
2963 {
2964 struct lttng_event_field *fields;
2965 ssize_t nb_fields;
2966
2967 nb_fields = cmd_list_tracepoint_fields(cmd_ctx->lsm->domain.type,
2968 &fields);
2969 if (nb_fields < 0) {
2970 /* Return value is a negative lttng_error_code. */
2971 ret = -nb_fields;
2972 goto error;
2973 }
2974
2975 /*
2976 * Setup lttng message with payload size set to the event list size in
2977 * bytes and then copy list into the llm payload.
2978 */
2979 ret = setup_lttng_msg(cmd_ctx,
2980 sizeof(struct lttng_event_field) * nb_fields);
2981 if (ret < 0) {
2982 free(fields);
2983 goto setup_error;
2984 }
2985
2986 /* Copy event list into message payload */
2987 memcpy(cmd_ctx->llm->payload, fields,
2988 sizeof(struct lttng_event_field) * nb_fields);
2989
2990 free(fields);
2991
2992 ret = LTTNG_OK;
2993 break;
2994 }
2995 case LTTNG_SET_CONSUMER_URI:
2996 {
2997 size_t nb_uri, len;
2998 struct lttng_uri *uris;
2999
3000 nb_uri = cmd_ctx->lsm->u.uri.size;
3001 len = nb_uri * sizeof(struct lttng_uri);
3002
3003 if (nb_uri == 0) {
3004 ret = LTTNG_ERR_INVALID;
3005 goto error;
3006 }
3007
3008 uris = zmalloc(len);
3009 if (uris == NULL) {
3010 ret = LTTNG_ERR_FATAL;
3011 goto error;
3012 }
3013
3014 /* Receive variable len data */
3015 DBG("Receiving %zu URI(s) from client ...", nb_uri);
3016 ret = lttcomm_recv_unix_sock(sock, uris, len);
3017 if (ret <= 0) {
3018 DBG("No URIs received from client... continuing");
3019 *sock_error = 1;
3020 ret = LTTNG_ERR_SESSION_FAIL;
3021 free(uris);
3022 goto error;
3023 }
3024
3025 ret = cmd_set_consumer_uri(cmd_ctx->lsm->domain.type, cmd_ctx->session,
3026 nb_uri, uris);
3027 if (ret != LTTNG_OK) {
3028 free(uris);
3029 goto error;
3030 }
3031
3032 /*
3033 * XXX: 0 means that this URI should be applied on the session. Should
3034 * be a DOMAIN enuam.
3035 */
3036 if (cmd_ctx->lsm->domain.type == 0) {
3037 /* Add the URI for the UST session if a consumer is present. */
3038 if (cmd_ctx->session->ust_session &&
3039 cmd_ctx->session->ust_session->consumer) {
3040 ret = cmd_set_consumer_uri(LTTNG_DOMAIN_UST, cmd_ctx->session,
3041 nb_uri, uris);
3042 } else if (cmd_ctx->session->kernel_session &&
3043 cmd_ctx->session->kernel_session->consumer) {
3044 ret = cmd_set_consumer_uri(LTTNG_DOMAIN_KERNEL,
3045 cmd_ctx->session, nb_uri, uris);
3046 }
3047 }
3048
3049 free(uris);
3050
3051 break;
3052 }
3053 case LTTNG_START_TRACE:
3054 {
3055 ret = cmd_start_trace(cmd_ctx->session);
3056 break;
3057 }
3058 case LTTNG_STOP_TRACE:
3059 {
3060 ret = cmd_stop_trace(cmd_ctx->session);
3061 break;
3062 }
3063 case LTTNG_CREATE_SESSION:
3064 {
3065 size_t nb_uri, len;
3066 struct lttng_uri *uris = NULL;
3067
3068 nb_uri = cmd_ctx->lsm->u.uri.size;
3069 len = nb_uri * sizeof(struct lttng_uri);
3070
3071 if (nb_uri > 0) {
3072 uris = zmalloc(len);
3073 if (uris == NULL) {
3074 ret = LTTNG_ERR_FATAL;
3075 goto error;
3076 }
3077
3078 /* Receive variable len data */
3079 DBG("Waiting for %zu URIs from client ...", nb_uri);
3080 ret = lttcomm_recv_unix_sock(sock, uris, len);
3081 if (ret <= 0) {
3082 DBG("No URIs received from client... continuing");
3083 *sock_error = 1;
3084 ret = LTTNG_ERR_SESSION_FAIL;
3085 free(uris);
3086 goto error;
3087 }
3088
3089 if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
3090 DBG("Creating session with ONE network URI is a bad call");
3091 ret = LTTNG_ERR_SESSION_FAIL;
3092 free(uris);
3093 goto error;
3094 }
3095 }
3096
3097 ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris, nb_uri,
3098 &cmd_ctx->creds, 0);
3099
3100 free(uris);
3101
3102 break;
3103 }
3104 case LTTNG_DESTROY_SESSION:
3105 {
3106 ret = cmd_destroy_session(cmd_ctx->session, kernel_poll_pipe[1]);
3107
3108 /* Set session to NULL so we do not unlock it after free. */
3109 cmd_ctx->session = NULL;
3110 break;
3111 }
3112 case LTTNG_LIST_DOMAINS:
3113 {
3114 ssize_t nb_dom;
3115 struct lttng_domain *domains;
3116
3117 nb_dom = cmd_list_domains(cmd_ctx->session, &domains);
3118 if (nb_dom < 0) {
3119 /* Return value is a negative lttng_error_code. */
3120 ret = -nb_dom;
3121 goto error;
3122 }
3123
3124 ret = setup_lttng_msg(cmd_ctx, nb_dom * sizeof(struct lttng_domain));
3125 if (ret < 0) {
3126 free(domains);
3127 goto setup_error;
3128 }
3129
3130 /* Copy event list into message payload */
3131 memcpy(cmd_ctx->llm->payload, domains,
3132 nb_dom * sizeof(struct lttng_domain));
3133
3134 free(domains);
3135
3136 ret = LTTNG_OK;
3137 break;
3138 }
3139 case LTTNG_LIST_CHANNELS:
3140 {
3141 int nb_chan;
3142 struct lttng_channel *channels;
3143
3144 nb_chan = cmd_list_channels(cmd_ctx->lsm->domain.type,
3145 cmd_ctx->session, &channels);
3146 if (nb_chan < 0) {
3147 /* Return value is a negative lttng_error_code. */
3148 ret = -nb_chan;
3149 goto error;
3150 }
3151
3152 ret = setup_lttng_msg(cmd_ctx, nb_chan * sizeof(struct lttng_channel));
3153 if (ret < 0) {
3154 free(channels);
3155 goto setup_error;
3156 }
3157
3158 /* Copy event list into message payload */
3159 memcpy(cmd_ctx->llm->payload, channels,
3160 nb_chan * sizeof(struct lttng_channel));
3161
3162 free(channels);
3163
3164 ret = LTTNG_OK;
3165 break;
3166 }
3167 case LTTNG_LIST_EVENTS:
3168 {
3169 ssize_t nb_event;
3170 struct lttng_event *events = NULL;
3171
3172 nb_event = cmd_list_events(cmd_ctx->lsm->domain.type, cmd_ctx->session,
3173 cmd_ctx->lsm->u.list.channel_name, &events);
3174 if (nb_event < 0) {
3175 /* Return value is a negative lttng_error_code. */
3176 ret = -nb_event;
3177 goto error;
3178 }
3179
3180 ret = setup_lttng_msg(cmd_ctx, nb_event * sizeof(struct lttng_event));
3181 if (ret < 0) {
3182 free(events);
3183 goto setup_error;
3184 }
3185
3186 /* Copy event list into message payload */
3187 memcpy(cmd_ctx->llm->payload, events,
3188 nb_event * sizeof(struct lttng_event));
3189
3190 free(events);
3191
3192 ret = LTTNG_OK;
3193 break;
3194 }
3195 case LTTNG_LIST_SESSIONS:
3196 {
3197 unsigned int nr_sessions;
3198
3199 session_lock_list();
3200 nr_sessions = lttng_sessions_count(
3201 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
3202 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
3203
3204 ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_session) * nr_sessions);
3205 if (ret < 0) {
3206 session_unlock_list();
3207 goto setup_error;
3208 }
3209
3210 /* Filled the session array */
3211 cmd_list_lttng_sessions((struct lttng_session *)(cmd_ctx->llm->payload),
3212 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
3213 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
3214
3215 session_unlock_list();
3216
3217 ret = LTTNG_OK;
3218 break;
3219 }
3220 case LTTNG_CALIBRATE:
3221 {
3222 ret = cmd_calibrate(cmd_ctx->lsm->domain.type,
3223 &cmd_ctx->lsm->u.calibrate);
3224 break;
3225 }
3226 case LTTNG_REGISTER_CONSUMER:
3227 {
3228 struct consumer_data *cdata;
3229
3230 switch (cmd_ctx->lsm->domain.type) {
3231 case LTTNG_DOMAIN_KERNEL:
3232 cdata = &kconsumer_data;
3233 break;
3234 default:
3235 ret = LTTNG_ERR_UND;
3236 goto error;
3237 }
3238
3239 ret = cmd_register_consumer(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3240 cmd_ctx->lsm->u.reg.path, cdata);
3241 break;
3242 }
3243 case LTTNG_ENABLE_EVENT_WITH_FILTER:
3244 {
3245 struct lttng_filter_bytecode *bytecode;
3246
3247 if (cmd_ctx->lsm->u.enable.bytecode_len > LTTNG_FILTER_MAX_LEN) {
3248 ret = LTTNG_ERR_FILTER_INVAL;
3249 goto error;
3250 }
3251 if (cmd_ctx->lsm->u.enable.bytecode_len == 0) {
3252 ret = LTTNG_ERR_FILTER_INVAL;
3253 goto error;
3254 }
3255 bytecode = zmalloc(cmd_ctx->lsm->u.enable.bytecode_len);
3256 if (!bytecode) {
3257 ret = LTTNG_ERR_FILTER_NOMEM;
3258 goto error;
3259 }
3260 /* Receive var. len. data */
3261 DBG("Receiving var len data from client ...");
3262 ret = lttcomm_recv_unix_sock(sock, bytecode,
3263 cmd_ctx->lsm->u.enable.bytecode_len);
3264 if (ret <= 0) {
3265 DBG("Nothing recv() from client var len data... continuing");
3266 *sock_error = 1;
3267 ret = LTTNG_ERR_FILTER_INVAL;
3268 goto error;
3269 }
3270
3271 if (bytecode->len + sizeof(*bytecode)
3272 != cmd_ctx->lsm->u.enable.bytecode_len) {
3273 free(bytecode);
3274 ret = LTTNG_ERR_FILTER_INVAL;
3275 goto error;
3276 }
3277
3278 ret = cmd_enable_event(cmd_ctx->session, &cmd_ctx->lsm->domain,
3279 cmd_ctx->lsm->u.enable.channel_name,
3280 &cmd_ctx->lsm->u.enable.event, bytecode, kernel_poll_pipe[1]);
3281 break;
3282 }
3283 case LTTNG_DATA_PENDING:
3284 {
3285 ret = cmd_data_pending(cmd_ctx->session);
3286 break;
3287 }
3288 case LTTNG_SNAPSHOT_ADD_OUTPUT:
3289 {
3290 struct lttcomm_lttng_output_id reply;
3291
3292 ret = cmd_snapshot_add_output(cmd_ctx->session,
3293 &cmd_ctx->lsm->u.snapshot_output.output, &reply.id);
3294 if (ret != LTTNG_OK) {
3295 goto error;
3296 }
3297
3298 ret = setup_lttng_msg(cmd_ctx, sizeof(reply));
3299 if (ret < 0) {
3300 goto setup_error;
3301 }
3302
3303 /* Copy output list into message payload */
3304 memcpy(cmd_ctx->llm->payload, &reply, sizeof(reply));
3305 ret = LTTNG_OK;
3306 break;
3307 }
3308 case LTTNG_SNAPSHOT_DEL_OUTPUT:
3309 {
3310 ret = cmd_snapshot_del_output(cmd_ctx->session,
3311 &cmd_ctx->lsm->u.snapshot_output.output);
3312 break;
3313 }
3314 case LTTNG_SNAPSHOT_LIST_OUTPUT:
3315 {
3316 ssize_t nb_output;
3317 struct lttng_snapshot_output *outputs = NULL;
3318
3319 nb_output = cmd_snapshot_list_outputs(cmd_ctx->session, &outputs);
3320 if (nb_output < 0) {
3321 ret = -nb_output;
3322 goto error;
3323 }
3324
3325 ret = setup_lttng_msg(cmd_ctx,
3326 nb_output * sizeof(struct lttng_snapshot_output));
3327 if (ret < 0) {
3328 free(outputs);
3329 goto setup_error;
3330 }
3331
3332 if (outputs) {
3333 /* Copy output list into message payload */
3334 memcpy(cmd_ctx->llm->payload, outputs,
3335 nb_output * sizeof(struct lttng_snapshot_output));
3336 free(outputs);
3337 }
3338
3339 ret = LTTNG_OK;
3340 break;
3341 }
3342 case LTTNG_SNAPSHOT_RECORD:
3343 {
3344 ret = cmd_snapshot_record(cmd_ctx->session,
3345 &cmd_ctx->lsm->u.snapshot_record.output,
3346 cmd_ctx->lsm->u.snapshot_record.wait);
3347 break;
3348 }
3349 case LTTNG_CREATE_SESSION_SNAPSHOT:
3350 {
3351 size_t nb_uri, len;
3352 struct lttng_uri *uris = NULL;
3353
3354 nb_uri = cmd_ctx->lsm->u.uri.size;
3355 len = nb_uri * sizeof(struct lttng_uri);
3356
3357 if (nb_uri > 0) {
3358 uris = zmalloc(len);
3359 if (uris == NULL) {
3360 ret = LTTNG_ERR_FATAL;
3361 goto error;
3362 }
3363
3364 /* Receive variable len data */
3365 DBG("Waiting for %zu URIs from client ...", nb_uri);
3366 ret = lttcomm_recv_unix_sock(sock, uris, len);
3367 if (ret <= 0) {
3368 DBG("No URIs received from client... continuing");
3369 *sock_error = 1;
3370 ret = LTTNG_ERR_SESSION_FAIL;
3371 free(uris);
3372 goto error;
3373 }
3374
3375 if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
3376 DBG("Creating session with ONE network URI is a bad call");
3377 ret = LTTNG_ERR_SESSION_FAIL;
3378 free(uris);
3379 goto error;
3380 }
3381 }
3382
3383 ret = cmd_create_session_snapshot(cmd_ctx->lsm->session.name, uris,
3384 nb_uri, &cmd_ctx->creds);
3385 free(uris);
3386 break;
3387 }
3388 case LTTNG_CREATE_SESSION_LIVE:
3389 {
3390 size_t nb_uri, len;
3391 struct lttng_uri *uris = NULL;
3392
3393 nb_uri = cmd_ctx->lsm->u.uri.size;
3394 len = nb_uri * sizeof(struct lttng_uri);
3395
3396 if (nb_uri > 0) {
3397 uris = zmalloc(len);
3398 if (uris == NULL) {
3399 ret = LTTNG_ERR_FATAL;
3400 goto error;
3401 }
3402
3403 /* Receive variable len data */
3404 DBG("Waiting for %zu URIs from client ...", nb_uri);
3405 ret = lttcomm_recv_unix_sock(sock, uris, len);
3406 if (ret <= 0) {
3407 DBG("No URIs received from client... continuing");
3408 *sock_error = 1;
3409 ret = LTTNG_ERR_SESSION_FAIL;
3410 free(uris);
3411 goto error;
3412 }
3413
3414 if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
3415 DBG("Creating session with ONE network URI is a bad call");
3416 ret = LTTNG_ERR_SESSION_FAIL;
3417 free(uris);
3418 goto error;
3419 }
3420 }
3421
3422 ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris,
3423 nb_uri, &cmd_ctx->creds, cmd_ctx->lsm->u.session_live.timer_interval);
3424 free(uris);
3425 break;
3426 }
3427 default:
3428 ret = LTTNG_ERR_UND;
3429 break;
3430 }
3431
3432 error:
3433 if (cmd_ctx->llm == NULL) {
3434 DBG("Missing llm structure. Allocating one.");
3435 if (setup_lttng_msg(cmd_ctx, 0) < 0) {
3436 goto setup_error;
3437 }
3438 }
3439 /* Set return code */
3440 cmd_ctx->llm->ret_code = ret;
3441 setup_error:
3442 if (cmd_ctx->session) {
3443 session_unlock(cmd_ctx->session);
3444 }
3445 if (need_tracing_session) {
3446 session_unlock_list();
3447 }
3448 init_setup_error:
3449 return ret;
3450 }
3451
3452 /*
3453 * Thread managing health check socket.
3454 */
3455 static void *thread_manage_health(void *data)
3456 {
3457 int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
3458 uint32_t revents, nb_fd;
3459 struct lttng_poll_event events;
3460 struct lttcomm_health_msg msg;
3461 struct lttcomm_health_data reply;
3462
3463 DBG("[thread] Manage health check started");
3464
3465 rcu_register_thread();
3466
3467 /* We might hit an error path before this is created. */
3468 lttng_poll_init(&events);
3469
3470 /* Create unix socket */
3471 sock = lttcomm_create_unix_sock(health_unix_sock_path);
3472 if (sock < 0) {
3473 ERR("Unable to create health check Unix socket");
3474 ret = -1;
3475 goto error;
3476 }
3477
3478 /*
3479 * Set the CLOEXEC flag. Return code is useless because either way, the
3480 * show must go on.
3481 */
3482 (void) utils_set_fd_cloexec(sock);
3483
3484 ret = lttcomm_listen_unix_sock(sock);
3485 if (ret < 0) {
3486 goto error;
3487 }
3488
3489 /*
3490 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
3491 * more will be added to this poll set.
3492 */
3493 ret = sessiond_set_thread_pollset(&events, 2);
3494 if (ret < 0) {
3495 goto error;
3496 }
3497
3498 /* Add the application registration socket */
3499 ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
3500 if (ret < 0) {
3501 goto error;
3502 }
3503
3504 while (1) {
3505 DBG("Health check ready");
3506
3507 /* Inifinite blocking call, waiting for transmission */
3508 restart:
3509 ret = lttng_poll_wait(&events, -1);
3510 if (ret < 0) {
3511 /*
3512 * Restart interrupted system call.
3513 */
3514 if (errno == EINTR) {
3515 goto restart;
3516 }
3517 goto error;
3518 }
3519
3520 nb_fd = ret;
3521
3522 for (i = 0; i < nb_fd; i++) {
3523 /* Fetch once the poll data */
3524 revents = LTTNG_POLL_GETEV(&events, i);
3525 pollfd = LTTNG_POLL_GETFD(&events, i);
3526
3527 /* Thread quit pipe has been closed. Killing thread. */
3528 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
3529 if (ret) {
3530 err = 0;
3531 goto exit;
3532 }
3533
3534 /* Event on the registration socket */
3535 if (pollfd == sock) {
3536 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3537 ERR("Health socket poll error");
3538 goto error;
3539 }
3540 }
3541 }
3542
3543 new_sock = lttcomm_accept_unix_sock(sock);
3544 if (new_sock < 0) {
3545 goto error;
3546 }
3547
3548 /*
3549 * Set the CLOEXEC flag. Return code is useless because either way, the
3550 * show must go on.
3551 */
3552 (void) utils_set_fd_cloexec(new_sock);
3553
3554 DBG("Receiving data from client for health...");
3555 ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
3556 if (ret <= 0) {
3557 DBG("Nothing recv() from client... continuing");
3558 ret = close(new_sock);
3559 if (ret) {
3560 PERROR("close");
3561 }
3562 new_sock = -1;
3563 continue;
3564 }
3565
3566 rcu_thread_online();
3567
3568 switch (msg.component) {
3569 case LTTNG_HEALTH_CMD:
3570 reply.ret_code = health_check_state(health_sessiond, HEALTH_TYPE_CMD);
3571 break;
3572 case LTTNG_HEALTH_APP_MANAGE:
3573 reply.ret_code = health_check_state(health_sessiond, HEALTH_TYPE_APP_MANAGE);
3574 break;
3575 case LTTNG_HEALTH_APP_REG:
3576 reply.ret_code = health_check_state(health_sessiond, HEALTH_TYPE_APP_REG);
3577 break;
3578 case LTTNG_HEALTH_KERNEL:
3579 reply.ret_code = health_check_state(health_sessiond, HEALTH_TYPE_KERNEL);
3580 break;
3581 case LTTNG_HEALTH_CONSUMER:
3582 reply.ret_code = check_consumer_health();
3583 break;
3584 case LTTNG_HEALTH_HT_CLEANUP:
3585 reply.ret_code = health_check_state(health_sessiond, HEALTH_TYPE_HT_CLEANUP);
3586 break;
3587 case LTTNG_HEALTH_APP_MANAGE_NOTIFY:
3588 reply.ret_code = health_check_state(health_sessiond, HEALTH_TYPE_APP_MANAGE_NOTIFY);
3589 break;
3590 case LTTNG_HEALTH_APP_REG_DISPATCH:
3591 reply.ret_code = health_check_state(health_sessiond, HEALTH_TYPE_APP_REG_DISPATCH);
3592 break;
3593 case LTTNG_HEALTH_ALL:
3594 reply.ret_code =
3595 health_check_state(health_sessiond, HEALTH_TYPE_APP_MANAGE) &&
3596 health_check_state(health_sessiond, HEALTH_TYPE_APP_REG) &&
3597 health_check_state(health_sessiond, HEALTH_TYPE_CMD) &&
3598 health_check_state(health_sessiond, HEALTH_TYPE_KERNEL) &&
3599 check_consumer_health() &&
3600 health_check_state(health_sessiond, HEALTH_TYPE_HT_CLEANUP) &&
3601 health_check_state(health_sessiond, HEALTH_TYPE_APP_MANAGE_NOTIFY) &&
3602 health_check_state(health_sessiond, HEALTH_TYPE_APP_REG_DISPATCH);
3603 break;
3604 default:
3605 reply.ret_code = LTTNG_ERR_UND;
3606 break;
3607 }
3608
3609 /*
3610 * Flip ret value since 0 is a success and 1 indicates a bad health for
3611 * the client where in the sessiond it is the opposite. Again, this is
3612 * just to make things easier for us poor developer which enjoy a lot
3613 * lazyness.
3614 */
3615 if (reply.ret_code == 0 || reply.ret_code == 1) {
3616 reply.ret_code = !reply.ret_code;
3617 }
3618
3619 DBG2("Health check return value %d", reply.ret_code);
3620
3621 ret = send_unix_sock(new_sock, (void *) &reply, sizeof(reply));
3622 if (ret < 0) {
3623 ERR("Failed to send health data back to client");
3624 }
3625
3626 /* End of transmission */
3627 ret = close(new_sock);
3628 if (ret) {
3629 PERROR("close");
3630 }
3631 new_sock = -1;
3632 }
3633
3634 exit:
3635 error:
3636 if (err) {
3637 ERR("Health error occurred in %s", __func__);
3638 }
3639 DBG("Health check thread dying");
3640 unlink(health_unix_sock_path);
3641 if (sock >= 0) {
3642 ret = close(sock);
3643 if (ret) {
3644 PERROR("close");
3645 }
3646 }
3647
3648 lttng_poll_clean(&events);
3649
3650 rcu_unregister_thread();
3651 return NULL;
3652 }
3653
3654 /*
3655 * This thread manage all clients request using the unix client socket for
3656 * communication.
3657 */
3658 static void *thread_manage_clients(void *data)
3659 {
3660 int sock = -1, ret, i, pollfd, err = -1;
3661 int sock_error;
3662 uint32_t revents, nb_fd;
3663 struct command_ctx *cmd_ctx = NULL;
3664 struct lttng_poll_event events;
3665
3666 DBG("[thread] Manage client started");
3667
3668 rcu_register_thread();
3669
3670 health_register(health_sessiond, HEALTH_TYPE_CMD);
3671
3672 if (testpoint(thread_manage_clients)) {
3673 goto error_testpoint;
3674 }
3675
3676 health_code_update();
3677
3678 ret = lttcomm_listen_unix_sock(client_sock);
3679 if (ret < 0) {
3680 goto error_listen;
3681 }
3682
3683 /*
3684 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
3685 * more will be added to this poll set.
3686 */
3687 ret = sessiond_set_thread_pollset(&events, 2);
3688 if (ret < 0) {
3689 goto error_create_poll;
3690 }
3691
3692 /* Add the application registration socket */
3693 ret = lttng_poll_add(&events, client_sock, LPOLLIN | LPOLLPRI);
3694 if (ret < 0) {
3695 goto error;
3696 }
3697
3698 /*
3699 * Notify parent pid that we are ready to accept command for client side.
3700 */
3701 if (opt_sig_parent) {
3702 kill(ppid, SIGUSR1);
3703 }
3704
3705 if (testpoint(thread_manage_clients_before_loop)) {
3706 goto error;
3707 }
3708
3709 health_code_update();
3710
3711 while (1) {
3712 DBG("Accepting client command ...");
3713
3714 /* Inifinite blocking call, waiting for transmission */
3715 restart:
3716 health_poll_entry();
3717 ret = lttng_poll_wait(&events, -1);
3718 health_poll_exit();
3719 if (ret < 0) {
3720 /*
3721 * Restart interrupted system call.
3722 */
3723 if (errno == EINTR) {
3724 goto restart;
3725 }
3726 goto error;
3727 }
3728
3729 nb_fd = ret;
3730
3731 for (i = 0; i < nb_fd; i++) {
3732 /* Fetch once the poll data */
3733 revents = LTTNG_POLL_GETEV(&events, i);
3734 pollfd = LTTNG_POLL_GETFD(&events, i);
3735
3736 health_code_update();
3737
3738 /* Thread quit pipe has been closed. Killing thread. */
3739 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
3740 if (ret) {
3741 err = 0;
3742 goto exit;
3743 }
3744
3745 /* Event on the registration socket */
3746 if (pollfd == client_sock) {
3747 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3748 ERR("Client socket poll error");
3749 goto error;
3750 }
3751 }
3752 }
3753
3754 DBG("Wait for client response");
3755
3756 health_code_update();
3757
3758 sock = lttcomm_accept_unix_sock(client_sock);
3759 if (sock < 0) {
3760 goto error;
3761 }
3762
3763 /*
3764 * Set the CLOEXEC flag. Return code is useless because either way, the
3765 * show must go on.
3766 */
3767 (void) utils_set_fd_cloexec(sock);
3768
3769 /* Set socket option for credentials retrieval */
3770 ret = lttcomm_setsockopt_creds_unix_sock(sock);
3771 if (ret < 0) {
3772 goto error;
3773 }
3774
3775 /* Allocate context command to process the client request */
3776 cmd_ctx = zmalloc(sizeof(struct command_ctx));
3777 if (cmd_ctx == NULL) {
3778 PERROR("zmalloc cmd_ctx");
3779 goto error;
3780 }
3781
3782 /* Allocate data buffer for reception */
3783 cmd_ctx->lsm = zmalloc(sizeof(struct lttcomm_session_msg));
3784 if (cmd_ctx->lsm == NULL) {
3785 PERROR("zmalloc cmd_ctx->lsm");
3786 goto error;
3787 }
3788
3789 cmd_ctx->llm = NULL;
3790 cmd_ctx->session = NULL;
3791
3792 health_code_update();
3793
3794 /*
3795 * Data is received from the lttng client. The struct
3796 * lttcomm_session_msg (lsm) contains the command and data request of
3797 * the client.
3798 */
3799 DBG("Receiving data from client ...");
3800 ret = lttcomm_recv_creds_unix_sock(sock, cmd_ctx->lsm,
3801 sizeof(struct lttcomm_session_msg), &cmd_ctx->creds);
3802 if (ret <= 0) {
3803 DBG("Nothing recv() from client... continuing");
3804 ret = close(sock);
3805 if (ret) {
3806 PERROR("close");
3807 }
3808 sock = -1;
3809 clean_command_ctx(&cmd_ctx);
3810 continue;
3811 }
3812
3813 health_code_update();
3814
3815 // TODO: Validate cmd_ctx including sanity check for
3816 // security purpose.
3817
3818 rcu_thread_online();
3819 /*
3820 * This function dispatch the work to the kernel or userspace tracer
3821 * libs and fill the lttcomm_lttng_msg data structure of all the needed
3822 * informations for the client. The command context struct contains
3823 * everything this function may needs.
3824 */
3825 ret = process_client_msg(cmd_ctx, sock, &sock_error);
3826 rcu_thread_offline();
3827 if (ret < 0) {
3828 ret = close(sock);
3829 if (ret) {
3830 PERROR("close");
3831 }
3832 sock = -1;
3833 /*
3834 * TODO: Inform client somehow of the fatal error. At
3835 * this point, ret < 0 means that a zmalloc failed
3836 * (ENOMEM). Error detected but still accept
3837 * command, unless a socket error has been
3838 * detected.
3839 */
3840 clean_command_ctx(&cmd_ctx);
3841 continue;
3842 }
3843
3844 health_code_update();
3845
3846 DBG("Sending response (size: %d, retcode: %s)",
3847 cmd_ctx->lttng_msg_size,
3848 lttng_strerror(-cmd_ctx->llm->ret_code));
3849 ret = send_unix_sock(sock, cmd_ctx->llm, cmd_ctx->lttng_msg_size);
3850 if (ret < 0) {
3851 ERR("Failed to send data back to client");
3852 }
3853
3854 /* End of transmission */
3855 ret = close(sock);
3856 if (ret) {
3857 PERROR("close");
3858 }
3859 sock = -1;
3860
3861 clean_command_ctx(&cmd_ctx);
3862
3863 health_code_update();
3864 }
3865
3866 exit:
3867 error:
3868 if (sock >= 0) {
3869 ret = close(sock);
3870 if (ret) {
3871 PERROR("close");
3872 }
3873 }
3874
3875 lttng_poll_clean(&events);
3876 clean_command_ctx(&cmd_ctx);
3877
3878 error_listen:
3879 error_create_poll:
3880 error_testpoint:
3881 unlink(client_unix_sock_path);
3882 if (client_sock >= 0) {
3883 ret = close(client_sock);
3884 if (ret) {
3885 PERROR("close");
3886 }
3887 }
3888
3889 if (err) {
3890 health_error();
3891 ERR("Health error occurred in %s", __func__);
3892 }
3893
3894 health_unregister(health_sessiond);
3895
3896 DBG("Client thread dying");
3897
3898 rcu_unregister_thread();
3899 return NULL;
3900 }
3901
3902
3903 /*
3904 * usage function on stderr
3905 */
3906 static void usage(void)
3907 {
3908 fprintf(stderr, "Usage: %s OPTIONS\n\nOptions:\n", progname);
3909 fprintf(stderr, " -h, --help Display this usage.\n");
3910 fprintf(stderr, " -c, --client-sock PATH Specify path for the client unix socket\n");
3911 fprintf(stderr, " -a, --apps-sock PATH Specify path for apps unix socket\n");
3912 fprintf(stderr, " --kconsumerd-err-sock PATH Specify path for the kernel consumer error socket\n");
3913 fprintf(stderr, " --kconsumerd-cmd-sock PATH Specify path for the kernel consumer command socket\n");
3914 fprintf(stderr, " --ustconsumerd32-err-sock PATH Specify path for the 32-bit UST consumer error socket\n");
3915 fprintf(stderr, " --ustconsumerd64-err-sock PATH Specify path for the 64-bit UST consumer error socket\n");
3916 fprintf(stderr, " --ustconsumerd32-cmd-sock PATH Specify path for the 32-bit UST consumer command socket\n");
3917 fprintf(stderr, " --ustconsumerd64-cmd-sock PATH Specify path for the 64-bit UST consumer command socket\n");
3918 fprintf(stderr, " --consumerd32-path PATH Specify path for the 32-bit UST consumer daemon binary\n");
3919 fprintf(stderr, " --consumerd32-libdir PATH Specify path for the 32-bit UST consumer daemon libraries\n");
3920 fprintf(stderr, " --consumerd64-path PATH Specify path for the 64-bit UST consumer daemon binary\n");
3921 fprintf(stderr, " --consumerd64-libdir PATH Specify path for the 64-bit UST consumer daemon libraries\n");
3922 fprintf(stderr, " -d, --daemonize Start as a daemon.\n");
3923 fprintf(stderr, " -g, --group NAME Specify the tracing group name. (default: tracing)\n");
3924 fprintf(stderr, " -V, --version Show version number.\n");
3925 fprintf(stderr, " -S, --sig-parent Send SIGCHLD to parent pid to notify readiness.\n");
3926 fprintf(stderr, " -q, --quiet No output at all.\n");
3927 fprintf(stderr, " -v, --verbose Verbose mode. Activate DBG() macro.\n");
3928 fprintf(stderr, " -p, --pidfile FILE Write a pid to FILE name overriding the default value.\n");
3929 fprintf(stderr, " --verbose-consumer Verbose mode for consumer. Activate DBG() macro.\n");
3930 fprintf(stderr, " --no-kernel Disable kernel tracer\n");
3931 }
3932
3933 /*
3934 * daemon argument parsing
3935 */
3936 static int parse_args(int argc, char **argv)
3937 {
3938 int c;
3939
3940 static struct option long_options[] = {
3941 { "client-sock", 1, 0, 'c' },
3942 { "apps-sock", 1, 0, 'a' },
3943 { "kconsumerd-cmd-sock", 1, 0, 'C' },
3944 { "kconsumerd-err-sock", 1, 0, 'E' },
3945 { "ustconsumerd32-cmd-sock", 1, 0, 'G' },
3946 { "ustconsumerd32-err-sock", 1, 0, 'H' },
3947 { "ustconsumerd64-cmd-sock", 1, 0, 'D' },
3948 { "ustconsumerd64-err-sock", 1, 0, 'F' },
3949 { "consumerd32-path", 1, 0, 'u' },
3950 { "consumerd32-libdir", 1, 0, 'U' },
3951 { "consumerd64-path", 1, 0, 't' },
3952 { "consumerd64-libdir", 1, 0, 'T' },
3953 { "daemonize", 0, 0, 'd' },
3954 { "sig-parent", 0, 0, 'S' },
3955 { "help", 0, 0, 'h' },
3956 { "group", 1, 0, 'g' },
3957 { "version", 0, 0, 'V' },
3958 { "quiet", 0, 0, 'q' },
3959 { "verbose", 0, 0, 'v' },
3960 { "verbose-consumer", 0, 0, 'Z' },
3961 { "no-kernel", 0, 0, 'N' },
3962 { "pidfile", 1, 0, 'p' },
3963 { NULL, 0, 0, 0 }
3964 };
3965
3966 while (1) {
3967 int option_index = 0;
3968 c = getopt_long(argc, argv, "dhqvVSN" "a:c:g:s:C:E:D:F:Z:u:t:p:",
3969 long_options, &option_index);
3970 if (c == -1) {
3971 break;
3972 }
3973
3974 switch (c) {
3975 case 0:
3976 fprintf(stderr, "option %s", long_options[option_index].name);
3977 if (optarg) {
3978 fprintf(stderr, " with arg %s\n", optarg);
3979 }
3980 break;
3981 case 'c':
3982 snprintf(client_unix_sock_path, PATH_MAX, "%s", optarg);
3983 break;
3984 case 'a':
3985 snprintf(apps_unix_sock_path, PATH_MAX, "%s", optarg);
3986 break;
3987 case 'd':
3988 opt_daemon = 1;
3989 break;
3990 case 'g':
3991 opt_tracing_group = optarg;
3992 break;
3993 case 'h':
3994 usage();
3995 exit(EXIT_FAILURE);
3996 case 'V':
3997 fprintf(stdout, "%s\n", VERSION);
3998 exit(EXIT_SUCCESS);
3999 case 'S':
4000 opt_sig_parent = 1;
4001 break;
4002 case 'E':
4003 snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
4004 break;
4005 case 'C':
4006 snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
4007 break;
4008 case 'F':
4009 snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
4010 break;
4011 case 'D':
4012 snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
4013 break;
4014 case 'H':
4015 snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
4016 break;
4017 case 'G':
4018 snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
4019 break;
4020 case 'N':
4021 opt_no_kernel = 1;
4022 break;
4023 case 'q':
4024 lttng_opt_quiet = 1;
4025 break;
4026 case 'v':
4027 /* Verbose level can increase using multiple -v */
4028 lttng_opt_verbose += 1;
4029 break;
4030 case 'Z':
4031 opt_verbose_consumer += 1;
4032 break;
4033 case 'u':
4034 consumerd32_bin= optarg;
4035 break;
4036 case 'U':
4037 consumerd32_libdir = optarg;
4038 break;
4039 case 't':
4040 consumerd64_bin = optarg;
4041 break;
4042 case 'T':
4043 consumerd64_libdir = optarg;
4044 break;
4045 case 'p':
4046 opt_pidfile = optarg;
4047 break;
4048 default:
4049 /* Unknown option or other error.
4050 * Error is printed by getopt, just return */
4051 return -1;
4052 }
4053 }
4054
4055 return 0;
4056 }
4057
4058 /*
4059 * Creates the two needed socket by the daemon.
4060 * apps_sock - The communication socket for all UST apps.
4061 * client_sock - The communication of the cli tool (lttng).
4062 */
4063 static int init_daemon_socket(void)
4064 {
4065 int ret = 0;
4066 mode_t old_umask;
4067
4068 old_umask = umask(0);
4069
4070 /* Create client tool unix socket */
4071 client_sock = lttcomm_create_unix_sock(client_unix_sock_path);
4072 if (client_sock < 0) {
4073 ERR("Create unix sock failed: %s", client_unix_sock_path);
4074 ret = -1;
4075 goto end;
4076 }
4077
4078 /* Set the cloexec flag */
4079 ret = utils_set_fd_cloexec(client_sock);
4080 if (ret < 0) {
4081 ERR("Unable to set CLOEXEC flag to the client Unix socket (fd: %d). "
4082 "Continuing but note that the consumer daemon will have a "
4083 "reference to this socket on exec()", client_sock);
4084 }
4085
4086 /* File permission MUST be 660 */
4087 ret = chmod(client_unix_sock_path, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
4088 if (ret < 0) {
4089 ERR("Set file permissions failed: %s", client_unix_sock_path);
4090 PERROR("chmod");
4091 goto end;
4092 }
4093
4094 /* Create the application unix socket */
4095 apps_sock = lttcomm_create_unix_sock(apps_unix_sock_path);
4096 if (apps_sock < 0) {
4097 ERR("Create unix sock failed: %s", apps_unix_sock_path);
4098 ret = -1;
4099 goto end;
4100 }
4101
4102 /* Set the cloexec flag */
4103 ret = utils_set_fd_cloexec(apps_sock);
4104 if (ret < 0) {
4105 ERR("Unable to set CLOEXEC flag to the app Unix socket (fd: %d). "
4106 "Continuing but note that the consumer daemon will have a "
4107 "reference to this socket on exec()", apps_sock);
4108 }
4109
4110 /* File permission MUST be 666 */
4111 ret = chmod(apps_unix_sock_path,
4112 S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
4113 if (ret < 0) {
4114 ERR("Set file permissions failed: %s", apps_unix_sock_path);
4115 PERROR("chmod");
4116 goto end;
4117 }
4118
4119 DBG3("Session daemon client socket %d and application socket %d created",
4120 client_sock, apps_sock);
4121
4122 end:
4123 umask(old_umask);
4124 return ret;
4125 }
4126
4127 /*
4128 * Check if the global socket is available, and if a daemon is answering at the
4129 * other side. If yes, error is returned.
4130 */
4131 static int check_existing_daemon(void)
4132 {
4133 /* Is there anybody out there ? */
4134 if (lttng_session_daemon_alive()) {
4135 return -EEXIST;
4136 }
4137
4138 return 0;
4139 }
4140
4141 /*
4142 * Set the tracing group gid onto the client socket.
4143 *
4144 * Race window between mkdir and chown is OK because we are going from more
4145 * permissive (root.root) to less permissive (root.tracing).
4146 */
4147 static int set_permissions(char *rundir)
4148 {
4149 int ret;
4150 gid_t gid;
4151
4152 ret = allowed_group();
4153 if (ret < 0) {
4154 WARN("No tracing group detected");
4155 /* Setting gid to 0 if no tracing group is found */
4156 gid = 0;
4157 } else {
4158 gid = ret;
4159 }
4160
4161 /* Set lttng run dir */
4162 ret = chown(rundir, 0, gid);
4163 if (ret < 0) {
4164 ERR("Unable to set group on %s", rundir);
4165 PERROR("chown");
4166 }
4167
4168 /* Ensure all applications and tracing group can search the run dir */
4169 ret = chmod(rundir, S_IRWXU | S_IXGRP | S_IXOTH);
4170 if (ret < 0) {
4171 ERR("Unable to set permissions on %s", rundir);
4172 PERROR("chmod");
4173 }
4174
4175 /* lttng client socket path */
4176 ret = chown(client_unix_sock_path, 0, gid);
4177 if (ret < 0) {
4178 ERR("Unable to set group on %s", client_unix_sock_path);
4179 PERROR("chown");
4180 }
4181
4182 /* kconsumer error socket path */
4183 ret = chown(kconsumer_data.err_unix_sock_path, 0, gid);
4184 if (ret < 0) {
4185 ERR("Unable to set group on %s", kconsumer_data.err_unix_sock_path);
4186 PERROR("chown");
4187 }
4188
4189 /* 64-bit ustconsumer error socket path */
4190 ret = chown(ustconsumer64_data.err_unix_sock_path, 0, gid);
4191 if (ret < 0) {
4192 ERR("Unable to set group on %s", ustconsumer64_data.err_unix_sock_path);
4193 PERROR("chown");
4194 }
4195
4196 /* 32-bit ustconsumer compat32 error socket path */
4197 ret = chown(ustconsumer32_data.err_unix_sock_path, 0, gid);
4198 if (ret < 0) {
4199 ERR("Unable to set group on %s", ustconsumer32_data.err_unix_sock_path);
4200 PERROR("chown");
4201 }
4202
4203 DBG("All permissions are set");
4204
4205 return ret;
4206 }
4207
4208 /*
4209 * Create the lttng run directory needed for all global sockets and pipe.
4210 */
4211 static int create_lttng_rundir(const char *rundir)
4212 {
4213 int ret;
4214
4215 DBG3("Creating LTTng run directory: %s", rundir);
4216
4217 ret = mkdir(rundir, S_IRWXU);
4218 if (ret < 0) {
4219 if (errno != EEXIST) {
4220 ERR("Unable to create %s", rundir);
4221 goto error;
4222 } else {
4223 ret = 0;
4224 }
4225 }
4226
4227 error:
4228 return ret;
4229 }
4230
4231 /*
4232 * Setup sockets and directory needed by the kconsumerd communication with the
4233 * session daemon.
4234 */
4235 static int set_consumer_sockets(struct consumer_data *consumer_data,
4236 const char *rundir)
4237 {
4238 int ret;
4239 char path[PATH_MAX];
4240
4241 switch (consumer_data->type) {
4242 case LTTNG_CONSUMER_KERNEL:
4243 snprintf(path, PATH_MAX, DEFAULT_KCONSUMERD_PATH, rundir);
4244 break;
4245 case LTTNG_CONSUMER64_UST:
4246 snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD64_PATH, rundir);
4247 break;
4248 case LTTNG_CONSUMER32_UST:
4249 snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD32_PATH, rundir);
4250 break;
4251 default:
4252 ERR("Consumer type unknown");
4253 ret = -EINVAL;
4254 goto error;
4255 }
4256
4257 DBG2("Creating consumer directory: %s", path);
4258
4259 ret = mkdir(path, S_IRWXU);
4260 if (ret < 0) {
4261 if (errno != EEXIST) {
4262 PERROR("mkdir");
4263 ERR("Failed to create %s", path);
4264 goto error;
4265 }
4266 ret = -1;
4267 }
4268
4269 /* Create the kconsumerd error unix socket */
4270 consumer_data->err_sock =
4271 lttcomm_create_unix_sock(consumer_data->err_unix_sock_path);
4272 if (consumer_data->err_sock < 0) {
4273 ERR("Create unix sock failed: %s", consumer_data->err_unix_sock_path);
4274 ret = -1;
4275 goto error;
4276 }
4277
4278 /*
4279 * Set the CLOEXEC flag. Return code is useless because either way, the
4280 * show must go on.
4281 */
4282 ret = utils_set_fd_cloexec(consumer_data->err_sock);
4283 if (ret < 0) {
4284 PERROR("utils_set_fd_cloexec");
4285 /* continue anyway */
4286 }
4287
4288 /* File permission MUST be 660 */
4289 ret = chmod(consumer_data->err_unix_sock_path,
4290 S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
4291 if (ret < 0) {
4292 ERR("Set file permissions failed: %s", consumer_data->err_unix_sock_path);
4293 PERROR("chmod");
4294 goto error;
4295 }
4296
4297 error:
4298 return ret;
4299 }
4300
4301 /*
4302 * Signal handler for the daemon
4303 *
4304 * Simply stop all worker threads, leaving main() return gracefully after
4305 * joining all threads and calling cleanup().
4306 */
4307 static void sighandler(int sig)
4308 {
4309 switch (sig) {
4310 case SIGPIPE:
4311 DBG("SIGPIPE caught");
4312 return;
4313 case SIGINT:
4314 DBG("SIGINT caught");
4315 stop_threads();
4316 break;
4317 case SIGTERM:
4318 DBG("SIGTERM caught");
4319 stop_threads();
4320 break;
4321 default:
4322 break;
4323 }
4324 }
4325
4326 /*
4327 * Setup signal handler for :
4328 * SIGINT, SIGTERM, SIGPIPE
4329 */
4330 static int set_signal_handler(void)
4331 {
4332 int ret = 0;
4333 struct sigaction sa;
4334 sigset_t sigset;
4335
4336 if ((ret = sigemptyset(&sigset)) < 0) {
4337 PERROR("sigemptyset");
4338 return ret;
4339 }
4340
4341 sa.sa_handler = sighandler;
4342 sa.sa_mask = sigset;
4343 sa.sa_flags = 0;
4344 if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
4345 PERROR("sigaction");
4346 return ret;
4347 }
4348
4349 if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
4350 PERROR("sigaction");
4351 return ret;
4352 }
4353
4354 if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
4355 PERROR("sigaction");
4356 return ret;
4357 }
4358
4359 DBG("Signal handler set for SIGTERM, SIGPIPE and SIGINT");
4360
4361 return ret;
4362 }
4363
4364 /*
4365 * Set open files limit to unlimited. This daemon can open a large number of
4366 * file descriptors in order to consumer multiple kernel traces.
4367 */
4368 static void set_ulimit(void)
4369 {
4370 int ret;
4371 struct rlimit lim;
4372
4373 /* The kernel does not allowed an infinite limit for open files */
4374 lim.rlim_cur = 65535;
4375 lim.rlim_max = 65535;
4376
4377 ret = setrlimit(RLIMIT_NOFILE, &lim);
4378 if (ret < 0) {
4379 PERROR("failed to set open files limit");
4380 }
4381 }
4382
4383 /*
4384 * Write pidfile using the rundir and opt_pidfile.
4385 */
4386 static void write_pidfile(void)
4387 {
4388 int ret;
4389 char pidfile_path[PATH_MAX];
4390
4391 assert(rundir);
4392
4393 if (opt_pidfile) {
4394 strncpy(pidfile_path, opt_pidfile, sizeof(pidfile_path));
4395 } else {
4396 /* Build pidfile path from rundir and opt_pidfile. */
4397 ret = snprintf(pidfile_path, sizeof(pidfile_path), "%s/"
4398 DEFAULT_LTTNG_SESSIOND_PIDFILE, rundir);
4399 if (ret < 0) {
4400 PERROR("snprintf pidfile path");
4401 goto error;
4402 }
4403 }
4404
4405 /*
4406 * Create pid file in rundir. Return value is of no importance. The
4407 * execution will continue even though we are not able to write the file.
4408 */
4409 (void) utils_create_pid_file(getpid(), pidfile_path);
4410
4411 error:
4412 return;
4413 }
4414
4415 /*
4416 * main
4417 */
4418 int main(int argc, char **argv)
4419 {
4420 int ret = 0;
4421 void *status;
4422 const char *home_path, *env_app_timeout;
4423
4424 init_kernel_workarounds();
4425
4426 rcu_register_thread();
4427
4428 setup_consumerd_path();
4429
4430 page_size = sysconf(_SC_PAGESIZE);
4431 if (page_size < 0) {
4432 PERROR("sysconf _SC_PAGESIZE");
4433 page_size = LONG_MAX;
4434 WARN("Fallback page size to %ld", page_size);
4435 }
4436
4437 /* Parse arguments */
4438 progname = argv[0];
4439 if ((ret = parse_args(argc, argv)) < 0) {
4440 goto error;
4441 }
4442
4443 /* Daemonize */
4444 if (opt_daemon) {
4445 int i;
4446
4447 /*
4448 * fork
4449 * child: setsid, close FD 0, 1, 2, chdir /
4450 * parent: exit (if fork is successful)
4451 */
4452 ret = daemon(0, 0);
4453 if (ret < 0) {
4454 PERROR("daemon");
4455 goto error;
4456 }
4457 /*
4458 * We are in the child. Make sure all other file
4459 * descriptors are closed, in case we are called with
4460 * more opened file descriptors than the standard ones.
4461 */
4462 for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) {
4463 (void) close(i);
4464 }
4465 }
4466
4467 /* Create thread quit pipe */
4468 if ((ret = init_thread_quit_pipe()) < 0) {
4469 goto error;
4470 }
4471
4472 /* Check if daemon is UID = 0 */
4473 is_root = !getuid();
4474
4475 if (is_root) {
4476 rundir = strdup(DEFAULT_LTTNG_RUNDIR);
4477
4478 /* Create global run dir with root access */
4479 ret = create_lttng_rundir(rundir);
4480 if (ret < 0) {
4481 goto error;
4482 }
4483
4484 if (strlen(apps_unix_sock_path) == 0) {
4485 snprintf(apps_unix_sock_path, PATH_MAX,
4486 DEFAULT_GLOBAL_APPS_UNIX_SOCK);
4487 }
4488
4489 if (strlen(client_unix_sock_path) == 0) {
4490 snprintf(client_unix_sock_path, PATH_MAX,
4491 DEFAULT_GLOBAL_CLIENT_UNIX_SOCK);
4492 }
4493
4494 /* Set global SHM for ust */
4495 if (strlen(wait_shm_path) == 0) {
4496 snprintf(wait_shm_path, PATH_MAX,
4497 DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH);
4498 }
4499
4500 if (strlen(health_unix_sock_path) == 0) {
4501 snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
4502 DEFAULT_GLOBAL_HEALTH_UNIX_SOCK);
4503 }
4504
4505 /* Setup kernel consumerd path */
4506 snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX,
4507 DEFAULT_KCONSUMERD_ERR_SOCK_PATH, rundir);
4508 snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX,
4509 DEFAULT_KCONSUMERD_CMD_SOCK_PATH, rundir);
4510
4511 DBG2("Kernel consumer err path: %s",
4512 kconsumer_data.err_unix_sock_path);
4513 DBG2("Kernel consumer cmd path: %s",
4514 kconsumer_data.cmd_unix_sock_path);
4515 } else {
4516 home_path = utils_get_home_dir();
4517 if (home_path == NULL) {
4518 /* TODO: Add --socket PATH option */
4519 ERR("Can't get HOME directory for sockets creation.");
4520 ret = -EPERM;
4521 goto error;
4522 }
4523
4524 /*
4525 * Create rundir from home path. This will create something like
4526 * $HOME/.lttng
4527 */
4528 ret = asprintf(&rundir, DEFAULT_LTTNG_HOME_RUNDIR, home_path);
4529 if (ret < 0) {
4530 ret = -ENOMEM;
4531 goto error;
4532 }
4533
4534 ret = create_lttng_rundir(rundir);
4535 if (ret < 0) {
4536 goto error;
4537 }
4538
4539 if (strlen(apps_unix_sock_path) == 0) {
4540 snprintf(apps_unix_sock_path, PATH_MAX,
4541 DEFAULT_HOME_APPS_UNIX_SOCK, home_path);
4542 }
4543
4544 /* Set the cli tool unix socket path */
4545 if (strlen(client_unix_sock_path) == 0) {
4546 snprintf(client_unix_sock_path, PATH_MAX,
4547 DEFAULT_HOME_CLIENT_UNIX_SOCK, home_path);
4548 }
4549
4550 /* Set global SHM for ust */
4551 if (strlen(wait_shm_path) == 0) {
4552 snprintf(wait_shm_path, PATH_MAX,
4553 DEFAULT_HOME_APPS_WAIT_SHM_PATH, getuid());
4554 }
4555
4556 /* Set health check Unix path */
4557 if (strlen(health_unix_sock_path) == 0) {
4558 snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
4559 DEFAULT_HOME_HEALTH_UNIX_SOCK, home_path);
4560 }
4561 }
4562
4563 /* Set consumer initial state */
4564 kernel_consumerd_state = CONSUMER_STOPPED;
4565 ust_consumerd_state = CONSUMER_STOPPED;
4566
4567 DBG("Client socket path %s", client_unix_sock_path);
4568 DBG("Application socket path %s", apps_unix_sock_path);
4569 DBG("Application wait path %s", wait_shm_path);
4570 DBG("LTTng run directory path: %s", rundir);
4571
4572 /* 32 bits consumerd path setup */
4573 snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX,
4574 DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH, rundir);
4575 snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX,
4576 DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH, rundir);
4577
4578 DBG2("UST consumer 32 bits err path: %s",
4579 ustconsumer32_data.err_unix_sock_path);
4580 DBG2("UST consumer 32 bits cmd path: %s",
4581 ustconsumer32_data.cmd_unix_sock_path);
4582
4583 /* 64 bits consumerd path setup */
4584 snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX,
4585 DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH, rundir);
4586 snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX,
4587 DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH, rundir);
4588
4589 DBG2("UST consumer 64 bits err path: %s",
4590 ustconsumer64_data.err_unix_sock_path);
4591 DBG2("UST consumer 64 bits cmd path: %s",
4592 ustconsumer64_data.cmd_unix_sock_path);
4593
4594 /*
4595 * See if daemon already exist.
4596 */
4597 if ((ret = check_existing_daemon()) < 0) {
4598 ERR("Already running daemon.\n");
4599 /*
4600 * We do not goto exit because we must not cleanup()
4601 * because a daemon is already running.
4602 */
4603 goto error;
4604 }
4605
4606 /*
4607 * Init UST app hash table. Alloc hash table before this point since
4608 * cleanup() can get called after that point.
4609 */
4610 ust_app_ht_alloc();
4611
4612 /* After this point, we can safely call cleanup() with "goto exit" */
4613
4614 /*
4615 * These actions must be executed as root. We do that *after* setting up
4616 * the sockets path because we MUST make the check for another daemon using
4617 * those paths *before* trying to set the kernel consumer sockets and init
4618 * kernel tracer.
4619 */
4620 if (is_root) {
4621 ret = set_consumer_sockets(&kconsumer_data, rundir);
4622 if (ret < 0) {
4623 goto exit;
4624 }
4625
4626 /* Setup kernel tracer */
4627 if (!opt_no_kernel) {
4628 init_kernel_tracer();
4629 }
4630
4631 /* Set ulimit for open files */
4632 set_ulimit();
4633 }
4634 /* init lttng_fd tracking must be done after set_ulimit. */
4635 lttng_fd_init();
4636
4637 ret = set_consumer_sockets(&ustconsumer64_data, rundir);
4638 if (ret < 0) {
4639 goto exit;
4640 }
4641
4642 ret = set_consumer_sockets(&ustconsumer32_data, rundir);
4643 if (ret < 0) {
4644 goto exit;
4645 }
4646
4647 if ((ret = set_signal_handler()) < 0) {
4648 goto exit;
4649 }
4650
4651 /* Setup the needed unix socket */
4652 if ((ret = init_daemon_socket()) < 0) {
4653 goto exit;
4654 }
4655
4656 /* Set credentials to socket */
4657 if (is_root && ((ret = set_permissions(rundir)) < 0)) {
4658 goto exit;
4659 }
4660
4661 /* Get parent pid if -S, --sig-parent is specified. */
4662 if (opt_sig_parent) {
4663 ppid = getppid();
4664 }
4665
4666 /* Setup the kernel pipe for waking up the kernel thread */
4667 if (is_root && !opt_no_kernel) {
4668 if ((ret = utils_create_pipe_cloexec(kernel_poll_pipe)) < 0) {
4669 goto exit;
4670 }
4671 }
4672
4673 /* Setup the thread ht_cleanup communication pipe. */
4674 if (utils_create_pipe_cloexec(ht_cleanup_pipe) < 0) {
4675 goto exit;
4676 }
4677
4678 /* Setup the thread apps communication pipe. */
4679 if ((ret = utils_create_pipe_cloexec(apps_cmd_pipe)) < 0) {
4680 goto exit;
4681 }
4682
4683 /* Setup the thread apps notify communication pipe. */
4684 if (utils_create_pipe_cloexec(apps_cmd_notify_pipe) < 0) {
4685 goto exit;
4686 }
4687
4688 /* Initialize global buffer per UID and PID registry. */
4689 buffer_reg_init_uid_registry();
4690 buffer_reg_init_pid_registry();
4691
4692 /* Init UST command queue. */
4693 cds_wfq_init(&ust_cmd_queue.queue);
4694
4695 /*
4696 * Get session list pointer. This pointer MUST NOT be free(). This list is
4697 * statically declared in session.c
4698 */
4699 session_list_ptr = session_get_list();
4700
4701 /* Set up max poll set size */
4702 lttng_poll_set_max_size();
4703
4704 cmd_init();
4705
4706 /* Check for the application socket timeout env variable. */
4707 env_app_timeout = getenv(DEFAULT_APP_SOCKET_TIMEOUT_ENV);
4708 if (env_app_timeout) {
4709 app_socket_timeout = atoi(env_app_timeout);
4710 } else {
4711 app_socket_timeout = DEFAULT_APP_SOCKET_RW_TIMEOUT;
4712 }
4713
4714 write_pidfile();
4715
4716 /* Initialize communication library */
4717 lttcomm_init();
4718 /* This is to get the TCP timeout value. */
4719 lttcomm_inet_init();
4720
4721 /*
4722 * Initialize the health check subsystem. This call should set the
4723 * appropriate time values.
4724 */
4725 health_sessiond = health_app_create(HEALTH_NUM_TYPE);
4726 if (!health_sessiond) {
4727 PERROR("health_app_create error");
4728 goto exit_health_sessiond_cleanup;
4729 }
4730 health_init(health_sessiond);
4731
4732 /* Create thread to manage the client socket */
4733 ret = pthread_create(&ht_cleanup_thread, NULL,
4734 thread_ht_cleanup, (void *) NULL);
4735 if (ret != 0) {
4736 PERROR("pthread_create ht_cleanup");
4737 goto exit_ht_cleanup;
4738 }
4739
4740 /* Create thread to manage the client socket */
4741 ret = pthread_create(&health_thread, NULL,
4742 thread_manage_health, (void *) NULL);
4743 if (ret != 0) {
4744 PERROR("pthread_create health");
4745 goto exit_health;
4746 }
4747
4748 /* Create thread to manage the client socket */
4749 ret = pthread_create(&client_thread, NULL,
4750 thread_manage_clients, (void *) NULL);
4751 if (ret != 0) {
4752 PERROR("pthread_create clients");
4753 goto exit_client;
4754 }
4755
4756 /* Create thread to dispatch registration */
4757 ret = pthread_create(&dispatch_thread, NULL,
4758 thread_dispatch_ust_registration, (void *) NULL);
4759 if (ret != 0) {
4760 PERROR("pthread_create dispatch");
4761 goto exit_dispatch;
4762 }
4763
4764 /* Create thread to manage application registration. */
4765 ret = pthread_create(&reg_apps_thread, NULL,
4766 thread_registration_apps, (void *) NULL);
4767 if (ret != 0) {
4768 PERROR("pthread_create registration");
4769 goto exit_reg_apps;
4770 }
4771
4772 /* Create thread to manage application socket */
4773 ret = pthread_create(&apps_thread, NULL,
4774 thread_manage_apps, (void *) NULL);
4775 if (ret != 0) {
4776 PERROR("pthread_create apps");
4777 goto exit_apps;
4778 }
4779
4780 /* Create thread to manage application notify socket */
4781 ret = pthread_create(&apps_notify_thread, NULL,
4782 ust_thread_manage_notify, (void *) NULL);
4783 if (ret != 0) {
4784 PERROR("pthread_create apps");
4785 goto exit_apps_notify;
4786 }
4787
4788 /* Don't start this thread if kernel tracing is not requested nor root */
4789 if (is_root && !opt_no_kernel) {
4790 /* Create kernel thread to manage kernel event */
4791 ret = pthread_create(&kernel_thread, NULL,
4792 thread_manage_kernel, (void *) NULL);
4793 if (ret != 0) {
4794 PERROR("pthread_create kernel");
4795 goto exit_kernel;
4796 }
4797
4798 ret = pthread_join(kernel_thread, &status);
4799 if (ret != 0) {
4800 PERROR("pthread_join");
4801 goto error; /* join error, exit without cleanup */
4802 }
4803 }
4804
4805 exit_kernel:
4806 ret = pthread_join(apps_notify_thread, &status);
4807 if (ret != 0) {
4808 PERROR("pthread_join apps notify");
4809 goto error; /* join error, exit without cleanup */
4810 }
4811
4812 exit_apps_notify:
4813 ret = pthread_join(apps_thread, &status);
4814 if (ret != 0) {
4815 PERROR("pthread_join apps");
4816 goto error; /* join error, exit without cleanup */
4817 }
4818
4819
4820 exit_apps:
4821 ret = pthread_join(reg_apps_thread, &status);
4822 if (ret != 0) {
4823 PERROR("pthread_join");
4824 goto error; /* join error, exit without cleanup */
4825 }
4826
4827 exit_reg_apps:
4828 ret = pthread_join(dispatch_thread, &status);
4829 if (ret != 0) {
4830 PERROR("pthread_join");
4831 goto error; /* join error, exit without cleanup */
4832 }
4833
4834 exit_dispatch:
4835 ret = pthread_join(client_thread, &status);
4836 if (ret != 0) {
4837 PERROR("pthread_join");
4838 goto error; /* join error, exit without cleanup */
4839 }
4840
4841 ret = join_consumer_thread(&kconsumer_data);
4842 if (ret != 0) {
4843 PERROR("join_consumer");
4844 goto error; /* join error, exit without cleanup */
4845 }
4846
4847 ret = join_consumer_thread(&ustconsumer32_data);
4848 if (ret != 0) {
4849 PERROR("join_consumer ust32");
4850 goto error; /* join error, exit without cleanup */
4851 }
4852
4853 ret = join_consumer_thread(&ustconsumer64_data);
4854 if (ret != 0) {
4855 PERROR("join_consumer ust64");
4856 goto error; /* join error, exit without cleanup */
4857 }
4858
4859 exit_client:
4860 ret = pthread_join(health_thread, &status);
4861 if (ret != 0) {
4862 PERROR("pthread_join health thread");
4863 goto error; /* join error, exit without cleanup */
4864 }
4865
4866 exit_health:
4867 ret = pthread_join(ht_cleanup_thread, &status);
4868 if (ret != 0) {
4869 PERROR("pthread_join ht cleanup thread");
4870 goto error; /* join error, exit without cleanup */
4871 }
4872 exit_ht_cleanup:
4873 health_app_destroy(health_sessiond);
4874 exit_health_sessiond_cleanup:
4875 exit:
4876 /*
4877 * cleanup() is called when no other thread is running.
4878 */
4879 rcu_thread_online();
4880 cleanup();
4881 rcu_thread_offline();
4882 rcu_unregister_thread();
4883 if (!ret) {
4884 exit(EXIT_SUCCESS);
4885 }
4886 error:
4887 exit(EXIT_FAILURE);
4888 }
This page took 0.203827 seconds and 5 git commands to generate.