Fix: handle EINTR for every read()
lttng-tools.git: src/bin/lttng-sessiond/main.c
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2 only,
7 * as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19 #define _GNU_SOURCE
20 #include <getopt.h>
21 #include <grp.h>
22 #include <limits.h>
23 #include <pthread.h>
24 #include <signal.h>
25 #include <stdio.h>
26 #include <stdlib.h>
27 #include <string.h>
28 #include <sys/mman.h>
29 #include <sys/mount.h>
30 #include <sys/resource.h>
31 #include <sys/socket.h>
32 #include <sys/stat.h>
33 #include <sys/types.h>
34 #include <sys/wait.h>
35 #include <urcu/uatomic.h>
36 #include <unistd.h>
37 #include <config.h>
38
39 #include <common/common.h>
40 #include <common/compat/poll.h>
41 #include <common/compat/socket.h>
42 #include <common/defaults.h>
43 #include <common/kernel-consumer/kernel-consumer.h>
44 #include <common/futex.h>
45 #include <common/relayd/relayd.h>
46 #include <common/utils.h>
47
48 #include "lttng-sessiond.h"
49 #include "channel.h"
50 #include "cmd.h"
51 #include "consumer.h"
52 #include "context.h"
53 #include "event.h"
54 #include "kernel.h"
55 #include "kernel-consumer.h"
56 #include "modprobe.h"
57 #include "shm.h"
58 #include "ust-ctl.h"
59 #include "ust-consumer.h"
60 #include "utils.h"
61 #include "fd-limit.h"
62 #include "health.h"
63 #include "testpoint.h"
64
65 #define CONSUMERD_FILE "lttng-consumerd"
66
67 /* Const values */
68 const char default_home_dir[] = DEFAULT_HOME_DIR;
69 const char default_tracing_group[] = DEFAULT_TRACING_GROUP;
70 const char default_ust_sock_dir[] = DEFAULT_UST_SOCK_DIR;
71 const char default_global_apps_pipe[] = DEFAULT_GLOBAL_APPS_PIPE;
72
73 const char *progname;
74 const char *opt_tracing_group;
75 static int opt_sig_parent;
76 static int opt_verbose_consumer;
77 static int opt_daemon;
78 static int opt_no_kernel;
79 static int is_root; /* Set to 1 if the daemon is running as root */
80 static pid_t ppid; /* Parent PID for --sig-parent option */
81 static char *rundir;
82
83 /*
84 * Consumer daemon specific control data. Every value not initialized here is
85 * set to 0 by the static definition.
86 */
87 static struct consumer_data kconsumer_data = {
88 .type = LTTNG_CONSUMER_KERNEL,
89 .err_unix_sock_path = DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
90 .cmd_unix_sock_path = DEFAULT_KCONSUMERD_CMD_SOCK_PATH,
91 .err_sock = -1,
92 .cmd_sock = -1,
93 .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
94 .lock = PTHREAD_MUTEX_INITIALIZER,
95 .cond = PTHREAD_COND_INITIALIZER,
96 .cond_mutex = PTHREAD_MUTEX_INITIALIZER,
97 };
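/* The 64-bit and 32-bit UST consumer daemons each get their own control data below. */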
98 static struct consumer_data ustconsumer64_data = {
99 .type = LTTNG_CONSUMER64_UST,
100 .err_unix_sock_path = DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
101 .cmd_unix_sock_path = DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH,
102 .err_sock = -1,
103 .cmd_sock = -1,
104 .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
105 .lock = PTHREAD_MUTEX_INITIALIZER,
106 .cond = PTHREAD_COND_INITIALIZER,
107 .cond_mutex = PTHREAD_MUTEX_INITIALIZER,
108 };
109 static struct consumer_data ustconsumer32_data = {
110 .type = LTTNG_CONSUMER32_UST,
111 .err_unix_sock_path = DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
112 .cmd_unix_sock_path = DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH,
113 .err_sock = -1,
114 .cmd_sock = -1,
115 .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
116 .lock = PTHREAD_MUTEX_INITIALIZER,
117 .cond = PTHREAD_COND_INITIALIZER,
118 .cond_mutex = PTHREAD_MUTEX_INITIALIZER,
119 };
120
121 /* Shared between threads */
122 static int dispatch_thread_exit;
123
124 /* Global application Unix socket path */
125 static char apps_unix_sock_path[PATH_MAX];
126 /* Global client Unix socket path */
127 static char client_unix_sock_path[PATH_MAX];
128 /* global wait shm path for UST */
129 static char wait_shm_path[PATH_MAX];
130 /* Global health check unix path */
131 static char health_unix_sock_path[PATH_MAX];
132
133 /* Sockets and FDs */
134 static int client_sock = -1;
135 static int apps_sock = -1;
136 int kernel_tracer_fd = -1;
137 static int kernel_poll_pipe[2] = { -1, -1 };
138
139 /*
140 * Quit pipe for all threads. This permits a single cancellation point
141 * for all threads when receiving an event on the pipe.
142 */
143 static int thread_quit_pipe[2] = { -1, -1 };
144
145 /*
146 * This pipe is used to inform the thread managing application communication
147 * that a command is queued and ready to be processed.
148 */
149 static int apps_cmd_pipe[2] = { -1, -1 };
150
151 /* Pthread, Mutexes and Semaphores */
152 static pthread_t apps_thread;
153 static pthread_t reg_apps_thread;
154 static pthread_t client_thread;
155 static pthread_t kernel_thread;
156 static pthread_t dispatch_thread;
157 static pthread_t health_thread;
158
159 /*
160 * UST registration command queue. This queue is tied to a futex and uses the N
161 * wakers / 1 waiter scheme implemented and detailed in futex.c/.h
162 *
163 * The thread_manage_apps and thread_dispatch_ust_registration interact with
164 * this queue and the wait/wake scheme.
165 */
166 static struct ust_cmd_queue ust_cmd_queue;
167
168 /*
169 * Pointer initialized before thread creation.
170 *
171 * This points to the tracing session list containing the session count and a
172 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
173 * MUST NOT be taken if you call a public function in session.c.
174 *
175 * The lock is nested inside the structure: session_list_ptr->lock. Please use
176 * session_lock_list and session_unlock_list for lock acquisition.
177 */
178 static struct ltt_session_list *session_list_ptr;
179
180 int ust_consumerd64_fd = -1;
181 int ust_consumerd32_fd = -1;
182
183 static const char *consumerd32_bin = CONFIG_CONSUMERD32_BIN;
184 static const char *consumerd64_bin = CONFIG_CONSUMERD64_BIN;
185 static const char *consumerd32_libdir = CONFIG_CONSUMERD32_LIBDIR;
186 static const char *consumerd64_libdir = CONFIG_CONSUMERD64_LIBDIR;
187
188 static const char *module_proc_lttng = "/proc/lttng";
189
190 /*
191 * Consumer daemon state which is changed when spawning it, killing it or in
192 * case of a fatal error.
193 */
194 enum consumerd_state {
195 CONSUMER_STARTED = 1,
196 CONSUMER_STOPPED = 2,
197 CONSUMER_ERROR = 3,
198 };
199
200 /*
201 * This consumer daemon state is used to validate if a client command will be
202 * able to reach the consumer. If not, the client is informed. For instance,
203 * doing a "lttng start" when the consumer state is set to ERROR will return an
204 * error to the client.
205 *
206 * The following example shows a possible race condition of this scheme:
207 *
208 * consumer thread error happens
209 * client cmd arrives
210 * client cmd checks state -> still OK
211 * consumer thread exit, sets error
212 * client cmd try to talk to consumer
213 * ...
214 *
215 * However, since the consumer is a different daemon, we have no way of making
216 * sure the command will reach it safely even with this state flag. This is why
217 * we consider that up to the state validation during command processing, the
218 * command is safe. After that, we can not guarantee the correctness of the
219 * client request vis-a-vis the consumer.
220 */
221 static enum consumerd_state ust_consumerd_state;
222 static enum consumerd_state kernel_consumerd_state;
223
224 /* Used for the health monitoring of the session daemon. See health.h */
225 struct health_state health_thread_cmd;
226 struct health_state health_thread_app_manage;
227 struct health_state health_thread_app_reg;
228 struct health_state health_thread_kernel;
229
230 /*
231 * Socket timeout for receiving and sending in seconds.
232 */
233 static int app_socket_timeout;
234
235 static
236 void setup_consumerd_path(void)
237 {
238 const char *bin, *libdir;
239
240 /*
241 * Allow INSTALL_BIN_PATH to be used as a target path for the
242 * native architecture size consumer if CONFIG_CONSUMER*_PATH
243 * has not been defined.
244 */
245 #if (CAA_BITS_PER_LONG == 32)
246 if (!consumerd32_bin[0]) {
247 consumerd32_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
248 }
249 if (!consumerd32_libdir[0]) {
250 consumerd32_libdir = INSTALL_LIB_PATH;
251 }
252 #elif (CAA_BITS_PER_LONG == 64)
253 if (!consumerd64_bin[0]) {
254 consumerd64_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
255 }
256 if (!consumerd64_libdir[0]) {
257 consumerd64_libdir = INSTALL_LIB_PATH;
258 }
259 #else
260 #error "Unknown bitness"
261 #endif
262
263 /*
264 * runtime env. var. overrides the build default.
265 */
266 bin = getenv("LTTNG_CONSUMERD32_BIN");
267 if (bin) {
268 consumerd32_bin = bin;
269 }
270 bin = getenv("LTTNG_CONSUMERD64_BIN");
271 if (bin) {
272 consumerd64_bin = bin;
273 }
274 libdir = getenv("LTTNG_CONSUMERD32_LIBDIR");
275 if (libdir) {
276 consumerd32_libdir = libdir;
277 }
278 libdir = getenv("LTTNG_CONSUMERD64_LIBDIR");
279 if (libdir) {
280 consumerd64_libdir = libdir;
281 }
282 }
283
284 /*
285 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
286 */
287 static int create_thread_poll_set(struct lttng_poll_event *events,
288 unsigned int size)
289 {
290 int ret;
291
292 if (events == NULL || size == 0) {
293 ret = -1;
294 goto error;
295 }
296
297 ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
298 if (ret < 0) {
299 goto error;
300 }
301
302 /* Add quit pipe */
303 ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN);
304 if (ret < 0) {
305 goto error;
306 }
307
308 return 0;
309
310 error:
311 return ret;
312 }
313
314 /*
315 * Check if the thread quit pipe was triggered.
316 *
317 * Return 1 if it was triggered, else 0.
318 */
319 static int check_thread_quit_pipe(int fd, uint32_t events)
320 {
321 if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
322 return 1;
323 }
324
325 return 0;
326 }
327
328 /*
329 * Return group ID of the tracing group or -1 if not found.
330 */
331 static gid_t allowed_group(void)
332 {
333 struct group *grp;
334
335 if (opt_tracing_group) {
336 grp = getgrnam(opt_tracing_group);
337 } else {
338 grp = getgrnam(default_tracing_group);
339 }
340 if (!grp) {
341 return -1;
342 } else {
343 return grp->gr_gid;
344 }
345 }
346
347 /*
348 * Init thread quit pipe.
349 *
350 * Return -1 on error or 0 if all pipes are created.
351 */
352 static int init_thread_quit_pipe(void)
353 {
354 int ret, i;
355
356 ret = pipe(thread_quit_pipe);
357 if (ret < 0) {
358 PERROR("thread quit pipe");
359 goto error;
360 }
361
362 for (i = 0; i < 2; i++) {
363 ret = fcntl(thread_quit_pipe[i], F_SETFD, FD_CLOEXEC);
364 if (ret < 0) {
365 PERROR("fcntl");
366 goto error;
367 }
368 }
369
370 error:
371 return ret;
372 }
373
374 /*
375 * Stop all threads by closing the thread quit pipe.
376 */
377 static void stop_threads(void)
378 {
379 int ret;
380
381 /* Stopping all threads */
382 DBG("Terminating all threads");
383 ret = notify_thread_pipe(thread_quit_pipe[1]);
384 if (ret < 0) {
385 ERR("write error on thread quit pipe");
386 }
387
388 /* Dispatch thread */
389 CMM_STORE_SHARED(dispatch_thread_exit, 1);
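/* Wake the dispatch thread so it notices the exit flag even if it is blocked on the futex. */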
390 futex_nto1_wake(&ust_cmd_queue.futex);
391 }
392
393 /*
394 * Cleanup the daemon
395 */
396 static void cleanup(void)
397 {
398 int ret;
399 char *cmd = NULL;
400 struct ltt_session *sess, *stmp;
401
402 DBG("Cleaning up");
403
404 /* First thing first, stop all threads */
405 utils_close_pipe(thread_quit_pipe);
406
407 DBG("Removing %s directory", rundir);
408 ret = asprintf(&cmd, "rm -rf %s", rundir);
409 if (ret < 0) {
410 ERR("asprintf failed. Something is really wrong!");
411 }
412
413 /* Remove lttng run directory */
414 ret = system(cmd);
415 if (ret < 0) {
416 ERR("Unable to clean %s", rundir);
417 }
418 free(cmd);
419 free(rundir);
420
421 DBG("Cleaning up all sessions");
422
423 /* Destroy session list mutex */
424 if (session_list_ptr != NULL) {
425 pthread_mutex_destroy(&session_list_ptr->lock);
426
427 /* Cleanup ALL session */
428 cds_list_for_each_entry_safe(sess, stmp,
429 &session_list_ptr->head, list) {
430 cmd_destroy_session(sess, kernel_poll_pipe[1]);
431 }
432 }
433
434 DBG("Closing all UST sockets");
435 ust_app_clean_list();
436
437 if (is_root && !opt_no_kernel) {
438 DBG2("Closing kernel fd");
439 if (kernel_tracer_fd >= 0) {
440 ret = close(kernel_tracer_fd);
441 if (ret) {
442 PERROR("close");
443 }
444 }
445 DBG("Unloading kernel modules");
446 modprobe_remove_lttng_all();
447 }
448
449 /* <fun> */
450 DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
451 "Matthew, BEET driven development works!%c[%dm",
452 27, 1, 31, 27, 0, 27, 1, 33, 27, 0);
453 /* </fun> */
454 }
455
456 /*
457 * Send data on a unix socket using the liblttsessiondcomm API.
458 *
459 * Return lttcomm error code.
460 */
461 static int send_unix_sock(int sock, void *buf, size_t len)
462 {
463 /* Check valid length */
464 if (len == 0) {
465 return -1;
466 }
467
468 return lttcomm_send_unix_sock(sock, buf, len);
469 }
470
471 /*
472 * Free memory of a command context structure.
473 */
474 static void clean_command_ctx(struct command_ctx **cmd_ctx)
475 {
476 DBG("Clean command context structure");
477 if (*cmd_ctx) {
478 if ((*cmd_ctx)->llm) {
479 free((*cmd_ctx)->llm);
480 }
481 if ((*cmd_ctx)->lsm) {
482 free((*cmd_ctx)->lsm);
483 }
484 free(*cmd_ctx);
485 *cmd_ctx = NULL;
486 }
487 }
488
489 /*
490 * Notify UST applications using the shm mmap futex.
491 */
492 static int notify_ust_apps(int active)
493 {
494 char *wait_shm_mmap;
495
496 DBG("Notifying applications of session daemon state: %d", active);
497
498 /* See shm.c for this call implying mmap, shm and futex calls */
499 wait_shm_mmap = shm_ust_get_mmap(wait_shm_path, is_root);
500 if (wait_shm_mmap == NULL) {
501 goto error;
502 }
503
504 /* Wake waiting process */
505 futex_wait_update((int32_t *) wait_shm_mmap, active);
506
507 /* Apps notified successfully */
508 return 0;
509
510 error:
511 return -1;
512 }
513
514 /*
515 * Setup the outgoing data buffer for the response (llm) by allocating the
516 * right amount of memory and copying the original information from the lsm
517 * structure.
518 *
519 * Return the size of the payload buffer (the total message size is stored in cmd_ctx->lttng_msg_size).
520 */
521 static int setup_lttng_msg(struct command_ctx *cmd_ctx, size_t size)
522 {
523 int ret, buf_size;
524
525 buf_size = size;
526
527 cmd_ctx->llm = zmalloc(sizeof(struct lttcomm_lttng_msg) + buf_size);
528 if (cmd_ctx->llm == NULL) {
529 PERROR("zmalloc");
530 ret = -ENOMEM;
531 goto error;
532 }
533
534 /* Copy common data */
535 cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
536 cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;
537
538 cmd_ctx->llm->data_size = size;
539 cmd_ctx->lttng_msg_size = sizeof(struct lttcomm_lttng_msg) + buf_size;
540
541 return buf_size;
542
543 error:
544 return ret;
545 }
546
547 /*
548 * Update the kernel poll set of all channel fd available over all tracing
549 * session. Add the wakeup pipe at the end of the set.
550 */
551 static int update_kernel_poll(struct lttng_poll_event *events)
552 {
553 int ret;
554 struct ltt_session *session;
555 struct ltt_kernel_channel *channel;
556
557 DBG("Updating kernel poll set");
558
559 session_lock_list();
560 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
561 session_lock(session);
562 if (session->kernel_session == NULL) {
563 session_unlock(session);
564 continue;
565 }
566
567 cds_list_for_each_entry(channel,
568 &session->kernel_session->channel_list.head, list) {
569 /* Add channel fd to the kernel poll set */
570 ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
571 if (ret < 0) {
572 session_unlock(session);
573 goto error;
574 }
575 DBG("Channel fd %d added to kernel set", channel->fd);
576 }
577 session_unlock(session);
578 }
579 session_unlock_list();
580
581 return 0;
582
583 error:
584 session_unlock_list();
585 return -1;
586 }
587
588 /*
589 * Find the channel fd matching 'fd' over all tracing sessions. When found, check
590 * for new channel streams and send those stream fds to the kernel consumer.
591 *
592 * Useful for CPU hotplug feature.
593 */
594 static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
595 {
596 int ret = 0;
597 struct ltt_session *session;
598 struct ltt_kernel_session *ksess;
599 struct ltt_kernel_channel *channel;
600
601 DBG("Updating kernel streams for channel fd %d", fd);
602
603 session_lock_list();
604 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
605 session_lock(session);
606 if (session->kernel_session == NULL) {
607 session_unlock(session);
608 continue;
609 }
610 ksess = session->kernel_session;
611
612 cds_list_for_each_entry(channel, &ksess->channel_list.head, list) {
613 if (channel->fd == fd) {
614 DBG("Channel found, updating kernel streams");
615 ret = kernel_open_channel_stream(channel);
616 if (ret < 0) {
617 goto error;
618 }
619
620 /*
621 * Have we already sent fds to the consumer? If yes, it means
622 * that tracing is started so it is safe to send our updated
623 * stream fds.
624 */
625 if (ksess->consumer_fds_sent == 1 && ksess->consumer != NULL) {
626 struct lttng_ht_iter iter;
627 struct consumer_socket *socket;
628
629
630 cds_lfht_for_each_entry(ksess->consumer->socks->ht,
631 &iter.iter, socket, node.node) {
632 /* Code flow error */
633 assert(socket->fd >= 0);
634
635 pthread_mutex_lock(socket->lock);
636 ret = kernel_consumer_send_channel_stream(socket,
637 channel, ksess);
638 pthread_mutex_unlock(socket->lock);
639 if (ret < 0) {
640 goto error;
641 }
642 }
643 }
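/* Channel handled: reuse the error path below to release the session and list locks before returning ret. */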
644 goto error;
645 }
646 }
647 session_unlock(session);
648 }
649 session_unlock_list();
650 return ret;
651
652 error:
653 session_unlock(session);
654 session_unlock_list();
655 return ret;
656 }
657
658 /*
659 * For each tracing session, update newly registered apps.
660 */
661 static void update_ust_app(int app_sock)
662 {
663 struct ltt_session *sess, *stmp;
664
665 session_lock_list();
666
667 /* For all tracing session(s) */
668 cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
669 session_lock(sess);
670 if (sess->ust_session) {
671 ust_app_global_update(sess->ust_session, app_sock);
672 }
673 session_unlock(sess);
674 }
675
676 session_unlock_list();
677 }
678
679 /*
680 * This thread manages events coming from the kernel.
681 *
682 * Features supported in this thread:
683 * -) CPU Hotplug
684 */
685 static void *thread_manage_kernel(void *data)
686 {
687 int ret, i, pollfd, update_poll_flag = 1, err = -1;
688 uint32_t revents, nb_fd;
689 char tmp;
690 struct lttng_poll_event events;
691
692 DBG("[thread] Thread manage kernel started");
693
694 /*
695 * The first pass of the while loop cleans this structure, which could free
696 * non-NULL pointers, so zero it before the loop.
697 */
698 memset(&events, 0, sizeof(events));
699
700 if (testpoint(thread_manage_kernel)) {
701 goto error_testpoint;
702 }
703
704 health_code_update(&health_thread_kernel);
705
706 if (testpoint(thread_manage_kernel_before_loop)) {
707 goto error_testpoint;
708 }
709
710 while (1) {
711 health_code_update(&health_thread_kernel);
712
713 if (update_poll_flag == 1) {
714 /* Clean events object. We are about to populate it again. */
715 lttng_poll_clean(&events);
716
717 ret = create_thread_poll_set(&events, 2);
718 if (ret < 0) {
719 goto error_poll_create;
720 }
721
722 ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);
723 if (ret < 0) {
724 goto error;
725 }
726
727 /* This will add the available kernel channel if any. */
728 ret = update_kernel_poll(&events);
729 if (ret < 0) {
730 goto error;
731 }
732 update_poll_flag = 0;
733 }
734
735 DBG("Thread kernel polling on %d fds", LTTNG_POLL_GETNB(&events));
736
737 /* Poll infinite value of time */
738 restart:
739 health_poll_update(&health_thread_kernel);
740 ret = lttng_poll_wait(&events, -1);
741 health_poll_update(&health_thread_kernel);
742 if (ret < 0) {
743 /*
744 * Restart interrupted system call.
745 */
746 if (errno == EINTR) {
747 goto restart;
748 }
749 goto error;
750 } else if (ret == 0) {
751 /* Should not happen since timeout is infinite */
752 ERR("Return value of poll is 0 with an infinite timeout.\n"
753 "This should not have happened! Continuing...");
754 continue;
755 }
756
757 nb_fd = ret;
758
759 for (i = 0; i < nb_fd; i++) {
760 /* Fetch once the poll data */
761 revents = LTTNG_POLL_GETEV(&events, i);
762 pollfd = LTTNG_POLL_GETFD(&events, i);
763
764 health_code_update(&health_thread_kernel);
765
766 /* Thread quit pipe has been closed. Killing thread. */
767 ret = check_thread_quit_pipe(pollfd, revents);
768 if (ret) {
769 err = 0;
770 goto exit;
771 }
772
773 /* Check for data on kernel pipe */
774 if (pollfd == kernel_poll_pipe[0] && (revents & LPOLLIN)) {
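/* Drain the wakeup byte, restarting the read() if it is interrupted by a signal (EINTR). */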
775 do {
776 ret = read(kernel_poll_pipe[0], &tmp, 1);
777 } while (ret < 0 && errno == EINTR);
778 /*
779 * The return value is not useful here: if this pipe sees any activity, an
780 * update is required anyway.
781 */
782 update_poll_flag = 1;
783 continue;
784 } else {
785 /*
786 * New CPU detected by the kernel. Adding kernel stream to
787 * kernel session and updating the kernel consumer
788 */
789 if (revents & LPOLLIN) {
790 ret = update_kernel_stream(&kconsumer_data, pollfd);
791 if (ret < 0) {
792 continue;
793 }
794 break;
795 /*
796 * TODO: We might want to handle the LPOLLERR | LPOLLHUP
797 * and unregister kernel stream at this point.
798 */
799 }
800 }
801 }
802 }
803
804 exit:
805 error:
806 lttng_poll_clean(&events);
807 error_poll_create:
808 error_testpoint:
809 utils_close_pipe(kernel_poll_pipe);
810 kernel_poll_pipe[0] = kernel_poll_pipe[1] = -1;
811 if (err) {
812 health_error(&health_thread_kernel);
813 ERR("Health error occurred in %s", __func__);
814 WARN("Kernel thread died unexpectedly. "
815 "Kernel tracing can continue but CPU hotplug is disabled.");
816 }
817 health_exit(&health_thread_kernel);
818 DBG("Kernel thread dying");
819 return NULL;
820 }
821
822 /*
823 * Signal the consumer data pthread condition with the given thread state.
824 */
825 static void signal_consumer_condition(struct consumer_data *data, int state)
826 {
827 pthread_mutex_lock(&data->cond_mutex);
828
829 /*
830 * The state is set before signaling. It can be any value, it's the waiter
831 * job to correctly interpret this condition variable associated to the
832 * consumer pthread_cond.
833 *
834 * A value of 0 means that the corresponding thread of the consumer data
835 * was not started. 1 indicates that the thread has started and is ready
836 * for action. A negative value means that there was an error during the
837 * thread bootstrap.
838 */
839 data->consumer_thread_is_ready = state;
840 (void) pthread_cond_signal(&data->cond);
841
842 pthread_mutex_unlock(&data->cond_mutex);
843 }
844
845 /*
846 * This thread manages the consumer errors sent back to the session daemon.
847 */
848 static void *thread_manage_consumer(void *data)
849 {
850 int sock = -1, i, ret, pollfd, err = -1;
851 uint32_t revents, nb_fd;
852 enum lttcomm_return_code code;
853 struct lttng_poll_event events;
854 struct consumer_data *consumer_data = data;
855
856 DBG("[thread] Manage consumer started");
857
858 /*
859 * Since the consumer thread can be spawned at any moment in time, we init
860 * the health to a poll status (1, which is a valid health over time).
861 * When the thread starts, we update here the health to a "code" path being
862 * an even value so this thread, when reaching a poll wait, does not
863 * trigger an error with an even value.
864 *
865 * Here is the use case we avoid.
866 *
867 * +1: the first poll update during initialization (main())
868 * +2 * x: multiple code update once in this thread.
869 * +1: poll wait in this thread (being a good health state).
870 * == even number which after the wait period shows as a bad health.
871 *
872 * In a nutshell, the following poll update to the health state brings back
873 * the state to an even value meaning a code path.
874 */
875 health_poll_update(&consumer_data->health);
876
877 /*
878 * Pass 2 as size here for the thread quit pipe and kconsumerd_err_sock.
879 * Nothing more will be added to this poll set.
880 */
881 ret = create_thread_poll_set(&events, 2);
882 if (ret < 0) {
883 goto error_poll;
884 }
885
886 /*
887 * The error socket here is already in a listening state which was done
888 * just before spawning this thread to avoid a race between the consumer
889 * daemon exec trying to connect and the listen() call.
890 */
891 ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
892 if (ret < 0) {
893 goto error;
894 }
895
896 health_code_update(&consumer_data->health);
897
898 /* Infinite blocking call, waiting for transmission */
899 restart:
900 health_poll_update(&consumer_data->health);
901
902 if (testpoint(thread_manage_consumer)) {
903 goto error;
904 }
905
906 ret = lttng_poll_wait(&events, -1);
907 health_poll_update(&consumer_data->health);
908 if (ret < 0) {
909 /*
910 * Restart interrupted system call.
911 */
912 if (errno == EINTR) {
913 goto restart;
914 }
915 goto error;
916 }
917
918 nb_fd = ret;
919
920 for (i = 0; i < nb_fd; i++) {
921 /* Fetch once the poll data */
922 revents = LTTNG_POLL_GETEV(&events, i);
923 pollfd = LTTNG_POLL_GETFD(&events, i);
924
925 health_code_update(&consumer_data->health);
926
927 /* Thread quit pipe has been closed. Killing thread. */
928 ret = check_thread_quit_pipe(pollfd, revents);
929 if (ret) {
930 err = 0;
931 goto exit;
932 }
933
934 /* Event on the registration socket */
935 if (pollfd == consumer_data->err_sock) {
936 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
937 ERR("consumer err socket poll error");
938 goto error;
939 }
940 }
941 }
942
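/* The error socket is readable: accept the connection initiated by the freshly exec'd consumerd. */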
943 sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
944 if (sock < 0) {
945 goto error;
946 }
947
948 /*
949 * Set the CLOEXEC flag. Return code is useless because either way, the
950 * show must go on.
951 */
952 (void) utils_set_fd_cloexec(sock);
953
954 health_code_update(&consumer_data->health);
955
956 DBG2("Receiving code from consumer err_sock");
957
958 /* Getting status code from kconsumerd */
959 ret = lttcomm_recv_unix_sock(sock, &code,
960 sizeof(enum lttcomm_return_code));
961 if (ret <= 0) {
962 goto error;
963 }
964
965 health_code_update(&consumer_data->health);
966
967 if (code == LTTCOMM_CONSUMERD_COMMAND_SOCK_READY) {
968 consumer_data->cmd_sock =
969 lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
970 if (consumer_data->cmd_sock < 0) {
971 /* On error, signal condition and quit. */
972 signal_consumer_condition(consumer_data, -1);
973 PERROR("consumer connect");
974 goto error;
975 }
976 signal_consumer_condition(consumer_data, 1);
977 DBG("Consumer command socket ready");
978 } else {
979 ERR("consumer error when waiting for SOCK_READY : %s",
980 lttcomm_get_readable_code(-code));
981 goto error;
982 }
983
984 /* Remove the kconsumerd error sock since we've established a connection */
985 ret = lttng_poll_del(&events, consumer_data->err_sock);
986 if (ret < 0) {
987 goto error;
988 }
989
990 ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
991 if (ret < 0) {
992 goto error;
993 }
994
995 health_code_update(&consumer_data->health);
996
997 /* Infinite blocking call, waiting for transmission */
998 restart_poll:
999 health_poll_update(&consumer_data->health);
1000 ret = lttng_poll_wait(&events, -1);
1001 health_poll_update(&consumer_data->health);
1002 if (ret < 0) {
1003 /*
1004 * Restart interrupted system call.
1005 */
1006 if (errno == EINTR) {
1007 goto restart_poll;
1008 }
1009 goto error;
1010 }
1011
1012 nb_fd = ret;
1013
1014 for (i = 0; i < nb_fd; i++) {
1015 /* Fetch once the poll data */
1016 revents = LTTNG_POLL_GETEV(&events, i);
1017 pollfd = LTTNG_POLL_GETFD(&events, i);
1018
1019 health_code_update(&consumer_data->health);
1020
1021 /* Thread quit pipe has been closed. Killing thread. */
1022 ret = check_thread_quit_pipe(pollfd, revents);
1023 if (ret) {
1024 err = 0;
1025 goto exit;
1026 }
1027
1028 /* Event on the kconsumerd socket */
1029 if (pollfd == sock) {
1030 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1031 ERR("consumer err socket second poll error");
1032 goto error;
1033 }
1034 }
1035 }
1036
1037 health_code_update(&consumer_data->health);
1038
1039 /* Wait for any kconsumerd error */
1040 ret = lttcomm_recv_unix_sock(sock, &code,
1041 sizeof(enum lttcomm_return_code));
1042 if (ret <= 0) {
1043 ERR("consumer closed the command socket");
1044 goto error;
1045 }
1046
1047 ERR("consumer return code : %s", lttcomm_get_readable_code(-code));
1048
1049 exit:
1050 error:
1051 /* Immediately set the consumerd state to stopped */
1052 if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
1053 uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR);
1054 } else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
1055 consumer_data->type == LTTNG_CONSUMER32_UST) {
1056 uatomic_set(&ust_consumerd_state, CONSUMER_ERROR);
1057 } else {
1058 /* Code flow error... */
1059 assert(0);
1060 }
1061
1062 if (consumer_data->err_sock >= 0) {
1063 ret = close(consumer_data->err_sock);
1064 if (ret) {
1065 PERROR("close");
1066 }
1067 }
1068 if (consumer_data->cmd_sock >= 0) {
1069 ret = close(consumer_data->cmd_sock);
1070 if (ret) {
1071 PERROR("close");
1072 }
1073 }
1074 if (sock >= 0) {
1075 ret = close(sock);
1076 if (ret) {
1077 PERROR("close");
1078 }
1079 }
1080
1081 unlink(consumer_data->err_unix_sock_path);
1082 unlink(consumer_data->cmd_unix_sock_path);
1083 consumer_data->pid = 0;
1084
1085 lttng_poll_clean(&events);
1086 error_poll:
1087 if (err) {
1088 health_error(&consumer_data->health);
1089 ERR("Health error occurred in %s", __func__);
1090 }
1091 health_exit(&consumer_data->health);
1092 DBG("consumer thread cleanup completed");
1093
1094 return NULL;
1095 }
1096
1097 /*
1098 * This thread manages application communication.
1099 */
1100 static void *thread_manage_apps(void *data)
1101 {
1102 int i, ret, pollfd, err = -1;
1103 uint32_t revents, nb_fd;
1104 struct ust_command ust_cmd;
1105 struct lttng_poll_event events;
1106
1107 DBG("[thread] Manage application started");
1108
1109 rcu_register_thread();
1110 rcu_thread_online();
1111
1112 if (testpoint(thread_manage_apps)) {
1113 goto error_testpoint;
1114 }
1115
1116 health_code_update(&health_thread_app_manage);
1117
1118 ret = create_thread_poll_set(&events, 2);
1119 if (ret < 0) {
1120 goto error_poll_create;
1121 }
1122
1123 ret = lttng_poll_add(&events, apps_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);
1124 if (ret < 0) {
1125 goto error;
1126 }
1127
1128 if (testpoint(thread_manage_apps_before_loop)) {
1129 goto error;
1130 }
1131
1132 health_code_update(&health_thread_app_manage);
1133
1134 while (1) {
1135 DBG("Apps thread polling on %d fds", LTTNG_POLL_GETNB(&events));
1136
1137 /* Infinite blocking call, waiting for transmission */
1138 restart:
1139 health_poll_update(&health_thread_app_manage);
1140 ret = lttng_poll_wait(&events, -1);
1141 health_poll_update(&health_thread_app_manage);
1142 if (ret < 0) {
1143 /*
1144 * Restart interrupted system call.
1145 */
1146 if (errno == EINTR) {
1147 goto restart;
1148 }
1149 goto error;
1150 }
1151
1152 nb_fd = ret;
1153
1154 for (i = 0; i < nb_fd; i++) {
1155 /* Fetch once the poll data */
1156 revents = LTTNG_POLL_GETEV(&events, i);
1157 pollfd = LTTNG_POLL_GETFD(&events, i);
1158
1159 health_code_update(&health_thread_app_manage);
1160
1161 /* Thread quit pipe has been closed. Killing thread. */
1162 ret = check_thread_quit_pipe(pollfd, revents);
1163 if (ret) {
1164 err = 0;
1165 goto exit;
1166 }
1167
1168 /* Inspect the apps cmd pipe */
1169 if (pollfd == apps_cmd_pipe[0]) {
1170 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1171 ERR("Apps command pipe error");
1172 goto error;
1173 } else if (revents & LPOLLIN) {
1174 /* Empty the pipe: read the whole command, restarting on EINTR. */
1175 do {
1176 ret = read(apps_cmd_pipe[0], &ust_cmd, sizeof(ust_cmd));
1177 } while (ret < 0 && errno == EINTR);
1178 if (ret < 0 || ret < sizeof(ust_cmd)) {
1179 PERROR("read apps cmd pipe");
1180 goto error;
1181 }
1182
1183 health_code_update(&health_thread_app_manage);
1184
1185 /* Register the application to the session daemon */
1186 ret = ust_app_register(&ust_cmd.reg_msg,
1187 ust_cmd.sock);
1188 if (ret == -ENOMEM) {
1189 goto error;
1190 } else if (ret < 0) {
1191 break;
1192 }
1193
1194 health_code_update(&health_thread_app_manage);
1195
1196 /*
1197 * Validate UST version compatibility.
1198 */
1199 ret = ust_app_validate_version(ust_cmd.sock);
1200 if (ret >= 0) {
1201 /*
1202 * Add channel(s) and event(s) to newly registered apps
1203 * from lttng global UST domain.
1204 */
1205 update_ust_app(ust_cmd.sock);
1206 }
1207
1208 health_code_update(&health_thread_app_manage);
1209
1210 ret = ust_app_register_done(ust_cmd.sock);
1211 if (ret < 0) {
1212 /*
1213 * If the registration is not possible, we simply
1214 * unregister the apps and continue
1215 */
1216 ust_app_unregister(ust_cmd.sock);
1217 } else {
1218 /*
1219 * We only monitor the error events of the socket. This
1220 * thread does not handle any incoming data from UST
1221 * (POLLIN).
1222 */
1223 ret = lttng_poll_add(&events, ust_cmd.sock,
1224 LPOLLERR | LPOLLHUP | LPOLLRDHUP);
1225 if (ret < 0) {
1226 goto error;
1227 }
1228
1229 /* Set socket timeout for both receiving and sending */
1230 (void) lttcomm_setsockopt_rcv_timeout(ust_cmd.sock,
1231 app_socket_timeout);
1232 (void) lttcomm_setsockopt_snd_timeout(ust_cmd.sock,
1233 app_socket_timeout);
1234
1235 DBG("Apps with sock %d added to poll set",
1236 ust_cmd.sock);
1237 }
1238
1239 health_code_update(&health_thread_app_manage);
1240
1241 break;
1242 }
1243 } else {
1244 /*
1245 * At this point, we know that a registered application triggered
1246 * the event at poll_wait.
1247 */
1248 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1249 /* Removing from the poll set */
1250 ret = lttng_poll_del(&events, pollfd);
1251 if (ret < 0) {
1252 goto error;
1253 }
1254
1255 /* Socket closed on remote end. */
1256 ust_app_unregister(pollfd);
1257 break;
1258 }
1259 }
1260
1261 health_code_update(&health_thread_app_manage);
1262 }
1263 }
1264
1265 exit:
1266 error:
1267 lttng_poll_clean(&events);
1268 error_poll_create:
1269 error_testpoint:
1270 utils_close_pipe(apps_cmd_pipe);
1271 apps_cmd_pipe[0] = apps_cmd_pipe[1] = -1;
1272
1273 /*
1274 * We don't clean the UST app hash table here since already registered
1275 * applications can still be controlled so let them be until the session
1276 * daemon dies or the applications stop.
1277 */
1278
1279 if (err) {
1280 health_error(&health_thread_app_manage);
1281 ERR("Health error occurred in %s", __func__);
1282 }
1283 health_exit(&health_thread_app_manage);
1284 DBG("Application communication apps thread cleanup complete");
1285 rcu_thread_offline();
1286 rcu_unregister_thread();
1287 return NULL;
1288 }
1289
1290 /*
1291 * Dispatch request from the registration threads to the application
1292 * communication thread.
1293 */
1294 static void *thread_dispatch_ust_registration(void *data)
1295 {
1296 int ret;
1297 struct cds_wfq_node *node;
1298 struct ust_command *ust_cmd = NULL;
1299
1300 DBG("[thread] Dispatch UST command started");
1301
1302 while (!CMM_LOAD_SHARED(dispatch_thread_exit)) {
1303 /* Atomically prepare the queue futex */
1304 futex_nto1_prepare(&ust_cmd_queue.futex);
1305
1306 do {
1307 /* Dequeue command for registration */
1308 node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue);
1309 if (node == NULL) {
1310 DBG("Woken up but nothing in the UST command queue");
1311 /* Continue thread execution */
1312 break;
1313 }
1314
1315 ust_cmd = caa_container_of(node, struct ust_command, node);
1316
1317 DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
1318 " gid:%d sock:%d name:%s (version %d.%d)",
1319 ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
1320 ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
1321 ust_cmd->sock, ust_cmd->reg_msg.name,
1322 ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
1323 /*
1324 * Inform apps thread of the new application registration. This
1325 * call is blocking so we can be assured that the data will be read
1326 * at some point in time or wait to the end of the world :)
1327 */
1328 if (apps_cmd_pipe[1] >= 0) {
1329 ret = write(apps_cmd_pipe[1], ust_cmd,
1330 sizeof(struct ust_command));
1331 if (ret < 0) {
1332 PERROR("write apps cmd pipe");
1333 if (errno == EBADF) {
1334 /*
1335 * We can't inform the application thread to process
1336 * registration. We will exit or else application
1337 * registration will not occur and tracing will never
1338 * start.
1339 */
1340 goto error;
1341 }
1342 }
1343 } else {
1344 /* Application manager thread is not available. */
1345 ret = close(ust_cmd->sock);
1346 if (ret < 0) {
1347 PERROR("close ust_cmd sock");
1348 }
1349 }
1350 free(ust_cmd);
1351 } while (node != NULL);
1352
1353 /* Futex wait on queue. Blocking call on futex() */
1354 futex_nto1_wait(&ust_cmd_queue.futex);
1355 }
1356
1357 error:
1358 DBG("Dispatch thread dying");
1359 return NULL;
1360 }
1361
1362 /*
1363 * This thread manages application registration.
1364 */
1365 static void *thread_registration_apps(void *data)
1366 {
1367 int sock = -1, i, ret, pollfd, err = -1;
1368 uint32_t revents, nb_fd;
1369 struct lttng_poll_event events;
1370 /*
1371 * Gets allocated in this thread, enqueued to a global queue, dequeued and
1372 * freed in the manage apps thread.
1373 */
1374 struct ust_command *ust_cmd = NULL;
1375
1376 DBG("[thread] Manage application registration started");
1377
1378 if (testpoint(thread_registration_apps)) {
1379 goto error_testpoint;
1380 }
1381
1382 ret = lttcomm_listen_unix_sock(apps_sock);
1383 if (ret < 0) {
1384 goto error_listen;
1385 }
1386
1387 /*
1388 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
1389 * more will be added to this poll set.
1390 */
1391 ret = create_thread_poll_set(&events, 2);
1392 if (ret < 0) {
1393 goto error_create_poll;
1394 }
1395
1396 /* Add the application registration socket */
1397 ret = lttng_poll_add(&events, apps_sock, LPOLLIN | LPOLLRDHUP);
1398 if (ret < 0) {
1399 goto error_poll_add;
1400 }
1401
1402 /* Notify all applications to register */
1403 ret = notify_ust_apps(1);
1404 if (ret < 0) {
1405 ERR("Failed to notify applications or create the wait shared memory.\n"
1406 "Execution continues but there might be problem for already\n"
1407 "running applications that wishes to register.");
1408 }
1409
1410 while (1) {
1411 DBG("Accepting application registration");
1412
1413 /* Infinite blocking call, waiting for transmission */
1414 restart:
1415 health_poll_update(&health_thread_app_reg);
1416 ret = lttng_poll_wait(&events, -1);
1417 health_poll_update(&health_thread_app_reg);
1418 if (ret < 0) {
1419 /*
1420 * Restart interrupted system call.
1421 */
1422 if (errno == EINTR) {
1423 goto restart;
1424 }
1425 goto error;
1426 }
1427
1428 nb_fd = ret;
1429
1430 for (i = 0; i < nb_fd; i++) {
1431 health_code_update(&health_thread_app_reg);
1432
1433 /* Fetch once the poll data */
1434 revents = LTTNG_POLL_GETEV(&events, i);
1435 pollfd = LTTNG_POLL_GETFD(&events, i);
1436
1437 /* Thread quit pipe has been closed. Killing thread. */
1438 ret = check_thread_quit_pipe(pollfd, revents);
1439 if (ret) {
1440 err = 0;
1441 goto exit;
1442 }
1443
1444 /* Event on the registration socket */
1445 if (pollfd == apps_sock) {
1446 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1447 ERR("Register apps socket poll error");
1448 goto error;
1449 } else if (revents & LPOLLIN) {
1450 sock = lttcomm_accept_unix_sock(apps_sock);
1451 if (sock < 0) {
1452 goto error;
1453 }
1454
1455 /*
1456 * Set the CLOEXEC flag. Return code is useless because
1457 * either way, the show must go on.
1458 */
1459 (void) utils_set_fd_cloexec(sock);
1460
1461 /* Create UST registration command for enqueuing */
1462 ust_cmd = zmalloc(sizeof(struct ust_command));
1463 if (ust_cmd == NULL) {
1464 PERROR("ust command zmalloc");
1465 goto error;
1466 }
1467
1468 /*
1469 * Using message-based transmissions to ensure we don't
1470 * have to deal with partially received messages.
1471 */
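/* Reserve one slot in the application fd accounting before keeping this socket. */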
1472 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
1473 if (ret < 0) {
1474 ERR("Exhausted file descriptors allowed for applications.");
1475 free(ust_cmd);
1476 ret = close(sock);
1477 if (ret) {
1478 PERROR("close");
1479 }
1480 sock = -1;
1481 continue;
1482 }
1483 health_code_update(&health_thread_app_reg);
1484 ret = lttcomm_recv_unix_sock(sock, &ust_cmd->reg_msg,
1485 sizeof(struct ust_register_msg));
1486 if (ret < 0 || ret < sizeof(struct ust_register_msg)) {
1487 if (ret < 0) {
1488 PERROR("lttcomm_recv_unix_sock register apps");
1489 } else {
1490 ERR("Wrong size received on apps register");
1491 }
1492 free(ust_cmd);
1493 ret = close(sock);
1494 if (ret) {
1495 PERROR("close");
1496 }
1497 lttng_fd_put(LTTNG_FD_APPS, 1);
1498 sock = -1;
1499 continue;
1500 }
1501 health_code_update(&health_thread_app_reg);
1502
1503 ust_cmd->sock = sock;
1504 sock = -1;
1505
1506 DBG("UST registration received with pid:%d ppid:%d uid:%d"
1507 " gid:%d sock:%d name:%s (version %d.%d)",
1508 ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
1509 ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
1510 ust_cmd->sock, ust_cmd->reg_msg.name,
1511 ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
1512
1513 /*
1514 * Lock-free enqueue of the registration request. The red pill
1515 * has been taken! This app will be part of the *system*.
1516 */
1517 cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);
1518
1519 /*
1520 * Wake the registration queue futex. Implicit memory
1521 * barrier with the exchange in cds_wfq_enqueue.
1522 */
1523 futex_nto1_wake(&ust_cmd_queue.futex);
1524 }
1525 }
1526 }
1527 }
1528
1529 exit:
1530 error:
1531 if (err) {
1532 health_error(&health_thread_app_reg);
1533 ERR("Health error occurred in %s", __func__);
1534 }
1535
1536 /* Notify that the registration thread is gone */
1537 notify_ust_apps(0);
1538
1539 if (apps_sock >= 0) {
1540 ret = close(apps_sock);
1541 if (ret) {
1542 PERROR("close");
1543 }
1544 }
1545 if (sock >= 0) {
1546 ret = close(sock);
1547 if (ret) {
1548 PERROR("close");
1549 }
1550 lttng_fd_put(LTTNG_FD_APPS, 1);
1551 }
1552 unlink(apps_unix_sock_path);
1553
1554 error_poll_add:
1555 lttng_poll_clean(&events);
1556 error_listen:
1557 error_create_poll:
1558 error_testpoint:
1559 DBG("UST Registration thread cleanup complete");
1560 health_exit(&health_thread_app_reg);
1561
1562 return NULL;
1563 }
1564
1565 /*
1566 * Start the thread_manage_consumer. This must be done after a lttng-consumerd
1567 * exec or it will fail.
1568 */
1569 static int spawn_consumer_thread(struct consumer_data *consumer_data)
1570 {
1571 int ret, clock_ret;
1572 struct timespec timeout;
1573
1574 /* Make sure we set the readiness flag to 0 because we are NOT ready */
1575 consumer_data->consumer_thread_is_ready = 0;
1576
1577 /* Setup pthread condition */
1578 ret = pthread_condattr_init(&consumer_data->condattr);
1579 if (ret != 0) {
1580 errno = ret;
1581 PERROR("pthread_condattr_init consumer data");
1582 goto error;
1583 }
1584
1585 /*
1586 * Set the monotonic clock in order to make sure we DO NOT jump in time
1587 * between the clock_gettime() call and the timedwait call. See bug #324
1588 * for more details and how we noticed it.
1589 */
1590 ret = pthread_condattr_setclock(&consumer_data->condattr, CLOCK_MONOTONIC);
1591 if (ret != 0) {
1592 errno = ret;
1593 PERROR("pthread_condattr_setclock consumer data");
1594 goto error;
1595 }
1596
1597 ret = pthread_cond_init(&consumer_data->cond, &consumer_data->condattr);
1598 if (ret != 0) {
1599 errno = ret;
1600 PERROR("pthread_cond_init consumer data");
1601 goto error;
1602 }
1603
1604 ret = pthread_create(&consumer_data->thread, NULL, thread_manage_consumer,
1605 consumer_data);
1606 if (ret != 0) {
1607 PERROR("pthread_create consumer");
1608 ret = -1;
1609 goto error;
1610 }
1611
1612 /* We are about to wait on a pthread condition */
1613 pthread_mutex_lock(&consumer_data->cond_mutex);
1614
1615 /* Get time for the pthread_cond_timedwait absolute timeout */
1616 clock_ret = clock_gettime(CLOCK_MONOTONIC, &timeout);
1617 /*
1618 * Set the timeout for the condition timed wait even if the clock gettime
1619 * call fails since we might loop on that call and we want to avoid to
1620 * increment the timeout too many times.
1621 */
1622 timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;
1623
1624 /*
1625 * The following loop COULD be skipped in some conditions so this is why we
1626 * set ret to 0 in order to make sure at least one round of the loop is
1627 * done.
1628 */
1629 ret = 0;
1630
1631 /*
1632 * Loop until the condition is reached or when a timeout is reached. Note
1633 * that the pthread_cond_timedwait(P) man page specifies that EINTR can NOT
1634 * be returned but the pthread_cond(3), from the glibc-doc, says that it is
1635 * possible. This loop does not take any chances and works with both of
1636 * them.
1637 */
1638 while (!consumer_data->consumer_thread_is_ready && ret != ETIMEDOUT) {
1639 if (clock_ret < 0) {
1640 PERROR("clock_gettime spawn consumer");
1641 /* Infinite wait for the consumerd thread to be ready */
1642 ret = pthread_cond_wait(&consumer_data->cond,
1643 &consumer_data->cond_mutex);
1644 } else {
1645 ret = pthread_cond_timedwait(&consumer_data->cond,
1646 &consumer_data->cond_mutex, &timeout);
1647 }
1648 }
1649
1650 /* Release the pthread condition */
1651 pthread_mutex_unlock(&consumer_data->cond_mutex);
1652
1653 if (ret != 0) {
1654 errno = ret;
1655 if (ret == ETIMEDOUT) {
1656 /*
1657 * Call has timed out so we kill the kconsumerd_thread and return
1658 * an error.
1659 */
1660 ERR("Condition timed out. The consumer thread was never ready."
1661 " Killing it");
1662 ret = pthread_cancel(consumer_data->thread);
1663 if (ret < 0) {
1664 PERROR("pthread_cancel consumer thread");
1665 }
1666 } else {
1667 PERROR("pthread_cond_wait failed consumer thread");
1668 }
1669 goto error;
1670 }
1671
1672 pthread_mutex_lock(&consumer_data->pid_mutex);
1673 if (consumer_data->pid == 0) {
1674 ERR("Consumerd did not start");
1675 pthread_mutex_unlock(&consumer_data->pid_mutex);
1676 goto error;
1677 }
1678 pthread_mutex_unlock(&consumer_data->pid_mutex);
1679
1680 return 0;
1681
1682 error:
1683 return ret;
1684 }
1685
1686 /*
1687 * Join consumer thread
1688 */
1689 static int join_consumer_thread(struct consumer_data *consumer_data)
1690 {
1691 void *status;
1692
1693 /* Consumer pid must be a real one. */
1694 if (consumer_data->pid > 0) {
1695 int ret;
1696 ret = kill(consumer_data->pid, SIGTERM);
1697 if (ret) {
1698 ERR("Error killing consumer daemon");
1699 return ret;
1700 }
1701 return pthread_join(consumer_data->thread, &status);
1702 } else {
1703 return 0;
1704 }
1705 }
1706
1707 /*
1708 * Fork and exec a consumer daemon (consumerd).
1709 *
1710 * Return pid if successful else -1.
1711 */
1712 static pid_t spawn_consumerd(struct consumer_data *consumer_data)
1713 {
1714 int ret;
1715 pid_t pid;
1716 const char *consumer_to_use;
1717 const char *verbosity;
1718 struct stat st;
1719
1720 DBG("Spawning consumerd");
1721
1722 pid = fork();
1723 if (pid == 0) {
1724 /*
1725 * Exec consumerd.
1726 */
1727 if (opt_verbose_consumer) {
1728 verbosity = "--verbose";
1729 } else {
1730 verbosity = "--quiet";
1731 }
1732 switch (consumer_data->type) {
1733 case LTTNG_CONSUMER_KERNEL:
1734 /*
1735 * Find out which consumerd to execute. We will first try the
1736 * 64-bit path, then the sessiond's installation directory, and
1737 * fall back on the 32-bit one.
1738 */
1739 DBG3("Looking for a kernel consumer at these locations:");
1740 DBG3(" 1) %s", consumerd64_bin);
1741 DBG3(" 2) %s/%s", INSTALL_BIN_PATH, CONSUMERD_FILE);
1742 DBG3(" 3) %s", consumerd32_bin);
1743 if (stat(consumerd64_bin, &st) == 0) {
1744 DBG3("Found location #1");
1745 consumer_to_use = consumerd64_bin;
1746 } else if (stat(INSTALL_BIN_PATH "/" CONSUMERD_FILE, &st) == 0) {
1747 DBG3("Found location #2");
1748 consumer_to_use = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
1749 } else if (stat(consumerd32_bin, &st) == 0) {
1750 DBG3("Found location #3");
1751 consumer_to_use = consumerd32_bin;
1752 } else {
1753 DBG("Could not find any valid consumerd executable");
1754 break;
1755 }
1756 DBG("Using kernel consumer at: %s", consumer_to_use);
1757 execl(consumer_to_use,
1758 "lttng-consumerd", verbosity, "-k",
1759 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
1760 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
1761 NULL);
1762 break;
1763 case LTTNG_CONSUMER64_UST:
1764 {
1765 char *tmpnew = NULL;
1766
1767 if (consumerd64_libdir[0] != '\0') {
1768 char *tmp;
1769 size_t tmplen;
1770
1771 tmp = getenv("LD_LIBRARY_PATH");
1772 if (!tmp) {
1773 tmp = "";
1774 }
1775 tmplen = strlen("LD_LIBRARY_PATH=")
1776 + strlen(consumerd64_libdir) + 1 /* : */ + strlen(tmp);
1777 tmpnew = zmalloc(tmplen + 1 /* \0 */);
1778 if (!tmpnew) {
1779 ret = -ENOMEM;
1780 goto error;
1781 }
1782 strcpy(tmpnew, "LD_LIBRARY_PATH=");
1783 strcat(tmpnew, consumerd64_libdir);
1784 if (tmp[0] != '\0') {
1785 strcat(tmpnew, ":");
1786 strcat(tmpnew, tmp);
1787 }
1788 ret = putenv(tmpnew);
1789 if (ret) {
1790 ret = -errno;
1791 goto error;
1792 }
1793 }
1794 DBG("Using 64-bit UST consumer at: %s", consumerd64_bin);
1795 ret = execl(consumerd64_bin, "lttng-consumerd", verbosity, "-u",
1796 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
1797 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
1798 NULL);
1799 if (consumerd64_libdir[0] != '\0') {
1800 free(tmpnew);
1801 }
1802 if (ret) {
1803 goto error;
1804 }
1805 break;
1806 }
1807 case LTTNG_CONSUMER32_UST:
1808 {
1809 char *tmpnew = NULL;
1810
1811 if (consumerd32_libdir[0] != '\0') {
1812 char *tmp;
1813 size_t tmplen;
1814
1815 tmp = getenv("LD_LIBRARY_PATH");
1816 if (!tmp) {
1817 tmp = "";
1818 }
1819 tmplen = strlen("LD_LIBRARY_PATH=")
1820 + strlen(consumerd32_libdir) + 1 /* : */ + strlen(tmp);
1821 tmpnew = zmalloc(tmplen + 1 /* \0 */);
1822 if (!tmpnew) {
1823 ret = -ENOMEM;
1824 goto error;
1825 }
1826 strcpy(tmpnew, "LD_LIBRARY_PATH=");
1827 strcat(tmpnew, consumerd32_libdir);
1828 if (tmp[0] != '\0') {
1829 strcat(tmpnew, ":");
1830 strcat(tmpnew, tmp);
1831 }
1832 ret = putenv(tmpnew);
1833 if (ret) {
1834 ret = -errno;
1835 goto error;
1836 }
1837 }
1838 DBG("Using 32-bit UST consumer at: %s", consumerd32_bin);
1839 ret = execl(consumerd32_bin, "lttng-consumerd", verbosity, "-u",
1840 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
1841 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
1842 NULL);
1843 if (consumerd32_libdir[0] != '\0') {
1844 free(tmpnew);
1845 }
1846 if (ret) {
1847 goto error;
1848 }
1849 break;
1850 }
1851 default:
1852 PERROR("unknown consumer type");
1853 exit(EXIT_FAILURE);
1854 }
1855 if (errno != 0) {
1856 PERROR("kernel start consumer exec");
1857 }
1858 exit(EXIT_FAILURE);
1859 } else if (pid > 0) {
1860 ret = pid;
1861 } else {
1862 PERROR("start consumer fork");
1863 ret = -errno;
1864 }
1865 error:
1866 return ret;
1867 }
1868
1869 /*
1870 * Spawn the consumerd daemon and session daemon thread.
1871 */
1872 static int start_consumerd(struct consumer_data *consumer_data)
1873 {
1874 int ret;
1875
1876 /*
1877 * Set the listen() state on the socket since there is a possible race
1878 * between the exec() of the consumer daemon and this call if placed in the
1879 * consumer thread. See bug #366 for more details.
1880 */
1881 ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
1882 if (ret < 0) {
1883 goto error;
1884 }
1885
1886 pthread_mutex_lock(&consumer_data->pid_mutex);
1887 if (consumer_data->pid != 0) {
1888 pthread_mutex_unlock(&consumer_data->pid_mutex);
1889 goto end;
1890 }
1891
1892 ret = spawn_consumerd(consumer_data);
1893 if (ret < 0) {
1894 ERR("Spawning consumerd failed");
1895 pthread_mutex_unlock(&consumer_data->pid_mutex);
1896 goto error;
1897 }
1898
1899 /* Setting up the consumer_data pid */
1900 consumer_data->pid = ret;
1901 DBG2("Consumer pid %d", consumer_data->pid);
1902 pthread_mutex_unlock(&consumer_data->pid_mutex);
1903
1904 DBG2("Spawning consumer control thread");
1905 ret = spawn_consumer_thread(consumer_data);
1906 if (ret < 0) {
1907 ERR("Fatal error spawning consumer control thread");
1908 goto error;
1909 }
1910
1911 end:
1912 return 0;
1913
1914 error:
1915 /* Cleanup already created socket on error. */
1916 if (consumer_data->err_sock >= 0) {
1917 int err;
1918
1919 err = close(consumer_data->err_sock);
1920 if (err < 0) {
1921 PERROR("close consumer data error socket");
1922 }
1923 }
1924 return ret;
1925 }
1926
1927 /*
1928 * Compute health status of each consumer. If one of them is zero (bad
1929 * state), we return 0.
1930 */
1931 static int check_consumer_health(void)
1932 {
1933 int ret;
1934
1935 ret = health_check_state(&kconsumer_data.health) &&
1936 health_check_state(&ustconsumer32_data.health) &&
1937 health_check_state(&ustconsumer64_data.health);
1938
1939 DBG3("Health consumer check %d", ret);
1940
1941 return ret;
1942 }
1943
1944 /*
1945 * Setup necessary data for kernel tracer action.
1946 */
1947 static int init_kernel_tracer(void)
1948 {
1949 int ret;
1950
1951 /* Modprobe lttng kernel modules */
1952 ret = modprobe_lttng_control();
1953 if (ret < 0) {
1954 goto error;
1955 }
1956
1957 /* Open the lttng proc interface */
1958 kernel_tracer_fd = open(module_proc_lttng, O_RDWR);
1959 if (kernel_tracer_fd < 0) {
1960 DBG("Failed to open %s", module_proc_lttng);
1961 ret = -1;
1962 goto error_open;
1963 }
1964
1965 /* Validate kernel version */
1966 ret = kernel_validate_version(kernel_tracer_fd);
1967 if (ret < 0) {
1968 goto error_version;
1969 }
1970
1971 ret = modprobe_lttng_data();
1972 if (ret < 0) {
1973 goto error_modules;
1974 }
1975
1976 DBG("Kernel tracer fd %d", kernel_tracer_fd);
1977 return 0;
1978
1979 error_version:
1980 modprobe_remove_lttng_control();
1981 ret = close(kernel_tracer_fd);
1982 if (ret) {
1983 PERROR("close");
1984 }
1985 kernel_tracer_fd = -1;
1986 return LTTNG_ERR_KERN_VERSION;
1987
1988 error_modules:
1989 ret = close(kernel_tracer_fd);
1990 if (ret) {
1991 PERROR("close");
1992 }
1993
1994 error_open:
1995 modprobe_remove_lttng_control();
1996
1997 error:
1998 WARN("No kernel tracer available");
1999 kernel_tracer_fd = -1;
2000 if (!is_root) {
2001 return LTTNG_ERR_NEED_ROOT_SESSIOND;
2002 } else {
2003 return LTTNG_ERR_KERN_NA;
2004 }
2005 }
2006
2007
2008 /*
2009 * Copy consumer output from the tracing session to the domain session. The
2010 * function also applies the right modification on a per domain basis for the
2011 * trace files destination directory.
2012 */
2013 static int copy_session_consumer(int domain, struct ltt_session *session)
2014 {
2015 int ret;
2016 const char *dir_name;
2017 struct consumer_output *consumer;
2018
2019 assert(session);
2020 assert(session->consumer);
2021
2022 switch (domain) {
2023 case LTTNG_DOMAIN_KERNEL:
2024 DBG3("Copying tracing session consumer output in kernel session");
2025 /*
2026 * XXX: We should audit the session creation and what this function
2027 * does "extra" in order to avoid a destroy since this function is used
2028 * in the domain session creation (kernel and ust) only. Same for UST
2029 * domain.
2030 */
2031 if (session->kernel_session->consumer) {
2032 consumer_destroy_output(session->kernel_session->consumer);
2033 }
2034 session->kernel_session->consumer =
2035 consumer_copy_output(session->consumer);
2036 /* Ease our life a bit for the next part */
2037 consumer = session->kernel_session->consumer;
2038 dir_name = DEFAULT_KERNEL_TRACE_DIR;
2039 break;
2040 case LTTNG_DOMAIN_UST:
2041 DBG3("Copying tracing session consumer output in UST session");
2042 if (session->ust_session->consumer) {
2043 consumer_destroy_output(session->ust_session->consumer);
2044 }
2045 session->ust_session->consumer =
2046 consumer_copy_output(session->consumer);
2047 /* Ease our life a bit for the next part */
2048 consumer = session->ust_session->consumer;
2049 dir_name = DEFAULT_UST_TRACE_DIR;
2050 break;
2051 default:
2052 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
2053 goto error;
2054 }
2055
2056 /* Append correct directory to subdir */
2057 strncat(consumer->subdir, dir_name,
2058 sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
2059 DBG3("Copy session consumer subdir %s", consumer->subdir);
2060
2061 ret = LTTNG_OK;
2062
2063 error:
2064 return ret;
2065 }
2066
2067 /*
2068 * Create a UST session and add it to the session ust list.
2069 */
2070 static int create_ust_session(struct ltt_session *session,
2071 struct lttng_domain *domain)
2072 {
2073 int ret;
2074 struct ltt_ust_session *lus = NULL;
2075
2076 assert(session);
2077 assert(domain);
2078 assert(session->consumer);
2079
2080 switch (domain->type) {
2081 case LTTNG_DOMAIN_UST:
2082 break;
2083 default:
2084 ERR("Unknown UST domain on create session %d", domain->type);
2085 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
2086 goto error;
2087 }
2088
2089 DBG("Creating UST session");
2090
2091 lus = trace_ust_create_session(session->path, session->id, domain);
2092 if (lus == NULL) {
2093 ret = LTTNG_ERR_UST_SESS_FAIL;
2094 goto error;
2095 }
2096
2097 lus->uid = session->uid;
2098 lus->gid = session->gid;
2099 session->ust_session = lus;
2100
2101 /* Copy session output to the newly created UST session */
2102 ret = copy_session_consumer(domain->type, session);
2103 if (ret != LTTNG_OK) {
2104 goto error;
2105 }
2106
2107 return LTTNG_OK;
2108
2109 error:
2110 free(lus);
2111 session->ust_session = NULL;
2112 return ret;
2113 }
2114
2115 /*
2116 * Create a kernel tracer session then create the default channel.
2117 */
2118 static int create_kernel_session(struct ltt_session *session)
2119 {
2120 int ret;
2121
2122 DBG("Creating kernel session");
2123
2124 ret = kernel_create_session(session, kernel_tracer_fd);
2125 if (ret < 0) {
2126 ret = LTTNG_ERR_KERN_SESS_FAIL;
2127 goto error;
2128 }
2129
2130 /* Code flow safety */
2131 assert(session->kernel_session);
2132
2133 /* Copy session output to the newly created Kernel session */
2134 ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
2135 if (ret != LTTNG_OK) {
2136 goto error;
2137 }
2138
2139 /* Create directory(ies) on local filesystem. */
2140 if (session->kernel_session->consumer->type == CONSUMER_DST_LOCAL &&
2141 strlen(session->kernel_session->consumer->dst.trace_path) > 0) {
2142 ret = run_as_mkdir_recursive(
2143 session->kernel_session->consumer->dst.trace_path,
2144 S_IRWXU | S_IRWXG, session->uid, session->gid);
2145 if (ret < 0) {
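			/* An already existing trace directory is not an error. */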
2146 if (ret != -EEXIST) {
2147 ERR("Trace directory creation error");
2148 goto error;
2149 }
2150 }
2151 }
2152
2153 session->kernel_session->uid = session->uid;
2154 session->kernel_session->gid = session->gid;
2155
2156 return LTTNG_OK;
2157
2158 error:
2159 trace_kernel_destroy_session(session->kernel_session);
2160 session->kernel_session = NULL;
2161 return ret;
2162 }
2163
2164 /*
2165 * Count number of session permitted by uid/gid.
2166 */
2167 static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
2168 {
2169 unsigned int i = 0;
2170 struct ltt_session *session;
2171
2172 DBG("Counting number of available session for UID %d GID %d",
2173 uid, gid);
2174 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
2175 /*
2176 * Only list the sessions the user can control.
2177 */
2178 if (!session_access_ok(session, uid, gid)) {
2179 continue;
2180 }
2181 i++;
2182 }
2183 return i;
2184 }
2185
2186 /*
2187 * Process the command requested by the lttng client within the command
2188 * context structure. This function makes sure that the return structure (llm)
2189 * is set and ready for transmission before returning.
2190 *
2191 * Return any error encountered or 0 for success.
2192 *
2193 * "sock" is only used for special-case var. len data.
2194 */
2195 static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
2196 int *sock_error)
2197 {
2198 int ret = LTTNG_OK;
2199 int need_tracing_session = 1;
2200 int need_domain;
2201
2202 DBG("Processing client command %d", cmd_ctx->lsm->cmd_type);
2203
2204 *sock_error = 0;
2205
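	/* These commands can be processed without a tracing domain. */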
2206 switch (cmd_ctx->lsm->cmd_type) {
2207 case LTTNG_CREATE_SESSION:
2208 case LTTNG_DESTROY_SESSION:
2209 case LTTNG_LIST_SESSIONS:
2210 case LTTNG_LIST_DOMAINS:
2211 case LTTNG_START_TRACE:
2212 case LTTNG_STOP_TRACE:
2213 case LTTNG_DATA_PENDING:
2214 need_domain = 0;
2215 break;
2216 default:
2217 need_domain = 1;
2218 }
2219
2220 if (opt_no_kernel && need_domain
2221 && cmd_ctx->lsm->domain.type == LTTNG_DOMAIN_KERNEL) {
2222 if (!is_root) {
2223 ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
2224 } else {
2225 ret = LTTNG_ERR_KERN_NA;
2226 }
2227 goto error;
2228 }
2229
2230 /* Deny register consumer if we already have a spawned consumer. */
2231 if (cmd_ctx->lsm->cmd_type == LTTNG_REGISTER_CONSUMER) {
2232 pthread_mutex_lock(&kconsumer_data.pid_mutex);
2233 if (kconsumer_data.pid > 0) {
2234 ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
2235 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2236 goto error;
2237 }
2238 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2239 }
2240
2241 /*
2242 * Check for commands that don't need to allocate a returned payload. We do
2243 * this here so we don't have to make the call for no payload at each
2244 * command.
2245 */
2246 switch(cmd_ctx->lsm->cmd_type) {
2247 case LTTNG_LIST_SESSIONS:
2248 case LTTNG_LIST_TRACEPOINTS:
2249 case LTTNG_LIST_TRACEPOINT_FIELDS:
2250 case LTTNG_LIST_DOMAINS:
2251 case LTTNG_LIST_CHANNELS:
2252 case LTTNG_LIST_EVENTS:
2253 break;
2254 default:
2255 /* Setup lttng message with no payload */
2256 ret = setup_lttng_msg(cmd_ctx, 0);
2257 if (ret < 0) {
2258 /* This label does not try to unlock the session */
2259 goto init_setup_error;
2260 }
2261 }
2262
2263 /* Commands that DO NOT need a session. */
2264 switch (cmd_ctx->lsm->cmd_type) {
2265 case LTTNG_CREATE_SESSION:
2266 case LTTNG_CALIBRATE:
2267 case LTTNG_LIST_SESSIONS:
2268 case LTTNG_LIST_TRACEPOINTS:
2269 case LTTNG_LIST_TRACEPOINT_FIELDS:
2270 need_tracing_session = 0;
2271 break;
2272 default:
2273 DBG("Getting session %s by name", cmd_ctx->lsm->session.name);
2274 /*
2275 * We keep the session list lock across _all_ commands
2276 * for now, because the per-session lock does not
2277 * handle teardown properly.
2278 */
2279 session_lock_list();
2280 cmd_ctx->session = session_find_by_name(cmd_ctx->lsm->session.name);
2281 if (cmd_ctx->session == NULL) {
2282 if (cmd_ctx->lsm->session.name != NULL) {
2283 ret = LTTNG_ERR_SESS_NOT_FOUND;
2284 } else {
2285 /* If no session name specified */
2286 ret = LTTNG_ERR_SELECT_SESS;
2287 }
2288 goto error;
2289 } else {
2290 /* Acquire lock for the session */
2291 session_lock(cmd_ctx->session);
2292 }
2293 break;
2294 }
2295
2296 if (!need_domain) {
2297 goto skip_domain;
2298 }
2299
2300 /*
2301 * Check domain type for specific "pre-action".
2302 */
2303 switch (cmd_ctx->lsm->domain.type) {
2304 case LTTNG_DOMAIN_KERNEL:
2305 if (!is_root) {
2306 ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
2307 goto error;
2308 }
2309
2310 /* Kernel tracer check */
2311 if (kernel_tracer_fd == -1) {
2312 /* Basically, load kernel tracer modules */
2313 ret = init_kernel_tracer();
2314 if (ret != 0) {
2315 goto error;
2316 }
2317 }
2318
2319 /* Consumer is in an ERROR state. Report back to client */
2320 if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
2321 ret = LTTNG_ERR_NO_KERNCONSUMERD;
2322 goto error;
2323 }
2324
2325 /* Need a session for kernel command */
2326 if (need_tracing_session) {
2327 if (cmd_ctx->session->kernel_session == NULL) {
2328 ret = create_kernel_session(cmd_ctx->session);
2329 if (ret < 0) {
2330 ret = LTTNG_ERR_KERN_SESS_FAIL;
2331 goto error;
2332 }
2333 }
2334
2335 /* Start the kernel consumer daemon */
2336 pthread_mutex_lock(&kconsumer_data.pid_mutex);
2337 if (kconsumer_data.pid == 0 &&
2338 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER &&
2339 cmd_ctx->session->start_consumer) {
2340 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2341 ret = start_consumerd(&kconsumer_data);
2342 if (ret < 0) {
2343 ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
2344 goto error;
2345 }
2346 uatomic_set(&kernel_consumerd_state, CONSUMER_STARTED);
2347 } else {
2348 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2349 }
2350
2351 /*
2352 * The consumer was just spawned so we need to add the socket to
2353 * the consumer output of the session, if it exists.
2354 */
2355 ret = consumer_create_socket(&kconsumer_data,
2356 cmd_ctx->session->kernel_session->consumer);
2357 if (ret < 0) {
2358 goto error;
2359 }
2360 }
2361
2362 break;
2363 case LTTNG_DOMAIN_UST:
2364 {
2365 /* Consumer is in an ERROR state. Report back to client */
2366 if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
2367 ret = LTTNG_ERR_NO_USTCONSUMERD;
2368 goto error;
2369 }
2370
2371 if (need_tracing_session) {
2372 /* Create UST session if none exist. */
2373 if (cmd_ctx->session->ust_session == NULL) {
2374 ret = create_ust_session(cmd_ctx->session,
2375 &cmd_ctx->lsm->domain);
2376 if (ret != LTTNG_OK) {
2377 goto error;
2378 }
2379 }
2380
2381 /* Start the UST consumer daemons */
2382 /* 64-bit */
2383 pthread_mutex_lock(&ustconsumer64_data.pid_mutex);
2384 if (consumerd64_bin[0] != '\0' &&
2385 ustconsumer64_data.pid == 0 &&
2386 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER &&
2387 cmd_ctx->session->start_consumer) {
2388 pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
2389 ret = start_consumerd(&ustconsumer64_data);
2390 if (ret < 0) {
2391 ret = LTTNG_ERR_UST_CONSUMER64_FAIL;
2392 uatomic_set(&ust_consumerd64_fd, -EINVAL);
2393 goto error;
2394 }
2395
2396 uatomic_set(&ust_consumerd64_fd, ustconsumer64_data.cmd_sock);
2397 uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
2398 } else {
2399 pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
2400 }
2401
2402 /*
2403 * Setup socket for consumer 64 bit. No need for atomic access
2404 * since it was set above and can ONLY be set in this thread.
2405 */
2406 ret = consumer_create_socket(&ustconsumer64_data,
2407 cmd_ctx->session->ust_session->consumer);
2408 if (ret < 0) {
2409 goto error;
2410 }
2411
2412 /* 32-bit */
pthread_mutex_lock(&ustconsumer32_data.pid_mutex);
2413 if (consumerd32_bin[0] != '\0' &&
2414 ustconsumer32_data.pid == 0 &&
2415 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER &&
2416 cmd_ctx->session->start_consumer) {
2417 pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
2418 ret = start_consumerd(&ustconsumer32_data);
2419 if (ret < 0) {
2420 ret = LTTNG_ERR_UST_CONSUMER32_FAIL;
2421 uatomic_set(&ust_consumerd32_fd, -EINVAL);
2422 goto error;
2423 }
2424
2425 uatomic_set(&ust_consumerd32_fd, ustconsumer32_data.cmd_sock);
2426 uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
2427 } else {
2428 pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
2429 }
2430
2431 /*
2432 * Setup socket for consumer 32 bit. No need for atomic access
2433 * since it was set above and can ONLY be set in this thread.
2434 */
2435 ret = consumer_create_socket(&ustconsumer32_data,
2436 cmd_ctx->session->ust_session->consumer);
2437 if (ret < 0) {
2438 goto error;
2439 }
2440 }
2441 break;
2442 }
2443 default:
2444 break;
2445 }
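	/* Commands that need no domain-specific setup jump directly here. */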
2446 skip_domain:
2447
2448 /* Validate consumer daemon state when start/stop trace command */
2449 if (cmd_ctx->lsm->cmd_type == LTTNG_START_TRACE ||
2450 cmd_ctx->lsm->cmd_type == LTTNG_STOP_TRACE) {
2451 switch (cmd_ctx->lsm->domain.type) {
2452 case LTTNG_DOMAIN_UST:
2453 if (uatomic_read(&ust_consumerd_state) != CONSUMER_STARTED) {
2454 ret = LTTNG_ERR_NO_USTCONSUMERD;
2455 goto error;
2456 }
2457 break;
2458 case LTTNG_DOMAIN_KERNEL:
2459 if (uatomic_read(&kernel_consumerd_state) != CONSUMER_STARTED) {
2460 ret = LTTNG_ERR_NO_KERNCONSUMERD;
2461 goto error;
2462 }
2463 break;
2464 }
2465 }
2466
2467 /*
2468 * Check that the UID or GID match that of the tracing session.
2469 * The root user can interact with all sessions.
2470 */
2471 if (need_tracing_session) {
2472 if (!session_access_ok(cmd_ctx->session,
2473 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
2474 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds))) {
2475 ret = LTTNG_ERR_EPERM;
2476 goto error;
2477 }
2478 }
2479
2480 /* Process by command type */
2481 switch (cmd_ctx->lsm->cmd_type) {
2482 case LTTNG_ADD_CONTEXT:
2483 {
2484 ret = cmd_add_context(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2485 cmd_ctx->lsm->u.context.channel_name,
2486 &cmd_ctx->lsm->u.context.ctx, kernel_poll_pipe[1]);
2487 break;
2488 }
2489 case LTTNG_DISABLE_CHANNEL:
2490 {
2491 ret = cmd_disable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2492 cmd_ctx->lsm->u.disable.channel_name);
2493 break;
2494 }
2495 case LTTNG_DISABLE_EVENT:
2496 {
2497 ret = cmd_disable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2498 cmd_ctx->lsm->u.disable.channel_name,
2499 cmd_ctx->lsm->u.disable.name);
2500 break;
2501 }
2502 case LTTNG_DISABLE_ALL_EVENT:
2503 {
2504 DBG("Disabling all events");
2505
2506 ret = cmd_disable_event_all(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2507 cmd_ctx->lsm->u.disable.channel_name);
2508 break;
2509 }
2510 case LTTNG_DISABLE_CONSUMER:
2511 {
2512 ret = cmd_disable_consumer(cmd_ctx->lsm->domain.type, cmd_ctx->session);
2513 break;
2514 }
2515 case LTTNG_ENABLE_CHANNEL:
2516 {
2517 ret = cmd_enable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2518 &cmd_ctx->lsm->u.channel.chan, kernel_poll_pipe[1]);
2519 break;
2520 }
2521 case LTTNG_ENABLE_CONSUMER:
2522 {
2523 /*
2524 * XXX: 0 means that this URI should be applied on the session. Should
2525 * be a DOMAIN enum.
2526 */
2527 ret = cmd_enable_consumer(cmd_ctx->lsm->domain.type, cmd_ctx->session);
2528 if (ret != LTTNG_OK) {
2529 goto error;
2530 }
2531
2532 if (cmd_ctx->lsm->domain.type == 0) {
2533 /* Add the URI for the UST session if a consumer is present. */
2534 if (cmd_ctx->session->ust_session &&
2535 cmd_ctx->session->ust_session->consumer) {
2536 ret = cmd_enable_consumer(LTTNG_DOMAIN_UST, cmd_ctx->session);
2537 } else if (cmd_ctx->session->kernel_session &&
2538 cmd_ctx->session->kernel_session->consumer) {
2539 ret = cmd_enable_consumer(LTTNG_DOMAIN_KERNEL,
2540 cmd_ctx->session);
2541 }
2542 }
2543 break;
2544 }
2545 case LTTNG_ENABLE_EVENT:
2546 {
2547 ret = cmd_enable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2548 cmd_ctx->lsm->u.enable.channel_name,
2549 &cmd_ctx->lsm->u.enable.event, NULL, kernel_poll_pipe[1]);
2550 break;
2551 }
2552 case LTTNG_ENABLE_ALL_EVENT:
2553 {
2554 DBG("Enabling all events");
2555
2556 ret = cmd_enable_event_all(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2557 cmd_ctx->lsm->u.enable.channel_name,
2558 cmd_ctx->lsm->u.enable.event.type, NULL, kernel_poll_pipe[1]);
2559 break;
2560 }
2561 case LTTNG_LIST_TRACEPOINTS:
2562 {
2563 struct lttng_event *events;
2564 ssize_t nb_events;
2565
2566 nb_events = cmd_list_tracepoints(cmd_ctx->lsm->domain.type, &events);
2567 if (nb_events < 0) {
2568 /* Return value is a negative lttng_error_code. */
2569 ret = -nb_events;
2570 goto error;
2571 }
2572
2573 /*
2574 * Setup lttng message with payload size set to the event list size in
2575 * bytes and then copy list into the llm payload.
2576 */
2577 ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event) * nb_events);
2578 if (ret < 0) {
2579 free(events);
2580 goto setup_error;
2581 }
2582
2583 /* Copy event list into message payload */
2584 memcpy(cmd_ctx->llm->payload, events,
2585 sizeof(struct lttng_event) * nb_events);
2586
2587 free(events);
2588
2589 ret = LTTNG_OK;
2590 break;
2591 }
2592 case LTTNG_LIST_TRACEPOINT_FIELDS:
2593 {
2594 struct lttng_event_field *fields;
2595 ssize_t nb_fields;
2596
2597 nb_fields = cmd_list_tracepoint_fields(cmd_ctx->lsm->domain.type,
2598 &fields);
2599 if (nb_fields < 0) {
2600 /* Return value is a negative lttng_error_code. */
2601 ret = -nb_fields;
2602 goto error;
2603 }
2604
2605 /*
2606 * Setup lttng message with payload size set to the event list size in
2607 * bytes and then copy list into the llm payload.
2608 */
2609 ret = setup_lttng_msg(cmd_ctx,
2610 sizeof(struct lttng_event_field) * nb_fields);
2611 if (ret < 0) {
2612 free(fields);
2613 goto setup_error;
2614 }
2615
2616 /* Copy event list into message payload */
2617 memcpy(cmd_ctx->llm->payload, fields,
2618 sizeof(struct lttng_event_field) * nb_fields);
2619
2620 free(fields);
2621
2622 ret = LTTNG_OK;
2623 break;
2624 }
2625 case LTTNG_SET_CONSUMER_URI:
2626 {
2627 size_t nb_uri, len;
2628 struct lttng_uri *uris;
2629
2630 nb_uri = cmd_ctx->lsm->u.uri.size;
2631 len = nb_uri * sizeof(struct lttng_uri);
2632
2633 if (nb_uri == 0) {
2634 ret = LTTNG_ERR_INVALID;
2635 goto error;
2636 }
2637
2638 uris = zmalloc(len);
2639 if (uris == NULL) {
2640 ret = LTTNG_ERR_FATAL;
2641 goto error;
2642 }
2643
2644 /* Receive variable len data */
2645 DBG("Receiving %zu URI(s) from client ...", nb_uri);
2646 ret = lttcomm_recv_unix_sock(sock, uris, len);
2647 if (ret <= 0) {
2648 DBG("No URIs received from client... continuing");
2649 *sock_error = 1;
2650 ret = LTTNG_ERR_SESSION_FAIL;
2651 free(uris);
2652 goto error;
2653 }
2654
2655 ret = cmd_set_consumer_uri(cmd_ctx->lsm->domain.type, cmd_ctx->session,
2656 nb_uri, uris);
2657 if (ret != LTTNG_OK) {
2658 free(uris);
2659 goto error;
2660 }
2661
2662 /*
2663 * XXX: 0 means that this URI should be applied on the session. Should
2664 * be a DOMAIN enum.
2665 */
2666 if (cmd_ctx->lsm->domain.type == 0) {
2667 /* Add the URI for the UST session if a consumer is present. */
2668 if (cmd_ctx->session->ust_session &&
2669 cmd_ctx->session->ust_session->consumer) {
2670 ret = cmd_set_consumer_uri(LTTNG_DOMAIN_UST, cmd_ctx->session,
2671 nb_uri, uris);
2672 } else if (cmd_ctx->session->kernel_session &&
2673 cmd_ctx->session->kernel_session->consumer) {
2674 ret = cmd_set_consumer_uri(LTTNG_DOMAIN_KERNEL,
2675 cmd_ctx->session, nb_uri, uris);
2676 }
2677 }
2678
2679 free(uris);
2680
2681 break;
2682 }
2683 case LTTNG_START_TRACE:
2684 {
2685 ret = cmd_start_trace(cmd_ctx->session);
2686 break;
2687 }
2688 case LTTNG_STOP_TRACE:
2689 {
2690 ret = cmd_stop_trace(cmd_ctx->session);
2691 break;
2692 }
2693 case LTTNG_CREATE_SESSION:
2694 {
2695 size_t nb_uri, len;
2696 struct lttng_uri *uris = NULL;
2697
2698 nb_uri = cmd_ctx->lsm->u.uri.size;
2699 len = nb_uri * sizeof(struct lttng_uri);
2700
2701 if (nb_uri > 0) {
2702 uris = zmalloc(len);
2703 if (uris == NULL) {
2704 ret = LTTNG_ERR_FATAL;
2705 goto error;
2706 }
2707
2708 /* Receive variable len data */
2709 DBG("Waiting for %zu URIs from client ...", nb_uri);
2710 ret = lttcomm_recv_unix_sock(sock, uris, len);
2711 if (ret <= 0) {
2712 DBG("No URIs received from client... continuing");
2713 *sock_error = 1;
2714 ret = LTTNG_ERR_SESSION_FAIL;
2715 free(uris);
2716 goto error;
2717 }
2718
2719 if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
2720 DBG("Creating session with ONE network URI is a bad call");
2721 ret = LTTNG_ERR_SESSION_FAIL;
2722 free(uris);
2723 goto error;
2724 }
2725 }
2726
2727 ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris, nb_uri,
2728 &cmd_ctx->creds);
2729
2730 free(uris);
2731
2732 break;
2733 }
2734 case LTTNG_DESTROY_SESSION:
2735 {
2736 ret = cmd_destroy_session(cmd_ctx->session, kernel_poll_pipe[1]);
2737
2738 /* Set session to NULL so we do not unlock it after free. */
2739 cmd_ctx->session = NULL;
2740 break;
2741 }
2742 case LTTNG_LIST_DOMAINS:
2743 {
2744 ssize_t nb_dom;
2745 struct lttng_domain *domains;
2746
2747 nb_dom = cmd_list_domains(cmd_ctx->session, &domains);
2748 if (nb_dom < 0) {
2749 /* Return value is a negative lttng_error_code. */
2750 ret = -nb_dom;
2751 goto error;
2752 }
2753
2754 ret = setup_lttng_msg(cmd_ctx, nb_dom * sizeof(struct lttng_domain));
2755 if (ret < 0) {
2756 goto setup_error;
2757 }
2758
2759 /* Copy event list into message payload */
2760 memcpy(cmd_ctx->llm->payload, domains,
2761 nb_dom * sizeof(struct lttng_domain));
2762
2763 free(domains);
2764
2765 ret = LTTNG_OK;
2766 break;
2767 }
2768 case LTTNG_LIST_CHANNELS:
2769 {
2770 int nb_chan;
2771 struct lttng_channel *channels;
2772
2773 nb_chan = cmd_list_channels(cmd_ctx->lsm->domain.type,
2774 cmd_ctx->session, &channels);
2775 if (nb_chan < 0) {
2776 /* Return value is a negative lttng_error_code. */
2777 ret = -nb_chan;
2778 goto error;
2779 }
2780
2781 ret = setup_lttng_msg(cmd_ctx, nb_chan * sizeof(struct lttng_channel));
2782 if (ret < 0) {
2783 goto setup_error;
2784 }
2785
2786 /* Copy event list into message payload */
2787 memcpy(cmd_ctx->llm->payload, channels,
2788 nb_chan * sizeof(struct lttng_channel));
2789
2790 free(channels);
2791
2792 ret = LTTNG_OK;
2793 break;
2794 }
2795 case LTTNG_LIST_EVENTS:
2796 {
2797 ssize_t nb_event;
2798 struct lttng_event *events = NULL;
2799
2800 nb_event = cmd_list_events(cmd_ctx->lsm->domain.type, cmd_ctx->session,
2801 cmd_ctx->lsm->u.list.channel_name, &events);
2802 if (nb_event < 0) {
2803 /* Return value is a negative lttng_error_code. */
2804 ret = -nb_event;
2805 goto error;
2806 }
2807
2808 ret = setup_lttng_msg(cmd_ctx, nb_event * sizeof(struct lttng_event));
2809 if (ret < 0) {
2810 goto setup_error;
2811 }
2812
2813 /* Copy event list into message payload */
2814 memcpy(cmd_ctx->llm->payload, events,
2815 nb_event * sizeof(struct lttng_event));
2816
2817 free(events);
2818
2819 ret = LTTNG_OK;
2820 break;
2821 }
2822 case LTTNG_LIST_SESSIONS:
2823 {
2824 unsigned int nr_sessions;
2825
2826 session_lock_list();
2827 nr_sessions = lttng_sessions_count(
2828 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
2829 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
2830
2831 ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_session) * nr_sessions);
2832 if (ret < 0) {
2833 session_unlock_list();
2834 goto setup_error;
2835 }
2836
2837 /* Fill the session array */
2838 cmd_list_lttng_sessions((struct lttng_session *)(cmd_ctx->llm->payload),
2839 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
2840 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
2841
2842 session_unlock_list();
2843
2844 ret = LTTNG_OK;
2845 break;
2846 }
2847 case LTTNG_CALIBRATE:
2848 {
2849 ret = cmd_calibrate(cmd_ctx->lsm->domain.type,
2850 &cmd_ctx->lsm->u.calibrate);
2851 break;
2852 }
2853 case LTTNG_REGISTER_CONSUMER:
2854 {
2855 struct consumer_data *cdata;
2856
2857 switch (cmd_ctx->lsm->domain.type) {
2858 case LTTNG_DOMAIN_KERNEL:
2859 cdata = &kconsumer_data;
2860 break;
2861 default:
2862 ret = LTTNG_ERR_UND;
2863 goto error;
2864 }
2865
2866 ret = cmd_register_consumer(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2867 cmd_ctx->lsm->u.reg.path, cdata);
2868 break;
2869 }
2870 case LTTNG_ENABLE_EVENT_WITH_FILTER:
2871 {
2872 struct lttng_filter_bytecode *bytecode;
2873
2874 if (cmd_ctx->lsm->u.enable.bytecode_len > LTTNG_FILTER_MAX_LEN) {
2875 ret = LTTNG_ERR_FILTER_INVAL;
2876 goto error;
2877 }
2878 if (cmd_ctx->lsm->u.enable.bytecode_len == 0) {
2879 ret = LTTNG_ERR_FILTER_INVAL;
2880 goto error;
2881 }
2882 bytecode = zmalloc(cmd_ctx->lsm->u.enable.bytecode_len);
2883 if (!bytecode) {
2884 ret = LTTNG_ERR_FILTER_NOMEM;
2885 goto error;
2886 }
2887 /* Receive var. len. data */
2888 DBG("Receiving var len data from client ...");
2889 ret = lttcomm_recv_unix_sock(sock, bytecode,
2890 cmd_ctx->lsm->u.enable.bytecode_len);
2891 if (ret <= 0) {
2892 DBG("Nothing recv() from client var len data... continuing");
2893 *sock_error = 1;
2894 ret = LTTNG_ERR_FILTER_INVAL;
free(bytecode);
2895 goto error;
2896 }
2897
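		/* Sanity check: the embedded length must match the announced total size. */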
2898 if (bytecode->len + sizeof(*bytecode)
2899 != cmd_ctx->lsm->u.enable.bytecode_len) {
2900 free(bytecode);
2901 ret = LTTNG_ERR_FILTER_INVAL;
2902 goto error;
2903 }
2904
2905 ret = cmd_enable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2906 cmd_ctx->lsm->u.enable.channel_name,
2907 &cmd_ctx->lsm->u.enable.event, bytecode, kernel_poll_pipe[1]);
2908 break;
2909 }
2910 case LTTNG_DATA_PENDING:
2911 {
2912 ret = cmd_data_pending(cmd_ctx->session);
2913 break;
2914 }
2915 default:
2916 ret = LTTNG_ERR_UND;
2917 break;
2918 }
2919
2920 error:
2921 if (cmd_ctx->llm == NULL) {
2922 DBG("Missing llm structure. Allocating one.");
2923 if (setup_lttng_msg(cmd_ctx, 0) < 0) {
2924 goto setup_error;
2925 }
2926 }
2927 /* Set return code */
2928 cmd_ctx->llm->ret_code = ret;
2929 setup_error:
2930 if (cmd_ctx->session) {
2931 session_unlock(cmd_ctx->session);
2932 }
2933 if (need_tracing_session) {
2934 session_unlock_list();
2935 }
2936 init_setup_error:
2937 return ret;
2938 }
2939
2940 /*
2941 * Thread managing health check socket.
2942 */
2943 static void *thread_manage_health(void *data)
2944 {
2945 int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
2946 uint32_t revents, nb_fd;
2947 struct lttng_poll_event events;
2948 struct lttcomm_health_msg msg;
2949 struct lttcomm_health_data reply;
2950
2951 DBG("[thread] Manage health check started");
2952
2953 rcu_register_thread();
2954
2955 /* Create unix socket */
2956 sock = lttcomm_create_unix_sock(health_unix_sock_path);
2957 if (sock < 0) {
2958 ERR("Unable to create health check Unix socket");
2959 ret = -1;
2960 goto error;
2961 }
2962
2963 /*
2964 * Set the CLOEXEC flag. Return code is useless because either way, the
2965 * show must go on.
2966 */
2967 (void) utils_set_fd_cloexec(sock);
2968
2969 ret = lttcomm_listen_unix_sock(sock);
2970 if (ret < 0) {
2971 goto error;
2972 }
2973
2974 /*
2975 * Pass 2 as size here for the thread quit pipe and the health socket. Nothing
2976 * more will be added to this poll set.
2977 */
2978 ret = create_thread_poll_set(&events, 2);
2979 if (ret < 0) {
2980 goto error;
2981 }
2982
2983 /* Add the health check socket */
2984 ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
2985 if (ret < 0) {
2986 goto error;
2987 }
2988
2989 while (1) {
2990 DBG("Health check ready");
2991
2992 /* Infinite blocking call, waiting for transmission */
2993 restart:
2994 ret = lttng_poll_wait(&events, -1);
2995 if (ret < 0) {
2996 /*
2997 * Restart interrupted system call.
2998 */
2999 if (errno == EINTR) {
3000 goto restart;
3001 }
3002 goto error;
3003 }
3004
3005 nb_fd = ret;
3006
3007 for (i = 0; i < nb_fd; i++) {
3008 /* Fetch once the poll data */
3009 revents = LTTNG_POLL_GETEV(&events, i);
3010 pollfd = LTTNG_POLL_GETFD(&events, i);
3011
3012 /* Thread quit pipe has been closed. Killing thread. */
3013 ret = check_thread_quit_pipe(pollfd, revents);
3014 if (ret) {
3015 err = 0;
3016 goto exit;
3017 }
3018
3019 /* Event on the health check socket */
3020 if (pollfd == sock) {
3021 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3022 ERR("Health socket poll error");
3023 goto error;
3024 }
3025 }
3026 }
3027
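		/* Accept the pending health check connection. */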
3028 new_sock = lttcomm_accept_unix_sock(sock);
3029 if (new_sock < 0) {
3030 goto error;
3031 }
3032
3033 /*
3034 * Set the CLOEXEC flag. Return code is useless because either way, the
3035 * show must go on.
3036 */
3037 (void) utils_set_fd_cloexec(new_sock);
3038
3039 DBG("Receiving data from client for health...");
3040 ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
3041 if (ret <= 0) {
3042 DBG("Nothing recv() from client... continuing");
3043 ret = close(new_sock);
3044 if (ret) {
3045 PERROR("close");
3046 }
3047 new_sock = -1;
3048 continue;
3049 }
3050
3051 rcu_thread_online();
3052
3053 switch (msg.component) {
3054 case LTTNG_HEALTH_CMD:
3055 reply.ret_code = health_check_state(&health_thread_cmd);
3056 break;
3057 case LTTNG_HEALTH_APP_MANAGE:
3058 reply.ret_code = health_check_state(&health_thread_app_manage);
3059 break;
3060 case LTTNG_HEALTH_APP_REG:
3061 reply.ret_code = health_check_state(&health_thread_app_reg);
3062 break;
3063 case LTTNG_HEALTH_KERNEL:
3064 reply.ret_code = health_check_state(&health_thread_kernel);
3065 break;
3066 case LTTNG_HEALTH_CONSUMER:
3067 reply.ret_code = check_consumer_health();
3068 break;
3069 case LTTNG_HEALTH_ALL:
3070 reply.ret_code =
3071 health_check_state(&health_thread_app_manage) &&
3072 health_check_state(&health_thread_app_reg) &&
3073 health_check_state(&health_thread_cmd) &&
3074 health_check_state(&health_thread_kernel) &&
3075 check_consumer_health();
3076 break;
3077 default:
3078 reply.ret_code = LTTNG_ERR_UND;
3079 break;
3080 }
3081
3082 /*
3083 * Flip the ret value since, for the client, 0 means success and 1
3084 * indicates bad health, which is the opposite of the sessiond
3085 * convention. This is simply done to keep the client-side handling
3086 * straightforward.
3087 */
3088 if (reply.ret_code == 0 || reply.ret_code == 1) {
3089 reply.ret_code = !reply.ret_code;
3090 }
3091
3092 DBG2("Health check return value %d", reply.ret_code);
3093
3094 ret = send_unix_sock(new_sock, (void *) &reply, sizeof(reply));
3095 if (ret < 0) {
3096 ERR("Failed to send health data back to client");
3097 }
3098
3099 /* End of transmission */
3100 ret = close(new_sock);
3101 if (ret) {
3102 PERROR("close");
3103 }
3104 new_sock = -1;
3105 }
3106
3107 exit:
3108 error:
3109 if (err) {
3110 ERR("Health error occurred in %s", __func__);
3111 }
3112 DBG("Health check thread dying");
3113 unlink(health_unix_sock_path);
3114 if (sock >= 0) {
3115 ret = close(sock);
3116 if (ret) {
3117 PERROR("close");
3118 }
3119 }
3120 if (new_sock >= 0) {
3121 ret = close(new_sock);
3122 if (ret) {
3123 PERROR("close");
3124 }
3125 }
3126
3127 lttng_poll_clean(&events);
3128
3129 rcu_unregister_thread();
3130 return NULL;
3131 }
3132
3133 /*
3134 * This thread manages all client requests using the client unix socket for
3135 * communication.
3136 */
3137 static void *thread_manage_clients(void *data)
3138 {
3139 int sock = -1, ret, i, pollfd, err = -1;
3140 int sock_error;
3141 uint32_t revents, nb_fd;
3142 struct command_ctx *cmd_ctx = NULL;
3143 struct lttng_poll_event events;
3144
3145 DBG("[thread] Manage client started");
3146
3147 rcu_register_thread();
3148
3149 if (testpoint(thread_manage_clients)) {
3150 goto error_testpoint;
3151 }
3152
3153 health_code_update(&health_thread_cmd);
3154
3155 ret = lttcomm_listen_unix_sock(client_sock);
3156 if (ret < 0) {
3157 goto error_listen;
3158 }
3159
3160 /*
3161 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
3162 * more will be added to this poll set.
3163 */
3164 ret = create_thread_poll_set(&events, 2);
3165 if (ret < 0) {
3166 goto error_create_poll;
3167 }
3168
3169 /* Add the client socket */
3170 ret = lttng_poll_add(&events, client_sock, LPOLLIN | LPOLLPRI);
3171 if (ret < 0) {
3172 goto error;
3173 }
3174
3175 /*
3176 * Notify parent pid that we are ready to accept commands on the client side.
3177 */
3178 if (opt_sig_parent) {
3179 kill(ppid, SIGUSR1);
3180 }
3181
3182 if (testpoint(thread_manage_clients_before_loop)) {
3183 goto error;
3184 }
3185
3186 health_code_update(&health_thread_cmd);
3187
3188 while (1) {
3189 DBG("Accepting client command ...");
3190
3191 /* Infinite blocking call, waiting for transmission */
3192 restart:
3193 health_poll_update(&health_thread_cmd);
3194 ret = lttng_poll_wait(&events, -1);
3195 health_poll_update(&health_thread_cmd);
3196 if (ret < 0) {
3197 /*
3198 * Restart interrupted system call.
3199 */
3200 if (errno == EINTR) {
3201 goto restart;
3202 }
3203 goto error;
3204 }
3205
3206 nb_fd = ret;
3207
3208 for (i = 0; i < nb_fd; i++) {
3209 /* Fetch once the poll data */
3210 revents = LTTNG_POLL_GETEV(&events, i);
3211 pollfd = LTTNG_POLL_GETFD(&events, i);
3212
3213 health_code_update(&health_thread_cmd);
3214
3215 /* Thread quit pipe has been closed. Killing thread. */
3216 ret = check_thread_quit_pipe(pollfd, revents);
3217 if (ret) {
3218 err = 0;
3219 goto exit;
3220 }
3221
3222 /* Event on the client socket */
3223 if (pollfd == client_sock) {
3224 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3225 ERR("Client socket poll error");
3226 goto error;
3227 }
3228 }
3229 }
3230
3231 DBG("Wait for client response");
3232
3233 health_code_update(&health_thread_cmd);
3234
3235 sock = lttcomm_accept_unix_sock(client_sock);
3236 if (sock < 0) {
3237 goto error;
3238 }
3239
3240 /*
3241 * Set the CLOEXEC flag. Return code is useless because either way, the
3242 * show must go on.
3243 */
3244 (void) utils_set_fd_cloexec(sock);
3245
3246 /* Set socket option for credentials retrieval */
3247 ret = lttcomm_setsockopt_creds_unix_sock(sock);
3248 if (ret < 0) {
3249 goto error;
3250 }
3251
3252 /* Allocate context command to process the client request */
3253 cmd_ctx = zmalloc(sizeof(struct command_ctx));
3254 if (cmd_ctx == NULL) {
3255 PERROR("zmalloc cmd_ctx");
3256 goto error;
3257 }
3258
3259 /* Allocate data buffer for reception */
3260 cmd_ctx->lsm = zmalloc(sizeof(struct lttcomm_session_msg));
3261 if (cmd_ctx->lsm == NULL) {
3262 PERROR("zmalloc cmd_ctx->lsm");
3263 goto error;
3264 }
3265
3266 cmd_ctx->llm = NULL;
3267 cmd_ctx->session = NULL;
3268
3269 health_code_update(&health_thread_cmd);
3270
3271 /*
3272 * Data is received from the lttng client. The struct
3273 * lttcomm_session_msg (lsm) contains the command and data request of
3274 * the client.
3275 */
3276 DBG("Receiving data from client ...");
3277 ret = lttcomm_recv_creds_unix_sock(sock, cmd_ctx->lsm,
3278 sizeof(struct lttcomm_session_msg), &cmd_ctx->creds);
3279 if (ret <= 0) {
3280 DBG("Nothing recv() from client... continuing");
3281 ret = close(sock);
3282 if (ret) {
3283 PERROR("close");
3284 }
3285 sock = -1;
3286 clean_command_ctx(&cmd_ctx);
3287 continue;
3288 }
3289
3290 health_code_update(&health_thread_cmd);
3291
3292 // TODO: Validate cmd_ctx including sanity check for
3293 // security purpose.
3294
3295 rcu_thread_online();
3296 /*
3297 * This function dispatches the work to the kernel or userspace tracer
3298 * libs and fills the lttcomm_lttng_msg data structure with all the
3299 * information needed by the client. The command context struct contains
3300 * everything this function may need.
3301 */
3302 ret = process_client_msg(cmd_ctx, sock, &sock_error);
3303 rcu_thread_offline();
3304 if (ret < 0) {
3305 if (sock_error) {
3306 ret = close(sock);
3307 if (ret) {
3308 PERROR("close");
3309 }
3310 sock = -1;
3311 }
3312 /*
3313 * TODO: Inform client somehow of the fatal error. At
3314 * this point, ret < 0 means that a zmalloc failed
3315 * (ENOMEM). Error detected but still accept
3316 * command, unless a socket error has been
3317 * detected.
3318 */
3319 clean_command_ctx(&cmd_ctx);
3320 continue;
3321 }
3322
3323 health_code_update(&health_thread_cmd);
3324
3325 DBG("Sending response (size: %d, retcode: %s)",
3326 cmd_ctx->lttng_msg_size,
3327 lttng_strerror(-cmd_ctx->llm->ret_code));
3328 ret = send_unix_sock(sock, cmd_ctx->llm, cmd_ctx->lttng_msg_size);
3329 if (ret < 0) {
3330 ERR("Failed to send data back to client");
3331 }
3332
3333 /* End of transmission */
3334 ret = close(sock);
3335 if (ret) {
3336 PERROR("close");
3337 }
3338 sock = -1;
3339
3340 clean_command_ctx(&cmd_ctx);
3341
3342 health_code_update(&health_thread_cmd);
3343 }
3344
3345 exit:
3346 error:
3347 if (sock >= 0) {
3348 ret = close(sock);
3349 if (ret) {
3350 PERROR("close");
3351 }
3352 }
3353
3354 lttng_poll_clean(&events);
3355 clean_command_ctx(&cmd_ctx);
3356
3357 error_listen:
3358 error_create_poll:
3359 error_testpoint:
3360 unlink(client_unix_sock_path);
3361 if (client_sock >= 0) {
3362 ret = close(client_sock);
3363 if (ret) {
3364 PERROR("close");
3365 }
3366 }
3367
3368 if (err) {
3369 health_error(&health_thread_cmd);
3370 ERR("Health error occurred in %s", __func__);
3371 }
3372
3373 health_exit(&health_thread_cmd);
3374
3375 DBG("Client thread dying");
3376
3377 rcu_unregister_thread();
3378 return NULL;
3379 }
3380
3381
3382 /*
3383 * usage function on stderr
3384 */
3385 static void usage(void)
3386 {
3387 fprintf(stderr, "Usage: %s OPTIONS\n\nOptions:\n", progname);
3388 fprintf(stderr, " -h, --help Display this usage.\n");
3389 fprintf(stderr, " -c, --client-sock PATH Specify path for the client unix socket\n");
3390 fprintf(stderr, " -a, --apps-sock PATH Specify path for apps unix socket\n");
3391 fprintf(stderr, " --kconsumerd-err-sock PATH Specify path for the kernel consumer error socket\n");
3392 fprintf(stderr, " --kconsumerd-cmd-sock PATH Specify path for the kernel consumer command socket\n");
3393 fprintf(stderr, " --ustconsumerd32-err-sock PATH Specify path for the 32-bit UST consumer error socket\n");
3394 fprintf(stderr, " --ustconsumerd64-err-sock PATH Specify path for the 64-bit UST consumer error socket\n");
3395 fprintf(stderr, " --ustconsumerd32-cmd-sock PATH Specify path for the 32-bit UST consumer command socket\n");
3396 fprintf(stderr, " --ustconsumerd64-cmd-sock PATH Specify path for the 64-bit UST consumer command socket\n");
3397 fprintf(stderr, " --consumerd32-path PATH Specify path for the 32-bit UST consumer daemon binary\n");
3398 fprintf(stderr, " --consumerd32-libdir PATH Specify path for the 32-bit UST consumer daemon libraries\n");
3399 fprintf(stderr, " --consumerd64-path PATH Specify path for the 64-bit UST consumer daemon binary\n");
3400 fprintf(stderr, " --consumerd64-libdir PATH Specify path for the 64-bit UST consumer daemon libraries\n");
3401 fprintf(stderr, " -d, --daemonize Start as a daemon.\n");
3402 fprintf(stderr, " -g, --group NAME Specify the tracing group name. (default: tracing)\n");
3403 fprintf(stderr, " -V, --version Show version number.\n");
3404 fprintf(stderr, " -S, --sig-parent Send SIGUSR1 to parent pid to notify readiness.\n");
3405 fprintf(stderr, " -q, --quiet No output at all.\n");
3406 fprintf(stderr, " -v, --verbose Verbose mode. Activate DBG() macro.\n");
3407 fprintf(stderr, " --verbose-consumer Verbose mode for consumer. Activate DBG() macro.\n");
3408 fprintf(stderr, " --no-kernel Disable kernel tracer\n");
3409 }
3410
3411 /*
3412 * daemon argument parsing
3413 */
3414 static int parse_args(int argc, char **argv)
3415 {
3416 int c;
3417
3418 static struct option long_options[] = {
3419 { "client-sock", 1, 0, 'c' },
3420 { "apps-sock", 1, 0, 'a' },
3421 { "kconsumerd-cmd-sock", 1, 0, 'C' },
3422 { "kconsumerd-err-sock", 1, 0, 'E' },
3423 { "ustconsumerd32-cmd-sock", 1, 0, 'G' },
3424 { "ustconsumerd32-err-sock", 1, 0, 'H' },
3425 { "ustconsumerd64-cmd-sock", 1, 0, 'D' },
3426 { "ustconsumerd64-err-sock", 1, 0, 'F' },
3427 { "consumerd32-path", 1, 0, 'u' },
3428 { "consumerd32-libdir", 1, 0, 'U' },
3429 { "consumerd64-path", 1, 0, 't' },
3430 { "consumerd64-libdir", 1, 0, 'T' },
3431 { "daemonize", 0, 0, 'd' },
3432 { "sig-parent", 0, 0, 'S' },
3433 { "help", 0, 0, 'h' },
3434 { "group", 1, 0, 'g' },
3435 { "version", 0, 0, 'V' },
3436 { "quiet", 0, 0, 'q' },
3437 { "verbose", 0, 0, 'v' },
3438 { "verbose-consumer", 0, 0, 'Z' },
3439 { "no-kernel", 0, 0, 'N' },
3440 { NULL, 0, 0, 0 }
3441 };
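	/*
	 * The short option string below must stay in sync with the 'val'
	 * characters of the long options above; a trailing ':' marks an
	 * option that takes an argument.
	 */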
3442
3443 while (1) {
3444 int option_index = 0;
3445 c = getopt_long(argc, argv, "dhqvVSNZ" "a:c:g:s:C:E:D:F:G:H:u:U:t:T:",
3446 long_options, &option_index);
3447 if (c == -1) {
3448 break;
3449 }
3450
3451 switch (c) {
3452 case 0:
3453 fprintf(stderr, "option %s", long_options[option_index].name);
3454 if (optarg) {
3455 fprintf(stderr, " with arg %s\n", optarg);
3456 }
3457 break;
3458 case 'c':
3459 snprintf(client_unix_sock_path, PATH_MAX, "%s", optarg);
3460 break;
3461 case 'a':
3462 snprintf(apps_unix_sock_path, PATH_MAX, "%s", optarg);
3463 break;
3464 case 'd':
3465 opt_daemon = 1;
3466 break;
3467 case 'g':
3468 opt_tracing_group = optarg;
3469 break;
3470 case 'h':
3471 usage();
3472 exit(EXIT_FAILURE);
3473 case 'V':
3474 fprintf(stdout, "%s\n", VERSION);
3475 exit(EXIT_SUCCESS);
3476 case 'S':
3477 opt_sig_parent = 1;
3478 break;
3479 case 'E':
3480 snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
3481 break;
3482 case 'C':
3483 snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
3484 break;
3485 case 'F':
3486 snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
3487 break;
3488 case 'D':
3489 snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
3490 break;
3491 case 'H':
3492 snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
3493 break;
3494 case 'G':
3495 snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
3496 break;
3497 case 'N':
3498 opt_no_kernel = 1;
3499 break;
3500 case 'q':
3501 lttng_opt_quiet = 1;
3502 break;
3503 case 'v':
3504 /* Verbose level can increase using multiple -v */
3505 lttng_opt_verbose += 1;
3506 break;
3507 case 'Z':
3508 opt_verbose_consumer += 1;
3509 break;
3510 case 'u':
3511 consumerd32_bin = optarg;
3512 break;
3513 case 'U':
3514 consumerd32_libdir = optarg;
3515 break;
3516 case 't':
3517 consumerd64_bin = optarg;
3518 break;
3519 case 'T':
3520 consumerd64_libdir = optarg;
3521 break;
3522 default:
3523 /* Unknown option or other error.
3524 * Error is printed by getopt, just return */
3525 return -1;
3526 }
3527 }
3528
3529 return 0;
3530 }
3531
3532 /*
3533 * Creates the two sockets needed by the daemon.
3534 * apps_sock - The communication socket for all UST apps.
3535 * client_sock - The communication socket of the cli tool (lttng).
3536 */
3537 static int init_daemon_socket(void)
3538 {
3539 int ret = 0;
3540 mode_t old_umask;
3541
3542 old_umask = umask(0);
3543
3544 /* Create client tool unix socket */
3545 client_sock = lttcomm_create_unix_sock(client_unix_sock_path);
3546 if (client_sock < 0) {
3547 ERR("Create unix sock failed: %s", client_unix_sock_path);
3548 ret = -1;
3549 goto end;
3550 }
3551
3552 /* Set the cloexec flag */
3553 ret = utils_set_fd_cloexec(client_sock);
3554 if (ret < 0) {
3555 ERR("Unable to set CLOEXEC flag to the client Unix socket (fd: %d). "
3556 "Continuing but note that the consumer daemon will have a "
3557 "reference to this socket on exec()", client_sock);
3558 }
3559
3560 /* File permission MUST be 660 */
3561 ret = chmod(client_unix_sock_path, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
3562 if (ret < 0) {
3563 ERR("Set file permissions failed: %s", client_unix_sock_path);
3564 PERROR("chmod");
3565 goto end;
3566 }
3567
3568 /* Create the application unix socket */
3569 apps_sock = lttcomm_create_unix_sock(apps_unix_sock_path);
3570 if (apps_sock < 0) {
3571 ERR("Create unix sock failed: %s", apps_unix_sock_path);
3572 ret = -1;
3573 goto end;
3574 }
3575
3576 /* Set the cloexec flag */
3577 ret = utils_set_fd_cloexec(apps_sock);
3578 if (ret < 0) {
3579 ERR("Unable to set CLOEXEC flag to the app Unix socket (fd: %d). "
3580 "Continuing but note that the consumer daemon will have a "
3581 "reference to this socket on exec()", apps_sock);
3582 }
3583
3584 /* File permission MUST be 666 */
3585 ret = chmod(apps_unix_sock_path,
3586 S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
3587 if (ret < 0) {
3588 ERR("Set file permissions failed: %s", apps_unix_sock_path);
3589 PERROR("chmod");
3590 goto end;
3591 }
3592
3593 DBG3("Session daemon client socket %d and application socket %d created",
3594 client_sock, apps_sock);
3595
3596 end:
3597 umask(old_umask);
3598 return ret;
3599 }
3600
3601 /*
3602 * Check if the global socket is available, and if a daemon is answering at the
3603 * other side. If yes, error is returned.
3604 */
3605 static int check_existing_daemon(void)
3606 {
3607 /* Is there anybody out there ? */
3608 if (lttng_session_daemon_alive()) {
3609 return -EEXIST;
3610 }
3611
3612 return 0;
3613 }
3614
3615 /*
3616 * Set the tracing group gid onto the client socket.
3617 *
3618 * Race window between mkdir and chown is OK because we are going from more
3619 * permissive (root.root) to less permissive (root.tracing).
3620 */
3621 static int set_permissions(char *rundir)
3622 {
3623 int ret;
3624 gid_t gid;
3625
3626 ret = allowed_group();
3627 if (ret < 0) {
3628 WARN("No tracing group detected");
3629 ret = 0;
3630 goto end;
3631 }
3632
3633 gid = ret;
3634
3635 /* Set lttng run dir */
3636 ret = chown(rundir, 0, gid);
3637 if (ret < 0) {
3638 ERR("Unable to set group on %s", rundir);
3639 PERROR("chown");
3640 }
3641
3642 /* Ensure tracing group can search the run dir */
3643 ret = chmod(rundir, S_IRWXU | S_IXGRP | S_IXOTH);
3644 if (ret < 0) {
3645 ERR("Unable to set permissions on %s", rundir);
3646 PERROR("chmod");
3647 }
3648
3649 /* lttng client socket path */
3650 ret = chown(client_unix_sock_path, 0, gid);
3651 if (ret < 0) {
3652 ERR("Unable to set group on %s", client_unix_sock_path);
3653 PERROR("chown");
3654 }
3655
3656 /* kconsumer error socket path */
3657 ret = chown(kconsumer_data.err_unix_sock_path, 0, gid);
3658 if (ret < 0) {
3659 ERR("Unable to set group on %s", kconsumer_data.err_unix_sock_path);
3660 PERROR("chown");
3661 }
3662
3663 /* 64-bit ustconsumer error socket path */
3664 ret = chown(ustconsumer64_data.err_unix_sock_path, 0, gid);
3665 if (ret < 0) {
3666 ERR("Unable to set group on %s", ustconsumer64_data.err_unix_sock_path);
3667 PERROR("chown");
3668 }
3669
3670 /* 32-bit ustconsumer compat32 error socket path */
3671 ret = chown(ustconsumer32_data.err_unix_sock_path, 0, gid);
3672 if (ret < 0) {
3673 ERR("Unable to set group on %s", ustconsumer32_data.err_unix_sock_path);
3674 PERROR("chown");
3675 }
3676
3677 DBG("All permissions are set");
3678
3679 end:
3680 return ret;
3681 }
3682
3683 /*
3684 * Create the lttng run directory needed for all global sockets and pipe.
3685 */
3686 static int create_lttng_rundir(const char *rundir)
3687 {
3688 int ret;
3689
3690 DBG3("Creating LTTng run directory: %s", rundir);
3691
3692 ret = mkdir(rundir, S_IRWXU);
3693 if (ret < 0) {
3694 if (errno != EEXIST) {
3695 ERR("Unable to create %s", rundir);
3696 goto error;
3697 } else {
3698 ret = 0;
3699 }
3700 }
3701
3702 error:
3703 return ret;
3704 }
3705
3706 /*
3707 * Setup sockets and directory needed by the consumerd communication with the
3708 * session daemon.
3709 */
3710 static int set_consumer_sockets(struct consumer_data *consumer_data,
3711 const char *rundir)
3712 {
3713 int ret;
3714 char path[PATH_MAX];
3715
3716 switch (consumer_data->type) {
3717 case LTTNG_CONSUMER_KERNEL:
3718 snprintf(path, PATH_MAX, DEFAULT_KCONSUMERD_PATH, rundir);
3719 break;
3720 case LTTNG_CONSUMER64_UST:
3721 snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD64_PATH, rundir);
3722 break;
3723 case LTTNG_CONSUMER32_UST:
3724 snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD32_PATH, rundir);
3725 break;
3726 default:
3727 ERR("Consumer type unknown");
3728 ret = -EINVAL;
3729 goto error;
3730 }
3731
3732 DBG2("Creating consumer directory: %s", path);
3733
3734 ret = mkdir(path, S_IRWXU);
3735 if (ret < 0) {
3736 if (errno != EEXIST) {
3737 PERROR("mkdir");
3738 ERR("Failed to create %s", path);
3739 goto error;
3740 }
3741 ret = -1;
3742 }
3743
3744 /* Create the kconsumerd error unix socket */
3745 consumer_data->err_sock =
3746 lttcomm_create_unix_sock(consumer_data->err_unix_sock_path);
3747 if (consumer_data->err_sock < 0) {
3748 ERR("Create unix sock failed: %s", consumer_data->err_unix_sock_path);
3749 ret = -1;
3750 goto error;
3751 }
3752
3753 /* File permission MUST be 660 */
3754 ret = chmod(consumer_data->err_unix_sock_path,
3755 S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
3756 if (ret < 0) {
3757 ERR("Set file permissions failed: %s", consumer_data->err_unix_sock_path);
3758 PERROR("chmod");
3759 goto error;
3760 }
3761
3762 error:
3763 return ret;
3764 }
3765
3766 /*
3767 * Signal handler for the daemon
3768 *
3769 * Simply stop all worker threads, leaving main() return gracefully after
3770 * joining all threads and calling cleanup().
3771 */
3772 static void sighandler(int sig)
3773 {
3774 switch (sig) {
3775 case SIGPIPE:
3776 DBG("SIGPIPE caught");
3777 return;
3778 case SIGINT:
3779 DBG("SIGINT caught");
3780 stop_threads();
3781 break;
3782 case SIGTERM:
3783 DBG("SIGTERM caught");
3784 stop_threads();
3785 break;
3786 default:
3787 break;
3788 }
3789 }
3790
3791 /*
3792 * Setup signal handler for :
3793 * SIGINT, SIGTERM, SIGPIPE
3794 */
3795 static int set_signal_handler(void)
3796 {
3797 int ret = 0;
3798 struct sigaction sa;
3799 sigset_t sigset;
3800
3801 if ((ret = sigemptyset(&sigset)) < 0) {
3802 PERROR("sigemptyset");
3803 return ret;
3804 }
3805
3806 sa.sa_handler = sighandler;
3807 sa.sa_mask = sigset;
3808 sa.sa_flags = 0;
3809 if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
3810 PERROR("sigaction");
3811 return ret;
3812 }
3813
3814 if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
3815 PERROR("sigaction");
3816 return ret;
3817 }
3818
3819 if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
3820 PERROR("sigaction");
3821 return ret;
3822 }
3823
3824 DBG("Signal handler set for SIGTERM, SIGPIPE and SIGINT");
3825
3826 return ret;
3827 }
3828
3829 /*
3830 * Set open files limit to unlimited. This daemon can open a large number of
3831 * file descriptors in order to consumer multiple kernel traces.
3832 */
3833 static void set_ulimit(void)
3834 {
3835 int ret;
3836 struct rlimit lim;
3837
3838 /* The kernel does not allow an infinite limit for open files */
3839 lim.rlim_cur = 65535;
3840 lim.rlim_max = 65535;
3841
3842 ret = setrlimit(RLIMIT_NOFILE, &lim);
3843 if (ret < 0) {
3844 PERROR("failed to set open files limit");
3845 }
3846 }
3847
3848 /*
3849 * main
3850 */
3851 int main(int argc, char **argv)
3852 {
3853 int ret = 0;
3854 void *status;
3855 const char *home_path, *env_app_timeout;
3856
3857 init_kernel_workarounds();
3858
3859 rcu_register_thread();
3860
3861 setup_consumerd_path();
3862
3863 /* Parse arguments */
3864 progname = argv[0];
3865 if ((ret = parse_args(argc, argv)) < 0) {
3866 goto error;
3867 }
3868
3869 /* Daemonize */
3870 if (opt_daemon) {
3871 int i;
3872
3873 /*
3874 * fork
3875 * child: setsid, close FD 0, 1, 2, chdir /
3876 * parent: exit (if fork is successful)
3877 */
3878 ret = daemon(0, 0);
3879 if (ret < 0) {
3880 PERROR("daemon");
3881 goto error;
3882 }
3883 /*
3884 * We are in the child. Make sure all other file
3885 * descriptors are closed, in case we are called with
3886 * more opened file descriptors than the standard ones.
3887 */
3888 for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) {
3889 (void) close(i);
3890 }
3891 }
3892
3893 /* Create thread quit pipe */
3894 if ((ret = init_thread_quit_pipe()) < 0) {
3895 goto error;
3896 }
3897
3898 /* Check if daemon is UID = 0 */
3899 is_root = !getuid();
3900
3901 if (is_root) {
3902 rundir = strdup(DEFAULT_LTTNG_RUNDIR);
3903
3904 /* Create global run dir with root access */
3905 ret = create_lttng_rundir(rundir);
3906 if (ret < 0) {
3907 goto error;
3908 }
3909
3910 if (strlen(apps_unix_sock_path) == 0) {
3911 snprintf(apps_unix_sock_path, PATH_MAX,
3912 DEFAULT_GLOBAL_APPS_UNIX_SOCK);
3913 }
3914
3915 if (strlen(client_unix_sock_path) == 0) {
3916 snprintf(client_unix_sock_path, PATH_MAX,
3917 DEFAULT_GLOBAL_CLIENT_UNIX_SOCK);
3918 }
3919
3920 /* Set global SHM for ust */
3921 if (strlen(wait_shm_path) == 0) {
3922 snprintf(wait_shm_path, PATH_MAX,
3923 DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH);
3924 }
3925
3926 if (strlen(health_unix_sock_path) == 0) {
3927 snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
3928 DEFAULT_GLOBAL_HEALTH_UNIX_SOCK);
3929 }
3930
3931 /* Setup kernel consumerd path */
3932 snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX,
3933 DEFAULT_KCONSUMERD_ERR_SOCK_PATH, rundir);
3934 snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX,
3935 DEFAULT_KCONSUMERD_CMD_SOCK_PATH, rundir);
3936
3937 DBG2("Kernel consumer err path: %s",
3938 kconsumer_data.err_unix_sock_path);
3939 DBG2("Kernel consumer cmd path: %s",
3940 kconsumer_data.cmd_unix_sock_path);
3941 } else {
3942 home_path = get_home_dir();
3943 if (home_path == NULL) {
3944 /* TODO: Add --socket PATH option */
3945 ERR("Can't get HOME directory for sockets creation.");
3946 ret = -EPERM;
3947 goto error;
3948 }
3949
3950 /*
3951 * Create rundir from home path. This will create something like
3952 * $HOME/.lttng
3953 */
3954 ret = asprintf(&rundir, DEFAULT_LTTNG_HOME_RUNDIR, home_path);
3955 if (ret < 0) {
3956 ret = -ENOMEM;
3957 goto error;
3958 }
3959
3960 ret = create_lttng_rundir(rundir);
3961 if (ret < 0) {
3962 goto error;
3963 }
3964
3965 if (strlen(apps_unix_sock_path) == 0) {
3966 snprintf(apps_unix_sock_path, PATH_MAX,
3967 DEFAULT_HOME_APPS_UNIX_SOCK, home_path);
3968 }
3969
3970 /* Set the cli tool unix socket path */
3971 if (strlen(client_unix_sock_path) == 0) {
3972 snprintf(client_unix_sock_path, PATH_MAX,
3973 DEFAULT_HOME_CLIENT_UNIX_SOCK, home_path);
3974 }
3975
3976 /* Set global SHM for ust */
3977 if (strlen(wait_shm_path) == 0) {
3978 snprintf(wait_shm_path, PATH_MAX,
3979 DEFAULT_HOME_APPS_WAIT_SHM_PATH, geteuid());
3980 }
3981
3982 /* Set health check Unix path */
3983 if (strlen(health_unix_sock_path) == 0) {
3984 snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
3985 DEFAULT_HOME_HEALTH_UNIX_SOCK, home_path);
3986 }
3987 }
3988
3989 /* Set consumer initial state */
3990 kernel_consumerd_state = CONSUMER_STOPPED;
3991 ust_consumerd_state = CONSUMER_STOPPED;
3992
3993 DBG("Client socket path %s", client_unix_sock_path);
3994 DBG("Application socket path %s", apps_unix_sock_path);
3995 DBG("LTTng run directory path: %s", rundir);
3996
3997 /* 32 bits consumerd path setup */
3998 snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX,
3999 DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH, rundir);
4000 snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX,
4001 DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH, rundir);
4002
4003 DBG2("UST consumer 32 bits err path: %s",
4004 ustconsumer32_data.err_unix_sock_path);
4005 DBG2("UST consumer 32 bits cmd path: %s",
4006 ustconsumer32_data.cmd_unix_sock_path);
4007
4008 /* 64 bits consumerd path setup */
4009 snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX,
4010 DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH, rundir);
4011 snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX,
4012 DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH, rundir);
4013
4014 DBG2("UST consumer 64 bits err path: %s",
4015 ustconsumer64_data.err_unix_sock_path);
4016 DBG2("UST consumer 64 bits cmd path: %s",
4017 ustconsumer64_data.cmd_unix_sock_path);
4018
4019 /*
4020 * See if a daemon already exists.
4021 */
4022 if ((ret = check_existing_daemon()) < 0) {
4023 ERR("Already running daemon.\n");
4024 /*
4025 * We do not goto exit because we must not call cleanup()
4026 * since a daemon is already running.
4027 */
4028 goto error;
4029 }
4030
4031 /*
4032 * Init UST app hash table. Alloc hash table before this point since
4033 * cleanup() can get called after that point.
4034 */
4035 ust_app_ht_alloc();
4036
4037 /* After this point, we can safely call cleanup() with "goto exit" */
4038
4039 /*
4040 * These actions must be executed as root. We do that *after* setting up
4041 * the sockets path because we MUST make the check for another daemon using
4042 * those paths *before* trying to set the kernel consumer sockets and init
4043 * kernel tracer.
4044 */
4045 if (is_root) {
4046 ret = set_consumer_sockets(&kconsumer_data, rundir);
4047 if (ret < 0) {
4048 goto exit;
4049 }
4050
4051 /* Setup kernel tracer */
4052 if (!opt_no_kernel) {
4053 init_kernel_tracer();
4054 }
4055
4056 /* Set ulimit for open files */
4057 set_ulimit();
4058 }
4059 /* init lttng_fd tracking must be done after set_ulimit. */
4060 lttng_fd_init();
4061
4062 ret = set_consumer_sockets(&ustconsumer64_data, rundir);
4063 if (ret < 0) {
4064 goto exit;
4065 }
4066
4067 ret = set_consumer_sockets(&ustconsumer32_data, rundir);
4068 if (ret < 0) {
4069 goto exit;
4070 }
4071
4072 if ((ret = set_signal_handler()) < 0) {
4073 goto exit;
4074 }
4075
4076 /* Setup the needed unix socket */
4077 if ((ret = init_daemon_socket()) < 0) {
4078 goto exit;
4079 }
4080
4081 /* Set credentials to socket */
4082 if (is_root && ((ret = set_permissions(rundir)) < 0)) {
4083 goto exit;
4084 }
4085
4086 /* Get parent pid if -S, --sig-parent is specified. */
4087 if (opt_sig_parent) {
4088 ppid = getppid();
4089 }
4090
4091 /* Setup the kernel pipe for waking up the kernel thread */
4092 if (is_root && !opt_no_kernel) {
4093 if ((ret = utils_create_pipe_cloexec(kernel_poll_pipe)) < 0) {
4094 goto exit;
4095 }
4096 }
4097
4098 /* Setup the thread apps communication pipe. */
4099 if ((ret = utils_create_pipe_cloexec(apps_cmd_pipe)) < 0) {
4100 goto exit;
4101 }
4102
4103 /* Init UST command queue. */
4104 cds_wfq_init(&ust_cmd_queue.queue);
4105
4106 /*
4107 * Get session list pointer. This pointer MUST NOT be free(). This list is
4108 * statically declared in session.c
4109 */
4110 session_list_ptr = session_get_list();
4111
4112 /* Set up max poll set size */
4113 lttng_poll_set_max_size();
4114
4115 cmd_init();
4116
4117 /* Init all health thread counters. */
4118 health_init(&health_thread_cmd);
4119 health_init(&health_thread_kernel);
4120 health_init(&health_thread_app_manage);
4121 health_init(&health_thread_app_reg);
4122
4123 /*
4124 * Init health counters of the consumer threads. We do a quick hack here so
4125 * that the state of the consumer health is fine even if the thread is not
4126 * started. Once the thread starts, the health state is updated with a poll
4127 * value to set a health code path. This simply eases our life and has
4128 * no cost whatsoever.
4129 */
4130 health_init(&kconsumer_data.health);
4131 health_poll_update(&kconsumer_data.health);
4132 health_init(&ustconsumer32_data.health);
4133 health_poll_update(&ustconsumer32_data.health);
4134 health_init(&ustconsumer64_data.health);
4135 health_poll_update(&ustconsumer64_data.health);
4136
4137 /* Check for the application socket timeout env variable. */
4138 env_app_timeout = getenv(DEFAULT_APP_SOCKET_TIMEOUT_ENV);
4139 if (env_app_timeout) {
4140 app_socket_timeout = atoi(env_app_timeout);
4141 } else {
4142 app_socket_timeout = DEFAULT_APP_SOCKET_RW_TIMEOUT;
4143 }
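/*
 * Note that atoi() does no error checking: a non-numeric value in the
 * environment yields 0. A stricter (hypothetical) parse could look like:
 *
 *   char *end;
 *   long val = strtol(env_app_timeout, &end, 10);
 *   if (end == env_app_timeout || *end != '\0' || val < 0) {
 *           val = DEFAULT_APP_SOCKET_RW_TIMEOUT;
 *   }
 *   app_socket_timeout = (int) val;
 */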
4144
4145 /* Create thread to manage the health check socket */
4146 ret = pthread_create(&health_thread, NULL,
4147 thread_manage_health, (void *) NULL);
4148 if (ret != 0) {
4149 PERROR("pthread_create health");
4150 goto exit_health;
4151 }
4152
4153 /* Create thread to manage the client socket */
4154 ret = pthread_create(&client_thread, NULL,
4155 thread_manage_clients, (void *) NULL);
4156 if (ret != 0) {
4157 PERROR("pthread_create clients");
4158 goto exit_client;
4159 }
4160
4161 /* Create thread to dispatch registration */
4162 ret = pthread_create(&dispatch_thread, NULL,
4163 thread_dispatch_ust_registration, (void *) NULL);
4164 if (ret != 0) {
4165 PERROR("pthread_create dispatch");
4166 goto exit_dispatch;
4167 }
4168
4169 /* Create thread to manage application registration. */
4170 ret = pthread_create(&reg_apps_thread, NULL,
4171 thread_registration_apps, (void *) NULL);
4172 if (ret != 0) {
4173 PERROR("pthread_create registration");
4174 goto exit_reg_apps;
4175 }
4176
4177 /* Create thread to manage application socket */
4178 ret = pthread_create(&apps_thread, NULL,
4179 thread_manage_apps, (void *) NULL);
4180 if (ret != 0) {
4181 PERROR("pthread_create apps");
4182 goto exit_apps;
4183 }
4184
4185 /* Don't start this thread unless kernel tracing is requested and we are root */
4186 if (is_root && !opt_no_kernel) {
4187 /* Create kernel thread to manage kernel events */
4188 ret = pthread_create(&kernel_thread, NULL,
4189 thread_manage_kernel, (void *) NULL);
4190 if (ret != 0) {
4191 PERROR("pthread_create kernel");
4192 goto exit_kernel;
4193 }
4194
4195 ret = pthread_join(kernel_thread, &status);
4196 if (ret != 0) {
4197 PERROR("pthread_join");
4198 goto error; /* join error, exit without cleanup */
4199 }
4200 }
4201
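/*
 * Each exit_* label below is the jump target taken when creating the
 * corresponding thread fails; falling through joins every thread created
 * before it, so threads are joined in reverse order of creation.
 */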
4202 exit_kernel:
4203 ret = pthread_join(apps_thread, &status);
4204 if (ret != 0) {
4205 PERROR("pthread_join");
4206 goto error; /* join error, exit without cleanup */
4207 }
4208
4209 exit_apps:
4210 ret = pthread_join(reg_apps_thread, &status);
4211 if (ret != 0) {
4212 PERROR("pthread_join");
4213 goto error; /* join error, exit without cleanup */
4214 }
4215
4216 exit_reg_apps:
4217 ret = pthread_join(dispatch_thread, &status);
4218 if (ret != 0) {
4219 PERROR("pthread_join");
4220 goto error; /* join error, exit without cleanup */
4221 }
4222
4223 exit_dispatch:
4224 ret = pthread_join(client_thread, &status);
4225 if (ret != 0) {
4226 PERROR("pthread_join");
4227 goto error; /* join error, exit without cleanup */
4228 }
4229
4230 ret = join_consumer_thread(&kconsumer_data);
4231 if (ret != 0) {
4232 PERROR("join_consumer");
4233 goto error; /* join error, exit without cleanup */
4234 }
4235
4236 ret = join_consumer_thread(&ustconsumer32_data);
4237 if (ret != 0) {
4238 PERROR("join_consumer ust32");
4239 goto error; /* join error, exit without cleanup */
4240 }
4241
4242 ret = join_consumer_thread(&ustconsumer64_data);
4243 if (ret != 0) {
4244 PERROR("join_consumer ust64");
4245 goto error; /* join error, exit without cleanup */
4246 }
4247
4248 exit_client:
4249 ret = pthread_join(health_thread, &status);
4250 if (ret != 0) {
4251 PERROR("pthread_join health thread");
4252 goto error; /* join error, exit without cleanup */
4253 }
4254
4255 exit_health:
4256 exit:
4257 /*
4258 * cleanup() is called when no other thread is running.
4259 */
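/*
 * Go back online from RCU's point of view so that cleanup() can safely
 * traverse RCU-protected structures (presumably including the UST app
 * hash table) while tearing them down.
 */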
4260 rcu_thread_online();
4261 cleanup();
4262 rcu_thread_offline();
4263 rcu_unregister_thread();
4264 if (!ret) {
4265 exit(EXIT_SUCCESS);
4266 }
4267 error:
4268 exit(EXIT_FAILURE);
4269 }