Fix: remove the rundir at the end of the cleanup
lttng-tools.git / src/bin/lttng-sessiond/main.c
index 007c722921700e351cfedd6441fc31a16a9683b3..35007b728a10eb9df8c2200167ef5885c99f95ff 100644
 #include "ust-consumer.h"
 #include "utils.h"
 #include "fd-limit.h"
-#include "health.h"
+#include "health-sessiond.h"
 #include "testpoint.h"
 #include "ust-thread.h"
+#include "jul-thread.h"
 
 #define CONSUMERD_FILE "lttng-consumerd"
 
-/* Const values */
-const char default_tracing_group[] = DEFAULT_TRACING_GROUP;
-
 const char *progname;
-const char *opt_tracing_group;
+static const char *tracing_group_name = DEFAULT_TRACING_GROUP;
 static const char *opt_pidfile;
 static int opt_sig_parent;
 static int opt_verbose_consumer;
@@ -90,7 +88,6 @@ static struct consumer_data kconsumer_data = {
        .cmd_unix_sock_path = DEFAULT_KCONSUMERD_CMD_SOCK_PATH,
        .err_sock = -1,
        .cmd_sock = -1,
-       .metadata_sock.fd = -1,
        .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .cond = PTHREAD_COND_INITIALIZER,
@@ -102,7 +99,6 @@ static struct consumer_data ustconsumer64_data = {
        .cmd_unix_sock_path = DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH,
        .err_sock = -1,
        .cmd_sock = -1,
-       .metadata_sock.fd = -1,
        .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .cond = PTHREAD_COND_INITIALIZER,
@@ -114,7 +110,6 @@ static struct consumer_data ustconsumer32_data = {
        .cmd_unix_sock_path = DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH,
        .err_sock = -1,
        .cmd_sock = -1,
-       .metadata_sock.fd = -1,
        .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .cond = PTHREAD_COND_INITIALIZER,
@@ -162,6 +157,7 @@ static pthread_t kernel_thread;
 static pthread_t dispatch_thread;
 static pthread_t health_thread;
 static pthread_t ht_cleanup_thread;
+static pthread_t jul_reg_thread;
 
 /*
  * UST registration command queue. This queue is tied with a futex and uses a N
@@ -236,6 +232,12 @@ static int app_socket_timeout;
 /* Set in main() with the current page size. */
 long page_size;
 
+/* Application health monitoring */
+struct health_app *health_sessiond;
+
+/* JUL TCP port for registration. Used by the JUL thread. */
+unsigned int jul_tcp_port = DEFAULT_JUL_TCP_PORT;
+
 static
 void setup_consumerd_path(void)
 {
@@ -325,25 +327,6 @@ int sessiond_check_thread_quit_pipe(int fd, uint32_t events)
        return 0;
 }
 
-/*
- * Return group ID of the tracing group or -1 if not found.
- */
-static gid_t allowed_group(void)
-{
-       struct group *grp;
-
-       if (opt_tracing_group) {
-               grp = getgrnam(opt_tracing_group);
-       } else {
-               grp = getgrnam(default_tracing_group);
-       }
-       if (!grp) {
-               return -1;
-       } else {
-               return grp->gr_gid;
-       }
-}
-
 /*
  * Init thread quit pipe.
  *
@@ -390,18 +373,66 @@ static void stop_threads(void)
        futex_nto1_wake(&ust_cmd_queue.futex);
 }
 
+/*
+ * Close all consumer sockets.
+ */
+static void close_consumer_sockets(void)
+{
+       int ret;
+
+       if (kconsumer_data.err_sock >= 0) {
+               ret = close(kconsumer_data.err_sock);
+               if (ret < 0) {
+                       PERROR("kernel consumer err_sock close");
+               }
+       }
+       if (ustconsumer32_data.err_sock >= 0) {
+               ret = close(ustconsumer32_data.err_sock);
+               if (ret < 0) {
+                       PERROR("UST consumerd32 err_sock close");
+               }
+       }
+       if (ustconsumer64_data.err_sock >= 0) {
+               ret = close(ustconsumer64_data.err_sock);
+               if (ret < 0) {
+                       PERROR("UST consumerd64 err_sock close");
+               }
+       }
+       if (kconsumer_data.cmd_sock >= 0) {
+               ret = close(kconsumer_data.cmd_sock);
+               if (ret < 0) {
+                       PERROR("kernel consumer cmd_sock close");
+               }
+       }
+       if (ustconsumer32_data.cmd_sock >= 0) {
+               ret = close(ustconsumer32_data.cmd_sock);
+               if (ret < 0) {
+                       PERROR("UST consumerd32 cmd_sock close");
+               }
+       }
+       if (ustconsumer64_data.cmd_sock >= 0) {
+               ret = close(ustconsumer64_data.cmd_sock);
+               if (ret < 0) {
+                       PERROR("UST consumerd64 cmd_sock close");
+               }
+       }
+}
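
The six near-identical blocks above are explicit by design; for comparison, the same logic as a hedged helper sketch (hypothetical function, not what the patch does):

#include <stdio.h>
#include <unistd.h>

/* Close *fd if it is open, log any failure, and mark it closed. */
static void close_consumer_sock(int *fd, const char *what)
{
	if (*fd >= 0) {
		if (close(*fd) < 0) {
			perror(what);
		}
		*fd = -1;
	}
}

Resetting the descriptor to -1 after close() is also what the thread_manage_consumer error path now does further down; it is what keeps the ">= 0" guards safe against double closes.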
+
 /*
  * Cleanup the daemon
  */
 static void cleanup(void)
 {
        int ret;
-       char *cmd = NULL;
        struct ltt_session *sess, *stmp;
+       char path[PATH_MAX];
 
        DBG("Cleaning up");
 
-       /* First thing first, stop all threads */
+       /*
+        * Close the thread quit pipe. It has already done its job:
+        * cleanup() only runs once every other thread has quit.
+        */
        utils_close_pipe(thread_quit_pipe);
 
        /*
@@ -415,18 +446,60 @@ static void cleanup(void)
                }
        }
 
-       DBG("Removing %s directory", rundir);
-       ret = asprintf(&cmd, "rm -rf %s", rundir);
-       if (ret < 0) {
-               ERR("asprintf failed. Something is really wrong!");
-       }
-
-       /* Remove lttng run directory */
-       ret = system(cmd);
-       if (ret < 0) {
-               ERR("Unable to clean %s", rundir);
-       }
-       free(cmd);
+       DBG("Removing sessiond and consumerd content of directory %s", rundir);
+
+       /* sessiond */
+       snprintf(path, PATH_MAX,
+               "%s/%s",
+               rundir, DEFAULT_LTTNG_SESSIOND_PIDFILE);
+       DBG("Removing %s", path);
+       (void) unlink(path);
+
+       snprintf(path, PATH_MAX, "%s/%s", rundir,
+                       DEFAULT_LTTNG_SESSIOND_JULPORT_FILE);
+       DBG("Removing %s", path);
+       (void) unlink(path);
+
+       /* kconsumerd */
+       snprintf(path, PATH_MAX,
+               DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
+               rundir);
+       DBG("Removing %s", path);
+       (void) unlink(path);
+
+       snprintf(path, PATH_MAX,
+               DEFAULT_KCONSUMERD_PATH,
+               rundir);
+       DBG("Removing directory %s", path);
+       (void) rmdir(path);
+
+       /* ust consumerd 32 */
+       snprintf(path, PATH_MAX,
+               DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
+               rundir);
+       DBG("Removing %s", path);
+       (void) unlink(path);
+
+       snprintf(path, PATH_MAX,
+               DEFAULT_USTCONSUMERD32_PATH,
+               rundir);
+       DBG("Removing directory %s", path);
+       (void) rmdir(path);
+
+       /* ust consumerd 64 */
+       snprintf(path, PATH_MAX,
+               DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
+               rundir);
+       DBG("Removing %s", path);
+       (void) unlink(path);
+
+       snprintf(path, PATH_MAX,
+               DEFAULT_USTCONSUMERD64_PATH,
+               rundir);
+       DBG("Removing directory %s", path);
+       (void) rmdir(path);
+
+       (void) rmdir(rundir);
        free(rundir);
 
        DBG("Cleaning up all sessions");
@@ -458,6 +531,8 @@ static void cleanup(void)
                modprobe_remove_lttng_all();
        }
 
+       close_consumer_sockets();
+
        /* <fun> */
        DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
                        "Matthew, BEET driven development works!%c[%dm",
@@ -628,6 +703,8 @@ static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
                                if (ret < 0) {
                                        goto error;
                                }
+                               /* Update the stream global counter */
+                               ksess->stream_count_global += ret;
 
                                /*
                                 * Have we already sent fds to the consumer? If yes, it means
@@ -641,12 +718,10 @@ static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
                                        rcu_read_lock();
                                        cds_lfht_for_each_entry(ksess->consumer->socks->ht,
                                                        &iter.iter, socket, node.node) {
-                                               /* Code flow error */
-                                               assert(socket->fd >= 0);
-
                                                pthread_mutex_lock(socket->lock);
                                                ret = kernel_consumer_send_channel_stream(socket,
-                                                               channel, ksess);
+                                                               channel, ksess,
+                                                               session->output_traces ? 1 : 0);
                                                pthread_mutex_unlock(socket->lock);
                                                if (ret < 0) {
                                                        rcu_read_unlock();
@@ -677,6 +752,12 @@ static void update_ust_app(int app_sock)
 {
        struct ltt_session *sess, *stmp;
 
+       /* Consumer is in an ERROR state. Stop any application update. */
+       if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
+               /* Stop the update process since the consumer is dead. */
+               return;
+       }
+
        /* For all tracing session(s) */
        cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
                session_lock(sess);
@@ -702,7 +783,7 @@ static void *thread_manage_kernel(void *data)
 
        DBG("[thread] Thread manage kernel started");
 
-       health_register(HEALTH_TYPE_KERNEL);
+       health_register(health_sessiond, HEALTH_SESSIOND_TYPE_KERNEL);
 
        /*
         * This first step of the while is to clean this structure which could free
@@ -827,7 +908,7 @@ error_testpoint:
                WARN("Kernel thread died unexpectedly. "
                                "Kernel tracing can continue but CPU hotplug is disabled.");
        }
-       health_unregister();
+       health_unregister(health_sessiond);
        DBG("Kernel thread dying");
        return NULL;
 }
@@ -868,7 +949,7 @@ static void *thread_manage_consumer(void *data)
 
        DBG("[thread] Manage consumer started");
 
-       health_register(HEALTH_TYPE_CONSUMER);
+       health_register(health_sessiond, HEALTH_SESSIOND_TYPE_CONSUMER);
 
        health_code_update();
 
@@ -966,15 +1047,16 @@ restart:
                /* Connect both socket, command and metadata. */
                consumer_data->cmd_sock =
                        lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
-               consumer_data->metadata_sock.fd =
+               consumer_data->metadata_fd =
                        lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
-               if (consumer_data->cmd_sock < 0 ||
-                               consumer_data->metadata_sock.fd < 0) {
+               if (consumer_data->cmd_sock < 0
+                               || consumer_data->metadata_fd < 0) {
                        PERROR("consumer connect cmd socket");
                        /* On error, signal condition and quit. */
                        signal_consumer_condition(consumer_data, -1);
                        goto error;
                }
+               consumer_data->metadata_sock.fd_ptr = &consumer_data->metadata_fd;
                /* Create metadata socket lock. */
                consumer_data->metadata_sock.lock = zmalloc(sizeof(pthread_mutex_t));
                if (consumer_data->metadata_sock.lock == NULL) {
@@ -987,7 +1069,7 @@ restart:
                signal_consumer_condition(consumer_data, 1);
                DBG("Consumer command socket ready (fd: %d", consumer_data->cmd_sock);
                DBG("Consumer metadata socket ready (fd: %d)",
-                               consumer_data->metadata_sock.fd);
+                               consumer_data->metadata_fd);
        } else {
                ERR("consumer error when waiting for SOCK_READY : %s",
                                lttcomm_get_readable_code(-code));
@@ -1007,7 +1089,7 @@ restart:
        }
 
        /* Add metadata socket that is successfully connected. */
-       ret = lttng_poll_add(&events, consumer_data->metadata_sock.fd,
+       ret = lttng_poll_add(&events, consumer_data->metadata_fd,
                        LPOLLIN | LPOLLRDHUP);
        if (ret < 0) {
                goto error;
@@ -1066,7 +1148,7 @@ restart_poll:
                                                lttcomm_get_readable_code(-code));
 
                                goto exit;
-                       } else if (pollfd == consumer_data->metadata_sock.fd) {
+                       } else if (pollfd == consumer_data->metadata_fd) {
                                /* UST metadata requests */
                                ret = ust_consumer_metadata_request(
                                                &consumer_data->metadata_sock);
@@ -1085,6 +1167,13 @@ restart_poll:
 
 exit:
 error:
+       /*
+        * We lock here because we are about to close the sockets; some other
+        * thread might be using them, so take exclusive access, which aborts
+        * any consumer command issued concurrently by other threads.
+        */
+       pthread_mutex_lock(&consumer_data->lock);
+
        /* Immediately set the consumerd state to stopped */
        if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
                uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR);
@@ -1101,22 +1190,21 @@ error:
                if (ret) {
                        PERROR("close");
                }
+               consumer_data->err_sock = -1;
        }
        if (consumer_data->cmd_sock >= 0) {
                ret = close(consumer_data->cmd_sock);
                if (ret) {
                        PERROR("close");
                }
+               consumer_data->cmd_sock = -1;
        }
-       if (consumer_data->metadata_sock.fd >= 0) {
-               ret = close(consumer_data->metadata_sock.fd);
+       if (*consumer_data->metadata_sock.fd_ptr >= 0) {
+               ret = close(*consumer_data->metadata_sock.fd_ptr);
                if (ret) {
                        PERROR("close");
                }
        }
-       /* Cleanup metadata socket mutex. */
-       pthread_mutex_destroy(consumer_data->metadata_sock.lock);
-       free(consumer_data->metadata_sock.lock);
 
        if (sock >= 0) {
                ret = close(sock);
@@ -1128,6 +1216,11 @@ error:
        unlink(consumer_data->err_unix_sock_path);
        unlink(consumer_data->cmd_unix_sock_path);
        consumer_data->pid = 0;
+       pthread_mutex_unlock(&consumer_data->lock);
+
+       /* Cleanup metadata socket mutex. */
+       pthread_mutex_destroy(consumer_data->metadata_sock.lock);
+       free(consumer_data->metadata_sock.lock);
 
        lttng_poll_clean(&events);
 error_poll:
@@ -1135,7 +1228,7 @@ error_poll:
                health_error();
                ERR("Health error occurred in %s", __func__);
        }
-       health_unregister();
+       health_unregister(health_sessiond);
        DBG("consumer thread cleanup completed");
 
        return NULL;
@@ -1155,7 +1248,7 @@ static void *thread_manage_apps(void *data)
        rcu_register_thread();
        rcu_thread_online();
 
-       health_register(HEALTH_TYPE_APP_MANAGE);
+       health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_MANAGE);
 
        if (testpoint(thread_manage_apps)) {
                goto error_testpoint;
@@ -1243,12 +1336,6 @@ static void *thread_manage_apps(void *data)
                                                goto error;
                                        }
 
-                                       /* Set socket timeout for both receiving and ending */
-                                       (void) lttcomm_setsockopt_rcv_timeout(sock,
-                                                       app_socket_timeout);
-                                       (void) lttcomm_setsockopt_snd_timeout(sock,
-                                                       app_socket_timeout);
-
                                        DBG("Apps with sock %d added to poll set", sock);
 
                                        health_code_update();
@@ -1295,7 +1382,7 @@ error_testpoint:
                health_error();
                ERR("Health error occurred in %s", __func__);
        }
-       health_unregister();
+       health_unregister(health_sessiond);
        DBG("Application communication apps thread cleanup complete");
        rcu_thread_offline();
        rcu_unregister_thread();
@@ -1306,6 +1393,9 @@ error_testpoint:
  * Send a socket to a thread This is called from the dispatch UST registration
  * thread once all sockets are set for the application.
  *
+ * The sock value can be invalid; we don't really care since the thread
+ * will handle it and do the necessary cleanup if so.
+ *
 * On success, return 0, else the negative errno value of the failed
 * write().
  */
@@ -1313,9 +1403,14 @@ static int send_socket_to_thread(int fd, int sock)
 {
        int ret;
 
-       /* Sockets MUST be set or else this should not have been called. */
-       assert(fd >= 0);
-       assert(sock >= 0);
+       /*
+        * It's possible that the FD was concurrently set to -1 just before this
+        * function was called, as part of the thread's shutdown.
+        */
+       if (fd < 0) {
+               ret = -EBADF;
+               goto error;
+       }
 
        do {
                ret = write(fd, &sock, sizeof(sock));
@@ -1334,6 +1429,91 @@ error:
        return ret;
 }
 
+/*
+ * Sanitize the wait queue of the dispatch registration thread by removing
+ * invalid nodes from it. This avoids memory leaks when the UST notify
+ * socket is never received.
+ */
+static void sanitize_wait_queue(struct ust_reg_wait_queue *wait_queue)
+{
+       int ret, nb_fd = 0, i;
+       unsigned int fd_added = 0;
+       struct lttng_poll_event events;
+       struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
+
+       assert(wait_queue);
+
+       lttng_poll_init(&events);
+
+       /* Just skip everything for an empty queue. */
+       if (!wait_queue->count) {
+               goto end;
+       }
+
+       ret = lttng_poll_create(&events, wait_queue->count, LTTNG_CLOEXEC);
+       if (ret < 0) {
+               goto error_create;
+       }
+
+       cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
+                       &wait_queue->head, head) {
+               assert(wait_node->app);
+               ret = lttng_poll_add(&events, wait_node->app->sock,
+                               LPOLLHUP | LPOLLERR);
+               if (ret < 0) {
+                       goto error;
+               }
+
+               fd_added = 1;
+       }
+
+       if (!fd_added) {
+               goto end;
+       }
+
+       /*
+        * Poll but don't block, so we can quickly identify the faulty events
+        * and clean them from the wait queue afterwards.
+        */
+       ret = lttng_poll_wait(&events, 0);
+       if (ret < 0) {
+               goto error;
+       }
+       nb_fd = ret;
+
+       for (i = 0; i < nb_fd; i++) {
+               /* Get faulty FD. */
+               uint32_t revents = LTTNG_POLL_GETEV(&events, i);
+               int pollfd = LTTNG_POLL_GETFD(&events, i);
+
+               cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
+                               &wait_queue->head, head) {
+                       if (pollfd == wait_node->app->sock &&
+                                       (revents & (LPOLLHUP | LPOLLERR))) {
+                               cds_list_del(&wait_node->head);
+                               wait_queue->count--;
+                               ust_app_destroy(wait_node->app);
+                               free(wait_node);
+                               break;
+                       }
+               }
+       }
+
+       if (nb_fd > 0) {
+               DBG("Wait queue sanitized, %d nodes were cleaned up", nb_fd);
+       }
+
+end:
+       lttng_poll_clean(&events);
+       return;
+
+error:
+       lttng_poll_clean(&events);
+error_create:
+       ERR("Unable to sanitize wait queue");
+       return;
+}
+
 /*
  * Dispatch request from the registration threads to the application
  * communication thread.
@@ -1343,16 +1523,16 @@ static void *thread_dispatch_ust_registration(void *data)
        int ret, err = -1;
        struct cds_wfq_node *node;
        struct ust_command *ust_cmd = NULL;
-       struct {
-               struct ust_app *app;
-               struct cds_list_head head;
-       } *wait_node = NULL, *tmp_wait_node;
+       struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
+       struct ust_reg_wait_queue wait_queue = {
+               .count = 0,
+       };
 
-       health_register(HEALTH_TYPE_APP_REG_DISPATCH);
+       health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG_DISPATCH);
 
        health_code_update();
 
-       CDS_LIST_HEAD(wait_queue);
+       CDS_INIT_LIST_HEAD(&wait_queue.head);
 
        DBG("[thread] Dispatch UST command started");
 
@@ -1366,6 +1546,13 @@ static void *thread_dispatch_ust_registration(void *data)
                        struct ust_app *app = NULL;
                        ust_cmd = NULL;
 
+                       /*
+                        * Make sure we don't have node(s) that hung up before their notify
+                        * socket was received. This cleans the list in order to avoid memory
+                        * leaks from notify sockets that are never seen.
+                        */
+                       sanitize_wait_queue(&wait_queue);
+
                        health_code_update();
                        /* Dequeue command for registration */
                        node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue);
@@ -1392,7 +1579,7 @@ static void *thread_dispatch_ust_registration(void *data)
                                        if (ret < 0) {
                                                PERROR("close ust sock dispatch %d", ust_cmd->sock);
                                        }
-                                       lttng_fd_put(1, LTTNG_FD_APPS);
+                                       lttng_fd_put(LTTNG_FD_APPS, 1);
                                        free(ust_cmd);
                                        goto error;
                                }
@@ -1406,7 +1593,7 @@ static void *thread_dispatch_ust_registration(void *data)
                                        if (ret < 0) {
                                                PERROR("close ust sock dispatch %d", ust_cmd->sock);
                                        }
-                                       lttng_fd_put(1, LTTNG_FD_APPS);
+                                       lttng_fd_put(LTTNG_FD_APPS, 1);
                                        free(wait_node);
                                        free(ust_cmd);
                                        continue;
@@ -1415,7 +1602,8 @@ static void *thread_dispatch_ust_registration(void *data)
                                 * Add application to the wait queue so we can set the notify
                                 * socket before putting this object in the global ht.
                                 */
-                               cds_list_add(&wait_node->head, &wait_queue);
+                               cds_list_add(&wait_node->head, &wait_queue.head);
+                               wait_queue.count++;
 
                                free(ust_cmd);
                                /*
@@ -1430,11 +1618,12 @@ static void *thread_dispatch_ust_registration(void *data)
                                 * notify socket if found.
                                 */
                                cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
-                                               &wait_queue, head) {
+                                               &wait_queue.head, head) {
                                        health_code_update();
                                        if (wait_node->app->pid == ust_cmd->reg_msg.pid) {
                                                wait_node->app->notify_sock = ust_cmd->sock;
                                                cds_list_del(&wait_node->head);
+                                               wait_queue.count--;
                                                app = wait_node->app;
                                                free(wait_node);
                                                DBG3("UST app notify socket %d is set", ust_cmd->sock);
@@ -1452,7 +1641,7 @@ static void *thread_dispatch_ust_registration(void *data)
                                        if (ret < 0) {
                                                PERROR("close ust sock dispatch %d", ust_cmd->sock);
                                        }
-                                       lttng_fd_put(1, LTTNG_FD_APPS);
+                                       lttng_fd_put(LTTNG_FD_APPS, 1);
                                }
                                free(ust_cmd);
                        }
@@ -1484,7 +1673,12 @@ static void *thread_dispatch_ust_registration(void *data)
                                if (ret < 0) {
                                        rcu_read_unlock();
                                        session_unlock_list();
-                                       /* No notify thread, stop the UST tracing. */
+                                       /*
+                                        * No notify thread, stop the UST tracing. However, this is
+                                        * not an internal error of this thread, so set the health
+                                        * error code to indicate a normal exit.
+                                        */
+                                       err = 0;
                                        goto error;
                                }
 
@@ -1509,7 +1703,12 @@ static void *thread_dispatch_ust_registration(void *data)
                                if (ret < 0) {
                                        rcu_read_unlock();
                                        session_unlock_list();
-                                       /* No apps. thread, stop the UST tracing. */
+                                       /*
+                                        * No apps thread, stop the UST tracing. However, this is
+                                        * not an internal error of this thread, so set the health
+                                        * error code to indicate a normal exit.
+                                        */
+                                       err = 0;
                                        goto error;
                                }
 
@@ -1529,8 +1728,9 @@ static void *thread_dispatch_ust_registration(void *data)
 error:
        /* Clean up wait queue. */
        cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
-                       &wait_queue, head) {
+                       &wait_queue.head, head) {
                cds_list_del(&wait_node->head);
+               wait_queue.count--;
                free(wait_node);
        }
 
@@ -1539,7 +1739,7 @@ error:
                health_error();
                ERR("Health error occurred in %s", __func__);
        }
-       health_unregister();
+       health_unregister(health_sessiond);
        return NULL;
 }
 
@@ -1559,7 +1759,7 @@ static void *thread_registration_apps(void *data)
 
        DBG("[thread] Manage application registration started");
 
-       health_register(HEALTH_TYPE_APP_REG);
+       health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG);
 
        if (testpoint(thread_registration_apps)) {
                goto error_testpoint;
@@ -1638,6 +1838,18 @@ static void *thread_registration_apps(void *data)
                                                goto error;
                                        }
 
+                                       /*
+                                        * Set socket timeout for both receiving and sending.
+                                        * app_socket_timeout is in seconds, whereas
+                                        * lttcomm_setsockopt_rcv_timeout and
+                                        * lttcomm_setsockopt_snd_timeout expect msec as
+                                        * parameter.
+                                        */
+                                       (void) lttcomm_setsockopt_rcv_timeout(sock,
+                                                       app_socket_timeout * 1000);
+                                       (void) lttcomm_setsockopt_snd_timeout(sock,
+                                                       app_socket_timeout * 1000);
+
                                        /*
                                         * Set the CLOEXEC flag. Return code is useless because
                                         * either way, the show must go on.
@@ -1739,7 +1951,7 @@ error_listen:
 error_create_poll:
 error_testpoint:
        DBG("UST Registration thread cleanup complete");
-       health_unregister();
+       health_unregister(health_sessiond);
 
        return NULL;
 }
@@ -1940,6 +2152,7 @@ static pid_t spawn_consumerd(struct consumer_data *consumer_data)
                                "lttng-consumerd", verbosity, "-k",
                                "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
                                "--consumerd-err-sock", consumer_data->err_unix_sock_path,
+                               "--group", tracing_group_name,
                                NULL);
                        break;
                case LTTNG_CONSUMER64_UST:
@@ -1978,6 +2191,7 @@ static pid_t spawn_consumerd(struct consumer_data *consumer_data)
                        ret = execl(consumerd64_bin, "lttng-consumerd", verbosity, "-u",
                                        "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
                                        "--consumerd-err-sock", consumer_data->err_unix_sock_path,
+                                       "--group", tracing_group_name,
                                        NULL);
                        if (consumerd64_libdir[0] != '\0') {
                                free(tmpnew);
@@ -2023,6 +2237,7 @@ static pid_t spawn_consumerd(struct consumer_data *consumer_data)
                        ret = execl(consumerd32_bin, "lttng-consumerd", verbosity, "-u",
                                        "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
                                        "--consumerd-err-sock", consumer_data->err_unix_sock_path,
+                                       "--group", tracing_group_name,
                                        NULL);
                        if (consumerd32_libdir[0] != '\0') {
                                free(tmpnew);
@@ -2108,21 +2323,6 @@ error:
        return ret;
 }
 
-/*
- * Compute health status of each consumer. If one of them is zero (bad
- * state), we return 0.
- */
-static int check_consumer_health(void)
-{
-       int ret;
-
-       ret = health_check_state(HEALTH_TYPE_CONSUMER);
-
-       DBG3("Health consumer check %d", ret);
-
-       return ret;
-}
-
 /*
  * Setup necessary data for kernel tracer action.
  */
@@ -2221,6 +2421,7 @@ static int copy_session_consumer(int domain, struct ltt_session *session)
                consumer = session->kernel_session->consumer;
                dir_name = DEFAULT_KERNEL_TRACE_DIR;
                break;
+       case LTTNG_DOMAIN_JUL:
        case LTTNG_DOMAIN_UST:
                DBG3("Copying tracing session consumer output in UST session");
                if (session->ust_session->consumer) {
@@ -2264,6 +2465,7 @@ static int create_ust_session(struct ltt_session *session,
        assert(session->consumer);
 
        switch (domain->type) {
+       case LTTNG_DOMAIN_JUL:
        case LTTNG_DOMAIN_UST:
                break;
        default:
@@ -2282,6 +2484,9 @@ static int create_ust_session(struct ltt_session *session,
 
        lus->uid = session->uid;
        lus->gid = session->gid;
+       lus->output_traces = session->output_traces;
+       lus->snapshot_mode = session->snapshot_mode;
+       lus->live_timer_interval = session->live_timer;
        session->ust_session = lus;
 
        /* Copy session output to the newly created UST session */
@@ -2338,6 +2543,8 @@ static int create_kernel_session(struct ltt_session *session)
 
        session->kernel_session->uid = session->uid;
        session->kernel_session->gid = session->gid;
+       session->kernel_session->output_traces = session->output_traces;
+       session->kernel_session->snapshot_mode = session->snapshot_mode;
 
        return LTTNG_OK;
 
@@ -2393,12 +2600,18 @@ static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
 
        switch (cmd_ctx->lsm->cmd_type) {
        case LTTNG_CREATE_SESSION:
+       case LTTNG_CREATE_SESSION_SNAPSHOT:
+       case LTTNG_CREATE_SESSION_LIVE:
        case LTTNG_DESTROY_SESSION:
        case LTTNG_LIST_SESSIONS:
        case LTTNG_LIST_DOMAINS:
        case LTTNG_START_TRACE:
        case LTTNG_STOP_TRACE:
        case LTTNG_DATA_PENDING:
+       case LTTNG_SNAPSHOT_ADD_OUTPUT:
+       case LTTNG_SNAPSHOT_DEL_OUTPUT:
+       case LTTNG_SNAPSHOT_LIST_OUTPUT:
+       case LTTNG_SNAPSHOT_RECORD:
                need_domain = 0;
                break;
        default:
@@ -2451,6 +2664,8 @@ static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
        /* Commands that DO NOT need a session. */
        switch (cmd_ctx->lsm->cmd_type) {
        case LTTNG_CREATE_SESSION:
+       case LTTNG_CREATE_SESSION_SNAPSHOT:
+       case LTTNG_CREATE_SESSION_LIVE:
        case LTTNG_CALIBRATE:
        case LTTNG_LIST_SESSIONS:
        case LTTNG_LIST_TRACEPOINTS:
@@ -2467,12 +2682,7 @@ static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
                session_lock_list();
                cmd_ctx->session = session_find_by_name(cmd_ctx->lsm->session.name);
                if (cmd_ctx->session == NULL) {
-                       if (cmd_ctx->lsm->session.name != NULL) {
-                               ret = LTTNG_ERR_SESS_NOT_FOUND;
-                       } else {
-                               /* If no session name specified */
-                               ret = LTTNG_ERR_SELECT_SESS;
-                       }
+                       ret = LTTNG_ERR_SESS_NOT_FOUND;
                        goto error;
                } else {
                        /* Acquire lock for the session */
@@ -2547,8 +2757,13 @@ static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
                }
 
                break;
+       case LTTNG_DOMAIN_JUL:
        case LTTNG_DOMAIN_UST:
        {
+               if (!ust_app_supported()) {
+                       ret = LTTNG_ERR_NO_UST;
+                       goto error;
+               }
                /* Consumer is in an ERROR state. Report back to client */
                if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
                        ret = LTTNG_ERR_NO_USTCONSUMERD;
@@ -2634,6 +2849,7 @@ skip_domain:
        if (cmd_ctx->lsm->cmd_type == LTTNG_START_TRACE ||
                        cmd_ctx->lsm->cmd_type == LTTNG_STOP_TRACE) {
                switch (cmd_ctx->lsm->domain.type) {
+               case LTTNG_DOMAIN_JUL:
                case LTTNG_DOMAIN_UST:
                        if (uatomic_read(&ust_consumerd_state) != CONSUMER_STARTED) {
                                ret = LTTNG_ERR_NO_USTCONSUMERD;
@@ -2896,7 +3112,7 @@ skip_domain:
                }
 
                ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris, nb_uri,
-                       &cmd_ctx->creds);
+                       &cmd_ctx->creds, 0);
 
                free(uris);
 
@@ -3086,6 +3302,145 @@ skip_domain:
                ret = cmd_data_pending(cmd_ctx->session);
                break;
        }
+       case LTTNG_SNAPSHOT_ADD_OUTPUT:
+       {
+               struct lttcomm_lttng_output_id reply;
+
+               ret = cmd_snapshot_add_output(cmd_ctx->session,
+                               &cmd_ctx->lsm->u.snapshot_output.output, &reply.id);
+               if (ret != LTTNG_OK) {
+                       goto error;
+               }
+
+               ret = setup_lttng_msg(cmd_ctx, sizeof(reply));
+               if (ret < 0) {
+                       goto setup_error;
+               }
+
+               /* Copy output list into message payload */
+               memcpy(cmd_ctx->llm->payload, &reply, sizeof(reply));
+               ret = LTTNG_OK;
+               break;
+       }
+       case LTTNG_SNAPSHOT_DEL_OUTPUT:
+       {
+               ret = cmd_snapshot_del_output(cmd_ctx->session,
+                               &cmd_ctx->lsm->u.snapshot_output.output);
+               break;
+       }
+       case LTTNG_SNAPSHOT_LIST_OUTPUT:
+       {
+               ssize_t nb_output;
+               struct lttng_snapshot_output *outputs = NULL;
+
+               nb_output = cmd_snapshot_list_outputs(cmd_ctx->session, &outputs);
+               if (nb_output < 0) {
+                       ret = -nb_output;
+                       goto error;
+               }
+
+               ret = setup_lttng_msg(cmd_ctx,
+                               nb_output * sizeof(struct lttng_snapshot_output));
+               if (ret < 0) {
+                       free(outputs);
+                       goto setup_error;
+               }
+
+               if (outputs) {
+                       /* Copy output list into message payload */
+                       memcpy(cmd_ctx->llm->payload, outputs,
+                                       nb_output * sizeof(struct lttng_snapshot_output));
+                       free(outputs);
+               }
+
+               ret = LTTNG_OK;
+               break;
+       }
+       case LTTNG_SNAPSHOT_RECORD:
+       {
+               ret = cmd_snapshot_record(cmd_ctx->session,
+                               &cmd_ctx->lsm->u.snapshot_record.output,
+                               cmd_ctx->lsm->u.snapshot_record.wait);
+               break;
+       }
+       case LTTNG_CREATE_SESSION_SNAPSHOT:
+       {
+               size_t nb_uri, len;
+               struct lttng_uri *uris = NULL;
+
+               nb_uri = cmd_ctx->lsm->u.uri.size;
+               len = nb_uri * sizeof(struct lttng_uri);
+
+               if (nb_uri > 0) {
+                       uris = zmalloc(len);
+                       if (uris == NULL) {
+                               ret = LTTNG_ERR_FATAL;
+                               goto error;
+                       }
+
+                       /* Receive variable len data */
+                       DBG("Waiting for %zu URIs from client ...", nb_uri);
+                       ret = lttcomm_recv_unix_sock(sock, uris, len);
+                       if (ret <= 0) {
+                               DBG("No URIs received from client... continuing");
+                               *sock_error = 1;
+                               ret = LTTNG_ERR_SESSION_FAIL;
+                               free(uris);
+                               goto error;
+                       }
+
+                       if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
+                               DBG("Creating session with ONE network URI is a bad call");
+                               ret = LTTNG_ERR_SESSION_FAIL;
+                               free(uris);
+                               goto error;
+                       }
+               }
+
+               ret = cmd_create_session_snapshot(cmd_ctx->lsm->session.name, uris,
+                               nb_uri, &cmd_ctx->creds);
+               free(uris);
+               break;
+       }
+       case LTTNG_CREATE_SESSION_LIVE:
+       {
+               size_t nb_uri, len;
+               struct lttng_uri *uris = NULL;
+
+               nb_uri = cmd_ctx->lsm->u.uri.size;
+               len = nb_uri * sizeof(struct lttng_uri);
+
+               if (nb_uri > 0) {
+                       uris = zmalloc(len);
+                       if (uris == NULL) {
+                               ret = LTTNG_ERR_FATAL;
+                               goto error;
+                       }
+
+                       /* Receive variable len data */
+                       DBG("Waiting for %zu URIs from client ...", nb_uri);
+                       ret = lttcomm_recv_unix_sock(sock, uris, len);
+                       if (ret <= 0) {
+                               DBG("No URIs received from client... continuing");
+                               *sock_error = 1;
+                               ret = LTTNG_ERR_SESSION_FAIL;
+                               free(uris);
+                               goto error;
+                       }
+
+                       if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
+                               DBG("Creating session with ONE network URI is a bad call");
+                               ret = LTTNG_ERR_SESSION_FAIL;
+                               free(uris);
+                               goto error;
+                       }
+               }
+
+               ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris,
+                               nb_uri, &cmd_ctx->creds, cmd_ctx->lsm->u.session_live.timer_interval);
+               free(uris);
+               break;
+       }
        default:
                ret = LTTNG_ERR_UND;
                break;
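
The LTTNG_CREATE_SESSION_SNAPSHOT and LTTNG_CREATE_SESSION_LIVE cases above duplicate the variable-length URI receive verbatim; it could be factored out. A hedged sketch of such a helper, reusing this file's zmalloc()/lttcomm_recv_unix_sock() conventions (hypothetical name, not part of the patch):

/*
 * Receive nb_uri lttng_uri entries from the client socket into a freshly
 * allocated array. Returns LTTNG_OK or an LTTNG_ERR_* code; on success,
 * the caller must free *out.
 */
static int receive_uris(int sock, size_t nb_uri, struct lttng_uri **out,
		int *sock_error)
{
	struct lttng_uri *uris;
	size_t len = nb_uri * sizeof(struct lttng_uri);

	*out = NULL;
	if (nb_uri == 0) {
		return LTTNG_OK;
	}

	uris = zmalloc(len);
	if (!uris) {
		return LTTNG_ERR_FATAL;
	}

	DBG("Waiting for %zu URIs from client ...", nb_uri);
	if (lttcomm_recv_unix_sock(sock, uris, len) <= 0) {
		*sock_error = 1;
		free(uris);
		return LTTNG_ERR_SESSION_FAIL;
	}

	*out = uris;
	return LTTNG_OK;
}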
@@ -3119,8 +3474,8 @@ static void *thread_manage_health(void *data)
        int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
        uint32_t revents, nb_fd;
        struct lttng_poll_event events;
-       struct lttcomm_health_msg msg;
-       struct lttcomm_health_data reply;
+       struct health_comm_msg msg;
+       struct health_comm_reply reply;
 
        DBG("[thread] Manage health check started");
 
@@ -3137,6 +3492,27 @@ static void *thread_manage_health(void *data)
                goto error;
        }
 
+       if (is_root) {
+               /* lttng health client socket path permissions */
+               ret = chown(health_unix_sock_path, 0,
+                               utils_get_group_id(tracing_group_name));
+               if (ret < 0) {
+                       ERR("Unable to set group on %s", health_unix_sock_path);
+                       PERROR("chown");
+                       ret = -1;
+                       goto error;
+               }
+
+               ret = chmod(health_unix_sock_path,
+                               S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+               if (ret < 0) {
+                       ERR("Unable to set permissions on %s", health_unix_sock_path);
+                       PERROR("chmod");
+                       ret = -1;
+                       goto error;
+               }
+       }
+
        /*
         * Set the CLOEXEC flag. Return code is useless because either way, the
         * show must go on.
@@ -3227,58 +3603,18 @@ restart:
 
                rcu_thread_online();
 
-               switch (msg.component) {
-               case LTTNG_HEALTH_CMD:
-                       reply.ret_code = health_check_state(HEALTH_TYPE_CMD);
-                       break;
-               case LTTNG_HEALTH_APP_MANAGE:
-                       reply.ret_code = health_check_state(HEALTH_TYPE_APP_MANAGE);
-                       break;
-               case LTTNG_HEALTH_APP_REG:
-                       reply.ret_code = health_check_state(HEALTH_TYPE_APP_REG);
-                       break;
-               case LTTNG_HEALTH_KERNEL:
-                       reply.ret_code = health_check_state(HEALTH_TYPE_KERNEL);
-                       break;
-               case LTTNG_HEALTH_CONSUMER:
-                       reply.ret_code = check_consumer_health();
-                       break;
-               case LTTNG_HEALTH_HT_CLEANUP:
-                       reply.ret_code = health_check_state(HEALTH_TYPE_HT_CLEANUP);
-                       break;
-               case LTTNG_HEALTH_APP_MANAGE_NOTIFY:
-                       reply.ret_code = health_check_state(HEALTH_TYPE_APP_MANAGE_NOTIFY);
-                       break;
-               case LTTNG_HEALTH_APP_REG_DISPATCH:
-                       reply.ret_code = health_check_state(HEALTH_TYPE_APP_REG_DISPATCH);
-                       break;
-               case LTTNG_HEALTH_ALL:
-                       reply.ret_code =
-                               health_check_state(HEALTH_TYPE_APP_MANAGE) &&
-                               health_check_state(HEALTH_TYPE_APP_REG) &&
-                               health_check_state(HEALTH_TYPE_CMD) &&
-                               health_check_state(HEALTH_TYPE_KERNEL) &&
-                               check_consumer_health() &&
-                               health_check_state(HEALTH_TYPE_HT_CLEANUP) &&
-                               health_check_state(HEALTH_TYPE_APP_MANAGE_NOTIFY) &&
-                               health_check_state(HEALTH_TYPE_APP_REG_DISPATCH);
-                       break;
-               default:
-                       reply.ret_code = LTTNG_ERR_UND;
-                       break;
-               }
-
-               /*
-                * Flip ret value since 0 is a success and 1 indicates a bad health for
-                * the client where in the sessiond it is the opposite. Again, this is
-                * just to make things easier for us poor developer which enjoy a lot
-                * lazyness.
-                */
-               if (reply.ret_code == 0 || reply.ret_code == 1) {
-                       reply.ret_code = !reply.ret_code;
+               reply.ret_code = 0;
+               for (i = 0; i < NR_HEALTH_SESSIOND_TYPES; i++) {
+                       /* health_check_state() returns 0 if health is bad. */
+                       if (!health_check_state(health_sessiond, i)) {
+                               reply.ret_code |= 1ULL << i;
+                       }
                }
 
-               DBG2("Health check return value %d", reply.ret_code);
+               DBG2("Health check return value %" PRIx64, reply.ret_code);
 
                ret = send_unix_sock(new_sock, (void *) &reply, sizeof(reply));
                if (ret < 0) {
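
With this hunk, reply.ret_code becomes a per-thread bitmask instead of a flipped boolean: bit i is set when health component i is in a bad state. A minimal hedged sketch of the matching client-side decode (assuming the reply field is a uint64_t and the same component numbering):

#include <stdint.h>
#include <stdio.h>

/* Print every component whose health bit is set in ret_code. */
static void report_bad_health(uint64_t ret_code, int nr_types)
{
	int i;

	for (i = 0; i < nr_types; i++) {
		if (ret_code & (1ULL << i)) {
			printf("health component %d is in a bad state\n", i);
		}
	}
}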
@@ -3329,7 +3665,7 @@ static void *thread_manage_clients(void *data)
 
        rcu_register_thread();
 
-       health_register(HEALTH_TYPE_CMD);
+       health_register(health_sessiond, HEALTH_SESSIOND_TYPE_CMD);
 
        if (testpoint(thread_manage_clients)) {
                goto error_testpoint;
@@ -3553,7 +3889,7 @@ error_testpoint:
                ERR("Health error occurred in %s", __func__);
        }
 
-       health_unregister();
+       health_unregister(health_sessiond);
 
        DBG("Client thread dying");
 
@@ -3590,6 +3926,7 @@ static void usage(void)
        fprintf(stderr, "  -p, --pidfile FILE                 Write a pid to FILE name overriding the default value.\n");
        fprintf(stderr, "      --verbose-consumer             Verbose mode for consumer. Activate DBG() macro.\n");
        fprintf(stderr, "      --no-kernel                    Disable kernel tracer\n");
+       fprintf(stderr, "      --jul-tcp-port                 JUL application registration TCP port\n");
 }
 
 /*
@@ -3622,12 +3959,13 @@ static int parse_args(int argc, char **argv)
                { "verbose-consumer", 0, 0, 'Z' },
                { "no-kernel", 0, 0, 'N' },
                { "pidfile", 1, 0, 'p' },
+               { "jul-tcp-port", 1, 0, 'J' },
                { NULL, 0, 0, 0 }
        };
 
        while (1) {
                int option_index = 0;
-               c = getopt_long(argc, argv, "dhqvVSN" "a:c:g:s:C:E:D:F:Z:u:t:p:",
+               c = getopt_long(argc, argv, "dhqvVSN" "a:c:g:s:C:E:D:F:Z:u:t:p:J:",
                                long_options, &option_index);
                if (c == -1) {
                        break;
@@ -3650,7 +3988,7 @@ static int parse_args(int argc, char **argv)
                        opt_daemon = 1;
                        break;
                case 'g':
-                       opt_tracing_group = optarg;
+                       tracing_group_name = optarg;
                        break;
                case 'h':
                        usage();
@@ -3707,6 +4045,24 @@ static int parse_args(int argc, char **argv)
                case 'p':
                        opt_pidfile = optarg;
                        break;
+               case 'J': /* JUL TCP port. */
+               {
+                       unsigned long v;
+
+                       errno = 0;
+                       v = strtoul(optarg, NULL, 0);
+                       if (errno != 0 || !isdigit(optarg[0])) {
+                               ERR("Wrong value in --jul-tcp-port parameter: %s", optarg);
+                               return -1;
+                       }
+                       if (v == 0 || v > 65535) {
+                               ERR("Port out of range in --jul-tcp-port parameter: %s", optarg);
+                               return -1;
+                       }
+                       jul_tcp_port = (uint32_t) v;
+                       DBG3("JUL TCP port set to non default: %u", jul_tcp_port);
+                       break;
+               }
                default:
                        /* Unknown option or other error.
                         * Error is printed by getopt, just return */
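
One caveat with the --jul-tcp-port parsing above: isdigit() only inspects the first character, so trailing garbage such as "5345x" is silently accepted. A stricter hedged variant using strtoul()'s endptr (hypothetical helper, not part of the patch):

#include <errno.h>
#include <stdlib.h>

/* Parse a TCP port from arg; return the port, or 0 on any error. */
static unsigned int parse_tcp_port(const char *arg)
{
	char *end = NULL;
	unsigned long v;

	errno = 0;
	v = strtoul(arg, &end, 0);
	if (errno != 0 || end == arg || *end != '\0' || v == 0 || v > 65535) {
		return 0;
	}
	return (unsigned int) v;
}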
@@ -3811,14 +4167,7 @@ static int set_permissions(char *rundir)
        int ret;
        gid_t gid;
 
-       ret = allowed_group();
-       if (ret < 0) {
-               WARN("No tracing group detected");
-               ret = 0;
-               goto end;
-       }
-
-       gid = ret;
+       gid = utils_get_group_id(tracing_group_name);
 
        /* Set lttng run dir */
        ret = chown(rundir, 0, gid);
@@ -3827,8 +4176,12 @@ static int set_permissions(char *rundir)
                PERROR("chown");
        }
 
-       /* Ensure tracing group can search the run dir */
-       ret = chmod(rundir, S_IRWXU | S_IXGRP | S_IXOTH);
+       /*
+        * Ensure all applications and tracing group can search the run
+        * dir. Allow everyone to read the directory, since it does not
+        * buy us anything to hide its content.
+        */
+       ret = chmod(rundir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
        if (ret < 0) {
                ERR("Unable to set permissions on %s", rundir);
                PERROR("chmod");
@@ -3842,21 +4195,21 @@ static int set_permissions(char *rundir)
        }
 
        /* kconsumer error socket path */
-       ret = chown(kconsumer_data.err_unix_sock_path, 0, gid);
+       ret = chown(kconsumer_data.err_unix_sock_path, 0, 0);
        if (ret < 0) {
                ERR("Unable to set group on %s", kconsumer_data.err_unix_sock_path);
                PERROR("chown");
        }
 
        /* 64-bit ustconsumer error socket path */
-       ret = chown(ustconsumer64_data.err_unix_sock_path, 0, gid);
+       ret = chown(ustconsumer64_data.err_unix_sock_path, 0, 0);
        if (ret < 0) {
                ERR("Unable to set group on %s", ustconsumer64_data.err_unix_sock_path);
                PERROR("chown");
        }
 
        /* 32-bit ustconsumer compat32 error socket path */
-       ret = chown(ustconsumer32_data.err_unix_sock_path, 0, gid);
+       ret = chown(ustconsumer32_data.err_unix_sock_path, 0, 0);
        if (ret < 0) {
                ERR("Unable to set group on %s", ustconsumer32_data.err_unix_sock_path);
                PERROR("chown");
@@ -3864,7 +4217,6 @@ static int set_permissions(char *rundir)
 
        DBG("All permissions are set");
 
-end:
        return ret;
 }
 
@@ -3901,7 +4253,7 @@ static int set_consumer_sockets(struct consumer_data *consumer_data,
        int ret;
        char path[PATH_MAX];
 
-    switch (consumer_data->type) {
+       switch (consumer_data->type) {
        case LTTNG_CONSUMER_KERNEL:
                snprintf(path, PATH_MAX, DEFAULT_KCONSUMERD_PATH, rundir);
                break;
@@ -3919,7 +4271,7 @@ static int set_consumer_sockets(struct consumer_data *consumer_data,
 
        DBG2("Creating consumer directory: %s", path);
 
-       ret = mkdir(path, S_IRWXU);
+       ret = mkdir(path, S_IRWXU | S_IRGRP | S_IXGRP);
        if (ret < 0) {
                if (errno != EEXIST) {
                        PERROR("mkdir");
@@ -3928,6 +4280,14 @@ static int set_consumer_sockets(struct consumer_data *consumer_data,
                }
                ret = -1;
        }
+       if (is_root) {
+               ret = chown(path, 0, utils_get_group_id(tracing_group_name));
+               if (ret < 0) {
+                       ERR("Unable to set group on %s", path);
+                       PERROR("chown");
+                       goto error;
+               }
+       }
 
        /* Create the kconsumerd error unix socket */
        consumer_data->err_sock =
@@ -3938,6 +4298,16 @@ static int set_consumer_sockets(struct consumer_data *consumer_data,
                goto error;
        }
 
+       /*
+        * Set the CLOEXEC flag. Return code is useless because either way, the
+        * show must go on.
+        */
+       ret = utils_set_fd_cloexec(consumer_data->err_sock);
+       if (ret < 0) {
+               PERROR("utils_set_fd_cloexec");
+               /* continue anyway */
+       }
+
        /* File permission MUST be 660 */
        ret = chmod(consumer_data->err_unix_sock_path,
                        S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
@@ -4065,6 +4435,34 @@ error:
        return;
 }
 
+/*
+ * Write the JUL TCP port file in the rundir.
+ */
+static void write_julport(void)
+{
+       int ret;
+       char path[PATH_MAX];
+
+       assert(rundir);
+
+       ret = snprintf(path, sizeof(path), "%s/"
+                       DEFAULT_LTTNG_SESSIOND_JULPORT_FILE, rundir);
+       if (ret < 0) {
+               PERROR("snprintf julport path");
+               goto error;
+       }
+
+       /*
+        * Create the TCP JUL port file in the rundir. The return value is of no
+        * importance: execution continues even if the file cannot be written.
+        */
+       (void) utils_create_pid_file(jul_tcp_port, path);
+
+error:
+       return;
+}
+
 /*
  * main
  */
@@ -4166,7 +4564,7 @@ int main(int argc, char **argv)
                DBG2("Kernel consumer cmd path: %s",
                                kconsumer_data.cmd_unix_sock_path);
        } else {
-               home_path = get_home_dir();
+               home_path = utils_get_home_dir();
                if (home_path == NULL) {
                        /* TODO: Add --socket PATH option */
                        ERR("Can't get HOME directory for sockets creation.");
@@ -4262,6 +4660,12 @@ int main(int argc, char **argv)
         */
        ust_app_ht_alloc();
 
+       /* Initialize JUL domain subsystem. */
+       if ((ret = jul_init()) < 0) {
+               /* ENOMEM at this point. */
+               goto error;
+       }
+
        /* After this point, we can safely call cleanup() with "goto exit" */
 
        /*
@@ -4365,6 +4769,22 @@ int main(int argc, char **argv)
        }
 
        write_pidfile();
+       write_julport();
+
+       /* Initialize communication library */
+       lttcomm_init();
+       /* This is to get the TCP timeout value. */
+       lttcomm_inet_init();
+
+       /*
+        * Initialize the health check subsystem. This call should set the
+        * appropriate time values.
+        */
+       health_sessiond = health_app_create(NR_HEALTH_SESSIOND_TYPES);
+       if (!health_sessiond) {
+               PERROR("health_app_create error");
+               goto exit_health_sessiond_cleanup;
+       }
 
        /* Create the hash table cleanup thread */
        ret = pthread_create(&ht_cleanup_thread, NULL,
@@ -4419,7 +4839,15 @@ int main(int argc, char **argv)
                        ust_thread_manage_notify, (void *) NULL);
        if (ret != 0) {
                PERROR("pthread_create apps");
-               goto exit_apps;
+               goto exit_apps_notify;
+       }
+
+       /* Create JUL registration thread. */
+       ret = pthread_create(&jul_reg_thread, NULL,
+                       jul_thread_manage_registration, (void *) NULL);
+       if (ret != 0) {
+               PERROR("pthread_create JUL");
+               goto exit_jul_reg;
        }
 
        /* Don't start this thread if kernel tracing is not requested nor root */
@@ -4440,12 +4868,27 @@ int main(int argc, char **argv)
        }
 
 exit_kernel:
+       ret = pthread_join(jul_reg_thread, &status);
+       if (ret != 0) {
+               PERROR("pthread_join JUL");
+               goto error;     /* join error, exit without cleanup */
+       }
+
+exit_jul_reg:
+       ret = pthread_join(apps_notify_thread, &status);
+       if (ret != 0) {
+               PERROR("pthread_join apps notify");
+               goto error;     /* join error, exit without cleanup */
+       }
+
+exit_apps_notify:
        ret = pthread_join(apps_thread, &status);
        if (ret != 0) {
-               PERROR("pthread_join");
+               PERROR("pthread_join apps");
                goto error;     /* join error, exit without cleanup */
        }
 
 exit_apps:
        ret = pthread_join(reg_apps_thread, &status);
        if (ret != 0) {
@@ -4499,6 +4942,8 @@ exit_health:
                goto error;     /* join error, exit without cleanup */
        }
 exit_ht_cleanup:
+       health_app_destroy(health_sessiond);
+exit_health_sessiond_cleanup:
 exit:
        /*
         * cleanup() is called when no other thread is running.