Move health into its own common/ static library
[lttng-tools.git] / src / bin / lttng-sessiond / main.c
index c952fc0a90851361ff47f5e0c97f3d81a6a7d04b..550aa1b83f68d65f01e936d1e81fb5308b0b2b4e 100644 (file)
 #include <grp.h>
 #include <limits.h>
 #include <pthread.h>
-#include <semaphore.h>
 #include <signal.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
+#include <inttypes.h>
 #include <sys/mman.h>
 #include <sys/mount.h>
 #include <sys/resource.h>
@@ -38,7 +38,6 @@
 #include <config.h>
 
 #include <common/common.h>
-#include <common/compat/poll.h>
 #include <common/compat/socket.h>
 #include <common/defaults.h>
 #include <common/kernel-consumer/kernel-consumer.h>
@@ -47,7 +46,9 @@
 #include <common/utils.h>
 
 #include "lttng-sessiond.h"
+#include "buffer-registry.h"
 #include "channel.h"
+#include "cmd.h"
 #include "consumer.h"
 #include "context.h"
 #include "event.h"
 #include "ust-consumer.h"
 #include "utils.h"
 #include "fd-limit.h"
-#include "filter.h"
-#include "health.h"
+#include "health-sessiond.h"
+#include "testpoint.h"
+#include "ust-thread.h"
 
 #define CONSUMERD_FILE "lttng-consumerd"
 
 /* Const values */
-const char default_home_dir[] = DEFAULT_HOME_DIR;
 const char default_tracing_group[] = DEFAULT_TRACING_GROUP;
-const char default_ust_sock_dir[] = DEFAULT_UST_SOCK_DIR;
-const char default_global_apps_pipe[] = DEFAULT_GLOBAL_APPS_PIPE;
 
 const char *progname;
 const char *opt_tracing_group;
+static const char *opt_pidfile;
 static int opt_sig_parent;
 static int opt_verbose_consumer;
 static int opt_daemon;
@@ -80,7 +80,10 @@ static int is_root;                  /* Set to 1 if the daemon is running as root */
 static pid_t ppid;          /* Parent PID for --sig-parent option */
 static char *rundir;
 
-/* Consumer daemon specific control data */
+/*
+ * Consumer daemon specific control data. Every value not initialized here is
+ * set to 0 by the static definition.
+ */
 static struct consumer_data kconsumer_data = {
        .type = LTTNG_CONSUMER_KERNEL,
        .err_unix_sock_path = DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
@@ -89,6 +92,8 @@ static struct consumer_data kconsumer_data = {
        .cmd_sock = -1,
        .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
        .lock = PTHREAD_MUTEX_INITIALIZER,
+       .cond = PTHREAD_COND_INITIALIZER,
+       .cond_mutex = PTHREAD_MUTEX_INITIALIZER,
 };
 static struct consumer_data ustconsumer64_data = {
        .type = LTTNG_CONSUMER64_UST,
@@ -98,6 +103,8 @@ static struct consumer_data ustconsumer64_data = {
        .cmd_sock = -1,
        .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
        .lock = PTHREAD_MUTEX_INITIALIZER,
+       .cond = PTHREAD_COND_INITIALIZER,
+       .cond_mutex = PTHREAD_MUTEX_INITIALIZER,
 };
 static struct consumer_data ustconsumer32_data = {
        .type = LTTNG_CONSUMER32_UST,
@@ -107,6 +114,8 @@ static struct consumer_data ustconsumer32_data = {
        .cmd_sock = -1,
        .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
        .lock = PTHREAD_MUTEX_INITIALIZER,
+       .cond = PTHREAD_COND_INITIALIZER,
+       .cond_mutex = PTHREAD_MUTEX_INITIALIZER,
 };
 
 /* Shared between threads */
@@ -124,7 +133,7 @@ static char health_unix_sock_path[PATH_MAX];
 /* Sockets and FDs */
 static int client_sock = -1;
 static int apps_sock = -1;
-static int kernel_tracer_fd = -1;
+int kernel_tracer_fd = -1;
 static int kernel_poll_pipe[2] = { -1, -1 };
 
 /*
@@ -139,13 +148,17 @@ static int thread_quit_pipe[2] = { -1, -1 };
  */
 static int apps_cmd_pipe[2] = { -1, -1 };
 
+int apps_cmd_notify_pipe[2] = { -1, -1 };
+
 /* Pthread, Mutexes and Semaphores */
 static pthread_t apps_thread;
+static pthread_t apps_notify_thread;
 static pthread_t reg_apps_thread;
 static pthread_t client_thread;
 static pthread_t kernel_thread;
 static pthread_t dispatch_thread;
 static pthread_t health_thread;
+static pthread_t ht_cleanup_thread;
 
 /*
  * UST registration command queue. This queue is tied with a futex and uses a N
@@ -176,6 +189,8 @@ static const char *consumerd64_bin = CONFIG_CONSUMERD64_BIN;
 static const char *consumerd32_libdir = CONFIG_CONSUMERD32_LIBDIR;
 static const char *consumerd64_libdir = CONFIG_CONSUMERD64_LIBDIR;
 
+static const char *module_proc_lttng = "/proc/lttng";
+
 /*
  * Consumer daemon state which is changed when spawning it, killing it or in
  * case of a fatal error.
@@ -211,20 +226,15 @@ static enum consumerd_state ust_consumerd_state;
 static enum consumerd_state kernel_consumerd_state;
 
 /*
- * Used to keep a unique index for each relayd socket created where this value
- * is associated with streams on the consumer so it can match the right relayd
- * to send to.
- *
- * This value should be incremented atomically for safety purposes and future
- * possible concurrent access.
+ * Socket timeout for receiving and sending in seconds.
  */
-static unsigned int relayd_net_seq_idx;
+static int app_socket_timeout;
+
+/* Set in main() with the current page size. */
+long page_size;
 
-/* Used for the health monitoring of the session daemon. See health.h */
-struct health_state health_thread_cmd;
-struct health_state health_thread_app_manage;
-struct health_state health_thread_app_reg;
-struct health_state health_thread_kernel;
+/* Application health monitoring */
+struct health_app *health_sessiond;
 
 static
 void setup_consumerd_path(void)
@@ -278,15 +288,11 @@ void setup_consumerd_path(void)
 /*
  * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
  */
-static int create_thread_poll_set(struct lttng_poll_event *events,
-               unsigned int size)
+int sessiond_set_thread_pollset(struct lttng_poll_event *events, size_t size)
 {
        int ret;
 
-       if (events == NULL || size == 0) {
-               ret = -1;
-               goto error;
-       }
+       assert(events);
 
        ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
        if (ret < 0) {
@@ -294,7 +300,7 @@ static int create_thread_poll_set(struct lttng_poll_event *events,
        }
 
        /* Add quit pipe */
-       ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN);
+       ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
        if (ret < 0) {
                goto error;
        }
@@ -310,7 +316,7 @@ error:
  *
  * Return 1 if it was triggered else 0;
  */
-static int check_thread_quit_pipe(int fd, uint32_t events)
+int sessiond_check_thread_quit_pipe(int fd, uint32_t events)
 {
        if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
                return 1;
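
For reference, a condensed sketch of how a sessiond thread is expected to consume these two renamed helpers (not part of this patch; example_thread() and my_pipe_fd are illustrative placeholders):

/* Minimal poll-loop sketch built on the renamed pollset helpers. */
static void *example_thread(void *data)
{
	int i, ret, pollfd, nb_fd;
	uint32_t revents;
	struct lttng_poll_event events;
	int my_pipe_fd = -1;	/* placeholder fd owned by this thread */

	/* Size 2: the thread quit pipe plus one monitored fd. */
	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error;
	}
	ret = lttng_poll_add(&events, my_pipe_fd, LPOLLIN);
	if (ret < 0) {
		goto error;
	}

	while (1) {
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			/* Restart interrupted system call. */
			if (errno == EINTR) {
				continue;
			}
			goto error;
		}
		nb_fd = ret;
		for (i = 0; i < nb_fd; i++) {
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);
			/* Quit pipe triggered: a clean exit was requested. */
			if (sessiond_check_thread_quit_pipe(pollfd, revents)) {
				goto exit;
			}
			/* ... handle my_pipe_fd events here ... */
		}
	}
exit:
error:
	lttng_poll_clean(&events);
	return NULL;
}
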
@@ -366,127 +372,67 @@ error:
 }
 
 /*
- * Complete teardown of a kernel session. This free all data structure related
- * to a kernel session and update counter.
+ * Stop all threads by closing the thread quit pipe.
  */
-static void teardown_kernel_session(struct ltt_session *session)
+static void stop_threads(void)
 {
        int ret;
-       struct lttng_ht_iter iter;
-       struct ltt_kernel_session *ksess;
-       struct consumer_socket *socket;
-
-       if (!session->kernel_session) {
-               DBG3("No kernel session when tearing down session");
-               return;
-       }
-
-       ksess = session->kernel_session;
 
-       DBG("Tearing down kernel session");
-
-       /*
-        * Destroy relayd associated with the session consumer. This action is
-        * valid since in order to destroy a session we must acquire the session
-        * lock. This means that there CAN NOT be stream(s) being sent to a
-        * consumer since this action also requires the session lock at any time.
-        *
-        * At this point, we are sure that not streams data will be lost after this
-        * command is issued.
-        */
-       if (ksess->consumer && ksess->consumer->type == CONSUMER_DST_NET) {
-               cds_lfht_for_each_entry(ksess->consumer->socks->ht, &iter.iter, socket,
-                               node.node) {
-                       ret = consumer_send_destroy_relayd(socket, ksess->consumer);
-                       if (ret < 0) {
-                               ERR("Unable to send destroy relayd command to consumer");
-                               /* Continue since we MUST delete everything at this point. */
-                       }
-               }
-       }
-
-       /*
-        * If a custom kernel consumer was registered, close the socket before
-        * tearing down the complete kernel session structure
-        */
-       cds_lfht_for_each_entry(ksess->consumer->socks->ht, &iter.iter, socket,
-                       node.node) {
-               if (socket->fd != kconsumer_data.cmd_sock) {
-                       rcu_read_lock();
-                       consumer_del_socket(socket, ksess->consumer);
-                       lttcomm_close_unix_sock(socket->fd);
-                       consumer_destroy_socket(socket);
-                       rcu_read_unlock();
-               }
+       /* Stopping all threads */
+       DBG("Terminating all threads");
+       ret = notify_thread_pipe(thread_quit_pipe[1]);
+       if (ret < 0) {
+               ERR("write error on thread quit pipe");
        }
 
-       trace_kernel_destroy_session(ksess);
+       /* Dispatch thread */
+       CMM_STORE_SHARED(dispatch_thread_exit, 1);
+       futex_nto1_wake(&ust_cmd_queue.futex);
 }
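
The futex_nto1_wake() above pairs with the dispatch thread's wait loop shown further down in this diff; roughly, the waiting side looks like the sketch below, where futex_nto1_wait() is assumed from the same futex helper family:

/* Dispatch-thread side of the wake-up performed by stop_threads() above. */
while (!CMM_LOAD_SHARED(dispatch_thread_exit)) {
	/* Arm the futex before checking the queue to avoid missed wake-ups. */
	futex_nto1_prepare(&ust_cmd_queue.futex);

	/* ... dequeue and process every pending registration command ... */

	/* Block until stop_threads() or a new registration wakes us up. */
	futex_nto1_wait(&ust_cmd_queue.futex);	/* assumed helper name */
}
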
 
 /*
- * Complete teardown of all UST sessions. This will free everything on his path
- * and destroy the core essence of all ust sessions :)
+ * Close every consumer socket.
  */
-static void teardown_ust_session(struct ltt_session *session)
+static void close_consumer_sockets(void)
 {
        int ret;
-       struct lttng_ht_iter iter;
-       struct ltt_ust_session *usess;
-       struct consumer_socket *socket;
 
-       if (!session->ust_session) {
-               DBG3("No UST session when tearing down session");
-               return;
+       if (kconsumer_data.err_sock >= 0) {
+               ret = close(kconsumer_data.err_sock);
+               if (ret < 0) {
+                       PERROR("kernel consumer err_sock close");
+               }
        }
-       usess = session->ust_session;
-
-       DBG("Tearing down UST session(s)");
-
-       /*
-        * Destroy relayd associated with the session consumer. This action is
-        * valid since in order to destroy a session we must acquire the session
-        * lock. This means that there CAN NOT be stream(s) being sent to a
-        * consumer since this action also requires the session lock at any time.
-        *
-        * At this point, we are sure that no data will be lost after this command
-        * is issued.
-        */
-       if (usess->consumer && usess->consumer->type == CONSUMER_DST_NET) {
-               cds_lfht_for_each_entry(usess->consumer->socks->ht, &iter.iter, socket,
-                               node.node) {
-                       ret = consumer_send_destroy_relayd(socket, usess->consumer);
-                       if (ret < 0) {
-                               ERR("Unable to send destroy relayd command to consumer");
-                               /* Continue since we MUST delete everything at this point. */
-                       }
+       if (ustconsumer32_data.err_sock >= 0) {
+               ret = close(ustconsumer32_data.err_sock);
+               if (ret < 0) {
+                       PERROR("UST consumerd32 err_sock close");
                }
        }
-
-       ret = ust_app_destroy_trace_all(usess);
-       if (ret) {
-               ERR("Error in ust_app_destroy_trace_all");
+       if (ustconsumer64_data.err_sock >= 0) {
+               ret = close(ustconsumer64_data.err_sock);
+               if (ret < 0) {
+                       PERROR("UST consumerd64 err_sock close");
+               }
        }
-
-       trace_ust_destroy_session(usess);
-}
-
-/*
- * Stop all threads by closing the thread quit pipe.
- */
-static void stop_threads(void)
-{
-       int ret;
-
-       /* Stopping all threads */
-       DBG("Terminating all threads");
-       ret = notify_thread_pipe(thread_quit_pipe[1]);
-       if (ret < 0) {
-               ERR("write error on thread quit pipe");
+       if (kconsumer_data.cmd_sock >= 0) {
+               ret = close(kconsumer_data.cmd_sock);
+               if (ret < 0) {
+                       PERROR("kernel consumer cmd_sock close");
+               }
+       }
+       if (ustconsumer32_data.cmd_sock >= 0) {
+               ret = close(ustconsumer32_data.cmd_sock);
+               if (ret < 0) {
+                       PERROR("UST consumerd32 cmd_sock close");
+               }
+       }
+       if (ustconsumer64_data.cmd_sock >= 0) {
+               ret = close(ustconsumer64_data.cmd_sock);
+               if (ret < 0) {
+                       PERROR("UST consumerd64 cmd_sock close");
+               }
        }
-
-       /* Dispatch thread */
-       CMM_STORE_SHARED(dispatch_thread_exit, 1);
-       futex_nto1_wake(&ust_cmd_queue.futex);
 }
 
 /*
@@ -495,11 +441,28 @@ static void stop_threads(void)
 static void cleanup(void)
 {
        int ret;
-       char *cmd;
+       char *cmd = NULL;
        struct ltt_session *sess, *stmp;
 
        DBG("Cleaning up");
 
+       /*
+        * Close the thread quit pipe. It has already done its job, since this
+        * cleanup function is now being called.
+        */
+       utils_close_pipe(thread_quit_pipe);
+
+       /*
+        * If opt_pidfile is undefined, the default file will be wiped when
+        * removing the rundir.
+        */
+       if (opt_pidfile) {
+               ret = remove(opt_pidfile);
+               if (ret < 0) {
+                       PERROR("remove pidfile %s", opt_pidfile);
+               }
+       }
+
        DBG("Removing %s directory", rundir);
        ret = asprintf(&cmd, "rm -rf %s", rundir);
        if (ret < 0) {
@@ -512,6 +475,7 @@ static void cleanup(void)
                ERR("Unable to clean %s", rundir);
        }
        free(cmd);
+       free(rundir);
 
        DBG("Cleaning up all sessions");
 
@@ -522,14 +486,13 @@ static void cleanup(void)
                /* Cleanup ALL session */
                cds_list_for_each_entry_safe(sess, stmp,
                                &session_list_ptr->head, list) {
-                       teardown_kernel_session(sess);
-                       teardown_ust_session(sess);
-                       free(sess);
+                       cmd_destroy_session(sess, kernel_poll_pipe[1]);
                }
        }
 
        DBG("Closing all UST sockets");
        ust_app_clean_list();
+       buffer_reg_destroy_registries();
 
        if (is_root && !opt_no_kernel) {
                DBG2("Closing kernel fd");
@@ -542,9 +505,8 @@ static void cleanup(void)
                DBG("Unloading kernel modules");
                modprobe_remove_lttng_all();
        }
-       utils_close_pipe(kernel_poll_pipe);
-       utils_close_pipe(thread_quit_pipe);
-       utils_close_pipe(apps_cmd_pipe);
+
+       close_consumer_sockets();
 
        /* <fun> */
        DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
@@ -561,7 +523,7 @@ static void cleanup(void)
 static int send_unix_sock(int sock, void *buf, size_t len)
 {
        /* Check valid length */
-       if (len <= 0) {
+       if (len == 0) {
                return -1;
        }
 
@@ -716,6 +678,8 @@ static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
                                if (ret < 0) {
                                        goto error;
                                }
+                               /* Update the stream global counter */
+                               ksess->stream_count_global += ret;
 
                                /*
                                 * Have we already sent fds to the consumer? If yes, it means
@@ -726,20 +690,20 @@ static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
                                        struct lttng_ht_iter iter;
                                        struct consumer_socket *socket;
 
-
+                                       rcu_read_lock();
                                        cds_lfht_for_each_entry(ksess->consumer->socks->ht,
                                                        &iter.iter, socket, node.node) {
-                                               /* Code flow error */
-                                               assert(socket->fd >= 0);
-
                                                pthread_mutex_lock(socket->lock);
-                                               ret = kernel_consumer_send_channel_stream(socket->fd,
-                                                               channel, ksess);
+                                               ret = kernel_consumer_send_channel_stream(socket,
+                                                               channel, ksess,
+                                                               session->output_traces ? 1 : 0);
                                                pthread_mutex_unlock(socket->lock);
                                                if (ret < 0) {
+                                                       rcu_read_unlock();
                                                        goto error;
                                                }
                                        }
+                                       rcu_read_unlock();
                                }
                                goto error;
                        }
@@ -756,13 +720,18 @@ error:
 }
 
 /*
- * For each tracing session, update newly registered apps.
+ * For each tracing session, update newly registered apps. The session list
+ * lock MUST be acquired before calling this.
  */
 static void update_ust_app(int app_sock)
 {
        struct ltt_session *sess, *stmp;
 
-       session_lock_list();
+       /* Consumer is in an ERROR state. Stop any application update. */
+       if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
+               /* Stop the update process since the consumer is dead. */
+               return;
+       }
 
        /* For all tracing session(s) */
        cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
@@ -772,8 +741,6 @@ static void update_ust_app(int app_sock)
                }
                session_unlock(sess);
        }
-
-       session_unlock_list();
 }
 
 /*
@@ -789,30 +756,44 @@ static void *thread_manage_kernel(void *data)
        char tmp;
        struct lttng_poll_event events;
 
-       DBG("Thread manage kernel started");
+       DBG("[thread] Thread manage kernel started");
 
-       health_code_update(&health_thread_kernel);
+       health_register(health_sessiond, HEALTH_TYPE_KERNEL);
 
-       ret = create_thread_poll_set(&events, 2);
-       if (ret < 0) {
-               goto error_poll_create;
+       /*
+        * The first step of the while loop cleans this structure, which could
+        * free non-NULL pointers, so initialize it before the loop.
+        */
+       lttng_poll_init(&events);
+
+       if (testpoint(thread_manage_kernel)) {
+               goto error_testpoint;
        }
 
-       ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);
-       if (ret < 0) {
-               goto error;
+       health_code_update();
+
+       if (testpoint(thread_manage_kernel_before_loop)) {
+               goto error_testpoint;
        }
 
        while (1) {
-               health_code_update(&health_thread_kernel);
+               health_code_update();
 
                if (update_poll_flag == 1) {
-                       /*
-                        * Reset number of fd in the poll set. Always 2 since there is the thread
-                        * quit pipe and the kernel pipe.
-                        */
-                       events.nb_fd = 2;
+                       /* Clean events object. We are about to populate it again. */
+                       lttng_poll_clean(&events);
+
+                       ret = sessiond_set_thread_pollset(&events, 2);
+                       if (ret < 0) {
+                               goto error_poll_create;
+                       }
+
+                       ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);
+                       if (ret < 0) {
+                               goto error;
+                       }
 
+                       /* This will add the available kernel channel if any. */
                        ret = update_kernel_poll(&events);
                        if (ret < 0) {
                                goto error;
@@ -820,18 +801,13 @@ static void *thread_manage_kernel(void *data)
                        update_poll_flag = 0;
                }
 
-               nb_fd = LTTNG_POLL_GETNB(&events);
-
-               DBG("Thread kernel polling on %d fds", nb_fd);
-
-               /* Zeroed the poll events */
-               lttng_poll_reset(&events);
+               DBG("Thread kernel polling on %d fds", LTTNG_POLL_GETNB(&events));
 
                /* Poll infinite value of time */
        restart:
-               health_poll_update(&health_thread_kernel);
+               health_poll_entry();
                ret = lttng_poll_wait(&events, -1);
-               health_poll_update(&health_thread_kernel);
+               health_poll_exit();
                if (ret < 0) {
                        /*
                         * Restart interrupted system call.
@@ -847,15 +823,17 @@ static void *thread_manage_kernel(void *data)
                        continue;
                }
 
+               nb_fd = ret;
+
                for (i = 0; i < nb_fd; i++) {
                        /* Fetch once the poll data */
                        revents = LTTNG_POLL_GETEV(&events, i);
                        pollfd = LTTNG_POLL_GETFD(&events, i);
 
-                       health_code_update(&health_thread_kernel);
+                       health_code_update();
 
                        /* Thread quit pipe has been closed. Killing thread. */
-                       ret = check_thread_quit_pipe(pollfd, revents);
+                       ret = sessiond_check_thread_quit_pipe(pollfd, revents);
                        if (ret) {
                                err = 0;
                                goto exit;
@@ -863,7 +841,13 @@ static void *thread_manage_kernel(void *data)
 
                        /* Check for data on kernel pipe */
                        if (pollfd == kernel_poll_pipe[0] && (revents & LPOLLIN)) {
-                               ret = read(kernel_poll_pipe[0], &tmp, 1);
+                               do {
+                                       ret = read(kernel_poll_pipe[0], &tmp, 1);
+                               } while (ret < 0 && errno == EINTR);
+                               /*
+                                * The return value is useless here; if this pipe gets any
+                                * action, an update is required anyway.
+                                */
                                update_poll_flag = 1;
                                continue;
                        } else {
@@ -890,15 +874,43 @@ exit:
 error:
        lttng_poll_clean(&events);
 error_poll_create:
+error_testpoint:
+       utils_close_pipe(kernel_poll_pipe);
+       kernel_poll_pipe[0] = kernel_poll_pipe[1] = -1;
        if (err) {
-               health_error(&health_thread_kernel);
+               health_error();
                ERR("Health error occurred in %s", __func__);
+               WARN("Kernel thread died unexpectedly. "
+                               "Kernel tracing can continue but CPU hotplug is disabled.");
        }
-       health_exit(&health_thread_kernel);
+       health_unregister(health_sessiond);
        DBG("Kernel thread dying");
        return NULL;
 }
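
Since this patch moves health monitoring into the common/ library, every sessiond thread now follows the same register/update/unregister lifecycle seen above. A condensed sketch of that pattern (should_quit() is a placeholder, and the HEALTH_TYPE_* constant is whatever the given thread registers as):

/* Condensed per-thread lifecycle of the common/ health API. */
static void *health_lifecycle_sketch(void *data)
{
	int err = -1;

	/* Register this thread against the session daemon health state. */
	health_register(health_sessiond, HEALTH_TYPE_KERNEL);

	while (!should_quit()) {	/* should_quit() is a placeholder */
		/* Bump the progress counter so the health check sees activity. */
		health_code_update();

		/* Flag the thread as legitimately blocked while polling. */
		health_poll_entry();
		/* ... lttng_poll_wait(&events, -1) goes here ... */
		health_poll_exit();

		/* ... handle events, calling health_code_update() regularly ... */
	}
	err = 0;

	if (err) {
		/* Abnormal exit: mark this thread as unhealthy. */
		health_error();
	}
	health_unregister(health_sessiond);
	return NULL;
}
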
 
+/*
+ * Signal the consumer data's pthread condition so waiters learn the thread state.
+ */
+static void signal_consumer_condition(struct consumer_data *data, int state)
+{
+       pthread_mutex_lock(&data->cond_mutex);
+
+       /*
+        * The state is set before signaling. It can be any value; it's the
+        * waiter's job to correctly interpret this state, associated with the
+        * consumer pthread_cond.
+        *
+        * A value of 0 means that the corresponding thread of the consumer data
+        * was not started. 1 indicates that the thread has started and is ready
+        * for action. A negative value means that there was an error during the
+        * thread bootstrap.
+        */
+       data->consumer_thread_is_ready = state;
+       (void) pthread_cond_signal(&data->cond);
+
+       pthread_mutex_unlock(&data->cond_mutex);
+}
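
The consumer spawn path (not shown in this excerpt) consumes this signal; its waiting side is expected to look roughly like the following sketch, where wait_for_consumer_ready() is a hypothetical helper name and the real code also applies a timeout:

/* Sketch of the waiter side matching signal_consumer_condition(). */
static int wait_for_consumer_ready(struct consumer_data *data)
{
	int ret = 0;

	pthread_mutex_lock(&data->cond_mutex);
	/* 0 means the consumer thread has not reported its state yet. */
	while (data->consumer_thread_is_ready == 0) {
		ret = pthread_cond_wait(&data->cond, &data->cond_mutex);
		if (ret != 0) {
			break;
		}
	}
	pthread_mutex_unlock(&data->cond_mutex);

	/* A negative state means the consumer thread failed its bootstrap. */
	if (data->consumer_thread_is_ready < 0) {
		ret = -1;
	}
	return ret;
}
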
+
 /*
  * This thread manage the consumer error sent back to the session daemon.
  */
@@ -912,36 +924,41 @@ static void *thread_manage_consumer(void *data)
 
        DBG("[thread] Manage consumer started");
 
-       health_code_update(&consumer_data->health);
+       health_register(health_sessiond, HEALTH_TYPE_CONSUMER);
 
-       ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
-       if (ret < 0) {
-               goto error_listen;
-       }
+       health_code_update();
 
        /*
-        * Pass 2 as size here for the thread quit pipe and kconsumerd_err_sock.
-        * Nothing more will be added to this poll set.
+        * Pass 3 as size here for the thread quit pipe, consumerd_err_sock and the
+        * metadata_sock. Nothing more will be added to this poll set.
         */
-       ret = create_thread_poll_set(&events, 2);
+       ret = sessiond_set_thread_pollset(&events, 3);
        if (ret < 0) {
                goto error_poll;
        }
 
+       /*
+        * The error socket here is already in a listening state which was done
+        * just before spawning this thread to avoid a race between the consumer
+        * daemon exec trying to connect and the listen() call.
+        */
        ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
        if (ret < 0) {
                goto error;
        }
 
-       nb_fd = LTTNG_POLL_GETNB(&events);
-
-       health_code_update(&consumer_data->health);
+       health_code_update();
 
-       /* Inifinite blocking call, waiting for transmission */
+       /* Infinite blocking call, waiting for transmission */
 restart:
-       health_poll_update(&consumer_data->health);
+       health_poll_entry();
+
+       if (testpoint(thread_manage_consumer)) {
+               goto error;
+       }
+
        ret = lttng_poll_wait(&events, -1);
-       health_poll_update(&consumer_data->health);
+       health_poll_exit();
        if (ret < 0) {
                /*
                 * Restart interrupted system call.
@@ -952,15 +969,17 @@ restart:
                goto error;
        }
 
+       nb_fd = ret;
+
        for (i = 0; i < nb_fd; i++) {
                /* Fetch once the poll data */
                revents = LTTNG_POLL_GETEV(&events, i);
                pollfd = LTTNG_POLL_GETFD(&events, i);
 
-               health_code_update(&consumer_data->health);
+               health_code_update();
 
                /* Thread quit pipe has been closed. Killing thread. */
-               ret = check_thread_quit_pipe(pollfd, revents);
+               ret = sessiond_check_thread_quit_pipe(pollfd, revents);
                if (ret) {
                        err = 0;
                        goto exit;
@@ -980,7 +999,13 @@ restart:
                goto error;
        }
 
-       health_code_update(&consumer_data->health);
+       /*
+        * Set the CLOEXEC flag. Return code is useless because either way, the
+        * show must go on.
+        */
+       (void) utils_set_fd_cloexec(sock);
+
+       health_code_update();
 
        DBG2("Receiving code from consumer err_sock");
 
@@ -991,93 +1016,139 @@ restart:
                goto error;
        }
 
-       health_code_update(&consumer_data->health);
+       health_code_update();
 
-       if (code == CONSUMERD_COMMAND_SOCK_READY) {
+       if (code == LTTCOMM_CONSUMERD_COMMAND_SOCK_READY) {
+               /* Connect both socket, command and metadata. */
                consumer_data->cmd_sock =
                        lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
-               if (consumer_data->cmd_sock < 0) {
-                       sem_post(&consumer_data->sem);
-                       PERROR("consumer connect");
+               consumer_data->metadata_fd =
+                       lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
+               if (consumer_data->cmd_sock < 0
+                               || consumer_data->metadata_fd < 0) {
+                       PERROR("consumer connect cmd socket");
+                       /* On error, signal condition and quit. */
+                       signal_consumer_condition(consumer_data, -1);
+                       goto error;
+               }
+               consumer_data->metadata_sock.fd_ptr = &consumer_data->metadata_fd;
+               /* Create metadata socket lock. */
+               consumer_data->metadata_sock.lock = zmalloc(sizeof(pthread_mutex_t));
+               if (consumer_data->metadata_sock.lock == NULL) {
+                       PERROR("zmalloc pthread mutex");
+                       ret = -1;
                        goto error;
                }
-               /* Signal condition to tell that the kconsumerd is ready */
-               sem_post(&consumer_data->sem);
-               DBG("consumer command socket ready");
+               pthread_mutex_init(consumer_data->metadata_sock.lock, NULL);
+
+               signal_consumer_condition(consumer_data, 1);
+               DBG("Consumer command socket ready (fd: %d)", consumer_data->cmd_sock);
+               DBG("Consumer metadata socket ready (fd: %d)",
+                               consumer_data->metadata_fd);
        } else {
                ERR("consumer error when waiting for SOCK_READY : %s",
                                lttcomm_get_readable_code(-code));
                goto error;
        }
 
-       /* Remove the kconsumerd error sock since we've established a connexion */
+       /* Remove the consumerd error sock since we've established a connection */
        ret = lttng_poll_del(&events, consumer_data->err_sock);
        if (ret < 0) {
                goto error;
        }
 
+       /* Add new accepted error socket. */
        ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
        if (ret < 0) {
                goto error;
        }
 
-       health_code_update(&consumer_data->health);
+       /* Add metadata socket that is successfully connected. */
+       ret = lttng_poll_add(&events, consumer_data->metadata_fd,
+                       LPOLLIN | LPOLLRDHUP);
+       if (ret < 0) {
+               goto error;
+       }
 
-       /* Update number of fd */
-       nb_fd = LTTNG_POLL_GETNB(&events);
+       health_code_update();
 
-       /* Inifinite blocking call, waiting for transmission */
+       /* Infinite blocking call, waiting for transmission */
 restart_poll:
-       health_poll_update(&consumer_data->health);
-       ret = lttng_poll_wait(&events, -1);
-       health_poll_update(&consumer_data->health);
-       if (ret < 0) {
-               /*
-                * Restart interrupted system call.
-                */
-               if (errno == EINTR) {
-                       goto restart_poll;
+       while (1) {
+               health_poll_entry();
+               ret = lttng_poll_wait(&events, -1);
+               health_poll_exit();
+               if (ret < 0) {
+                       /*
+                        * Restart interrupted system call.
+                        */
+                       if (errno == EINTR) {
+                               goto restart_poll;
+                       }
+                       goto error;
                }
-               goto error;
-       }
 
-       for (i = 0; i < nb_fd; i++) {
-               /* Fetch once the poll data */
-               revents = LTTNG_POLL_GETEV(&events, i);
-               pollfd = LTTNG_POLL_GETFD(&events, i);
+               nb_fd = ret;
 
-               health_code_update(&consumer_data->health);
+               for (i = 0; i < nb_fd; i++) {
+                       /* Fetch once the poll data */
+                       revents = LTTNG_POLL_GETEV(&events, i);
+                       pollfd = LTTNG_POLL_GETFD(&events, i);
 
-               /* Thread quit pipe has been closed. Killing thread. */
-               ret = check_thread_quit_pipe(pollfd, revents);
-               if (ret) {
-                       err = 0;
-                       goto exit;
-               }
+                       health_code_update();
 
-               /* Event on the kconsumerd socket */
-               if (pollfd == sock) {
-                       if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
-                               ERR("consumer err socket second poll error");
-                               goto error;
+                       /* Thread quit pipe has been closed. Killing thread. */
+                       ret = sessiond_check_thread_quit_pipe(pollfd, revents);
+                       if (ret) {
+                               err = 0;
+                               goto exit;
                        }
-               }
-       }
 
-       health_code_update(&consumer_data->health);
+                       if (pollfd == sock) {
+                               /* Event on the consumerd socket */
+                               if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+                                       ERR("consumer err socket second poll error");
+                                       goto error;
+                               }
+                               health_code_update();
+                               /* Wait for any kconsumerd error */
+                               ret = lttcomm_recv_unix_sock(sock, &code,
+                                               sizeof(enum lttcomm_return_code));
+                               if (ret <= 0) {
+                                       ERR("consumer closed the command socket");
+                                       goto error;
+                               }
 
-       /* Wait for any kconsumerd error */
-       ret = lttcomm_recv_unix_sock(sock, &code,
-                       sizeof(enum lttcomm_return_code));
-       if (ret <= 0) {
-               ERR("consumer closed the command socket");
-               goto error;
-       }
+                               ERR("consumer return code : %s",
+                                               lttcomm_get_readable_code(-code));
 
-       ERR("consumer return code : %s", lttcomm_get_readable_code(-code));
+                               goto exit;
+                       } else if (pollfd == consumer_data->metadata_fd) {
+                               /* UST metadata requests */
+                               ret = ust_consumer_metadata_request(
+                                               &consumer_data->metadata_sock);
+                               if (ret < 0) {
+                                       ERR("Handling metadata request");
+                                       goto error;
+                               }
+                               break;
+                       } else {
+                               ERR("Unknown pollfd");
+                               goto error;
+                       }
+               }
+               health_code_update();
+       }
 
 exit:
 error:
+       /*
+        * We lock here because we are about to close the sockets and some other
+        * thread might be using them, so get exclusive access, which will abort
+        * all other consumer commands from other threads.
+        */
+       pthread_mutex_lock(&consumer_data->lock);
+
        /* Immediately set the consumerd state to stopped */
        if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
                uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR);
@@ -1094,13 +1165,22 @@ error:
                if (ret) {
                        PERROR("close");
                }
+               consumer_data->err_sock = -1;
        }
        if (consumer_data->cmd_sock >= 0) {
                ret = close(consumer_data->cmd_sock);
                if (ret) {
                        PERROR("close");
                }
+               consumer_data->cmd_sock = -1;
+       }
+       if (*consumer_data->metadata_sock.fd_ptr >= 0) {
+               ret = close(*consumer_data->metadata_sock.fd_ptr);
+               if (ret) {
+                       PERROR("close");
+               }
        }
+
        if (sock >= 0) {
                ret = close(sock);
                if (ret) {
@@ -1111,15 +1191,19 @@ error:
        unlink(consumer_data->err_unix_sock_path);
        unlink(consumer_data->cmd_unix_sock_path);
        consumer_data->pid = 0;
+       pthread_mutex_unlock(&consumer_data->lock);
+
+       /* Cleanup metadata socket mutex. */
+       pthread_mutex_destroy(consumer_data->metadata_sock.lock);
+       free(consumer_data->metadata_sock.lock);
 
        lttng_poll_clean(&events);
 error_poll:
-error_listen:
        if (err) {
-               health_error(&consumer_data->health);
+               health_error();
                ERR("Health error occurred in %s", __func__);
        }
-       health_exit(&consumer_data->health);
+       health_unregister(health_sessiond);
        DBG("consumer thread cleanup completed");
 
        return NULL;
@@ -1132,7 +1216,6 @@ static void *thread_manage_apps(void *data)
 {
        int i, ret, pollfd, err = -1;
        uint32_t revents, nb_fd;
-       struct ust_command ust_cmd;
        struct lttng_poll_event events;
 
        DBG("[thread] Manage application started");
@@ -1140,9 +1223,15 @@ static void *thread_manage_apps(void *data)
        rcu_register_thread();
        rcu_thread_online();
 
-       health_code_update(&health_thread_app_manage);
+       health_register(health_sessiond, HEALTH_TYPE_APP_MANAGE);
+
+       if (testpoint(thread_manage_apps)) {
+               goto error_testpoint;
+       }
+
+       health_code_update();
 
-       ret = create_thread_poll_set(&events, 2);
+       ret = sessiond_set_thread_pollset(&events, 2);
        if (ret < 0) {
                goto error_poll_create;
        }
@@ -1152,21 +1241,20 @@ static void *thread_manage_apps(void *data)
                goto error;
        }
 
-       health_code_update(&health_thread_app_manage);
-
-       while (1) {
-               /* Zeroed the events structure */
-               lttng_poll_reset(&events);
+       if (testpoint(thread_manage_apps_before_loop)) {
+               goto error;
+       }
 
-               nb_fd = LTTNG_POLL_GETNB(&events);
+       health_code_update();
 
-               DBG("Apps thread polling on %d fds", nb_fd);
+       while (1) {
+               DBG("Apps thread polling on %d fds", LTTNG_POLL_GETNB(&events));
 
                /* Inifinite blocking call, waiting for transmission */
        restart:
-               health_poll_update(&health_thread_app_manage);
+               health_poll_entry();
                ret = lttng_poll_wait(&events, -1);
-               health_poll_update(&health_thread_app_manage);
+               health_poll_exit();
                if (ret < 0) {
                        /*
                         * Restart interrupted system call.
@@ -1177,15 +1265,17 @@ static void *thread_manage_apps(void *data)
                        goto error;
                }
 
+               nb_fd = ret;
+
                for (i = 0; i < nb_fd; i++) {
                        /* Fetch once the poll data */
                        revents = LTTNG_POLL_GETEV(&events, i);
                        pollfd = LTTNG_POLL_GETFD(&events, i);
 
-                       health_code_update(&health_thread_app_manage);
+                       health_code_update();
 
                        /* Thread quit pipe has been closed. Killing thread. */
-                       ret = check_thread_quit_pipe(pollfd, revents);
+                       ret = sessiond_check_thread_quit_pipe(pollfd, revents);
                        if (ret) {
                                err = 0;
                                goto exit;
@@ -1197,64 +1287,45 @@ static void *thread_manage_apps(void *data)
                                        ERR("Apps command pipe error");
                                        goto error;
                                } else if (revents & LPOLLIN) {
+                                       int sock;
+
                                        /* Empty pipe */
-                                       ret = read(apps_cmd_pipe[0], &ust_cmd, sizeof(ust_cmd));
-                                       if (ret < 0 || ret < sizeof(ust_cmd)) {
+                                       do {
+                                               ret = read(apps_cmd_pipe[0], &sock, sizeof(sock));
+                                       } while (ret < 0 && errno == EINTR);
+                                       if (ret < 0 || ret < sizeof(sock)) {
                                                PERROR("read apps cmd pipe");
                                                goto error;
                                        }
 
-                                       health_code_update(&health_thread_app_manage);
+                                       health_code_update();
 
-                                       /* Register applicaton to the session daemon */
-                                       ret = ust_app_register(&ust_cmd.reg_msg,
-                                                       ust_cmd.sock);
-                                       if (ret == -ENOMEM) {
+                                       /*
+                                        * We only monitor the error events of the socket. This
+                                        * thread does not handle any incoming data from UST
+                                        * (POLLIN).
+                                        */
+                                       ret = lttng_poll_add(&events, sock,
+                                                       LPOLLERR | LPOLLHUP | LPOLLRDHUP);
+                                       if (ret < 0) {
                                                goto error;
-                                       } else if (ret < 0) {
-                                               break;
                                        }
 
-                                       health_code_update(&health_thread_app_manage);
-
                                        /*
-                                        * Validate UST version compatibility.
+                                        * Set socket timeout for both receiving and sending.
+                                        * app_socket_timeout is in seconds, whereas
+                                        * lttcomm_setsockopt_rcv_timeout and
+                                        * lttcomm_setsockopt_snd_timeout expect msec as
+                                        * parameter.
                                         */
-                                       ret = ust_app_validate_version(ust_cmd.sock);
-                                       if (ret >= 0) {
-                                               /*
-                                                * Add channel(s) and event(s) to newly registered apps
-                                                * from lttng global UST domain.
-                                                */
-                                               update_ust_app(ust_cmd.sock);
-                                       }
-
-                                       health_code_update(&health_thread_app_manage);
-
-                                       ret = ust_app_register_done(ust_cmd.sock);
-                                       if (ret < 0) {
-                                               /*
-                                                * If the registration is not possible, we simply
-                                                * unregister the apps and continue
-                                                */
-                                               ust_app_unregister(ust_cmd.sock);
-                                       } else {
-                                               /*
-                                                * We just need here to monitor the close of the UST
-                                                * socket and poll set monitor those by default.
-                                                * Listen on POLLIN (even if we never expect any
-                                                * data) to ensure that hangup wakes us.
-                                                */
-                                               ret = lttng_poll_add(&events, ust_cmd.sock, LPOLLIN);
-                                               if (ret < 0) {
-                                                       goto error;
-                                               }
+                                       (void) lttcomm_setsockopt_rcv_timeout(sock,
+                                                       app_socket_timeout * 1000);
+                                       (void) lttcomm_setsockopt_snd_timeout(sock,
+                                                       app_socket_timeout * 1000);
 
-                                               DBG("Apps with sock %d added to poll set",
-                                                               ust_cmd.sock);
-                                       }
+                                       DBG("Apps with sock %d added to poll set", sock);
 
-                                       health_code_update(&health_thread_app_manage);
+                                       health_code_update();
 
                                        break;
                                }
@@ -1276,7 +1347,7 @@ static void *thread_manage_apps(void *data)
                                }
                        }
 
-                       health_code_update(&health_thread_app_manage);
+                       health_code_update();
                }
        }
 
@@ -1284,11 +1355,21 @@ exit:
 error:
        lttng_poll_clean(&events);
 error_poll_create:
+error_testpoint:
+       utils_close_pipe(apps_cmd_pipe);
+       apps_cmd_pipe[0] = apps_cmd_pipe[1] = -1;
+
+       /*
+        * We don't clean the UST app hash table here since already registered
+        * applications can still be controlled so let them be until the session
+        * daemon dies or the applications stop.
+        */
+
        if (err) {
-               health_error(&health_thread_app_manage);
+               health_error();
                ERR("Health error occurred in %s", __func__);
        }
-       health_exit(&health_thread_app_manage);
+       health_unregister(health_sessiond);
        DBG("Application communication apps thread cleanup complete");
        rcu_thread_offline();
        rcu_unregister_thread();
@@ -1296,22 +1377,170 @@ error_poll_create:
 }
 
 /*
- * Dispatch request from the registration threads to the application
- * communication thread.
- */
-static void *thread_dispatch_ust_registration(void *data)
+ * Send a socket to a thread. This is called from the dispatch UST registration
+ * thread once all sockets are set for the application.
+ *
+ * The sock value can be invalid; we don't really care since the thread will
+ * handle it and do the necessary cleanup if so.
+ *
+ * On success, return 0. On error, return a negative value corresponding to the
+ * errno of the failed write().
+ */
+static int send_socket_to_thread(int fd, int sock)
 {
        int ret;
+
+       /*
+        * It's possible that the FD was concurrently set to -1 just before this
+        * function was called, which indicates a shutdown state of the thread.
+        */
+       if (fd < 0) {
+               ret = -EBADF;
+               goto error;
+       }
+
+       do {
+               ret = write(fd, &sock, sizeof(sock));
+       } while (ret < 0 && errno == EINTR);
+       if (ret < 0 || ret != sizeof(sock)) {
+               PERROR("write apps pipe %d", fd);
+               if (ret < 0) {
+                       ret = -errno;
+               }
+               goto error;
+       }
+
+       /* All good. Don't send back the write positive ret value. */
+       ret = 0;
+error:
+       return ret;
+}
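
Both application pipes are fed through this helper from the dispatch thread. A minimal usage sketch: the notify-pipe call appears later in this diff, while the command-pipe call lies beyond this excerpt and is shown here as an assumption:

/* Hand the per-application sockets off to their managing threads. */
ret = send_socket_to_thread(apps_cmd_notify_pipe[1], app->notify_sock);
if (ret < 0) {
	/* The notify thread is gone; abort this registration. */
	goto error;
}
ret = send_socket_to_thread(apps_cmd_pipe[1], app->sock);	/* assumed call site */
if (ret < 0) {
	goto error;
}
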
+
+/*
+ * Sanitize the wait queue of the dispatch registration thread by removing
+ * invalid nodes from it. This avoids memory leaks in the case where the UST
+ * notify socket is never received.
+ */
+static void sanitize_wait_queue(struct ust_reg_wait_queue *wait_queue)
+{
+       int ret, nb_fd = 0, i;
+       unsigned int fd_added = 0;
+       struct lttng_poll_event events;
+       struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
+
+       assert(wait_queue);
+
+       lttng_poll_init(&events);
+
+       /* Just skip everything for an empty queue. */
+       if (!wait_queue->count) {
+               goto end;
+       }
+
+       ret = lttng_poll_create(&events, wait_queue->count, LTTNG_CLOEXEC);
+       if (ret < 0) {
+               goto error_create;
+       }
+
+       cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
+                       &wait_queue->head, head) {
+               assert(wait_node->app);
+               ret = lttng_poll_add(&events, wait_node->app->sock,
+                               LPOLLHUP | LPOLLERR);
+               if (ret < 0) {
+                       goto error;
+               }
+
+               fd_added = 1;
+       }
+
+       if (!fd_added) {
+               goto end;
+       }
+
+       /*
+        * Poll but don't block so we can quickly identify the faulty events and
+        * clean them afterwards from the wait queue.
+        */
+       ret = lttng_poll_wait(&events, 0);
+       if (ret < 0) {
+               goto error;
+       }
+       nb_fd = ret;
+
+       for (i = 0; i < nb_fd; i++) {
+               /* Get faulty FD. */
+               uint32_t revents = LTTNG_POLL_GETEV(&events, i);
+               int pollfd = LTTNG_POLL_GETFD(&events, i);
+
+               cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
+                               &wait_queue->head, head) {
+                       if (pollfd == wait_node->app->sock &&
+                                       (revents & (LPOLLHUP | LPOLLERR))) {
+                               cds_list_del(&wait_node->head);
+                               wait_queue->count--;
+                               ust_app_destroy(wait_node->app);
+                               free(wait_node);
+                               break;
+                       }
+               }
+       }
+
+       if (nb_fd > 0) {
+               DBG("Wait queue sanitized, %d node(s) were cleaned up", nb_fd);
+       }
+
+end:
+       lttng_poll_clean(&events);
+       return;
+
+error:
+       lttng_poll_clean(&events);
+error_create:
+       ERR("Unable to sanitize wait queue");
+       return;
+}
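
For readability, the wait-queue types used here, declared elsewhere in the sessiond headers, are assumed to have roughly this shape:

/* Assumed shape of the dispatch wait-queue types (see the sessiond headers). */
struct ust_reg_wait_node {
	struct ust_app *app;		/* application waiting for its notify socket */
	struct cds_list_head head;	/* node in the wait queue list */
};

struct ust_reg_wait_queue {
	unsigned long count;		/* number of queued applications */
	struct cds_list_head head;	/* list of ust_reg_wait_node */
};
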
+
+/*
+ * Dispatch request from the registration threads to the application
+ * communication thread.
+ */
+static void *thread_dispatch_ust_registration(void *data)
+{
+       int ret, err = -1;
        struct cds_wfq_node *node;
        struct ust_command *ust_cmd = NULL;
+       struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
+       struct ust_reg_wait_queue wait_queue = {
+               .count = 0,
+       };
+
+       health_register(health_sessiond, HEALTH_TYPE_APP_REG_DISPATCH);
+
+       health_code_update();
+
+       CDS_INIT_LIST_HEAD(&wait_queue.head);
 
        DBG("[thread] Dispatch UST command started");
 
        while (!CMM_LOAD_SHARED(dispatch_thread_exit)) {
+               health_code_update();
+
                /* Atomically prepare the queue futex */
                futex_nto1_prepare(&ust_cmd_queue.futex);
 
                do {
+                       struct ust_app *app = NULL;
+                       ust_cmd = NULL;
+
+                       /*
+                        * Make sure we don't have node(s) that have hung up before receiving
+                        * the notify socket. This cleans the list in order to avoid memory
+                        * leaks from notify sockets that are never seen.
+                        */
+                       sanitize_wait_queue(&wait_queue);
+
+                       health_code_update();
                        /* Dequeue command for registration */
                        node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue);
                        if (node == NULL) {
@@ -1328,34 +1557,176 @@ static void *thread_dispatch_ust_registration(void *data)
                                        ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
                                        ust_cmd->sock, ust_cmd->reg_msg.name,
                                        ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
-                       /*
-                        * Inform apps thread of the new application registration. This
-                        * call is blocking so we can be assured that the data will be read
-                        * at some point in time or wait to the end of the world :)
-                        */
-                       ret = write(apps_cmd_pipe[1], ust_cmd,
-                                       sizeof(struct ust_command));
-                       if (ret < 0) {
-                               PERROR("write apps cmd pipe");
-                               if (errno == EBADF) {
+
+                       if (ust_cmd->reg_msg.type == USTCTL_SOCKET_CMD) {
+                               wait_node = zmalloc(sizeof(*wait_node));
+                               if (!wait_node) {
+                                       PERROR("zmalloc wait_node dispatch");
+                                       ret = close(ust_cmd->sock);
+                                       if (ret < 0) {
+                                               PERROR("close ust sock dispatch %d", ust_cmd->sock);
+                                       }
+                                       lttng_fd_put(1, LTTNG_FD_APPS);
+                                       free(ust_cmd);
+                                       goto error;
+                               }
+                               CDS_INIT_LIST_HEAD(&wait_node->head);
+
+                               /* Create application object if socket is CMD. */
+                               wait_node->app = ust_app_create(&ust_cmd->reg_msg,
+                                               ust_cmd->sock);
+                               if (!wait_node->app) {
+                                       ret = close(ust_cmd->sock);
+                                       if (ret < 0) {
+                                               PERROR("close ust sock dispatch %d", ust_cmd->sock);
+                                       }
+                                       lttng_fd_put(1, LTTNG_FD_APPS);
+                                       free(wait_node);
+                                       free(ust_cmd);
+                                       continue;
+                               }
+                               /*
+                                * Add application to the wait queue so we can set the notify
+                                * socket before putting this object in the global ht.
+                                */
+                               cds_list_add(&wait_node->head, &wait_queue.head);
+                               wait_queue.count++;
+
+                               free(ust_cmd);
+                               /*
+                                * We have to continue here since we don't have the notify
+                                * socket and the application MUST be added to the hash table
+                                * only at that moment.
+                                */
+                               continue;
+                       } else {
+                               /*
+                                * Look for the application in the local wait queue and set the
+                                * notify socket if found.
+                                */
+                               cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
+                                               &wait_queue.head, head) {
+                                       health_code_update();
+                                       if (wait_node->app->pid == ust_cmd->reg_msg.pid) {
+                                               wait_node->app->notify_sock = ust_cmd->sock;
+                                               cds_list_del(&wait_node->head);
+                                               wait_queue.count--;
+                                               app = wait_node->app;
+                                               free(wait_node);
+                                               DBG3("UST app notify socket %d is set", ust_cmd->sock);
+                                               break;
+                                       }
+                               }
+
+                               /*
+                                * If no application was found at this stage, the received
+                                * socket is useless, so close it before freeing the command
+                                * data structure for good.
+                                */
+                               if (!app) {
+                                       ret = close(ust_cmd->sock);
+                                       if (ret < 0) {
+                                               PERROR("close ust sock dispatch %d", ust_cmd->sock);
+                                       }
+                                       lttng_fd_put(1, LTTNG_FD_APPS);
+                               }
+                               free(ust_cmd);
+                       }
+
+                       if (app) {
+                               /*
+                                * @session_lock_list
+                                *
+                                * Lock the global session list so that, from the registration
+                                * up to the registration-done message, no other thread can see
+                                * the application and change its state.
+                                */
+                               session_lock_list();
+                               rcu_read_lock();
+
+                               /*
+                                * Add application to the global hash table. This needs to be
+                                * done before the update to the UST registry can locate the
+                                * application.
+                                */
+                               ust_app_add(app);
+
+                               /* Set app version. This call will print an error if needed. */
+                               (void) ust_app_version(app);
+
+                               /* Send notify socket through the notify pipe. */
+                               ret = send_socket_to_thread(apps_cmd_notify_pipe[1],
+                                               app->notify_sock);
+                               if (ret < 0) {
+                                       rcu_read_unlock();
+                                       session_unlock_list();
+                                       /*
+                                        * No notify thread, stop the UST tracing. However, this is
+                                        * not an internal error of this thread, so set the health
+                                        * exit code to a normal exit.
+                                        */
+                                       err = 0;
+                                       goto error;
+                               }
+
+                               /*
+                                * Update the newly registered application with the tracing
+                                * registry information that is already enabled.
+                                */
+                               update_ust_app(app->sock);
+
+                               /*
+                                * Don't care about the return value. Let the manage-apps thread
+                                * handle app unregistration upon socket close.
+                                */
+                               (void) ust_app_register_done(app->sock);
+
+                               /*
+                                * Even if the application socket has been closed, send the app
+                                * to the thread; unregistration will be handled there.
+                                */
+                               ret = send_socket_to_thread(apps_cmd_pipe[1], app->sock);
+                               if (ret < 0) {
+                                       rcu_read_unlock();
+                                       session_unlock_list();
                                        /*
-                                        * We can't inform the application thread to process
-                                        * registration. We will exit or else application
-                                        * registration will not occur and tracing will never
-                                        * start.
+                                        * No apps thread, stop the UST tracing. However, this is
+                                        * not an internal error of this thread, so set the health
+                                        * exit code to a normal exit.
                                         */
+                                       err = 0;
                                        goto error;
                                }
+
+                               rcu_read_unlock();
+                               session_unlock_list();
                        }
-                       free(ust_cmd);
                } while (node != NULL);
 
+               health_poll_entry();
                /* Futex wait on queue. Blocking call on futex() */
                futex_nto1_wait(&ust_cmd_queue.futex);
+               health_poll_exit();
        }
+       /* Normal exit, no error */
+       err = 0;
 
 error:
+       /* Clean up wait queue. */
+       cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
+                       &wait_queue.head, head) {
+               cds_list_del(&wait_node->head);
+               wait_queue.count--;
+               free(wait_node);
+       }
+
        DBG("Dispatch thread dying");
+       if (err) {
+               health_error();
+               ERR("Health error occurred in %s", __func__);
+       }
+       health_unregister(health_sessiond);
        return NULL;
 }
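
The dispatch loop above does a two-phase registration: an application first shows up on its command socket (USTCTL_SOCKET_CMD) and is parked in a local wait queue, and it is only published to the global hash table once the matching notify socket arrives for the same PID. Below is a minimal, standalone sketch of that pattern; it uses a plain singly-linked list instead of the urcu cds_list used above, and the names (struct pending_app, queue_pending(), complete_pending()) are illustrative, not part of lttng-tools.

#include <stdlib.h>
#include <sys/types.h>

struct pending_app {
        pid_t pid;              /* PID taken from the registration message */
        int cmd_sock;           /* command socket, received first */
        int notify_sock;        /* notify socket, filled in by the second phase */
        struct pending_app *next;
};

/* Phase 1: park an application seen on its command socket. */
static struct pending_app *queue_pending(struct pending_app **head,
                pid_t pid, int cmd_sock)
{
        struct pending_app *node = calloc(1, sizeof(*node));

        if (!node) {
                return NULL;
        }
        node->pid = pid;
        node->cmd_sock = cmd_sock;
        node->notify_sock = -1;
        node->next = *head;
        *head = node;
        return node;
}

/*
 * Phase 2: the notify socket for the same PID arrived. Detach the pending
 * node and hand it back with both sockets set, or NULL if the PID is unknown.
 */
static struct pending_app *complete_pending(struct pending_app **head,
                pid_t pid, int notify_sock)
{
        struct pending_app **it;

        for (it = head; *it; it = &(*it)->next) {
                if ((*it)->pid == pid) {
                        struct pending_app *node = *it;

                        *it = node->next;
                        node->next = NULL;
                        node->notify_sock = notify_sock;
                        return node;
                }
        }
        return NULL;
}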
 
@@ -1375,6 +1746,12 @@ static void *thread_registration_apps(void *data)
 
        DBG("[thread] Manage application registration started");
 
+       health_register(health_sessiond, HEALTH_TYPE_APP_REG);
+
+       if (testpoint(thread_registration_apps)) {
+               goto error_testpoint;
+       }
+
        ret = lttcomm_listen_unix_sock(apps_sock);
        if (ret < 0) {
                goto error_listen;
@@ -1384,7 +1761,7 @@ static void *thread_registration_apps(void *data)
         * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
         * more will be added to this poll set.
         */
-       ret = create_thread_poll_set(&events, 2);
+       ret = sessiond_set_thread_pollset(&events, 2);
        if (ret < 0) {
                goto error_create_poll;
        }
@@ -1406,13 +1783,11 @@ static void *thread_registration_apps(void *data)
        while (1) {
                DBG("Accepting application registration");
 
-               nb_fd = LTTNG_POLL_GETNB(&events);
-
                /* Infinite blocking call, waiting for transmission */
        restart:
-               health_poll_update(&health_thread_app_reg);
+               health_poll_entry();
                ret = lttng_poll_wait(&events, -1);
-               health_poll_update(&health_thread_app_reg);
+               health_poll_exit();
                if (ret < 0) {
                        /*
                         * Restart interrupted system call.
@@ -1423,15 +1798,17 @@ static void *thread_registration_apps(void *data)
                        goto error;
                }
 
+               nb_fd = ret;
+
                for (i = 0; i < nb_fd; i++) {
-                       health_code_update(&health_thread_app_reg);
+                       health_code_update();
 
                        /* Fetch once the poll data */
                        revents = LTTNG_POLL_GETEV(&events, i);
                        pollfd = LTTNG_POLL_GETFD(&events, i);
 
                        /* Thread quit pipe has been closed. Killing thread. */
-                       ret = check_thread_quit_pipe(pollfd, revents);
+                       ret = sessiond_check_thread_quit_pipe(pollfd, revents);
                        if (ret) {
                                err = 0;
                                goto exit;
@@ -1448,6 +1825,12 @@ static void *thread_registration_apps(void *data)
                                                goto error;
                                        }
 
+                                       /*
+                                        * Set the CLOEXEC flag. Return code is useless because
+                                        * either way, the show must go on.
+                                        */
+                                       (void) utils_set_fd_cloexec(sock);
+
                                        /* Create UST registration command for enqueuing */
                                        ust_cmd = zmalloc(sizeof(struct ust_command));
                                        if (ust_cmd == NULL) {
@@ -1470,16 +1853,12 @@ static void *thread_registration_apps(void *data)
                                                sock = -1;
                                                continue;
                                        }
-                                       health_code_update(&health_thread_app_reg);
-                                       ret = lttcomm_recv_unix_sock(sock, &ust_cmd->reg_msg,
-                                                       sizeof(struct ust_register_msg));
-                                       if (ret < 0 || ret < sizeof(struct ust_register_msg)) {
-                                               if (ret < 0) {
-                                                       PERROR("lttcomm_recv_unix_sock register apps");
-                                               } else {
-                                                       ERR("Wrong size received on apps register");
-                                               }
+
+                                       health_code_update();
+                                       ret = ust_app_recv_registration(sock, &ust_cmd->reg_msg);
+                                       if (ret < 0) {
                                                free(ust_cmd);
+                                               /* Close socket of the application. */
                                                ret = close(sock);
                                                if (ret) {
                                                        PERROR("close");
@@ -1488,7 +1867,7 @@ static void *thread_registration_apps(void *data)
                                                sock = -1;
                                                continue;
                                        }
-                                       health_code_update(&health_thread_app_reg);
+                                       health_code_update();
 
                                        ust_cmd->sock = sock;
                                        sock = -1;
@@ -1519,10 +1898,9 @@ static void *thread_registration_apps(void *data)
 exit:
 error:
        if (err) {
-               health_error(&health_thread_app_reg);
+               health_error();
                ERR("Health error occurred in %s", __func__);
        }
-       health_exit(&health_thread_app_reg);
 
        /* Notify that the registration thread is gone */
        notify_ust_apps(0);
@@ -1546,7 +1924,9 @@ error_poll_add:
        lttng_poll_clean(&events);
 error_listen:
 error_create_poll:
+error_testpoint:
        DBG("UST Registration thread cleanup complete");
+       health_unregister(health_sessiond);
 
        return NULL;
 }
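
A small but notable addition in this hunk is the (void) utils_set_fd_cloexec(sock) call on every accepted application socket, so the descriptor is not leaked into processes spawned later with exec() (such as the consumer daemons). As a rough illustration only, and not the actual lttng-tools helper, such a function typically boils down to:

#include <fcntl.h>

/* Mark a file descriptor close-on-exec; returns 0 on success, -1 on error. */
static int set_fd_cloexec(int fd)
{
        int flags;

        flags = fcntl(fd, F_GETFD, 0);
        if (flags < 0) {
                return -1;
        }
        return fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
}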
@@ -1557,59 +1937,110 @@ error_create_poll:
  */
 static int spawn_consumer_thread(struct consumer_data *consumer_data)
 {
-       int ret;
+       int ret, clock_ret;
        struct timespec timeout;
 
-       timeout.tv_sec = DEFAULT_SEM_WAIT_TIMEOUT;
-       timeout.tv_nsec = 0;
+       /* Make sure we set the readiness flag to 0 because we are NOT ready */
+       consumer_data->consumer_thread_is_ready = 0;
 
-       /* Setup semaphore */
-       ret = sem_init(&consumer_data->sem, 0, 0);
-       if (ret < 0) {
-               PERROR("sem_init consumer semaphore");
+       /* Setup pthread condition */
+       ret = pthread_condattr_init(&consumer_data->condattr);
+       if (ret != 0) {
+               errno = ret;
+               PERROR("pthread_condattr_init consumer data");
+               goto error;
+       }
+
+       /*
+        * Set the monotonic clock in order to make sure we DO NOT jump in time
+        * between the clock_gettime() call and the timedwait call. See bug #324
+        * for more details and how we noticed it.
+        */
+       ret = pthread_condattr_setclock(&consumer_data->condattr, CLOCK_MONOTONIC);
+       if (ret != 0) {
+               errno = ret;
+               PERROR("pthread_condattr_setclock consumer data");
+               goto error;
+       }
+
+       ret = pthread_cond_init(&consumer_data->cond, &consumer_data->condattr);
+       if (ret != 0) {
+               errno = ret;
+               PERROR("pthread_cond_init consumer data");
                goto error;
        }
 
-       ret = pthread_create(&consumer_data->thread, NULL,
-                       thread_manage_consumer, consumer_data);
+       ret = pthread_create(&consumer_data->thread, NULL, thread_manage_consumer,
+                       consumer_data);
        if (ret != 0) {
                PERROR("pthread_create consumer");
                ret = -1;
                goto error;
        }
 
+       /* We are about to wait on a pthread condition */
+       pthread_mutex_lock(&consumer_data->cond_mutex);
+
        /* Get time for the pthread_cond_timedwait absolute timeout */
-       ret = clock_gettime(CLOCK_REALTIME, &timeout);
-       if (ret < 0) {
-               PERROR("clock_gettime spawn consumer");
-               /* Infinite wait for the kconsumerd thread to be ready */
-               ret = sem_wait(&consumer_data->sem);
-       } else {
-               /* Normal timeout if the gettime was successful */
-               timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;
-               ret = sem_timedwait(&consumer_data->sem, &timeout);
+       clock_ret = clock_gettime(CLOCK_MONOTONIC, &timeout);
+       /*
+        * Set the timeout for the condition timed wait even if the clock gettime
+        * call fails, since we might loop on that call and we want to avoid
+        * incrementing the timeout too many times.
+        */
+       timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;
+
+       /*
+        * The following loop COULD be skipped in some conditions, which is why
+        * ret is set to 0 in order to make sure at least one round of the loop
+        * is done.
+        */
+       ret = 0;
+
+       /*
+        * Loop until the condition is signalled or a timeout is reached. Note
+        * that the POSIX pthread_cond_timedwait() man page specifies that EINTR
+        * can NOT be returned, but pthread_cond(3) from the glibc documentation
+        * says that it is possible. This loop does not take any chances and
+        * works with both behaviours.
+        */
+       while (!consumer_data->consumer_thread_is_ready && ret != ETIMEDOUT) {
+               if (clock_ret < 0) {
+                       PERROR("clock_gettime spawn consumer");
+                       /* Infinite wait for the consumerd thread to be ready */
+                       ret = pthread_cond_wait(&consumer_data->cond,
+                                       &consumer_data->cond_mutex);
+               } else {
+                       ret = pthread_cond_timedwait(&consumer_data->cond,
+                                       &consumer_data->cond_mutex, &timeout);
+               }
        }
 
-       if (ret < 0) {
-               if (errno == ETIMEDOUT) {
+       /* Release the condition mutex */
+       pthread_mutex_unlock(&consumer_data->cond_mutex);
+
+       if (ret != 0) {
+               errno = ret;
+               if (ret == ETIMEDOUT) {
                        /*
                         * Call has timed out so we kill the consumer thread and return
                         * an error.
                         */
-                       ERR("The consumer thread was never ready. Killing it");
+                       ERR("Condition timed out. The consumer thread was never ready."
+                                       " Killing it");
                        ret = pthread_cancel(consumer_data->thread);
                        if (ret < 0) {
                                PERROR("pthread_cancel consumer thread");
                        }
                } else {
-                       PERROR("semaphore wait failed consumer thread");
+                       PERROR("pthread_cond_wait failed consumer thread");
                }
                goto error;
        }
 
        pthread_mutex_lock(&consumer_data->pid_mutex);
        if (consumer_data->pid == 0) {
-               ERR("Kconsumerd did not start");
+               ERR("Consumerd did not start");
                pthread_mutex_unlock(&consumer_data->pid_mutex);
                goto error;
        }
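
The semaphore-based wait was replaced above by a pthread condition bound to CLOCK_MONOTONIC, with a loop around the timed wait so that spurious wakeups (or an unexpected EINTR) do not end the wait early and wall-clock adjustments cannot distort the timeout (bug #324). A standalone sketch of the same pattern, under hypothetical names (struct ready_flag and its helpers are not from the code base):

#include <errno.h>
#include <pthread.h>
#include <time.h>

struct ready_flag {
        pthread_mutex_t mutex;
        pthread_cond_t cond;
        pthread_condattr_t condattr;
        int ready;
};

static int ready_flag_init(struct ready_flag *f)
{
        int ret;

        f->ready = 0;
        pthread_mutex_init(&f->mutex, NULL);
        ret = pthread_condattr_init(&f->condattr);
        if (ret) {
                return ret;
        }
        /* Deadlines passed to pthread_cond_timedwait() use CLOCK_MONOTONIC. */
        ret = pthread_condattr_setclock(&f->condattr, CLOCK_MONOTONIC);
        if (ret) {
                return ret;
        }
        return pthread_cond_init(&f->cond, &f->condattr);
}

/* Wait up to timeout_sec for f->ready; loops over spurious wakeups. */
static int wait_until_ready(struct ready_flag *f, time_t timeout_sec)
{
        int ret = 0;
        struct timespec deadline;

        if (clock_gettime(CLOCK_MONOTONIC, &deadline) < 0) {
                return -1;
        }
        deadline.tv_sec += timeout_sec;

        pthread_mutex_lock(&f->mutex);
        while (!f->ready && ret != ETIMEDOUT) {
                ret = pthread_cond_timedwait(&f->cond, &f->mutex, &deadline);
        }
        pthread_mutex_unlock(&f->mutex);

        return (ret == ETIMEDOUT) ? -1 : 0;
}

On the signalling side, the peer thread would set ready to 1 and call pthread_cond_broadcast() while holding the mutex, which is presumably what thread_manage_consumer does once the consumer error socket is up.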
@@ -1627,10 +2058,10 @@ error:
 static int join_consumer_thread(struct consumer_data *consumer_data)
 {
        void *status;
-       int ret;
 
        /* Consumer pid must be a real one. */
        if (consumer_data->pid > 0) {
+               int ret;
                ret = kill(consumer_data->pid, SIGTERM);
                if (ret) {
                        ERR("Error killing consumer daemon");
@@ -1726,6 +2157,7 @@ static pid_t spawn_consumerd(struct consumer_data *consumer_data)
                                ret = putenv(tmpnew);
                                if (ret) {
                                        ret = -errno;
+                                       free(tmpnew);
                                        goto error;
                                }
                        }
@@ -1770,6 +2202,7 @@ static pid_t spawn_consumerd(struct consumer_data *consumer_data)
                                ret = putenv(tmpnew);
                                if (ret) {
                                        ret = -errno;
+                                       free(tmpnew);
                                        goto error;
                                }
                        }
@@ -1811,6 +2244,16 @@ static int start_consumerd(struct consumer_data *consumer_data)
 {
        int ret;
 
+       /*
+        * Set the listen() state on the socket since there is a possible race
+        * between the exec() of the consumer daemon and this call if placed in the
+        * consumer thread. See bug #366 for more details.
+        */
+       ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
+       if (ret < 0) {
+               goto error;
+       }
+
        pthread_mutex_lock(&consumer_data->pid_mutex);
        if (consumer_data->pid != 0) {
                pthread_mutex_unlock(&consumer_data->pid_mutex);
@@ -1840,6 +2283,15 @@ end:
        return 0;
 
 error:
+       /* Cleanup already created sockets on error. */
+       if (consumer_data->err_sock >= 0) {
+               int err;
+
+               err = close(consumer_data->err_sock);
+               if (err < 0) {
+                       PERROR("close consumer data error socket");
+               }
+       }
        return ret;
 }
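
Moving the listen() call into start_consumerd() closes the race noted above (bug #366): if the consumer daemon is exec()'d before the error socket is in the listening state, its connection back to the session daemon can race the listen() that used to happen in the consumer thread. The general rule is to have the Unix socket bound and listening before spawning the peer; a standalone sketch (path and names are illustrative, not the lttng-tools API):

#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

static int create_listening_sock(const char *path)
{
        int sock;
        struct sockaddr_un addr;

        sock = socket(AF_UNIX, SOCK_STREAM, 0);
        if (sock < 0) {
                return -1;
        }

        memset(&addr, 0, sizeof(addr));
        addr.sun_family = AF_UNIX;
        strncpy(addr.sun_path, path, sizeof(addr.sun_path) - 1);

        if (bind(sock, (struct sockaddr *) &addr, sizeof(addr)) < 0 ||
                        listen(sock, 10) < 0) {
                close(sock);
                return -1;
        }

        /* Only now is it safe to fork()/exec() the process that connects back. */
        return sock;
}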
 
@@ -1851,23 +2303,13 @@ static int check_consumer_health(void)
 {
        int ret;
 
-       ret = health_check_state(&kconsumer_data.health) &&
-               health_check_state(&ustconsumer32_data.health) &&
-               health_check_state(&ustconsumer64_data.health);
+       ret = health_check_state(health_sessiond, HEALTH_TYPE_CONSUMER);
 
        DBG3("Health consumer check %d", ret);
 
        return ret;
 }
 
-/*
- * Check version of the lttng-modules.
- */
-static int validate_lttng_modules_version(void)
-{
-       return kernel_validate_version(kernel_tracer_fd);
-}
-
 /*
  * Setup necessary data for kernel tracer action.
  */
@@ -1890,7 +2332,7 @@ static int init_kernel_tracer(void)
        }
 
        /* Validate kernel version */
-       ret = validate_lttng_modules_version();
+       ret = kernel_validate_version(kernel_tracer_fd);
        if (ret < 0) {
                goto error_version;
        }
@@ -1910,7 +2352,7 @@ error_version:
                PERROR("close");
        }
        kernel_tracer_fd = -1;
-       return LTTCOMM_KERN_VERSION;
+       return LTTNG_ERR_KERN_VERSION;
 
 error_modules:
        ret = close(kernel_tracer_fd);
@@ -1925,2453 +2367,198 @@ error:
        WARN("No kernel tracer available");
        kernel_tracer_fd = -1;
        if (!is_root) {
-               return LTTCOMM_NEED_ROOT_SESSIOND;
+               return LTTNG_ERR_NEED_ROOT_SESSIOND;
        } else {
-               return LTTCOMM_KERN_NA;
+               return LTTNG_ERR_KERN_NA;
        }
 }
 
+
 /*
- * Init tracing by creating trace directory and sending fds kernel consumer.
+ * Copy consumer output from the tracing session to the domain session. The
+ * function also applies the correct per-domain modification to the trace
+ * files destination directory.
+ *
+ * Should *NOT* be called with RCU read-side lock held.
  */
-static int init_kernel_tracing(struct ltt_kernel_session *session)
+static int copy_session_consumer(int domain, struct ltt_session *session)
 {
-       int ret = 0;
-       struct lttng_ht_iter iter;
-       struct consumer_socket *socket;
+       int ret;
+       const char *dir_name;
+       struct consumer_output *consumer;
 
        assert(session);
+       assert(session->consumer);
 
-       if (session->consumer_fds_sent == 0 && session->consumer != NULL) {
-               cds_lfht_for_each_entry(session->consumer->socks->ht, &iter.iter,
-                               socket, node.node) {
-                       /* Code flow error */
-                       assert(socket->fd >= 0);
-
-                       pthread_mutex_lock(socket->lock);
-                       ret = kernel_consumer_send_session(socket->fd, session);
-                       pthread_mutex_unlock(socket->lock);
-                       if (ret < 0) {
-                               ret = LTTCOMM_KERN_CONSUMER_FAIL;
-                               goto error;
-                       }
+       switch (domain) {
+       case LTTNG_DOMAIN_KERNEL:
+               DBG3("Copying tracing session consumer output in kernel session");
+               /*
+                * XXX: We should audit the session creation and what this function
+                * does "extra" in order to avoid a destroy, since this function is
+                * only used in domain session creation (kernel and UST). Same for
+                * the UST domain.
+                */
+               if (session->kernel_session->consumer) {
+                       consumer_destroy_output(session->kernel_session->consumer);
+               }
+               session->kernel_session->consumer =
+                       consumer_copy_output(session->consumer);
+               /* Ease our life a bit for the next part */
+               consumer = session->kernel_session->consumer;
+               dir_name = DEFAULT_KERNEL_TRACE_DIR;
+               break;
+       case LTTNG_DOMAIN_UST:
+               DBG3("Copying tracing session consumer output in UST session");
+               if (session->ust_session->consumer) {
+                       consumer_destroy_output(session->ust_session->consumer);
                }
+               session->ust_session->consumer =
+                       consumer_copy_output(session->consumer);
+               /* Ease our life a bit for the next part */
+               consumer = session->ust_session->consumer;
+               dir_name = DEFAULT_UST_TRACE_DIR;
+               break;
+       default:
+               ret = LTTNG_ERR_UNKNOWN_DOMAIN;
+               goto error;
        }
 
+       /* Append correct directory to subdir */
+       strncat(consumer->subdir, dir_name,
+                       sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
+       DBG3("Copy session consumer subdir %s", consumer->subdir);
+
+       ret = LTTNG_OK;
+
 error:
        return ret;
 }
 
 /*
- * Create a socket to the relayd using the URI.
+ * Create an UST session and add it to the session ust list.
  *
- * On success, the relayd_sock pointer is set to the created socket.
- * Else, it is untouched and an lttcomm error code is returned.
+ * Should *NOT* be called with RCU read-side lock held.
  */
-static int create_connect_relayd(struct consumer_output *output,
-               const char *session_name, struct lttng_uri *uri,
-               struct lttcomm_sock **relayd_sock)
+static int create_ust_session(struct ltt_session *session,
+               struct lttng_domain *domain)
 {
        int ret;
-       struct lttcomm_sock *sock;
+       struct ltt_ust_session *lus = NULL;
 
-       /* Create socket object from URI */
-       sock = lttcomm_alloc_sock_from_uri(uri);
-       if (sock == NULL) {
-               ret = LTTCOMM_FATAL;
-               goto error;
-       }
+       assert(session);
+       assert(domain);
+       assert(session->consumer);
 
-       ret = lttcomm_create_sock(sock);
-       if (ret < 0) {
-               ret = LTTCOMM_FATAL;
+       switch (domain->type) {
+       case LTTNG_DOMAIN_UST:
+               break;
+       default:
+               ERR("Unknown UST domain on create session %d", domain->type);
+               ret = LTTNG_ERR_UNKNOWN_DOMAIN;
                goto error;
        }
 
-       /* Connect to relayd so we can proceed with a session creation. */
-       ret = relayd_connect(sock);
-       if (ret < 0) {
-               ERR("Unable to reach lttng-relayd");
-               ret = LTTCOMM_RELAYD_SESSION_FAIL;
-               goto free_sock;
+       DBG("Creating UST session");
+
+       lus = trace_ust_create_session(session->id);
+       if (lus == NULL) {
+               ret = LTTNG_ERR_UST_SESS_FAIL;
+               goto error;
        }
 
-       /* Create socket for control stream. */
-       if (uri->stype == LTTNG_STREAM_CONTROL) {
-               DBG3("Creating relayd stream socket from URI");
-
-               /* Check relayd version */
-               ret = relayd_version_check(sock, RELAYD_VERSION_COMM_MAJOR,
-                               RELAYD_VERSION_COMM_MINOR);
-               if (ret < 0) {
-                       ret = LTTCOMM_RELAYD_VERSION_FAIL;
-                       goto close_sock;
-               }
-       } else if (uri->stype == LTTNG_STREAM_DATA) {
-               DBG3("Creating relayd data socket from URI");
-       } else {
-               /* Command is not valid */
-               ERR("Relayd invalid stream type: %d", uri->stype);
-               ret = LTTCOMM_INVALID;
-               goto close_sock;
-       }
-
-       *relayd_sock = sock;
-
-       return LTTCOMM_OK;
-
-close_sock:
-       if (sock) {
-               (void) relayd_close(sock);
-       }
-free_sock:
-       if (sock) {
-               lttcomm_destroy_sock(sock);
-       }
-error:
-       return ret;
-}
-
-/*
- * Connect to the relayd using URI and send the socket to the right consumer.
- */
-static int send_socket_relayd_consumer(int domain, struct ltt_session *session,
-               struct lttng_uri *relayd_uri, struct consumer_output *consumer,
-               int consumer_fd)
-{
-       int ret;
-       struct lttcomm_sock *sock = NULL;
-
-       /* Set the network sequence index if not set. */
-       if (consumer->net_seq_index == -1) {
-               /*
-                * Increment net_seq_idx because we are about to transfer the
-                * new relayd socket to the consumer.
-                */
-               uatomic_inc(&relayd_net_seq_idx);
-               /* Assign unique key so the consumer can match streams */
-               consumer->net_seq_index = uatomic_read(&relayd_net_seq_idx);
-       }
-
-       /* Connect to relayd and make version check if uri is the control. */
-       ret = create_connect_relayd(consumer, session->name, relayd_uri, &sock);
-       if (ret != LTTCOMM_OK) {
-               goto close_sock;
-       }
-
-       /* If the control socket is connected, network session is ready */
-       if (relayd_uri->stype == LTTNG_STREAM_CONTROL) {
-               session->net_handle = 1;
-       }
-
-       /* Send relayd socket to consumer. */
-       ret = consumer_send_relayd_socket(consumer_fd, sock,
-                       consumer, relayd_uri->stype);
-       if (ret < 0) {
-               ret = LTTCOMM_ENABLE_CONSUMER_FAIL;
-               goto close_sock;
-       }
-
-       ret = LTTCOMM_OK;
-
-       /*
-        * Close socket which was dup on the consumer side. The session daemon does
-        * NOT keep track of the relayd socket(s) once transfer to the consumer.
-        */
-
-close_sock:
-       if (sock) {
-               (void) relayd_close(sock);
-               lttcomm_destroy_sock(sock);
-       }
-
-       return ret;
-}
-
-/*
- * Send both relayd sockets to a specific consumer and domain.  This is a
- * helper function to facilitate sending the information to the consumer for a
- * session.
- */
-static int send_sockets_relayd_consumer(int domain,
-               struct ltt_session *session, struct consumer_output *consumer, int fd)
-{
-       int ret;
-
-       assert(session);
-       assert(consumer);
-
-       /* Don't resend the sockets to the consumer. */
-       if (consumer->dst.net.relayd_socks_sent) {
-               ret = LTTCOMM_OK;
-               goto error;
-       }
-
-       /* Sending control relayd socket. */
-       ret = send_socket_relayd_consumer(domain, session,
-                       &consumer->dst.net.control, consumer, fd);
-       if (ret != LTTCOMM_OK) {
-               goto error;
-       }
-
-       /* Sending data relayd socket. */
-       ret = send_socket_relayd_consumer(domain, session,
-                       &consumer->dst.net.data, consumer, fd);
-       if (ret != LTTCOMM_OK) {
-               goto error;
-       }
-
-       /* Flag that all relayd sockets were sent to the consumer. */
-       consumer->dst.net.relayd_socks_sent = 1;
-
-error:
-       return ret;
-}
-
-/*
- * Setup relayd connections for a tracing session. First creates the socket to
- * the relayd and send them to the right domain consumer. Consumer type MUST be
- * network.
- */
-static int setup_relayd(struct ltt_session *session)
-{
-       int ret = LTTCOMM_OK;
-       struct ltt_ust_session *usess;
-       struct ltt_kernel_session *ksess;
-       struct consumer_socket *socket;
-       struct lttng_ht_iter iter;
-
-       assert(session);
-
-       usess = session->ust_session;
-       ksess = session->kernel_session;
-
-       DBG2("Setting relayd for session %s", session->name);
-
-       if (usess && usess->consumer && usess->consumer->type == CONSUMER_DST_NET
-                       && usess->consumer->enabled) {
-               /* For each consumer socket, send relayd sockets */
-               cds_lfht_for_each_entry(usess->consumer->socks->ht, &iter.iter,
-                               socket, node.node) {
-                       /* Code flow error */
-                       assert(socket->fd >= 0);
-
-                       pthread_mutex_lock(socket->lock);
-                       send_sockets_relayd_consumer(LTTNG_DOMAIN_UST, session,
-                                       usess->consumer, socket->fd);
-                       pthread_mutex_unlock(socket->lock);
-                       if (ret != LTTCOMM_OK) {
-                               goto error;
-                       }
-               }
-       }
-
-       if (ksess && ksess->consumer && ksess->consumer->type == CONSUMER_DST_NET
-                       && ksess->consumer->enabled) {
-               cds_lfht_for_each_entry(ksess->consumer->socks->ht, &iter.iter,
-                               socket, node.node) {
-                       /* Code flow error */
-                       assert(socket->fd >= 0);
-
-                       pthread_mutex_lock(socket->lock);
-                       send_sockets_relayd_consumer(LTTNG_DOMAIN_KERNEL, session,
-                                       ksess->consumer, socket->fd);
-                       pthread_mutex_unlock(socket->lock);
-                       if (ret != LTTCOMM_OK) {
-                               goto error;
-                       }
-               }
-       }
-
-error:
-       return ret;
-}
-
-/*
- * Set consumer subdirectory using the session name and a generated datetime if
- * needed. This is appended to the current subdirectory.
- */
-static int set_consumer_subdir(struct consumer_output *consumer,
-               const char *session_name)
-{
-       int ret = 0;
-       unsigned int have_default_name = 0;
-       char datetime[16], tmp_path[PATH_MAX];
-       time_t rawtime;
-       struct tm *timeinfo;
-
-       assert(consumer);
-       assert(session_name);
-
-       memset(tmp_path, 0, sizeof(tmp_path));
-
-       /* Flag if we have a default session. */
-       if (strncmp(session_name, DEFAULT_SESSION_NAME "-",
-                               strlen(DEFAULT_SESSION_NAME) + 1) == 0) {
-               have_default_name = 1;
-       } else {
-               /* Get date and time for session path */
-               time(&rawtime);
-               timeinfo = localtime(&rawtime);
-               strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
-       }
-
-       if (have_default_name) {
-               ret = snprintf(tmp_path, sizeof(tmp_path),
-                               "%s/%s", consumer->subdir, session_name);
-       } else {
-               ret = snprintf(tmp_path, sizeof(tmp_path),
-                               "%s/%s-%s/", consumer->subdir, session_name, datetime);
-       }
-       if (ret < 0) {
-               PERROR("snprintf session name date");
-               goto error;
-       }
-
-       strncpy(consumer->subdir, tmp_path, sizeof(consumer->subdir));
-       DBG2("Consumer subdir set to %s", consumer->subdir);
-
-error:
-       return ret;
-}
-
-/*
- * Copy consumer output from the tracing session to the domain session. The
- * function also applies the right modification on a per domain basis for the
- * trace files destination directory.
- */
-static int copy_session_consumer(int domain, struct ltt_session *session)
-{
-       int ret;
-       const char *dir_name;
-       struct consumer_output *consumer;
-
-       assert(session);
-       assert(session->consumer);
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-               DBG3("Copying tracing session consumer output in kernel session");
-               session->kernel_session->consumer =
-                       consumer_copy_output(session->consumer);
-               /* Ease our life a bit for the next part */
-               consumer = session->kernel_session->consumer;
-               dir_name = DEFAULT_KERNEL_TRACE_DIR;
-               break;
-       case LTTNG_DOMAIN_UST:
-               DBG3("Copying tracing session consumer output in UST session");
-               session->ust_session->consumer =
-                       consumer_copy_output(session->consumer);
-               /* Ease our life a bit for the next part */
-               consumer = session->ust_session->consumer;
-               dir_name = DEFAULT_UST_TRACE_DIR;
-               break;
-       default:
-               ret = LTTCOMM_UNKNOWN_DOMAIN;
-               goto error;
-       }
-
-       ret = set_consumer_subdir(session->consumer, session->name);
-       if (ret < 0) {
-               ret = LTTCOMM_FATAL;
-               goto error;
-       }
-
-       /* Append correct directory to subdir */
-       strncat(consumer->subdir, dir_name, sizeof(consumer->subdir));
-       DBG3("Copy session consumer subdir %s", consumer->subdir);
-
-       ret = LTTCOMM_OK;
-
-error:
-       return ret;
-}
-
-/*
- * Create an UST session and add it to the session ust list.
- */
-static int create_ust_session(struct ltt_session *session,
-               struct lttng_domain *domain)
-{
-       int ret;
-       struct ltt_ust_session *lus = NULL;
-
-       assert(session);
-       assert(domain);
-       assert(session->consumer);
-
-       switch (domain->type) {
-       case LTTNG_DOMAIN_UST:
-               break;
-       default:
-               ERR("Unknown UST domain on create session %d", domain->type);
-               ret = LTTCOMM_UNKNOWN_DOMAIN;
-               goto error;
-       }
-
-       DBG("Creating UST session");
-
-       lus = trace_ust_create_session(session->path, session->id, domain);
-       if (lus == NULL) {
-               ret = LTTCOMM_UST_SESS_FAIL;
-               goto error;
-       }
-
-       lus->uid = session->uid;
-       lus->gid = session->gid;
-       session->ust_session = lus;
-
-       /* Copy session output to the newly created UST session */
-       ret = copy_session_consumer(domain->type, session);
-       if (ret != LTTCOMM_OK) {
-               goto error;
-       }
-
-       return LTTCOMM_OK;
-
-error:
-       free(lus);
-       session->ust_session = NULL;
-       return ret;
-}
-
-/*
- * Create a kernel tracer session then create the default channel.
- */
-static int create_kernel_session(struct ltt_session *session)
-{
-       int ret;
-
-       DBG("Creating kernel session");
-
-       ret = kernel_create_session(session, kernel_tracer_fd);
-       if (ret < 0) {
-               ret = LTTCOMM_KERN_SESS_FAIL;
-               goto error;
-       }
-
-       /* Code flow safety */
-       assert(session->kernel_session);
-
-       /* Copy session output to the newly created Kernel session */
-       ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
-       if (ret != LTTCOMM_OK) {
-               goto error;
-       }
-
-       /* Create directory(ies) on local filesystem. */
-       if (session->kernel_session->consumer->type == CONSUMER_DST_LOCAL &&
-                       strlen(session->kernel_session->consumer->dst.trace_path) > 0) {
-               ret = run_as_mkdir_recursive(
-                               session->kernel_session->consumer->dst.trace_path,
-                               S_IRWXU | S_IRWXG, session->uid, session->gid);
-               if (ret < 0) {
-                       if (ret != -EEXIST) {
-                               ERR("Trace directory creation error");
-                               goto error;
-                       }
-               }
-       }
-
-       session->kernel_session->uid = session->uid;
-       session->kernel_session->gid = session->gid;
-
-       return LTTCOMM_OK;
-
-error:
-       trace_kernel_destroy_session(session->kernel_session);
-       session->kernel_session = NULL;
-       return ret;
-}
-
-/*
- * Check if the UID or GID match the session. Root user has access to all
- * sessions.
- */
-static int session_access_ok(struct ltt_session *session, uid_t uid, gid_t gid)
-{
-       if (uid != session->uid && gid != session->gid && uid != 0) {
-               return 0;
-       } else {
-               return 1;
-       }
-}
-
-/*
- * Count number of session permitted by uid/gid.
- */
-static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
-{
-       unsigned int i = 0;
-       struct ltt_session *session;
-
-       DBG("Counting number of available session for UID %d GID %d",
-               uid, gid);
-       cds_list_for_each_entry(session, &session_list_ptr->head, list) {
-               /*
-                * Only list the sessions the user can control.
-                */
-               if (!session_access_ok(session, uid, gid)) {
-                       continue;
-               }
-               i++;
-       }
-       return i;
-}
-
-/*
- * Create a session path used by list_lttng_sessions for the case that the
- * session consumer is on the network.
- */
-static int build_network_session_path(char *dst, size_t size,
-               struct ltt_session *session)
-{
-       int ret, kdata_port, udata_port;
-       struct lttng_uri *kuri = NULL, *uuri = NULL, *uri = NULL;
-       char tmp_uurl[PATH_MAX], tmp_urls[PATH_MAX];
-
-       assert(session);
-       assert(dst);
-
-       memset(tmp_urls, 0, sizeof(tmp_urls));
-       memset(tmp_uurl, 0, sizeof(tmp_uurl));
-
-       kdata_port = udata_port = DEFAULT_NETWORK_DATA_PORT;
-
-       if (session->kernel_session && session->kernel_session->consumer) {
-               kuri = &session->kernel_session->consumer->dst.net.control;
-               kdata_port = session->kernel_session->consumer->dst.net.data.port;
-       }
-
-       if (session->ust_session && session->ust_session->consumer) {
-               uuri = &session->ust_session->consumer->dst.net.control;
-               udata_port = session->ust_session->consumer->dst.net.data.port;
-       }
-
-       if (uuri == NULL && kuri == NULL) {
-               uri = &session->consumer->dst.net.control;
-               kdata_port = session->consumer->dst.net.data.port;
-       } else if (kuri && uuri) {
-               ret = uri_compare(kuri, uuri);
-               if (ret) {
-                       /* Not Equal */
-                       uri = kuri;
-                       /* Build uuri URL string */
-                       ret = uri_to_str_url(uuri, tmp_uurl, sizeof(tmp_uurl));
-                       if (ret < 0) {
-                               goto error;
-                       }
-               } else {
-                       uri = kuri;
-               }
-       } else if (kuri && uuri == NULL) {
-               uri = kuri;
-       } else if (uuri && kuri == NULL) {
-               uri = uuri;
-       }
-
-       ret = uri_to_str_url(uri, tmp_urls, sizeof(tmp_urls));
-       if (ret < 0) {
-               goto error;
-       }
-
-       if (strlen(tmp_uurl) > 0) {
-               ret = snprintf(dst, size, "[K]: %s [data: %d] -- [U]: %s [data: %d]",
-                               tmp_urls, kdata_port, tmp_uurl, udata_port);
-       } else {
-               ret = snprintf(dst, size, "%s [data: %d]", tmp_urls, kdata_port);
-       }
-
-error:
-       return ret;
-}
-
-/*
- * Using the session list, filled a lttng_session array to send back to the
- * client for session listing.
- *
- * The session list lock MUST be acquired before calling this function. Use
- * session_lock_list() and session_unlock_list().
- */
-static void list_lttng_sessions(struct lttng_session *sessions, uid_t uid,
-               gid_t gid)
-{
-       int ret;
-       unsigned int i = 0;
-       struct ltt_session *session;
-
-       DBG("Getting all available session for UID %d GID %d",
-               uid, gid);
-       /*
-        * Iterate over session list and append data after the control struct in
-        * the buffer.
-        */
-       cds_list_for_each_entry(session, &session_list_ptr->head, list) {
-               /*
-                * Only list the sessions the user can control.
-                */
-               if (!session_access_ok(session, uid, gid)) {
-                       continue;
-               }
-
-               struct ltt_kernel_session *ksess = session->kernel_session;
-               struct ltt_ust_session *usess = session->ust_session;
-
-               if (session->consumer->type == CONSUMER_DST_NET ||
-                               (ksess && ksess->consumer->type == CONSUMER_DST_NET) ||
-                               (usess && usess->consumer->type == CONSUMER_DST_NET)) {
-                       ret = build_network_session_path(sessions[i].path,
-                                       sizeof(session[i].path), session);
-               } else {
-                       ret = snprintf(sessions[i].path, sizeof(session[i].path), "%s",
-                               session->consumer->dst.trace_path);
-               }
-               if (ret < 0) {
-                       PERROR("snprintf session path");
-                       continue;
-               }
-
-               strncpy(sessions[i].name, session->name, NAME_MAX);
-               sessions[i].name[NAME_MAX - 1] = '\0';
-               sessions[i].enabled = session->enabled;
-               i++;
-       }
-}
-
-/*
- * Fill lttng_channel array of all channels.
- */
-static void list_lttng_channels(int domain, struct ltt_session *session,
-               struct lttng_channel *channels)
-{
-       int i = 0;
-       struct ltt_kernel_channel *kchan;
-
-       DBG("Listing channels for session %s", session->name);
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-               /* Kernel channels */
-               if (session->kernel_session != NULL) {
-                       cds_list_for_each_entry(kchan,
-                                       &session->kernel_session->channel_list.head, list) {
-                               /* Copy lttng_channel struct to array */
-                               memcpy(&channels[i], kchan->channel, sizeof(struct lttng_channel));
-                               channels[i].enabled = kchan->enabled;
-                               i++;
-                       }
-               }
-               break;
-       case LTTNG_DOMAIN_UST:
-       {
-               struct lttng_ht_iter iter;
-               struct ltt_ust_channel *uchan;
-
-               cds_lfht_for_each_entry(session->ust_session->domain_global.channels->ht,
-                               &iter.iter, uchan, node.node) {
-                       strncpy(channels[i].name, uchan->name, LTTNG_SYMBOL_NAME_LEN);
-                       channels[i].attr.overwrite = uchan->attr.overwrite;
-                       channels[i].attr.subbuf_size = uchan->attr.subbuf_size;
-                       channels[i].attr.num_subbuf = uchan->attr.num_subbuf;
-                       channels[i].attr.switch_timer_interval =
-                               uchan->attr.switch_timer_interval;
-                       channels[i].attr.read_timer_interval =
-                               uchan->attr.read_timer_interval;
-                       channels[i].enabled = uchan->enabled;
-                       switch (uchan->attr.output) {
-                       case LTTNG_UST_MMAP:
-                       default:
-                               channels[i].attr.output = LTTNG_EVENT_MMAP;
-                               break;
-                       }
-                       i++;
-               }
-               break;
-       }
-       default:
-               break;
-       }
-}
-
-/*
- * Create a list of ust global domain events.
- */
-static int list_lttng_ust_global_events(char *channel_name,
-               struct ltt_ust_domain_global *ust_global, struct lttng_event **events)
-{
-       int i = 0, ret = 0;
-       unsigned int nb_event = 0;
-       struct lttng_ht_iter iter;
-       struct lttng_ht_node_str *node;
-       struct ltt_ust_channel *uchan;
-       struct ltt_ust_event *uevent;
-       struct lttng_event *tmp;
-
-       DBG("Listing UST global events for channel %s", channel_name);
-
-       rcu_read_lock();
-
-       lttng_ht_lookup(ust_global->channels, (void *)channel_name, &iter);
-       node = lttng_ht_iter_get_node_str(&iter);
-       if (node == NULL) {
-               ret = -LTTCOMM_UST_CHAN_NOT_FOUND;
-               goto error;
-       }
-
-       uchan = caa_container_of(&node->node, struct ltt_ust_channel, node.node);
-
-       nb_event += lttng_ht_get_count(uchan->events);
-
-       if (nb_event == 0) {
-               ret = nb_event;
-               goto error;
-       }
-
-       DBG3("Listing UST global %d events", nb_event);
-
-       tmp = zmalloc(nb_event * sizeof(struct lttng_event));
-       if (tmp == NULL) {
-               ret = -LTTCOMM_FATAL;
-               goto error;
-       }
-
-       cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
-               strncpy(tmp[i].name, uevent->attr.name, LTTNG_SYMBOL_NAME_LEN);
-               tmp[i].name[LTTNG_SYMBOL_NAME_LEN - 1] = '\0';
-               tmp[i].enabled = uevent->enabled;
-               switch (uevent->attr.instrumentation) {
-               case LTTNG_UST_TRACEPOINT:
-                       tmp[i].type = LTTNG_EVENT_TRACEPOINT;
-                       break;
-               case LTTNG_UST_PROBE:
-                       tmp[i].type = LTTNG_EVENT_PROBE;
-                       break;
-               case LTTNG_UST_FUNCTION:
-                       tmp[i].type = LTTNG_EVENT_FUNCTION;
-                       break;
-               }
-               tmp[i].loglevel = uevent->attr.loglevel;
-               switch (uevent->attr.loglevel_type) {
-               case LTTNG_UST_LOGLEVEL_ALL:
-                       tmp[i].loglevel_type = LTTNG_EVENT_LOGLEVEL_ALL;
-                       break;
-               case LTTNG_UST_LOGLEVEL_RANGE:
-                       tmp[i].loglevel_type = LTTNG_EVENT_LOGLEVEL_RANGE;
-                       break;
-               case LTTNG_UST_LOGLEVEL_SINGLE:
-                       tmp[i].loglevel_type = LTTNG_EVENT_LOGLEVEL_SINGLE;
-                       break;
-               }
-               if (uevent->filter) {
-                       tmp[i].filter = 1;
-               }
-               i++;
-       }
-
-       ret = nb_event;
-       *events = tmp;
-
-error:
-       rcu_read_unlock();
-       return ret;
-}
-
-/*
- * Fill lttng_event array of all kernel events in the channel.
- */
-static int list_lttng_kernel_events(char *channel_name,
-               struct ltt_kernel_session *kernel_session, struct lttng_event **events)
-{
-       int i = 0, ret;
-       unsigned int nb_event;
-       struct ltt_kernel_event *event;
-       struct ltt_kernel_channel *kchan;
-
-       kchan = trace_kernel_get_channel_by_name(channel_name, kernel_session);
-       if (kchan == NULL) {
-               ret = LTTCOMM_KERN_CHAN_NOT_FOUND;
-               goto error;
-       }
-
-       nb_event = kchan->event_count;
-
-       DBG("Listing events for channel %s", kchan->channel->name);
-
-       if (nb_event == 0) {
-               ret = nb_event;
-               goto error;
-       }
-
-       *events = zmalloc(nb_event * sizeof(struct lttng_event));
-       if (*events == NULL) {
-               ret = LTTCOMM_FATAL;
-               goto error;
-       }
-
-       /* Kernel channels */
-       cds_list_for_each_entry(event, &kchan->events_list.head , list) {
-               strncpy((*events)[i].name, event->event->name, LTTNG_SYMBOL_NAME_LEN);
-               (*events)[i].name[LTTNG_SYMBOL_NAME_LEN - 1] = '\0';
-               (*events)[i].enabled = event->enabled;
-               switch (event->event->instrumentation) {
-                       case LTTNG_KERNEL_TRACEPOINT:
-                               (*events)[i].type = LTTNG_EVENT_TRACEPOINT;
-                               break;
-                       case LTTNG_KERNEL_KPROBE:
-                       case LTTNG_KERNEL_KRETPROBE:
-                               (*events)[i].type = LTTNG_EVENT_PROBE;
-                               memcpy(&(*events)[i].attr.probe, &event->event->u.kprobe,
-                                               sizeof(struct lttng_kernel_kprobe));
-                               break;
-                       case LTTNG_KERNEL_FUNCTION:
-                               (*events)[i].type = LTTNG_EVENT_FUNCTION;
-                               memcpy(&((*events)[i].attr.ftrace), &event->event->u.ftrace,
-                                               sizeof(struct lttng_kernel_function));
-                               break;
-                       case LTTNG_KERNEL_NOOP:
-                               (*events)[i].type = LTTNG_EVENT_NOOP;
-                               break;
-                       case LTTNG_KERNEL_SYSCALL:
-                               (*events)[i].type = LTTNG_EVENT_SYSCALL;
-                               break;
-                       case LTTNG_KERNEL_ALL:
-                               assert(0);
-                               break;
-               }
-               i++;
-       }
-
-       return nb_event;
-
-error:
-       return ret;
-}
-
-
-/*
- * Add URI so the consumer output object. Set the correct path depending on the
- * domain adding the default trace directory.
- */
-static int add_uri_to_consumer(struct consumer_output *consumer,
-               struct lttng_uri *uri, int domain, const char *session_name)
-{
-       int ret = LTTCOMM_OK;
-       const char *default_trace_dir;
-
-       assert(uri);
-
-       if (consumer == NULL) {
-               DBG("No consumer detected. Don't add URI. Stopping.");
-               ret = LTTCOMM_NO_CONSUMER;
-               goto error;
-       }
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-               default_trace_dir = DEFAULT_KERNEL_TRACE_DIR;
-               break;
-       case LTTNG_DOMAIN_UST:
-               default_trace_dir = DEFAULT_UST_TRACE_DIR;
-               break;
-       default:
-               /*
-                * This case is possible is we try to add the URI to the global tracing
-                * session consumer object which in this case there is no subdir.
-                */
-               default_trace_dir = "";
-       }
-
-       switch (uri->dtype) {
-       case LTTNG_DST_IPV4:
-       case LTTNG_DST_IPV6:
-               DBG2("Setting network URI to consumer");
-
-               /* Set URI into consumer output object */
-               ret = consumer_set_network_uri(consumer, uri);
-               if (ret < 0) {
-                       ret = LTTCOMM_FATAL;
-                       goto error;
-               } else if (ret == 1) {
-                       /*
-                        * URI was the same in the consumer so we do not append the subdir
-                        * again so to not duplicate output dir.
-                        */
-                       goto error;
-               }
-
-               if (uri->stype == LTTNG_STREAM_CONTROL && strlen(uri->subdir) == 0) {
-                       ret = set_consumer_subdir(consumer, session_name);
-                       if (ret < 0) {
-                               ret = LTTCOMM_FATAL;
-                               goto error;
-                       }
-               }
-
-               if (uri->stype == LTTNG_STREAM_CONTROL) {
-                       /* On a new subdir, reappend the default trace dir. */
-                       strncat(consumer->subdir, default_trace_dir, sizeof(consumer->subdir));
-                       DBG3("Append domain trace name to subdir %s", consumer->subdir);
-               }
-
-               break;
-       case LTTNG_DST_PATH:
-               DBG2("Setting trace directory path from URI to %s", uri->dst.path);
-               memset(consumer->dst.trace_path, 0,
-                               sizeof(consumer->dst.trace_path));
-               strncpy(consumer->dst.trace_path, uri->dst.path,
-                               sizeof(consumer->dst.trace_path));
-               /* Append default trace dir */
-               strncat(consumer->dst.trace_path, default_trace_dir,
-                               sizeof(consumer->dst.trace_path));
-               /* Flag consumer as local. */
-               consumer->type = CONSUMER_DST_LOCAL;
-               break;
-       }
-
-error:
-       return ret;
-}
-
-/*
- * Command LTTNG_DISABLE_CHANNEL processed by the client thread.
- */
-static int cmd_disable_channel(struct ltt_session *session,
-               int domain, char *channel_name)
-{
-       int ret;
-       struct ltt_ust_session *usess;
-
-       usess = session->ust_session;
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-       {
-               ret = channel_kernel_disable(session->kernel_session,
-                               channel_name);
-               if (ret != LTTCOMM_OK) {
-                       goto error;
-               }
-
-               kernel_wait_quiescent(kernel_tracer_fd);
-               break;
-       }
-       case LTTNG_DOMAIN_UST:
-       {
-               struct ltt_ust_channel *uchan;
-               struct lttng_ht *chan_ht;
-
-               chan_ht = usess->domain_global.channels;
-
-               uchan = trace_ust_find_channel_by_name(chan_ht, channel_name);
-               if (uchan == NULL) {
-                       ret = LTTCOMM_UST_CHAN_NOT_FOUND;
-                       goto error;
-               }
-
-               ret = channel_ust_disable(usess, domain, uchan);
-               if (ret != LTTCOMM_OK) {
-                       goto error;
-               }
-               break;
-       }
-#if 0
-       case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
-       case LTTNG_DOMAIN_UST_EXEC_NAME:
-       case LTTNG_DOMAIN_UST_PID:
-#endif
-       default:
-               ret = LTTCOMM_UNKNOWN_DOMAIN;
-               goto error;
-       }
-
-       ret = LTTCOMM_OK;
-
-error:
-       return ret;
-}
-
-/*
- * Command LTTNG_ENABLE_CHANNEL processed by the client thread.
- */
-static int cmd_enable_channel(struct ltt_session *session,
-               int domain, struct lttng_channel *attr)
-{
-       int ret;
-       struct ltt_ust_session *usess = session->ust_session;
-       struct lttng_ht *chan_ht;
-
-       DBG("Enabling channel %s for session %s", attr->name, session->name);
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-       {
-               struct ltt_kernel_channel *kchan;
-
-               kchan = trace_kernel_get_channel_by_name(attr->name,
-                               session->kernel_session);
-               if (kchan == NULL) {
-                       ret = channel_kernel_create(session->kernel_session,
-                                       attr, kernel_poll_pipe[1]);
-               } else {
-                       ret = channel_kernel_enable(session->kernel_session, kchan);
-               }
-
-               if (ret != LTTCOMM_OK) {
-                       goto error;
-               }
-
-               kernel_wait_quiescent(kernel_tracer_fd);
-               break;
-       }
-       case LTTNG_DOMAIN_UST:
-       {
-               struct ltt_ust_channel *uchan;
-
-               chan_ht = usess->domain_global.channels;
-
-               uchan = trace_ust_find_channel_by_name(chan_ht, attr->name);
-               if (uchan == NULL) {
-                       ret = channel_ust_create(usess, domain, attr);
-               } else {
-                       ret = channel_ust_enable(usess, domain, uchan);
-               }
-               break;
-       }
-#if 0
-       case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
-       case LTTNG_DOMAIN_UST_EXEC_NAME:
-       case LTTNG_DOMAIN_UST_PID:
-#endif
-       default:
-               ret = LTTCOMM_UNKNOWN_DOMAIN;
-               goto error;
-       }
-
-error:
-       return ret;
-}
-
-/*
- * Command LTTNG_DISABLE_EVENT processed by the client thread.
- */
-static int cmd_disable_event(struct ltt_session *session, int domain,
-               char *channel_name, char *event_name)
-{
-       int ret;
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-       {
-               struct ltt_kernel_channel *kchan;
-               struct ltt_kernel_session *ksess;
-
-               ksess = session->kernel_session;
-
-               kchan = trace_kernel_get_channel_by_name(channel_name, ksess);
-               if (kchan == NULL) {
-                       ret = LTTCOMM_KERN_CHAN_NOT_FOUND;
-                       goto error;
-               }
-
-               ret = event_kernel_disable_tracepoint(ksess, kchan, event_name);
-               if (ret != LTTCOMM_OK) {
-                       goto error;
-               }
-
-               kernel_wait_quiescent(kernel_tracer_fd);
-               break;
-       }
-       case LTTNG_DOMAIN_UST:
-       {
-               struct ltt_ust_channel *uchan;
-               struct ltt_ust_session *usess;
-
-               usess = session->ust_session;
-
-               uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
-                               channel_name);
-               if (uchan == NULL) {
-                       ret = LTTCOMM_UST_CHAN_NOT_FOUND;
-                       goto error;
-               }
-
-               ret = event_ust_disable_tracepoint(usess, domain, uchan, event_name);
-               if (ret != LTTCOMM_OK) {
-                       goto error;
-               }
-
-               DBG3("Disable UST event %s in channel %s completed", event_name,
-                               channel_name);
-               break;
-       }
-#if 0
-       case LTTNG_DOMAIN_UST_EXEC_NAME:
-       case LTTNG_DOMAIN_UST_PID:
-       case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
-#endif
-       default:
-               ret = LTTCOMM_UND;
-               goto error;
-       }
-
-       ret = LTTCOMM_OK;
-
-error:
-       return ret;
-}
-
-/*
- * Command LTTNG_DISABLE_ALL_EVENT processed by the client thread.
- */
-static int cmd_disable_event_all(struct ltt_session *session, int domain,
-               char *channel_name)
-{
-       int ret;
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-       {
-               struct ltt_kernel_session *ksess;
-               struct ltt_kernel_channel *kchan;
-
-               ksess = session->kernel_session;
-
-               kchan = trace_kernel_get_channel_by_name(channel_name, ksess);
-               if (kchan == NULL) {
-                       ret = LTTCOMM_KERN_CHAN_NOT_FOUND;
-                       goto error;
-               }
-
-               ret = event_kernel_disable_all(ksess, kchan);
-               if (ret != LTTCOMM_OK) {
-                       goto error;
-               }
-
-               kernel_wait_quiescent(kernel_tracer_fd);
-               break;
-       }
-       case LTTNG_DOMAIN_UST:
-       {
-               struct ltt_ust_session *usess;
-               struct ltt_ust_channel *uchan;
-
-               usess = session->ust_session;
-
-               uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
-                               channel_name);
-               if (uchan == NULL) {
-                       ret = LTTCOMM_UST_CHAN_NOT_FOUND;
-                       goto error;
-               }
-
-               ret = event_ust_disable_all_tracepoints(usess, domain, uchan);
-               if (ret != 0) {
-                       goto error;
-               }
-
-               DBG3("Disable all UST events in channel %s completed", channel_name);
-
-               break;
-       }
-#if 0
-       case LTTNG_DOMAIN_UST_EXEC_NAME:
-       case LTTNG_DOMAIN_UST_PID:
-       case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
-#endif
-       default:
-               ret = LTTCOMM_UND;
-               goto error;
-       }
-
-       ret = LTTCOMM_OK;
-
-error:
-       return ret;
-}
-
-/*
- * Command LTTNG_ADD_CONTEXT processed by the client thread.
- */
-static int cmd_add_context(struct ltt_session *session, int domain,
-               char *channel_name, char *event_name, struct lttng_event_context *ctx)
-{
-       int ret;
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-               /* Add kernel context to kernel tracer */
-               ret = context_kernel_add(session->kernel_session, ctx,
-                               event_name, channel_name);
-               if (ret != LTTCOMM_OK) {
-                       goto error;
-               }
-               break;
-       case LTTNG_DOMAIN_UST:
-       {
-               struct ltt_ust_session *usess = session->ust_session;
-
-               ret = context_ust_add(usess, domain, ctx, event_name, channel_name);
-               if (ret != LTTCOMM_OK) {
-                       goto error;
-               }
-               break;
-       }
-#if 0
-       case LTTNG_DOMAIN_UST_EXEC_NAME:
-       case LTTNG_DOMAIN_UST_PID:
-       case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
-#endif
-       default:
-               ret = LTTCOMM_UND;
-               goto error;
-       }
-
-       ret = LTTCOMM_OK;
-
-error:
-       return ret;
-}
-
-/*
- * Command LTTNG_SET_FILTER processed by the client thread.
- */
-static int cmd_set_filter(struct ltt_session *session, int domain,
-               char *channel_name, char *event_name,
-               struct lttng_filter_bytecode *bytecode)
-{
-       int ret;
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-               ret = LTTCOMM_FATAL;
-               break;
-       case LTTNG_DOMAIN_UST:
-       {
-               struct ltt_ust_session *usess = session->ust_session;
-
-               ret = filter_ust_set(usess, domain, bytecode, event_name, channel_name);
-               if (ret != LTTCOMM_OK) {
-                       goto error;
-               }
-               break;
-       }
-#if 0
-       case LTTNG_DOMAIN_UST_EXEC_NAME:
-       case LTTNG_DOMAIN_UST_PID:
-       case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
-#endif
-       default:
-               ret = LTTCOMM_UND;
-               goto error;
-       }
-
-       ret = LTTCOMM_OK;
-
-error:
-       return ret;
-}
-
-/*
- * Command LTTNG_ENABLE_EVENT processed by the client thread.
- */
-static int cmd_enable_event(struct ltt_session *session, int domain,
-               char *channel_name, struct lttng_event *event)
-{
-       int ret;
-       struct lttng_channel *attr;
-       struct ltt_ust_session *usess = session->ust_session;
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-       {
-               struct ltt_kernel_channel *kchan;
-
-               kchan = trace_kernel_get_channel_by_name(channel_name,
-                               session->kernel_session);
-               if (kchan == NULL) {
-                       attr = channel_new_default_attr(domain);
-                       if (attr == NULL) {
-                               ret = LTTCOMM_FATAL;
-                               goto error;
-                       }
-                       snprintf(attr->name, NAME_MAX, "%s", channel_name);
-
-                       /* This call will notify the kernel thread */
-                       ret = channel_kernel_create(session->kernel_session,
-                                       attr, kernel_poll_pipe[1]);
-                       if (ret != LTTCOMM_OK) {
-                               free(attr);
-                               goto error;
-                       }
-                       free(attr);
-               }
-
-               /* Get the newly created kernel channel pointer */
-               kchan = trace_kernel_get_channel_by_name(channel_name,
-                               session->kernel_session);
-               if (kchan == NULL) {
-                       /* This should not happen... */
-                       ret = LTTCOMM_FATAL;
-                       goto error;
-               }
-
-               ret = event_kernel_enable_tracepoint(session->kernel_session, kchan,
-                               event);
-               if (ret != LTTCOMM_OK) {
-                       goto error;
-               }
-
-               kernel_wait_quiescent(kernel_tracer_fd);
-               break;
-       }
-       case LTTNG_DOMAIN_UST:
-       {
-               struct lttng_channel *attr;
-               struct ltt_ust_channel *uchan;
-
-               /* Get channel from global UST domain */
-               uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
-                               channel_name);
-               if (uchan == NULL) {
-                       /* Create default channel */
-                       attr = channel_new_default_attr(domain);
-                       if (attr == NULL) {
-                               ret = LTTCOMM_FATAL;
-                               goto error;
-                       }
-                       snprintf(attr->name, NAME_MAX, "%s", channel_name);
-                       attr->name[NAME_MAX - 1] = '\0';
-
-                       ret = channel_ust_create(usess, domain, attr);
-                       if (ret != LTTCOMM_OK) {
-                               free(attr);
-                               goto error;
-                       }
-                       free(attr);
-
-                       /* Get the newly created channel reference back */
-                       uchan = trace_ust_find_channel_by_name(
-                                       usess->domain_global.channels, channel_name);
-                       if (uchan == NULL) {
-                               /* Something is really wrong */
-                               ret = LTTCOMM_FATAL;
-                               goto error;
-                       }
-               }
-
-               /* At this point, the session and channel exist on the tracer */
-               ret = event_ust_enable_tracepoint(usess, domain, uchan, event);
-               if (ret != LTTCOMM_OK) {
-                       goto error;
-               }
-               break;
-       }
-#if 0
-       case LTTNG_DOMAIN_UST_EXEC_NAME:
-       case LTTNG_DOMAIN_UST_PID:
-       case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
-#endif
-       default:
-               ret = LTTCOMM_UND;
-               goto error;
-       }
-
-       ret = LTTCOMM_OK;
-
-error:
-       return ret;
-}
-
-/*
- * Command LTTNG_ENABLE_ALL_EVENT processed by the client thread.
- */
-static int cmd_enable_event_all(struct ltt_session *session, int domain,
-               char *channel_name, int event_type)
-{
-       int ret;
-       struct ltt_kernel_channel *kchan;
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-               kchan = trace_kernel_get_channel_by_name(channel_name,
-                               session->kernel_session);
-               if (kchan == NULL) {
-                       /* This call will notify the kernel thread */
-                       ret = channel_kernel_create(session->kernel_session, NULL,
-                                       kernel_poll_pipe[1]);
-                       if (ret != LTTCOMM_OK) {
-                               goto error;
-                       }
-
-                       /* Get the newly created kernel channel pointer */
-                       kchan = trace_kernel_get_channel_by_name(channel_name,
-                                       session->kernel_session);
-                       if (kchan == NULL) {
-                               /* This should not happen... */
-                               ret = LTTCOMM_FATAL;
-                               goto error;
-                       }
-
-               }
-
-               switch (event_type) {
-               case LTTNG_EVENT_SYSCALL:
-                       ret = event_kernel_enable_all_syscalls(session->kernel_session,
-                                       kchan, kernel_tracer_fd);
-                       break;
-               case LTTNG_EVENT_TRACEPOINT:
-                       /*
-                        * This call enables all LTTNG_KERNEL_TRACEPOINT events as well
-                        * as those already registered to the channel.
-                        */
-                       ret = event_kernel_enable_all_tracepoints(session->kernel_session,
-                                       kchan, kernel_tracer_fd);
-                       break;
-               case LTTNG_EVENT_ALL:
-                       /* Enable syscalls and tracepoints */
-                       ret = event_kernel_enable_all(session->kernel_session,
-                                       kchan, kernel_tracer_fd);
-                       break;
-               default:
-                       ret = LTTCOMM_KERN_ENABLE_FAIL;
-                       goto error;
-               }
-
-               /* Manage return value */
-               if (ret != LTTCOMM_OK) {
-                       goto error;
-               }
-
-               kernel_wait_quiescent(kernel_tracer_fd);
-               break;
-       case LTTNG_DOMAIN_UST:
-       {
-               struct lttng_channel *attr;
-               struct ltt_ust_channel *uchan;
-               struct ltt_ust_session *usess = session->ust_session;
-
-               /* Get channel from global UST domain */
-               uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
-                               channel_name);
-               if (uchan == NULL) {
-                       /* Create default channel */
-                       attr = channel_new_default_attr(domain);
-                       if (attr == NULL) {
-                               ret = LTTCOMM_FATAL;
-                               goto error;
-                       }
-                       snprintf(attr->name, NAME_MAX, "%s", channel_name);
-                       attr->name[NAME_MAX - 1] = '\0';
-
-                       /* Use the internal command enable channel */
-                       ret = channel_ust_create(usess, domain, attr);
-                       if (ret != LTTCOMM_OK) {
-                               free(attr);
-                               goto error;
-                       }
-                       free(attr);
-
-                       /* Get the newly created channel reference back */
-                       uchan = trace_ust_find_channel_by_name(
-                                       usess->domain_global.channels, channel_name);
-                       if (uchan == NULL) {
-                               /* Something is really wrong */
-                               ret = LTTCOMM_FATAL;
-                               goto error;
-                       }
-               }
-
-               /* At this point, the session and channel exist on the tracer */
-
-               switch (event_type) {
-               case LTTNG_EVENT_ALL:
-               case LTTNG_EVENT_TRACEPOINT:
-                       ret = event_ust_enable_all_tracepoints(usess, domain, uchan);
-                       if (ret != LTTCOMM_OK) {
-                               goto error;
-                       }
-                       break;
-               default:
-                       ret = LTTCOMM_UST_ENABLE_FAIL;
-                       goto error;
-               }
-
-               /* Manage return value */
-               if (ret != LTTCOMM_OK) {
-                       goto error;
-               }
-
-               break;
-       }
-#if 0
-       case LTTNG_DOMAIN_UST_EXEC_NAME:
-       case LTTNG_DOMAIN_UST_PID:
-       case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN:
-#endif
-       default:
-               ret = LTTCOMM_UND;
-               goto error;
-       }
-
-       ret = LTTCOMM_OK;
-
-error:
-       return ret;
-}
-
-/*
- * Command LTTNG_LIST_TRACEPOINTS processed by the client thread.
- */
-static ssize_t cmd_list_tracepoints(int domain, struct lttng_event **events)
-{
-       int ret;
-       ssize_t nb_events = 0;
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-               nb_events = kernel_list_events(kernel_tracer_fd, events);
-               if (nb_events < 0) {
-                       ret = LTTCOMM_KERN_LIST_FAIL;
-                       goto error;
-               }
-               break;
-       case LTTNG_DOMAIN_UST:
-               nb_events = ust_app_list_events(events);
-               if (nb_events < 0) {
-                       ret = LTTCOMM_UST_LIST_FAIL;
-                       goto error;
-               }
-               break;
-       default:
-               ret = LTTCOMM_UND;
-               goto error;
-       }
-
-       return nb_events;
-
-error:
-       /* Return a negative value so the caller can tell an error from a count. */
-       return -ret;
-}
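Because the function returns a count on success and a negated LTTCOMM_* code on failure, callers are expected to re-negate the value to recover the error. A hypothetical caller sketch, assuming the declarations already present in this file:

/* Hypothetical caller, for illustration only. */
static int list_kernel_tracepoints_example(void)
{
        struct lttng_event *events = NULL;
        ssize_t nb_events;

        nb_events = cmd_list_tracepoints(LTTNG_DOMAIN_KERNEL, &events);
        if (nb_events < 0) {
                /* Recover the LTTCOMM_* error code. */
                return -nb_events;
        }

        /* ... use the nb_events entries ... */
        free(events);
        return LTTCOMM_OK;
}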
-
-/*
- * Command LTTNG_LIST_TRACEPOINT_FIELDS processed by the client thread.
- */
-static ssize_t cmd_list_tracepoint_fields(int domain,
-                       struct lttng_event_field **fields)
-{
-       int ret;
-       ssize_t nb_fields = 0;
-
-       switch (domain) {
-       case LTTNG_DOMAIN_UST:
-               nb_fields = ust_app_list_event_fields(fields);
-               if (nb_fields < 0) {
-                       ret = LTTCOMM_UST_LIST_FAIL;
-                       goto error;
-               }
-               break;
-       case LTTNG_DOMAIN_KERNEL:
-       default:        /* fall-through */
-               ret = LTTCOMM_UND;
-               goto error;
-       }
-
-       return nb_fields;
-
-error:
-       /* Return a negative value so the caller can tell an error from a count. */
-       return -ret;
-}
-
-/*
- * Command LTTNG_START_TRACE processed by the client thread.
- */
-static int cmd_start_trace(struct ltt_session *session)
-{
-       int ret;
-       struct ltt_kernel_session *ksession;
-       struct ltt_ust_session *usess;
-       struct ltt_kernel_channel *kchan;
-
-       /* Ease our life a bit ;) */
-       ksession = session->kernel_session;
-       usess = session->ust_session;
-
-       if (session->enabled) {
-               /* Already started. */
-               ret = LTTCOMM_TRACE_ALREADY_STARTED;
-               goto error;
-       }
-
-       session->enabled = 1;
-
-       ret = setup_relayd(session);
-       if (ret != LTTCOMM_OK) {
-               ERR("Error setting up relayd for session %s", session->name);
-               goto error;
-       }
-
-       /* Kernel tracing */
-       if (ksession != NULL) {
-               /* Open kernel metadata */
-               if (ksession->metadata == NULL) {
-                       ret = kernel_open_metadata(ksession);
-                       if (ret < 0) {
-                               ret = LTTCOMM_KERN_META_FAIL;
-                               goto error;
-                       }
-               }
-
-               /* Open kernel metadata stream */
-               if (ksession->metadata_stream_fd < 0) {
-                       ret = kernel_open_metadata_stream(ksession);
-                       if (ret < 0) {
-                               ERR("Kernel create metadata stream failed");
-                               ret = LTTCOMM_KERN_STREAM_FAIL;
-                               goto error;
-                       }
-               }
-
-               /* For each channel */
-               cds_list_for_each_entry(kchan, &ksession->channel_list.head, list) {
-                       if (kchan->stream_count == 0) {
-                               ret = kernel_open_channel_stream(kchan);
-                               if (ret < 0) {
-                                       ret = LTTCOMM_KERN_STREAM_FAIL;
-                                       goto error;
-                               }
-                               /* Update the stream global counter */
-                               ksession->stream_count_global += ret;
-                       }
-               }
-
-               /* Setup kernel consumer socket and send fds to it */
-               ret = init_kernel_tracing(ksession);
-               if (ret < 0) {
-                       ret = LTTCOMM_KERN_START_FAIL;
-                       goto error;
-               }
-
-               /* This starts the kernel tracing */
-               ret = kernel_start_session(ksession);
-               if (ret < 0) {
-                       ret = LTTCOMM_KERN_START_FAIL;
-                       goto error;
-               }
-
-               /* Quiescent wait after starting trace */
-               kernel_wait_quiescent(kernel_tracer_fd);
-       }
-
-       /* Flag session that trace should start automatically */
-       if (usess) {
-               usess->start_trace = 1;
-
-               ret = ust_app_start_trace_all(usess);
-               if (ret < 0) {
-                       ret = LTTCOMM_UST_START_FAIL;
-                       goto error;
-               }
-       }
-
-       ret = LTTCOMM_OK;
-
-error:
-       return ret;
-}
-
-/*
- * Command LTTNG_STOP_TRACE processed by the client thread.
- */
-static int cmd_stop_trace(struct ltt_session *session)
-{
-       int ret;
-       struct ltt_kernel_channel *kchan;
-       struct ltt_kernel_session *ksession;
-       struct ltt_ust_session *usess;
-
-       /* Short cut */
-       ksession = session->kernel_session;
-       usess = session->ust_session;
-
-       if (!session->enabled) {
-               ret = LTTCOMM_TRACE_ALREADY_STOPPED;
-               goto error;
-       }
-
-       session->enabled = 0;
-
-       /* Kernel tracer */
-       if (ksession != NULL) {
-               DBG("Stop kernel tracing");
-
-               /* Flush metadata if it exists */
-               if (ksession->metadata_stream_fd >= 0) {
-                       ret = kernel_metadata_flush_buffer(ksession->metadata_stream_fd);
-                       if (ret < 0) {
-                               ERR("Kernel metadata flush failed");
-                       }
-               }
-
-               /* Flush all buffers before stopping */
-               cds_list_for_each_entry(kchan, &ksession->channel_list.head, list) {
-                       ret = kernel_flush_buffer(kchan);
-                       if (ret < 0) {
-                               ERR("Kernel flush buffer error");
-                       }
-               }
-
-               ret = kernel_stop_session(ksession);
-               if (ret < 0) {
-                       ret = LTTCOMM_KERN_STOP_FAIL;
-                       goto error;
-               }
-
-               kernel_wait_quiescent(kernel_tracer_fd);
-       }
-
-       if (usess) {
-               usess->start_trace = 0;
-
-               ret = ust_app_stop_trace_all(usess);
-               if (ret < 0) {
-                       ret = LTTCOMM_UST_STOP_FAIL;
-                       goto error;
-               }
-       }
-
-       ret = LTTCOMM_OK;
-
-error:
-       return ret;
-}
-
-/*
- * Command LTTNG_SET_CONSUMER_URI processed by the client thread.
- */
-static int cmd_set_consumer_uri(int domain, struct ltt_session *session,
-               size_t nb_uri, struct lttng_uri *uris)
-{
-       int ret, i;
-       struct ltt_kernel_session *ksess = session->kernel_session;
-       struct ltt_ust_session *usess = session->ust_session;
-       struct consumer_output *consumer = NULL;
-
-       assert(session);
-       assert(uris);
-       assert(nb_uri > 0);
-
-       /* Can't enable consumer after session started. */
-       if (session->enabled) {
-               ret = LTTCOMM_TRACE_ALREADY_STARTED;
-               goto error;
-       }
-
-       if (!session->start_consumer) {
-               ret = LTTCOMM_NO_CONSUMER;
-               goto error;
-       }
-
-       /*
-        * This switch makes sure the domain session has a temporary consumer
-        * so the URL can be set.
-        */
-       switch (domain) {
-       case 0:
-               /* Code flow error. A session MUST always have a consumer object */
-               assert(session->consumer);
-               /*
-                * The URL will be added to the tracing session consumer instead of a
-                * specific domain consumer.
-                */
-               consumer = session->consumer;
-               break;
-       case LTTNG_DOMAIN_KERNEL:
-               /* Code flow error if we don't have a kernel session here. */
-               assert(ksess);
-
-               /* Create consumer output if none exists */
-               consumer = ksess->tmp_consumer;
-               if (consumer == NULL) {
-                       consumer = consumer_copy_output(ksess->consumer);
-                       if (consumer == NULL) {
-                               ret = LTTCOMM_FATAL;
-                               goto error;
-                       }
-                       /* Trash the consumer subdir, we are about to set a new one. */
-                       memset(consumer->subdir, 0, sizeof(consumer->subdir));
-                       ksess->tmp_consumer = consumer;
-               }
-
-               break;
-       case LTTNG_DOMAIN_UST:
-               /* Code flow error if we don't have a UST session here. */
-               assert(usess);
-
-               /* Create consumer output if none exists */
-               consumer = usess->tmp_consumer;
-               if (consumer == NULL) {
-                       consumer = consumer_copy_output(usess->consumer);
-                       if (consumer == NULL) {
-                               ret = LTTCOMM_FATAL;
-                               goto error;
-                       }
-                       /* Trash the consumer subdir, we are about to set a new one. */
-                       memset(consumer->subdir, 0, sizeof(consumer->subdir));
-                       usess->tmp_consumer = consumer;
-               }
-
-               break;
-       }
-
-       for (i = 0; i < nb_uri; i++) {
-               struct consumer_socket *socket;
-               struct lttng_ht_iter iter;
-
-               ret = add_uri_to_consumer(consumer, &uris[i], domain, session->name);
-               if (ret < 0) {
-                       goto error;
-               }
-
-               /*
-                * Don't send relayd socket if URI is NOT remote or if the relayd
-                * sockets for the session are already sent.
-                */
-               if (uris[i].dtype == LTTNG_DST_PATH ||
-                               consumer->dst.net.relayd_socks_sent) {
-                       continue;
-               }
-
-               /* Try to send the relayd URI to the consumer if one exists. */
-               cds_lfht_for_each_entry(consumer->socks->ht, &iter.iter,
-                               socket, node.node) {
-
-                       /* A socket in the HT should never have a negative fd */
-                       assert(socket->fd >= 0);
-
-                       pthread_mutex_lock(socket->lock);
-                       ret = send_socket_relayd_consumer(domain, session, &uris[i],
-                                       consumer, socket->fd);
-                       pthread_mutex_unlock(socket->lock);
-                       if (ret != LTTCOMM_OK) {
-                               goto error;
-                       }
-               }
-       }
-
-       /* All good! */
-       ret = LTTCOMM_OK;
-
-error:
-       return ret;
-}
-
-
-/*
- * Command LTTNG_CREATE_SESSION processed by the client thread.
- */
-static int cmd_create_session_uri(char *name, struct lttng_uri *uris,
-               size_t nb_uri, lttng_sock_cred *creds)
-{
-       int ret;
-       char *path = NULL;
-       struct ltt_session *session;
-
-       assert(name);
-
-       /*
-        * Verify that the session does not already exist.
-        *
-        * XXX: There is no need to take the session list lock here since the
-        * caller (process_client_msg) is holding it. We might want to change
-        * that so a single command does not lock the entire session list.
-        */
-       session = session_find_by_name(name);
-       if (session != NULL) {
-               ret = LTTCOMM_EXIST_SESS;
-               goto find_error;
-       }
-
-       /* Create tracing session in the registry */
-       ret = session_create(name, path, LTTNG_SOCK_GET_UID_CRED(creds),
-                       LTTNG_SOCK_GET_GID_CRED(creds));
-       if (ret != LTTCOMM_OK) {
-               goto session_error;
-       }
-
-       /*
-        * Get the newly created session pointer back.
-        *
-        * XXX: There is no need to take the session list lock here since the
-        * caller (process_client_msg) is holding it. We might want to change
-        * that so a single command does not lock the entire session list.
-        */
-       session = session_find_by_name(name);
-       assert(session);
-
-       /* Create a default consumer output for the session; none exists yet. */
-       session->consumer = consumer_create_output(CONSUMER_DST_LOCAL);
-       if (session->consumer == NULL) {
-               ret = LTTCOMM_FATAL;
-               goto consumer_error;
-       }
-
-       /*
-        * This means that lttng_create_session() was called with the _path_
-        * argument set to NULL.
-        */
-       if (uris == NULL) {
-               /*
-                * At this point, we'll skip the consumer URI setup and create a
-                * session with a NULL path which will flag the session to NOT spawn a
-                * consumer.
-                */
-               DBG("Create session %s with NO uri, skipping consumer setup", name);
-               goto end;
-       }
-
-       session->start_consumer = 1;
-
-       ret = cmd_set_consumer_uri(0, session, nb_uri, uris);
-       if (ret != LTTCOMM_OK) {
-               goto consumer_error;
-       }
-
-       session->consumer->enabled = 1;
-
-end:
-       return LTTCOMM_OK;
-
-consumer_error:
-       session_destroy(session);
-session_error:
-find_error:
-       return ret;
-}
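As the comments above describe, a NULL uris argument creates a session whose consumer setup is skipped entirely, while a non-NULL array configures and enables the consumer output. A hypothetical caller sketch of both paths, assuming creds was obtained from the client socket:

/* Hypothetical caller, for illustration only. */
static int create_session_examples(lttng_sock_cred *creds,
                struct lttng_uri *uris, size_t nb_uri)
{
        int ret;

        /* NULL URIs: no consumer is spawned for this session. */
        ret = cmd_create_session_uri("no-output-session", NULL, 0, creds);
        if (ret != LTTCOMM_OK) {
                return ret;
        }

        /* URIs provided: consumer output is set up and enabled. */
        return cmd_create_session_uri("traced-session", uris, nb_uri, creds);
}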
-
-/*
- * Command LTTNG_DESTROY_SESSION processed by the client thread.
- */
-static int cmd_destroy_session(struct ltt_session *session, char *name)
-{
-       int ret;
-
-       /* Safety net */
-       assert(session);
-
-       /* Clean kernel session teardown */
-       teardown_kernel_session(session);
-       /* UST session teardown */
-       teardown_ust_session(session);
-
-       /*
-        * Must notify the kernel thread here to update its poll set in order
-        * to remove the fd of the channel(s) just destroyed.
-        */
-       ret = notify_thread_pipe(kernel_poll_pipe[1]);
-       if (ret < 0) {
-               PERROR("write kernel poll pipe");
-       }
-
-       ret = session_destroy(session);
-
-       return ret;
-}
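The notification above boils down to writing a single byte to the write end of the kernel thread's wake-up pipe so its poll loop re-evaluates its fd set. A minimal sketch of that idiom with a generic pipe fd (not the project's notify_thread_pipe() implementation):

#include <errno.h>
#include <unistd.h>

/* Hypothetical helper, for illustration only: wake a poll()-based thread. */
static int wake_poll_thread(int pipe_write_fd)
{
        ssize_t ret;

        do {
                /* One dummy byte is enough to make poll() return. */
                ret = write(pipe_write_fd, "!", 1);
        } while (ret < 0 && errno == EINTR);

        return (ret == 1) ? 0 : -1;
}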
-
-/*
- * Command LTTNG_CALIBRATE processed by the client thread.
- */
-static int cmd_calibrate(int domain, struct lttng_calibrate *calibrate)
-{
-       int ret;
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-       {
-               struct lttng_kernel_calibrate kcalibrate;
-
-               kcalibrate.type = calibrate->type;
-               ret = kernel_calibrate(kernel_tracer_fd, &kcalibrate);
-               if (ret < 0) {
-                       ret = LTTCOMM_KERN_ENABLE_FAIL;
-                       goto error;
-               }
-               break;
-       }
-       case LTTNG_DOMAIN_UST:
-       {
-               struct lttng_ust_calibrate ucalibrate;
-
-               ucalibrate.type = calibrate->type;
-               ret = ust_app_calibrate_glb(&ucalibrate);
-               if (ret < 0) {
-                       ret = LTTCOMM_UST_CALIBRATE_FAIL;
-                       goto error;
-               }
-               break;
-       }
-       default:
-               ret = LTTCOMM_UND;
-               goto error;
-       }
-
-       ret = LTTCOMM_OK;
-
-error:
-       return ret;
-}
-
-/*
- * Command LTTNG_REGISTER_CONSUMER processed by the client thread.
- */
-static int cmd_register_consumer(struct ltt_session *session, int domain,
-               char *sock_path)
-{
-       int ret, sock;
-       struct consumer_socket *socket;
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-               /* Can't register a consumer if there is already one */
-               if (session->kernel_session->consumer_fds_sent != 0) {
-                       ret = LTTCOMM_KERN_CONSUMER_FAIL;
-                       goto error;
-               }
-
-               sock = lttcomm_connect_unix_sock(sock_path);
-               if (sock < 0) {
-                       ret = LTTCOMM_CONNECT_FAIL;
-                       goto error;
-               }
-
-               socket = consumer_allocate_socket(sock);
-               if (socket == NULL) {
-                       ret = LTTCOMM_FATAL;
-                       close(sock);
-                       goto error;
-               }
-
-               socket->lock = zmalloc(sizeof(pthread_mutex_t));
-               if (socket->lock == NULL) {
-                       PERROR("zmalloc pthread mutex");
-                       ret = LTTCOMM_FATAL;
-                       goto error;
-               }
-               pthread_mutex_init(socket->lock, NULL);
-
-               rcu_read_lock();
-               consumer_add_socket(socket, session->kernel_session->consumer);
-               rcu_read_unlock();
-
-               pthread_mutex_lock(&kconsumer_data.pid_mutex);
-               kconsumer_data.pid = -1;
-               pthread_mutex_unlock(&kconsumer_data.pid_mutex);
-
-               break;
-       default:
-               /* TODO: Userspace tracing */
-               ret = LTTCOMM_UND;
-               goto error;
-       }
-
-       ret = LTTCOMM_OK;
-
-error:
-       return ret;
-}
-
-/*
- * Command LTTNG_LIST_DOMAINS processed by the client thread.
- */
-static ssize_t cmd_list_domains(struct ltt_session *session,
-               struct lttng_domain **domains)
-{
-       int ret, index = 0;
-       ssize_t nb_dom = 0;
-
-       if (session->kernel_session != NULL) {
-               DBG3("Listing domains found kernel domain");
-               nb_dom++;
-       }
-
-       if (session->ust_session != NULL) {
-               DBG3("Listing domains found UST global domain");
-               nb_dom++;
-       }
+       lus->uid = session->uid;
+       lus->gid = session->gid;
+       lus->output_traces = session->output_traces;
+       lus->snapshot_mode = session->snapshot_mode;
+       lus->live_timer_interval = session->live_timer;
+       session->ust_session = lus;
 
-       *domains = zmalloc(nb_dom * sizeof(struct lttng_domain));
-       if (*domains == NULL) {
-               ret = -LTTCOMM_FATAL;
+       /* Copy session output to the newly created UST session */
+       ret = copy_session_consumer(domain->type, session);
+       if (ret != LTTNG_OK) {
                goto error;
        }
 
-       if (session->kernel_session != NULL) {
-               (*domains)[index].type = LTTNG_DOMAIN_KERNEL;
-               index++;
-       }
-
-       if (session->ust_session != NULL) {
-               (*domains)[index].type = LTTNG_DOMAIN_UST;
-               index++;
-       }
-
-       return nb_dom;
+       return LTTNG_OK;
 
 error:
+       free(lus);
+       session->ust_session = NULL;
        return ret;
 }
 
 /*
- * Command LTTNG_LIST_CHANNELS processed by the client thread.
+ * Create a kernel tracer session then create the default channel.
  */
-static ssize_t cmd_list_channels(int domain, struct ltt_session *session,
-               struct lttng_channel **channels)
+static int create_kernel_session(struct ltt_session *session)
 {
        int ret;
-       ssize_t nb_chan = 0;
-
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-               if (session->kernel_session != NULL) {
-                       nb_chan = session->kernel_session->channel_count;
-               }
-               DBG3("Number of kernel channels %zd", nb_chan);
-               break;
-       case LTTNG_DOMAIN_UST:
-               if (session->ust_session != NULL) {
-                       nb_chan = lttng_ht_get_count(
-                                       session->ust_session->domain_global.channels);
-               }
-               DBG3("Number of UST global channels %zd", nb_chan);
-               break;
-       default:
-               *channels = NULL;
-               ret = -LTTCOMM_UND;
-               goto error;
-       }
-
-       if (nb_chan > 0) {
-               *channels = zmalloc(nb_chan * sizeof(struct lttng_channel));
-               if (*channels == NULL) {
-                       ret = -LTTCOMM_FATAL;
-                       goto error;
-               }
-
-               list_lttng_channels(domain, session, *channels);
-       } else {
-               *channels = NULL;
-       }
-
-       return nb_chan;
-
-error:
-       return ret;
-}
 
-/*
- * Command LTTNG_LIST_EVENTS processed by the client thread.
- */
-static ssize_t cmd_list_events(int domain, struct ltt_session *session,
-               char *channel_name, struct lttng_event **events)
-{
-       int ret = 0;
-       ssize_t nb_event = 0;
+       DBG("Creating kernel session");
 
-       switch (domain) {
-       case LTTNG_DOMAIN_KERNEL:
-               if (session->kernel_session != NULL) {
-                       nb_event = list_lttng_kernel_events(channel_name,
-                                       session->kernel_session, events);
-               }
-               break;
-       case LTTNG_DOMAIN_UST:
-       {
-               if (session->ust_session != NULL) {
-                       nb_event = list_lttng_ust_global_events(channel_name,
-                                       &session->ust_session->domain_global, events);
-               }
-               break;
-       }
-       default:
-               ret = -LTTCOMM_UND;
+       ret = kernel_create_session(session, kernel_tracer_fd);
+       if (ret < 0) {
+               ret = LTTNG_ERR_KERN_SESS_FAIL;
                goto error;
        }
 
-       ret = nb_event;
-
-error:
-       return ret;
-}
-
-/*
- * Command LTTNG_DISABLE_CONSUMER processed by the client thread.
- */
-static int cmd_disable_consumer(int domain, struct ltt_session *session)
-{
-       int ret;
-       struct ltt_kernel_session *ksess = session->kernel_session;
-       struct ltt_ust_session *usess = session->ust_session;
-       struct consumer_output *consumer;
-
-       assert(session);
+       /* Code flow safety */
+       assert(session->kernel_session);
 
-       if (session->enabled) {
-               /* Can't disable consumer on an already started session */
-               ret = LTTCOMM_TRACE_ALREADY_STARTED;
+       /* Copy session output to the newly created Kernel session */
+       ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
+       if (ret != LTTNG_OK) {
                goto error;
        }
 
-       if (!session->start_consumer) {
-               ret = LTTCOMM_NO_CONSUMER;
-               goto error;
+       /* Create directory(ies) on local filesystem. */
+       if (session->kernel_session->consumer->type == CONSUMER_DST_LOCAL &&
+                       strlen(session->kernel_session->consumer->dst.trace_path) > 0) {
+               ret = run_as_mkdir_recursive(
+                               session->kernel_session->consumer->dst.trace_path,
+                               S_IRWXU | S_IRWXG, session->uid, session->gid);
+               if (ret < 0) {
+                       if (ret != -EEXIST) {
+                               ERR("Trace directory creation error");
+                               goto error;
+                       }
+               }
        }
 
-       switch (domain) {
-       case 0:
-               DBG("Disable tracing session %s consumer", session->name);
-               consumer = session->consumer;
-               break;
-       case LTTNG_DOMAIN_KERNEL:
-               /* Code flow error if we don't have a kernel session here. */
-               assert(ksess);
-
-               DBG("Disabling kernel consumer");
-               consumer = ksess->consumer;
-
-               break;
-       case LTTNG_DOMAIN_UST:
-               /* Code flow error if we don't have a UST session here. */
-               assert(usess);
-
-               DBG("Disabling UST consumer");
-               consumer = usess->consumer;
-
-               break;
-       default:
-               ret = LTTCOMM_UNKNOWN_DOMAIN;
-               goto error;
-       }
+       session->kernel_session->uid = session->uid;
+       session->kernel_session->gid = session->gid;
+       session->kernel_session->output_traces = session->output_traces;
+       session->kernel_session->snapshot_mode = session->snapshot_mode;
 
-       if (consumer) {
-               consumer->enabled = 0;
-               /* Success at this point */
-               ret = LTTCOMM_OK;
-       } else {
-               ret = LTTCOMM_NO_CONSUMER;
-       }
+       return LTTNG_OK;
 
 error:
+       trace_kernel_destroy_session(session->kernel_session);
+       session->kernel_session = NULL;
        return ret;
 }
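The -EEXIST handling above treats an already existing trace directory as success. The same idiom with plain mkdir(), independent of the run_as_mkdir_recursive() helper, as a minimal sketch:

#include <errno.h>
#include <sys/stat.h>
#include <sys/types.h>

/* Hypothetical helper, for illustration only. */
static int mkdir_allow_existing(const char *path, mode_t mode)
{
        if (mkdir(path, mode) < 0 && errno != EEXIST) {
                return -errno;
        }
        return 0;
}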
 
 /*
- * Command LTTNG_ENABLE_CONSUMER processed by the client thread.
+ * Count the number of sessions permitted by uid/gid.
  */
-static int cmd_enable_consumer(int domain, struct ltt_session *session)
+static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
 {
-       int ret;
-       struct ltt_kernel_session *ksess = session->kernel_session;
-       struct ltt_ust_session *usess = session->ust_session;
-       struct consumer_output *consumer = NULL;
-
-       assert(session);
-
-       /* Can't enable consumer after session started. */
-       if (session->enabled) {
-               ret = LTTCOMM_TRACE_ALREADY_STARTED;
-               goto error;
-       }
-
-       if (!session->start_consumer) {
-               ret = LTTCOMM_NO_CONSUMER;
-               goto error;
-       }
-
-       switch (domain) {
-       case 0:
-               assert(session->consumer);
-               consumer = session->consumer;
-               break;
-       case LTTNG_DOMAIN_KERNEL:
-               /* Code flow error if we don't have a kernel session here. */
-               assert(ksess);
-
-               /*
-                * Check if we have already sent fds to the consumer. In that case,
-                * the enable-consumer command can't be used because a start trace
-                * had previously occurred.
-                */
-               if (ksess->consumer_fds_sent) {
-                       ret = LTTCOMM_ENABLE_CONSUMER_FAIL;
-                       goto error;
-               }
-
-               consumer = ksess->tmp_consumer;
-               if (consumer == NULL) {
-                       ret = LTTCOMM_OK;
-                       /* No temp. consumer output exists. Using the current one. */
-                       DBG3("No temporary consumer. Using default");
-                       consumer = ksess->consumer;
-                       goto error;
-               }
-
-               switch (consumer->type) {
-               case CONSUMER_DST_LOCAL:
-                       DBG2("Consumer output is local. Creating directory(ies)");
-
-                       /* Create directory(ies) */
-                       ret = run_as_mkdir_recursive(consumer->dst.trace_path,
-                                       S_IRWXU | S_IRWXG, session->uid, session->gid);
-                       if (ret < 0) {
-                               if (ret != -EEXIST) {
-                                       ERR("Trace directory creation error");
-                                       ret = LTTCOMM_FATAL;
-                                       goto error;
-                               }
-                       }
-                       break;
-               case CONSUMER_DST_NET:
-                       DBG2("Consumer output is network. Validating URIs");
-                       /* Validate if we have both control and data path set. */
-                       if (!consumer->dst.net.control_isset) {
-                               ret = LTTCOMM_URL_CTRL_MISS;
-                               goto error;
-                       }
-
-                       if (!consumer->dst.net.data_isset) {
-                               ret = LTTCOMM_URL_DATA_MISS;
-                               goto error;
-                       }
-
-                       /* Check established network session state */
-                       if (session->net_handle == 0) {
-                               ret = LTTCOMM_ENABLE_CONSUMER_FAIL;
-                               ERR("Session network handle is not set on enable-consumer");
-                               goto error;
-                       }
-
-                       break;
-               }
-
-               /* Append default kernel trace dir to subdir */
-               strncat(ksess->consumer->subdir, DEFAULT_KERNEL_TRACE_DIR,
-                               sizeof(ksess->consumer->subdir));
-
-               /*
-                * @session-lock
-                * This is race free for now since the session lock is acquired before
-                * ending up in this function. No other threads can access this kernel
-                * session without this lock, hence freeing the consumer output object
-                * is valid.
-                */
-               rcu_read_lock();
-               consumer_destroy_output(ksess->consumer);
-               rcu_read_unlock();
-               ksess->consumer = consumer;
-               ksess->tmp_consumer = NULL;
-
-               break;
-       case LTTNG_DOMAIN_UST:
-               /* Code flow error if we don't have a UST session here. */
-               assert(usess);
+       unsigned int i = 0;
+       struct ltt_session *session;
 
+       DBG("Counting number of available session for UID %d GID %d",
+                       uid, gid);
+       cds_list_for_each_entry(session, &session_list_ptr->head, list) {
                /*
-                * Check if we have already sent fds to the consumer. In that case,
-                * the enable-consumer command can't be used because a start trace
-                * had previously occurred.
+                * Only list the sessions the user can control.
                 */
-               if (usess->start_trace) {
-                       ret = LTTCOMM_ENABLE_CONSUMER_FAIL;
-                       goto error;
-               }
-
-               consumer = usess->tmp_consumer;
-               if (consumer == NULL) {
-                       ret = LTTCOMM_OK;
-                       /* No temp. consumer output exists. Using the current one. */
-                       DBG3("No temporary consumer. Using default");
-                       consumer = usess->consumer;
-                       goto error;
-               }
-
-               switch (consumer->type) {
-               case CONSUMER_DST_LOCAL:
-                       DBG2("Consumer output is local. Creating directory(ies)");
-
-                       /* Create directory(ies) */
-                       ret = run_as_mkdir_recursive(consumer->dst.trace_path,
-                                       S_IRWXU | S_IRWXG, session->uid, session->gid);
-                       if (ret < 0) {
-                               if (ret != -EEXIST) {
-                                       ERR("Trace directory creation error");
-                                       ret = LTTCOMM_FATAL;
-                                       goto error;
-                               }
-                       }
-                       break;
-               case CONSUMER_DST_NET:
-                       DBG2("Consumer output is network. Validating URIs");
-                       /* Validate if we have both control and data path set. */
-                       if (!consumer->dst.net.control_isset) {
-                               ret = LTTCOMM_URL_CTRL_MISS;
-                               goto error;
-                       }
-
-                       if (!consumer->dst.net.data_isset) {
-                               ret = LTTCOMM_URL_DATA_MISS;
-                               goto error;
-                       }
-
-                       /* Check established network session state */
-                       if (session->net_handle == 0) {
-                               ret = LTTCOMM_ENABLE_CONSUMER_FAIL;
-                               DBG2("Session network handle is not set on enable-consumer");
-                               goto error;
-                       }
-
-                       if (consumer->net_seq_index == -1) {
-                               ret = LTTCOMM_ENABLE_CONSUMER_FAIL;
-                               DBG2("Network index is not set on the consumer");
-                               goto error;
-                       }
-
-                       break;
+               if (!session_access_ok(session, uid, gid)) {
+                       continue;
                }
-
-               /* Append default UST trace dir to subdir */
-               strncat(usess->consumer->subdir, DEFAULT_UST_TRACE_DIR,
-                               sizeof(usess->consumer->subdir));
-
-               /*
-                * @session-lock
-                * This is race free for now since the session lock is acquired before
-                * ending up in this function. No other threads can access this UST
-                * session without this lock, hence freeing the consumer output object
-                * is valid.
-                */
-               rcu_read_lock();
-               consumer_destroy_output(usess->consumer);
-               rcu_read_unlock();
-               usess->consumer = consumer;
-               usess->tmp_consumer = NULL;
-
-               break;
-       }
-
-       /* Enable it */
-       if (consumer) {
-               consumer->enabled = 1;
-               /* Success at this point */
-               ret = LTTCOMM_OK;
-       } else {
-               /* Should not really happen... */
-               ret = LTTCOMM_NO_CONSUMER;
+               i++;
        }
-
-error:
-       return ret;
+       return i;
 }
 
 /*
@@ -4382,11 +2569,13 @@ error:
  * Return any error encountered or 0 for success.
  *
  * "sock" is only used for special-case var. len data.
+ *
+ * Should *NOT* be called with RCU read-side lock held.
  */
 static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
                int *sock_error)
 {
-       int ret = LTTCOMM_OK;
+       int ret = LTTNG_OK;
        int need_tracing_session = 1;
        int need_domain;
 
@@ -4396,11 +2585,18 @@ static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
 
        switch (cmd_ctx->lsm->cmd_type) {
        case LTTNG_CREATE_SESSION:
+       case LTTNG_CREATE_SESSION_SNAPSHOT:
+       case LTTNG_CREATE_SESSION_LIVE:
        case LTTNG_DESTROY_SESSION:
        case LTTNG_LIST_SESSIONS:
        case LTTNG_LIST_DOMAINS:
        case LTTNG_START_TRACE:
        case LTTNG_STOP_TRACE:
+       case LTTNG_DATA_PENDING:
+       case LTTNG_SNAPSHOT_ADD_OUTPUT:
+       case LTTNG_SNAPSHOT_DEL_OUTPUT:
+       case LTTNG_SNAPSHOT_LIST_OUTPUT:
+       case LTTNG_SNAPSHOT_RECORD:
                need_domain = 0;
                break;
        default:
@@ -4410,9 +2606,9 @@ static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
        if (opt_no_kernel && need_domain
                        && cmd_ctx->lsm->domain.type == LTTNG_DOMAIN_KERNEL) {
                if (!is_root) {
-                       ret = LTTCOMM_NEED_ROOT_SESSIOND;
+                       ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
                } else {
-                       ret = LTTCOMM_KERN_NA;
+                       ret = LTTNG_ERR_KERN_NA;
                }
                goto error;
        }
@@ -4421,7 +2617,8 @@ static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
        if (cmd_ctx->lsm->cmd_type == LTTNG_REGISTER_CONSUMER) {
                pthread_mutex_lock(&kconsumer_data.pid_mutex);
                if (kconsumer_data.pid > 0) {
-                       ret = LTTCOMM_KERN_CONSUMER_FAIL;
+                       ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
+                       pthread_mutex_unlock(&kconsumer_data.pid_mutex);
                        goto error;
                }
                pthread_mutex_unlock(&kconsumer_data.pid_mutex);
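The unlock added above closes an error path that previously jumped to the error label with pid_mutex still held. The general pattern, shown as a standalone sketch with hypothetical names:

#include <pthread.h>
#include <sys/types.h>

/*
 * Hypothetical sketch, for illustration only: every early exit taken while
 * the mutex is held must release it before jumping to the error label.
 */
static int check_consumer_not_registered(pthread_mutex_t *pid_mutex, pid_t pid)
{
        int ret = 0;

        pthread_mutex_lock(pid_mutex);
        if (pid > 0) {
                ret = -1;
                pthread_mutex_unlock(pid_mutex);
                goto error;
        }
        pthread_mutex_unlock(pid_mutex);

error:
        return ret;
}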
@@ -4452,6 +2649,8 @@ static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
        /* Commands that DO NOT need a session. */
        switch (cmd_ctx->lsm->cmd_type) {
        case LTTNG_CREATE_SESSION:
+       case LTTNG_CREATE_SESSION_SNAPSHOT:
+       case LTTNG_CREATE_SESSION_LIVE:
        case LTTNG_CALIBRATE:
        case LTTNG_LIST_SESSIONS:
        case LTTNG_LIST_TRACEPOINTS:
@@ -4468,12 +2667,7 @@ static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
                session_lock_list();
                cmd_ctx->session = session_find_by_name(cmd_ctx->lsm->session.name);
                if (cmd_ctx->session == NULL) {
-                       if (cmd_ctx->lsm->session.name != NULL) {
-                               ret = LTTCOMM_SESS_NOT_FOUND;
-                       } else {
-                               /* If no session name specified */
-                               ret = LTTCOMM_SELECT_SESS;
-                       }
+                       ret = LTTNG_ERR_SESS_NOT_FOUND;
                        goto error;
                } else {
                        /* Acquire lock for the session */
@@ -4492,7 +2686,7 @@ static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
        switch (cmd_ctx->lsm->domain.type) {
        case LTTNG_DOMAIN_KERNEL:
                if (!is_root) {
-                       ret = LTTCOMM_NEED_ROOT_SESSIOND;
+                       ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
                        goto error;
                }
 
@@ -4507,7 +2701,7 @@ static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
 
                /* Consumer is in an ERROR state. Report back to client */
                if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
-                       ret = LTTCOMM_NO_KERNCONSUMERD;
+                       ret = LTTNG_ERR_NO_KERNCONSUMERD;
                        goto error;
                }
 
@@ -4516,7 +2710,7 @@ static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
                        if (cmd_ctx->session->kernel_session == NULL) {
                                ret = create_kernel_session(cmd_ctx->session);
                                if (ret < 0) {
-                                       ret = LTTCOMM_KERN_SESS_FAIL;
+                                       ret = LTTNG_ERR_KERN_SESS_FAIL;
                                        goto error;
                                }
                        }
@@ -4524,12 +2718,11 @@ static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
                        /* Start the kernel consumer daemon */
                        pthread_mutex_lock(&kconsumer_data.pid_mutex);
                        if (kconsumer_data.pid == 0 &&
-                                       cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER &&
-                                       cmd_ctx->session->start_consumer) {
+                                       cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
                                pthread_mutex_unlock(&kconsumer_data.pid_mutex);
                                ret = start_consumerd(&kconsumer_data);
                                if (ret < 0) {
-                                       ret = LTTCOMM_KERN_CONSUMER_FAIL;
+                                       ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
                                        goto error;
                                }
                                uatomic_set(&kernel_consumerd_state, CONSUMER_STARTED);
@@ -4551,9 +2744,13 @@ static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
                break;
        case LTTNG_DOMAIN_UST:
        {
+               if (!ust_app_supported()) {
+                       ret = LTTNG_ERR_NO_UST;
+                       goto error;
+               }
                /* Consumer is in an ERROR state. Report back to client */
                if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
-                       ret = LTTCOMM_NO_USTCONSUMERD;
+                       ret = LTTNG_ERR_NO_USTCONSUMERD;
                        goto error;
                }
 
@@ -4562,7 +2759,7 @@ static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
                        if (cmd_ctx->session->ust_session == NULL) {
                                ret = create_ust_session(cmd_ctx->session,
                                                &cmd_ctx->lsm->domain);
-                               if (ret != LTTCOMM_OK) {
+                               if (ret != LTTNG_OK) {
                                        goto error;
                                }
                        }
@@ -4572,12 +2769,11 @@ static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
                        pthread_mutex_lock(&ustconsumer64_data.pid_mutex);
                        if (consumerd64_bin[0] != '\0' &&
                                        ustconsumer64_data.pid == 0 &&
-                                       cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER &&
-                                       cmd_ctx->session->start_consumer) {
+                                       cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
                                pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
                                ret = start_consumerd(&ustconsumer64_data);
                                if (ret < 0) {
-                                       ret = LTTCOMM_UST_CONSUMER64_FAIL;
+                                       ret = LTTNG_ERR_UST_CONSUMER64_FAIL;
                                        uatomic_set(&ust_consumerd64_fd, -EINVAL);
                                        goto error;
                                }
@@ -4601,12 +2797,11 @@ static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
                        /* 32-bit */
                        if (consumerd32_bin[0] != '\0' &&
                                        ustconsumer32_data.pid == 0 &&
-                                       cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER &&
-                                       cmd_ctx->session->start_consumer) {
+                                       cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
                                pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
                                ret = start_consumerd(&ustconsumer32_data);
                                if (ret < 0) {
-                                       ret = LTTCOMM_UST_CONSUMER32_FAIL;
+                                       ret = LTTNG_ERR_UST_CONSUMER32_FAIL;
                                        uatomic_set(&ust_consumerd32_fd, -EINVAL);
                                        goto error;
                                }
@@ -4640,13 +2835,13 @@ skip_domain:
                switch (cmd_ctx->lsm->domain.type) {
                case LTTNG_DOMAIN_UST:
                        if (uatomic_read(&ust_consumerd_state) != CONSUMER_STARTED) {
-                               ret = LTTCOMM_NO_USTCONSUMERD;
+                               ret = LTTNG_ERR_NO_USTCONSUMERD;
                                goto error;
                        }
                        break;
                case LTTNG_DOMAIN_KERNEL:
                        if (uatomic_read(&kernel_consumerd_state) != CONSUMER_STARTED) {
-                               ret = LTTCOMM_NO_KERNCONSUMERD;
+                               ret = LTTNG_ERR_NO_KERNCONSUMERD;
                                goto error;
                        }
                        break;
@@ -4661,7 +2856,22 @@ skip_domain:
                if (!session_access_ok(cmd_ctx->session,
                                LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
                                LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds))) {
-                       ret = LTTCOMM_EPERM;
+                       ret = LTTNG_ERR_EPERM;
+                       goto error;
+               }
+       }
+
+       /*
+        * Send relayd information to consumer as soon as we have a domain and a
+        * session defined.
+        */
+       if (cmd_ctx->session && need_domain) {
+               /*
+                * Setup relayd if not done yet. If the relayd information was already
+                * sent to the consumer, this call will gracefully return.
+                */
+               ret = cmd_setup_relayd(cmd_ctx->session);
+               if (ret != LTTNG_OK) {
                        goto error;
                }
        }
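
The comment above states the contract of cmd_setup_relayd(): calling it more than once is harmless because it returns early when the relayd sockets have already been handed to the consumer. A plausible shape for such a guard, using a hypothetical flag (the real bookkeeping is inside cmd_setup_relayd() and is not shown in this diff):

    /* Hypothetical sketch of a "set up once, then no-op" helper. */
    struct session_like {
            int relayd_configured;          /* stand-in for the real per-output state */
    };

    static int setup_relayd_once(struct session_like *session)
    {
            if (session->relayd_configured) {
                    return LTTNG_OK;        /* already sent: gracefully return */
            }
            /* ... push the relayd control/data sockets to the consumer here ... */
            session->relayd_configured = 1;
            return LTTNG_OK;
    }
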
@@ -4672,8 +2882,7 @@ skip_domain:
        {
                ret = cmd_add_context(cmd_ctx->session, cmd_ctx->lsm->domain.type,
                                cmd_ctx->lsm->u.context.channel_name,
-                               cmd_ctx->lsm->u.context.event_name,
-                               &cmd_ctx->lsm->u.context.ctx);
+                               &cmd_ctx->lsm->u.context.ctx, kernel_poll_pipe[1]);
                break;
        }
        case LTTNG_DISABLE_CHANNEL:
@@ -4697,55 +2906,26 @@ skip_domain:
                                cmd_ctx->lsm->u.disable.channel_name);
                break;
        }
-       case LTTNG_DISABLE_CONSUMER:
-       {
-               ret = cmd_disable_consumer(cmd_ctx->lsm->domain.type, cmd_ctx->session);
-               break;
-       }
        case LTTNG_ENABLE_CHANNEL:
        {
-               ret = cmd_enable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
-                               &cmd_ctx->lsm->u.channel.chan);
-               break;
-       }
-       case LTTNG_ENABLE_CONSUMER:
-       {
-               /*
-                * XXX: 0 means that this URI should be applied on the session. Should
-                * be a DOMAIN enuam.
-                */
-               ret = cmd_enable_consumer(cmd_ctx->lsm->domain.type, cmd_ctx->session);
-               if (ret != LTTCOMM_OK) {
-                       goto error;
-               }
-
-               if (cmd_ctx->lsm->domain.type == 0) {
-                       /* Add the URI for the UST session if a consumer is present. */
-                       if (cmd_ctx->session->ust_session &&
-                                       cmd_ctx->session->ust_session->consumer) {
-                               ret = cmd_enable_consumer(LTTNG_DOMAIN_UST, cmd_ctx->session);
-                       } else if (cmd_ctx->session->kernel_session &&
-                                       cmd_ctx->session->kernel_session->consumer) {
-                               ret = cmd_enable_consumer(LTTNG_DOMAIN_KERNEL,
-                                               cmd_ctx->session);
-                       }
-               }
+               ret = cmd_enable_channel(cmd_ctx->session, &cmd_ctx->lsm->domain,
+                               &cmd_ctx->lsm->u.channel.chan, kernel_poll_pipe[1]);
                break;
        }
        case LTTNG_ENABLE_EVENT:
        {
-               ret = cmd_enable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
+               ret = cmd_enable_event(cmd_ctx->session, &cmd_ctx->lsm->domain,
                                cmd_ctx->lsm->u.enable.channel_name,
-                               &cmd_ctx->lsm->u.enable.event);
+                               &cmd_ctx->lsm->u.enable.event, NULL, kernel_poll_pipe[1]);
                break;
        }
        case LTTNG_ENABLE_ALL_EVENT:
        {
                DBG("Enabling all events");
 
-               ret = cmd_enable_event_all(cmd_ctx->session, cmd_ctx->lsm->domain.type,
+               ret = cmd_enable_event_all(cmd_ctx->session, &cmd_ctx->lsm->domain,
                                cmd_ctx->lsm->u.enable.channel_name,
-                               cmd_ctx->lsm->u.enable.event.type);
+                               cmd_ctx->lsm->u.enable.event.type, NULL, kernel_poll_pipe[1]);
                break;
        }
        case LTTNG_LIST_TRACEPOINTS:
@@ -4755,6 +2935,7 @@ skip_domain:
 
                nb_events = cmd_list_tracepoints(cmd_ctx->lsm->domain.type, &events);
                if (nb_events < 0) {
+                       /* Return value is a negative lttng_error_code. */
                        ret = -nb_events;
                        goto error;
                }
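
The added comment documents a convention followed by the cmd_list_*() helpers in this switch: a non-negative return is an element count, a negative return is the negated lttng_error_code, which the caller flips back before replying to the client. Schematically, with list_things() as a hypothetical stand-in:

    /* Hypothetical caller illustrating the sign convention. */
    static ssize_t list_things(void **out);         /* >= 0: count, < 0: -LTTNG_ERR_* */

    static int handle_list(void **out)
    {
            ssize_t nb = list_things(out);

            if (nb < 0) {
                    return (int) -nb;       /* back to a positive LTTNG_ERR_* code */
            }
            /* ... 'nb' valid entries are available in *out ... */
            return LTTNG_OK;
    }
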
@@ -4775,7 +2956,7 @@ skip_domain:
 
                free(events);
 
-               ret = LTTCOMM_OK;
+               ret = LTTNG_OK;
                break;
        }
        case LTTNG_LIST_TRACEPOINT_FIELDS:
@@ -4786,6 +2967,7 @@ skip_domain:
                nb_fields = cmd_list_tracepoint_fields(cmd_ctx->lsm->domain.type,
                                &fields);
                if (nb_fields < 0) {
+                       /* Return value is a negative lttng_error_code. */
                        ret = -nb_fields;
                        goto error;
                }
@@ -4807,7 +2989,7 @@ skip_domain:
 
                free(fields);
 
-               ret = LTTCOMM_OK;
+               ret = LTTNG_OK;
                break;
        }
        case LTTNG_SET_CONSUMER_URI:
@@ -4819,13 +3001,13 @@ skip_domain:
                len = nb_uri * sizeof(struct lttng_uri);
 
                if (nb_uri == 0) {
-                       ret = LTTCOMM_INVALID;
+                       ret = LTTNG_ERR_INVALID;
                        goto error;
                }
 
                uris = zmalloc(len);
                if (uris == NULL) {
-                       ret = LTTCOMM_FATAL;
+                       ret = LTTNG_ERR_FATAL;
                        goto error;
                }
 
@@ -4835,13 +3017,15 @@ skip_domain:
                if (ret <= 0) {
                        DBG("No URIs received from client... continuing");
                        *sock_error = 1;
-                       ret = LTTCOMM_SESSION_FAIL;
+                       ret = LTTNG_ERR_SESSION_FAIL;
+                       free(uris);
                        goto error;
                }
 
                ret = cmd_set_consumer_uri(cmd_ctx->lsm->domain.type, cmd_ctx->session,
                                nb_uri, uris);
-               if (ret != LTTCOMM_OK) {
+               if (ret != LTTNG_OK) {
+                       free(uris);
                        goto error;
                }
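
The free(uris) calls added on both failure branches close a leak: once zmalloc() has succeeded, every exit path that does not hand the buffer off must release it. The same idea reduced to a generic sketch (recv_fn stands in for lttcomm_recv_unix_sock):

    #include <stdlib.h>
    #include <sys/types.h>

    /* Receive 'len' bytes into a freshly allocated buffer.  On any failure the
     * buffer is freed here, so the caller never has to clean up after an error. */
    static int recv_payload(int sock, size_t len, void **out,
                    ssize_t (*recv_fn)(int, void *, size_t))
    {
            void *buf = calloc(1, len);

            if (!buf) {
                    return -1;
            }
            if (recv_fn(sock, buf, len) <= 0) {
                    free(buf);              /* error path owns the cleanup */
                    return -1;
            }
            *out = buf;                     /* success: ownership moves to the caller */
            return 0;
    }
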
 
@@ -4862,6 +3046,8 @@ skip_domain:
                        }
                }
 
+               free(uris);
+
                break;
        }
        case LTTNG_START_TRACE:
@@ -4885,7 +3071,7 @@ skip_domain:
                if (nb_uri > 0) {
                        uris = zmalloc(len);
                        if (uris == NULL) {
-                               ret = LTTCOMM_FATAL;
+                               ret = LTTNG_ERR_FATAL;
                                goto error;
                        }
 
@@ -4895,26 +3081,29 @@ skip_domain:
                        if (ret <= 0) {
                                DBG("No URIs received from client... continuing");
                                *sock_error = 1;
-                               ret = LTTCOMM_SESSION_FAIL;
+                               ret = LTTNG_ERR_SESSION_FAIL;
+                               free(uris);
                                goto error;
                        }
 
                        if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
                                DBG("Creating session with ONE network URI is a bad call");
-                               ret = LTTCOMM_SESSION_FAIL;
+                               ret = LTTNG_ERR_SESSION_FAIL;
+                               free(uris);
                                goto error;
                        }
                }
 
                ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris, nb_uri,
-                       &cmd_ctx->creds);
+                       &cmd_ctx->creds, 0);
+
+               free(uris);
 
                break;
        }
        case LTTNG_DESTROY_SESSION:
        {
-               ret = cmd_destroy_session(cmd_ctx->session,
-                               cmd_ctx->lsm->session.name);
+               ret = cmd_destroy_session(cmd_ctx->session, kernel_poll_pipe[1]);
 
                /* Set session to NULL so we do not unlock it after free. */
                cmd_ctx->session = NULL;
@@ -4927,12 +3116,14 @@ skip_domain:
 
                nb_dom = cmd_list_domains(cmd_ctx->session, &domains);
                if (nb_dom < 0) {
+                       /* Return value is a negative lttng_error_code. */
                        ret = -nb_dom;
                        goto error;
                }
 
                ret = setup_lttng_msg(cmd_ctx, nb_dom * sizeof(struct lttng_domain));
                if (ret < 0) {
+                       free(domains);
                        goto setup_error;
                }
 
@@ -4942,7 +3133,7 @@ skip_domain:
 
                free(domains);
 
-               ret = LTTCOMM_OK;
+               ret = LTTNG_OK;
                break;
        }
        case LTTNG_LIST_CHANNELS:
@@ -4953,12 +3144,14 @@ skip_domain:
                nb_chan = cmd_list_channels(cmd_ctx->lsm->domain.type,
                                cmd_ctx->session, &channels);
                if (nb_chan < 0) {
+                       /* Return value is a negative lttng_error_code. */
                        ret = -nb_chan;
                        goto error;
                }
 
                ret = setup_lttng_msg(cmd_ctx, nb_chan * sizeof(struct lttng_channel));
                if (ret < 0) {
+                       free(channels);
                        goto setup_error;
                }
 
@@ -4968,7 +3161,7 @@ skip_domain:
 
                free(channels);
 
-               ret = LTTCOMM_OK;
+               ret = LTTNG_OK;
                break;
        }
        case LTTNG_LIST_EVENTS:
@@ -4979,12 +3172,14 @@ skip_domain:
                nb_event = cmd_list_events(cmd_ctx->lsm->domain.type, cmd_ctx->session,
                                cmd_ctx->lsm->u.list.channel_name, &events);
                if (nb_event < 0) {
+                       /* Return value is a negative lttng_error_code. */
                        ret = -nb_event;
                        goto error;
                }
 
                ret = setup_lttng_msg(cmd_ctx, nb_event * sizeof(struct lttng_event));
                if (ret < 0) {
+                       free(events);
                        goto setup_error;
                }
 
@@ -4994,7 +3189,7 @@ skip_domain:
 
                free(events);
 
-               ret = LTTCOMM_OK;
+               ret = LTTNG_OK;
                break;
        }
        case LTTNG_LIST_SESSIONS:
@@ -5013,13 +3208,13 @@ skip_domain:
                }
 
                /* Filled the session array */
-               list_lttng_sessions((struct lttng_session *)(cmd_ctx->llm->payload),
+               cmd_list_lttng_sessions((struct lttng_session *)(cmd_ctx->llm->payload),
                        LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
                        LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
 
                session_unlock_list();
 
-               ret = LTTCOMM_OK;
+               ret = LTTNG_OK;
                break;
        }
        case LTTNG_CALIBRATE:
@@ -5030,49 +3225,207 @@ skip_domain:
        }
        case LTTNG_REGISTER_CONSUMER:
        {
+               struct consumer_data *cdata;
+
+               switch (cmd_ctx->lsm->domain.type) {
+               case LTTNG_DOMAIN_KERNEL:
+                       cdata = &kconsumer_data;
+                       break;
+               default:
+                       ret = LTTNG_ERR_UND;
+                       goto error;
+               }
+
                ret = cmd_register_consumer(cmd_ctx->session, cmd_ctx->lsm->domain.type,
-                               cmd_ctx->lsm->u.reg.path);
+                               cmd_ctx->lsm->u.reg.path, cdata);
                break;
        }
-       case LTTNG_SET_FILTER:
+       case LTTNG_ENABLE_EVENT_WITH_FILTER:
        {
                struct lttng_filter_bytecode *bytecode;
 
-               if (cmd_ctx->lsm->u.filter.bytecode_len > 65336) {
-                       ret = LTTCOMM_FILTER_INVAL;
+               if (cmd_ctx->lsm->u.enable.bytecode_len > LTTNG_FILTER_MAX_LEN) {
+                       ret = LTTNG_ERR_FILTER_INVAL;
+                       goto error;
+               }
+               if (cmd_ctx->lsm->u.enable.bytecode_len == 0) {
+                       ret = LTTNG_ERR_FILTER_INVAL;
                        goto error;
                }
-               bytecode = zmalloc(cmd_ctx->lsm->u.filter.bytecode_len);
+               bytecode = zmalloc(cmd_ctx->lsm->u.enable.bytecode_len);
                if (!bytecode) {
-                       ret = LTTCOMM_FILTER_NOMEM;
+                       ret = LTTNG_ERR_FILTER_NOMEM;
                        goto error;
                }
                /* Receive var. len. data */
                DBG("Receiving var len data from client ...");
                ret = lttcomm_recv_unix_sock(sock, bytecode,
-                               cmd_ctx->lsm->u.filter.bytecode_len);
+                               cmd_ctx->lsm->u.enable.bytecode_len);
                if (ret <= 0) {
                        DBG("Nothing recv() from client var len data... continuing");
                        *sock_error = 1;
-                       ret = LTTCOMM_FILTER_INVAL;
+                       ret = LTTNG_ERR_FILTER_INVAL;
                        goto error;
                }
 
                if (bytecode->len + sizeof(*bytecode)
-                               != cmd_ctx->lsm->u.filter.bytecode_len) {
+                               != cmd_ctx->lsm->u.enable.bytecode_len) {
                        free(bytecode);
-                       ret = LTTCOMM_FILTER_INVAL;
+                       ret = LTTNG_ERR_FILTER_INVAL;
+                       goto error;
+               }
+
+               ret = cmd_enable_event(cmd_ctx->session, &cmd_ctx->lsm->domain,
+                               cmd_ctx->lsm->u.enable.channel_name,
+                               &cmd_ctx->lsm->u.enable.event, bytecode, kernel_poll_pipe[1]);
+               break;
+       }
+       case LTTNG_DATA_PENDING:
+       {
+               ret = cmd_data_pending(cmd_ctx->session);
+               break;
+       }
+       case LTTNG_SNAPSHOT_ADD_OUTPUT:
+       {
+               struct lttcomm_lttng_output_id reply;
+
+               ret = cmd_snapshot_add_output(cmd_ctx->session,
+                               &cmd_ctx->lsm->u.snapshot_output.output, &reply.id);
+               if (ret != LTTNG_OK) {
+                       goto error;
+               }
+
+               ret = setup_lttng_msg(cmd_ctx, sizeof(reply));
+               if (ret < 0) {
+                       goto setup_error;
+               }
+
+               /* Copy output list into message payload */
+               memcpy(cmd_ctx->llm->payload, &reply, sizeof(reply));
+               ret = LTTNG_OK;
+               break;
+       }
+       case LTTNG_SNAPSHOT_DEL_OUTPUT:
+       {
+               ret = cmd_snapshot_del_output(cmd_ctx->session,
+                               &cmd_ctx->lsm->u.snapshot_output.output);
+               break;
+       }
+       case LTTNG_SNAPSHOT_LIST_OUTPUT:
+       {
+               ssize_t nb_output;
+               struct lttng_snapshot_output *outputs = NULL;
+
+               nb_output = cmd_snapshot_list_outputs(cmd_ctx->session, &outputs);
+               if (nb_output < 0) {
+                       ret = -nb_output;
                        goto error;
                }
 
-               ret = cmd_set_filter(cmd_ctx->session, cmd_ctx->lsm->domain.type,
-                               cmd_ctx->lsm->u.filter.channel_name,
-                               cmd_ctx->lsm->u.filter.event_name,
-                               bytecode);
+               ret = setup_lttng_msg(cmd_ctx,
+                               nb_output * sizeof(struct lttng_snapshot_output));
+               if (ret < 0) {
+                       free(outputs);
+                       goto setup_error;
+               }
+
+               if (outputs) {
+                       /* Copy output list into message payload */
+                       memcpy(cmd_ctx->llm->payload, outputs,
+                                       nb_output * sizeof(struct lttng_snapshot_output));
+                       free(outputs);
+               }
+
+               ret = LTTNG_OK;
+               break;
+       }
+       case LTTNG_SNAPSHOT_RECORD:
+       {
+               ret = cmd_snapshot_record(cmd_ctx->session,
+                               &cmd_ctx->lsm->u.snapshot_record.output,
+                               cmd_ctx->lsm->u.snapshot_record.wait);
+               break;
+       }
+       case LTTNG_CREATE_SESSION_SNAPSHOT:
+       {
+               size_t nb_uri, len;
+               struct lttng_uri *uris = NULL;
+
+               nb_uri = cmd_ctx->lsm->u.uri.size;
+               len = nb_uri * sizeof(struct lttng_uri);
+
+               if (nb_uri > 0) {
+                       uris = zmalloc(len);
+                       if (uris == NULL) {
+                               ret = LTTNG_ERR_FATAL;
+                               goto error;
+                       }
+
+                       /* Receive variable len data */
+                       DBG("Waiting for %zu URIs from client ...", nb_uri);
+                       ret = lttcomm_recv_unix_sock(sock, uris, len);
+                       if (ret <= 0) {
+                               DBG("No URIs received from client... continuing");
+                               *sock_error = 1;
+                               ret = LTTNG_ERR_SESSION_FAIL;
+                               free(uris);
+                               goto error;
+                       }
+
+                       if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
+                               DBG("Creating session with ONE network URI is a bad call");
+                               ret = LTTNG_ERR_SESSION_FAIL;
+                               free(uris);
+                               goto error;
+                       }
+               }
+
+               ret = cmd_create_session_snapshot(cmd_ctx->lsm->session.name, uris,
+                               nb_uri, &cmd_ctx->creds);
+               free(uris);
+               break;
+       }
+       case LTTNG_CREATE_SESSION_LIVE:
+       {
+               size_t nb_uri, len;
+               struct lttng_uri *uris = NULL;
+
+               nb_uri = cmd_ctx->lsm->u.uri.size;
+               len = nb_uri * sizeof(struct lttng_uri);
+
+               if (nb_uri > 0) {
+                       uris = zmalloc(len);
+                       if (uris == NULL) {
+                               ret = LTTNG_ERR_FATAL;
+                               goto error;
+                       }
+
+                       /* Receive variable len data */
+                       DBG("Waiting for %zu URIs from client ...", nb_uri);
+                       ret = lttcomm_recv_unix_sock(sock, uris, len);
+                       if (ret <= 0) {
+                               DBG("No URIs received from client... continuing");
+                               *sock_error = 1;
+                               ret = LTTNG_ERR_SESSION_FAIL;
+                               free(uris);
+                               goto error;
+                       }
+
+                       if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
+                               DBG("Creating session with ONE network URI is a bad call");
+                               ret = LTTNG_ERR_SESSION_FAIL;
+                               free(uris);
+                               goto error;
+                       }
+               }
+
+               ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris,
+                               nb_uri, &cmd_ctx->creds, cmd_ctx->lsm->u.session_live.timer_interval);
+               free(uris);
                break;
        }
        default:
-               ret = LTTCOMM_UND;
+               ret = LTTNG_ERR_UND;
                break;
        }
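
The filter path above checks the announced bytecode size twice: against LTTNG_FILTER_MAX_LEN and zero before allocating anything, then against the length field embedded in the received header, so an inconsistent client cannot make the daemon trust a bogus size. Condensed into one routine (the struct layout and limit value are simplified assumptions, not the real lttng_filter_bytecode):

    #include <stdint.h>
    #include <stddef.h>

    #define FILTER_MAX_LEN 65536            /* stand-in for LTTNG_FILTER_MAX_LEN */

    struct filter_bytecode {                /* simplified stand-in */
            uint32_t len;                   /* length of data[] only */
            char data[];
    };

    /* Returns 0 when 'announced' is sane and matches the received header. */
    static int validate_bytecode(const struct filter_bytecode *bc, size_t announced)
    {
            if (announced == 0 || announced > FILTER_MAX_LEN) {
                    return -1;              /* reject before even allocating */
            }
            if ((size_t) bc->len + sizeof(*bc) != announced) {
                    return -1;              /* header disagrees with the wire size */
            }
            return 0;
    }
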
 
@@ -5111,6 +3464,9 @@ static void *thread_manage_health(void *data)
 
        rcu_register_thread();
 
+       /* We might hit an error path before this is created. */
+       lttng_poll_init(&events);
+
        /* Create unix socket */
        sock = lttcomm_create_unix_sock(health_unix_sock_path);
        if (sock < 0) {
@@ -5119,6 +3475,12 @@ static void *thread_manage_health(void *data)
                goto error;
        }
 
+       /*
+        * Set the CLOEXEC flag. Return code is useless because either way, the
+        * show must go on.
+        */
+       (void) utils_set_fd_cloexec(sock);
+
        ret = lttcomm_listen_unix_sock(sock);
        if (ret < 0) {
                goto error;
@@ -5128,7 +3490,7 @@ static void *thread_manage_health(void *data)
         * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
         * more will be added to this poll set.
         */
-       ret = create_thread_poll_set(&events, 2);
+       ret = sessiond_set_thread_pollset(&events, 2);
        if (ret < 0) {
                goto error;
        }
@@ -5142,8 +3504,6 @@ static void *thread_manage_health(void *data)
        while (1) {
                DBG("Health check ready");
 
-               nb_fd = LTTNG_POLL_GETNB(&events);
-
                /* Infinite blocking call, waiting for transmission */
 restart:
                ret = lttng_poll_wait(&events, -1);
@@ -5157,13 +3517,15 @@ restart:
                        goto error;
                }
 
+               nb_fd = ret;
+
                for (i = 0; i < nb_fd; i++) {
                        /* Fetch once the poll data */
                        revents = LTTNG_POLL_GETEV(&events, i);
                        pollfd = LTTNG_POLL_GETFD(&events, i);
 
                        /* Thread quit pipe has been closed. Killing thread. */
-                       ret = check_thread_quit_pipe(pollfd, revents);
+                       ret = sessiond_check_thread_quit_pipe(pollfd, revents);
                        if (ret) {
                                err = 0;
                                goto exit;
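
Every event loop in this patch stops taking nb_fd from LTTNG_POLL_GETNB(), which reflects the number of descriptors registered in the set, and uses the return value of lttng_poll_wait() instead, which is the number of descriptors that actually have pending events. The same idea expressed with plain poll(2), since the compat wrapper's internals are not part of this diff:

    #include <errno.h>
    #include <poll.h>

    /* Wait for activity and scan only as many entries as poll() reported ready. */
    static int wait_and_dispatch(struct pollfd *fds, nfds_t nb)
    {
            int ret;

    restart:
            ret = poll(fds, nb, -1);                /* infinite blocking call */
            if (ret < 0) {
                    if (errno == EINTR) {
                            goto restart;           /* restart interrupted system call */
                    }
                    return -1;
            }
            for (nfds_t i = 0; i < nb && ret > 0; i++) {
                    if (fds[i].revents) {
                            /* ... dispatch on fds[i].fd / fds[i].revents ... */
                            ret--;                  /* one ready descriptor handled */
                    }
            }
            return 0;
    }
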
@@ -5183,6 +3545,12 @@ restart:
                        goto error;
                }
 
+               /*
+                * Set the CLOEXEC flag. Return code is useless because either way, the
+                * show must go on.
+                */
+               (void) utils_set_fd_cloexec(new_sock);
+
                DBG("Receiving data from client for health...");
                ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
                if (ret <= 0) {
@@ -5199,30 +3567,42 @@ restart:
 
                switch (msg.component) {
                case LTTNG_HEALTH_CMD:
-                       reply.ret_code = health_check_state(&health_thread_cmd);
+                       reply.ret_code = health_check_state(health_sessiond, HEALTH_TYPE_CMD);
                        break;
                case LTTNG_HEALTH_APP_MANAGE:
-                       reply.ret_code = health_check_state(&health_thread_app_manage);
+                       reply.ret_code = health_check_state(health_sessiond, HEALTH_TYPE_APP_MANAGE);
                        break;
                case LTTNG_HEALTH_APP_REG:
-                       reply.ret_code = health_check_state(&health_thread_app_reg);
+                       reply.ret_code = health_check_state(health_sessiond, HEALTH_TYPE_APP_REG);
                        break;
                case LTTNG_HEALTH_KERNEL:
-                       reply.ret_code = health_check_state(&health_thread_kernel);
+                       reply.ret_code = health_check_state(health_sessiond, HEALTH_TYPE_KERNEL);
                        break;
                case LTTNG_HEALTH_CONSUMER:
                        reply.ret_code = check_consumer_health();
                        break;
+               case LTTNG_HEALTH_HT_CLEANUP:
+                       reply.ret_code = health_check_state(health_sessiond, HEALTH_TYPE_HT_CLEANUP);
+                       break;
+               case LTTNG_HEALTH_APP_MANAGE_NOTIFY:
+                       reply.ret_code = health_check_state(health_sessiond, HEALTH_TYPE_APP_MANAGE_NOTIFY);
+                       break;
+               case LTTNG_HEALTH_APP_REG_DISPATCH:
+                       reply.ret_code = health_check_state(health_sessiond, HEALTH_TYPE_APP_REG_DISPATCH);
+                       break;
                case LTTNG_HEALTH_ALL:
                        reply.ret_code =
-                               health_check_state(&health_thread_app_manage) &&
-                               health_check_state(&health_thread_app_reg) &&
-                               health_check_state(&health_thread_cmd) &&
-                               health_check_state(&health_thread_kernel) &&
-                               check_consumer_health();
+                               health_check_state(health_sessiond, HEALTH_TYPE_APP_MANAGE) &&
+                               health_check_state(health_sessiond, HEALTH_TYPE_APP_REG) &&
+                               health_check_state(health_sessiond, HEALTH_TYPE_CMD) &&
+                               health_check_state(health_sessiond, HEALTH_TYPE_KERNEL) &&
+                               check_consumer_health() &&
+                               health_check_state(health_sessiond, HEALTH_TYPE_HT_CLEANUP) &&
+                               health_check_state(health_sessiond, HEALTH_TYPE_APP_MANAGE_NOTIFY) &&
+                               health_check_state(health_sessiond, HEALTH_TYPE_APP_REG_DISPATCH);
                        break;
                default:
-                       reply.ret_code = LTTCOMM_UND;
+                       reply.ret_code = LTTNG_ERR_UND;
                        break;
                }
 
@@ -5264,12 +3644,6 @@ error:
                        PERROR("close");
                }
        }
-       if (new_sock >= 0) {
-               ret = close(new_sock);
-               if (ret) {
-                       PERROR("close");
-               }
-       }
 
        lttng_poll_clean(&events);
 
@@ -5293,20 +3667,26 @@ static void *thread_manage_clients(void *data)
 
        rcu_register_thread();
 
-       health_code_update(&health_thread_cmd);
+       health_register(health_sessiond, HEALTH_TYPE_CMD);
+
+       if (testpoint(thread_manage_clients)) {
+               goto error_testpoint;
+       }
+
+       health_code_update();
 
        ret = lttcomm_listen_unix_sock(client_sock);
        if (ret < 0) {
-               goto error;
+               goto error_listen;
        }
 
        /*
         * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
         * more will be added to this poll set.
         */
-       ret = create_thread_poll_set(&events, 2);
+       ret = sessiond_set_thread_pollset(&events, 2);
        if (ret < 0) {
-               goto error;
+               goto error_create_poll;
        }
 
        /* Add the application registration socket */
@@ -5322,18 +3702,20 @@ static void *thread_manage_clients(void *data)
                kill(ppid, SIGUSR1);
        }
 
-       health_code_update(&health_thread_cmd);
+       if (testpoint(thread_manage_clients_before_loop)) {
+               goto error;
+       }
+
+       health_code_update();
 
        while (1) {
                DBG("Accepting client command ...");
 
-               nb_fd = LTTNG_POLL_GETNB(&events);
-
                /* Infinite blocking call, waiting for transmission */
        restart:
-               health_poll_update(&health_thread_cmd);
+               health_poll_entry();
                ret = lttng_poll_wait(&events, -1);
-               health_poll_update(&health_thread_cmd);
+               health_poll_exit();
                if (ret < 0) {
                        /*
                         * Restart interrupted system call.
@@ -5344,15 +3726,17 @@ static void *thread_manage_clients(void *data)
                        goto error;
                }
 
+               nb_fd = ret;
+
                for (i = 0; i < nb_fd; i++) {
                        /* Fetch once the poll data */
                        revents = LTTNG_POLL_GETEV(&events, i);
                        pollfd = LTTNG_POLL_GETFD(&events, i);
 
-                       health_code_update(&health_thread_cmd);
+                       health_code_update();
 
                        /* Thread quit pipe has been closed. Killing thread. */
-                       ret = check_thread_quit_pipe(pollfd, revents);
+                       ret = sessiond_check_thread_quit_pipe(pollfd, revents);
                        if (ret) {
                                err = 0;
                                goto exit;
@@ -5369,13 +3753,19 @@ static void *thread_manage_clients(void *data)
 
                DBG("Wait for client response");
 
-               health_code_update(&health_thread_cmd);
+               health_code_update();
 
                sock = lttcomm_accept_unix_sock(client_sock);
                if (sock < 0) {
                        goto error;
                }
 
+               /*
+                * Set the CLOEXEC flag. Return code is useless because either way, the
+                * show must go on.
+                */
+               (void) utils_set_fd_cloexec(sock);
+
                /* Set socket option for credentials retrieval */
                ret = lttcomm_setsockopt_creds_unix_sock(sock);
                if (ret < 0) {
@@ -5399,7 +3789,7 @@ static void *thread_manage_clients(void *data)
                cmd_ctx->llm = NULL;
                cmd_ctx->session = NULL;
 
-               health_code_update(&health_thread_cmd);
+               health_code_update();
 
                /*
                 * Data is received from the lttng client. The struct
@@ -5420,7 +3810,7 @@ static void *thread_manage_clients(void *data)
                        continue;
                }
 
-               health_code_update(&health_thread_cmd);
+               health_code_update();
 
                // TODO: Validate cmd_ctx including sanity check for
                // security purpose.
@@ -5435,13 +3825,11 @@ static void *thread_manage_clients(void *data)
                ret = process_client_msg(cmd_ctx, sock, &sock_error);
                rcu_thread_offline();
                if (ret < 0) {
-                       if (sock_error) {
-                               ret = close(sock);
-                               if (ret) {
-                                       PERROR("close");
-                               }
-                               sock = -1;
+                       ret = close(sock);
+                       if (ret) {
+                               PERROR("close");
                        }
+                       sock = -1;
                        /*
                         * TODO: Inform client somehow of the fatal error. At
                         * this point, ret < 0 means that a zmalloc failed
@@ -5453,7 +3841,7 @@ static void *thread_manage_clients(void *data)
                        continue;
                }
 
-               health_code_update(&health_thread_cmd);
+               health_code_update();
 
                DBG("Sending response (size: %d, retcode: %s)",
                                cmd_ctx->lttng_msg_size,
@@ -5472,18 +3860,24 @@ static void *thread_manage_clients(void *data)
 
                clean_command_ctx(&cmd_ctx);
 
-               health_code_update(&health_thread_cmd);
+               health_code_update();
        }
 
 exit:
 error:
-       if (err) {
-               health_error(&health_thread_cmd);
-               ERR("Health error occurred in %s", __func__);
+       if (sock >= 0) {
+               ret = close(sock);
+               if (ret) {
+                       PERROR("close");
+               }
        }
-       health_exit(&health_thread_cmd);
 
-       DBG("Client thread dying");
+       lttng_poll_clean(&events);
+       clean_command_ctx(&cmd_ctx);
+
+error_listen:
+error_create_poll:
+error_testpoint:
        unlink(client_unix_sock_path);
        if (client_sock >= 0) {
                ret = close(client_sock);
@@ -5491,15 +3885,15 @@ error:
                        PERROR("close");
                }
        }
-       if (sock >= 0) {
-               ret = close(sock);
-               if (ret) {
-                       PERROR("close");
-               }
+
+       if (err) {
+               health_error();
+               ERR("Health error occurred in %s", __func__);
        }
 
-       lttng_poll_clean(&events);
-       clean_command_ctx(&cmd_ctx);
+       health_unregister(health_sessiond);
+
+       DBG("Client thread dying");
 
        rcu_unregister_thread();
        return NULL;
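
The restructured client thread follows the per-thread health protocol used throughout this patch: register against the shared health_sessiond state, tick liveness with health_code_update(), bracket blocking waits with health_poll_entry()/health_poll_exit(), flag a failure with health_error(), and always unregister on the way out. As a bare skeleton (loop body and should_quit() are placeholders):

    static void *thread_skeleton(void *data)
    {
            int err = 1;

            (void) data;
            health_register(health_sessiond, HEALTH_TYPE_CMD);
            health_code_update();

            while (!should_quit()) {                /* hypothetical loop condition */
                    health_poll_entry();            /* entering a blocking wait */
                    /* ... poll / accept / recv ... */
                    health_poll_exit();             /* back to active processing */

                    health_code_update();           /* periodic liveness tick */
                    /* ... handle one event; on fatal error: goto error ... */
            }
            err = 0;
    error:
            if (err) {
                    health_error();                 /* mark this thread as faulty */
            }
            health_unregister(health_sessiond);
            return NULL;
    }
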
@@ -5531,6 +3925,7 @@ static void usage(void)
        fprintf(stderr, "  -S, --sig-parent                   Send SIGCHLD to parent pid to notify readiness.\n");
        fprintf(stderr, "  -q, --quiet                        No output at all.\n");
        fprintf(stderr, "  -v, --verbose                      Verbose mode. Activate DBG() macro.\n");
+       fprintf(stderr, "  -p, --pidfile FILE                 Write the PID to FILE, overriding the default path.\n");
        fprintf(stderr, "      --verbose-consumer             Verbose mode for consumer. Activate DBG() macro.\n");
        fprintf(stderr, "      --no-kernel                    Disable kernel tracer\n");
 }
@@ -5564,12 +3959,13 @@ static int parse_args(int argc, char **argv)
                { "verbose", 0, 0, 'v' },
                { "verbose-consumer", 0, 0, 'Z' },
                { "no-kernel", 0, 0, 'N' },
+               { "pidfile", 1, 0, 'p' },
                { NULL, 0, 0, 0 }
        };
 
        while (1) {
                int option_index = 0;
-               c = getopt_long(argc, argv, "dhqvVSN" "a:c:g:s:C:E:D:F:Z:u:t",
+               c = getopt_long(argc, argv, "dhqvVSN" "a:c:g:s:C:E:D:F:Z:u:t:p:",
                                long_options, &option_index);
                if (c == -1) {
                        break;
@@ -5646,6 +4042,9 @@ static int parse_args(int argc, char **argv)
                case 'T':
                        consumerd64_libdir = optarg;
                        break;
+               case 'p':
+                       opt_pidfile = optarg;
+                       break;
                default:
                        /* Unknown option or other error.
                         * Error is printed by getopt, just return */
@@ -5676,6 +4075,14 @@ static int init_daemon_socket(void)
                goto end;
        }
 
+       /* Set the cloexec flag */
+       ret = utils_set_fd_cloexec(client_sock);
+       if (ret < 0) {
+               ERR("Unable to set CLOEXEC flag to the client Unix socket (fd: %d). "
+                               "Continuing but note that the consumer daemon will have a "
+                               "reference to this socket on exec()", client_sock);
+       }
+
        /* File permission MUST be 660 */
        ret = chmod(client_unix_sock_path, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
        if (ret < 0) {
@@ -5692,6 +4099,14 @@ static int init_daemon_socket(void)
                goto end;
        }
 
+       /* Set the cloexec flag */
+       ret = utils_set_fd_cloexec(apps_sock);
+       if (ret < 0) {
+               ERR("Unable to set CLOEXEC flag to the app Unix socket (fd: %d). "
+                               "Continuing but note that the consumer daemon will have a "
+                               "reference to this socket on exec()", apps_sock);
+       }
+
        /* File permission MUST be 666 */
        ret = chmod(apps_unix_sock_path,
                        S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
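
utils_set_fd_cloexec() is applied to every long-lived socket created in this function (and to the health and consumer sockets elsewhere in the patch) so the descriptors are not inherited across the exec() of spawned consumer daemons; its return value is deliberately ignored wherever the daemon must keep going regardless. The helper itself is defined elsewhere in the tree; the conventional fcntl() form of such a helper is:

    #include <fcntl.h>

    /* Conventional close-on-exec helper (sketch, not the project's own code). */
    static int set_fd_cloexec(int fd)
    {
            int flags = fcntl(fd, F_GETFD, 0);

            if (flags < 0) {
                    return -1;
            }
            return fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
    }
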
@@ -5701,6 +4116,9 @@ static int init_daemon_socket(void)
                goto end;
        }
 
+       DBG3("Session daemon client socket %d and application socket %d created",
+                       client_sock, apps_sock);
+
 end:
        umask(old_umask);
        return ret;
@@ -5734,12 +4152,12 @@ static int set_permissions(char *rundir)
        ret = allowed_group();
        if (ret < 0) {
                WARN("No tracing group detected");
-               ret = 0;
-               goto end;
+               /* Setting gid to 0 if no tracing group is found */
+               gid = 0;
+       } else {
+               gid = ret;
        }
 
-       gid = ret;
-
        /* Set lttng run dir */
        ret = chown(rundir, 0, gid);
        if (ret < 0) {
@@ -5747,7 +4165,7 @@ static int set_permissions(char *rundir)
                PERROR("chown");
        }
 
-       /* Ensure tracing group can search the run dir */
+       /* Ensure all applications and tracing group can search the run dir */
        ret = chmod(rundir, S_IRWXU | S_IXGRP | S_IXOTH);
        if (ret < 0) {
                ERR("Unable to set permissions on %s", rundir);
@@ -5784,7 +4202,6 @@ static int set_permissions(char *rundir)
 
        DBG("All permissions are set");
 
-end:
        return ret;
 }
 
@@ -5858,6 +4275,16 @@ static int set_consumer_sockets(struct consumer_data *consumer_data,
                goto error;
        }
 
+       /*
+        * Set the CLOEXEC flag. Return code is useless because either way, the
+        * show must go on.
+        */
+       ret = utils_set_fd_cloexec(consumer_data->err_sock);
+       if (ret < 0) {
+               PERROR("utils_set_fd_cloexec");
+               /* continue anyway */
+       }
+
        /* File permission MUST be 660 */
        ret = chmod(consumer_data->err_unix_sock_path,
                        S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
@@ -5953,6 +4380,38 @@ static void set_ulimit(void)
        }
 }
 
+/*
+ * Write pidfile using the rundir and opt_pidfile.
+ */
+static void write_pidfile(void)
+{
+       int ret;
+       char pidfile_path[PATH_MAX];
+
+       assert(rundir);
+
+       if (opt_pidfile) {
+               strncpy(pidfile_path, opt_pidfile, sizeof(pidfile_path));
+       } else {
+               /* Build pidfile path from rundir and opt_pidfile. */
+               ret = snprintf(pidfile_path, sizeof(pidfile_path), "%s/"
+                               DEFAULT_LTTNG_SESSIOND_PIDFILE, rundir);
+               if (ret < 0) {
+                       PERROR("snprintf pidfile path");
+                       goto error;
+               }
+       }
+
+       /*
+        * Create pid file in rundir. Return value is of no importance. The
+        * execution will continue even though we are not able to write the file.
+        */
+       (void) utils_create_pid_file(getpid(), pidfile_path);
+
+error:
+       return;
+}
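
write_pidfile() leaves the actual write to utils_create_pid_file() and discards its result on purpose, so an unwritable run directory cannot prevent the daemon from starting. That helper is not part of this diff; a minimal routine of that kind could look like:

    #include <stdio.h>
    #include <sys/types.h>

    /* Sketch: write 'pid' followed by a newline to 'path'. */
    static int create_pid_file(pid_t pid, const char *path)
    {
            int ret;
            FILE *fp = fopen(path, "w");

            if (!fp) {
                    return -1;
            }
            ret = fprintf(fp, "%d\n", (int) pid);
            if (fclose(fp) != 0 || ret < 0) {
                    return -1;
            }
            return 0;
    }
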
+
 /*
  * main
  */
@@ -5960,7 +4419,7 @@ int main(int argc, char **argv)
 {
        int ret = 0;
        void *status;
-       const char *home_path;
+       const char *home_path, *env_app_timeout;
 
        init_kernel_workarounds();
 
@@ -5968,9 +4427,16 @@ int main(int argc, char **argv)
 
        setup_consumerd_path();
 
+       page_size = sysconf(_SC_PAGESIZE);
+       if (page_size < 0) {
+               PERROR("sysconf _SC_PAGESIZE");
+               page_size = LONG_MAX;
+               WARN("Fallback page size to %ld", page_size);
+       }
+
        /* Parse arguments */
        progname = argv[0];
-       if ((ret = parse_args(argc, argv) < 0)) {
+       if ((ret = parse_args(argc, argv)) < 0) {
                goto error;
        }
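
The moved closing parenthesis is a real bug fix, not a style change: in the old form the comparison binds tighter than the assignment, so ret was assigned the boolean result of parse_args(argc, argv) < 0 (0 or 1). The error branch still fired on failure, but ret then held 1 instead of the actual return value. Side by side:

    /* Old: parsed as ret = (parse_args(argc, argv) < 0); ret becomes 0 or 1. */
    if ((ret = parse_args(argc, argv) < 0)) {
            goto error;
    }

    /* New: ret receives the real return value, which is then compared to 0. */
    if ((ret = parse_args(argc, argv)) < 0) {
            goto error;
    }
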
 
@@ -6047,7 +4513,7 @@ int main(int argc, char **argv)
                DBG2("Kernel consumer cmd path: %s",
                                kconsumer_data.cmd_unix_sock_path);
        } else {
-               home_path = get_home_dir();
+               home_path = utils_get_home_dir();
                if (home_path == NULL) {
                        /* TODO: Add --socket PATH option */
                        ERR("Can't get HOME directory for sockets creation.");
@@ -6084,7 +4550,7 @@ int main(int argc, char **argv)
                /* Set global SHM for ust */
                if (strlen(wait_shm_path) == 0) {
                        snprintf(wait_shm_path, PATH_MAX,
-                                       DEFAULT_HOME_APPS_WAIT_SHM_PATH, geteuid());
+                                       DEFAULT_HOME_APPS_WAIT_SHM_PATH, getuid());
                }
 
                /* Set health check Unix path */
@@ -6100,6 +4566,7 @@ int main(int argc, char **argv)
 
        DBG("Client socket path %s", client_unix_sock_path);
        DBG("Application socket path %s", apps_unix_sock_path);
+       DBG("Application wait path %s", wait_shm_path);
        DBG("LTTng run directory path: %s", rundir);
 
        /* 32 bits consumerd path setup */
@@ -6197,7 +4664,14 @@ int main(int argc, char **argv)
        }
 
        /* Setup the kernel pipe for waking up the kernel thread */
-       if ((ret = utils_create_pipe_cloexec(kernel_poll_pipe)) < 0) {
+       if (is_root && !opt_no_kernel) {
+               if ((ret = utils_create_pipe_cloexec(kernel_poll_pipe)) < 0) {
+                       goto exit;
+               }
+       }
+
+       /* Setup the thread ht_cleanup communication pipe. */
+       if (utils_create_pipe_cloexec(ht_cleanup_pipe) < 0) {
                goto exit;
        }
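
All the inter-thread pipes (kernel poll, ht_cleanup, apps command, apps notify) are now created through utils_create_pipe_cloexec(), i.e. both ends carry FD_CLOEXEC from the moment they exist. The helper is not shown here; on Linux the race-free way to get that effect is pipe2(), and where pipe2() is unavailable the pipe()+fcntl() form shown earlier for sockets applies to each end:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <unistd.h>

    /* Sketch: both pipe ends are close-on-exec from creation, with no window
     * between pipe() and a later fcntl() call. */
    static int create_pipe_cloexec(int fds[2])
    {
            return pipe2(fds, O_CLOEXEC);
    }
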
 
@@ -6206,6 +4680,15 @@ int main(int argc, char **argv)
                goto exit;
        }
 
+       /* Setup the thread apps notify communication pipe. */
+       if (utils_create_pipe_cloexec(apps_cmd_notify_pipe) < 0) {
+               goto exit;
+       }
+
+       /* Initialize global buffer per UID and PID registry. */
+       buffer_reg_init_uid_registry();
+       buffer_reg_init_pid_registry();
+
        /* Init UST command queue. */
        cds_wfq_init(&ust_cmd_queue.queue);
 
@@ -6218,29 +4701,40 @@ int main(int argc, char **argv)
        /* Set up max poll set size */
        lttng_poll_set_max_size();
 
-       /*
-        * Set network sequence index to 1 for streams to match a relayd socket on
-        * the consumer side.
-        */
-       uatomic_set(&relayd_net_seq_idx, 1);
+       cmd_init();
+
+       /* Check for the application socket timeout env variable. */
+       env_app_timeout = getenv(DEFAULT_APP_SOCKET_TIMEOUT_ENV);
+       if (env_app_timeout) {
+               app_socket_timeout = atoi(env_app_timeout);
+       } else {
+               app_socket_timeout = DEFAULT_APP_SOCKET_RW_TIMEOUT;
+       }
+
+       write_pidfile();
 
-       /* Init all health thread counters. */
-       health_init(&health_thread_cmd);
-       health_init(&health_thread_kernel);
-       health_init(&health_thread_app_manage);
-       health_init(&health_thread_app_reg);
+       /* Initialize communication library */
+       lttcomm_init();
+       /* This is to get the TCP timeout value. */
+       lttcomm_inet_init();
 
        /*
-        * Init health counters of the consumer thread. We do a quick hack here to
-        * the state of the consumer health is fine even if the thread is not
-        * started.  This is simply to ease our life and has no cost what so ever.
+        * Initialize the health check subsystem. This call should set the
+        * appropriate time values.
         */
-       health_init(&kconsumer_data.health);
-       health_poll_update(&kconsumer_data.health);
-       health_init(&ustconsumer32_data.health);
-       health_poll_update(&ustconsumer32_data.health);
-       health_init(&ustconsumer64_data.health);
-       health_poll_update(&ustconsumer64_data.health);
+       health_sessiond = health_app_create(HEALTH_NUM_TYPE);
+       if (!health_sessiond) {
+               PERROR("health_app_create error");
+               goto exit_health_sessiond_cleanup;
+       }
+
+       /* Create thread to clean up RCU hash tables */
+       ret = pthread_create(&ht_cleanup_thread, NULL,
+                       thread_ht_cleanup, (void *) NULL);
+       if (ret != 0) {
+               PERROR("pthread_create ht_cleanup");
+               goto exit_ht_cleanup;
+       }
 
        /* Create thread to manage the client socket */
        ret = pthread_create(&health_thread, NULL,
@@ -6282,27 +4776,46 @@ int main(int argc, char **argv)
                goto exit_apps;
        }
 
-       /* Create kernel thread to manage kernel event */
-       ret = pthread_create(&kernel_thread, NULL,
-                       thread_manage_kernel, (void *) NULL);
+       /* Create thread to manage application notify socket */
+       ret = pthread_create(&apps_notify_thread, NULL,
+                       ust_thread_manage_notify, (void *) NULL);
        if (ret != 0) {
-               PERROR("pthread_create kernel");
-               goto exit_kernel;
+               PERROR("pthread_create apps");
+               goto exit_apps_notify;
+       }
+
+       /* Don't start this thread if kernel tracing is not requested nor root */
+       if (is_root && !opt_no_kernel) {
+               /* Create kernel thread to manage kernel event */
+               ret = pthread_create(&kernel_thread, NULL,
+                               thread_manage_kernel, (void *) NULL);
+               if (ret != 0) {
+                       PERROR("pthread_create kernel");
+                       goto exit_kernel;
+               }
+
+               ret = pthread_join(kernel_thread, &status);
+               if (ret != 0) {
+                       PERROR("pthread_join");
+                       goto error;     /* join error, exit without cleanup */
+               }
        }
 
-       ret = pthread_join(kernel_thread, &status);
+exit_kernel:
+       ret = pthread_join(apps_notify_thread, &status);
        if (ret != 0) {
-               PERROR("pthread_join");
+               PERROR("pthread_join apps notify");
                goto error;     /* join error, exit without cleanup */
        }
 
-exit_kernel:
+exit_apps_notify:
        ret = pthread_join(apps_thread, &status);
        if (ret != 0) {
-               PERROR("pthread_join");
+               PERROR("pthread_join apps");
                goto error;     /* join error, exit without cleanup */
        }
 
+
 exit_apps:
        ret = pthread_join(reg_apps_thread, &status);
        if (ret != 0) {
@@ -6330,8 +4843,34 @@ exit_dispatch:
                goto error;     /* join error, exit without cleanup */
        }
 
+       ret = join_consumer_thread(&ustconsumer32_data);
+       if (ret != 0) {
+               PERROR("join_consumer ust32");
+               goto error;     /* join error, exit without cleanup */
+       }
+
+       ret = join_consumer_thread(&ustconsumer64_data);
+       if (ret != 0) {
+               PERROR("join_consumer ust64");
+               goto error;     /* join error, exit without cleanup */
+       }
+
 exit_client:
+       ret = pthread_join(health_thread, &status);
+       if (ret != 0) {
+               PERROR("pthread_join health thread");
+               goto error;     /* join error, exit without cleanup */
+       }
+
 exit_health:
+       ret = pthread_join(ht_cleanup_thread, &status);
+       if (ret != 0) {
+               PERROR("pthread_join ht cleanup thread");
+               goto error;     /* join error, exit without cleanup */
+       }
+exit_ht_cleanup:
+       health_app_destroy(health_sessiond);
+exit_health_sessiond_cleanup:
 exit:
        /*
         * cleanup() is called when no other thread is running.