SoW-2019-0002: Dynamic Snapshot
[lttng-tools.git] / src / bin / lttng-sessiond / ust-app.c
index 856de63a3f24152d03944c003bfe28c890f4847a..8fc0efdea8cb2ea14aff6df01f2ae3aecd4b5aaf 100644 (file)
@@ -1,19 +1,9 @@
 /*
- * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
- * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
+ * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2 only,
- * as published by the Free Software Foundation.
+ * SPDX-License-Identifier: GPL-2.0-only
  *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
  */
 
 #define _LGPL_SOURCE
 #include <signal.h>
 
 #include <common/common.h>
+#include <common/hashtable/utils.h>
+#include <lttng/event-rule/event-rule.h>
+#include <lttng/event-rule/event-rule-internal.h>
+#include <lttng/event-rule/tracepoint.h>
+#include <lttng/condition/condition.h>
+#include <lttng/condition/event-rule-internal.h>
+#include <lttng/condition/event-rule.h>
 #include <common/sessiond-comm/sessiond-comm.h>
 
 #include "buffer-registry.h"
 #include "lttng-sessiond.h"
 #include "notification-thread-commands.h"
 #include "rotate.h"
+#include "event.h"
+
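+/*
+ * Global registries of UST applications, indexed by PID, command socket and
+ * notify socket.
+ */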
+struct lttng_ht *ust_app_ht;
+struct lttng_ht *ust_app_ht_by_sock;
+struct lttng_ht *ust_app_ht_by_notify_sock;
 
 static
 int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);
@@ -319,6 +321,34 @@ void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
        free(ua_event);
 }
 
+/*
+ * Delete an ust app token event rule safely. The RCU read lock must be held
+ * before calling this function (TODO: confirm this locking requirement).
+ */
+static
+void delete_ust_app_token_event_rule(int sock, struct ust_app_token_event_rule *ua_token,
+               struct ust_app *app)
+{
+       int ret;
+
+       assert(ua_token);
+
+       if (ua_token->exclusion != NULL)
+               free(ua_token->exclusion);
+       if (ua_token->obj != NULL) {
+               pthread_mutex_lock(&app->sock_lock);
+               ret = ustctl_release_object(sock, ua_token->obj);
+               pthread_mutex_unlock(&app->sock_lock);
+               if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+                       ERR("UST app sock %d release event obj failed with ret %d",
+                                       sock, ret);
+               }
+               free(ua_token->obj);
+       }
+       lttng_event_rule_put(ua_token->event_rule);
+       free(ua_token);
+}
+
 /*
  * Release ust data object of the given stream.
  *
@@ -927,6 +957,10 @@ void delete_ust_app(struct ust_app *app)
        ht_cleanup_push(app->ust_sessions_objd);
        ht_cleanup_push(app->ust_objd);
 
+       ustctl_release_object(sock, app->token_communication.handle);
+
+       lttng_pipe_close(app->token_communication.trigger_event_pipe);
+
        /*
         * Wait until we have deleted the application from the sock hash table
         * before closing this socket, otherwise an application could re-use the
@@ -1025,7 +1059,7 @@ error_free:
  * Alloc new UST app channel.
  */
 static
-struct ust_app_channel *alloc_ust_app_channel(char *name,
+struct ust_app_channel *alloc_ust_app_channel(const char *name,
                struct ust_app_session *ua_sess,
                struct lttng_ust_channel_attr *attr)
 {
@@ -1131,6 +1165,41 @@ error:
        return NULL;
 }
 
+/*
+ * Alloc new UST app token event rule.
+ */
+static struct ust_app_token_event_rule *alloc_ust_app_token_event_rule(
+               struct lttng_event_rule *event_rule, uint64_t token)
+{
+       struct ust_app_token_event_rule *ua_token;
+
+       ua_token = zmalloc(sizeof(struct ust_app_token_event_rule));
+       if (ua_token == NULL) {
+               PERROR("Failed to allocate ust_app_token_event_rule structure");
+               goto error;
+       }
+
+       ua_token->enabled = 1;
+       ua_token->token = token;
+       lttng_ht_node_init_u64(&ua_token->node, token);
+
+       /* Take a reference on the event_rule. */
+       if (!lttng_event_rule_get(event_rule)) {
+               assert(0);
+       }
+
+       ua_token->event_rule = event_rule;
+       ua_token->filter = lttng_event_rule_get_filter_bytecode(event_rule);
+       ua_token->exclusion = lttng_event_rule_generate_exclusions(event_rule);
+
+       DBG3("UST app token event rule %" PRIu64 " allocated", ua_token->token);
+
+       return ua_token;
+
+error:
+       return NULL;
+}
+
 /*
  * Alloc new UST app context.
  */
@@ -1171,36 +1240,13 @@ error:
        return NULL;
 }
 
-/*
- * Allocate a filter and copy the given original filter.
- *
- * Return allocated filter or NULL on error.
- */
-static struct lttng_filter_bytecode *copy_filter_bytecode(
-               struct lttng_filter_bytecode *orig_f)
-{
-       struct lttng_filter_bytecode *filter = NULL;
-
-       /* Copy filter bytecode */
-       filter = zmalloc(sizeof(*filter) + orig_f->len);
-       if (!filter) {
-               PERROR("zmalloc alloc filter bytecode");
-               goto error;
-       }
-
-       memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
-
-error:
-       return filter;
-}
-
 /*
  * Create a liblttng-ust filter bytecode from given bytecode.
  *
  * Return allocated filter or NULL on error.
  */
 static struct lttng_ust_filter_bytecode *create_ust_bytecode_from_bytecode(
-               struct lttng_filter_bytecode *orig_f)
+               const struct lttng_filter_bytecode *orig_f)
 {
        struct lttng_ust_filter_bytecode *filter = NULL;
 
@@ -1303,6 +1349,32 @@ end:
        return event;
 }
 
+/*
+ * Look up an ust app token event rule based on a token id.
+ *
+ * Return an ust_app_token_event_rule object or NULL if not found.
+ */
+static struct ust_app_token_event_rule *find_ust_app_token_event_rule(struct lttng_ht *ht,
+               uint64_t token)
+{
+       struct lttng_ht_iter iter;
+       struct lttng_ht_node_u64 *node;
+       struct ust_app_token_event_rule *token_event_rule = NULL;
+
+       assert(ht);
+
+       lttng_ht_lookup(ht, &token, &iter);
+       node = lttng_ht_iter_get_node_u64(&iter);
+       if (node == NULL) {
+               DBG2("UST app token %" PRIu64 " not found", token);
+               goto end;
+       }
+
+       token_event_rule = caa_container_of(node, struct ust_app_token_event_rule, node);
+end:
+       return token_event_rule;
+}
+
 /*
  * Create the channel context on the tracer.
  *
@@ -1349,33 +1421,28 @@ error:
 /*
  * Set the filter on the tracer.
  */
-static
-int set_ust_event_filter(struct ust_app_event *ua_event,
-               struct ust_app *app)
+static int set_ust_filter(struct ust_app *app,
+               const struct lttng_filter_bytecode *bytecode,
+               struct lttng_ust_object_data *ust_object)
 {
        int ret;
        struct lttng_ust_filter_bytecode *ust_bytecode = NULL;
 
        health_code_update();
 
-       if (!ua_event->filter) {
-               ret = 0;
-               goto error;
-       }
-
-       ust_bytecode = create_ust_bytecode_from_bytecode(ua_event->filter);
+       ust_bytecode = create_ust_bytecode_from_bytecode(bytecode);
        if (!ust_bytecode) {
                ret = -LTTNG_ERR_NOMEM;
                goto error;
        }
        pthread_mutex_lock(&app->sock_lock);
        ret = ustctl_set_filter(app->sock, ust_bytecode,
-                       ua_event->obj);
+                       ust_object);
        pthread_mutex_unlock(&app->sock_lock);
        if (ret < 0) {
                if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
-                       ERR("UST app event %s filter failed for app (pid: %d) "
-                                       "with ret %d", ua_event->attr.name, app->pid, ret);
+                       ERR("UST app set filter failed for object %p of app (pid: %d) "
+                                       "with ret %d", ust_object, app->pid, ret);
                } else {
                        /*
                         * This is normal behavior, an application can die during the
@@ -1383,12 +1450,12 @@ int set_ust_event_filter(struct ust_app_event *ua_event,
                         * continue normally.
                         */
                        ret = 0;
-                       DBG3("UST app filter event failed. Application is dead.");
+                       DBG3("UST app set filter failed. Application is dead.");
                }
                goto error;
        }
 
-       DBG2("UST filter set successfully for event %s", ua_event->name);
+       DBG2("UST filter set for object %p successfully", ust_object);
 
 error:
        health_code_update();
@@ -1420,33 +1487,30 @@ end:
 /*
  * Set event exclusions on the tracer.
  */
-static
-int set_ust_event_exclusion(struct ust_app_event *ua_event,
-               struct ust_app *app)
+static int set_ust_exclusions(struct ust_app *app,
+               struct lttng_event_exclusion *exclusions,
+               struct lttng_ust_object_data *ust_object)
 {
        int ret;
-       struct lttng_ust_event_exclusion *ust_exclusion = NULL;
+       struct lttng_ust_event_exclusion *ust_exclusions = NULL;
 
-       health_code_update();
+       assert(exclusions && exclusions->count > 0);
 
-       if (!ua_event->exclusion || !ua_event->exclusion->count) {
-               ret = 0;
-               goto error;
-       }
+       health_code_update();
 
-       ust_exclusion = create_ust_exclusion_from_exclusion(
-                       ua_event->exclusion);
-       if (!ust_exclusion) {
+       ust_exclusions = create_ust_exclusion_from_exclusion(
+                       exclusions);
+       if (!ust_exclusions) {
                ret = -LTTNG_ERR_NOMEM;
                goto error;
        }
        pthread_mutex_lock(&app->sock_lock);
-       ret = ustctl_set_exclusion(app->sock, ust_exclusion, ua_event->obj);
+       ret = ustctl_set_exclusion(app->sock, ust_exclusions, ust_object);
        pthread_mutex_unlock(&app->sock_lock);
        if (ret < 0) {
                if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
-                       ERR("UST app event %s exclusions failed for app (pid: %d) "
-                                       "with ret %d", ua_event->attr.name, app->pid, ret);
+                       ERR("UST app exclusions failed for object %p of app (pid: %d) "
+                                       "with ret %d", ust_object, app->pid, ret);
                } else {
                        /*
                         * This is normal behavior, an application can die during the
@@ -1454,37 +1518,36 @@ int set_ust_event_exclusion(struct ust_app_event *ua_event,
                         * continue normally.
                         */
                        ret = 0;
-                       DBG3("UST app event exclusion failed. Application is dead.");
+                       DBG3("UST app set exclusions failed. Application is dead.");
                }
                goto error;
        }
 
-       DBG2("UST exclusion set successfully for event %s", ua_event->name);
+       DBG2("UST exclusions set successfully for object %p", ust_object);
 
 error:
        health_code_update();
-       free(ust_exclusion);
+       free(ust_exclusions);
        return ret;
 }
 
 /*
  * Disable the specified event on to UST tracer for the UST session.
  */
-static int disable_ust_event(struct ust_app *app,
-               struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
+static int disable_ust_object(struct ust_app *app,
+               struct lttng_ust_object_data *object)
 {
        int ret;
 
        health_code_update();
 
        pthread_mutex_lock(&app->sock_lock);
-       ret = ustctl_disable(app->sock, ua_event->obj);
+       ret = ustctl_disable(app->sock, object);
        pthread_mutex_unlock(&app->sock_lock);
        if (ret < 0) {
                if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
-                       ERR("UST app event %s disable failed for app (pid: %d) "
-                                       "and session handle %d with ret %d",
-                                       ua_event->attr.name, app->pid, ua_sess->handle, ret);
+                       ERR("UST app disable failed for object %p app (pid: %d) with ret %d",
+                                       object, app->pid, ret);
                } else {
                        /*
                         * This is normal behavior, an application can die during the
@@ -1497,8 +1560,8 @@ static int disable_ust_event(struct ust_app *app,
                goto error;
        }
 
-       DBG2("UST app event %s disabled successfully for app (pid: %d)",
-                       ua_event->attr.name, app->pid);
+       DBG2("UST app object %p disabled successfully for app (pid: %d)",
+                       object, app->pid);
 
 error:
        health_code_update();
@@ -1586,21 +1649,19 @@ error:
 /*
  * Enable the specified event on to UST tracer for the UST session.
  */
-static int enable_ust_event(struct ust_app *app,
-               struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
+static int enable_ust_object(struct ust_app *app, struct lttng_ust_object_data *ust_object)
 {
        int ret;
 
        health_code_update();
 
        pthread_mutex_lock(&app->sock_lock);
-       ret = ustctl_enable(app->sock, ua_event->obj);
+       ret = ustctl_enable(app->sock, ust_object);
        pthread_mutex_unlock(&app->sock_lock);
        if (ret < 0) {
                if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
-                       ERR("UST app event %s enable failed for app (pid: %d) "
-                                       "and session handle %d with ret %d",
-                                       ua_event->attr.name, app->pid, ua_sess->handle, ret);
+                       ERR("UST app enable failed for object %p app (pid: %d) with ret %d",
+                                       ust_object, app->pid, ret);
                } else {
                        /*
                         * This is normal behavior, an application can die during the
@@ -1608,13 +1669,13 @@ static int enable_ust_event(struct ust_app *app,
                         * continue normally.
                         */
                        ret = 0;
-                       DBG3("UST app enable event failed. Application is dead.");
+                       DBG3("UST app enable failed. Application is dead.");
                }
                goto error;
        }
 
-       DBG2("UST app event %s enabled successfully for app (pid: %d)",
-                       ua_event->attr.name, app->pid);
+       DBG2("UST app object %p enabled successfully for app (pid: %d)",
+                       ust_object, app->pid);
 
 error:
        health_code_update();
@@ -1710,14 +1771,14 @@ int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
 
        ua_event->handle = ua_event->obj->handle;
 
-       DBG2("UST app event %s created successfully for pid:%d",
-                       ua_event->attr.name, app->pid);
+       DBG2("UST app event %s created successfully for pid:%d object: %p",
+                       ua_event->attr.name, app->pid, ua_event->obj);
 
        health_code_update();
 
        /* Set filter if one is present. */
        if (ua_event->filter) {
-               ret = set_ust_event_filter(ua_event, app);
+               ret = set_ust_filter(app, ua_event->filter, ua_event->obj);
                if (ret < 0) {
                        goto error;
                }
@@ -1725,7 +1786,7 @@ int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
 
        /* Set exclusions for the event */
        if (ua_event->exclusion) {
-               ret = set_ust_event_exclusion(ua_event, app);
+               ret = set_ust_exclusions(app, ua_event->exclusion, ua_event->obj);
                if (ret < 0) {
                        goto error;
                }
@@ -1737,7 +1798,7 @@ int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
                 * We now need to explicitly enable the event, since it
                 * is now disabled at creation.
                 */
-               ret = enable_ust_event(app, ua_sess, ua_event);
+               ret = enable_ust_object(app, ua_event->obj);
                if (ret < 0) {
                        /*
                         * If we hit an EPERM, something is wrong with our enable call. If
@@ -1764,6 +1825,159 @@ error:
        return ret;
 }
 
+static
+void init_ust_trigger_from_event_rule(const struct lttng_event_rule *rule, struct lttng_ust_trigger *trigger)
+{
+       enum lttng_event_rule_status status;
+       enum lttng_loglevel_type loglevel_type;
+       enum lttng_ust_loglevel_type ust_loglevel_type = LTTNG_UST_LOGLEVEL_ALL;
+       int loglevel = -1;
+       const char *pattern;
+
+       /* For now, only LTTNG_EVENT_RULE_TYPE_TRACEPOINT is supported. */
+       assert(lttng_event_rule_get_type(rule) == LTTNG_EVENT_RULE_TYPE_TRACEPOINT);
+
+       memset(trigger, 0, sizeof(*trigger));
+
+       if (lttng_event_rule_is_agent(rule)) {
+               /*
+                * Special event for agents
+                * The actual meat of the event is in the filter that will be
+                * attached later on.
+                * Set the default values for the agent event.
+                */
+               pattern = event_get_default_agent_ust_name(lttng_event_rule_get_domain_type(rule));
+               loglevel = 0;
+               ust_loglevel_type = LTTNG_UST_LOGLEVEL_ALL;
+       } else {
+               status = lttng_event_rule_tracepoint_get_pattern(rule, &pattern);
+               if (status != LTTNG_EVENT_RULE_STATUS_OK) {
+                       /* At this point this is a fatal error */
+                       assert(0);
+               }
+
+               status = lttng_event_rule_tracepoint_get_loglevel_type(
+                               rule, &loglevel_type);
+               if (status != LTTNG_EVENT_RULE_STATUS_OK) {
+                       /* At this point this is a fatal error */
+                       assert(0);
+               }
+
+               switch (loglevel_type) {
+               case LTTNG_EVENT_LOGLEVEL_ALL:
+                       ust_loglevel_type = LTTNG_UST_LOGLEVEL_ALL;
+                       break;
+               case LTTNG_EVENT_LOGLEVEL_RANGE:
+                       ust_loglevel_type = LTTNG_UST_LOGLEVEL_RANGE;
+                       break;
+               case LTTNG_EVENT_LOGLEVEL_SINGLE:
+                       ust_loglevel_type = LTTNG_UST_LOGLEVEL_SINGLE;
+                       break;
+               }
+
+               if (loglevel_type != LTTNG_EVENT_LOGLEVEL_ALL) {
+                       status = lttng_event_rule_tracepoint_get_loglevel(
+                                       rule, &loglevel);
+                       assert(status == LTTNG_EVENT_RULE_STATUS_OK);
+               }
+       }
+
+       trigger->instrumentation = LTTNG_UST_TRACEPOINT;
+       strncpy(trigger->name, pattern, LTTNG_UST_SYM_NAME_LEN - 1);
+       trigger->loglevel_type = ust_loglevel_type;
+       trigger->loglevel = loglevel;
+}
+
+/*
+ * Create the specified token event rule on the UST tracer for a UST app.
+ */
+static
+int create_ust_token_event_rule(struct ust_app *app, struct ust_app_token_event_rule *ua_token)
+{
+       int ret = 0;
+       struct lttng_ust_trigger trigger;
+
+       health_code_update();
+
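+       /* Describe the event rule as a UST trigger keyed by its token. */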
+       init_ust_trigger_from_event_rule(ua_token->event_rule, &trigger);
+       trigger.id = ua_token->token;
+
+       /* Create UST trigger on tracer */
+       pthread_mutex_lock(&app->sock_lock);
+       ret = ustctl_create_trigger(app->sock, &trigger, app->token_communication.handle, &ua_token->obj);
+       pthread_mutex_unlock(&app->sock_lock);
+       if (ret < 0) {
+               if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+                       ERR("Error ustctl create trigger %s for app pid: %d with ret %d",
+                                       trigger.name, app->pid, ret);
+                       abort();
+               } else {
+                       /*
+                        * This is normal behavior, an application can die during the
+                        * creation process. Don't report an error so the execution can
+                        * continue normally.
+                        */
+                       ret = 0;
+                       DBG3("UST app create trigger failed. Application is dead.");
+               }
+               goto error;
+       }
+
+       ua_token->handle = ua_token->obj->handle;
+
+       DBG2("UST app trigger %s created successfully for pid:%d object: %p",
+                       trigger.name, app->pid, ua_token->obj);
+
+       health_code_update();
+
+       /* Set filter if one is present. */
+       if (ua_token->filter) {
+               ret = set_ust_filter(app, ua_token->filter, ua_token->obj);
+               if (ret < 0) {
+                       goto error;
+               }
+       }
+
+       /* Set exclusions for the event */
+       if (ua_token->exclusion) {
+               ret = set_ust_exclusions(app, ua_token->exclusion, ua_token->obj);
+               if (ret < 0) {
+                       goto error;
+               }
+       }
+
+       /*
+        * We now need to explicitly enable the event, since it
+        * is disabled at creation.
+        */
+       ret = enable_ust_object(app, ua_token->obj);
+       if (ret < 0) {
+               /*
+                * If we hit an EPERM, something is wrong with our enable call. If
+                * we get an EEXIST, there is a problem on the tracer side since we
+                * just created it.
+                */
+               switch (ret) {
+               case -LTTNG_UST_ERR_PERM:
+                       /* Code flow problem */
+                       assert(0);
+               case -LTTNG_UST_ERR_EXIST:
+                       /* It's OK for our use case. */
+                       ret = 0;
+                       break;
+               default:
+                       break;
+               }
+               goto error;
+       }
+       ua_token->enabled = true;
+
+error:
+       health_code_update();
+       return ret;
+}
+
 /*
  * Copy data between an UST app event and a LTT event.
  */
@@ -1901,12 +2115,12 @@ static void shadow_copy_session(struct ust_app_session *ua_sess,
                switch (ua_sess->buffer_type) {
                case LTTNG_BUFFER_PER_PID:
                        ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
-                                       DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
+                                       "/" DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
                                        app->name, app->pid, datetime);
                        break;
                case LTTNG_BUFFER_PER_UID:
                        ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
-                                       DEFAULT_UST_TRACE_UID_PATH,
+                                       "/" DEFAULT_UST_TRACE_UID_PATH,
                                        app->uid, app->bits_per_long);
                        break;
                default:
@@ -2347,7 +2561,7 @@ int enable_ust_app_event(struct ust_app_session *ua_sess,
 {
        int ret;
 
-       ret = enable_ust_event(app, ua_sess, ua_event);
+       ret = enable_ust_object(app, ua_event->obj);
        if (ret < 0) {
                goto error;
        }
@@ -2366,7 +2580,7 @@ static int disable_ust_app_event(struct ust_app_session *ua_sess,
 {
        int ret;
 
-       ret = disable_ust_event(app, ua_sess, ua_event);
+       ret = disable_ust_object(app, ua_event->obj);
        if (ret < 0) {
                goto error;
        }
@@ -3181,6 +3395,57 @@ error:
        return ret;
 }
 
+/*
+ * Create a UST app token event rule and create it on the tracer side.
+ *
+ * Called with ust app session mutex held.
+ */
+static
+int create_ust_app_token_event_rule(struct lttng_event_rule *rule,
+               struct ust_app *app, uint64_t token)
+{
+       int ret = 0;
+       struct ust_app_token_event_rule *ua_token;
+
+       ua_token = alloc_ust_app_token_event_rule(rule, token);
+       if (ua_token == NULL) {
+               ret = -ENOMEM;
+               goto end;
+       }
+
+       /* Create it on the tracer side */
+       ret = create_ust_token_event_rule(app, ua_token);
+       if (ret < 0) {
+               /*
+                * Not found previously means that it does not exist on the
+                * tracer. If the application reports that the event existed,
+                * it means there is a bug in the sessiond or lttng-ust
+                * (or corruption, etc.)
+                */
+               if (ret == -LTTNG_UST_ERR_EXIST) {
+                       ERR("Tracer for application reported that a token event rule being created already existed: "
+                                       "token = \"%" PRIu64 "\", pid = %d, ppid = %d, uid = %d, gid = %d",
+                                       token,
+                                       app->pid, app->ppid, app->uid,
+                                       app->gid);
+               }
+               goto error;
+       }
+
+       lttng_ht_add_unique_u64(app->tokens_ht, &ua_token->node);
+
+       DBG2("UST app create token event rule %" PRIu64 " for PID %d completed", token,
+                       app->pid);
+
+end:
+       return ret;
+
+error:
+       /* Safe: the caller already holds the RCU read-side lock. */
+       delete_ust_app_token_event_rule(-1, ua_token, app);
+       return ret;
+}
+
 /*
  * Create UST metadata and open it on the tracer side.
  *
@@ -3325,6 +3590,7 @@ error:
 struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
 {
        struct ust_app *lta = NULL;
+       struct lttng_pipe *trigger_event_source_pipe = NULL;
 
        assert(msg);
        assert(sock >= 0);
@@ -3341,12 +3607,20 @@ struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
                goto error;
        }
 
+       trigger_event_source_pipe = lttng_pipe_open(FD_CLOEXEC);
+       if (!trigger_event_source_pipe) {
+               PERROR("Open trigger pipe");
+               goto error;
+       }
+
        lta = zmalloc(sizeof(struct ust_app));
        if (lta == NULL) {
                PERROR("malloc");
                goto error;
        }
 
+       lta->token_communication.trigger_event_pipe = trigger_event_source_pipe;
+
        lta->ppid = msg->ppid;
        lta->uid = msg->uid;
        lta->gid = msg->gid;
@@ -3365,6 +3639,7 @@ struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
        lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
        lta->ust_sessions_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
        lta->notify_sock = -1;
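+       /* Token event rules known to this app, indexed by trigger token. */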
+       lta->tokens_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
 
        /* Copy name and make sure it's NULL terminated. */
        strncpy(lta->name, msg->name, sizeof(lta->name));
@@ -3451,6 +3726,47 @@ int ust_app_version(struct ust_app *app)
        return ret;
 }
 
+/*
+ * Set up the base trigger group.
+ *
+ * Return 0 on success, else a negative value: either an errno code or an
+ * LTTng-UST error code.
+ */
+int ust_app_setup_trigger_group(struct ust_app *app)
+{
+       int ret;
+       int writefd;
+       struct lttng_ust_object_data *group = NULL;
+       enum lttng_error_code lttng_ret;
+
+       assert(app);
+
+       /* Get the write side of the pipe */
+       writefd = lttng_pipe_get_writefd(app->token_communication.trigger_event_pipe);
+
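+       /*
+        * The tracer writes trigger notifications to this pipe; the pipe is
+        * registered with the notification thread below so it can read them.
+        */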
+       pthread_mutex_lock(&app->sock_lock);
+       ret = ustctl_create_trigger_group(app->sock, writefd, &group);
+       pthread_mutex_unlock(&app->sock_lock);
+       if (ret < 0) {
+               ERR("UST app %d create_trigger_group failed with ret %d", app->sock, ret);
+               goto end;
+       }
+
+       app->token_communication.handle = group;
+
+       lttng_ret = notification_thread_command_add_application(
+                       notification_thread_handle, app->token_communication.trigger_event_pipe);
+       if (lttng_ret != LTTNG_OK) {
+               /* TODO: use a more descriptive error code. */
+               ret = -1;
+               ERR("Failed to add application to notification thread");
+               goto end;
+       }
+
+end:
+       return ret;
+}
+
 /*
  * Unregister app by removing it from the global traceable app list and freeing
  * the data struct.
@@ -3459,6 +3775,7 @@ int ust_app_version(struct ust_app *app)
  */
 void ust_app_unregister(int sock)
 {
+       enum lttng_error_code ret_code;
        struct ust_app *lta;
        struct lttng_ht_node_ulong *node;
        struct lttng_ht_iter ust_app_sock_iter;
@@ -3564,6 +3881,13 @@ void ust_app_unregister(int sock)
                                lta->pid);
        }
 
+       ret_code = notification_thread_command_remove_application(
+                       notification_thread_handle,
+                       lta->token_communication.trigger_event_pipe);
+       if (ret_code != LTTNG_OK) {
+               ERR("Failed to remove application from notification thread");
+       }
+
        /* Free memory */
        call_rcu(&lta->pid_n.head, delete_ust_app_rcu);
 
@@ -4161,75 +4485,6 @@ end:
        return ret;
 }
 
-/*
- * For a specific UST session, create the channel for all registered apps.
- */
-int ust_app_create_channel_glb(struct ltt_ust_session *usess,
-               struct ltt_ust_channel *uchan)
-{
-       int ret = 0;
-       struct cds_lfht_iter iter;
-       struct ust_app *app;
-
-       assert(usess);
-       assert(usess->active);
-       assert(uchan);
-
-       DBG2("UST app adding channel %s to UST domain for session id %" PRIu64,
-                       uchan->name, usess->id);
-
-       rcu_read_lock();
-       /* For every registered applications */
-       cds_lfht_for_each_entry(ust_app_ht->ht, &iter, app, pid_n.node) {
-               struct ust_app_session *ua_sess;
-               int session_was_created = 0;
-
-               if (!app->compatible ||
-                               !trace_ust_pid_tracker_lookup(usess, app->pid)) {
-                       goto error_rcu_unlock;
-               }
-
-               /*
-                * Create session on the tracer side and add it to app session HT. Note
-                * that if session exist, it will simply return a pointer to the ust
-                * app session.
-                */
-               ret = find_or_create_ust_app_session(usess, app, &ua_sess,
-                               &session_was_created);
-               if (ret < 0) {
-                       switch (ret) {
-                       case -ENOTCONN:
-                               /*
-                                * The application's socket is not valid. Either a bad
-                                * socket or a timeout on it. We can't inform the caller
-                                * that for a specific app, the session failed so lets
-                                * continue here; it is not an error.
-                                */
-                               ret = 0;
-                               goto error_rcu_unlock;
-                       case -ENOMEM:
-                       default:
-                               goto error_rcu_unlock;
-                       }
-               }
-
-               if (ua_sess->deleted) {
-                       continue;
-               }
-               ret = ust_app_channel_create(usess, ua_sess, uchan, app, NULL);
-               if (ret) {
-                       if (session_was_created) {
-                               destroy_app_session(app, ua_sess);
-                       }
-                       /* Continue to the next application. */
-               }
-       }
-
-error_rcu_unlock:
-       rcu_read_unlock();
-       return ret;
-}
-
 /*
  * Enable event for a specific session and channel on the tracer.
  */
@@ -4416,6 +4671,11 @@ int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
                goto end;
        }
 
+       if (ua_sess->enabled) {
+               pthread_mutex_unlock(&ua_sess->lock);
+               goto end;
+       }
+
        /* Upon restart, we skip the setup, already done */
        if (ua_sess->started) {
                goto skip_setup;
@@ -4456,6 +4716,7 @@ skip_setup:
 
        /* Indicate that the session has been started once */
        ua_sess->started = 1;
+       ua_sess->enabled = 1;
 
        pthread_mutex_unlock(&ua_sess->lock);
 
@@ -4545,6 +4806,7 @@ int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
        }
 
        health_code_update();
+       ua_sess->enabled = 0;
 
        /* Quiescent wait after stopping trace */
        pthread_mutex_lock(&app->sock_lock);
@@ -5062,6 +5324,123 @@ end:
        return ret;
 }
 
+static
+void ust_app_synchronize_tokens(struct ust_app *app)
+{
+       int ret = 0;
+       enum lttng_error_code ret_code;
+       enum lttng_trigger_status t_status;
+       struct lttng_ht_iter app_trigger_iter;
+       struct lttng_triggers *triggers;
+       struct ust_app_token_event_rule *token_event_rule_element;
+       unsigned int count;
+
+       rcu_read_lock();
+       /*
+        * TODO: is this necessary to protect against new triggers being added?
+        * notification_trigger_tokens_ht is still the backing data structure
+        * for this listing. Leave it there for now.
+        */
+       pthread_mutex_lock(&notification_trigger_tokens_ht_lock);
+       ret_code = notification_thread_command_get_tokens(
+                       notification_thread_handle, &triggers);
+       if (ret_code != LTTNG_OK) {
+               ret = -1;
+               goto end;
+       }
+
+       assert(triggers);
+
+       t_status = lttng_triggers_get_count(triggers, &count);
+       if (t_status != LTTNG_TRIGGER_STATUS_OK) {
+               ret = -1;
+               goto end;
+       }
+
+       for (unsigned int i = 0; i < count; i++) {
+               struct lttng_condition *condition;
+               struct lttng_event_rule *event_rule;
+               struct lttng_trigger *trigger;
+               struct ust_app_token_event_rule *ua_token;
+               uint64_t token;
+
+               trigger = lttng_triggers_get_pointer_of_index(triggers, i);
+               assert(trigger);
+
+               /* TODO: error checking and type checking */
+               token = lttng_trigger_get_key(trigger);
+               condition = lttng_trigger_get_condition(trigger);
+               (void) lttng_condition_event_rule_get_rule_no_const(condition, &event_rule);
+
+               if (lttng_event_rule_get_domain_type(event_rule) == LTTNG_DOMAIN_KERNEL) {
+                       /* Skip kernel related trigger */
+                       continue;
+               }
+
+               /* Check whether this trigger token is already known to the app. */
+               ua_token = find_ust_app_token_event_rule(app->tokens_ht, token);
+               if (!ua_token) {
+                       ret = create_ust_app_token_event_rule(event_rule, app, token);
+                       if (ret < 0) {
+                               goto end;
+                       }
+               }
+       }
+
+       /*
+        * Remove from the app all triggers that are no longer known to the
+        * notification thread.
+        * TODO: find a better way than this; do it on the unregister command
+        * and be specific about which token to remove instead of going over
+        * all triggers known to the app. This is suboptimal.
+        */
+       cds_lfht_for_each_entry (app->tokens_ht->ht, &app_trigger_iter.iter,
+                       token_event_rule_element, node.node) {
+               uint64_t token;
+               bool found = false;
+
+               token = token_event_rule_element->token;
+
+               /*
+                * Check if the app event trigger still exists on the
+                * notification side.
+                * TODO: might want to change the backing data struct of the
+                * lttng_triggers object to allow quick lookup?
+                * As in the kernel domain, most of this can be removed once
+                * triggers are deleted on a per-trigger basis.
+                */
+
+               for (unsigned int i = 0; i < count; i++) {
+                       struct lttng_trigger *trigger;
+                       uint64_t inner_token;
+
+                       trigger = lttng_triggers_get_pointer_of_index(
+                                       triggers, i);
+                       assert(trigger);
+
+                       inner_token = lttng_trigger_get_key(trigger);
+
+                       if (inner_token == token) {
+                               found = true;
+                               break;
+                       }
+               }
+
+               if (found) {
+                       /* Still valid */
+                       continue;
+               }
+
+               /* TODO: this hash table API is awkward to use. */
+               ret = lttng_ht_del(app->tokens_ht, &app_trigger_iter);
+               assert(!ret);
+
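+               /* Disable the trigger on the tracer side before deleting it. */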
+               (void) disable_ust_object(app, token_event_rule_element->obj);
+
+               delete_ust_app_token_event_rule(app->sock, token_event_rule_element, app);
+       }
+end:
+       rcu_read_unlock();
+       pthread_mutex_unlock(&notification_trigger_tokens_ht_lock);
+       return;
+}
+
 /*
  * The caller must ensure that the application is compatible and is tracked
  * by the PID tracker.
@@ -5184,7 +5563,11 @@ void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
        if (!app->compatible) {
                return;
        }
-       if (trace_ust_pid_tracker_lookup(usess, app->pid)) {
+       if (trace_ust_id_tracker_lookup(LTTNG_TRACKER_VPID, usess, app->pid) &&
+                       trace_ust_id_tracker_lookup(
+                                       LTTNG_TRACKER_VUID, usess, app->uid) &&
+                       trace_ust_id_tracker_lookup(
+                                       LTTNG_TRACKER_VGID, usess, app->gid)) {
                /*
                 * Synchronize the application's internal tracing configuration
                 * and start tracing.
@@ -5196,6 +5579,16 @@ void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
        }
 }
 
+void ust_app_global_update_tokens(struct ust_app *app)
+{
+       DBG2("UST app global update token for app sock %d", app->sock);
+
+       if (!app->compatible) {
+               return;
+       }
+       ust_app_synchronize_tokens(app);
+}
+
 /*
  * Called with session lock held.
  */
@@ -5211,6 +5604,18 @@ void ust_app_global_update_all(struct ltt_ust_session *usess)
        rcu_read_unlock();
 }
 
+void ust_app_global_update_all_tokens(void)
+{
+       struct lttng_ht_iter iter;
+       struct ust_app *app;
+
+       rcu_read_lock();
+       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+               ust_app_global_update_tokens(app);
+       }
+       rcu_read_unlock();
+}
+
 /*
  * Add context to a specific channel for global UST domain.
  */
@@ -5921,6 +6326,7 @@ enum lttng_error_code ust_app_snapshot_record(
                        struct buffer_reg_channel *reg_chan;
                        struct consumer_socket *socket;
                        char pathname[PATH_MAX];
+                       size_t consumer_path_offset = 0;
 
                        if (!reg->registry->reg.ust->metadata_key) {
                                /* Skip since no metadata is present */
@@ -5936,12 +6342,8 @@ enum lttng_error_code ust_app_snapshot_record(
                        }
 
                        memset(pathname, 0, sizeof(pathname));
-                       /*
-                        * DEFAULT_UST_TRACE_UID_PATH already contains a path
-                        * separator.
-                        */
                        ret = snprintf(pathname, sizeof(pathname),
-                                       DEFAULT_UST_TRACE_DIR DEFAULT_UST_TRACE_UID_PATH,
+                                       DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
                                        reg->uid, reg->bits_per_long);
                        if (ret < 0) {
                                PERROR("snprintf snapshot path");
@@ -5950,7 +6352,8 @@ enum lttng_error_code ust_app_snapshot_record(
                        }
                        /* Free path allowed on previous iteration. */
                        free(trace_path);
-                       trace_path = setup_channel_trace_path(usess->consumer, pathname);
+                       trace_path = setup_channel_trace_path(usess->consumer, pathname,
+                                               &consumer_path_offset);
                        if (!trace_path) {
                                status = LTTNG_ERR_INVALID;
                                goto error;
@@ -5961,7 +6364,7 @@ enum lttng_error_code ust_app_snapshot_record(
                                status = consumer_snapshot_channel(socket,
                                                reg_chan->consumer_key,
                                                output, 0, usess->uid,
-                                               usess->gid, trace_path, wait,
+                                               usess->gid, &trace_path[consumer_path_offset], wait,
                                                nb_packets_per_stream);
                                if (status != LTTNG_OK) {
                                        goto error;
@@ -5969,7 +6372,8 @@ enum lttng_error_code ust_app_snapshot_record(
                        }
                        status = consumer_snapshot_channel(socket,
                                        reg->registry->reg.ust->metadata_key, output, 1,
-                                       usess->uid, usess->gid, trace_path, wait, 0);
+                                       usess->uid, usess->gid, &trace_path[consumer_path_offset],
+                                       wait, 0);
                        if (status != LTTNG_OK) {
                                goto error;
                        }
@@ -5985,6 +6389,7 @@ enum lttng_error_code ust_app_snapshot_record(
                        struct ust_app_session *ua_sess;
                        struct ust_registry_session *registry;
                        char pathname[PATH_MAX];
+                       size_t consumer_path_offset = 0;
 
                        ua_sess = lookup_session_by_app(usess, app);
                        if (!ua_sess) {
@@ -6002,7 +6407,7 @@ enum lttng_error_code ust_app_snapshot_record(
 
                        /* Add the UST default trace dir to path. */
                        memset(pathname, 0, sizeof(pathname));
-                       ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "%s",
+                       ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
                                        ua_sess->path);
                        if (ret < 0) {
                                status = LTTNG_ERR_INVALID;
@@ -6011,7 +6416,8 @@ enum lttng_error_code ust_app_snapshot_record(
                        }
                        /* Free path allowed on previous iteration. */
                        free(trace_path);
-                       trace_path = setup_channel_trace_path(usess->consumer, pathname);
+                       trace_path = setup_channel_trace_path(usess->consumer, pathname,
+                                       &consumer_path_offset);
                        if (!trace_path) {
                                status = LTTNG_ERR_INVALID;
                                goto error;
@@ -6024,7 +6430,7 @@ enum lttng_error_code ust_app_snapshot_record(
                                                                .uid,
                                                ua_sess->effective_credentials
                                                                .gid,
-                                               trace_path, wait,
+                                               &trace_path[consumer_path_offset], wait,
                                                nb_packets_per_stream);
                                switch (status) {
                                case LTTNG_OK:
@@ -6045,7 +6451,7 @@ enum lttng_error_code ust_app_snapshot_record(
                                        registry->metadata_key, output, 1,
                                        ua_sess->effective_credentials.uid,
                                        ua_sess->effective_credentials.gid,
-                                       trace_path, wait, 0);
+                                       &trace_path[consumer_path_offset], wait, 0);
                        switch (status) {
                        case LTTNG_OK:
                                break;
@@ -6325,6 +6731,11 @@ enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
                        struct buffer_reg_channel *reg_chan;
                        struct consumer_socket *socket;
 
+                       if (!reg->registry->reg.ust->metadata_key) {
+                               /* Skip since no metadata is present */
+                               continue;
+                       }
+
                        /* Get consumer socket to use to push the metadata.*/
                        socket = consumer_find_socket_by_bitness(reg->bits_per_long,
                                        usess->consumer);
@@ -6459,7 +6870,7 @@ enum lttng_error_code ust_app_create_channel_subdirectories(
 
                cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
                        fmt_ret = asprintf(&pathname_index,
-                                      DEFAULT_UST_TRACE_DIR DEFAULT_UST_TRACE_UID_PATH "/" DEFAULT_INDEX_DIR,
+                                      DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH "/" DEFAULT_INDEX_DIR,
                                       reg->uid, reg->bits_per_long);
                        if (fmt_ret < 0) {
                                ERR("Failed to format channel index directory");
@@ -6486,6 +6897,17 @@ enum lttng_error_code ust_app_create_channel_subdirectories(
        {
                struct ust_app *app;
 
+               /*
+                * Create the toplevel ust/ directory in case no apps are running.
+                */
+               chunk_status = lttng_trace_chunk_create_subdirectory(
+                               usess->current_trace_chunk,
+                               DEFAULT_UST_TRACE_DIR);
+               if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
+                       ret = LTTNG_ERR_CREATE_DIR_FAIL;
+                       goto error;
+               }
+
                cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
                                pid_n.node) {
                        struct ust_app_session *ua_sess;
@@ -6504,7 +6926,7 @@ enum lttng_error_code ust_app_create_channel_subdirectories(
                        }
 
                        fmt_ret = asprintf(&pathname_index,
-                                       DEFAULT_UST_TRACE_DIR "%s/" DEFAULT_INDEX_DIR,
+                                       DEFAULT_UST_TRACE_DIR "/%s/" DEFAULT_INDEX_DIR,
                                        ua_sess->path);
                        if (fmt_ret < 0) {
                                ERR("Failed to format channel index directory");
@@ -6535,3 +6957,151 @@ error:
        rcu_read_unlock();
        return ret;
 }
+
+/*
+ * Clear all the channels of a session.
+ *
+ * Return LTTNG_OK on success or else an LTTng error code.
+ */
+enum lttng_error_code ust_app_clear_session(struct ltt_session *session)
+{
+       int ret;
+       enum lttng_error_code cmd_ret = LTTNG_OK;
+       struct lttng_ht_iter iter;
+       struct ust_app *app;
+       struct ltt_ust_session *usess = session->ust_session;
+
+       assert(usess);
+
+       rcu_read_lock();
+
+       if (usess->active) {
+               ERR("Expecting inactive session %s (%" PRIu64 ")", session->name, session->id);
+               cmd_ret = LTTNG_ERR_FATAL;
+               goto end;
+       }
+
+       switch (usess->buffer_type) {
+       case LTTNG_BUFFER_PER_UID:
+       {
+               struct buffer_reg_uid *reg;
+
+               cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+                       struct buffer_reg_channel *reg_chan;
+                       struct consumer_socket *socket;
+
+                       /* Get consumer socket to use to push the metadata.*/
+                       socket = consumer_find_socket_by_bitness(reg->bits_per_long,
+                                       usess->consumer);
+                       if (!socket) {
+                               cmd_ret = LTTNG_ERR_INVALID;
+                               goto error_socket;
+                       }
+
+                       /* Clear the data channels. */
+                       cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
+                                       reg_chan, node.node) {
+                               ret = consumer_clear_channel(socket,
+                                               reg_chan->consumer_key);
+                               if (ret < 0) {
+                                       goto error;
+                               }
+                       }
+
+                       (void) push_metadata(reg->registry->reg.ust, usess->consumer);
+
+                       /*
+                        * Clear the metadata channel.
+                        * Metadata channel is not cleared per se but we still need to
+                        * perform a rotation operation on it behind the scene.
+                        */
+                       ret = consumer_clear_channel(socket,
+                                       reg->registry->reg.ust->metadata_key);
+                       if (ret < 0) {
+                               goto error;
+                       }
+               }
+               break;
+       }
+       case LTTNG_BUFFER_PER_PID:
+       {
+               cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+                       struct consumer_socket *socket;
+                       struct lttng_ht_iter chan_iter;
+                       struct ust_app_channel *ua_chan;
+                       struct ust_app_session *ua_sess;
+                       struct ust_registry_session *registry;
+
+                       ua_sess = lookup_session_by_app(usess, app);
+                       if (!ua_sess) {
+                               /* Session not associated with this app. */
+                               continue;
+                       }
+
+                       /* Get the right consumer socket for the application. */
+                       socket = consumer_find_socket_by_bitness(app->bits_per_long,
+                                       usess->consumer);
+                       if (!socket) {
+                               cmd_ret = LTTNG_ERR_INVALID;
+                               goto error_socket;
+                       }
+
+                       registry = get_session_registry(ua_sess);
+                       if (!registry) {
+                               DBG("Application session is being torn down. Skip application.");
+                               continue;
+                       }
+
+                       /* Clear the data channels. */
+                       cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
+                                       ua_chan, node.node) {
+                               ret = consumer_clear_channel(socket, ua_chan->key);
+                               if (ret < 0) {
+                                       /* Per-PID buffer and application going away. */
+                                       if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
+                                               continue;
+                                       }
+                                       goto error;
+                               }
+                       }
+
+                       (void) push_metadata(registry, usess->consumer);
+
+                       /*
+                        * Clear the metadata channel.
+                        * Metadata channel is not cleared per se but we still need to
+                        * perform rotation operation on it behind the scene.
+                        */
+                       ret = consumer_clear_channel(socket, registry->metadata_key);
+                       if (ret < 0) {
+                               /* Per-PID buffer and application going away. */
+                               if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
+                                       continue;
+                               }
+                               goto error;
+                       }
+               }
+               break;
+       }
+       default:
+               assert(0);
+               break;
+       }
+
+       cmd_ret = LTTNG_OK;
+       goto end;
+
+error:
+       switch (-ret) {
+       case LTTCOMM_CONSUMERD_RELAYD_CLEAR_DISALLOWED:
+               cmd_ret = LTTNG_ERR_CLEAR_RELAY_DISALLOWED;
+               break;
+       default:
+               cmd_ret = LTTNG_ERR_CLEAR_FAIL_CONSUMER;
+       }
+
+error_socket:
+end:
+       rcu_read_unlock();
+       return cmd_ret;
+}