Fix: possible use after free
[lttng-tools.git] / src / bin / lttng-sessiond / ust-app.c
index fdcad1c304ab0c717fb3f619b812de6d17433b31..d1fa0d09cb2b04d75b3419cd6fa969a602253eb0 100644
 
 #include "buffer-registry.h"
 #include "fd-limit.h"
-#include "health.h"
+#include "health-sessiond.h"
 #include "ust-app.h"
 #include "ust-consumer.h"
 #include "ust-ctl.h"
+#include "utils.h"
 
-/* Next available channel key. */
-static unsigned long next_channel_key;
-static unsigned long next_session_id;
+/* Next available channel key. Access under next_channel_key_lock. */
+static uint64_t _next_channel_key;
+static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;
+
+/* Next available session ID. Access under next_session_id_lock. */
+static uint64_t _next_session_id;
+static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
 
 /*
- * Return the atomically incremented value of next_channel_key.
+ * Return the incremented value of next_channel_key.
  */
-static inline unsigned long get_next_channel_key(void)
+static uint64_t get_next_channel_key(void)
 {
-       return uatomic_add_return(&next_channel_key, 1);
+       uint64_t ret;
+
+       pthread_mutex_lock(&next_channel_key_lock);
+       ret = ++_next_channel_key;
+       pthread_mutex_unlock(&next_channel_key_lock);
+       return ret;
 }
 
 /*
- * Return the atomically incremented value of next_session_id.
+ * Return the incremented value of next_session_id.
  */
-static inline unsigned long get_next_session_id(void)
+static uint64_t get_next_session_id(void)
 {
-       return uatomic_add_return(&next_session_id, 1);
+       uint64_t ret;
+
+       pthread_mutex_lock(&next_session_id_lock);
+       ret = ++_next_session_id;
+       pthread_mutex_unlock(&next_session_id_lock);
+       return ret;
 }
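
These two helpers trade the former uatomic_add_return() on an unsigned long
for a pthread mutex around a uint64_t, so a key stays 64-bit even where
unsigned long is only 32-bit, and 0 remains reserved as the "unset" value.
For reference, a minimal standalone sketch of the pattern; the names below
are illustrative, not taken from ust-app.c:

#include <inttypes.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative counter mirroring get_next_channel_key() above. */
static uint64_t counter;
static pthread_mutex_t counter_lock = PTHREAD_MUTEX_INITIALIZER;

static uint64_t next_key(void)
{
	uint64_t key;

	pthread_mutex_lock(&counter_lock);
	key = ++counter;	/* Pre-increment keeps 0 reserved as "unset". */
	pthread_mutex_unlock(&counter_lock);
	return key;
}

int main(void)
{
	printf("first key: %" PRIu64 "\n", next_key());
	return 0;
}
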
 
 static void copy_channel_attr_to_ustctl(
@@ -89,7 +104,7 @@ static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
        event = caa_container_of(node, struct ust_app_event, node.node);
        key = _key;
 
-       /* Match the 3 elements of the key: name, filter and loglevel. */
+       /* Match the 4 elements of the key: name, filter, loglevel and exclusions. */
 
        /* Event name */
        if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
@@ -125,6 +140,21 @@ static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
                }
        }
 
+       /* If only one of the exclusions is present, no match. */
+       if ((key->exclusion && !event->exclusion) ||
+                       (!key->exclusion && event->exclusion)) {
+               goto no_match;
+       }
+
+       if (key->exclusion && event->exclusion) {
+               /* Both exclusions exist; compare counts, then names. */
+               if (event->exclusion->count != key->exclusion->count ||
+                               memcmp(event->exclusion->names, key->exclusion->names,
+                                       event->exclusion->count * LTTNG_UST_SYM_NAME_LEN) != 0) {
+                       goto no_match;
+               }
+       }
+
        /* Match. */
        return 1;
 
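
The new key element follows a symmetric rule: two exclusions match when both
are absent, or when both are present with equal counts and byte-identical
name arrays. A self-contained sketch of that rule, using a stand-in struct
(the real layout comes from the lttng-ust ABI headers):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

#define SYM_NAME_LEN 256	/* stand-in for LTTNG_UST_SYM_NAME_LEN */

/* Simplified stand-in for struct lttng_ust_event_exclusion. */
struct exclusion {
	unsigned int count;
	char names[][SYM_NAME_LEN];	/* flexible array of symbol names */
};

static bool exclusions_match(const struct exclusion *a,
		const struct exclusion *b)
{
	if (!a || !b) {
		/* Match only when both are absent. */
		return a == b;
	}
	if (a->count != b->count) {
		return false;
	}
	return memcmp(a->names, b->names,
			(size_t) a->count * SYM_NAME_LEN) == 0;
}

int main(void)
{
	return exclusions_match(NULL, NULL) ? 0 : 1;
}
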
@@ -151,6 +181,7 @@ static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
        key.name = event->attr.name;
        key.filter = event->filter;
        key.loglevel = event->attr.loglevel;
+       key.exclusion = event->exclusion;
 
        node_ptr = cds_lfht_add_unique(ht->ht,
                        ht->hash_fct(event->node.key, lttng_ht_seed),
@@ -256,7 +287,8 @@ void delete_ust_app_event(int sock, struct ust_app_event *ua_event)
        assert(ua_event);
 
        free(ua_event->filter);
-
+       if (ua_event->exclusion != NULL) {
+               free(ua_event->exclusion);
+       }
        if (ua_event->obj != NULL) {
                ret = ustctl_release_object(sock, ua_event->obj);
                if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
@@ -305,6 +337,23 @@ void delete_ust_app_stream(int sock, struct ust_app_stream *stream)
        free(stream);
 }
 
+/*
+ * We need to execute ht_destroy outside of RCU read-side critical
+ * section and outside of call_rcu thread, so we postpone its execution
+ * using ht_cleanup_push. It is simpler than changing the semantics of
+ * the many callers of delete_ust_app_channel().
+ */
+static
+void delete_ust_app_channel_rcu(struct rcu_head *head)
+{
+       struct ust_app_channel *ua_chan =
+               caa_container_of(head, struct ust_app_channel, rcu_head);
+
+       ht_cleanup_push(ua_chan->ctx);
+       ht_cleanup_push(ua_chan->events);
+       free(ua_chan);
+}
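
This defers both hash table teardowns and the final free() until after an
RCU grace period. A compilable sketch of the underlying liburcu pattern,
using a hypothetical struct; plain free() stands in for the sessiond-specific
ht_cleanup_push() work:

#include <stdlib.h>
#include <urcu.h>		/* default liburcu flavor; link with -lurcu */
#include <urcu/call-rcu.h>

/* Hypothetical object retired through call_rcu(). */
struct obj {
	int value;
	struct rcu_head rcu_head;
};

static void free_obj_rcu(struct rcu_head *head)
{
	struct obj *o = caa_container_of(head, struct obj, rcu_head);

	/* Heavy teardown (here, the ht_cleanup_push() calls) goes last. */
	free(o);
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o) {
		return 1;
	}
	/*
	 * Readers that looked the object up inside an RCU read-side
	 * critical section may still hold it; the callback only runs
	 * once all such readers are done.
	 */
	call_rcu(&o->rcu_head, free_obj_rcu);
	rcu_barrier();	/* Drain pending callbacks before exiting. */
	return 0;
}
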
+
 /*
  * Delete ust app channel safely. RCU read lock must be held before calling
  * this function.
@@ -332,11 +381,11 @@ void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
 
        /* Wipe context */
        cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
+               cds_list_del(&ua_ctx->list);
                ret = lttng_ht_del(ua_chan->ctx, &iter);
                assert(!ret);
                delete_ust_app_ctx(sock, ua_ctx);
        }
-       lttng_ht_destroy(ua_chan->ctx);
 
        /* Wipe events */
        cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
@@ -345,18 +394,20 @@ void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
                assert(!ret);
                delete_ust_app_event(sock, ua_event);
        }
-       lttng_ht_destroy(ua_chan->events);
 
-       /* Wipe and free registry from session registry. */
-       registry = get_session_registry(ua_chan->session);
-       if (registry) {
-               ust_registry_channel_del_free(registry, ua_chan->key);
+       if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
+               /* Wipe and free registry from session registry. */
+               registry = get_session_registry(ua_chan->session);
+               if (registry) {
+                       ust_registry_channel_del_free(registry, ua_chan->key);
+               }
        }
 
        if (ua_chan->obj != NULL) {
                /* Remove channel from application UST object descriptor. */
                iter.iter.node = &ua_chan->ust_objd_node.node;
-               lttng_ht_del(app->ust_objd, &iter);
+               ret = lttng_ht_del(app->ust_objd, &iter);
+               assert(!ret);
                ret = ustctl_release_object(sock, ua_chan->obj);
                if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
                        ERR("UST app sock %d release channel obj failed with ret %d",
@@ -365,11 +416,14 @@ void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
                lttng_fd_put(LTTNG_FD_APPS, 1);
                free(ua_chan->obj);
        }
-       free(ua_chan);
+       call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
 }
 
 /*
- * Push metadata to consumer socket. The socket lock MUST be acquired.
+ * Push metadata to consumer socket.
+ *
+ * The socket lock MUST be acquired.
+ * The ust app session lock MUST be acquired.
  *
  * On success, return the len of metadata pushed or else a negative value.
  */
@@ -383,8 +437,19 @@ ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
 
        assert(registry);
        assert(socket);
-       /* Should never be 0 which is the initial state. */
-       assert(registry->metadata_key);
+
+       /*
+        * On a push metadata error, either the consumer is dead or the metadata
+        * channel has been destroyed because its endpoint might have died (e.g.
+        * relayd). In that case, the metadata closed flag is set to 1 and we deny
+        * pushing metadata again: it is no longer valid on the consumer side.
+        *
+        * Holding the ust app session lock allows us to make this check without
+        * taking the registry lock.
+        */
+       if (registry->metadata_closed) {
+               return -EPIPE;
+       }
 
        pthread_mutex_lock(&registry->lock);
 
@@ -417,6 +482,25 @@ push_data:
        ret = consumer_push_metadata(socket, registry->metadata_key,
                        metadata_str, len, offset);
        if (ret < 0) {
+               /*
+                * There is an acceptable race here between the registry metadata key
+                * assignment and the channel creation on the consumer. The session
+                * daemon can concurrently push metadata for this registry while the
+                * channel is being created on the consumer, since the registry's
+                * metadata key is assigned *before* the channel is set up. This
+                * ordering avoids the consumer asking for metadata that the session
+                * daemon cannot find.
+                *
+                * If that race is triggered, the metadata gets pushed either when the
+                * session is stopped or when the consumer requests it.
+                */
+               if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
+                       ret = 0;
+               }
+
+               /* Roll back the metadata len sent, since this push failed. */
+               pthread_mutex_lock(&registry->lock);
+               registry->metadata_len_sent -= len;
+               pthread_mutex_unlock(&registry->lock);
                ret_val = ret;
                goto error_push;
        }
@@ -459,7 +543,7 @@ static int push_metadata(struct ust_registry_session *registry,
         */
        if (!registry->metadata_key) {
                ret_val = 0;
-               goto error_rcu_unlock;
+               goto end_rcu_unlock;
        }
 
        /* Get consumer socket to use to push the metadata.*/
@@ -492,6 +576,13 @@ static int push_metadata(struct ust_registry_session *registry,
        return 0;
 
 error_rcu_unlock:
+       /*
+        * On error, flag the registry that the metadata is closed. We were unable
+        * to push anything and this means that either the consumer is not
+        * responding or the metadata cache has been destroyed on the consumer.
+        */
+       registry->metadata_closed = 1;
+end_rcu_unlock:
        rcu_read_unlock();
        return ret_val;
 }
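
The error_rcu_unlock/end_rcu_unlock pair relies on plain label fall-through:
the error path flags the registry as closed and then continues into the
shared unlock path, while the early no-metadata-key exit now jumps straight
past the flagging. A generic sketch of the idiom, with hypothetical names
and a mutex standing in for the RCU read lock:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int closed;

static int do_push(int have_key, int push_fails)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (!have_key) {
		/* Nothing to do: skip the error-only cleanup. */
		goto end_unlock;
	}
	if (push_fails) {
		ret = -1;
		goto error_unlock;
	}
	goto end_unlock;

error_unlock:
	/* Error-only step; control falls through into end_unlock. */
	closed = 1;
end_unlock:
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	return (do_push(1, 1) == -1 && closed) ? 0 : 1;
}
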
@@ -517,7 +608,7 @@ static int close_metadata(struct ust_registry_session *registry,
 
        if (!registry->metadata_key || registry->metadata_closed) {
                ret = 0;
-               goto error;
+               goto end;
        }
 
        /* Get consumer socket to use to push the metadata.*/
@@ -533,14 +624,34 @@ static int close_metadata(struct ust_registry_session *registry,
                goto error;
        }
 
-       /* Metadata successfully closed. Flag the registry. */
-       registry->metadata_closed = 1;
-
 error:
+       /*
+        * Metadata closed. Even on error, this means that the consumer is not
+        * responding or not found, so either way a second close should NOT be
+        * emitted for this registry.
+        */
+       registry->metadata_closed = 1;
+end:
        rcu_read_unlock();
        return ret;
 }
 
+/*
+ * We need to execute ht_destroy outside of RCU read-side critical
+ * section and outside of call_rcu thread, so we postpone its execution
+ * using ht_cleanup_push. It is simpler than changing the semantics of
+ * the many callers of delete_ust_app_session().
+ */
+static
+void delete_ust_app_session_rcu(struct rcu_head *head)
+{
+       struct ust_app_session *ua_sess =
+               caa_container_of(head, struct ust_app_session, rcu_head);
+
+       ht_cleanup_push(ua_sess->channels);
+       free(ua_sess);
+}
+
 /*
  * Delete ust app session safely. RCU read lock must be held before calling
  * this function.
@@ -556,16 +667,21 @@ void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
 
        assert(ua_sess);
 
+       pthread_mutex_lock(&ua_sess->lock);
+
        registry = get_session_registry(ua_sess);
-       if (registry) {
+       if (registry && !registry->metadata_closed) {
                /* Push metadata for application before freeing the application. */
                (void) push_metadata(registry, ua_sess->consumer);
 
                /*
                 * Don't ask to close metadata for global per UID buffers. Close
-                * metadata only on destroy trace session in this case.
+                * metadata only on destroy trace session in this case. Also, the
+                * previous metadata push could have flagged the registry as closed,
+                * so don't send a close command if it is.
                 */
-               if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
+               if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID &&
+                               !registry->metadata_closed) {
                        /* And ask to close it for this session registry. */
                        (void) close_metadata(registry, ua_sess->consumer);
                }
@@ -577,7 +693,6 @@ void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
                assert(!ret);
                delete_ust_app_channel(sock, ua_chan, app);
        }
-       lttng_ht_destroy(ua_sess->channels);
 
        /* In case of per PID, the registry is kept in the session. */
        if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
@@ -595,12 +710,16 @@ void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
                                        sock, ret);
                }
        }
-       free(ua_sess);
+       pthread_mutex_unlock(&ua_sess->lock);
+
+       call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
 }
 
 /*
  * Delete a traceable application structure from the global list. Never call
  * this function outside of a call_rcu call.
+ *
+ * RCU read side lock should _NOT_ be held when calling this function.
  */
 static
 void delete_ust_app(struct ust_app *app)
@@ -608,21 +727,21 @@ void delete_ust_app(struct ust_app *app)
        int ret, sock;
        struct ust_app_session *ua_sess, *tmp_ua_sess;
 
-       rcu_read_lock();
-
        /* Delete ust app sessions info */
        sock = app->sock;
        app->sock = -1;
 
-       lttng_ht_destroy(app->sessions);
-
        /* Wipe sessions */
        cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
                        teardown_node) {
                /* Free every object in the session and the session. */
+               rcu_read_lock();
                delete_ust_app_session(sock, ua_sess, app);
+               rcu_read_unlock();
        }
-       lttng_ht_destroy(app->ust_objd);
+
+       ht_cleanup_push(app->sessions);
+       ht_cleanup_push(app->ust_objd);
 
        /*
         * Wait until we have deleted the application from the sock hash table
@@ -645,8 +764,6 @@ void delete_ust_app(struct ust_app *app)
 
        DBG2("UST app pid %d deleted", app->pid);
        free(app);
-
-       rcu_read_unlock();
 }
 
 /*
@@ -746,6 +863,7 @@ struct ust_app_channel *alloc_ust_app_channel(char *name,
        lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
 
        CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
+       CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);
 
        /* Copy attributes */
        if (attr) {
@@ -837,6 +955,8 @@ struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context *uctx)
                goto error;
        }
 
+       CDS_INIT_LIST_HEAD(&ua_ctx->list);
+
        if (uctx) {
                memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
        }
@@ -874,8 +994,7 @@ error:
  * Find an ust_app using the sock and return it. RCU read side lock must be
  * held before calling this helper function.
  */
-static
-struct ust_app *find_app_by_sock(int sock)
+struct ust_app *ust_app_find_by_sock(int sock)
 {
        struct lttng_ht_node_ulong *node;
        struct lttng_ht_iter iter;
@@ -923,7 +1042,8 @@ error:
  * Return an ust_app_event object or NULL on error.
  */
 static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
-               char *name, struct lttng_ust_filter_bytecode *filter, int loglevel)
+               char *name, struct lttng_ust_filter_bytecode *filter, int loglevel,
+               const struct lttng_event_exclusion *exclusion)
 {
        struct lttng_ht_iter iter;
        struct lttng_ht_node_str *node;
@@ -937,6 +1057,8 @@ static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
        key.name = name;
        key.filter = filter;
        key.loglevel = loglevel;
+       /*
+        * The lttng_event_exclusion and lttng_ust_event_exclusion structures
+        * share the same layout, so this cast is safe.
+        */
+       key.exclusion = (struct lttng_ust_event_exclusion *)exclusion;
 
        /* Lookup using the event name as hash and a custom match fct. */
        cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
@@ -972,6 +1094,12 @@ int create_ust_channel_context(struct ust_app_channel *ua_chan,
                        ERR("UST app create channel context failed for app (pid: %d) "
                                        "with ret %d", app->pid, ret);
                } else {
+                       /*
+                        * This is normal behavior, an application can die during the
+                        * creation process. Don't report an error so the execution can
+                        * continue normally.
+                        */
+                       ret = 0;
                        DBG3("UST app disable event failed. Application is dead.");
                }
                goto error;
@@ -1010,6 +1138,12 @@ int set_ust_event_filter(struct ust_app_event *ua_event,
                        ERR("UST app event %s filter failed for app (pid: %d) "
                                        "with ret %d", ua_event->attr.name, app->pid, ret);
                } else {
+                       /*
+                        * This is normal behavior, an application can die during the
+                        * creation process. Don't report an error so the execution can
+                        * continue normally.
+                        */
+                       ret = 0;
                        DBG3("UST app filter event failed. Application is dead.");
                }
                goto error;
@@ -1022,6 +1156,47 @@ error:
        return ret;
 }
 
+/*
+ * Set event exclusions on the tracer.
+ */
+static
+int set_ust_event_exclusion(struct ust_app_event *ua_event,
+               struct ust_app *app)
+{
+       int ret;
+
+       health_code_update();
+
+       if (!ua_event->exclusion || !ua_event->exclusion->count) {
+               ret = 0;
+               goto error;
+       }
+
+       ret = ustctl_set_exclusion(app->sock, ua_event->exclusion,
+                       ua_event->obj);
+       if (ret < 0) {
+               if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+                       ERR("UST app event %s exclusions failed for app (pid: %d) "
+                                       "with ret %d", ua_event->attr.name, app->pid, ret);
+               } else {
+                       /*
+                        * This is normal behavior, an application can die during the
+                        * creation process. Don't report an error so the execution can
+                        * continue normally.
+                        */
+                       ret = 0;
+                       DBG3("UST app event exclusion failed. Application is dead.");
+               }
+               goto error;
+       }
+
+       DBG2("UST exclusion set successfully for event %s", ua_event->name);
+
+error:
+       health_code_update();
+       return ret;
+}
+
 /*
  * Disable the specified event on to UST tracer for the UST session.
  */
@@ -1039,6 +1214,12 @@ static int disable_ust_event(struct ust_app *app,
                                        "and session handle %d with ret %d",
                                        ua_event->attr.name, app->pid, ua_sess->handle, ret);
                } else {
+                       /*
+                        * This is normal behavior, an application can die during the
+                        * creation process. Don't report an error so the execution can
+                        * continue normally.
+                        */
+                       ret = 0;
                        DBG3("UST app disable event failed. Application is dead.");
                }
                goto error;
@@ -1069,6 +1250,12 @@ static int disable_ust_channel(struct ust_app *app,
                                        "and session handle %d with ret %d",
                                        ua_chan->name, app->pid, ua_sess->handle, ret);
                } else {
+                       /*
+                        * This is normal behavior, an application can die during the
+                        * creation process. Don't report an error so the execution can
+                        * continue normally.
+                        */
+                       ret = 0;
                        DBG3("UST app disable channel failed. Application is dead.");
                }
                goto error;
@@ -1099,6 +1286,12 @@ static int enable_ust_channel(struct ust_app *app,
                                        "and session handle %d with ret %d",
                                        ua_chan->name, app->pid, ua_sess->handle, ret);
                } else {
+                       /*
+                        * This is normal behavior, an application can die during the
+                        * creation process. Don't report an error so the execution can
+                        * continue normally.
+                        */
+                       ret = 0;
                        DBG3("UST app enable channel failed. Application is dead.");
                }
                goto error;
@@ -1131,6 +1324,12 @@ static int enable_ust_event(struct ust_app *app,
                                        "and session handle %d with ret %d",
                                        ua_event->attr.name, app->pid, ua_sess->handle, ret);
                } else {
+                       /*
+                        * This is normal behavior, an application can die during the
+                        * creation process. Don't report an error so the execution can
+                        * continue normally.
+                        */
+                       ret = 0;
                        DBG3("UST app enable event failed. Application is dead.");
                }
                goto error;
@@ -1211,6 +1410,12 @@ int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
                        ERR("Error ustctl create event %s for app pid: %d with ret %d",
                                        ua_event->attr.name, app->pid, ret);
                } else {
+                       /*
+                        * This is normal behavior, an application can die during the
+                        * creation process. Don't report an error so the execution can
+                        * continue normally.
+                        */
+                       ret = 0;
                        DBG3("UST app create event failed. Application is dead.");
                }
                goto error;
@@ -1231,6 +1436,14 @@ int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
                }
        }
 
+       /* Set exclusions for the event */
+       if (ua_event->exclusion) {
+               ret = set_ust_event_exclusion(ua_event, app);
+               if (ret < 0) {
+                       goto error;
+               }
+       }
+
        /* If event not enabled, disable it on the tracer */
        if (ua_event->enabled == 0) {
                ret = disable_ust_event(app, ua_sess, ua_event);
@@ -1266,6 +1479,8 @@ error:
 static void shadow_copy_event(struct ust_app_event *ua_event,
                struct ltt_ust_event *uevent)
 {
+       size_t exclusion_alloc_size;
+
        strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
        ua_event->name[sizeof(ua_event->name) - 1] = '\0';
 
@@ -1279,6 +1494,19 @@ static void shadow_copy_event(struct ust_app_event *ua_event,
                ua_event->filter = alloc_copy_ust_app_filter(uevent->filter);
                /* Filter might be NULL here in case of ENOMEM. */
        }
+
+       /* Copy exclusion data */
+       if (uevent->exclusion) {
+               exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) +
+                               LTTNG_UST_SYM_NAME_LEN * uevent->exclusion->count;
+               ua_event->exclusion = zmalloc(exclusion_alloc_size);
+               if (ua_event->exclusion == NULL) {
+                       PERROR("malloc");
+               } else {
+                       memcpy(ua_event->exclusion, uevent->exclusion,
+                                       exclusion_alloc_size);
+               }
+       }
 }
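
The exclusion copy above sizes its allocation as a fixed header plus count
fixed-width names. A minimal sketch of that variable-size deep copy, again
with stand-in types; calloc() approximates LTTng's zero-initializing
zmalloc():

#include <stdlib.h>
#include <string.h>

#define SYM_NAME_LEN 256	/* stand-in for LTTNG_UST_SYM_NAME_LEN */

struct exclusion {
	unsigned int count;
	char names[][SYM_NAME_LEN];
};

/* Deep-copy a variable-sized exclusion; returns NULL on ENOMEM. */
static struct exclusion *copy_exclusion(const struct exclusion *src)
{
	size_t size = sizeof(*src) + (size_t) src->count * SYM_NAME_LEN;
	struct exclusion *dst = calloc(1, size);

	if (dst) {
		memcpy(dst, src, size);
	}
	return dst;
}

int main(void)
{
	struct exclusion *e, *copy;

	e = calloc(1, sizeof(*e) + SYM_NAME_LEN);
	if (!e) {
		return 1;
	}
	e->count = 1;
	strcpy(e->names[0], "my_event");	/* hypothetical event name */
	copy = copy_exclusion(e);
	free(e);
	if (!copy) {
		return 1;
	}
	free(copy);
	return 0;
}
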
 
 /*
@@ -1298,6 +1526,9 @@ static void shadow_copy_channel(struct ust_app_channel *ua_chan,
        strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
        ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
 
+       ua_chan->tracefile_size = uchan->tracefile_size;
+       ua_chan->tracefile_count = uchan->tracefile_count;
+
        /* Copy event attributes since the layout is different. */
        ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
        ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
@@ -1313,7 +1544,7 @@ static void shadow_copy_channel(struct ust_app_channel *ua_chan,
        ua_chan->enabled = uchan->enabled;
        ua_chan->tracing_channel_id = uchan->id;
 
-       cds_lfht_for_each_entry(uchan->ctx->ht, &iter.iter, uctx, node.node) {
+       cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
                ua_ctx = alloc_ust_app_ctx(&uctx->ctx);
                if (ua_ctx == NULL) {
                        continue;
@@ -1321,12 +1552,13 @@ static void shadow_copy_channel(struct ust_app_channel *ua_chan,
                lttng_ht_node_init_ulong(&ua_ctx->node,
                                (unsigned long) ua_ctx->ctx.ctx);
                lttng_ht_add_unique_ulong(ua_chan->ctx, &ua_ctx->node);
+               cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
        }
 
        /* Copy all events from ltt ust channel to ust app channel */
        cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
                ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
-                               uevent->filter, uevent->attr.loglevel);
+                               uevent->filter, uevent->attr.loglevel, uevent->exclusion);
                if (ua_event == NULL) {
                        DBG2("UST event %s not found on shadow copy channel",
                                        uevent->attr.name);
@@ -1374,11 +1606,13 @@ static void shadow_copy_session(struct ust_app_session *ua_sess,
        ua_sess->bits_per_long = app->bits_per_long;
        /* There is only one consumer object per session possible. */
        ua_sess->consumer = usess->consumer;
+       ua_sess->output_traces = usess->output_traces;
+       ua_sess->live_timer_interval = usess->live_timer_interval;
 
        switch (ua_sess->buffer_type) {
        case LTTNG_BUFFER_PER_PID:
                ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
-                               DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s/", app->name, app->pid,
+                               DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
                                datetime);
                break;
        case LTTNG_BUFFER_PER_UID:
@@ -1437,7 +1671,7 @@ void __lookup_session_by_app(struct ltt_ust_session *usess,
                        struct ust_app *app, struct lttng_ht_iter *iter)
 {
        /* Get right UST app session from app */
-       lttng_ht_lookup(app->sessions, (void *)((unsigned long) usess->id), iter);
+       lttng_ht_lookup(app->sessions, &usess->id, iter);
 }
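
Passing &usess->id here matches the session hash table's move from ULONG to
U64 keys: on ILP32 platforms unsigned long is 32-bit, so the old cast
truncated 64-bit session IDs and distinct IDs could collide. A tiny
demonstration of that truncation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t id_a = UINT64_C(0x100000001);
	uint64_t id_b = UINT64_C(0x000000001);

	/* On a 32-bit userland both casts yield 1: distinct keys collide. */
	printf("a=%lu b=%lu\n",
			(unsigned long) id_a, (unsigned long) id_b);
	return 0;
}
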
 
 /*
@@ -1448,10 +1682,10 @@ static struct ust_app_session *lookup_session_by_app(
                struct ltt_ust_session *usess, struct ust_app *app)
 {
        struct lttng_ht_iter iter;
-       struct lttng_ht_node_ulong *node;
+       struct lttng_ht_node_u64 *node;
 
        __lookup_session_by_app(usess, app, &iter);
-       node = lttng_ht_iter_get_node_ulong(&iter);
+       node = lttng_ht_iter_get_node_u64(&iter);
        if (node == NULL) {
                goto error;
        }
@@ -1499,7 +1733,9 @@ static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
        ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
                        app->bits_per_long, app->uint8_t_alignment,
                        app->uint16_t_alignment, app->uint32_t_alignment,
-                       app->uint64_t_alignment, app->long_alignment, app->byte_order);
+                       app->uint64_t_alignment, app->long_alignment,
+                       app->byte_order, app->version.major,
+                       app->version.minor);
        if (ret < 0) {
                goto error;
        }
@@ -1550,10 +1786,12 @@ static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
        }
 
        /* Initialize registry. */
-       ret = ust_registry_session_init(&reg_uid->registry->reg.ust, app,
+       ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
                        app->bits_per_long, app->uint8_t_alignment,
                        app->uint16_t_alignment, app->uint32_t_alignment,
-                       app->uint64_t_alignment, app->long_alignment, app->byte_order);
+                       app->uint64_t_alignment, app->long_alignment,
+                       app->byte_order, app->version.major,
+                       app->version.minor);
        if (ret < 0) {
                goto error;
        }
@@ -1597,7 +1835,7 @@ static int create_ust_app_session(struct ltt_ust_session *usess,
 
        ua_sess = lookup_session_by_app(usess, app);
        if (ua_sess == NULL) {
-               DBG2("UST app pid: %d session id %d not found, creating it",
+               DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
                                app->pid, usess->id);
                ua_sess = alloc_ust_app_session(app);
                if (ua_sess == NULL) {
@@ -1640,6 +1878,13 @@ static int create_ust_app_session(struct ltt_ust_session *usess,
                                                app->pid, ret);
                        } else {
                                DBG("UST app creating session failed. Application is dead");
+                               /*
+                                * This is normal behavior, an application can die during the
+                                * creation process. Don't report an error so the execution can
+                                * continue normally. This will get flagged ENOTCONN and the
+                                * caller will handle it.
+                                */
+                               ret = 0;
                        }
                        delete_ust_app_session(-1, ua_sess, app);
                        if (ret != -ENOMEM) {
@@ -1655,9 +1900,9 @@ static int create_ust_app_session(struct ltt_ust_session *usess,
                ua_sess->handle = ret;
 
                /* Add ust app session to app's HT */
-               lttng_ht_node_init_ulong(&ua_sess->node,
-                               (unsigned long) ua_sess->tracing_id);
-               lttng_ht_add_unique_ulong(app->sessions, &ua_sess->node);
+               lttng_ht_node_init_u64(&ua_sess->node,
+                               ua_sess->tracing_id);
+               lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
 
                DBG2("UST app session created successfully with handle %d", ret);
        }
@@ -1708,6 +1953,7 @@ int create_ust_app_channel_context(struct ust_app_session *ua_sess,
 
        lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
        lttng_ht_add_unique_ulong(ua_chan->ctx, &ua_ctx->node);
+       cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
 
        ret = create_ust_channel_context(ua_chan, ua_ctx, app);
        if (ret < 0) {
@@ -1794,7 +2040,7 @@ static int enable_ust_app_channel(struct ust_app_session *ua_sess,
        lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
        ua_chan_node = lttng_ht_iter_get_node_str(&iter);
        if (ua_chan_node == NULL) {
-               DBG2("Unable to find channel %s in ust session id %u",
+               DBG2("Unable to find channel %s in ust session id %" PRIu64,
                                uchan->name, ua_sess->tracing_id);
                goto error;
        }
@@ -1876,9 +2122,11 @@ static int do_consumer_create_channel(struct ltt_ust_session *usess,
         * Now get the channel from the consumer. This call will populate the stream
         * list of that channel and set the ust objects.
         */
-       ret = ust_consumer_get_channel(socket, ua_chan);
-       if (ret < 0) {
-               goto error_destroy;
+       if (usess->consumer->enabled) {
+               ret = ust_consumer_get_channel(socket, ua_chan);
+               if (ret < 0) {
+                       goto error_destroy;
+               }
        }
 
        rcu_read_unlock();
@@ -2045,6 +2293,7 @@ static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
        }
        assert(reg_chan);
        reg_chan->consumer_key = ua_chan->key;
+       reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
 
        /* Create and add a channel registry to session. */
        ret = ust_registry_channel_add(reg_sess->reg.ust,
@@ -2146,6 +2395,7 @@ static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
 
                ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
                if (ret < 0) {
+                       (void) release_ust_app_stream(-1, &stream);
                        goto error_stream_unlock;
                }
 
@@ -2208,6 +2458,14 @@ static int create_channel_per_uid(struct ust_app *app,
                ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
                                app->bits_per_long, reg_uid->registry->reg.ust);
                if (ret < 0) {
+                       /*
+                        * Let's remove the previously created buffer registry channel so
+                        * it's not visible anymore in the session registry.
+                        */
+                       ust_registry_channel_del_free(reg_uid->registry->reg.ust,
+                                       ua_chan->tracing_channel_id);
+                       buffer_reg_channel_remove(reg_uid->registry, reg_chan);
+                       buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
                        goto error;
                }
 
@@ -2340,7 +2598,7 @@ error:
  * Create UST app channel and create it on the tracer. Set ua_chanp of the
  * newly created channel if not NULL.
  *
- * Called with UST app session lock held.
+ * Called with UST app session lock and RCU read-side lock held.
  *
  * Return 0 on success or else a negative value.
  */
@@ -2366,7 +2624,7 @@ static int create_ust_app_channel(struct ust_app_session *ua_sess,
        if (ua_chan == NULL) {
                /* Only malloc can fail here */
                ret = -ENOMEM;
-               goto error;
+               goto error_alloc;
        }
        shadow_copy_channel(ua_chan, uchan);
 
@@ -2394,6 +2652,7 @@ end:
 
 error:
        delete_ust_app_channel(ua_chan->is_sent ? app->sock : -1, ua_chan, app);
+error_alloc:
        return ret;
 }
 
@@ -2412,7 +2671,7 @@ int create_ust_app_event(struct ust_app_session *ua_sess,
 
        /* Get event node */
        ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
-                       uevent->filter, uevent->attr.loglevel);
+                       uevent->filter, uevent->attr.loglevel, uevent->exclusion);
        if (ua_event != NULL) {
                ret = -EEXIST;
                goto end;
@@ -2470,8 +2729,8 @@ static int create_ust_app_metadata(struct ust_app_session *ua_sess,
        registry = get_session_registry(ua_sess);
        assert(registry);
 
-       /* Metadata already exists for this registry. */
-       if (registry->metadata_key) {
+       /* Metadata already exists for this registry or it was closed previously. */
+       if (registry->metadata_key || registry->metadata_closed) {
                ret = 0;
                goto error;
        }
@@ -2489,8 +2748,8 @@ static int create_ust_app_metadata(struct ust_app_session *ua_sess,
                metadata->attr.overwrite = DEFAULT_CHANNEL_OVERWRITE;
                metadata->attr.subbuf_size = default_get_metadata_subbuf_size();
                metadata->attr.num_subbuf = DEFAULT_METADATA_SUBBUF_NUM;
-               metadata->attr.switch_timer_interval = DEFAULT_UST_CHANNEL_SWITCH_TIMER;
-               metadata->attr.read_timer_interval = DEFAULT_UST_CHANNEL_READ_TIMER;
+               metadata->attr.switch_timer_interval = DEFAULT_METADATA_SWITCH_TIMER;
+               metadata->attr.read_timer_interval = DEFAULT_METADATA_READ_TIMER;
                metadata->attr.output = LTTNG_UST_MMAP;
                metadata->attr.type = LTTNG_UST_CHAN_METADATA;
        } else {
@@ -2499,13 +2758,6 @@ static int create_ust_app_metadata(struct ust_app_session *ua_sess,
                metadata->attr.type = LTTNG_UST_CHAN_METADATA;
        }
 
-       /* Get the right consumer socket for the application. */
-       socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
-       if (!socket) {
-               ret = -EINVAL;
-               goto error_consumer;
-       }
-
        /* Need one fd for the channel. */
        ret = lttng_fd_get(LTTNG_FD_APPS, 1);
        if (ret < 0) {
@@ -2513,6 +2765,13 @@ static int create_ust_app_metadata(struct ust_app_session *ua_sess,
                goto error;
        }
 
+       /* Get the right consumer socket for the application. */
+       socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
+       if (!socket) {
+               ret = -EINVAL;
+               goto error_consumer;
+       }
+
        /*
         * Keep metadata key so we can identify it on the consumer side. Assign it
         * to the registry *before* we ask the consumer so we avoid the race of the
@@ -2530,11 +2789,8 @@ static int create_ust_app_metadata(struct ust_app_session *ua_sess,
        ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
                        registry);
        if (ret < 0) {
-               /*
-                * Safe because the metadata obj pointer is not set so the delete below
-                * will not put a FD back again.
-                */
-               lttng_fd_put(LTTNG_FD_APPS, 1);
+               /* Nullify the metadata key so we don't try to close it later on. */
+               registry->metadata_key = 0;
                goto error_consumer;
        }
 
@@ -2546,11 +2802,8 @@ static int create_ust_app_metadata(struct ust_app_session *ua_sess,
         */
        ret = consumer_setup_metadata(socket, metadata->key);
        if (ret < 0) {
-               /*
-                * Safe because the metadata obj pointer is not set so the delete below
-                * will not put a FD back again.
-                */
-               lttng_fd_put(LTTNG_FD_APPS, 1);
+               /* Nullify the metadata key so we don't try to close it later on. */
+               registry->metadata_key = 0;
                goto error_consumer;
        }
 
@@ -2558,19 +2811,12 @@ static int create_ust_app_metadata(struct ust_app_session *ua_sess,
                        metadata->key, app->pid);
 
 error_consumer:
+       lttng_fd_put(LTTNG_FD_APPS, 1);
        delete_ust_app_channel(-1, metadata, app);
 error:
        return ret;
 }
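
The fd handling above now acquires the application fd before any fallible
step and releases it at a single point (error_consumer) that both the
success and failure paths flow through, instead of sprinkling lttng_fd_put()
on each error branch. A generic sketch of that single-release-point
structure, with hypothetical helpers in place of lttng_fd_get() and
lttng_fd_put():

#include <stdio.h>

/* Hypothetical stand-ins for lttng_fd_get()/lttng_fd_put(). */
static int fd_budget;
static int fd_get(void) { fd_budget++; return 0; }
static void fd_put(void) { fd_budget--; }

static int create_thing(int setup_fails)
{
	int ret;

	ret = fd_get();
	if (ret < 0) {
		goto error;	/* Nothing acquired yet: plain exit. */
	}

	if (setup_fails) {
		ret = -1;
	}
	/* Success also flows through the common release point below. */

	fd_put();	/* One release covers every path past fd_get(). */
error:
	return ret;
}

int main(void)
{
	create_thing(1);
	printf("fd budget balanced: %s\n", fd_budget == 0 ? "yes" : "no");
	return 0;
}
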
 
-/*
- * Return pointer to traceable apps list.
- */
-struct lttng_ht *ust_app_get_ht(void)
-{
-       return ust_app_ht;
-}
-
 /*
  * Return ust app pointer or NULL if not found. RCU read side lock MUST be
  * acquired before calling this function.
@@ -2642,7 +2888,7 @@ struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
 
        lta->v_major = msg->major;
        lta->v_minor = msg->minor;
-       lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
+       lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
        lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
        lta->notify_sock = -1;
 
@@ -2805,15 +3051,18 @@ void ust_app_unregister(int sock)
                 * session so the delete session will NOT push/close a second time.
                 */
                registry = get_session_registry(ua_sess);
-               if (registry) {
+               if (registry && !registry->metadata_closed) {
                        /* Push metadata for application before freeing the application. */
                        (void) push_metadata(registry, ua_sess->consumer);
 
                        /*
                         * Don't ask to close metadata for global per UID buffers. Close
-                        * metadata only on destroy trace session in this case.
+                        * metadata only on destroy trace session in this case. Also, the
+                        * previous metadata push could have flagged the registry as closed,
+                        * so don't send a close command if it is.
                         */
-                       if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
+                       if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID &&
+                                       !registry->metadata_closed) {
                                /* And ask to close it for this session registry. */
                                (void) close_metadata(registry, ua_sess->consumer);
                        }
@@ -2830,20 +3079,6 @@ void ust_app_unregister(int sock)
        return;
 }
 
-/*
- * Return traceable_app_count
- */
-unsigned long ust_app_list_count(void)
-{
-       unsigned long count;
-
-       rcu_read_lock();
-       count = lttng_ht_get_count(ust_app_ht);
-       rcu_read_unlock();
-
-       return count;
-}
-
 /*
  * Fill events array with all events name of all registered apps.
  */
@@ -2890,13 +3125,19 @@ int ust_app_list_events(struct lttng_event **events)
                                        &uiter)) != -LTTNG_UST_ERR_NOENT) {
                        /* Handle ustctl error. */
                        if (ret < 0) {
-                               free(tmp_event);
-                               if (ret != -LTTNG_UST_ERR_EXITING || ret != -EPIPE) {
+                               if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
                                        ERR("UST app tp list get failed for app %d with ret %d",
                                                        app->sock, ret);
                                } else {
                                        DBG3("UST app tp list get failed. Application is dead");
+                                       /*
+                                        * This is normal behavior, an application can die during the
+                                        * creation process. Don't report an error so the execution can
+                                        * continue normally.
+                                        */
+                                       break;
                                }
+                               free(tmp_event);
                                goto rcu_error;
                        }
 
@@ -2984,13 +3225,19 @@ int ust_app_list_event_fields(struct lttng_event_field **fields)
                                        &uiter)) != -LTTNG_UST_ERR_NOENT) {
                        /* Handle ustctl error. */
                        if (ret < 0) {
-                               free(tmp_event);
-                               if (ret != -LTTNG_UST_ERR_EXITING || ret != -EPIPE) {
+                               if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
                                        ERR("UST app tp list field failed for app %d with ret %d",
                                                        app->sock, ret);
                                } else {
                                        DBG3("UST app tp list field failed. Application is dead");
+                                       /*
+                                        * This is normal behavior, an application can die during the
+                                        * creation process. Don't report an error so the execution can
+                                        * continue normally. Reset list and count for next app.
+                                        */
+                                       break;
                                }
+                               free(tmp_event);
                                goto rcu_error;
                        }
 
@@ -3013,12 +3260,13 @@ int ust_app_list_event_fields(struct lttng_event_field **fields)
                        }
 
                        memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_SYM_NAME_LEN);
-                       tmp_event[count].type = uiter.type;
+                       /* Mapping between these enums matches 1 to 1. */
+                       tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
                        tmp_event[count].nowrite = uiter.nowrite;
 
                        memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_SYM_NAME_LEN);
                        tmp_event[count].event.loglevel = uiter.loglevel;
-                       tmp_event[count].event.type = LTTNG_UST_TRACEPOINT;
+                       tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
                        tmp_event[count].event.pid = app->pid;
                        tmp_event[count].event.enabled = -1;
                        count++;
@@ -3039,6 +3287,8 @@ error:
 
 /*
  * Free and clean all traceable apps of the global list.
+ *
+ * Should _NOT_ be called with RCU read-side lock held.
  */
 void ust_app_clean_list(void)
 {
@@ -3069,13 +3319,12 @@ void ust_app_clean_list(void)
                ret = lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
                assert(!ret);
        }
+       rcu_read_unlock();
 
        /* Destroy is done only when the ht is empty */
-       lttng_ht_destroy(ust_app_ht);
-       lttng_ht_destroy(ust_app_ht_by_sock);
-       lttng_ht_destroy(ust_app_ht_by_notify_sock);
-
-       rcu_read_unlock();
+       ht_cleanup_push(ust_app_ht);
+       ht_cleanup_push(ust_app_ht_by_sock);
+       ht_cleanup_push(ust_app_ht_by_notify_sock);
 }
 
 /*
@@ -3107,7 +3356,7 @@ int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
                goto error;
        }
 
-       DBG2("UST app disabling channel %s from global domain for session id %d",
+       DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
                        uchan->name, usess->id);
 
        rcu_read_lock();
@@ -3168,7 +3417,7 @@ int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
                goto error;
        }
 
-       DBG2("UST app enabling channel %s to global domain for session id %d",
+       DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
                        uchan->name, usess->id);
 
        rcu_read_lock();
@@ -3216,7 +3465,8 @@ int ust_app_disable_event_glb(struct ltt_ust_session *usess,
        struct ust_app_event *ua_event;
 
        DBG("UST app disabling event %s for all apps in channel "
-                       "%s for session id %d", uevent->attr.name, uchan->name, usess->id);
+                       "%s for session id %" PRIu64,
+                       uevent->attr.name, uchan->name, usess->id);
 
        rcu_read_lock();
 
@@ -3239,7 +3489,7 @@ int ust_app_disable_event_glb(struct ltt_ust_session *usess,
                lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
                ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
                if (ua_chan_node == NULL) {
-                       DBG2("Channel %s not found in session id %d for app pid %d."
+                       DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
                                        "Skipping", uchan->name, usess->id, app->pid);
                        continue;
                }
@@ -3266,65 +3516,6 @@ int ust_app_disable_event_glb(struct ltt_ust_session *usess,
        return ret;
 }
 
-/*
- * For a specific UST session and UST channel, the event for all
- * registered apps.
- */
-int ust_app_disable_all_event_glb(struct ltt_ust_session *usess,
-               struct ltt_ust_channel *uchan)
-{
-       int ret = 0;
-       struct lttng_ht_iter iter, uiter;
-       struct lttng_ht_node_str *ua_chan_node;
-       struct ust_app *app;
-       struct ust_app_session *ua_sess;
-       struct ust_app_channel *ua_chan;
-       struct ust_app_event *ua_event;
-
-       DBG("UST app disabling all event for all apps in channel "
-                       "%s for session id %d", uchan->name, usess->id);
-
-       rcu_read_lock();
-
-       /* For all registered applications */
-       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-               if (!app->compatible) {
-                       /*
-                        * TODO: In time, we should notice the caller of this error by
-                        * telling him that this is a version error.
-                        */
-                       continue;
-               }
-               ua_sess = lookup_session_by_app(usess, app);
-               if (!ua_sess) {
-                       /* The application has problem or is probably dead. */
-                       continue;
-               }
-
-               /* Lookup channel in the ust app session */
-               lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
-               ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
-               /* If the channel is not found, there is a code flow error */
-               assert(ua_chan_node);
-
-               ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
-
-               /* Disable each events of channel */
-               cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
-                               node.node) {
-                       ret = disable_ust_app_event(ua_sess, ua_event, app);
-                       if (ret < 0) {
-                               /* XXX: Report error someday... */
-                               continue;
-                       }
-               }
-       }
-
-       rcu_read_unlock();
-
-       return ret;
-}
-
 /*
  * For a specific UST session, create the channel for all registered apps.
  */
@@ -3340,7 +3531,7 @@ int ust_app_create_channel_glb(struct ltt_ust_session *usess,
        assert(usess);
        assert(uchan);
 
-       DBG2("UST app adding channel %s to UST domain for session id %d",
+       DBG2("UST app adding channel %s to UST domain for session id %" PRIu64,
                        uchan->name, usess->id);
 
        rcu_read_lock();
@@ -3420,7 +3611,7 @@ int ust_app_enable_event_glb(struct ltt_ust_session *usess,
        struct ust_app_channel *ua_chan;
        struct ust_app_event *ua_event;
 
-       DBG("UST app enabling event %s for all apps for session id %d",
+       DBG("UST app enabling event %s for all apps for session id %" PRIu64,
                        uevent->attr.name, usess->id);
 
        /*
@@ -3458,7 +3649,7 @@ int ust_app_enable_event_glb(struct ltt_ust_session *usess,
 
                /* Get event node */
                ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
-                               uevent->filter, uevent->attr.loglevel);
+                               uevent->filter, uevent->attr.loglevel, uevent->exclusion);
                if (ua_event == NULL) {
                        DBG3("UST app enable event %s not found for app PID %d."
                                        "Skipping app", uevent->attr.name, app->pid);
@@ -3493,7 +3684,7 @@ int ust_app_create_event_glb(struct ltt_ust_session *usess,
        struct ust_app_session *ua_sess;
        struct ust_app_channel *ua_chan;
 
-       DBG("UST app creating event %s for all apps for session id %d",
+       DBG("UST app creating event %s for all apps for session id %" PRIu64,
                        uevent->attr.name, usess->id);
 
        rcu_read_lock();
@@ -3543,6 +3734,7 @@ int ust_app_create_event_glb(struct ltt_ust_session *usess,
 /*
  * Start tracing for a specific UST session and app.
  */
+static
 int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
 {
        int ret = 0;
@@ -3602,6 +3794,13 @@ skip_setup:
                                        app->pid, ret);
                } else {
                        DBG("UST app start session failed. Application is dead.");
+                       /*
+                        * This is normal behavior, an application can die during the
+                        * creation process. Don't report an error so the execution can
+                        * continue normally.
+                        */
+                       pthread_mutex_unlock(&ua_sess->lock);
+                       goto end;
                }
                goto error_unlock;
        }
@@ -3635,12 +3834,11 @@ error_unlock:
 /*
  * Stop tracing for a specific UST session and app.
  */
+static
 int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
 {
        int ret = 0;
-       struct lttng_ht_iter iter;
        struct ust_app_session *ua_sess;
-       struct ust_app_channel *ua_chan;
        struct ust_registry_session *registry;
 
        DBG("Stopping tracing for ust app pid %d", app->pid);
@@ -3678,6 +3876,12 @@ int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
                                        app->pid, ret);
                } else {
                        DBG("UST app stop session failed. Application is dead.");
+                       /*
+                        * This is normal behavior, an application can die during the
+                        * creation process. Don't report an error so the execution can
+                        * continue normally.
+                        */
+                       goto end_unlock;
                }
                goto error_rcu_unlock;
        }
@@ -3693,34 +3897,15 @@ int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
 
        health_code_update();
 
-       /* Flushing buffers */
-       cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
-                       node.node) {
-               health_code_update();
-               assert(ua_chan->is_sent);
-               ret = ustctl_sock_flush_buffer(app->sock, ua_chan->obj);
-               if (ret < 0) {
-                       if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
-                               ERR("UST app PID %d channel %s flush failed with ret %d",
-                                               app->pid, ua_chan->name, ret);
-                       } else {
-                               DBG3("UST app failed to flush %s. Application is dead.",
-                                               ua_chan->name);
-                               /* No need to continue. */
-                               break;
-                       }
-                       /* Continuing flushing all buffers */
-                       continue;
-               }
-       }
-
-       health_code_update();
-
        registry = get_session_registry(ua_sess);
        assert(registry);
-       /* Push metadata for application before freeing the application. */
-       (void) push_metadata(registry, ua_sess->consumer);
 
+       if (!registry->metadata_closed) {
+               /* Push metadata for application before freeing the application. */
+               (void) push_metadata(registry, ua_sess->consumer);
+       }
+
+end_unlock:
        pthread_mutex_unlock(&ua_sess->lock);
 end_no_session:
        rcu_read_unlock();
@@ -3734,6 +3919,67 @@ error_rcu_unlock:
        return -1;
 }
 
+/*
+ * Flush buffers for a specific UST session and app.
+ */
+static
+int ust_app_flush_trace(struct ltt_ust_session *usess, struct ust_app *app)
+{
+       int ret = 0;
+       struct lttng_ht_iter iter;
+       struct ust_app_session *ua_sess;
+       struct ust_app_channel *ua_chan;
+
+       DBG("Flushing buffers for ust app pid %d", app->pid);
+
+       rcu_read_lock();
+
+       if (!app->compatible) {
+               goto end_no_session;
+       }
+
+       ua_sess = lookup_session_by_app(usess, app);
+       if (ua_sess == NULL) {
+               goto end_no_session;
+       }
+
+       pthread_mutex_lock(&ua_sess->lock);
+
+       health_code_update();
+
+       /* Flushing buffers */
+       cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
+                       node.node) {
+               health_code_update();
+               assert(ua_chan->is_sent);
+               ret = ustctl_sock_flush_buffer(app->sock, ua_chan->obj);
+               if (ret < 0) {
+                       if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+                               ERR("UST app PID %d channel %s flush failed with ret %d",
+                                               app->pid, ua_chan->name, ret);
+                       } else {
+                               DBG3("UST app failed to flush %s. Application is dead.",
+                                               ua_chan->name);
+                               /*
+                                * This is normal behavior; an application can die during the
+                                * creation process. Don't report an error so the execution can
+                                * continue normally.
+                                */
+                       }
+                       /* Continue flushing all the buffers. */
+                       continue;
+               }
+       }
+
+       health_code_update();
+
+       pthread_mutex_unlock(&ua_sess->lock);
+end_no_session:
+       rcu_read_unlock();
+       health_code_update();
+       return 0;
+}
+
 /*
  * Destroy a specific UST session in apps.
  */
@@ -3742,7 +3988,7 @@ static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
        int ret;
        struct ust_app_session *ua_sess;
        struct lttng_ht_iter iter;
-       struct lttng_ht_node_ulong *node;
+       struct lttng_ht_node_u64 *node;
 
        DBG("Destroy tracing for ust app pid %d", app->pid);
 
@@ -3753,7 +3999,7 @@ static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
        }
 
        __lookup_session_by_app(usess, app, &iter);
-       node = lttng_ht_iter_get_node_ulong(&iter);
+       node = lttng_ht_iter_get_node_u64(&iter);
        if (node == NULL) {
                /* Session is being or is deleted. */
                goto end;
@@ -3816,10 +4062,23 @@ int ust_app_stop_trace_all(struct ltt_ust_session *usess)
 
        rcu_read_lock();
 
-       /* Flush all per UID buffers associated to that session. */
-       if (usess->buffer_type == LTTNG_BUFFER_PER_UID) {
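+       /* First, stop tracing for every registered application. */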
+       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+               ret = ust_app_stop_trace(usess, app);
+               if (ret < 0) {
+                       /* Continue to the next app even on error. */
+                       continue;
+               }
+       }
+
+       /* Flush buffers and push metadata (for UID buffers). */
+       switch (usess->buffer_type) {
+       case LTTNG_BUFFER_PER_UID:
+       {
                struct buffer_reg_uid *reg;
+
+               /* Flush all per UID buffers associated to that session. */
                cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+                       struct ust_registry_session *ust_session_reg;
                        struct buffer_reg_channel *reg_chan;
                        struct consumer_socket *socket;
 
@@ -3840,15 +4099,28 @@ int ust_app_stop_trace_all(struct ltt_ust_session *usess)
                                 */
                                (void) consumer_flush_channel(socket, reg_chan->consumer_key);
                        }
+
+                       ust_session_reg = reg->registry->reg.ust;
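+                       /* Do not push if the metadata was closed; it may already be freed. */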
+                       if (!ust_session_reg->metadata_closed) {
+                               /* Push metadata. */
+                               (void) push_metadata(ust_session_reg, usess->consumer);
+                       }
                }
-       }
 
-       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-               ret = ust_app_stop_trace(usess, app);
-               if (ret < 0) {
-                       /* Continue to next apps even on error */
-                       continue;
+               break;
+       }
+       case LTTNG_BUFFER_PER_PID:
+               cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+                       ret = ust_app_flush_trace(usess, app);
+                       if (ret < 0) {
+                               /* Continue to the next app even on error. */
+                               continue;
+                       }
                }
+               break;
+       default:
+               assert(0);
+               break;
        }
 
        rcu_read_unlock();
@@ -3888,7 +4160,7 @@ int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
 void ust_app_global_update(struct ltt_ust_session *usess, int sock)
 {
        int ret = 0;
-       struct lttng_ht_iter iter, uiter, iter_ctx;
+       struct lttng_ht_iter iter, uiter;
        struct ust_app *app;
        struct ust_app_session *ua_sess = NULL;
        struct ust_app_channel *ua_chan;
@@ -3898,12 +4170,12 @@ void ust_app_global_update(struct ltt_ust_session *usess, int sock)
        assert(usess);
        assert(sock >= 0);
 
-       DBG2("UST app global update for app sock %d for session id %d", sock,
+       DBG2("UST app global update for app sock %d for session id %" PRIu64, sock,
                        usess->id);
 
        rcu_read_lock();
 
-       app = find_app_by_sock(sock);
+       app = ust_app_find_by_sock(sock);
        if (app == NULL) {
                /*
                 * Application can be unregistered before so this is possible hence
@@ -3960,8 +4232,11 @@ void ust_app_global_update(struct ltt_ust_session *usess, int sock)
                        }
                }
 
-               cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter_ctx.iter, ua_ctx,
-                               node.node) {
+               /*
+                * Add contexts using the list so they are enabled in the same
+                * order the user added them.
+                */
+               cds_list_for_each_entry(ua_ctx, &ua_chan->ctx_list, list) {
                        ret = create_ust_channel_context(ua_chan, ua_ctx, app);
                        if (ret < 0) {
                                goto error_unlock;
@@ -4100,7 +4375,7 @@ int ust_app_enable_event_pid(struct ltt_ust_session *usess,
        ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
 
        ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
-                       uevent->filter, uevent->attr.loglevel);
+                       uevent->filter, uevent->attr.loglevel, uevent->exclusion);
        if (ua_event == NULL) {
                ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
                if (ret < 0) {
@@ -4120,69 +4395,6 @@ end:
        return ret;
 }
 
-/*
- * Disable event for a channel from a UST session for a specific PID.
- */
-int ust_app_disable_event_pid(struct ltt_ust_session *usess,
-               struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
-{
-       int ret = 0;
-       struct lttng_ht_iter iter;
-       struct lttng_ht_node_str *ua_chan_node, *ua_event_node;
-       struct ust_app *app;
-       struct ust_app_session *ua_sess;
-       struct ust_app_channel *ua_chan;
-       struct ust_app_event *ua_event;
-
-       DBG("UST app disabling event %s for PID %d", uevent->attr.name, pid);
-
-       rcu_read_lock();
-
-       app = ust_app_find_by_pid(pid);
-       if (app == NULL) {
-               ERR("UST app disable event per PID %d not found", pid);
-               ret = -1;
-               goto error;
-       }
-
-       if (!app->compatible) {
-               ret = 0;
-               goto error;
-       }
-
-       ua_sess = lookup_session_by_app(usess, app);
-       if (!ua_sess) {
-               /* The application has problem or is probably dead. */
-               goto error;
-       }
-
-       /* Lookup channel in the ust app session */
-       lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
-       ua_chan_node = lttng_ht_iter_get_node_str(&iter);
-       if (ua_chan_node == NULL) {
-               /* Channel does not exist, skip disabling */
-               goto error;
-       }
-       ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
-
-       lttng_ht_lookup(ua_chan->events, (void *)uevent->attr.name, &iter);
-       ua_event_node = lttng_ht_iter_get_node_str(&iter);
-       if (ua_event_node == NULL) {
-               /* Event does not exist, skip disabling */
-               goto error;
-       }
-       ua_event = caa_container_of(ua_event_node, struct ust_app_event, node);
-
-       ret = disable_ust_app_event(ua_sess, ua_event, app);
-       if (ret < 0) {
-               goto error;
-       }
-
-error:
-       rcu_read_unlock();
-       return ret;
-}
-
 /*
  * Calibrate registered applications.
  */
@@ -4334,12 +4546,19 @@ static int reply_ust_register_channel(int sock, int sobjd, int cobjd,
                DBG("Application socket %d is being teardown. Abort event notify",
                                sock);
                ret = 0;
+               free(fields);
                goto error_rcu_unlock;
        }
 
-       /* Lookup channel by UST object descriptor. Should always be found. */
+       /* Lookup channel by UST object descriptor. */
        ua_chan = find_channel_by_objd(app, cobjd);
-       assert(ua_chan);
+       if (!ua_chan) {
+               DBG("Application channel is being torn down. Abort event notify");
+               ret = 0;
+               free(fields);
+               goto error_rcu_unlock;
+       }
+
        assert(ua_chan->session);
        ua_sess = ua_chan->session;
 
@@ -4373,6 +4592,9 @@ static int reply_ust_register_channel(int sock, int sobjd, int cobjd,
        } else {
                /* Get current already assigned values. */
                type = chan_reg->header_type;
+               free(fields);
+               /* Set to NULL so the error path does not do a double free. */
+               fields = NULL;
        }
        /* Channel id is set during the object creation. */
        chan_id = chan_reg->chan_id;
@@ -4408,6 +4630,9 @@ error:
        pthread_mutex_unlock(&registry->lock);
 error_rcu_unlock:
        rcu_read_unlock();
+       if (ret) {
+               free(fields);
+       }
        return ret;
 }
 
@@ -4440,12 +4665,23 @@ static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
                DBG("Application socket %d is being teardown. Abort event notify",
                                sock);
                ret = 0;
+               free(sig);
+               free(fields);
+               free(model_emf_uri);
                goto error_rcu_unlock;
        }
 
-       /* Lookup channel by UST object descriptor. Should always be found. */
+       /* Lookup channel by UST object descriptor. */
        ua_chan = find_channel_by_objd(app, cobjd);
-       assert(ua_chan);
+       if (!ua_chan) {
+               DBG("Application channel is being torn down. Abort event notify");
+               ret = 0;
+               free(sig);
+               free(fields);
+               free(model_emf_uri);
+               goto error_rcu_unlock;
+       }
+
        assert(ua_chan->session);
        ua_sess = ua_chan->session;
 
@@ -4460,9 +4696,15 @@ static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
 
        pthread_mutex_lock(&registry->lock);
 
+       /*
+        * From this point on, this call acquires ownership of sig, fields and
+        * model_emf_uri, meaning that any required free is done inside it. These
+        * three variables MUST NOT be read or written after this.
+        */
        ret_code = ust_registry_create_event(registry, chan_reg_key,
                        sobjd, cobjd, name, sig, nr_fields, fields, loglevel,
-                       model_emf_uri, ua_sess->buffer_type, &event_id);
+                       model_emf_uri, ua_sess->buffer_type, &event_id,
+                       app);
 
        /*
         * The return value is returned to ustctl so in case of an error, the
@@ -4536,7 +4778,12 @@ int ust_app_recv_notify(int sock)
                        goto error;
                }
 
-               /* Add event to the UST registry coming from the notify socket. */
+               /*
+                * Add event to the UST registry coming from the notify socket. This
+                * call will free, if needed, the sig, fields and model_emf_uri. This
+                * code path loses ownership of these variables and transfers it to
+                * the called function.
+                */
                ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
                                fields, loglevel, model_emf_uri);
                if (ret < 0) {
@@ -4564,6 +4811,11 @@ int ust_app_recv_notify(int sock)
                        goto error;
                }
 
+               /*
+                * Ownership of fields is transferred to this function call, meaning
+                * that it will be freed if needed. After this, it is invalid to
+                * access fields or clean them up.
+                */
                ret = reply_ust_register_channel(sock, sobjd, cobjd, nr_fields,
                                fields);
                if (ret < 0) {
@@ -4657,3 +4909,235 @@ close_socket:
                call_rcu(&obj->head, close_notify_sock_rcu);
        }
 }
+
+/*
+ * Destroy a ust app data structure and free its memory.
+ */
+void ust_app_destroy(struct ust_app *app)
+{
+       if (!app) {
+               return;
+       }
+
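+       /*
+        * Use call_rcu so the memory is only reclaimed once concurrent RCU
+        * readers are done with the application object.
+        */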
+       call_rcu(&app->pid_n.head, delete_ust_app_rcu);
+}
+
+/*
+ * Take a snapshot for a given UST session. The snapshot is sent to the given
+ * output.
+ *
+ * Return 0 on success or else a negative value.
+ */
+int ust_app_snapshot_record(struct ltt_ust_session *usess,
+               struct snapshot_output *output, int wait, unsigned int nb_streams)
+{
+       int ret = 0;
+       struct lttng_ht_iter iter;
+       struct ust_app *app;
+       char pathname[PATH_MAX];
+       uint64_t max_stream_size = 0;
+
+       assert(usess);
+       assert(output);
+
+       rcu_read_lock();
+
+       /*
+        * Compute the maximum size of a single stream if a maximum size was
+        * requested by the caller.
+        */
+       if (output->max_size > 0 && nb_streams > 0) {
+               max_stream_size = output->max_size / nb_streams;
+       }
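+       /* For example, a 16 MiB max size over 4 streams allows 4 MiB per stream. */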
+
+       switch (usess->buffer_type) {
+       case LTTNG_BUFFER_PER_UID:
+       {
+               struct buffer_reg_uid *reg;
+
+               cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+                       struct buffer_reg_channel *reg_chan;
+                       struct consumer_socket *socket;
+
+                       /* Get the consumer socket to use for the snapshot. */
+                       socket = consumer_find_socket_by_bitness(reg->bits_per_long,
+                                       usess->consumer);
+                       if (!socket) {
+                               ret = -EINVAL;
+                               goto error;
+                       }
+
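+                       /*
+                        * Build the per UID trace path. With the stock defaults,
+                        * this is expected to look like "ust/uid/<uid>/<bitness>-bit".
+                        */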
+                       memset(pathname, 0, sizeof(pathname));
+                       ret = snprintf(pathname, sizeof(pathname),
+                                       DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
+                                       reg->uid, reg->bits_per_long);
+                       if (ret < 0) {
+                               PERROR("snprintf snapshot path");
+                               goto error;
+                       }
+
+                       /* Snapshot every channel registered for this per UID registry. */
+                       cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
+                                       reg_chan, node.node) {
+
+                               /*
+                                * Make sure the maximum stream size is not lower than the
+                                * subbuffer size; otherwise, nothing can be snapshotted.
+                                */
+                               if (max_stream_size &&
+                                               reg_chan->subbuf_size > max_stream_size) {
+                                       ret = -EINVAL;
+                                       DBG3("UST app snapshot record maximum stream size %" PRIu64
+                                                       " is smaller than subbuffer size of %zu",
+                                                       max_stream_size, reg_chan->subbuf_size);
+                                       goto error;
+                               }
+                               ret = consumer_snapshot_channel(socket, reg_chan->consumer_key, output, 0,
+                                               usess->uid, usess->gid, pathname, wait,
+                                               max_stream_size);
+                               if (ret < 0) {
+                                       goto error;
+                               }
+                       }
+                       ret = consumer_snapshot_channel(socket, reg->registry->reg.ust->metadata_key, output,
+                                       1, usess->uid, usess->gid, pathname, wait,
+                                       max_stream_size);
+                       if (ret < 0) {
+                               goto error;
+                       }
+               }
+               break;
+       }
+       case LTTNG_BUFFER_PER_PID:
+       {
+               cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+                       struct consumer_socket *socket;
+                       struct lttng_ht_iter chan_iter;
+                       struct ust_app_channel *ua_chan;
+                       struct ust_app_session *ua_sess;
+                       struct ust_registry_session *registry;
+
+                       ua_sess = lookup_session_by_app(usess, app);
+                       if (!ua_sess) {
+                               /* Session not associated with this app. */
+                               continue;
+                       }
+
+                       /* Get the right consumer socket for the application. */
+                       socket = consumer_find_socket_by_bitness(app->bits_per_long,
+                                       output->consumer);
+                       if (!socket) {
+                               ret = -EINVAL;
+                               goto error;
+                       }
+
+                       /* Add the UST default trace dir to path. */
+                       memset(pathname, 0, sizeof(pathname));
+                       ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
+                                       ua_sess->path);
+                       if (ret < 0) {
+                               PERROR("snprintf snapshot path");
+                               goto error;
+                       }
+
+                       cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
+                                       ua_chan, node.node) {
+                               /*
+                                * Make sure the maximum stream size is not lower than the
+                                * subbuffer size; otherwise, nothing can be snapshotted.
+                                */
+                               if (max_stream_size &&
+                                               ua_chan->attr.subbuf_size > max_stream_size) {
+                                       ret = -EINVAL;
+                                       DBG3("UST app snapshot record maximum stream size %" PRIu64
+                                                       " is smaller than subbuffer size of %" PRIu64,
+                                                       max_stream_size, ua_chan->attr.subbuf_size);
+                                       goto error;
+                               }
+
+                               ret = consumer_snapshot_channel(socket, ua_chan->key, output, 0,
+                                               ua_sess->euid, ua_sess->egid, pathname, wait,
+                                               max_stream_size);
+                               if (ret < 0) {
+                                       goto error;
+                               }
+                       }
+
+                       registry = get_session_registry(ua_sess);
+                       assert(registry);
+                       ret = consumer_snapshot_channel(socket, registry->metadata_key, output,
+                                       1, ua_sess->euid, ua_sess->egid, pathname, wait,
+                                       max_stream_size);
+                       if (ret < 0) {
+                               goto error;
+                       }
+               }
+               break;
+       }
+       default:
+               assert(0);
+               break;
+       }
+
+error:
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * Return the number of streams for a UST session.
+ */
+unsigned int ust_app_get_nb_stream(struct ltt_ust_session *usess)
+{
+       unsigned int ret = 0;
+       struct ust_app *app;
+       struct lttng_ht_iter iter;
+
+       assert(usess);
+
+       switch (usess->buffer_type) {
+       case LTTNG_BUFFER_PER_UID:
+       {
+               struct buffer_reg_uid *reg;
+
+               cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+                       struct buffer_reg_channel *reg_chan;
+
+                       cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
+                                       reg_chan, node.node) {
+                               ret += reg_chan->stream_count;
+                       }
+               }
+               break;
+       }
+       case LTTNG_BUFFER_PER_PID:
+       {
+               rcu_read_lock();
+               cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+                       struct ust_app_channel *ua_chan;
+                       struct ust_app_session *ua_sess;
+                       struct lttng_ht_iter chan_iter;
+
+                       ua_sess = lookup_session_by_app(usess, app);
+                       if (!ua_sess) {
+                               /* Session not associated with this app. */
+                               continue;
+                       }
+
+                       cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
+                                       ua_chan, node.node) {
+                               ret += ua_chan->streams.count;
+                       }
+               }
+               rcu_read_unlock();
+               break;
+       }
+       default:
+               assert(0);
+               break;
+       }
+
+       return ret;
+}