SoW-2019-0002: Dynamic Snapshot
[lttng-tools.git] / src / bin / lttng-sessiond / notification-thread-events.c
index 73ec72f89c308b86ce2c95da5c364738d5b42680..e3c35f3a5e9e4f7cbbabfe48b923ed0556a05b9d 100644 (file)
 #include <common/macros.h>
 #include <lttng/condition/condition.h>
 #include <lttng/action/action-internal.h>
+#include <lttng/action/group-internal.h>
 #include <lttng/notification/notification-internal.h>
 #include <lttng/condition/condition-internal.h>
 #include <lttng/condition/buffer-usage-internal.h>
 #include <lttng/condition/session-consumed-size-internal.h>
 #include <lttng/condition/session-rotation-internal.h>
+#include <lttng/condition/event-rule-internal.h>
 #include <lttng/notification/channel-internal.h>
+#include <lttng/trigger/trigger-internal.h>
 
 #include <time.h>
 #include <unistd.h>
@@ -50,7 +53,7 @@ enum lttng_object_type {
 
 struct lttng_trigger_list_element {
        /* No ownership of the trigger object is assumed. */
-       const struct lttng_trigger *trigger;
+       struct lttng_trigger *trigger;
        struct cds_list_head node;
 };
 
@@ -108,6 +111,7 @@ struct lttng_session_trigger_list {
 struct lttng_trigger_ht_element {
        struct lttng_trigger *trigger;
        struct cds_lfht_node node;
+       struct cds_lfht_node node_by_name;
        /* call_rcu delayed reclaim. */
        struct rcu_head rcu_node;
 };
@@ -117,85 +121,17 @@ struct lttng_condition_list_element {
        struct cds_list_head node;
 };
 
-struct notification_client_list_element {
-       struct notification_client *client;
-       struct cds_list_head node;
-};
-
-struct notification_client_list {
-       const struct lttng_trigger *trigger;
-       struct cds_list_head list;
-       struct cds_lfht_node notification_trigger_ht_node;
-       /* call_rcu delayed reclaim. */
-       struct rcu_head rcu_node;
-};
-
-struct notification_client {
-       int socket;
-       /* Client protocol version. */
-       uint8_t major, minor;
-       uid_t uid;
-       gid_t gid;
-       /*
-        * Indicates if the credentials and versions of the client have been
-        * checked.
-        */
-       bool validated;
-       /*
-        * Conditions to which the client's notification channel is subscribed.
-        * List of struct lttng_condition_list_node. The condition member is
-        * owned by the client.
-        */
-       struct cds_list_head condition_list;
-       struct cds_lfht_node client_socket_ht_node;
-       struct {
-               struct {
-                       /*
-                        * During the reception of a message, the reception
-                        * buffers' "size" is set to contain the current
-                        * message's complete payload.
-                        */
-                       struct lttng_dynamic_buffer buffer;
-                       /* Bytes left to receive for the current message. */
-                       size_t bytes_to_receive;
-                       /* Type of the message being received. */
-                       enum lttng_notification_channel_message_type msg_type;
-                       /*
-                        * Indicates whether or not credentials are expected
-                        * from the client.
-                        */
-                       bool expect_creds;
-                       /*
-                        * Indicates whether or not credentials were received
-                        * from the client.
-                        */
-                       bool creds_received;
-                       /* Only used during credentials reception. */
-                       lttng_sock_cred creds;
-               } inbound;
-               struct {
-                       /*
-                        * Indicates whether or not a notification addressed to
-                        * this client was dropped because a command reply was
-                        * already buffered.
-                        *
-                        * A notification is dropped whenever the buffer is not
-                        * empty.
-                        */
-                       bool dropped_notification;
-                       /*
-                        * Indicates whether or not a command reply is already
-                        * buffered. In this case, it means that the client is
-                        * not consuming command replies before emitting a new
-                        * one. This could be caused by a protocol error or a
-                        * misbehaving/malicious client.
-                        */
-                       bool queued_command_reply;
-                       struct lttng_dynamic_buffer buffer;
-               } outbound;
-       } communication;
-       /* call_rcu delayed reclaim. */
-       struct rcu_head rcu_node;
+/*
+ * Facilities to carry the different notifications type in the action processing
+ * code path.
+ */
+struct lttng_trigger_notification {
+       union {
+               struct lttng_ust_trigger_notification *ust;
+               uint64_t *kernel;
+       } u;
+       uint64_t id;
+       enum lttng_domain_type type;
 };
 
 struct channel_state_sample {
@@ -258,20 +194,29 @@ void lttng_session_trigger_list_destroy(
                struct lttng_session_trigger_list *list);
 static
 int lttng_session_trigger_list_add(struct lttng_session_trigger_list *list,
-               const struct lttng_trigger *trigger);
+               struct lttng_trigger *trigger);
 
 
 static
-int match_client(struct cds_lfht_node *node, const void *key)
+int match_client_socket(struct cds_lfht_node *node, const void *key)
 {
        /* This double-cast is intended to supress pointer-to-cast warning. */
-       int socket = (int) (intptr_t) key;
-       struct notification_client *client;
+       const int socket = (int) (intptr_t) key;
+       const struct notification_client *client = caa_container_of(node,
+                       struct notification_client, client_socket_ht_node);
 
-       client = caa_container_of(node, struct notification_client,
-                       client_socket_ht_node);
+       return client->socket == socket;
+}
 
-       return !!(client->socket == socket);
+static
+int match_client_id(struct cds_lfht_node *node, const void *key)
+{
+       /* This double-cast is intended to suppress pointer-to-cast warning. */
+       const notification_client_id id = *((notification_client_id *) key);
+       const struct notification_client *client = caa_container_of(
+                       node, struct notification_client, client_id_ht_node);
+
+       return client->id == id;
 }
 
 static
@@ -326,18 +271,42 @@ int match_channel_info(struct cds_lfht_node *node, const void *key)
 }
 
 static
-int match_condition(struct cds_lfht_node *node, const void *key)
+int match_trigger(struct cds_lfht_node *node, const void *key)
 {
-       struct lttng_condition *condition_key = (struct lttng_condition *) key;
-       struct lttng_trigger_ht_element *trigger;
-       struct lttng_condition *condition;
+       bool match = false;
+       struct lttng_trigger *trigger_key = (struct lttng_trigger *) key;
+       struct lttng_trigger_ht_element *trigger_ht_element;
+       const struct lttng_credentials *creds_key;
+       const struct lttng_credentials *creds_node;
 
-       trigger = caa_container_of(node, struct lttng_trigger_ht_element,
+       trigger_ht_element = caa_container_of(node, struct lttng_trigger_ht_element,
                        node);
-       condition = lttng_trigger_get_condition(trigger->trigger);
-       assert(condition);
 
-       return !!lttng_condition_is_equal(condition_key, condition);
+       match = lttng_trigger_is_equal(trigger_key, trigger_ht_element->trigger);
+       if (!match) {
+               goto end;
+       }
+
+       /* Validate credential */
+       /* TODO: this could be moved to lttng_trigger_is_equal depending on
+        * how we handle root behaviour on disable and listing.
+        */
+       creds_key = lttng_trigger_get_credentials(trigger_key);
+       creds_node = lttng_trigger_get_credentials(trigger_ht_element->trigger);
+       match = lttng_credentials_is_equal(creds_key, creds_node);
+end:
+       return !!match;
+}
+
+static
+int match_trigger_token(struct cds_lfht_node *node, const void *key)
+{
+       const uint64_t *_key = key;
+       struct notification_trigger_tokens_ht_element *element;
+
+       element = caa_container_of(node, struct notification_trigger_tokens_ht_element,
+                       node);
+       return *_key == element->token ;
 }
 
 static
@@ -350,7 +319,7 @@ int match_client_list_condition(struct cds_lfht_node *node, const void *key)
        assert(condition_key);
 
        client_list = caa_container_of(node, struct notification_client_list,
-                       notification_trigger_ht_node);
+                       notification_trigger_clients_ht_node);
        condition = lttng_trigger_get_const_condition(client_list->trigger);
 
        return !!lttng_condition_is_equal(condition_key, condition);
@@ -366,6 +335,23 @@ int match_session(struct cds_lfht_node *node, const void *key)
        return !strcmp(session_info->name, name);
 }
 
+/*
+ * Match function for string node.
+ */
+static int match_str(struct cds_lfht_node *node, const void *key)
+{
+       struct lttng_trigger_ht_element *trigger_ht_element;
+       const char *name;
+
+       trigger_ht_element = caa_container_of(node, struct lttng_trigger_ht_element,
+                       node_by_name);
+
+       /* TODO error checking */
+       lttng_trigger_get_name(trigger_ht_element->trigger, &name);
+
+       return hash_match_key_str(name, (void *) key);
+}
+
 static
 unsigned long lttng_condition_buffer_usage_hash(
        const struct lttng_condition *_condition)
@@ -442,6 +428,22 @@ unsigned long lttng_condition_session_rotation_hash(
        return hash;
 }
 
+static
+unsigned long lttng_condition_event_rule_hash(
+       const struct lttng_condition *_condition)
+{
+       unsigned long hash, condition_type;
+       struct lttng_condition_event_rule *condition;
+
+       condition = container_of(_condition,
+                       struct lttng_condition_event_rule, parent);
+       condition_type = (unsigned long) condition->parent.type;
+       hash = hash_key_ulong((void *) condition_type, lttng_ht_seed);
+
+       /* TODO: further hashing using the event rule? On the pattern, maybe? */
+       return hash;
+}
+
 /*
  * The lttng_condition hashing code is kept in this file (rather than
  * condition.c) since it makes use of GPLv2 code (hashtable utils), which we
@@ -459,6 +461,8 @@ unsigned long lttng_condition_hash(const struct lttng_condition *condition)
        case LTTNG_CONDITION_TYPE_SESSION_ROTATION_ONGOING:
        case LTTNG_CONDITION_TYPE_SESSION_ROTATION_COMPLETED:
                return lttng_condition_session_rotation_hash(condition);
+       case LTTNG_CONDITION_TYPE_EVENT_RULE_HIT:
+               return lttng_condition_event_rule_hash(condition);
        default:
                ERR("[notification-thread] Unexpected condition type caught");
                abort();
@@ -475,6 +479,18 @@ unsigned long hash_channel_key(struct channel_key *key)
        return key_hash ^ domain_hash;
 }
 
+static
+unsigned long hash_client_socket(int socket)
+{
+       return hash_key_ulong((void *) (unsigned long) socket, lttng_ht_seed);
+}
+
+static
+unsigned long hash_client_id(notification_client_id id)
+{
+       return hash_key_u64(&id, lttng_ht_seed);
+}
+
 /*
  * Get the type of object to which a given condition applies. Bindings let
  * the notification system evaluate a trigger's condition when a given
@@ -495,6 +511,8 @@ enum lttng_object_type get_condition_binding_object(
        case LTTNG_CONDITION_TYPE_SESSION_ROTATION_ONGOING:
        case LTTNG_CONDITION_TYPE_SESSION_ROTATION_COMPLETED:
                return LTTNG_OBJECT_TYPE_SESSION;
+       case LTTNG_CONDITION_TYPE_EVENT_RULE_HIT:
+               return LTTNG_OBJECT_TYPE_NONE;
        default:
                return LTTNG_OBJECT_TYPE_UNKNOWN;
        }
@@ -666,7 +684,90 @@ error:
        return NULL;
 }
 
-/* RCU read lock must be held by the caller. */
+LTTNG_HIDDEN
+bool notification_client_list_get(struct notification_client_list *list)
+{
+       return urcu_ref_get_unless_zero(&list->ref);
+}
+
+static
+void free_notification_client_list_rcu(struct rcu_head *node)
+{
+       free(caa_container_of(node, struct notification_client_list,
+                       rcu_node));
+}
+
+static
+void notification_client_list_release(struct urcu_ref *list_ref)
+{
+       struct notification_client_list *list =
+                       container_of(list_ref, typeof(*list), ref);
+       struct notification_client_list_element *client_list_element, *tmp;
+
+       if (list->notification_trigger_clients_ht) {
+               rcu_read_lock();
+               cds_lfht_del(list->notification_trigger_clients_ht,
+                               &list->notification_trigger_clients_ht_node);
+               rcu_read_unlock();
+               list->notification_trigger_clients_ht = NULL;
+       }
+       cds_list_for_each_entry_safe(client_list_element, tmp,
+                                    &list->list, node) {
+               free(client_list_element);
+       }
+       pthread_mutex_destroy(&list->lock);
+       call_rcu(&list->rcu_node, free_notification_client_list_rcu);
+}
+
+static
+struct notification_client_list *notification_client_list_create(
+               const struct lttng_trigger *trigger)
+{
+       struct notification_client_list *client_list =
+                       zmalloc(sizeof(*client_list));
+
+       if (!client_list) {
+               goto error;
+       }
+       pthread_mutex_init(&client_list->lock, NULL);
+       urcu_ref_init(&client_list->ref);
+       cds_lfht_node_init(&client_list->notification_trigger_clients_ht_node);
+       CDS_INIT_LIST_HEAD(&client_list->list);
+       client_list->trigger = trigger;
+error:
+       return client_list;
+}
+
+static
+void publish_notification_client_list(
+               struct notification_thread_state *state,
+               struct notification_client_list *list)
+{
+       const struct lttng_condition *condition =
+                       lttng_trigger_get_const_condition(list->trigger);
+
+       assert(!list->notification_trigger_clients_ht);
+
+       list->notification_trigger_clients_ht =
+                       state->notification_trigger_clients_ht;
+
+       rcu_read_lock();
+       cds_lfht_add(state->notification_trigger_clients_ht,
+                       lttng_condition_hash(condition),
+                       &list->notification_trigger_clients_ht_node);
+       rcu_read_unlock();
+}
+
+LTTNG_HIDDEN
+void notification_client_list_put(struct notification_client_list *list)
+{
+       if (!list) {
+               return;
+       }
+       return urcu_ref_put(&list->ref, notification_client_list_release);
+}
+
+/* Provides a reference to the returned list. */
 static
 struct notification_client_list *get_client_list_from_condition(
        struct notification_thread_state *state,
@@ -674,20 +775,24 @@ struct notification_client_list *get_client_list_from_condition(
 {
        struct cds_lfht_node *node;
        struct cds_lfht_iter iter;
+       struct notification_client_list *list = NULL;
 
+       rcu_read_lock();
        cds_lfht_lookup(state->notification_trigger_clients_ht,
                        lttng_condition_hash(condition),
                        match_client_list_condition,
                        condition,
                        &iter);
        node = cds_lfht_iter_get_node(&iter);
-
-        return node ? caa_container_of(node,
-                       struct notification_client_list,
-                       notification_trigger_ht_node) : NULL;
+       if (node) {
+               list = container_of(node, struct notification_client_list,
+                               notification_trigger_clients_ht_node);
+               list = notification_client_list_get(list) ? list : NULL;
+       }
+       rcu_read_unlock();
+        return list;
 }
 
-/* This function must be called with the RCU read lock held. */
 static
 int evaluate_channel_condition_for_client(
                const struct lttng_condition *condition,
@@ -703,6 +808,8 @@ int evaluate_channel_condition_for_client(
        struct channel_state_sample *last_sample = NULL;
        struct lttng_channel_trigger_list *channel_trigger_list = NULL;
 
+       rcu_read_lock();
+
        /* Find the channel associated with the condition. */
        cds_lfht_for_each_entry(state->channel_triggers_ht, &iter,
                        channel_trigger_list, channel_triggers_ht_node) {
@@ -777,6 +884,7 @@ int evaluate_channel_condition_for_client(
        *session_uid = channel_info->session_info->uid;
        *session_gid = channel_info->session_info->gid;
 end:
+       rcu_read_unlock();
        return ret;
 }
 
@@ -812,7 +920,6 @@ end:
        return session_name;
 }
 
-/* This function must be called with the RCU read lock held. */
 static
 int evaluate_session_condition_for_client(
                const struct lttng_condition *condition,
@@ -826,6 +933,7 @@ int evaluate_session_condition_for_client(
        const char *session_name;
        struct session_info *session_info = NULL;
 
+       rcu_read_lock();
        session_name = get_condition_session_name(condition);
 
        /* Find the session associated with the trigger. */
@@ -879,10 +987,10 @@ int evaluate_session_condition_for_client(
 end_session_put:
        session_info_put(session_info);
 end:
+       rcu_read_unlock();
        return ret;
 }
 
-/* This function must be called with the RCU read lock held. */
 static
 int evaluate_condition_for_client(const struct lttng_trigger *trigger,
                const struct lttng_condition *condition,
@@ -891,7 +999,9 @@ int evaluate_condition_for_client(const struct lttng_trigger *trigger,
 {
        int ret;
        struct lttng_evaluation *evaluation = NULL;
-       struct notification_client_list client_list = { 0 };
+       struct notification_client_list client_list = {
+               .lock = PTHREAD_MUTEX_INITIALIZER,
+       };
        struct notification_client_list_element client_list_element = { 0 };
        uid_t object_uid = 0;
        gid_t object_gid = 0;
@@ -911,6 +1021,7 @@ int evaluate_condition_for_client(const struct lttng_trigger *trigger,
                                &evaluation, &object_uid, &object_gid);
                break;
        case LTTNG_OBJECT_TYPE_NONE:
+               DBG("[notification-thread] Newly subscribed-to condition not binded to object, nothing to evaluate");
                ret = 0;
                goto end;
        case LTTNG_OBJECT_TYPE_UNKNOWN:
@@ -933,7 +1044,7 @@ int evaluate_condition_for_client(const struct lttng_trigger *trigger,
         * Create a temporary client list with the client currently
         * subscribing.
         */
-       cds_lfht_node_init(&client_list.notification_trigger_ht_node);
+       cds_lfht_node_init(&client_list.notification_trigger_clients_ht_node);
        CDS_INIT_LIST_HEAD(&client_list.list);
        client_list.trigger = trigger;
 
@@ -957,7 +1068,7 @@ int notification_thread_client_subscribe(struct notification_client *client,
                enum lttng_notification_channel_status *_status)
 {
        int ret = 0;
-       struct notification_client_list *client_list;
+       struct notification_client_list *client_list = NULL;
        struct lttng_condition_list_element *condition_list_element = NULL;
        struct notification_client_list_element *client_list_element = NULL;
        enum lttng_notification_channel_status status =
@@ -986,8 +1097,6 @@ int notification_thread_client_subscribe(struct notification_client *client,
                goto error;
        }
 
-       rcu_read_lock();
-
        /*
         * Add the newly-subscribed condition to the client's subscription list.
         */
@@ -1003,20 +1112,24 @@ int notification_thread_client_subscribe(struct notification_client *client,
                 * since this trigger is not registered yet.
                 */
                free(client_list_element);
-               goto end_unlock;
+               goto end;
        }
 
        /*
         * The condition to which the client just subscribed is evaluated
         * at this point so that conditions that are already TRUE result
         * in a notification being sent out.
+        *
+        * The client_list's trigger is used without locking the list itself.
+        * This is correct since the list doesn't own the trigger and the
+        * object is immutable.
         */
        if (evaluate_condition_for_client(client_list->trigger, condition,
                        client, state)) {
                WARN("[notification-thread] Evaluation of a condition on client subscription failed, aborting.");
                ret = -1;
                free(client_list_element);
-               goto end_unlock;
+               goto end;
        }
 
        /*
@@ -1026,13 +1139,17 @@ int notification_thread_client_subscribe(struct notification_client *client,
         */
        client_list_element->client = client;
        CDS_INIT_LIST_HEAD(&client_list_element->node);
+
+       pthread_mutex_lock(&client_list->lock);
        cds_list_add(&client_list_element->node, &client_list->list);
-end_unlock:
-       rcu_read_unlock();
+       pthread_mutex_unlock(&client_list->lock);
 end:
        if (_status) {
                *_status = status;
        }
+       if (client_list) {
+               notification_client_list_put(client_list);
+       }
        return ret;
 error:
        free(condition_list_element);
@@ -1088,23 +1205,24 @@ int notification_thread_client_unsubscribe(
         * Remove the client from the list of clients interested the trigger
         * matching the condition.
         */
-       rcu_read_lock();
        client_list = get_client_list_from_condition(state, condition);
        if (!client_list) {
-               goto end_unlock;
+               goto end;
        }
 
+       pthread_mutex_lock(&client_list->lock);
        cds_list_for_each_entry_safe(client_list_element, client_tmp,
                        &client_list->list, node) {
-               if (client_list_element->client->socket != client->socket) {
+               if (client_list_element->client->id != client->id) {
                        continue;
                }
                cds_list_del(&client_list_element->node);
                free(client_list_element);
                break;
        }
-end_unlock:
-       rcu_read_unlock();
+       pthread_mutex_unlock(&client_list->lock);
+       notification_client_list_put(client_list);
+       client_list = NULL;
 end:
        lttng_condition_destroy(condition);
        if (_status) {
@@ -1123,24 +1241,22 @@ static
 void notification_client_destroy(struct notification_client *client,
                struct notification_thread_state *state)
 {
-       struct lttng_condition_list_element *condition_list_element, *tmp;
-
        if (!client) {
                return;
        }
 
-       /* Release all conditions to which the client was subscribed. */
-       cds_list_for_each_entry_safe(condition_list_element, tmp,
-                       &client->condition_list, node) {
-               (void) notification_thread_client_unsubscribe(client,
-                               condition_list_element->condition, state, NULL);
-       }
-
+       /*
+        * The client object is not reachable by other threads, no need to lock
+        * the client here.
+        */
        if (client->socket >= 0) {
                (void) lttcomm_close_unix_sock(client->socket);
+               client->socket = -1;
        }
+       client->communication.active = false;
        lttng_dynamic_buffer_reset(&client->communication.inbound.buffer);
        lttng_dynamic_buffer_reset(&client->communication.outbound.buffer);
+       pthread_mutex_destroy(&client->lock);
        call_rcu(&client->rcu_node, free_notification_client_rcu);
 }
 
@@ -1157,8 +1273,8 @@ struct notification_client *get_client_from_socket(int socket,
        struct notification_client *client = NULL;
 
        cds_lfht_lookup(state->client_socket_ht,
-                       hash_key_ulong((void *) (unsigned long) socket, lttng_ht_seed),
-                       match_client,
+                       hash_client_socket(socket),
+                       match_client_socket,
                        (void *) (unsigned long) socket,
                        &iter);
        node = cds_lfht_iter_get_node(&iter);
@@ -1172,6 +1288,34 @@ end:
        return client;
 }
 
+/*
+ * Call with rcu_read_lock held (and hold for the lifetime of the returned
+ * client pointer).
+ */
+static
+struct notification_client *get_client_from_id(notification_client_id id,
+               struct notification_thread_state *state)
+{
+       struct cds_lfht_iter iter;
+       struct cds_lfht_node *node;
+       struct notification_client *client = NULL;
+
+       cds_lfht_lookup(state->client_id_ht,
+                       hash_client_id(id),
+                       match_client_id,
+                       &id,
+                       &iter);
+       node = cds_lfht_iter_get_node(&iter);
+       if (!node) {
+               goto end;
+       }
+
+       client = caa_container_of(node, struct notification_client,
+                       client_id_ht_node);
+end:
+       return client;
+}
+
 static
 bool buffer_usage_condition_applies_to_channel(
                const struct lttng_condition *condition,
@@ -1375,7 +1519,7 @@ void lttng_session_trigger_list_destroy(struct lttng_session_trigger_list *list)
 
 static
 int lttng_session_trigger_list_add(struct lttng_session_trigger_list *list,
-               const struct lttng_trigger *trigger)
+               struct lttng_trigger *trigger)
 {
        int ret = 0;
        struct lttng_trigger_list_element *new_element =
@@ -1762,6 +1906,7 @@ int handle_notification_thread_command_session_rotation(
                struct notification_client_list *client_list;
                struct lttng_evaluation *evaluation = NULL;
                enum lttng_condition_type condition_type;
+               bool client_list_is_empty;
 
                trigger = trigger_list_element->trigger;
                condition = lttng_trigger_get_const_condition(trigger);
@@ -1785,7 +1930,10 @@ int handle_notification_thread_command_session_rotation(
                client_list = get_client_list_from_condition(state, condition);
                assert(client_list);
 
-               if (cds_list_empty(&client_list->list)) {
+               pthread_mutex_lock(&client_list->lock);
+               client_list_is_empty = cds_list_empty(&client_list->list);
+               pthread_mutex_unlock(&client_list->lock);
+               if (client_list_is_empty) {
                        /*
                         * No clients interested in the evaluation's result,
                         * skip it.
@@ -1805,7 +1953,7 @@ int handle_notification_thread_command_session_rotation(
                        /* Internal error */
                        ret = -1;
                        cmd_result = LTTNG_ERR_UNK;
-                       goto end;
+                       goto put_list;
                }
 
                /* Dispatch evaluation result to all clients. */
@@ -1814,8 +1962,10 @@ int handle_notification_thread_command_session_rotation(
                                session_info->uid,
                                session_info->gid);
                lttng_evaluation_destroy(evaluation);
+put_list:
+               notification_client_list_put(client_list);
                if (caa_unlikely(ret)) {
-                       goto end;
+                       break;
                }
        }
 end:
@@ -1826,59 +1976,294 @@ end:
 }
 
 static
-int condition_is_supported(struct lttng_condition *condition)
+int handle_notification_thread_command_add_application(
+       struct notification_thread_handle *handle,
+       struct notification_thread_state *state,
+       int read_side_trigger_event_application_pipe,
+       enum lttng_error_code *_cmd_result)
 {
-       int ret;
+       int ret = 0;
+       enum lttng_error_code cmd_result = LTTNG_OK;
+       struct notification_event_trigger_source_element *element = NULL;
 
-       switch (lttng_condition_get_type(condition)) {
-       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_LOW:
-       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_HIGH:
-       {
-               enum lttng_domain_type domain;
+       element = zmalloc(sizeof(*element));
+       if (!element) {
+               cmd_result = LTTNG_ERR_NOMEM;
+               ret = -1;
+               goto end;
+       }
 
-               ret = lttng_condition_buffer_usage_get_domain_type(condition,
-                               &domain);
-               if (ret) {
-                       ret = -1;
-                       goto end;
-               }
+       CDS_INIT_LIST_HEAD(&element->node);
+       element->fd = read_side_trigger_event_application_pipe;
 
-               if (domain != LTTNG_DOMAIN_KERNEL) {
-                       ret = 1;
-                       goto end;
-               }
+       pthread_mutex_lock(&handle->event_trigger_sources.lock);
+       cds_list_add(&element->node, &handle->event_trigger_sources.list);
+       pthread_mutex_unlock(&handle->event_trigger_sources.lock);
 
-               /*
-                * Older kernel tracers don't expose the API to monitor their
-                * buffers. Therefore, we reject triggers that require that
-                * mechanism to be available to be evaluated.
-                */
-               ret = kernel_supports_ring_buffer_snapshot_sample_positions();
-               break;
-       }
-       default:
-               ret = 1;
+       /* TODO: remove on failure to add to list? */
+
+       /* Adding the read side pipe to the event poll */
+       ret = lttng_poll_add(&state->events,
+                       read_side_trigger_event_application_pipe,
+                       LPOLLIN | LPOLLERR);
+
+       DBG3("[notification-thread] Adding application event source from fd: %d", read_side_trigger_event_application_pipe);
+       if (ret < 0) {
+               /* TODO: what should be the value of cmd_result??? */
+               ERR("[notification-thread] Failed to add event source pipe fd to pollset");
+               goto end;
        }
+
 end:
+       *_cmd_result = cmd_result;
        return ret;
 }
 
-/* Must be called with RCU read lock held. */
 static
-int bind_trigger_to_matching_session(const struct lttng_trigger *trigger,
-               struct notification_thread_state *state)
+int handle_notification_thread_command_remove_application(
+       struct notification_thread_handle *handle,
+       struct notification_thread_state *state,
+       int read_side_trigger_event_application_pipe,
+       enum lttng_error_code *_cmd_result)
 {
        int ret = 0;
-       const struct lttng_condition *condition;
-       const char *session_name;
-       struct lttng_session_trigger_list *trigger_list;
+       enum lttng_error_code cmd_result = LTTNG_OK;
 
-       condition = lttng_trigger_get_const_condition(trigger);
-       switch (lttng_condition_get_type(condition)) {
-       case LTTNG_CONDITION_TYPE_SESSION_ROTATION_ONGOING:
-       case LTTNG_CONDITION_TYPE_SESSION_ROTATION_COMPLETED:
-       {
-               enum lttng_condition_status status;
+       /* TODO: probably missing a lock; to revisit */
+       struct notification_event_trigger_source_element *source_element, *tmp;
+       cds_list_for_each_entry_safe(source_element, tmp,
+                       &handle->event_trigger_sources.list, node) {
+               if (source_element->fd != read_side_trigger_event_application_pipe) {
+                       continue;
+               }
+
+               DBG("[notification-thread] Removed event source from event source list");
+               cds_list_del(&source_element->node);
+               break;
+       }
+
+       DBG3("[notification-thread] Removing application event source from fd: %d", read_side_trigger_event_application_pipe);
+       /* Removing the read side pipe to the event poll */
+       ret = lttng_poll_del(&state->events,
+                       read_side_trigger_event_application_pipe);
+       if (ret < 0) {
+               /* TODO: what should be the value of cmd_result??? */
+               ERR("[notification-thread] Failed to remove event source pipe fd from pollset");
+               goto end;
+       }
+
+end:
+       *_cmd_result = cmd_result;
+       return ret;
+}
+
+static int handle_notification_thread_command_get_tokens(
+               struct notification_thread_handle *handle,
+               struct notification_thread_state *state,
+               struct lttng_triggers **triggers,
+               enum lttng_error_code *_cmd_result)
+{
+       int ret = 0, i = 0;
+       enum lttng_error_code cmd_result = LTTNG_OK;
+       struct cds_lfht_iter iter;
+       struct notification_trigger_tokens_ht_element *element;
+       struct lttng_triggers *local_triggers = NULL;
+
+       local_triggers = lttng_triggers_create();
+       if (!local_triggers) {
+               cmd_result = LTTNG_ERR_NOMEM;
+               goto end;
+       }
+
+       rcu_read_lock();
+       cds_lfht_for_each_entry (
+                       state->trigger_tokens_ht, &iter, element, node) {
+               ret = lttng_triggers_add(local_triggers, element->trigger);
+               if (ret < 0) {
+                       cmd_result = LTTNG_ERR_FATAL;
+                       ret = -1;
+                       goto end;
+               }
+
+               /* Ownership is shared with the lttng_triggers object */
+               lttng_trigger_get(element->trigger);
+
+               i++;
+       }
+
+       /* Passing ownership up */
+       *triggers = local_triggers;
+       local_triggers = NULL;
+
+end:
+       rcu_read_unlock();
+       lttng_triggers_destroy(local_triggers);
+       *_cmd_result = cmd_result;
+       return ret;
+}
+
+static
+int handle_notification_thread_command_list_triggers(
+       struct notification_thread_handle *handle,
+       struct notification_thread_state *state,
+       uid_t uid,
+       gid_t gid,
+       struct lttng_triggers **triggers,
+       enum lttng_error_code *_cmd_result)
+{
+       int ret = 0, i = 0;
+       enum lttng_error_code cmd_result = LTTNG_OK;
+       struct cds_lfht_iter iter;
+       struct lttng_trigger_ht_element *trigger_ht_element;
+       struct lttng_triggers *local_triggers = NULL;
+       const struct lttng_credentials *creds;
+       
+       long scb, sca;
+       unsigned long count;
+
+       rcu_read_lock();
+       cds_lfht_count_nodes(state->triggers_ht, &scb, &count, &sca);
+
+       /* TODO check downcasting */
+       local_triggers = lttng_triggers_create();
+       if (!local_triggers) {
+               cmd_result = LTTNG_ERR_NOMEM;
+               goto end;
+       }
+
+       cds_lfht_for_each_entry (state->triggers_ht, &iter,
+                       trigger_ht_element, node) {
+               /* Only return the triggers to which the requesting client has
+                * access. For now, the root user can only list its own
+                * triggers.
+                * TODO: root user behavior
+                */
+               creds = lttng_trigger_get_credentials(trigger_ht_element->trigger);
+               if ((uid != creds->uid) || (gid != creds->gid)) {
+                       continue;
+               }
+
+               ret = lttng_triggers_add(local_triggers, trigger_ht_element->trigger);
+               if (ret < 0) {
+                       ret = -1;
+                       goto end;
+               }
+               /* Ownership is shared with the lttng_triggers object */
+               lttng_trigger_get(trigger_ht_element->trigger);
+
+               i++;
+       }
+
+       /* Passing ownership up */
+       *triggers = local_triggers;
+       local_triggers = NULL;
+
+end:
+       rcu_read_unlock();
+       lttng_triggers_destroy(local_triggers);
+       *_cmd_result = cmd_result;
+       return ret;
+}
+
+static
+int condition_is_supported(struct lttng_condition *condition)
+{
+       int ret;
+
+       switch (lttng_condition_get_type(condition)) {
+       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_LOW:
+       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_HIGH:
+       {
+               enum lttng_domain_type domain;
+
+               ret = lttng_condition_buffer_usage_get_domain_type(condition,
+                               &domain);
+               if (ret) {
+                       ret = -1;
+                       goto end;
+               }
+
+               if (domain != LTTNG_DOMAIN_KERNEL) {
+                       ret = 1;
+                       goto end;
+               }
+
+               /*
+                * Older kernel tracers don't expose the API to monitor their
+                * buffers. Therefore, we reject triggers that require that
+                * mechanism to be available to be evaluated.
+                */
+               ret = kernel_supports_ring_buffer_snapshot_sample_positions();
+               break;
+       }
+       case LTTNG_CONDITION_TYPE_EVENT_RULE_HIT:
+       {
+               /* TODO:
+                * Check for kernel support.
+                * Check for ust support ??
+                */
+               ret = 1;
+               break;
+       }
+       default:
+               ret = 1;
+       }
+end:
+       return ret;
+}
+
+static
+int action_is_supported(struct lttng_action *action)
+{
+       int ret;
+
+       switch (lttng_action_get_type(action)) {
+       case LTTNG_ACTION_TYPE_NOTIFY:
+       case LTTNG_ACTION_TYPE_START_SESSION:
+       case LTTNG_ACTION_TYPE_STOP_SESSION:
+       case LTTNG_ACTION_TYPE_ROTATE_SESSION:
+       case LTTNG_ACTION_TYPE_SNAPSHOT_SESSION:
+       {
+               /* TODO: validate that this is true for the kernel with regards
+                * to rotation and snapshot. Start/stop are not a problem, and
+                * neither is notify.
+                */
+               /* For now all type of actions are supported */
+               ret = 1;
+               break;
+       }
+       case LTTNG_ACTION_TYPE_GROUP:
+       {
+               /* TODO: Iterate over all internal actions and validate that
+                * they are supported
+                */
+               ret = 1;
+               break;
+
+       }
+       default:
+               ret = 1;
+       }
+
+       return ret;
+}
+
+/* Must be called with RCU read lock held. */
+static
+int bind_trigger_to_matching_session(struct lttng_trigger *trigger,
+               struct notification_thread_state *state)
+{
+       int ret = 0;
+       const struct lttng_condition *condition;
+       const char *session_name;
+       struct lttng_session_trigger_list *trigger_list;
+
+       condition = lttng_trigger_get_const_condition(trigger);
+       switch (lttng_condition_get_type(condition)) {
+       case LTTNG_CONDITION_TYPE_SESSION_ROTATION_ONGOING:
+       case LTTNG_CONDITION_TYPE_SESSION_ROTATION_COMPLETED:
+       {
+               enum lttng_condition_status status;
 
                status = lttng_condition_session_rotation_get_session_name(
                                condition, &session_name);
@@ -1911,7 +2296,7 @@ end:
 
 /* Must be called with RCU read lock held. */
 static
-int bind_trigger_to_matching_channels(const struct lttng_trigger *trigger,
+int bind_trigger_to_matching_channels(struct lttng_trigger *trigger,
                struct notification_thread_state *state)
 {
        int ret = 0;
@@ -1955,95 +2340,26 @@ end:
        return ret;
 }
 
-/*
- * FIXME A client's credentials are not checked when registering a trigger, nor
- *       are they stored alongside with the trigger.
- *
- * The effects of this are benign since:
- *     - The client will succeed in registering the trigger, as it is valid,
- *     - The trigger will, internally, be bound to the channel/session,
- *     - The notifications will not be sent since the client's credentials
- *       are checked against the channel at that moment.
- *
- * If this function returns a non-zero value, it means something is
- * fundamentally broken and the whole subsystem/thread will be torn down.
- *
- * If a non-fatal error occurs, just set the cmd_result to the appropriate
- * error code.
- */
-static
-int handle_notification_thread_command_register_trigger(
+static int action_notify_register_trigger(
                struct notification_thread_state *state,
-               struct lttng_trigger *trigger,
-               enum lttng_error_code *cmd_result)
+               struct lttng_trigger *trigger)
 {
+
        int ret = 0;
        struct lttng_condition *condition;
        struct notification_client *client;
        struct notification_client_list *client_list = NULL;
-       struct lttng_trigger_ht_element *trigger_ht_element = NULL;
-       struct notification_client_list_element *client_list_element, *tmp;
-       struct cds_lfht_node *node;
        struct cds_lfht_iter iter;
-       bool free_trigger = true;
-
-       rcu_read_lock();
+       struct notification_client_list_element *client_list_element, *tmp;
 
        condition = lttng_trigger_get_condition(trigger);
        assert(condition);
 
-       ret = condition_is_supported(condition);
-       if (ret < 0) {
-               goto error;
-       } else if (ret == 0) {
-               *cmd_result = LTTNG_ERR_NOT_SUPPORTED;
-               goto error;
-       } else {
-               /* Feature is supported, continue. */
-               ret = 0;
-       }
-
-       trigger_ht_element = zmalloc(sizeof(*trigger_ht_element));
-       if (!trigger_ht_element) {
-               ret = -1;
-               goto error;
-       }
-
-       /* Add trigger to the trigger_ht. */
-       cds_lfht_node_init(&trigger_ht_element->node);
-       trigger_ht_element->trigger = trigger;
-
-       node = cds_lfht_add_unique(state->triggers_ht,
-                       lttng_condition_hash(condition),
-                       match_condition,
-                       condition,
-                       &trigger_ht_element->node);
-       if (node != &trigger_ht_element->node) {
-               /* Not a fatal error, simply report it to the client. */
-               *cmd_result = LTTNG_ERR_TRIGGER_EXISTS;
-               goto error_free_ht_element;
-       }
-
-       /*
-        * Ownership of the trigger and of its wrapper was transfered to
-        * the triggers_ht.
-        */
-       trigger_ht_element = NULL;
-       free_trigger = false;
-
-       /*
-        * The rest only applies to triggers that have a "notify" action.
-        * It is not skipped as this is the only action type currently
-        * supported.
-        */
-       client_list = zmalloc(sizeof(*client_list));
+       client_list = notification_client_list_create(trigger);
        if (!client_list) {
                ret = -1;
-               goto error_free_ht_element;
+               goto end;
        }
-       cds_lfht_node_init(&client_list->notification_trigger_ht_node);
-       CDS_INIT_LIST_HEAD(&client_list->list);
-       client_list->trigger = trigger;
 
        /* Build a list of clients to which this new trigger applies. */
        cds_lfht_for_each_entry(state->client_socket_ht, &iter, client,
@@ -2055,23 +2371,19 @@ int handle_notification_thread_command_register_trigger(
                client_list_element = zmalloc(sizeof(*client_list_element));
                if (!client_list_element) {
                        ret = -1;
-                       goto error_free_client_list;
+                       goto error_put_client_list;
                }
                CDS_INIT_LIST_HEAD(&client_list_element->node);
                client_list_element->client = client;
                cds_list_add(&client_list_element->node, &client_list->list);
        }
 
-       cds_lfht_add(state->notification_trigger_clients_ht,
-                       lttng_condition_hash(condition),
-                       &client_list->notification_trigger_ht_node);
-
        switch (get_condition_binding_object(condition)) {
        case LTTNG_OBJECT_TYPE_SESSION:
                /* Add the trigger to the list if it matches a known session. */
                ret = bind_trigger_to_matching_session(trigger, state);
                if (ret) {
-                       goto error_free_client_list;
+                       goto error_put_client_list;
                }
                break;
        case LTTNG_OBJECT_TYPE_CHANNEL:
@@ -2081,7 +2393,7 @@ int handle_notification_thread_command_register_trigger(
                 */
                ret = bind_trigger_to_matching_channels(trigger, state);
                if (ret) {
-                       goto error_free_client_list;
+                       goto error_put_client_list;
                }
                break;
        case LTTNG_OBJECT_TYPE_NONE:
@@ -2089,7 +2401,7 @@ int handle_notification_thread_command_register_trigger(
        default:
                ERR("[notification-thread] Unknown object type on which to bind a newly registered trigger was encountered");
                ret = -1;
-               goto error_free_client_list;
+               goto error_put_client_list;
        }
 
        /*
@@ -2117,13 +2429,15 @@ int handle_notification_thread_command_register_trigger(
         * current state. Otherwise, the next evaluation cycle may only see
         * that the evaluations remain the same (true for samples n-1 and n) and
         * the client will never know that the condition has been met.
+        *
+        * No need to lock the list here as it has not been published yet.
         */
        cds_list_for_each_entry_safe(client_list_element, tmp,
                        &client_list->list, node) {
                ret = evaluate_condition_for_client(trigger, condition,
                                client_list_element->client, state);
                if (ret) {
-                       goto error_free_client_list;
+                       goto error_put_client_list;
                }
        }
 
@@ -2131,25 +2445,252 @@ int handle_notification_thread_command_register_trigger(
         * Client list ownership transferred to the
         * notification_trigger_clients_ht.
         */
+       publish_notification_client_list(state, client_list);
        client_list = NULL;
+error_put_client_list:
+       notification_client_list_put(client_list);
+end:
+       return ret;
+}
 
-       *cmd_result = LTTNG_OK;
-error_free_client_list:
-       if (client_list) {
-               cds_list_for_each_entry_safe(client_list_element, tmp,
-                               &client_list->list, node) {
-                       free(client_list_element);
+static
+bool trigger_name_taken(struct notification_thread_state *state, const char *name)
+{
+       struct cds_lfht_node *triggers_by_name_ht_node;
+       struct cds_lfht_iter iter;
+       /* TODO change hashing for trigger */
+       cds_lfht_lookup(state->triggers_by_name_ht,
+                       hash_key_str(name, lttng_ht_seed),
+                       match_str,
+                       name,
+                       &iter);
+       triggers_by_name_ht_node = cds_lfht_iter_get_node(&iter);
+       if (triggers_by_name_ht_node) {
+               return true;
+       } else {
+               return false;
+       }
+
+}
+static
+void generate_trigger_name(struct notification_thread_state *state, struct lttng_trigger *trigger, const char **name)
+{
+       /* Here the offset criterion guarantees termination. This will be a
+        * nice bikeshedding conversation; I would simply generate UUIDs and
+        * use them as trigger names.
+        */
+       bool taken = false;
+       do {
+               lttng_trigger_generate_name(trigger, state->trigger_id.name_offset);
+               /* TODO error checking */
+               lttng_trigger_get_name(trigger, name);
+               taken = trigger_name_taken(state, *name);
+               if (taken) {
+                       state->trigger_id.name_offset++;
+               }
+       } while (taken || state->trigger_id.name_offset == UINT32_MAX);
+}
+
+static bool action_is_notify(const struct lttng_action *action)
+{
+       /* TODO for action groups we need to iterate over all of them */
+       enum lttng_action_type type = lttng_action_get_type_const(action);
+       bool ret = false;
+       enum lttng_action_status status;
+       const struct lttng_action *tmp;
+       unsigned int i, count;
+
+       switch (type) {
+       case LTTNG_ACTION_TYPE_NOTIFY:
+               ret = true;
+               break;
+       case LTTNG_ACTION_TYPE_GROUP:
+               status = lttng_action_group_get_count(action, &count);
+               if (status != LTTNG_ACTION_STATUS_OK) {
+                       assert(0);
+               }
+               for (i = 0; i < count; i++) {
+                       tmp = lttng_action_group_get_at_index_const(action, i);
+                       assert(tmp);
+                       ret = action_is_notify(tmp);
+                       if (ret) {
+                               break;
+                       }
+               }
+               break;
+       default:
+               ret = false;
+               break;
+       }
+
+       return ret;
+}
+
+/*
+ * TODO: REVIEW THIS COMMENT.
+ * FIXME A client's credentials are not checked when registering a trigger, nor
+ *       are they stored alongside with the trigger.
+ *
+ * The effects of this are benign since:
+ *     - The client will succeed in registering the trigger, as it is valid,
+ *     - The trigger will, internally, be bound to the channel/session,
+ *     - The notifications will not be sent since the client's credentials
+ *       are checked against the channel at that moment.
+ *
+ * If this function returns a non-zero value, it means something is
+ * fundamentally broken and the whole subsystem/thread will be torn down.
+ *
+ * If a non-fatal error occurs, just set the cmd_result to the appropriate
+ * error code.
+ */
+static
+int handle_notification_thread_command_register_trigger(
+               struct notification_thread_state *state,
+               struct lttng_trigger *trigger,
+               enum lttng_error_code *cmd_result)
+{
+       int ret = 0;
+       int is_supported;
+       struct lttng_condition *condition;
+       struct lttng_action *action;
+       struct lttng_trigger_ht_element *trigger_ht_element = NULL;
+       struct notification_trigger_tokens_ht_element *trigger_tokens_ht_element = NULL;
+       struct cds_lfht_node *node;
+       const char* trigger_name;
+       bool free_trigger = true;
+
+       assert(trigger->creds.set);
+
+       rcu_read_lock();
+
+       /* Set the trigger's key */
+       lttng_trigger_set_key(trigger, state->trigger_id.token_generator);
+
+       if (lttng_trigger_get_name(trigger, &trigger_name) == LTTNG_TRIGGER_STATUS_UNSET) {
+               generate_trigger_name(state, trigger, &trigger_name);
+       } else if (trigger_name_taken(state, trigger_name)) {
+               /* Not a fatal error */
+               *cmd_result = LTTNG_ERR_TRIGGER_EXISTS;
+               ret = 0;
+               goto error;
+       }
+
+       condition = lttng_trigger_get_condition(trigger);
+       assert(condition);
+
+       action = lttng_trigger_get_action(trigger);
+       assert(action);
+
+       is_supported = condition_is_supported(condition);
+       if (is_supported < 0) {
+               goto error;
+       } else if (is_supported == 0) {
+               ret = 0;
+               *cmd_result = LTTNG_ERR_NOT_SUPPORTED;
+               goto error;
+       }
+
+       is_supported = action_is_supported(action);
+       if (is_supported < 0) {
+               goto error;
+       } else if (is_supported == 0) {
+               ret = 0;
+               *cmd_result = LTTNG_ERR_NOT_SUPPORTED;
+               goto error;
+       }
+
+       trigger_ht_element = zmalloc(sizeof(*trigger_ht_element));
+       if (!trigger_ht_element) {
+               ret = -1;
+               goto error;
+       }
+
+       /* Add trigger to the trigger_ht. */
+       cds_lfht_node_init(&trigger_ht_element->node);
+       cds_lfht_node_init(&trigger_ht_element->node_by_name);
+
+       /*
+        * This element owns the trigger object from now on; this is why there
+        * is no lttng_trigger_get here.
+        * This thread is now the owner of the trigger object.
+        */
+       trigger_ht_element->trigger = trigger;
+
+       node = cds_lfht_add_unique(state->triggers_ht,
+                       lttng_condition_hash(condition),
+                       match_trigger,
+                       trigger,
+                       &trigger_ht_element->node);
+       if (node != &trigger_ht_element->node) {
+               /* Not a fatal error, simply report it to the client. */
+               *cmd_result = LTTNG_ERR_TRIGGER_EXISTS;
+               goto error_free_ht_element;
+       }
+
+       node = cds_lfht_add_unique(state->triggers_by_name_ht,
+                       hash_key_str(trigger_name, lttng_ht_seed),
+                       match_str,
+                       trigger_name,
+                       &trigger_ht_element->node_by_name);
+       if (node != &trigger_ht_element->node_by_name) {
+               /* This should never happen */
+               /* Not a fatal error, simply report it to the client. */
+               /* TODO remove from the trigger_ht */
+               *cmd_result = LTTNG_ERR_TRIGGER_EXISTS;
+               goto error_free_ht_element;
+       }
+
+       if (lttng_condition_get_type(condition) == LTTNG_CONDITION_TYPE_EVENT_RULE_HIT) {
+               trigger_tokens_ht_element = zmalloc(sizeof(*trigger_tokens_ht_element));
+               if (!trigger_tokens_ht_element) {
+                       ret = -1;
+                       goto error;
+               }
+
+               /* Add trigger token to the trigger_tokens_ht. */
+               cds_lfht_node_init(&trigger_tokens_ht_element->node);
+               trigger_tokens_ht_element->token = trigger->key.value;
+               trigger_tokens_ht_element->trigger = trigger;
+
+               node = cds_lfht_add_unique(state->trigger_tokens_ht,
+                               hash_key_u64(&trigger_tokens_ht_element->token, lttng_ht_seed),
+                               match_trigger_token,
+                               &trigger_tokens_ht_element->token,
+                               &trigger_tokens_ht_element->node);
+               if (node != &trigger_tokens_ht_element->node) {
+                       /* TODO: THIS IS A FATAL ERROR... should never happen */
+                       /* Not a fatal error, simply report it to the client. */
+                       *cmd_result = LTTNG_ERR_TRIGGER_EXISTS;
+                       goto error_free_ht_element;
+               }
+       }
+
+       /*
+        * Ownership of the trigger and of its wrapper was transferred to
+        * the triggers_ht. Same for token ht element if necessary.
+        */
+       trigger_tokens_ht_element = NULL;
+       trigger_ht_element = NULL;
+       free_trigger = false;
+
+       if (action_is_notify(action)) {
+               ret = action_notify_register_trigger(state, trigger);
+               if (ret < 0) {
+                       /* TODO should cmd_result be set here? */
+                       ret = -1;
+                       goto error_free_ht_element;
                }
-               free(client_list);
        }
+
+       /* Increment the trigger unique id generator */
+       state->trigger_id.token_generator++;
+       *cmd_result = LTTNG_OK;
+
 error_free_ht_element:
        free(trigger_ht_element);
+       free(trigger_tokens_ht_element);
 error:
        if (free_trigger) {
-               struct lttng_action *action = lttng_trigger_get_action(trigger);
-
-               lttng_condition_destroy(condition);
-               lttng_action_destroy(action);
                lttng_trigger_destroy(trigger);
        }
        rcu_read_unlock();
@@ -2157,16 +2698,16 @@ error:
 }
 
 static
-void free_notification_client_list_rcu(struct rcu_head *node)
+void free_lttng_trigger_ht_element_rcu(struct rcu_head *node)
 {
-       free(caa_container_of(node, struct notification_client_list,
+       free(caa_container_of(node, struct lttng_trigger_ht_element,
                        rcu_node));
 }
 
 static
-void free_lttng_trigger_ht_element_rcu(struct rcu_head *node)
+void free_notification_trigger_tokens_ht_element_rcu(struct rcu_head *node)
 {
-       free(caa_container_of(node, struct lttng_trigger_ht_element,
+       free(caa_container_of(node, struct notification_trigger_tokens_ht_element,
                        rcu_node));
 }
 
@@ -2180,19 +2721,22 @@ int handle_notification_thread_command_unregister_trigger(
        struct cds_lfht_node *triggers_ht_node;
        struct lttng_channel_trigger_list *trigger_list;
        struct notification_client_list *client_list;
-       struct notification_client_list_element *client_list_element, *tmp;
        struct lttng_trigger_ht_element *trigger_ht_element = NULL;
        struct lttng_condition *condition = lttng_trigger_get_condition(
                        trigger);
-       struct lttng_action *action;
+       struct lttng_action *action = lttng_trigger_get_action(trigger);
        enum lttng_error_code cmd_reply;
 
        rcu_read_lock();
 
+       /* TODO change hashing for trigger */
+       /* TODO: disabling triggers as the root user is not complete; for now,
+        * the root user cannot disable another user's triggers.
+        */
        cds_lfht_lookup(state->triggers_ht,
                        lttng_condition_hash(condition),
-                       match_condition,
-                       condition,
+                       match_trigger,
+                       trigger,
                        &iter);
        triggers_ht_node = cds_lfht_iter_get_node(&iter);
        if (!triggers_ht_node) {
@@ -2209,13 +2753,7 @@ int handle_notification_thread_command_unregister_trigger(
 
                cds_list_for_each_entry_safe(trigger_element, tmp,
                                &trigger_list->list, node) {
-                       const struct lttng_condition *current_condition =
-                                       lttng_trigger_get_const_condition(
-                                               trigger_element->trigger);
-
-                       assert(current_condition);
-                       if (!lttng_condition_is_equal(condition,
-                                       current_condition)) {
+                       if (!lttng_trigger_is_equal(trigger, trigger_element->trigger)) {
                                continue;
                        }
 
@@ -2226,30 +2764,45 @@ int handle_notification_thread_command_unregister_trigger(
                }
        }
 
-       /*
-        * Remove and release the client list from
-        * notification_trigger_clients_ht.
-        */
-       client_list = get_client_list_from_condition(state, condition);
-       assert(client_list);
+       if (lttng_condition_get_type(condition) == LTTNG_CONDITION_TYPE_EVENT_RULE_HIT) {
+               struct notification_trigger_tokens_ht_element *trigger_tokens_ht_element;
+               cds_lfht_for_each_entry(state->trigger_tokens_ht, &iter, trigger_tokens_ht_element,
+                               node) {
+                       if (!lttng_trigger_is_equal(trigger, trigger_tokens_ht_element->trigger)) {
+                               continue;
+                       }
 
-       cds_list_for_each_entry_safe(client_list_element, tmp,
-                       &client_list->list, node) {
-               free(client_list_element);
+                       /* TODO: notify all registered applications so they drop this trigger. */
+                       DBG("[notification-thread] Removed trigger from tokens_ht");
+                       cds_lfht_del(state->trigger_tokens_ht,
+                                       &trigger_tokens_ht_element->node);
+                       call_rcu(&trigger_tokens_ht_element->rcu_node, free_notification_trigger_tokens_ht_element_rcu);
+
+                       break;
+               }
+       }
+
+       if (action_is_notify(action)) {
+               /*
+                * Remove and release the client list from
+                * notification_trigger_clients_ht.
+                */
+               client_list = get_client_list_from_condition(state, condition);
+               assert(client_list);
+
+               /* Put new reference and the hashtable's reference. */
+               notification_client_list_put(client_list);
+               notification_client_list_put(client_list);
+               client_list = NULL;
        }
-       cds_lfht_del(state->notification_trigger_clients_ht,
-                       &client_list->notification_trigger_ht_node);
-       call_rcu(&client_list->rcu_node, free_notification_client_list_rcu);
 
        /* Remove trigger from triggers_ht. */
        trigger_ht_element = caa_container_of(triggers_ht_node,
                        struct lttng_trigger_ht_element, node);
+       cds_lfht_del(state->triggers_by_name_ht, &trigger_ht_element->node_by_name);
        cds_lfht_del(state->triggers_ht, triggers_ht_node);
 
-       condition = lttng_trigger_get_condition(trigger_ht_element->trigger);
-       lttng_condition_destroy(condition);
-       action = lttng_trigger_get_action(trigger_ht_element->trigger);
-       lttng_action_destroy(action);
+       /* Release the hash table element's ownership of the trigger. */
        lttng_trigger_destroy(trigger_ht_element->trigger);
        call_rcu(&trigger_ht_element->rcu_node, free_lttng_trigger_ht_element_rcu);
 end:
@@ -2327,6 +2880,47 @@ int handle_notification_thread_command(
                                cmd->parameters.session_rotation.location,
                                &cmd->reply_code);
                break;
+       case NOTIFICATION_COMMAND_TYPE_ADD_APPLICATION:
+               ret = handle_notification_thread_command_add_application(
+                               handle,
+                               state,
+                               cmd->parameters.application.read_side_trigger_event_application_pipe,
+                               &cmd->reply_code);
+               break;
+       case NOTIFICATION_COMMAND_TYPE_REMOVE_APPLICATION:
+               ret = handle_notification_thread_command_remove_application(
+                               handle,
+                               state,
+                               cmd->parameters.application.read_side_trigger_event_application_pipe,
+                               &cmd->reply_code);
+               break;
+       case NOTIFICATION_COMMAND_TYPE_GET_TOKENS:
+       {
+               struct lttng_triggers *triggers = NULL;
+               ret = handle_notification_thread_command_get_tokens(
+                               handle, state, &triggers, &cmd->reply_code);
+               cmd->reply.get_tokens.triggers = triggers;
+               ret = 0;
+               break;
+
+               /* NOTE(review): removed unreachable statements that followed
+                * the break above (the cmd->reply_code/ret writes were dead).
+                */
+       }
+       case NOTIFICATION_COMMAND_TYPE_LIST_TRIGGERS:
+       {
+               struct lttng_triggers *triggers = NULL;
+               ret = handle_notification_thread_command_list_triggers(
+                               handle,
+                               state,
+                               cmd->parameters.list_triggers.uid,
+                               cmd->parameters.list_triggers.gid,
+                               &triggers,
+                               &cmd->reply_code);
+               cmd->reply.list_triggers.triggers = triggers;
+               ret = 0;
+               break;
+       }
        case NOTIFICATION_COMMAND_TYPE_QUIT:
                DBG("[notification-thread] Received quit command");
                cmd->reply_code = LTTNG_OK;
@@ -2342,7 +2936,12 @@ int handle_notification_thread_command(
        }
 end:
        cds_list_del(&cmd->cmd_list_node);
-       lttng_waiter_wake_up(&cmd->reply_waiter);
+       if (cmd->is_async) {
+               free(cmd);
+               cmd = NULL;
+       } else {
+               lttng_waiter_wake_up(&cmd->reply_waiter);
+       }
        pthread_mutex_unlock(&handle->cmd_queue.lock);
        return ret;
 error_unlock:
@@ -2355,12 +2954,6 @@ error:
        return -1;
 }
 
-static
-unsigned long hash_client_socket(int socket)
-{
-       return hash_key_ulong((void *) (unsigned long) socket, lttng_ht_seed);
-}
-
 static
 int socket_set_non_blocking(int socket)
 {
@@ -2384,11 +2977,14 @@ end:
        return ret;
 }
 
+/* Client lock must be acquired by caller. */
 static
 int client_reset_inbound_state(struct notification_client *client)
 {
        int ret;
 
+       ASSERT_LOCKED(client->lock);
+
        ret = lttng_dynamic_buffer_set_size(
                        &client->communication.inbound.buffer, 0);
        assert(!ret);
@@ -2419,11 +3015,16 @@ int handle_notification_thread_client_connect(
                ret = -1;
                goto error;
        }
+       pthread_mutex_init(&client->lock, NULL);
+       client->id = state->next_notification_client_id++;
        CDS_INIT_LIST_HEAD(&client->condition_list);
        lttng_dynamic_buffer_init(&client->communication.inbound.buffer);
        lttng_dynamic_buffer_init(&client->communication.outbound.buffer);
        client->communication.inbound.expect_creds = true;
+
+       pthread_mutex_lock(&client->lock);
        ret = client_reset_inbound_state(client);
+       pthread_mutex_unlock(&client->lock);
        if (ret) {
                ERR("[notification-thread] Failed to reset client communication's inbound state");
                ret = 0;
@@ -2467,6 +3068,9 @@ int handle_notification_thread_client_connect(
        cds_lfht_add(state->client_socket_ht,
                        hash_client_socket(client->socket),
                        &client->client_socket_ht_node);
+       cds_lfht_add(state->client_id_ht,
+                       hash_client_id(client->id),
+                       &client->client_id_ht_node);
        rcu_read_unlock();
 
        return ret;
@@ -2475,9 +3079,45 @@ error:
        return ret;
 }
 
-int handle_notification_thread_client_disconnect(
-               int client_socket,
+/* RCU read-lock must be held by the caller. */
+static
+int notification_thread_client_disconnect(
+               struct notification_client *client,
                struct notification_thread_state *state)
+{
+       int ret;
+       struct lttng_condition_list_element *condition_list_element, *tmp;
+
+       /* Acquire the client lock to disable its communication atomically. */
+       pthread_mutex_lock(&client->lock);
+       client->communication.active = false;
+       ret = lttng_poll_del(&state->events, client->socket);
+       if (ret) {
+               ERR("[notification-thread] Failed to remove client socket %d from poll set",
+                               client->socket);
+       }
+       pthread_mutex_unlock(&client->lock);
+
+       cds_lfht_del(state->client_socket_ht, &client->client_socket_ht_node);
+       cds_lfht_del(state->client_id_ht, &client->client_id_ht_node);
+
+       /* Release all conditions to which the client was subscribed. */
+       cds_list_for_each_entry_safe(condition_list_element, tmp,
+                       &client->condition_list, node) {
+               (void) notification_thread_client_unsubscribe(client,
+                               condition_list_element->condition, state, NULL);
+       }
+
+       /*
+        * The client is no longer reachable by other threads: it has been
+        * removed from the client hash tables above. Safe to destroy.
+        */
+       notification_client_destroy(client, state);
+       return ret;
+}
+
+int handle_notification_thread_client_disconnect(
+               int client_socket, struct notification_thread_state *state)
 {
        int ret = 0;
        struct notification_client *client;
@@ -2494,13 +3134,7 @@ int handle_notification_thread_client_disconnect(
                goto end;
        }
 
-       ret = lttng_poll_del(&state->events, client_socket);
-       if (ret) {
-               ERR("[notification-thread] Failed to remove client socket from poll set");
-       }
-        cds_lfht_del(state->client_socket_ht,
-                       &client->client_socket_ht_node);
-       notification_client_destroy(client, state);
+       ret = notification_thread_client_disconnect(client, state);
 end:
        rcu_read_unlock();
        return ret;
@@ -2516,11 +3150,11 @@ int handle_notification_thread_client_disconnect_all(
        rcu_read_lock();
        DBG("[notification-thread] Closing all client connections");
        cds_lfht_for_each_entry(state->client_socket_ht, &iter, client,
-               client_socket_ht_node) {
+                       client_socket_ht_node) {
                int ret;
 
-               ret = handle_notification_thread_client_disconnect(
-                               client->socket, state);
+               ret = notification_thread_client_disconnect(
+                               client, state);
                if (ret) {
                        error_encoutered = true;
                }
@@ -2550,11 +3184,63 @@ int handle_notification_thread_trigger_unregister_all(
 }
 
 static
-int client_flush_outgoing_queue(struct notification_client *client,
+int client_handle_transmission_status(
+               struct notification_client *client,
+               enum client_transmission_status transmission_status,
                struct notification_thread_state *state)
+{
+       int ret = 0;
+
+       ASSERT_LOCKED(client->lock);
+
+       switch (transmission_status) {
+       case CLIENT_TRANSMISSION_STATUS_COMPLETE:
+               ret = lttng_poll_mod(&state->events, client->socket,
+                               CLIENT_POLL_MASK_IN);
+               if (ret) {
+                       goto end;
+               }
+
+               client->communication.outbound.queued_command_reply = false;
+               client->communication.outbound.dropped_notification = false;
+               break;
+       case CLIENT_TRANSMISSION_STATUS_QUEUED:
+               /*
+                * We want to be notified whenever there is buffer space
+                * available to send the rest of the payload.
+                */
+               ret = lttng_poll_mod(&state->events, client->socket,
+                               CLIENT_POLL_MASK_IN_OUT);
+               if (ret) {
+                       goto end;
+               }
+               break;
+       case CLIENT_TRANSMISSION_STATUS_FAIL:
+               ret = notification_thread_client_disconnect(client, state);
+               if (ret) {
+                       goto end;
+               }
+               break;
+       case CLIENT_TRANSMISSION_STATUS_ERROR:
+               ret = -1;
+               goto end;
+       default:
+               abort();
+       }
+end:
+       return ret;
+}
+
+/* Client lock must be acquired by caller. */
+static
+enum client_transmission_status client_flush_outgoing_queue(
+               struct notification_client *client)
 {
        ssize_t ret;
        size_t to_send_count;
+       enum client_transmission_status status;
+
+       ASSERT_LOCKED(client->lock);
 
        assert(client->communication.outbound.buffer.size != 0);
        to_send_count = client->communication.outbound.buffer.size;
@@ -2580,25 +3266,12 @@ int client_flush_outgoing_queue(struct notification_client *client,
                if (ret) {
                        goto error;
                }
-
-               /*
-                * We want to be notified whenever there is buffer space
-                * available to send the rest of the payload.
-                */
-               ret = lttng_poll_mod(&state->events, client->socket,
-                               CLIENT_POLL_MASK_IN_OUT);
-               if (ret) {
-                       goto error;
-               }
+               status = CLIENT_TRANSMISSION_STATUS_QUEUED;
        } else if (ret < 0) {
                /* Generic error, disconnect the client. */
-               ERR("[notification-thread] Failed to send flush outgoing queue, disconnecting client (socket fd = %i)",
+               ERR("[notification-thread] Failed to flush outgoing queue, disconnecting client (socket fd = %i)",
                                client->socket);
-               ret = handle_notification_thread_client_disconnect(
-                               client->socket, state);
-               if (ret) {
-                       goto error;
-               }
+               status = CLIENT_TRANSMISSION_STATUS_FAIL;
        } else {
                /* No error and flushed the queue completely. */
                ret = lttng_dynamic_buffer_set_size(
@@ -2606,21 +3279,15 @@ int client_flush_outgoing_queue(struct notification_client *client,
                if (ret) {
                        goto error;
                }
-               ret = lttng_poll_mod(&state->events, client->socket,
-                               CLIENT_POLL_MASK_IN);
-               if (ret) {
-                       goto error;
-               }
-
-               client->communication.outbound.queued_command_reply = false;
-               client->communication.outbound.dropped_notification = false;
+               status = CLIENT_TRANSMISSION_STATUS_COMPLETE;
        }
 
-       return 0;
+       return status;
 error:
-       return -1;
+       return CLIENT_TRANSMISSION_STATUS_ERROR;
 }
 
+/* Client lock must be acquired by caller. */
 static
 int client_send_command_reply(struct notification_client *client,
                struct notification_thread_state *state,
@@ -2635,6 +3302,9 @@ int client_send_command_reply(struct notification_client *client,
                .size = sizeof(reply),
        };
        char buffer[sizeof(msg) + sizeof(reply)];
+       enum client_transmission_status transmission_status;
+
+       ASSERT_LOCKED(client->lock);
 
        if (client->communication.outbound.queued_command_reply) {
                /* Protocol error. */
@@ -2650,22 +3320,214 @@ int client_send_command_reply(struct notification_client *client,
                        &client->communication.outbound.buffer,
                        buffer, sizeof(buffer));
        if (ret) {
-               goto error;
+               goto error;
+       }
+
+       transmission_status = client_flush_outgoing_queue(client);
+       ret = client_handle_transmission_status(
+                       client, transmission_status, state);
+       if (ret) {
+               goto error;
+       }
+
+       if (client->communication.outbound.buffer.size != 0) {
+               /* Queue could not be emptied. */
+               client->communication.outbound.queued_command_reply = true;
+       }
+
+       return 0;
+error:
+       return -1;
+}
+
+static
+int client_handle_message_unknown(struct notification_client *client,
+               struct notification_thread_state *state)
+{
+       int ret;
+
+       pthread_mutex_lock(&client->lock);
+
+       /*
+        * Receiving the message header. The dispatch function will be
+        * called again once the rest of the message has been received and
+        * can be interpreted.
+        */
+       const struct lttng_notification_channel_message *msg;
+
+       assert(sizeof(*msg) == client->communication.inbound.buffer.size);
+       msg = (const struct lttng_notification_channel_message *)
+                             client->communication.inbound.buffer.data;
+
+       if (msg->size == 0 ||
+                       msg->size > DEFAULT_MAX_NOTIFICATION_CLIENT_MESSAGE_PAYLOAD_SIZE) {
+               ERR("[notification-thread] Invalid notification channel message: length = %u",
+                               msg->size);
+               ret = -1;
+               goto end;
+       }
+
+       switch (msg->type) {
+       case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_SUBSCRIBE:
+       case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_UNSUBSCRIBE:
+       case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_HANDSHAKE:
+               break;
+       default:
+               ret = -1;
+               ERR("[notification-thread] Invalid notification channel message: unexpected message type");
+               goto end;
+       }
+
+       client->communication.inbound.bytes_to_receive = msg->size;
+       client->communication.inbound.msg_type =
+                       (enum lttng_notification_channel_message_type) msg->type;
+       ret = lttng_dynamic_buffer_set_size(
+                       &client->communication.inbound.buffer, msg->size);
+end:
+       pthread_mutex_unlock(&client->lock);
+       return ret;
+}
+
+static
+int client_handle_message_handshake(struct notification_client *client,
+               struct notification_thread_state *state)
+{
+       int ret;
+       struct lttng_notification_channel_command_handshake *handshake_client;
+       const struct lttng_notification_channel_command_handshake handshake_reply = {
+                       .major = LTTNG_NOTIFICATION_CHANNEL_VERSION_MAJOR,
+                       .minor = LTTNG_NOTIFICATION_CHANNEL_VERSION_MINOR,
+       };
+       const struct lttng_notification_channel_message msg_header = {
+                       .type = LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_HANDSHAKE,
+                       .size = sizeof(handshake_reply),
+       };
+       enum lttng_notification_channel_status status =
+                       LTTNG_NOTIFICATION_CHANNEL_STATUS_OK;
+       char send_buffer[sizeof(msg_header) + sizeof(handshake_reply)];
+       enum client_transmission_status transmission_status;
+
+       pthread_mutex_lock(&client->lock);
+
+       memcpy(send_buffer, &msg_header, sizeof(msg_header));
+       memcpy(send_buffer + sizeof(msg_header), &handshake_reply,
+                       sizeof(handshake_reply));
+
+       handshake_client =
+                       (struct lttng_notification_channel_command_handshake *)
+                                       client->communication.inbound.buffer
+                                                       .data;
+       client->major = handshake_client->major;
+       client->minor = handshake_client->minor;
+       if (!client->communication.inbound.creds_received) {
+               ERR("[notification-thread] No credentials received from client");
+               ret = -1;
+               goto end;
+       }
+
+       client->uid = LTTNG_SOCK_GET_UID_CRED(
+                       &client->communication.inbound.creds);
+       client->gid = LTTNG_SOCK_GET_GID_CRED(
+                       &client->communication.inbound.creds);
+       DBG("[notification-thread] Received handshake from client (uid = %u, gid = %u) with version %i.%i",
+                       client->uid, client->gid, (int) client->major,
+                       (int) client->minor);
+
+       if (handshake_client->major !=
+                       LTTNG_NOTIFICATION_CHANNEL_VERSION_MAJOR) {
+               status = LTTNG_NOTIFICATION_CHANNEL_STATUS_UNSUPPORTED_VERSION;
+       }
+
+       ret = lttng_dynamic_buffer_append(
+                       &client->communication.outbound.buffer, send_buffer,
+                       sizeof(send_buffer));
+       if (ret) {
+               ERR("[notification-thread] Failed to send protocol version to notification channel client");
+               goto end;
+       }
+
+       transmission_status = client_flush_outgoing_queue(client);
+       ret = client_handle_transmission_status(
+                       client, transmission_status, state);
+       if (ret) {
+               goto end;
+       }
+
+       ret = client_send_command_reply(client, state, status);
+       if (ret) {
+               ERR("[notification-thread] Failed to send reply to notification channel client");
+               goto end;
+       }
+
+       /* Set reception state to receive the next message header. */
+       ret = client_reset_inbound_state(client);
+       if (ret) {
+               ERR("[notification-thread] Failed to reset client communication's inbound state");
+               goto end;
+       }
+       client->validated = true;
+       client->communication.active = true;
+
+end:
+       pthread_mutex_unlock(&client->lock);
+       return ret;
+}
+
+static
+int client_handle_message_subscription(
+               struct notification_client *client,
+               enum lttng_notification_channel_message_type msg_type,
+               struct notification_thread_state *state)
+{
+       int ret;
+       struct lttng_condition *condition;
+       enum lttng_notification_channel_status status =
+                       LTTNG_NOTIFICATION_CHANNEL_STATUS_OK;
+       const struct lttng_buffer_view condition_view =
+                       lttng_buffer_view_from_dynamic_buffer(
+                                       &client->communication.inbound.buffer,
+                                       0, -1);
+       size_t expected_condition_size;
+
+       pthread_mutex_lock(&client->lock);
+       expected_condition_size = client->communication.inbound.buffer.size;
+       pthread_mutex_unlock(&client->lock);
+
+       ret = lttng_condition_create_from_buffer(&condition_view, &condition);
+       if (ret != expected_condition_size) {
+               ERR("[notification-thread] Malformed condition received from client");
+               goto end;
+       }
+
+       if (msg_type == LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_SUBSCRIBE) {
+               ret = notification_thread_client_subscribe(
+                               client, condition, state, &status);
+       } else {
+               ret = notification_thread_client_unsubscribe(
+                               client, condition, state, &status);
+       }
+       if (ret) {
+               goto end;
        }
 
-       ret = client_flush_outgoing_queue(client, state);
+       pthread_mutex_lock(&client->lock);
+       ret = client_send_command_reply(client, state, status);
        if (ret) {
-               goto error;
+               ERR("[notification-thread] Failed to send reply to notification channel client");
+               goto end_unlock;
        }
 
-       if (client->communication.outbound.buffer.size != 0) {
-               /* Queue could not be emptied. */
-               client->communication.outbound.queued_command_reply = true;
+       /* Set reception state to receive the next message header. */
+       ret = client_reset_inbound_state(client);
+       if (ret) {
+               ERR("[notification-thread] Failed to reset client communication's inbound state");
+               goto end_unlock;
        }
 
-       return 0;
-error:
-       return -1;
+end_unlock:
+       pthread_mutex_unlock(&client->lock);
+end:
+       return ret;
 }
 
 static
@@ -2687,158 +3549,19 @@ int client_dispatch_message(struct notification_client *client,
        switch (client->communication.inbound.msg_type) {
        case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_UNKNOWN:
        {
-               /*
-                * Receiving message header. The function will be called again
-                * once the rest of the message as been received and can be
-                * interpreted.
-                */
-               const struct lttng_notification_channel_message *msg;
-
-               assert(sizeof(*msg) ==
-                               client->communication.inbound.buffer.size);
-               msg = (const struct lttng_notification_channel_message *)
-                               client->communication.inbound.buffer.data;
-
-               if (msg->size == 0 || msg->size > DEFAULT_MAX_NOTIFICATION_CLIENT_MESSAGE_PAYLOAD_SIZE) {
-                       ERR("[notification-thread] Invalid notification channel message: length = %u", msg->size);
-                       ret = -1;
-                       goto end;
-               }
-
-               switch (msg->type) {
-               case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_SUBSCRIBE:
-               case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_UNSUBSCRIBE:
-               case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_HANDSHAKE:
-                       break;
-               default:
-                       ret = -1;
-                       ERR("[notification-thread] Invalid notification channel message: unexpected message type");
-                       goto end;
-               }
-
-               client->communication.inbound.bytes_to_receive = msg->size;
-               client->communication.inbound.msg_type =
-                               (enum lttng_notification_channel_message_type) msg->type;
-               ret = lttng_dynamic_buffer_set_size(
-                               &client->communication.inbound.buffer, msg->size);
-               if (ret) {
-                       goto end;
-               }
+               ret = client_handle_message_unknown(client, state);
                break;
        }
        case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_HANDSHAKE:
        {
-               struct lttng_notification_channel_command_handshake *handshake_client;
-               struct lttng_notification_channel_command_handshake handshake_reply = {
-                       .major = LTTNG_NOTIFICATION_CHANNEL_VERSION_MAJOR,
-                       .minor = LTTNG_NOTIFICATION_CHANNEL_VERSION_MINOR,
-               };
-               struct lttng_notification_channel_message msg_header = {
-                       .type = LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_HANDSHAKE,
-                       .size = sizeof(handshake_reply),
-               };
-               enum lttng_notification_channel_status status =
-                               LTTNG_NOTIFICATION_CHANNEL_STATUS_OK;
-               char send_buffer[sizeof(msg_header) + sizeof(handshake_reply)];
-
-               memcpy(send_buffer, &msg_header, sizeof(msg_header));
-               memcpy(send_buffer + sizeof(msg_header), &handshake_reply,
-                               sizeof(handshake_reply));
-
-               handshake_client =
-                               (struct lttng_notification_channel_command_handshake *)
-                                       client->communication.inbound.buffer.data;
-               client->major = handshake_client->major;
-               client->minor = handshake_client->minor;
-               if (!client->communication.inbound.creds_received) {
-                       ERR("[notification-thread] No credentials received from client");
-                       ret = -1;
-                       goto end;
-               }
-
-               client->uid = LTTNG_SOCK_GET_UID_CRED(
-                               &client->communication.inbound.creds);
-               client->gid = LTTNG_SOCK_GET_GID_CRED(
-                               &client->communication.inbound.creds);
-               DBG("[notification-thread] Received handshake from client (uid = %u, gid = %u) with version %i.%i",
-                               client->uid, client->gid, (int) client->major,
-                               (int) client->minor);
-
-               if (handshake_client->major != LTTNG_NOTIFICATION_CHANNEL_VERSION_MAJOR) {
-                       status = LTTNG_NOTIFICATION_CHANNEL_STATUS_UNSUPPORTED_VERSION;
-               }
-
-               ret = lttng_dynamic_buffer_append(&client->communication.outbound.buffer,
-                               send_buffer, sizeof(send_buffer));
-               if (ret) {
-                       ERR("[notification-thread] Failed to send protocol version to notification channel client");
-                       goto end;
-               }
-
-               ret = client_flush_outgoing_queue(client, state);
-               if (ret) {
-                       goto end;
-               }
-
-               ret = client_send_command_reply(client, state, status);
-               if (ret) {
-                       ERR("[notification-thread] Failed to send reply to notification channel client");
-                       goto end;
-               }
-
-               /* Set reception state to receive the next message header. */
-               ret = client_reset_inbound_state(client);
-               if (ret) {
-                       ERR("[notification-thread] Failed to reset client communication's inbound state");
-                       goto end;
-               }
-               client->validated = true;
+               ret = client_handle_message_handshake(client, state);
                break;
        }
        case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_SUBSCRIBE:
        case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_UNSUBSCRIBE:
        {
-               struct lttng_condition *condition;
-               enum lttng_notification_channel_status status =
-                               LTTNG_NOTIFICATION_CHANNEL_STATUS_OK;
-               const struct lttng_buffer_view condition_view =
-                               lttng_buffer_view_from_dynamic_buffer(
-                                       &client->communication.inbound.buffer,
-                                       0, -1);
-               size_t expected_condition_size =
-                               client->communication.inbound.buffer.size;
-
-               ret = lttng_condition_create_from_buffer(&condition_view,
-                               &condition);
-               if (ret != expected_condition_size) {
-                       ERR("[notification-thread] Malformed condition received from client");
-                       goto end;
-               }
-
-               if (client->communication.inbound.msg_type ==
-                               LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_SUBSCRIBE) {
-                       ret = notification_thread_client_subscribe(client,
-                                       condition, state, &status);
-               } else {
-                       ret = notification_thread_client_unsubscribe(client,
-                                       condition, state, &status);
-               }
-               if (ret) {
-                       goto end;
-               }
-
-               ret = client_send_command_reply(client, state, status);
-               if (ret) {
-                       ERR("[notification-thread] Failed to send reply to notification channel client");
-                       goto end;
-               }
-
-               /* Set reception state to receive the next message header. */
-               ret = client_reset_inbound_state(client);
-               if (ret) {
-                       ERR("[notification-thread] Failed to reset client communication's inbound state");
-                       goto end;
-               }
+               ret = client_handle_message_subscription(client,
+                               client->communication.inbound.msg_type, state);
                break;
        }
        default:
@@ -2856,6 +3579,7 @@ int handle_notification_thread_client_in(
        struct notification_client *client;
        ssize_t recv_ret;
        size_t offset;
+       bool message_is_complete = false;
 
        client = get_client_from_socket(socket, state);
        if (!client) {
@@ -2864,6 +3588,7 @@ int handle_notification_thread_client_in(
                goto end;
        }
 
+       pthread_mutex_lock(&client->lock);
        offset = client->communication.inbound.buffer.size -
                        client->communication.inbound.bytes_to_receive;
        if (client->communication.inbound.expect_creds) {
@@ -2880,12 +3605,17 @@ int handle_notification_thread_client_in(
                                client->communication.inbound.buffer.data + offset,
                                client->communication.inbound.bytes_to_receive);
        }
+       if (recv_ret >= 0) {
+               client->communication.inbound.bytes_to_receive -= recv_ret;
+               message_is_complete = client->communication.inbound
+                                                     .bytes_to_receive == 0;
+       }
+       pthread_mutex_unlock(&client->lock);
        if (recv_ret < 0) {
                goto error_disconnect_client;
        }
 
-       client->communication.inbound.bytes_to_receive -= recv_ret;
-       if (client->communication.inbound.bytes_to_receive == 0) {
+       if (message_is_complete) {
                ret = client_dispatch_message(client, state);
                if (ret) {
                        /*
@@ -2894,13 +3624,11 @@ int handle_notification_thread_client_in(
                         */
                        goto error_disconnect_client;
                }
-       } else {
-               goto end;
        }
 end:
        return ret;
 error_disconnect_client:
-       ret = handle_notification_thread_client_disconnect(socket, state);
+       ret = notification_thread_client_disconnect(client, state);
        return ret;
 }
 
@@ -2910,6 +3638,7 @@ int handle_notification_thread_client_out(
 {
        int ret;
        struct notification_client *client;
+       enum client_transmission_status transmission_status;
 
        client = get_client_from_socket(socket, state);
        if (!client) {
@@ -2918,7 +3647,11 @@ int handle_notification_thread_client_out(
                goto end;
        }
 
-       ret = client_flush_outgoing_queue(client, state);
+       pthread_mutex_lock(&client->lock);
+       transmission_status = client_flush_outgoing_queue(client);
+       ret = client_handle_transmission_status(
+                       client, transmission_status, state);
+       pthread_mutex_unlock(&client->lock);
        if (ret) {
                goto end;
        }
@@ -3089,33 +3822,77 @@ end:
 }
 
 static
-int client_enqueue_dropped_notification(struct notification_client *client,
-               struct notification_thread_state *state)
+/*
+ * Enqueue a "notification dropped" message in `client`'s outgoing queue.
+ * Consecutive drops are coalesced: if a dropped-notification message is
+ * already pending for this client, nothing is added.
+ *
+ * The caller must hold client->lock (checked by ASSERT_LOCKED).
+ * Returns 0 on success (or coalesced no-op), non-zero on allocation failure.
+ */
+int client_notification_overflow(struct notification_client *client)
 {
-       int ret;
-       struct lttng_notification_channel_message msg = {
+       int ret = 0;
+       const struct lttng_notification_channel_message msg = {
                .type = (int8_t) LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_NOTIFICATION_DROPPED,
-               .size = 0,
        };
 
+       ASSERT_LOCKED(client->lock);
+
+       DBG("Dropping notification addressed to client (socket fd = %i)",
+                       client->socket);
+       if (client->communication.outbound.dropped_notification) {
+               /*
+                * The client already has a "notification dropped" message
+                * in its outgoing queue. Nothing to do since all
+                * of those messages are coalesced.
+                */
+               goto end;
+       }
+
+       client->communication.outbound.dropped_notification = true;
        ret = lttng_dynamic_buffer_append(
                        &client->communication.outbound.buffer, &msg,
                        sizeof(msg));
+       if (ret) {
+               PERROR("Failed to enqueue \"dropped notification\" message in client's (socket fd = %i) outgoing queue",
+                               client->socket);
+       }
+end:
        return ret;
 }
 
+/*
+ * Adapter matching the report_client_transmission_result_cb signature;
+ * forwards to client_handle_transmission_status() with the notification
+ * thread state carried through the opaque user_data pointer.
+ */
+static int client_handle_transmission_status_wrapper(
+		struct notification_client *client,
+		enum client_transmission_status status,
+		void *user_data)
+{
+	struct notification_thread_state *state = user_data;
+
+	return client_handle_transmission_status(client, status, state);
+}
+
 static
 int send_evaluation_to_clients(const struct lttng_trigger *trigger,
 		const struct lttng_evaluation *evaluation,
 		struct notification_client_list* client_list,
 		struct notification_thread_state *state,
-		uid_t channel_uid, gid_t channel_gid)
+		uid_t object_uid, gid_t object_gid)
+{
+	/*
+	 * Thin wrapper: forward the evaluation to the generic client-list
+	 * send path, using the trigger's condition/credentials and the
+	 * monitored object's uid/gid as the source-object credentials.
+	 */
+	return notification_client_list_send_evaluation(client_list,
+			lttng_trigger_get_const_condition(trigger), evaluation,
+			lttng_trigger_get_credentials(trigger),
+			&(struct lttng_credentials){
+					.uid = object_uid, .gid = object_gid},
+			client_handle_transmission_status_wrapper, state);
+}
+
+LTTNG_HIDDEN
+int notification_client_list_send_evaluation(
+               struct notification_client_list *client_list,
+               const struct lttng_condition *condition,
+               const struct lttng_evaluation *evaluation,
+               const struct lttng_credentials *trigger_creds,
+               const struct lttng_credentials *source_object_creds,
+               report_client_transmission_result_cb client_report,
+               void *user_data)
 {
        int ret = 0;
        struct lttng_dynamic_buffer msg_buffer;
        struct notification_client_list_element *client_list_element, *tmp;
        const struct lttng_notification notification = {
-               .condition = (struct lttng_condition *) lttng_trigger_get_const_condition(trigger),
+               .condition = (struct lttng_condition *) condition,
                .evaluation = (struct lttng_evaluation *) evaluation,
        };
        struct lttng_notification_channel_message msg_header = {
@@ -3141,16 +3918,40 @@ int send_evaluation_to_clients(const struct lttng_trigger *trigger,
        ((struct lttng_notification_channel_message * ) msg_buffer.data)->size =
                        (uint32_t) (msg_buffer.size - sizeof(msg_header));
 
+       pthread_mutex_lock(&client_list->lock);
        cds_list_for_each_entry_safe(client_list_element, tmp,
                        &client_list->list, node) {
+               enum client_transmission_status transmission_status;
                struct notification_client *client =
                                client_list_element->client;
 
-               if (client->uid != channel_uid && client->gid != channel_gid &&
-                               client->uid != 0) {
-                       /* Client is not allowed to monitor this channel. */
-                       DBG("[notification-thread] Skipping client at it does not have the permission to receive notification for this channel");
-                       continue;
+               ret = 0;
+               pthread_mutex_lock(&client->lock);
+               if (source_object_creds) {
+                       if (client->uid != source_object_creds->uid &&
+                                       client->gid != source_object_creds->gid &&
+                                       client->uid != 0) {
+                               /*
+                                * Client is not allowed to monitor this
+                                * object.
+                                */
+                               DBG("[notification-thread] Skipping client at it does not have the object permission to receive notification for this trigger");
+                               goto unlock_client;
+                       }
+               }
+
+               /* TODO: what is the behavior for root client on non root
+                * trigger? Since multiple triggers (different user) can have the same condition
+                * but with different action group that can have each a notify.
+                * Does the root client receive multiple notification for all
+                * those triggers with the same condition or only notification
+                * for triggers the root user configured?
+                * For now we do the later. All users including the root user
+                * can only receive notification from trigger it registered.
+                */
+               if (client->uid != trigger_creds->uid && client->gid != trigger_creds->gid) {
+                       DBG("[notification-thread] Skipping client at it does not have the permission to receive notification for this trigger");
+                       goto unlock_client;
                }
 
                DBG("[notification-thread] Sending notification to client (fd = %i, %zu bytes)",
@@ -3163,37 +3964,302 @@ int send_evaluation_to_clients(const struct lttng_trigger *trigger,
                         * notification since the socket spilled-over to the
                         * queue.
                         */
-                       DBG("[notification-thread] Dropping notification addressed to client (socket fd = %i)",
-                                       client->socket);
-                       if (!client->communication.outbound.dropped_notification) {
-                               client->communication.outbound.dropped_notification = true;
-                               ret = client_enqueue_dropped_notification(
-                                               client, state);
-                               if (ret) {
-                                       goto end;
-                               }
+                       ret = client_notification_overflow(client);
+                       if (ret) {
+                               goto unlock_client;
                        }
-                       continue;
                }
 
                ret = lttng_dynamic_buffer_append_buffer(
                                &client->communication.outbound.buffer,
                                &msg_buffer);
                if (ret) {
-                       goto end;
+                       goto unlock_client;
                }
 
-               ret = client_flush_outgoing_queue(client, state);
+               transmission_status = client_flush_outgoing_queue(client);
+               ret = client_report(client, transmission_status, user_data);
                if (ret) {
-                       goto end;
+                       goto unlock_client;
+               }
+unlock_client:
+               pthread_mutex_unlock(&client->lock);
+               if (ret) {
+                       goto end_unlock_list;
                }
        }
        ret = 0;
+
+end_unlock_list:
+       pthread_mutex_unlock(&client_list->lock);
 end:
        lttng_dynamic_buffer_reset(&msg_buffer);
        return ret;
 }
 
+/*
+ * Evaluate and dispatch a "notify" action for an event-rule trigger hit.
+ * Returns 0 on success (including "no subscribed client"), -1 on error.
+ */
+int perform_event_action_notify(struct notification_thread_state *state,
+		const struct lttng_trigger *trigger,
+		const struct lttng_trigger_notification *notification,
+		const struct lttng_action *action)
+{
+	int ret = 0;
+	struct notification_client_list *client_list;
+	struct lttng_evaluation *evaluation = NULL;
+	const struct lttng_credentials *creds =
+			lttng_trigger_get_credentials(trigger);
+
+	/*
+	 * Check if any client is subscribed to the result of this
+	 * evaluation.
+	 */
+	client_list = get_client_list_from_condition(state, trigger->condition);
+	assert(client_list);
+	if (cds_list_empty(&client_list->list)) {
+		goto end;
+	}
+
+	evaluation = lttng_evaluation_event_rule_create(trigger->name);
+	if (!evaluation) {
+		ERR("Failed to create event rule hit evaluation");
+		ret = -1;
+		goto end;
+	}
+
+	/*
+	 * Dispatch the evaluation result to all clients.
+	 *
+	 * Note that the credentials passed here are the trigger's: there is
+	 * no internal object bound to the trigger per se, and credential
+	 * validation is done at registration time for event-rule based
+	 * triggers. For a channel, credential validation can only be done
+	 * on notify since the trigger can be registered before the
+	 * channel/session is created.
+	 */
+	ret = send_evaluation_to_clients(trigger, evaluation, client_list,
+			state, creds->uid, creds->gid);
+	lttng_evaluation_destroy(evaluation);
+end:
+	/*
+	 * Drop the reference acquired by get_client_list_from_condition();
+	 * the original code leaked it on every path.
+	 */
+	notification_client_list_put(client_list);
+	return ret;
+}
+
+/*
+ * Run the action(s) attached to `trigger` for the given notification.
+ * This can be called recursively for action groups; pass NULL for
+ * `action` on the first call to start from the trigger's top-level
+ * action. Returns 0 on success, negative on fatal error.
+ */
+int perform_event_action(struct notification_thread_state *state,
+		const struct lttng_trigger *trigger,
+		const struct lttng_trigger_notification *notification,
+		const struct lttng_action *action)
+{
+	int ret = 0;
+	enum lttng_action_type action_type;
+
+	assert(trigger);
+
+	if (!action) {
+		action = lttng_trigger_get_const_action(trigger);
+	}
+
+	action_type = lttng_action_get_type_const(action);
+	DBG("Handling action %s for trigger id %s (%" PRIu64 ")",
+			lttng_action_type_string(action_type), trigger->name,
+			trigger->key.value);
+
+	switch (action_type) {
+	case LTTNG_ACTION_TYPE_GROUP:
+	{
+		/* Recurse into each member of the group. */
+		unsigned int i, count = 0;
+
+		(void) lttng_action_group_get_count(action, &count);
+		/* Unsigned index: count is unsigned int (was a signed int). */
+		for (i = 0; i < count; i++) {
+			const struct lttng_action *member =
+					lttng_action_group_get_at_index_const(
+							action, i);
+
+			assert(member);
+			ret = perform_event_action(state, trigger,
+					notification, member);
+			if (ret < 0) {
+				goto end;
+			}
+		}
+		break;
+	}
+	case LTTNG_ACTION_TYPE_NOTIFY:
+		ret = perform_event_action_notify(state, trigger,
+				notification, action);
+		break;
+	default:
+		/* Other action types are not handled by this thread. */
+		break;
+	}
+end:
+	return ret;
+}
+
+/*
+ * Handle one trigger-notification message read from a UST or kernel
+ * event source pipe: look up the trigger bound to the received token
+ * and enqueue its actions on the action executor. Returns 0 on success
+ * (including benign cases such as an unknown token), -1 on fatal error.
+ */
+int handle_notification_thread_event(struct notification_thread_state *state,
+		int pipe,
+		enum lttng_domain_type domain)
+{
+	int ret;
+	struct lttng_ust_trigger_notification ust_notification;
+	uint64_t kernel_notification;
+	struct cds_lfht_node *node;
+	struct cds_lfht_iter iter;
+	struct notification_trigger_tokens_ht_element *element;
+	struct lttng_trigger_notification notification;
+	void *reception_buffer;
+	size_t reception_size;
+	enum action_executor_status executor_status;
+	struct notification_client_list *client_list = NULL;
+
+	notification.type = domain;
+
+	/* Select the reception buffer matching the domain's wire format. */
+	switch (domain) {
+	case LTTNG_DOMAIN_UST:
+		reception_buffer = (void *) &ust_notification;
+		reception_size = sizeof(ust_notification);
+		notification.u.ust = &ust_notification;
+		break;
+	case LTTNG_DOMAIN_KERNEL:
+		reception_buffer = (void *) &kernel_notification;
+		reception_size = sizeof(kernel_notification);
+		notification.u.kernel = &kernel_notification;
+		break;
+	default:
+		assert(0);
+	}
+
+	/*
+	 * The monitoring pipe only holds messages smaller than PIPE_BUF,
+	 * ensuring that read/write of sampling messages are atomic.
+	 */
+	/* TODO: should we read as much as we can? EWOULDBLOCK? */
+	ret = lttng_read(pipe, reception_buffer, reception_size);
+	if (ret != reception_size) {
+		ERR("[notification-thread] Failed to read from event source pipe (fd = %i)",
+				pipe);
+		/*
+		 * TODO: Should this error out completely? This can happen
+		 * when an app is killed as of today; returning -1 would
+		 * cause the whole thread to die and take the notification
+		 * subsystem down with it.
+		 */
+		goto end;
+	}
+
+	switch (domain) {
+	case LTTNG_DOMAIN_UST:
+		notification.id = ust_notification.id;
+		break;
+	case LTTNG_DOMAIN_KERNEL:
+		notification.id = kernel_notification;
+		break;
+	default:
+		assert(0);
+	}
+
+	/* Find triggers associated with this token. */
+	rcu_read_lock();
+	cds_lfht_lookup(state->trigger_tokens_ht,
+			hash_key_u64(&notification.id, lttng_ht_seed),
+			match_trigger_token, &notification.id, &iter);
+	node = cds_lfht_iter_get_node(&iter);
+	if (caa_unlikely(!node)) {
+		/*
+		 * TODO: is this an error? This might happen if the receive
+		 * side is slow to process events from the source and the
+		 * trigger was removed while the app kept emitting. This
+		 * raises the question of trigger lifetime: when can a
+		 * trigger be removed, and can we guarantee that all events
+		 * bearing its token have been processed? Do we want to
+		 * provide this guarantee?
+		 *
+		 * Update: encountered when using a trigger on sched_switch
+		 * and then removing it. The event frequency is high enough
+		 * that we end up exactly in the mentioned scenario; ignoring
+		 * the stray token appears to be the best way to handle this.
+		 */
+		ret = 0;
+		goto end_unlock;
+	}
+	element = caa_container_of(node,
+			struct notification_trigger_tokens_ht_element,
+			node);
+
+	if (!lttng_trigger_is_ready_to_fire(element->trigger)) {
+		ret = 0;
+		goto end_unlock;
+	}
+
+	client_list = get_client_list_from_condition(state,
+			lttng_trigger_get_const_condition(element->trigger));
+	executor_status = action_executor_enqueue(
+			state->executor, element->trigger, client_list);
+	switch (executor_status) {
+	case ACTION_EXECUTOR_STATUS_OK:
+		ret = 0;
+		break;
+	case ACTION_EXECUTOR_STATUS_OVERFLOW:
+	{
+		struct notification_client_list_element *client_list_element,
+				*tmp;
+
+		/*
+		 * Not a fatal error; this is expected and simply means the
+		 * executor has too much work queued already.
+		 */
+		ret = 0;
+
+		if (!client_list) {
+			break;
+		}
+
+		/* Warn clients that a notification (or more) was dropped. */
+		pthread_mutex_lock(&client_list->lock);
+		cds_list_for_each_entry_safe(client_list_element, tmp,
+				&client_list->list, node) {
+			enum client_transmission_status transmission_status;
+			struct notification_client *client =
+					client_list_element->client;
+
+			pthread_mutex_lock(&client->lock);
+			ret = client_notification_overflow(client);
+			if (ret) {
+				/* Fatal error. */
+				goto next_client;
+			}
+
+			transmission_status =
+					client_flush_outgoing_queue(client);
+			ret = client_handle_transmission_status(
+					client, transmission_status, state);
+			if (ret) {
+				/* Fatal error. */
+				goto next_client;
+			}
+next_client:
+			pthread_mutex_unlock(&client->lock);
+			if (ret) {
+				break;
+			}
+		}
+		/*
+		 * Fix: release the list lock taken above; the original code
+		 * re-locked here, deadlocking on the next acquisition.
+		 */
+		pthread_mutex_unlock(&client_list->lock);
+		break;
+	}
+	case ACTION_EXECUTOR_STATUS_ERROR:
+		/* Fatal error, shut down everything. */
+		ERR("Fatal error encountered while enqueuing action");
+		ret = -1;
+		goto end_unlock;
+	default:
+		/* Unhandled error. */
+		abort();
+	}
+
+end_unlock:
+	notification_client_list_put(client_list);
+	rcu_read_unlock();
+end:
+	return ret;
+}
+
 int handle_notification_thread_channel_sample(
                struct notification_thread_state *state, int pipe,
                enum lttng_domain_type domain)
@@ -3332,16 +4398,23 @@ int handle_notification_thread_channel_sample(
                        node) {
                const struct lttng_condition *condition;
                const struct lttng_action *action;
-               const struct lttng_trigger *trigger;
-               struct notification_client_list *client_list;
+               struct lttng_trigger *trigger;
+               struct notification_client_list *client_list = NULL;
                struct lttng_evaluation *evaluation = NULL;
+               bool client_list_is_empty;
 
+               ret = 0;
                trigger = trigger_list_element->trigger;
                condition = lttng_trigger_get_const_condition(trigger);
                assert(condition);
                action = lttng_trigger_get_const_action(trigger);
+               
+               if (!lttng_trigger_is_ready_to_fire(trigger)) {
+                       goto put_list;
+               }
 
                /* Notify actions are the only type currently supported. */
+               /* TODO support other type of action */
                assert(lttng_action_get_type_const(action) ==
                                LTTNG_ACTION_TYPE_NOTIFY);
 
@@ -3351,12 +4424,13 @@ int handle_notification_thread_channel_sample(
                 */
                client_list = get_client_list_from_condition(state, condition);
                assert(client_list);
-               if (cds_list_empty(&client_list->list)) {
+               client_list_is_empty = cds_list_empty(&client_list->list);
+               if (client_list_is_empty) {
                        /*
                         * No clients interested in the evaluation's result,
                         * skip it.
                         */
-                       continue;
+                       goto put_list;
                }
 
                ret = evaluate_buffer_condition(condition, &evaluation, state,
@@ -3366,11 +4440,11 @@ int handle_notification_thread_channel_sample(
                                latest_session_consumed_total,
                                channel_info);
                if (caa_unlikely(ret)) {
-                       goto end_unlock;
+                       goto put_list;
                }
 
                if (caa_likely(!evaluation)) {
-                       continue;
+                       goto put_list;
                }
 
                /* Dispatch evaluation result to all clients. */
@@ -3379,8 +4453,10 @@ int handle_notification_thread_channel_sample(
                                channel_info->session_info->uid,
                                channel_info->session_info->gid);
                lttng_evaluation_destroy(evaluation);
+put_list:
+               notification_client_list_put(client_list);
                if (caa_unlikely(ret)) {
-                       goto end_unlock;
+                       break;
                }
        }
 end_unlock:
This page took 0.085854 seconds and 5 git commands to generate.