+ rcu_read_lock();
+ /* Unpublish the list from the session_triggers_ht. */
+ cds_lfht_del(list->session_triggers_ht,
+ &list->session_triggers_ht_node);
+ rcu_read_unlock();
+ call_rcu(&list->rcu_node, free_session_trigger_list_rcu);
+}
+
+/*
+ * Append a trigger to a session trigger list.
+ *
+ * No reference is taken on 'trigger'; the list element merely points to it.
+ * Returns 0 on success, -1 on allocation failure.
+ */
+static
+int lttng_session_trigger_list_add(struct lttng_session_trigger_list *list,
+		struct lttng_trigger *trigger)
+{
+	struct lttng_trigger_list_element *element;
+
+	element = zmalloc(sizeof(*element));
+	if (!element) {
+		return -1;
+	}
+
+	CDS_INIT_LIST_HEAD(&element->node);
+	element->trigger = trigger;
+	cds_list_add(&element->node, &list->list);
+	return 0;
+}
+
+/*
+ * Evaluate whether 'trigger' is bound to the session named 'session_name'.
+ *
+ * Only session rotation conditions name a specific session; every other
+ * condition type never applies.
+ */
+static
+bool trigger_applies_to_session(const struct lttng_trigger *trigger,
+		const char *session_name)
+{
+	const struct lttng_condition *condition;
+	const char *condition_session_name;
+	enum lttng_condition_type condition_type;
+	enum lttng_condition_status condition_status;
+
+	condition = lttng_trigger_get_const_condition(trigger);
+	condition_type = lttng_condition_get_type(condition);
+	if (condition_type != LTTNG_CONDITION_TYPE_SESSION_ROTATION_ONGOING &&
+			condition_type != LTTNG_CONDITION_TYPE_SESSION_ROTATION_COMPLETED) {
+		return false;
+	}
+
+	condition_status = lttng_condition_session_rotation_get_session_name(
+			condition, &condition_session_name);
+	if (condition_status != LTTNG_CONDITION_STATUS_OK) {
+		ERR("[notification-thread] Failed to retrieve session rotation condition's session name");
+		return false;
+	}
+
+	assert(condition_session_name);
+	return !strcmp(condition_session_name, session_name);
+}
+
+/*
+ * Allocate and initialize an lttng_session_trigger_list which contains
+ * all triggers that apply to the session named 'session_name'.
+ *
+ * No ownership of 'session_name' is assumed by the session trigger list.
+ * It is the caller's responsibility to ensure the session name is alive
+ * for as long as this list is.
+ *
+ * NOTE(review): iterates triggers_ht, so the caller is assumed to hold the
+ * RCU read-side lock - confirm at every call site.
+ *
+ * Returns the new list, or NULL on error.
+ */
+static
+struct lttng_session_trigger_list *lttng_session_trigger_list_build(
+		const struct notification_thread_state *state,
+		const char *session_name)
+{
+	int trigger_count = 0;
+	struct lttng_session_trigger_list *session_trigger_list = NULL;
+	struct lttng_trigger_ht_element *trigger_ht_element = NULL;
+	struct cds_lfht_iter iter;
+
+	session_trigger_list = lttng_session_trigger_list_create(session_name,
+			state->session_triggers_ht);
+	if (!session_trigger_list) {
+		/*
+		 * Fix: this allocation was previously used unchecked, which
+		 * resulted in a NULL dereference in
+		 * lttng_session_trigger_list_add() on allocation failure.
+		 */
+		return NULL;
+	}
+
+	/* Add all triggers applying to the session named 'session_name'. */
+	cds_lfht_for_each_entry(state->triggers_ht, &iter, trigger_ht_element,
+			node) {
+		int ret;
+
+		if (!trigger_applies_to_session(trigger_ht_element->trigger,
+				session_name)) {
+			continue;
+		}
+
+		ret = lttng_session_trigger_list_add(session_trigger_list,
+				trigger_ht_element->trigger);
+		if (ret) {
+			goto error;
+		}
+
+		trigger_count++;
+	}
+
+	DBG("[notification-thread] Found %i triggers that apply to newly created session",
+			trigger_count);
+	return session_trigger_list;
+error:
+	lttng_session_trigger_list_destroy(session_trigger_list);
+	return NULL;
+}
+
+/*
+ * Look up the session_info published under 'name' in sessions_ht; if it is
+ * not known yet, build its session trigger list and create and publish a new
+ * session_info.
+ *
+ * Returns a reference owned by the caller (see session_info_get() below),
+ * which must be released with session_info_put(), or NULL on error.
+ */
+static
+struct session_info *find_or_create_session_info(
+ struct notification_thread_state *state,
+ const char *name, uid_t uid, gid_t gid)
+{
+ struct session_info *session = NULL;
+ struct cds_lfht_node *node;
+ struct cds_lfht_iter iter;
+ struct lttng_session_trigger_list *trigger_list;
+
+ rcu_read_lock();
+ cds_lfht_lookup(state->sessions_ht,
+ hash_key_str(name, lttng_ht_seed),
+ match_session,
+ name,
+ &iter);
+ node = cds_lfht_iter_get_node(&iter);
+ if (node) {
+ DBG("[notification-thread] Found session info of session \"%s\" (uid = %i, gid = %i)",
+ name, uid, gid);
+ session = caa_container_of(node, struct session_info,
+ sessions_ht_node);
+ /*
+ * NOTE(review): assumes a session name is never re-registered
+ * with different credentials - confirm with the session daemon's
+ * session lifetime guarantees.
+ */
+ assert(session->uid == uid);
+ assert(session->gid == gid);
+ /* Take the reference returned to the caller. */
+ session_info_get(session);
+ goto end;
+ }
+
+ /* Unknown session: gather the triggers applying to it by name. */
+ trigger_list = lttng_session_trigger_list_build(state, name);
+ if (!trigger_list) {
+ goto error;
+ }
+
+ /*
+ * On success, session_info_create() appears to take ownership of
+ * trigger_list (it is only destroyed here on failure).
+ */
+ session = session_info_create(name, uid, gid, trigger_list,
+ state->sessions_ht);
+ if (!session) {
+ ERR("[notification-thread] Failed to allocation session info for session \"%s\" (uid = %i, gid = %i)",
+ name, uid, gid);
+ lttng_session_trigger_list_destroy(trigger_list);
+ goto error;
+ }
+ trigger_list = NULL;
+
+ /* Publish the new session_info under its name. */
+ cds_lfht_add(state->sessions_ht, hash_key_str(name, lttng_ht_seed),
+ &session->sessions_ht_node);
+end:
+ rcu_read_unlock();
+ return session;
+error:
+ rcu_read_unlock();
+ /* 'session' is always NULL on this path; put is a no-op kept for safety. */
+ session_info_put(session);
+ return NULL;
+}
+
+/*
+ * Register a newly-created channel with the notification thread: create its
+ * channel_info, build the list of triggers that apply to it, and publish
+ * both in channels_ht and channel_triggers_ht.
+ *
+ * Returns 0 and sets *cmd_result to LTTNG_OK on success; returns 1 on error
+ * (NOTE(review): *cmd_result is left untouched on error - confirm callers
+ * pre-initialize it).
+ */
+static
+int handle_notification_thread_command_add_channel(
+		struct notification_thread_state *state,
+		const char *session_name, uid_t session_uid, gid_t session_gid,
+		const char *channel_name, enum lttng_domain_type channel_domain,
+		uint64_t channel_key_int, uint64_t channel_capacity,
+		enum lttng_error_code *cmd_result)
+{
+	struct cds_list_head trigger_list;
+	struct lttng_trigger_list_element *trigger_list_element, *tmp;
+	struct channel_info *new_channel_info = NULL;
+	struct channel_key channel_key = {
+		.key = channel_key_int,
+		.domain = channel_domain,
+	};
+	struct lttng_channel_trigger_list *channel_trigger_list = NULL;
+	struct lttng_trigger_ht_element *trigger_ht_element = NULL;
+	int trigger_count = 0;
+	struct cds_lfht_iter iter;
+	struct session_info *session_info = NULL;
+
+	DBG("[notification-thread] Adding channel %s from session %s, channel key = %" PRIu64 " in %s domain",
+			channel_name, session_name, channel_key_int,
+			channel_domain == LTTNG_DOMAIN_KERNEL ? "kernel" : "user space");
+
+	CDS_INIT_LIST_HEAD(&trigger_list);
+
+	session_info = find_or_create_session_info(state, session_name,
+			session_uid, session_gid);
+	if (!session_info) {
+		/* Allocation error or an internal error occurred. */
+		goto error;
+	}
+
+	new_channel_info = channel_info_create(channel_name, &channel_key,
+			channel_capacity, session_info);
+	if (!new_channel_info) {
+		goto error;
+	}
+
+	rcu_read_lock();
+	/* Build a list of all triggers applying to the new channel. */
+	cds_lfht_for_each_entry(state->triggers_ht, &iter, trigger_ht_element,
+			node) {
+		struct lttng_trigger_list_element *new_element;
+
+		if (!trigger_applies_to_channel(trigger_ht_element->trigger,
+				new_channel_info)) {
+			continue;
+		}
+
+		new_element = zmalloc(sizeof(*new_element));
+		if (!new_element) {
+			rcu_read_unlock();
+			goto error;
+		}
+		CDS_INIT_LIST_HEAD(&new_element->node);
+		new_element->trigger = trigger_ht_element->trigger;
+		cds_list_add(&new_element->node, &trigger_list);
+		trigger_count++;
+	}
+	rcu_read_unlock();
+
+	DBG("[notification-thread] Found %i triggers that apply to newly added channel",
+			trigger_count);
+	channel_trigger_list = zmalloc(sizeof(*channel_trigger_list));
+	if (!channel_trigger_list) {
+		goto error;
+	}
+	channel_trigger_list->channel_key = new_channel_info->key;
+	CDS_INIT_LIST_HEAD(&channel_trigger_list->list);
+	cds_lfht_node_init(&channel_trigger_list->channel_triggers_ht_node);
+	/* Transfer ownership of the built elements to the published list. */
+	cds_list_splice(&trigger_list, &channel_trigger_list->list);
+
+	rcu_read_lock();
+	/* Add channel to the channel_ht which owns the channel_infos. */
+	cds_lfht_add(state->channels_ht,
+			hash_channel_key(&new_channel_info->key),
+			&new_channel_info->channels_ht_node);
+	/*
+	 * Add the list of triggers associated with this channel to the
+	 * channel_triggers_ht.
+	 */
+	cds_lfht_add(state->channel_triggers_ht,
+			hash_channel_key(&new_channel_info->key),
+			&channel_trigger_list->channel_triggers_ht_node);
+	rcu_read_unlock();
+	session_info_put(session_info);
+	*cmd_result = LTTNG_OK;
+	return 0;
+error:
+	/*
+	 * Fix: trigger list elements accumulated before a failure (element or
+	 * channel_trigger_list allocation) were previously leaked.
+	 */
+	cds_list_for_each_entry_safe(trigger_list_element, tmp, &trigger_list,
+			node) {
+		cds_list_del(&trigger_list_element->node);
+		free(trigger_list_element);
+	}
+	channel_info_destroy(new_channel_info);
+	session_info_put(session_info);
+	return 1;
+}
+
+/* RCU reclamation callback: frees a channel trigger list after the grace period. */
+static
+void free_channel_trigger_list_rcu(struct rcu_head *node)
+{
+ free(caa_container_of(node, struct lttng_channel_trigger_list,
+ rcu_node));
+}
+
+/* RCU reclamation callback: frees a channel state sample after the grace period. */
+static
+void free_channel_state_sample_rcu(struct rcu_head *node)
+{
+ free(caa_container_of(node, struct channel_state_sample,
+ rcu_node));
+}
+
+/*
+ * Unregister a channel from the notification thread: tear down its trigger
+ * list, its last sampled state and its channel_info, unpublishing each from
+ * the corresponding hash table.
+ *
+ * Always returns 0; *cmd_result is set to LTTNG_OK even when the channel is
+ * unknown (NOTE(review): the unknown-channel case is logged as a severe
+ * internal error but still reported as success - confirm this is intended).
+ */
+static
+int handle_notification_thread_command_remove_channel(
+ struct notification_thread_state *state,
+ uint64_t channel_key, enum lttng_domain_type domain,
+ enum lttng_error_code *cmd_result)
+{
+ struct cds_lfht_node *node;
+ struct cds_lfht_iter iter;
+ struct lttng_channel_trigger_list *trigger_list;
+ struct lttng_trigger_list_element *trigger_list_element, *tmp;
+ struct channel_key key = { .key = channel_key, .domain = domain };
+ struct channel_info *channel_info;
+
+ DBG("[notification-thread] Removing channel key = %" PRIu64 " in %s domain",
+ channel_key, domain == LTTNG_DOMAIN_KERNEL ? "kernel" : "user space");
+
+ rcu_read_lock();
+
+ cds_lfht_lookup(state->channel_triggers_ht,
+ hash_channel_key(&key),
+ match_channel_trigger_list,
+ &key,
+ &iter);
+ node = cds_lfht_iter_get_node(&iter);
+ /*
+ * There is a severe internal error if we are being asked to remove a
+ * channel that doesn't exist.
+ */
+ if (!node) {
+ ERR("[notification-thread] Channel being removed is unknown to the notification thread");
+ goto end;
+ }
+
+ /* Free the list of triggers associated with this channel. */
+ trigger_list = caa_container_of(node, struct lttng_channel_trigger_list,
+ channel_triggers_ht_node);
+ cds_list_for_each_entry_safe(trigger_list_element, tmp,
+ &trigger_list->list, node) {
+ cds_list_del(&trigger_list_element->node);
+ free(trigger_list_element);
+ }
+ cds_lfht_del(state->channel_triggers_ht, node);
+ /* Defer freeing the list head until readers are done with it. */
+ call_rcu(&trigger_list->rcu_node, free_channel_trigger_list_rcu);
+
+ /* Free sampled channel state. */
+ cds_lfht_lookup(state->channel_state_ht,
+ hash_channel_key(&key),
+ match_channel_state_sample,
+ &key,
+ &iter);
+ node = cds_lfht_iter_get_node(&iter);
+ /*
+ * This is expected to be NULL if the channel is destroyed before we
+ * received a sample.
+ */
+ if (node) {
+ struct channel_state_sample *sample = caa_container_of(node,
+ struct channel_state_sample,
+ channel_state_ht_node);
+
+ cds_lfht_del(state->channel_state_ht, node);
+ call_rcu(&sample->rcu_node, free_channel_state_sample_rcu);
+ }
+
+ /* Remove the channel from the channels_ht and free it. */
+ cds_lfht_lookup(state->channels_ht,
+ hash_channel_key(&key),
+ match_channel_info,
+ &key,
+ &iter);
+ node = cds_lfht_iter_get_node(&iter);
+ /* The channel must exist if its trigger list was found above. */
+ assert(node);
+ channel_info = caa_container_of(node, struct channel_info,
+ channels_ht_node);
+ cds_lfht_del(state->channels_ht, node);
+ channel_info_destroy(channel_info);
+end:
+ rcu_read_unlock();
+ *cmd_result = LTTNG_OK;
+ return 0;
+}
+
+/*
+ * Handle a session rotation (ongoing or completed) command: record the
+ * rotation state on the session_info and dispatch an evaluation of every
+ * matching session-rotation trigger to its subscribed clients.
+ *
+ * Returns 0 on success, -1 on error; *_cmd_result carries the lttng error
+ * code reported to the command's issuer.
+ */
+static
+int handle_notification_thread_command_session_rotation(
+ struct notification_thread_state *state,
+ enum notification_thread_command_type cmd_type,
+ const char *session_name, uid_t session_uid, gid_t session_gid,
+ uint64_t trace_archive_chunk_id,
+ struct lttng_trace_archive_location *location,
+ enum lttng_error_code *_cmd_result)
+{
+ int ret = 0;
+ enum lttng_error_code cmd_result = LTTNG_OK;
+ struct lttng_session_trigger_list *trigger_list;
+ struct lttng_trigger_list_element *trigger_list_element;
+ struct session_info *session_info;
+
+ rcu_read_lock();
+
+ session_info = find_or_create_session_info(state, session_name,
+ session_uid, session_gid);
+ if (!session_info) {
+ /* Allocation error or an internal error occurred. */
+ /* NOTE(review): internal errors are also reported as NOMEM here. */
+ ret = -1;
+ cmd_result = LTTNG_ERR_NOMEM;
+ goto end;
+ }
+
+ session_info->rotation.ongoing =
+ cmd_type == NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_ONGOING;
+ session_info->rotation.id = trace_archive_chunk_id;
+ trigger_list = get_session_trigger_list(state, session_name);
+ if (!trigger_list) {
+ DBG("[notification-thread] No triggers applying to session \"%s\" found",
+ session_name);
+ goto end;
+ }
+
+ cds_list_for_each_entry(trigger_list_element, &trigger_list->list,
+ node) {
+ const struct lttng_condition *condition;
+ const struct lttng_action *action;
+ const struct lttng_trigger *trigger;
+ struct notification_client_list *client_list;
+ struct lttng_evaluation *evaluation = NULL;
+ enum lttng_condition_type condition_type;
+ bool client_list_is_empty;
+
+ trigger = trigger_list_element->trigger;
+ condition = lttng_trigger_get_const_condition(trigger);
+ assert(condition);
+ condition_type = lttng_condition_get_type(condition);
+
+ /* Skip triggers that don't match this command's rotation phase. */
+ if (condition_type == LTTNG_CONDITION_TYPE_SESSION_ROTATION_ONGOING &&
+ cmd_type != NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_ONGOING) {
+ continue;
+ } else if (condition_type == LTTNG_CONDITION_TYPE_SESSION_ROTATION_COMPLETED &&
+ cmd_type != NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_COMPLETED) {
+ continue;
+ }
+
+ action = lttng_trigger_get_const_action(trigger);
+
+ /* Notify actions are the only type currently supported. */
+ assert(lttng_action_get_type_const(action) ==
+ LTTNG_ACTION_TYPE_NOTIFY);
+
+ client_list = get_client_list_from_condition(state, condition);
+ assert(client_list);
+
+ pthread_mutex_lock(&client_list->lock);
+ client_list_is_empty = cds_list_empty(&client_list->list);
+ pthread_mutex_unlock(&client_list->lock);
+ if (client_list_is_empty) {
+ /*
+ * No clients interested in the evaluation's result,
+ * skip it.
+ */
+ notification_client_list_put(client_list);
+ continue;
+ }
+
+ if (cmd_type == NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_ONGOING) {
+ evaluation = lttng_evaluation_session_rotation_ongoing_create(
+ trace_archive_chunk_id);
+ } else {
+ evaluation = lttng_evaluation_session_rotation_completed_create(
+ trace_archive_chunk_id, location);
+ }
+
+ if (!evaluation) {
+ /* Internal error */
+ ret = -1;
+ cmd_result = LTTNG_ERR_UNK;
+ goto put_list;
+ }
+
+ /* Dispatch evaluation result to all clients. */
+ ret = send_evaluation_to_clients(trigger_list_element->trigger,
+ evaluation, client_list, state,
+ session_info->uid,
+ session_info->gid);
+ lttng_evaluation_destroy(evaluation);
+put_list:
+ notification_client_list_put(client_list);
+ if (caa_unlikely(ret)) {
+ break;
+ }
+ }
+end:
+ /* Releases the reference taken by find_or_create_session_info(). */
+ session_info_put(session_info);
+ *_cmd_result = cmd_result;
+ rcu_read_unlock();
+ return ret;
+}
+
+/*
+ * Register an application's trigger event source pipe: publish it in the
+ * handle's event source list and add its read side to the thread's pollset.
+ *
+ * Returns 0 on success, negative on error; *_cmd_result carries the lttng
+ * error code reported to the command's issuer.
+ */
+static
+int handle_notification_thread_command_add_application(
+		struct notification_thread_handle *handle,
+		struct notification_thread_state *state,
+		int read_side_trigger_event_application_pipe,
+		enum lttng_error_code *_cmd_result)
+{
+	int ret = 0;
+	enum lttng_error_code cmd_result = LTTNG_OK;
+	struct notification_event_trigger_source_element *element = NULL;
+
+	element = zmalloc(sizeof(*element));
+	if (!element) {
+		cmd_result = LTTNG_ERR_NOMEM;
+		ret = -1;
+		goto end;
+	}
+
+	CDS_INIT_LIST_HEAD(&element->node);
+	element->fd = read_side_trigger_event_application_pipe;
+
+	pthread_mutex_lock(&handle->event_trigger_sources.lock);
+	cds_list_add(&element->node, &handle->event_trigger_sources.list);
+	pthread_mutex_unlock(&handle->event_trigger_sources.lock);
+
+	DBG3("[notification-thread] Adding application event source from fd: %d", read_side_trigger_event_application_pipe);
+	/* Monitor the read side of the application's event pipe. */
+	ret = lttng_poll_add(&state->events,
+			read_side_trigger_event_application_pipe,
+			LPOLLIN | LPOLLERR);
+	if (ret < 0) {
+		ERR("[notification-thread] Failed to add event source pipe fd to pollset");
+		/*
+		 * Fix (was a TODO): unpublish and free the element so the
+		 * source list stays consistent with the pollset, and report
+		 * the failure to the command's issuer instead of LTTNG_OK.
+		 */
+		pthread_mutex_lock(&handle->event_trigger_sources.lock);
+		cds_list_del(&element->node);
+		pthread_mutex_unlock(&handle->event_trigger_sources.lock);
+		free(element);
+		cmd_result = LTTNG_ERR_FATAL;
+		goto end;
+	}
+
+end:
+	*_cmd_result = cmd_result;
+	return ret;
+}
+
+/*
+ * Unregister an application's trigger event source pipe: unpublish it from
+ * the handle's event source list and remove it from the thread's pollset.
+ *
+ * Returns 0 on success, negative on error; *_cmd_result carries the lttng
+ * error code reported to the command's issuer.
+ */
+static
+int handle_notification_thread_command_remove_application(
+		struct notification_thread_handle *handle,
+		struct notification_thread_state *state,
+		int read_side_trigger_event_application_pipe,
+		enum lttng_error_code *_cmd_result)
+{
+	int ret = 0;
+	enum lttng_error_code cmd_result = LTTNG_OK;
+	struct notification_event_trigger_source_element *source_element, *tmp;
+
+	/*
+	 * Fix (was a TODO): traverse the event source list under its lock,
+	 * as the insertion side does.
+	 */
+	pthread_mutex_lock(&handle->event_trigger_sources.lock);
+	cds_list_for_each_entry_safe(source_element, tmp,
+			&handle->event_trigger_sources.list, node) {
+		if (source_element->fd != read_side_trigger_event_application_pipe) {
+			continue;
+		}
+
+		cds_list_del(&source_element->node);
+		/* Fix: the element was previously leaked after its removal. */
+		free(source_element);
+		DBG("[notification-thread] Removed event source from event source list");
+		break;
+	}
+	pthread_mutex_unlock(&handle->event_trigger_sources.lock);
+
+	DBG3("[notification-thread] Removing application event source from fd: %d", read_side_trigger_event_application_pipe);
+	/* Removing the read side pipe to the event poll */
+	ret = lttng_poll_del(&state->events,
+			read_side_trigger_event_application_pipe);
+	if (ret < 0) {
+		ERR("[notification-thread] Failed to remove event source pipe fd from pollset");
+		/* Fix: report the failure to the command's issuer. */
+		cmd_result = LTTNG_ERR_FATAL;
+		goto end;
+	}
+
+end:
+	*_cmd_result = cmd_result;
+	return ret;
+}
+
+/*
+ * Return the set of registered triggers held in trigger_tokens_ht.
+ *
+ * On success, ownership of the resulting lttng_triggers set is transferred
+ * to the caller through *triggers; each contained trigger holds an extra
+ * reference (lttng_trigger_get) shared with the set.
+ */
+static int handle_notification_thread_command_get_tokens(
+ struct notification_thread_handle *handle,
+ struct notification_thread_state *state,
+ struct lttng_triggers **triggers,
+ enum lttng_error_code *_cmd_result)
+{
+ /* 'i' only counts the collected triggers; it is not otherwise used. */
+ int ret = 0, i = 0;
+ enum lttng_error_code cmd_result = LTTNG_OK;
+ struct cds_lfht_iter iter;
+ struct notification_trigger_tokens_ht_element *element;
+ struct lttng_triggers *local_triggers = NULL;
+
+ local_triggers = lttng_triggers_create();
+ if (!local_triggers) {
+ cmd_result = LTTNG_ERR_NOMEM;
+ goto end;
+ }
+
+ rcu_read_lock();
+ cds_lfht_for_each_entry (
+ state->trigger_tokens_ht, &iter, element, node) {
+ ret = lttng_triggers_add(local_triggers, element->trigger);
+ if (ret < 0) {
+ cmd_result = LTTNG_ERR_FATAL;
+ ret = -1;
+ /* local_triggers (and its references) is released below. */
+ goto end;
+ }
+
+ /* Ownership is shared with the lttng_triggers object */
+ lttng_trigger_get(element->trigger);
+
+ i++;
+ }
+
+ /* Passing ownership up */
+ *triggers = local_triggers;
+ local_triggers = NULL;
+
+end:
+ rcu_read_unlock();
+ lttng_triggers_destroy(local_triggers);
+ *_cmd_result = cmd_result;
+ return ret;
+}
+
+/*
+ * Return the set of registered triggers owned by the (uid, gid) credentials.
+ *
+ * On success, ownership of the resulting lttng_triggers set is transferred
+ * to the caller through *triggers; each contained trigger holds an extra
+ * reference (lttng_trigger_get) shared with the set.
+ */
+static
+int handle_notification_thread_command_list_triggers(
+		struct notification_thread_handle *handle,
+		struct notification_thread_state *state,
+		uid_t uid,
+		gid_t gid,
+		struct lttng_triggers **triggers,
+		enum lttng_error_code *_cmd_result)
+{
+	/* 'i' only counts the collected triggers; it is not otherwise used. */
+	int ret = 0, i = 0;
+	enum lttng_error_code cmd_result = LTTNG_OK;
+	struct cds_lfht_iter iter;
+	struct lttng_trigger_ht_element *trigger_ht_element;
+	struct lttng_triggers *local_triggers = NULL;
+	const struct lttng_credentials *creds;
+
+	long scb, sca;
+	unsigned long count;
+
+	rcu_read_lock();
+	/*
+	 * NOTE(review): 'count' is currently unused; presumably intended for
+	 * an eventual pre-sizing of the trigger set - confirm or remove.
+	 */
+	cds_lfht_count_nodes(state->triggers_ht, &scb, &count, &sca);
+
+	/* TODO check downcasting */
+	local_triggers = lttng_triggers_create();
+	if (!local_triggers) {
+		cmd_result = LTTNG_ERR_NOMEM;
+		goto end;
+	}
+
+	cds_lfht_for_each_entry (state->triggers_ht, &iter,
+			trigger_ht_element, node) {
+		/*
+		 * Only return triggers to which the requesting client has
+		 * access. For now, even the root user can only list its own
+		 * triggers.
+		 * TODO: root user behavior
+		 */
+		creds = lttng_trigger_get_credentials(trigger_ht_element->trigger);
+		if ((uid != creds->uid) || (gid != creds->gid)) {
+			continue;
+		}
+
+		ret = lttng_triggers_add(local_triggers, trigger_ht_element->trigger);
+		if (ret < 0) {
+			/*
+			 * Fix: report the failure through cmd_result, as
+			 * handle_notification_thread_command_get_tokens()
+			 * does; it was previously left at LTTNG_OK.
+			 */
+			cmd_result = LTTNG_ERR_FATAL;
+			ret = -1;
+			goto end;
+		}
+		/* Ownership is shared with the lttng_triggers object */
+		lttng_trigger_get(trigger_ht_element->trigger);
+
+		i++;
+	}
+
+	/* Passing ownership up */
+	*triggers = local_triggers;
+	local_triggers = NULL;
+
+end:
+	rcu_read_unlock();
+	lttng_triggers_destroy(local_triggers);
+	*_cmd_result = cmd_result;
+	return ret;
+}
+
+/*
+ * Check whether the running tracers can evaluate 'condition'.
+ *
+ * Returns 1 if supported, -1 on error while querying the condition; for
+ * kernel buffer-usage conditions, the result of
+ * kernel_supports_ring_buffer_snapshot_sample_positions() is returned
+ * as-is (NOTE(review): presumably 0 when unsupported - confirm that
+ * helper's contract).
+ */
+static
+int condition_is_supported(struct lttng_condition *condition)
+{
+ int ret;
+
+ switch (lttng_condition_get_type(condition)) {
+ case LTTNG_CONDITION_TYPE_BUFFER_USAGE_LOW:
+ case LTTNG_CONDITION_TYPE_BUFFER_USAGE_HIGH:
+ {
+ enum lttng_domain_type domain;
+
+ ret = lttng_condition_buffer_usage_get_domain_type(condition,
+ &domain);
+ if (ret) {
+ ret = -1;
+ goto end;
+ }
+
+ /* User space buffer-usage monitoring is always available. */
+ if (domain != LTTNG_DOMAIN_KERNEL) {
+ ret = 1;
+ goto end;
+ }
+
+ /*
+ * Older kernel tracers don't expose the API to monitor their
+ * buffers. Therefore, we reject triggers that require that
+ * mechanism to be available to be evaluated.
+ */
+ ret = kernel_supports_ring_buffer_snapshot_sample_positions();
+ break;
+ }
+ case LTTNG_CONDITION_TYPE_EVENT_RULE_HIT:
+ {
+ /* TODO:
+ * Check for kernel support.
+ * Check for ust support ??
+ */
+ ret = 1;
+ break;
+ }
+ default:
+ /* All other condition types are assumed supported. */
+ ret = 1;
+ }
+end:
+ return ret;
+}
+
+/*
+ * Check whether 'action' can be executed by the notification subsystem.
+ *
+ * Currently every action type is reported as supported (returns 1); the
+ * switch is kept as a checklist of cases still awaiting real validation.
+ */
+static
+int action_is_supported(struct lttng_action *action)
+{
+	int ret = 1;
+
+	switch (lttng_action_get_type(action)) {
+	case LTTNG_ACTION_TYPE_NOTIFY:
+	case LTTNG_ACTION_TYPE_START_SESSION:
+	case LTTNG_ACTION_TYPE_STOP_SESSION:
+	case LTTNG_ACTION_TYPE_ROTATE_SESSION:
+	case LTTNG_ACTION_TYPE_SNAPSHOT_SESSION:
+		/* TODO validate that this is true for kernel in regards to
+		 * rotation and snapshot. Start stop is not a problem notify
+		 * either.
+		 */
+		/* For now all type of actions are supported */
+		break;
+	case LTTNG_ACTION_TYPE_GROUP:
+		/* TODO: Iterate over all internal actions and validate that
+		 * they are supported
+		 */
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+/* Must be called with RCU read lock held. */
+/*
+ * Add a newly registered session-bound trigger to the trigger list of the
+ * session its condition names, if that session is already known.
+ *
+ * Returns 0 on success (including when the session is not yet known),
+ * -1 on error or if the trigger's condition is not session-bound.
+ */
+static
+int bind_trigger_to_matching_session(struct lttng_trigger *trigger,
+ struct notification_thread_state *state)
+{
+ int ret = 0;
+ const struct lttng_condition *condition;
+ const char *session_name;
+ struct lttng_session_trigger_list *trigger_list;
+
+ condition = lttng_trigger_get_const_condition(trigger);
+ switch (lttng_condition_get_type(condition)) {
+ case LTTNG_CONDITION_TYPE_SESSION_ROTATION_ONGOING:
+ case LTTNG_CONDITION_TYPE_SESSION_ROTATION_COMPLETED:
+ {
+ enum lttng_condition_status status;
+
+ status = lttng_condition_session_rotation_get_session_name(
+ condition, &session_name);
+ if (status != LTTNG_CONDITION_STATUS_OK) {
+ ERR("[notification-thread] Failed to bind trigger to session: unable to get 'session_rotation' condition's session name");
+ ret = -1;
+ goto end;
+ }
+ break;
+ }
+ default:
+ /* Caller error: the condition is not bound to a session. */
+ ret = -1;
+ goto end;
+ }
+
+ trigger_list = get_session_trigger_list(state, session_name);
+ if (!trigger_list) {
+ /* Not an error: the trigger will be bound when the session appears. */
+ DBG("[notification-thread] Unable to bind trigger applying to session \"%s\" as it is not yet known to the notification system",
+ session_name);
+ goto end;
+
+ }
+
+ DBG("[notification-thread] Newly registered trigger bound to session \"%s\"",
+ session_name);
+ ret = lttng_session_trigger_list_add(trigger_list, trigger);
+end:
+ return ret;
+}
+
+/* Must be called with RCU read lock held. */
+/*
+ * Add a newly registered channel-bound trigger to the trigger list of every
+ * currently known channel to which it applies.
+ *
+ * Returns 0 on success, -1 on allocation failure.
+ * NOTE(review): on failure, bindings already made are left in place - confirm
+ * the caller's unwinding relies on the usual channel teardown.
+ */
+static
+int bind_trigger_to_matching_channels(struct lttng_trigger *trigger,
+ struct notification_thread_state *state)
+{
+ int ret = 0;
+ struct cds_lfht_node *node;
+ struct cds_lfht_iter iter;
+ struct channel_info *channel;
+
+ cds_lfht_for_each_entry(state->channels_ht, &iter, channel,
+ channels_ht_node) {
+ struct lttng_trigger_list_element *trigger_list_element;
+ struct lttng_channel_trigger_list *trigger_list;
+ struct cds_lfht_iter lookup_iter;
+
+ if (!trigger_applies_to_channel(trigger, channel)) {
+ continue;
+ }
+
+ /* Every known channel has an entry in channel_triggers_ht. */
+ cds_lfht_lookup(state->channel_triggers_ht,
+ hash_channel_key(&channel->key),
+ match_channel_trigger_list,
+ &channel->key,
+ &lookup_iter);
+ node = cds_lfht_iter_get_node(&lookup_iter);
+ assert(node);
+ trigger_list = caa_container_of(node,
+ struct lttng_channel_trigger_list,
+ channel_triggers_ht_node);
+
+ trigger_list_element = zmalloc(sizeof(*trigger_list_element));
+ if (!trigger_list_element) {
+ ret = -1;
+ goto end;
+ }
+ CDS_INIT_LIST_HEAD(&trigger_list_element->node);
+ trigger_list_element->trigger = trigger;
+ cds_list_add(&trigger_list_element->node, &trigger_list->list);
+ DBG("[notification-thread] Newly registered trigger bound to channel \"%s\"",
+ channel->name);
+ }
+end:
+ return ret;
+}
+
+/*
+ * Register a trigger whose action is "notify": build its client list, bind
+ * it to the matching session or channels, evaluate its condition once for
+ * the already-subscribed clients, and publish the client list.
+ *
+ * Returns 0 on success, -1 on error.
+ */
+static int action_notify_register_trigger(
+ struct notification_thread_state *state,
+ struct lttng_trigger *trigger)
+{
+
+ int ret = 0;
+ struct lttng_condition *condition;
+ struct notification_client *client;
+ struct notification_client_list *client_list = NULL;
+ struct cds_lfht_iter iter;
+ struct notification_client_list_element *client_list_element, *tmp;
+
+ condition = lttng_trigger_get_condition(trigger);
+ assert(condition);
+
+ client_list = notification_client_list_create(trigger);
+ if (!client_list) {
+ ret = -1;
+ goto end;
+ }
+
+ /* Build a list of clients to which this new trigger applies. */
+ cds_lfht_for_each_entry(state->client_socket_ht, &iter, client,
+ client_socket_ht_node) {
+ if (!trigger_applies_to_client(trigger, client)) {
+ continue;
+ }
+
+ client_list_element = zmalloc(sizeof(*client_list_element));
+ if (!client_list_element) {
+ ret = -1;
+ goto error_put_client_list;
+ }
+ CDS_INIT_LIST_HEAD(&client_list_element->node);
+ client_list_element->client = client;
+ cds_list_add(&client_list_element->node, &client_list->list);
+ }
+
+ /* Bind the trigger to the object type its condition monitors. */
+ switch (get_condition_binding_object(condition)) {
+ case LTTNG_OBJECT_TYPE_SESSION:
+ /* Add the trigger to the list if it matches a known session. */
+ ret = bind_trigger_to_matching_session(trigger, state);
+ if (ret) {
+ goto error_put_client_list;
+ }
+ break;
+ case LTTNG_OBJECT_TYPE_CHANNEL:
+ /*
+ * Add the trigger to list of triggers bound to the channels
+ * currently known.
+ */
+ ret = bind_trigger_to_matching_channels(trigger, state);
+ if (ret) {
+ goto error_put_client_list;
+ }
+ break;
+ case LTTNG_OBJECT_TYPE_NONE:
+ break;
+ default:
+ ERR("[notification-thread] Unknown object type on which to bind a newly registered trigger was encountered");
+ ret = -1;
+ goto error_put_client_list;
+ }
+
+ /*
+ * Since there is nothing preventing clients from subscribing to a
+ * condition before the corresponding trigger is registered, we have
+ * to evaluate this new condition right away.
+ *
+ * At some point, we were waiting for the next "evaluation" (e.g. on
+ * reception of a channel sample) to evaluate this new condition, but
+ * that was broken.
+ *
+ * The reason it was broken is that waiting for the next sample
+ * does not allow us to properly handle transitions for edge-triggered
+ * conditions.
+ *
+ * Consider this example: when we handle a new channel sample, we
+ * evaluate each conditions twice: once with the previous state, and
+ * again with the newest state. We then use those two results to
+ * determine whether a state change happened: a condition was false and
+ * became true. If a state change happened, we have to notify clients.
+ *
+ * Now, if a client subscribes to a given notification and registers
+ * a trigger *after* that subscription, we have to make sure the
+ * condition is evaluated at this point while considering only the
+ * current state. Otherwise, the next evaluation cycle may only see
+ * that the evaluations remain the same (true for samples n-1 and n) and
+ * the client will never know that the condition has been met.
+ *
+ * No need to lock the list here as it has not been published yet.
+ */
+ cds_list_for_each_entry_safe(client_list_element, tmp,
+ &client_list->list, node) {
+ ret = evaluate_condition_for_client(trigger, condition,
+ client_list_element->client, state);
+ if (ret) {
+ goto error_put_client_list;
+ }
+ }
+
+ /*
+ * Client list ownership transferred to the
+ * notification_trigger_clients_ht.
+ */
+ publish_notification_client_list(state, client_list);
+ /* Clear the local pointer so the put below becomes a no-op. */
+ client_list = NULL;
+error_put_client_list:
+ notification_client_list_put(client_list);
+end:
+ return ret;
+}
+
+static
+bool trigger_name_taken(struct notification_thread_state *state, const char *name)
+{
+ struct cds_lfht_node *triggers_by_name_ht_node;
+ struct cds_lfht_iter iter;
+ /* TODO change hashing for trigger */
+ cds_lfht_lookup(state->triggers_by_name_ht,
+ hash_key_str(name, lttng_ht_seed),
+ match_str,
+ name,
+ &iter);
+ triggers_by_name_ht_node = cds_lfht_iter_get_node(&iter);
+ if (triggers_by_name_ht_node) {
+ return true;
+ } else {
+ return false;
+ }