+ call_rcu(&sample->rcu_node, free_channel_state_sample_rcu);
+ }
+
+ /* Remove the channel from the channels_ht and free it. */
+ cds_lfht_lookup(state->channels_ht,
+ hash_channel_key(&key),
+ match_channel_info,
+ &key,
+ &iter);
+ node = cds_lfht_iter_get_node(&iter);
+ assert(node);
+ channel_info = caa_container_of(node, struct channel_info,
+ channels_ht_node);
+ cds_lfht_del(state->channels_ht, node);
+ channel_info_destroy(channel_info);
+end:
+ rcu_read_unlock();
+ *cmd_result = LTTNG_OK;
+ return 0;
+}
+
+/*
+ * Handle a session rotation "ongoing"/"completed" command.
+ *
+ * Updates the rotation state of the targeted session and evaluates every
+ * trigger bound to that session whose condition type matches the command,
+ * dispatching the resulting evaluation to all subscribed clients.
+ *
+ * Returns 0 on success, -1 on internal error; the user-visible status is
+ * reported through _cmd_result.
+ */
+static
+int handle_notification_thread_command_session_rotation(
+		struct notification_thread_state *state,
+		enum notification_thread_command_type cmd_type,
+		const char *session_name, uid_t session_uid, gid_t session_gid,
+		uint64_t trace_archive_chunk_id,
+		struct lttng_trace_archive_location *location,
+		enum lttng_error_code *_cmd_result)
+{
+	int ret = 0;
+	enum lttng_error_code cmd_result = LTTNG_OK;
+	struct lttng_session_trigger_list *trigger_list;
+	struct lttng_trigger_list_element *trigger_list_element;
+	struct session_info *session_info;
+
+	rcu_read_lock();
+
+	session_info = find_or_create_session_info(state, session_name,
+			session_uid, session_gid);
+	if (!session_info) {
+		/* Allocation error or an internal error occurred. */
+		ret = -1;
+		cmd_result = LTTNG_ERR_NOMEM;
+		goto end;
+	}
+
+	session_info->rotation.ongoing =
+			cmd_type == NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_ONGOING;
+	session_info->rotation.id = trace_archive_chunk_id;
+	trigger_list = get_session_trigger_list(state, session_name);
+	if (!trigger_list) {
+		DBG("[notification-thread] No triggers applying to session \"%s\" found",
+				session_name);
+		goto end;
+	}
+
+	cds_list_for_each_entry(trigger_list_element, &trigger_list->list,
+			node) {
+		const struct lttng_condition *condition;
+		const struct lttng_action *action;
+		const struct lttng_trigger *trigger;
+		struct notification_client_list *client_list;
+		struct lttng_evaluation *evaluation = NULL;
+		enum lttng_condition_type condition_type;
+		bool client_list_is_empty;
+
+		trigger = trigger_list_element->trigger;
+		condition = lttng_trigger_get_const_condition(trigger);
+		assert(condition);
+		condition_type = lttng_condition_get_type(condition);
+
+		/* Skip triggers that don't watch this kind of rotation event. */
+		if (condition_type == LTTNG_CONDITION_TYPE_SESSION_ROTATION_ONGOING &&
+				cmd_type != NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_ONGOING) {
+			continue;
+		} else if (condition_type == LTTNG_CONDITION_TYPE_SESSION_ROTATION_COMPLETED &&
+				cmd_type != NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_COMPLETED) {
+			continue;
+		}
+
+		action = lttng_trigger_get_const_action(trigger);
+
+		/* Notify actions are the only type currently supported. */
+		assert(lttng_action_get_type_const(action) ==
+				LTTNG_ACTION_TYPE_NOTIFY);
+
+		client_list = get_client_list_from_condition(state, condition);
+		assert(client_list);
+
+		pthread_mutex_lock(&client_list->lock);
+		client_list_is_empty = cds_list_empty(&client_list->list);
+		pthread_mutex_unlock(&client_list->lock);
+		if (client_list_is_empty) {
+			/*
+			 * No clients interested in the evaluation's result,
+			 * skip it. Release the client list reference taken
+			 * above; the previous code leaked it on this path
+			 * (every other exit goes through put_list).
+			 */
+			notification_client_list_put(client_list);
+			continue;
+		}
+
+		if (cmd_type == NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_ONGOING) {
+			evaluation = lttng_evaluation_session_rotation_ongoing_create(
+					trace_archive_chunk_id);
+		} else {
+			evaluation = lttng_evaluation_session_rotation_completed_create(
+					trace_archive_chunk_id, location);
+		}
+
+		if (!evaluation) {
+			/* Internal error */
+			ret = -1;
+			cmd_result = LTTNG_ERR_UNK;
+			goto put_list;
+		}
+
+		/* Dispatch evaluation result to all clients. */
+		ret = send_evaluation_to_clients(trigger_list_element->trigger,
+				evaluation, client_list, state,
+				session_info->uid,
+				session_info->gid);
+		lttng_evaluation_destroy(evaluation);
+put_list:
+		notification_client_list_put(client_list);
+		if (caa_unlikely(ret)) {
+			break;
+		}
+	}
+end:
+	session_info_put(session_info);
+	*_cmd_result = cmd_result;
+	rcu_read_unlock();
+	return ret;
+}
+
+/*
+ * Register a new application event source with the notification thread.
+ *
+ * Publishes the read side of the application's trigger event pipe in the
+ * handle's event source list and adds it to the thread's poll set.
+ *
+ * Returns 0 on success, -1 on error; the user-visible status is reported
+ * through _cmd_result.
+ */
+static
+int handle_notification_thread_command_add_application(
+		struct notification_thread_handle *handle,
+		struct notification_thread_state *state,
+		int read_side_trigger_event_application_pipe,
+		enum lttng_error_code *_cmd_result)
+{
+	int ret = 0;
+	enum lttng_error_code cmd_result = LTTNG_OK;
+	struct notification_event_trigger_source_element *element = NULL;
+
+	element = zmalloc(sizeof(*element));
+	if (!element) {
+		cmd_result = LTTNG_ERR_NOMEM;
+		ret = -1;
+		goto end;
+	}
+
+	CDS_INIT_LIST_HEAD(&element->node);
+	element->fd = read_side_trigger_event_application_pipe;
+
+	pthread_mutex_lock(&handle->event_trigger_sources.lock);
+	cds_list_add(&element->node, &handle->event_trigger_sources.list);
+	pthread_mutex_unlock(&handle->event_trigger_sources.lock);
+
+	DBG3("[notification-thread] Adding application event source from fd: %d", read_side_trigger_event_application_pipe);
+
+	/* Adding the read side pipe to the event poll */
+	ret = lttng_poll_add(&state->events,
+			read_side_trigger_event_application_pipe,
+			LPOLLIN | LPOLLERR);
+	if (ret < 0) {
+		ERR("[notification-thread] Failed to add event source pipe fd to pollset");
+		cmd_result = LTTNG_ERR_FATAL;
+		/*
+		 * Unpublish and free the element so a failed registration
+		 * does not leave a stale source in the list (previously it
+		 * was leaked, as the TODO noted).
+		 */
+		pthread_mutex_lock(&handle->event_trigger_sources.lock);
+		cds_list_del(&element->node);
+		pthread_mutex_unlock(&handle->event_trigger_sources.lock);
+		free(element);
+		goto end;
+	}
+
+end:
+	*_cmd_result = cmd_result;
+	return ret;
+}
+
+/*
+ * Unregister an application event source from the notification thread.
+ *
+ * Removes the matching element (if any) from the handle's event source
+ * list and drops the pipe's fd from the thread's poll set.
+ *
+ * Returns 0 on success, negative on error; the user-visible status is
+ * reported through _cmd_result.
+ */
+static
+int handle_notification_thread_command_remove_application(
+		struct notification_thread_handle *handle,
+		struct notification_thread_state *state,
+		int read_side_trigger_event_application_pipe,
+		enum lttng_error_code *_cmd_result)
+{
+	int ret = 0;
+	enum lttng_error_code cmd_result = LTTNG_OK;
+	struct notification_event_trigger_source_element *source_element, *tmp;
+
+	/*
+	 * Hold the event source list lock during the traversal; the "add
+	 * application" command publishes elements under this same lock.
+	 * The previous code iterated unlocked (flagged by its own TODO).
+	 */
+	pthread_mutex_lock(&handle->event_trigger_sources.lock);
+	cds_list_for_each_entry_safe(source_element, tmp,
+			&handle->event_trigger_sources.list, node) {
+		if (source_element->fd != read_side_trigger_event_application_pipe) {
+			continue;
+		}
+
+		DBG("[notification-thread] Removed event source from event source list");
+		cds_list_del(&source_element->node);
+		free(source_element);
+		break;
+	}
+	pthread_mutex_unlock(&handle->event_trigger_sources.lock);
+
+	DBG3("[notification-thread] Removing application event source from fd: %d", read_side_trigger_event_application_pipe);
+	/* Removing the read side pipe to the event poll */
+	ret = lttng_poll_del(&state->events,
+			read_side_trigger_event_application_pipe);
+	if (ret < 0) {
+		ERR("[notification-thread] Failed to remove event source pipe fd from pollset");
+		/* Previously cmd_result was left LTTNG_OK on this error path. */
+		cmd_result = LTTNG_ERR_FATAL;
+		goto end;
+	}
+
+end:
+	*_cmd_result = cmd_result;
+	return ret;
+}
+
+/*
+ * Build an lttng_triggers set containing every trigger currently present
+ * in the trigger tokens hash table. On success, ownership of the set is
+ * transferred to the caller through 'triggers'; each contained trigger's
+ * reference count is bumped since ownership is shared with the set.
+ */
+static int handle_notification_thread_command_get_tokens(
+		struct notification_thread_handle *handle,
+		struct notification_thread_state *state,
+		struct lttng_triggers **triggers,
+		enum lttng_error_code *_cmd_result)
+{
+	int ret = 0;
+	enum lttng_error_code cmd_result = LTTNG_OK;
+	struct cds_lfht_iter iter;
+	struct notification_trigger_tokens_ht_element *element;
+	struct lttng_triggers *local_triggers = NULL;
+
+	/*
+	 * Take the RCU read lock before any path can jump to 'end': the
+	 * previous version locked after the allocation, so an allocation
+	 * failure reached rcu_read_unlock() without holding the lock.
+	 */
+	rcu_read_lock();
+
+	local_triggers = lttng_triggers_create();
+	if (!local_triggers) {
+		cmd_result = LTTNG_ERR_NOMEM;
+		goto end;
+	}
+
+	cds_lfht_for_each_entry (
+			state->trigger_tokens_ht, &iter, element, node) {
+		ret = lttng_triggers_add(local_triggers, element->trigger);
+		if (ret < 0) {
+			cmd_result = LTTNG_ERR_FATAL;
+			ret = -1;
+			goto end;
+		}
+
+		/* Ownership is shared with the lttng_triggers object */
+		lttng_trigger_get(element->trigger);
+	}
+
+	/* Passing ownership up */
+	*triggers = local_triggers;
+	local_triggers = NULL;
+
+end:
+	rcu_read_unlock();
+	lttng_triggers_destroy(local_triggers);
+	*_cmd_result = cmd_result;
+	return ret;
+}
+
+/*
+ * List the registered triggers visible to the given credentials.
+ *
+ * Only triggers whose owner uid/gid match the requester are returned; on
+ * success, ownership of the resulting lttng_triggers set is transferred
+ * to the caller through 'triggers'.
+ */
+static
+int handle_notification_thread_command_list_triggers(
+		struct notification_thread_handle *handle,
+		struct notification_thread_state *state,
+		uid_t uid,
+		gid_t gid,
+		struct lttng_triggers **triggers,
+		enum lttng_error_code *_cmd_result)
+{
+	int ret = 0;
+	enum lttng_error_code cmd_result = LTTNG_OK;
+	struct cds_lfht_iter iter;
+	struct lttng_trigger_ht_element *trigger_ht_element;
+	struct lttng_triggers *local_triggers = NULL;
+	const struct lttng_credentials *creds;
+
+	rcu_read_lock();
+
+	local_triggers = lttng_triggers_create();
+	if (!local_triggers) {
+		cmd_result = LTTNG_ERR_NOMEM;
+		goto end;
+	}
+
+	cds_lfht_for_each_entry (state->triggers_ht, &iter,
+			trigger_ht_element, node) {
+		/*
+		 * Only return the triggers to which the requesting client
+		 * has access. For now the root user can only list its own
+		 * triggers.
+		 * TODO: root user behavior
+		 */
+		creds = lttng_trigger_get_credentials(trigger_ht_element->trigger);
+		if ((uid != creds->uid) || (gid != creds->gid)) {
+			continue;
+		}
+
+		ret = lttng_triggers_add(local_triggers, trigger_ht_element->trigger);
+		if (ret < 0) {
+			/*
+			 * Report the failure through cmd_result as well;
+			 * previously LTTNG_OK leaked through while ret was
+			 * -1, unlike the get_tokens handler.
+			 */
+			cmd_result = LTTNG_ERR_FATAL;
+			ret = -1;
+			goto end;
+		}
+
+		/* Ownership is shared with the lttng_triggers object */
+		lttng_trigger_get(trigger_ht_element->trigger);
+	}
+
+	/* Passing ownership up */
+	*triggers = local_triggers;
+	local_triggers = NULL;
+
+end:
+	rcu_read_unlock();
+	lttng_triggers_destroy(local_triggers);
+	*_cmd_result = cmd_result;
+	return ret;
+}
+
+/*
+ * Report whether a condition type can be evaluated on this system.
+ * Returns 1 when supported, 0 when not, and may return a negative value
+ * on error (mirrors kernel_supports_ring_buffer_snapshot_sample_positions).
+ */
+static
+int condition_is_supported(struct lttng_condition *condition)
+{
+	switch (lttng_condition_get_type(condition)) {
+	case LTTNG_CONDITION_TYPE_BUFFER_USAGE_LOW:
+	case LTTNG_CONDITION_TYPE_BUFFER_USAGE_HIGH:
+	{
+		enum lttng_domain_type domain;
+
+		if (lttng_condition_buffer_usage_get_domain_type(condition,
+				&domain)) {
+			return -1;
+		}
+
+		if (domain != LTTNG_DOMAIN_KERNEL) {
+			return 1;
+		}
+
+		/*
+		 * Older kernel tracers don't expose the API to monitor their
+		 * buffers. Therefore, we reject triggers that require that
+		 * mechanism to be available to be evaluated.
+		 */
+		return kernel_supports_ring_buffer_snapshot_sample_positions();
+	}
+	case LTTNG_CONDITION_TYPE_EVENT_RULE_HIT:
+		/* TODO:
+		 * Check for kernel support.
+		 * Check for ust support ??
+		 */
+		return 1;
+	default:
+		/* All other condition types are assumed supported. */
+		return 1;
+	}
+}
+
+/*
+ * Report whether an action type can be executed on this system.
+ * Every known action type is currently considered supported (returns 1).
+ */
+static
+int action_is_supported(struct lttng_action *action)
+{
+	int supported;
+
+	switch (lttng_action_get_type(action)) {
+	case LTTNG_ACTION_TYPE_GROUP:
+		/* TODO: Iterate over all internal actions and validate that
+		 * they are supported
+		 */
+		supported = 1;
+		break;
+	case LTTNG_ACTION_TYPE_NOTIFY:
+	case LTTNG_ACTION_TYPE_START_SESSION:
+	case LTTNG_ACTION_TYPE_STOP_SESSION:
+	case LTTNG_ACTION_TYPE_ROTATE_SESSION:
+	case LTTNG_ACTION_TYPE_SNAPSHOT_SESSION:
+		/* TODO validate that this is true for kernel in regards to
+		 * rotation and snapshot. Start stop is not a problem notify
+		 * either.
+		 */
+		supported = 1;
+		break;
+	default:
+		supported = 1;
+		break;
+	}
+
+	return supported;
+}
+
+/*
+ * Bind a newly-registered trigger to the session its condition targets,
+ * if that session is already known to the notification system.
+ *
+ * Must be called with RCU read lock held. Returns 0 on success (including
+ * when the session is not yet known), -1 on error or when the condition
+ * does not bind to a session.
+ */
+static
+int bind_trigger_to_matching_session(struct lttng_trigger *trigger,
+		struct notification_thread_state *state)
+{
+	const struct lttng_condition *condition =
+			lttng_trigger_get_const_condition(trigger);
+	const char *session_name;
+	struct lttng_session_trigger_list *trigger_list;
+	enum lttng_condition_status status;
+
+	switch (lttng_condition_get_type(condition)) {
+	case LTTNG_CONDITION_TYPE_SESSION_ROTATION_ONGOING:
+	case LTTNG_CONDITION_TYPE_SESSION_ROTATION_COMPLETED:
+		status = lttng_condition_session_rotation_get_session_name(
+				condition, &session_name);
+		if (status != LTTNG_CONDITION_STATUS_OK) {
+			ERR("[notification-thread] Failed to bind trigger to session: unable to get 'session_rotation' condition's session name");
+			return -1;
+		}
+		break;
+	default:
+		/* Only session rotation conditions bind to a session. */
+		return -1;
+	}
+
+	trigger_list = get_session_trigger_list(state, session_name);
+	if (!trigger_list) {
+		DBG("[notification-thread] Unable to bind trigger applying to session \"%s\" as it is not yet known to the notification system",
+				session_name);
+		return 0;
+	}
+
+	DBG("[notification-thread] Newly registered trigger bound to session \"%s\"",
+			session_name);
+	return lttng_session_trigger_list_add(trigger_list, trigger);
+}
+
+/*
+ * Bind a newly-registered trigger to every currently-known channel to
+ * which it applies, by appending it to each channel's trigger list.
+ *
+ * Must be called with RCU read lock held. Returns 0 on success, -1 on
+ * allocation failure (earlier bindings are left in place).
+ */
+static
+int bind_trigger_to_matching_channels(struct lttng_trigger *trigger,
+		struct notification_thread_state *state)
+{
+	int ret = 0;
+	struct cds_lfht_iter iter;
+	struct channel_info *channel;
+
+	cds_lfht_for_each_entry(state->channels_ht, &iter, channel,
+			channels_ht_node) {
+		struct cds_lfht_node *list_node;
+		struct cds_lfht_iter lookup_iter;
+		struct lttng_channel_trigger_list *channel_trigger_list;
+		struct lttng_trigger_list_element *element;
+
+		if (!trigger_applies_to_channel(trigger, channel)) {
+			continue;
+		}
+
+		/* A known channel always has a trigger list entry. */
+		cds_lfht_lookup(state->channel_triggers_ht,
+				hash_channel_key(&channel->key),
+				match_channel_trigger_list,
+				&channel->key,
+				&lookup_iter);
+		list_node = cds_lfht_iter_get_node(&lookup_iter);
+		assert(list_node);
+		channel_trigger_list = caa_container_of(list_node,
+				struct lttng_channel_trigger_list,
+				channel_triggers_ht_node);
+
+		element = zmalloc(sizeof(*element));
+		if (!element) {
+			ret = -1;
+			goto end;
+		}
+		CDS_INIT_LIST_HEAD(&element->node);
+		element->trigger = trigger;
+		cds_list_add(&element->node, &channel_trigger_list->list);
+		DBG("[notification-thread] Newly registered trigger bound to channel \"%s\"",
+				channel->name);
+	}
+end:
+	return ret;
+}
+
+/*
+ * Return true when a trigger with the given name is already registered
+ * in the triggers-by-name hash table.
+ */
+static
+bool trigger_name_taken(struct notification_thread_state *state, const char *name)
+{
+	struct cds_lfht_iter iter;
+
+	/* TODO change hashing for trigger */
+	cds_lfht_lookup(state->triggers_by_name_ht,
+			hash_key_str(name, lttng_ht_seed),
+			match_str,
+			name,
+			&iter);
+	return cds_lfht_iter_get_node(&iter) != NULL;
+}
+
+/*
+ * Generate a unique, automatically-assigned name for an unnamed trigger
+ * by probing successive name offsets until an unused name is found.
+ * On return, *name points at the name stored in the trigger itself.
+ */
+static
+void generate_trigger_name(struct notification_thread_state *state, struct lttng_trigger *trigger, const char **name)
+{
+	/* Here the offset criteria guarantee an end. This will be a nice
+	 * bikeshedding conversation. I would simply generate uuid and use them
+	 * as trigger name.
+	 */
+	bool taken = false;
+	do {
+		lttng_trigger_generate_name(trigger, state->trigger_id.name_offset);
+		/* TODO error checking */
+		lttng_trigger_get_name(trigger, name);
+		taken = trigger_name_taken(state, *name);
+		if (taken) {
+			state->trigger_id.name_offset++;
+		}
+		/*
+		 * NOTE(review): the '|| name_offset == UINT32_MAX' clause makes
+		 * this loop spin forever once the offset saturates, since the
+		 * offset is no longer incremented when the name is free. Was
+		 * '&& name_offset != UINT32_MAX' intended? Confirm before use.
+		 */
+	} while (taken || state->trigger_id.name_offset == UINT32_MAX);
+}
+
+static int action_notify_register_trigger(
+ struct notification_thread_state *state,
+ struct lttng_trigger *trigger)
+{
+
+ int ret = 0;
+ struct lttng_condition *condition;
+ struct notification_client *client;
+ struct notification_client_list *client_list = NULL;
+ struct cds_lfht_iter iter;
+ struct notification_client_list_element *client_list_element, *tmp;
+
+ condition = lttng_trigger_get_condition(trigger);
+ assert(condition);
+
+ client_list = notification_client_list_create(trigger);
+ if (!client_list) {
+ ret = -1;
+ goto end;
+ }
+
+ /* Build a list of clients to which this new trigger applies. */
+ cds_lfht_for_each_entry(state->client_socket_ht, &iter, client,
+ client_socket_ht_node) {
+ if (!trigger_applies_to_client(trigger, client)) {
+ continue;
+ }
+
+ client_list_element = zmalloc(sizeof(*client_list_element));
+ if (!client_list_element) {
+ ret = -1;
+ goto error_put_client_list;
+ }
+ CDS_INIT_LIST_HEAD(&client_list_element->node);
+ client_list_element->client = client;
+ cds_list_add(&client_list_element->node, &client_list->list);
+ }
+
+ switch (get_condition_binding_object(condition)) {
+ case LTTNG_OBJECT_TYPE_SESSION:
+ /* Add the trigger to the list if it matches a known session. */
+ ret = bind_trigger_to_matching_session(trigger, state);
+ if (ret) {
+ goto error_put_client_list;
+ }
+ break;
+ case LTTNG_OBJECT_TYPE_CHANNEL:
+ /*
+ * Add the trigger to list of triggers bound to the channels
+ * currently known.
+ */
+ ret = bind_trigger_to_matching_channels(trigger, state);
+ if (ret) {
+ goto error_put_client_list;
+ }
+ break;
+ case LTTNG_OBJECT_TYPE_NONE:
+ break;
+ default:
+ ERR("[notification-thread] Unknown object type on which to bind a newly registered trigger was encountered");
+ ret = -1;
+ goto error_put_client_list;
+ }
+
+ /*
+ * Since there is nothing preventing clients from subscribing to a
+ * condition before the corresponding trigger is registered, we have
+ * to evaluate this new condition right away.
+ *
+ * At some point, we were waiting for the next "evaluation" (e.g. on
+ * reception of a channel sample) to evaluate this new condition, but
+ * that was broken.
+ *
+ * The reason it was broken is that waiting for the next sample
+ * does not allow us to properly handle transitions for edge-triggered
+ * conditions.
+ *
+ * Consider this example: when we handle a new channel sample, we
+ * evaluate each conditions twice: once with the previous state, and
+ * again with the newest state. We then use those two results to
+ * determine whether a state change happened: a condition was false and
+ * became true. If a state change happened, we have to notify clients.
+ *
+ * Now, if a client subscribes to a given notification and registers
+ * a trigger *after* that subscription, we have to make sure the
+ * condition is evaluated at this point while considering only the
+ * current state. Otherwise, the next evaluation cycle may only see
+ * that the evaluations remain the same (true for samples n-1 and n) and
+ * the client will never know that the condition has been met.
+ *
+ * No need to lock the list here as it has not been published yet.
+ */
+ cds_list_for_each_entry_safe(client_list_element, tmp,
+ &client_list->list, node) {
+ ret = evaluate_condition_for_client(trigger, condition,
+ client_list_element->client, state);
+ if (ret) {
+ goto error_put_client_list;
+ }