#include <common/macros.h>
#include <lttng/condition/condition.h>
#include <lttng/action/action-internal.h>
+#include <lttng/action/group-internal.h>
+#include <lttng/domain-internal.h>
#include <lttng/notification/notification-internal.h>
#include <lttng/condition/condition-internal.h>
#include <lttng/condition/buffer-usage-internal.h>
#include <lttng/condition/session-consumed-size-internal.h>
#include <lttng/condition/session-rotation-internal.h>
+#include <lttng/condition/event-rule-internal.h>
+#include <lttng/domain-internal.h>
#include <lttng/notification/channel-internal.h>
+#include <lttng/trigger/trigger-internal.h>
#include <time.h>
#include <unistd.h>
#include <inttypes.h>
#include <fcntl.h>
+#include "condition-internal.h"
#include "notification-thread.h"
#include "notification-thread-events.h"
#include "notification-thread-commands.h"
#include "lttng-sessiond.h"
#include "kernel.h"
+#include "trigger-error-accounting.h"
#define CLIENT_POLL_MASK_IN (LPOLLIN | LPOLLERR | LPOLLHUP | LPOLLRDHUP)
#define CLIENT_POLL_MASK_IN_OUT (CLIENT_POLL_MASK_IN | LPOLLOUT)
/*
 * Wrapper around a registered trigger, allowing it to be chained in the
 * notification thread's lookup structures.
 */
struct lttng_trigger_ht_element {
	/* Trigger wrapped by this element. */
	struct lttng_trigger *trigger;
	/* Node in triggers_ht (keyed on full trigger equality). */
	struct cds_lfht_node node;
	/* Node in triggers_by_name_uid_ht (keyed on trigger name and uid). */
	struct cds_lfht_node node_by_name_uid;
	/* call_rcu delayed reclaim. */
	struct rcu_head rcu_node;
};
enum client_transmission_status transmission_status,
struct notification_thread_state *state);
+static
+void free_lttng_trigger_ht_element_rcu(struct rcu_head *node);
+
static
int match_client_socket(struct cds_lfht_node *node, const void *key)
{
}
/*
 * cds_lfht match function for triggers_ht.
 *
 * 'key' is the candidate lttng_trigger; two entries match when the
 * triggers compare equal (lttng_trigger_is_equal).
 */
static
int match_trigger(struct cds_lfht_node *node, const void *key)
{
	struct lttng_trigger *trigger_key = (struct lttng_trigger *) key;
	struct lttng_trigger_ht_element *trigger_ht_element;

	trigger_ht_element = caa_container_of(node,
			struct lttng_trigger_ht_element, node);

	return !!lttng_trigger_is_equal(trigger_key, trigger_ht_element->trigger);
}
+
/*
 * cds_lfht match function for trigger_tokens_ht.
 *
 * 'key' points to a tracer token (uint64_t); entries match on token
 * value alone.
 */
static
int match_trigger_token(struct cds_lfht_node *node, const void *key)
{
	const uint64_t *_key = key;
	struct notification_trigger_tokens_ht_element *element;

	element = caa_container_of(node,
			struct notification_trigger_tokens_ht_element, node);

	return *_key == element->token;
}
static
return !strcmp(session_info->name, name);
}
/*
 * cds_lfht match function for triggers_by_name_uid_ht.
 *
 * Match trigger based on name and credentials only.
 * Name duplication is NOT allowed for the same uid.
 *
 * Both triggers are expected to have a name set; this is asserted since
 * anonymous triggers are named before insertion in this table.
 */
static int match_name_uid(struct cds_lfht_node *node, const void *key)
{
	bool match = false;
	struct lttng_trigger_ht_element *trigger_ht_element;
	const char *name;
	const char *key_name;
	enum lttng_trigger_status status;
	const struct lttng_credentials *key_creds;
	const struct lttng_credentials *node_creds;
	struct lttng_trigger *trigger_key = (struct lttng_trigger *) key;

	trigger_ht_element = caa_container_of(node,
			struct lttng_trigger_ht_element, node_by_name_uid);

	status = lttng_trigger_get_name(trigger_ht_element->trigger, &name);
	assert(status == LTTNG_TRIGGER_STATUS_OK);

	status = lttng_trigger_get_name(trigger_key, &key_name);
	assert(status == LTTNG_TRIGGER_STATUS_OK);

	/* Names must match exactly. */
	if (strcmp(name, key_name) != 0) {
		goto end;
	}

	/* Check the uid */
	key_creds = lttng_trigger_get_credentials(trigger_key);
	node_creds = lttng_trigger_get_credentials(trigger_ht_element->trigger);

	match = lttng_credentials_is_equal_uid(key_creds, node_creds);

end:
	return match;
}
static
case LTTNG_CONDITION_TYPE_SESSION_ROTATION_ONGOING:
case LTTNG_CONDITION_TYPE_SESSION_ROTATION_COMPLETED:
return LTTNG_OBJECT_TYPE_SESSION;
+ case LTTNG_CONDITION_TYPE_EVENT_RULE_HIT:
+ return LTTNG_OBJECT_TYPE_NONE;
default:
return LTTNG_OBJECT_TYPE_UNKNOWN;
}
&evaluation, &object_uid, &object_gid);
break;
case LTTNG_OBJECT_TYPE_NONE:
+ DBG("[notification-thread] Newly subscribed-to condition not binded to object, nothing to evaluate");
ret = 0;
goto end;
case LTTNG_OBJECT_TYPE_UNKNOWN:
DBG("[notification-thread] Adding channel %s from session %s, channel key = %" PRIu64 " in %s domain",
channel_name, session_name, channel_key_int,
- channel_domain == LTTNG_DOMAIN_KERNEL ? "kernel" : "user space");
+ lttng_domain_type_str(channel_domain));
CDS_INIT_LIST_HEAD(&trigger_list);
struct channel_info *channel_info;
DBG("[notification-thread] Removing channel key = %" PRIu64 " in %s domain",
- channel_key, domain == LTTNG_DOMAIN_KERNEL ? "kernel" : "user space");
+ channel_key, lttng_domain_type_str(domain));
rcu_read_lock();
struct lttng_trigger_list_element *trigger_list_element;
struct session_info *session_info;
const struct lttng_credentials session_creds = {
- .uid = session_uid,
- .gid = session_gid,
+ .uid = LTTNG_OPTIONAL_INIT_VALUE(session_uid),
+ .gid = LTTNG_OPTIONAL_INIT_VALUE(session_gid),
};
rcu_read_lock();
return ret;
}
+static
+int handle_notification_thread_command_add_application(
+ struct notification_thread_handle *handle,
+ struct notification_thread_state *state,
+ int read_side_trigger_event_application_pipe,
+ enum lttng_domain_type domain_type,
+ enum lttng_error_code *_cmd_result)
+{
+ int ret = 0;
+ enum lttng_error_code cmd_result = LTTNG_OK;
+ struct notification_event_trigger_source_element *element = NULL;
+
+ element = zmalloc(sizeof(*element));
+ if (!element) {
+ cmd_result = LTTNG_ERR_NOMEM;
+ ret = -1;
+ goto end;
+ }
+
+ CDS_INIT_LIST_HEAD(&element->node);
+ element->fd = read_side_trigger_event_application_pipe;
+ element->domain = domain_type;
+
+ pthread_mutex_lock(&handle->event_trigger_sources.lock);
+ cds_list_add(&element->node, &handle->event_trigger_sources.list);
+ pthread_mutex_unlock(&handle->event_trigger_sources.lock);
+
+ /* TODO: remove on failure to add to list? */
+
+ /* Adding the read side pipe to the event poll */
+ ret = lttng_poll_add(&state->events,
+ read_side_trigger_event_application_pipe,
+ LPOLLIN | LPOLLERR);
+
+ DBG3("[notification-thread] Adding application event source from fd: %d", read_side_trigger_event_application_pipe);
+ if (ret < 0) {
+ /* TODO: what should be the value of cmd_result??? */
+ ERR("[notification-thread] Failed to add event source pipe fd to pollset");
+ goto end;
+ }
+
+end:
+ *_cmd_result = cmd_result;
+ return ret;
+}
+
+static
+int handle_notification_thread_command_remove_application(
+ struct notification_thread_handle *handle,
+ struct notification_thread_state *state,
+ int read_side_trigger_event_application_pipe,
+ enum lttng_error_code *_cmd_result)
+{
+ int ret = 0;
+ enum lttng_error_code cmd_result = LTTNG_OK;
+ /* Used for logging */
+ enum lttng_domain_type domain = LTTNG_DOMAIN_NONE;
+
+ /* TODO: missing a lock propably to revisit */
+ struct notification_event_trigger_source_element *source_element = NULL, *tmp;
+
+ cds_list_for_each_entry_safe(source_element, tmp,
+ &handle->event_trigger_sources.list, node) {
+ if (source_element->fd != read_side_trigger_event_application_pipe) {
+ continue;
+ }
+
+ cds_list_del(&source_element->node);
+ break;
+ }
+
+ /* It should always be found */
+ assert(source_element);
+
+ DBG3("[notification-thread] Removing application event source from fd: %d of domain: %s",
+ read_side_trigger_event_application_pipe,
+ lttng_domain_type_str(domain));
+ free(source_element);
+
+ /* Removing the read side pipe to the event poll */
+ ret = lttng_poll_del(&state->events,
+ read_side_trigger_event_application_pipe);
+ if (ret < 0) {
+ /* TODO: what should be the value of cmd_result??? */
+ ERR("[notification-thread] Failed to remove event source pipe fd from pollset");
+ goto end;
+ }
+
+end:
+ *_cmd_result = cmd_result;
+ return ret;
+}
+
+static int handle_notification_thread_command_get_tokens(
+ struct notification_thread_handle *handle,
+ struct notification_thread_state *state,
+ struct lttng_triggers **triggers,
+ enum lttng_error_code *_cmd_result)
+{
+ int ret = 0, i = 0;
+ enum lttng_error_code cmd_result = LTTNG_OK;
+ struct cds_lfht_iter iter;
+ struct notification_trigger_tokens_ht_element *element;
+ struct lttng_triggers *local_triggers = NULL;
+
+ local_triggers = lttng_triggers_create();
+ if (!local_triggers) {
+ cmd_result = LTTNG_ERR_NOMEM;
+ goto end;
+ }
+
+ rcu_read_lock();
+ cds_lfht_for_each_entry (
+ state->trigger_tokens_ht, &iter, element, node) {
+ ret = lttng_triggers_add(local_triggers, element->trigger);
+ if (ret < 0) {
+ cmd_result = LTTNG_ERR_FATAL;
+ ret = -1;
+ goto end;
+ }
+
+ i++;
+ }
+
+ /* Passing ownership up */
+ *triggers = local_triggers;
+ local_triggers = NULL;
+
+end:
+ rcu_read_unlock();
+ lttng_triggers_destroy(local_triggers);
+ *_cmd_result = cmd_result;
+ return ret;
+}
+
/*
 * Refresh a trigger's cached error count from the error accounting
 * subsystem.
 *
 * NOTE(review): a failed count query is only logged; the count is then
 * set to the initial value (0) and 0 is still returned. Callers assert
 * on the return value, so accounting failures are silently masked —
 * confirm this is intentional.
 */
static
int trigger_update_error_count(struct lttng_trigger *trigger)
{
	int ret = 0;
	uint64_t error_count = 0;
	enum trigger_error_accounting_status status;

	status = trigger_error_accounting_get_count(trigger, &error_count);
	if (status != TRIGGER_ERROR_ACCOUNTING_STATUS_OK) {
		ERR("Error getting trigger error count");
	}

	lttng_trigger_set_error_count(trigger, error_count);
	return ret;
}
+
+static int handle_notification_thread_command_list_triggers(
+ struct notification_thread_handle *handle,
+ struct notification_thread_state *state,
+ uid_t uid,
+ struct lttng_triggers **triggers,
+ enum lttng_error_code *_cmd_result)
+{
+ int ret = 0, i = 0;
+ enum lttng_error_code cmd_result = LTTNG_OK;
+ struct cds_lfht_iter iter;
+ struct lttng_trigger_ht_element *trigger_ht_element;
+ struct lttng_triggers *local_triggers = NULL;
+ const struct lttng_credentials *creds;
+
+ long scb, sca;
+ unsigned long count;
+
+ rcu_read_lock();
+ cds_lfht_count_nodes(state->triggers_ht, &scb, &count, &sca);
+
+ local_triggers = lttng_triggers_create();
+ if (!local_triggers) {
+ cmd_result = LTTNG_ERR_NOMEM;
+ goto end;
+ }
+
+ cds_lfht_for_each_entry (state->triggers_ht, &iter,
+ trigger_ht_element, node) {
+ /*
+ * Only return the trigger for which the requestion client have
+ * access.
+ * Root user have visibility over all triggers.
+ */
+ creds = lttng_trigger_get_credentials(trigger_ht_element->trigger);
+ if (uid != lttng_credentials_get_uid(creds) && uid != 0) {
+ continue;
+ }
+
+ ret = trigger_update_error_count(trigger_ht_element->trigger);
+ assert(!ret);
+
+ ret = lttng_triggers_add(local_triggers, trigger_ht_element->trigger);
+ if (ret < 0) {
+ ret = -1;
+ goto end;
+ }
+
+ i++;
+ }
+
+ /* Passing ownership up */
+ *triggers = local_triggers;
+ local_triggers = NULL;
+
+end:
+ rcu_read_unlock();
+ lttng_triggers_destroy(local_triggers);
+ *_cmd_result = cmd_result;
+ return ret;
+}
+
static
int condition_is_supported(struct lttng_condition *condition)
{
ret = kernel_supports_ring_buffer_snapshot_sample_positions();
break;
}
+ case LTTNG_CONDITION_TYPE_EVENT_RULE_HIT:
+ {
+ /* TODO:
+ * Check for kernel support.
+ * Check for ust support ??
+ */
+ ret = 1;
+ break;
+ }
default:
ret = 1;
}
return ret;
}
/*
 * Check whether an action type can be honoured by the current tracers.
 *
 * Returns 1 if the action is supported, 0 if it is not (negative values
 * are reserved for errors; none are produced at the moment).
 */
static
int action_is_supported(struct lttng_action *action)
{
	int ret;

	switch (lttng_action_get_type(action)) {
	case LTTNG_ACTION_TYPE_NOTIFY:
	case LTTNG_ACTION_TYPE_START_SESSION:
	case LTTNG_ACTION_TYPE_STOP_SESSION:
	case LTTNG_ACTION_TYPE_ROTATE_SESSION:
	case LTTNG_ACTION_TYPE_SNAPSHOT_SESSION:
	{
		/*
		 * TODO: validate that this holds for the kernel domain with
		 * regard to rotation and snapshot; start/stop and notify are
		 * not a problem.
		 */
		/* For now, all action types are supported. */
		ret = 1;
		break;
	}
	case LTTNG_ACTION_TYPE_GROUP:
	{
		/*
		 * TODO: iterate over all the group's inner actions and
		 * validate that each of them is supported.
		 */
		ret = 1;
		break;

	}
	default:
		ret = 1;
	}

	return ret;
}
+
/* Must be called with RCU read lock held. */
static
int bind_trigger_to_matching_session(struct lttng_trigger *trigger,
enum lttng_action_type action_type;
assert(action);
- action_type = lttng_action_get_type_const(action);
+ action_type = lttng_action_get_type(action);
if (action_type == LTTNG_ACTION_TYPE_NOTIFY) {
is_notify = true;
goto end;
lttng_action_group_get_at_index(
action, i);
- action_type = lttng_action_get_type_const(inner_action);
+ action_type = lttng_action_get_type(inner_action);
if (action_type == LTTNG_ACTION_TYPE_NOTIFY) {
is_notify = true;
goto end;
return is_notify;
}
+static bool trigger_name_taken(struct notification_thread_state *state,
+ const struct lttng_trigger *trigger)
+{
+ struct cds_lfht_node *triggers_by_name_uid_ht_node;
+ struct cds_lfht_iter iter;
+
+ /*
+ * No duplicata is allowed in the triggers_by_name_uid_ht.
+ * The match is done against the trigger name and uid.
+ */
+ cds_lfht_lookup(state->triggers_by_name_uid_ht,
+ hash_key_str(trigger->name, lttng_ht_seed),
+ match_name_uid,
+ trigger,
+ &iter);
+ triggers_by_name_uid_ht_node = cds_lfht_iter_get_node(&iter);
+ if (triggers_by_name_uid_ht_node) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+static
+void generate_trigger_name(struct notification_thread_state *state, struct lttng_trigger *trigger, const char **name)
+{
+ /*
+ * Here the offset criteria guarantee an end. This will be a nice
+ * bikeshedding conversation. I would simply generate uuid and use them
+ * as trigger name.
+ */
+ bool taken = false;
+ enum lttng_trigger_status status;
+ do {
+ lttng_trigger_generate_name(trigger, state->trigger_id.name_offset);
+
+ status = lttng_trigger_get_name(trigger, name);
+ assert(status == LTTNG_TRIGGER_STATUS_OK);
+
+ taken = trigger_name_taken(state, trigger);
+ if (taken) {
+ state->trigger_id.name_offset++;
+ }
+ } while (taken || state->trigger_id.name_offset == UINT32_MAX);
+}
+
/*
* FIXME A client's credentials are not checked when registering a trigger.
*
enum lttng_error_code *cmd_result)
{
int ret = 0;
+ int is_supported;
struct lttng_condition *condition;
+ struct lttng_action *action;
struct notification_client *client;
struct notification_client_list *client_list = NULL;
struct lttng_trigger_ht_element *trigger_ht_element = NULL;
struct notification_client_list_element *client_list_element;
+ struct notification_trigger_tokens_ht_element *trigger_tokens_ht_element = NULL;
struct cds_lfht_node *node;
struct cds_lfht_iter iter;
+ const char* trigger_name;
bool free_trigger = true;
struct lttng_evaluation *evaluation = NULL;
struct lttng_credentials object_creds;
+ uid_t object_uid;
+ gid_t object_gid;
enum action_executor_status executor_status;
rcu_read_lock();
+ /* Set the trigger's tracer token */
+ lttng_trigger_set_tracer_token(trigger, state->trigger_id.token_generator);
+
+ if (lttng_trigger_get_name(trigger, &trigger_name) ==
+ LTTNG_TRIGGER_STATUS_UNSET) {
+ generate_trigger_name(state, trigger, &trigger_name);
+ } else if (trigger_name_taken(state, trigger)) {
+ /* Not a fatal error */
+ *cmd_result = LTTNG_ERR_TRIGGER_EXISTS;
+ ret = 0;
+ goto error;
+ }
+
condition = lttng_trigger_get_condition(trigger);
assert(condition);
- ret = condition_is_supported(condition);
- if (ret < 0) {
+ action = lttng_trigger_get_action(trigger);
+ assert(action);
+
+ is_supported = condition_is_supported(condition);
+ if (is_supported < 0) {
goto error;
- } else if (ret == 0) {
+ } else if (is_supported == 0) {
*cmd_result = LTTNG_ERR_NOT_SUPPORTED;
goto error;
- } else {
- /* Feature is supported, continue. */
+ }
+
+ is_supported = action_is_supported(action);
+ if (is_supported < 0) {
+ goto error;
+ } else if (is_supported == 0) {
ret = 0;
+ *cmd_result = LTTNG_ERR_NOT_SUPPORTED;
+ goto error;
}
trigger_ht_element = zmalloc(sizeof(*trigger_ht_element));
/* Add trigger to the trigger_ht. */
cds_lfht_node_init(&trigger_ht_element->node);
+ cds_lfht_node_init(&trigger_ht_element->node_by_name_uid);
trigger_ht_element->trigger = trigger;
node = cds_lfht_add_unique(state->triggers_ht,
lttng_condition_hash(condition),
- match_condition,
- condition,
+ match_trigger,
+ trigger,
&trigger_ht_element->node);
if (node != &trigger_ht_element->node) {
/* Not a fatal error, simply report it to the client. */
goto error_free_ht_element;
}
+ node = cds_lfht_add_unique(state->triggers_by_name_uid_ht,
+ hash_key_str(trigger_name, lttng_ht_seed), match_name_uid,
+ trigger, &trigger_ht_element->node_by_name_uid);
+ if (node != &trigger_ht_element->node_by_name_uid) {
+ /* Not a fatal error, simply report it to the client. */
+ cds_lfht_del(state->triggers_ht, &trigger_ht_element->node);
+ *cmd_result = LTTNG_ERR_TRIGGER_EXISTS;
+ goto error_free_ht_element;
+ }
+
+ if (lttng_condition_get_type(condition) == LTTNG_CONDITION_TYPE_EVENT_RULE_HIT) {
+ uint64_t error_counter_index = 0;
+ enum trigger_error_accounting_status error_accounting_status;
+
+ trigger_tokens_ht_element = zmalloc(sizeof(*trigger_tokens_ht_element));
+ if (!trigger_tokens_ht_element) {
+ ret = -1;
+ cds_lfht_del(state->triggers_ht, &trigger_ht_element->node);
+ cds_lfht_del(state->triggers_by_name_uid_ht, &trigger_ht_element->node_by_name_uid);
+ goto error;
+ }
+
+ error_accounting_status = trigger_error_accounting_register_trigger(
+ trigger, &error_counter_index);
+ if (error_accounting_status != TRIGGER_ERROR_ACCOUNTING_STATUS_OK) {
+ ERR("Error registering trigger for error accounting");
+ cds_lfht_del(state->triggers_ht, &trigger_ht_element->node);
+ cds_lfht_del(state->triggers_by_name_uid_ht, &trigger_ht_element->node_by_name_uid);
+ *cmd_result = LTTNG_ERR_TRIGGER_GROUP_ERROR_COUNTER_FULL;
+ goto error_free_ht_element;
+ }
+
+ lttng_trigger_set_error_counter_index(trigger, error_counter_index);
+
+ /* Add trigger token to the trigger_tokens_ht. */
+ cds_lfht_node_init(&trigger_tokens_ht_element->node);
+ trigger_tokens_ht_element->token = LTTNG_OPTIONAL_GET(trigger->tracer_token);
+ trigger_tokens_ht_element->trigger = trigger;
+
+ node = cds_lfht_add_unique(state->trigger_tokens_ht,
+ hash_key_u64(&trigger_tokens_ht_element->token, lttng_ht_seed),
+ match_trigger_token,
+ &trigger_tokens_ht_element->token,
+ &trigger_tokens_ht_element->node);
+ if (node != &trigger_tokens_ht_element->node) {
+ /* TODO: THIS IS A FATAL ERROR... should never happen */
+ /* Not a fatal error, simply report it to the client. */
+ *cmd_result = LTTNG_ERR_TRIGGER_EXISTS;
+ cds_lfht_del(state->triggers_ht, &trigger_ht_element->node);
+ cds_lfht_del(state->triggers_by_name_uid_ht, &trigger_ht_element->node_by_name_uid);
+ trigger_error_accounting_unregister_trigger(trigger);
+ goto error_free_ht_element;
+ }
+ }
+
/*
* Ownership of the trigger and of its wrapper was transfered to
- * the triggers_ht.
+ * the triggers_ht. Same for token ht element if necessary.
*/
+ trigger_tokens_ht_element = NULL;
trigger_ht_element = NULL;
free_trigger = false;
switch (get_condition_binding_object(condition)) {
case LTTNG_OBJECT_TYPE_SESSION:
ret = evaluate_session_condition_for_client(condition, state,
- &evaluation, &object_creds.uid,
- &object_creds.gid);
+ &evaluation, &object_uid,
+ &object_gid);
break;
case LTTNG_OBJECT_TYPE_CHANNEL:
ret = evaluate_channel_condition_for_client(condition, state,
- &evaluation, &object_creds.uid,
- &object_creds.gid);
+ &evaluation, &object_uid,
+ &object_gid);
break;
case LTTNG_OBJECT_TYPE_NONE:
ret = 0;
- goto error_put_client_list;
+ break;
case LTTNG_OBJECT_TYPE_UNKNOWN:
default:
ret = -1;
- goto error_put_client_list;
+ break;
}
if (ret) {
goto error_put_client_list;
}
+ LTTNG_OPTIONAL_SET(&object_creds.uid, object_uid);
+ LTTNG_OPTIONAL_SET(&object_creds.gid, object_gid);
+
DBG("Newly registered trigger's condition evaluated to %s",
evaluation ? "true" : "false");
if (!evaluation) {
/* Evaluation yielded nothing. Normal exit. */
ret = 0;
- goto error_put_client_list;
+ goto end;
}
/*
*/
WARN("No space left when enqueuing action associated to newly registered trigger");
ret = 0;
- goto error_put_client_list;
+ goto end;
default:
abort();
}
+end:
+ /* Increment the trigger unique id generator */
+ state->trigger_id.token_generator++;
*cmd_result = LTTNG_OK;
error_put_client_list:
notification_client_list_put(client_list);
error_free_ht_element:
- free(trigger_ht_element);
+ if (trigger_ht_element) {
+ /* Delayed removal due to RCU constraint on delete. */
+ call_rcu(&trigger_ht_element->rcu_node, free_lttng_trigger_ht_element_rcu);
+ }
+
+ free(trigger_tokens_ht_element);
error:
if (free_trigger) {
lttng_trigger_destroy(trigger);
rcu_node));
}
+static
+void free_notification_trigger_tokens_ht_element_rcu(struct rcu_head *node)
+{
+ free(caa_container_of(node, struct notification_trigger_tokens_ht_element,
+ rcu_node));
+}
+
static
int handle_notification_thread_command_unregister_trigger(
struct notification_thread_state *state,
cds_lfht_lookup(state->triggers_ht,
lttng_condition_hash(condition),
- match_condition,
- condition,
+ match_trigger,
+ trigger,
&iter);
triggers_ht_node = cds_lfht_iter_get_node(&iter);
if (!triggers_ht_node) {
cds_list_for_each_entry_safe(trigger_element, tmp,
&trigger_list->list, node) {
- const struct lttng_condition *current_condition =
- lttng_trigger_get_const_condition(
- trigger_element->trigger);
-
- assert(current_condition);
- if (!lttng_condition_is_equal(condition,
- current_condition)) {
+ if (!lttng_trigger_is_equal(trigger, trigger_element->trigger)) {
continue;
}
}
}
- /*
- * Remove and release the client list from
- * notification_trigger_clients_ht.
- */
- client_list = get_client_list_from_condition(state, condition);
- assert(client_list);
+ if (lttng_condition_get_type(condition) == LTTNG_CONDITION_TYPE_EVENT_RULE_HIT) {
+ struct notification_trigger_tokens_ht_element *trigger_tokens_ht_element;
+ cds_lfht_for_each_entry(state->trigger_tokens_ht, &iter, trigger_tokens_ht_element,
+ node) {
+ if (!lttng_trigger_is_equal(trigger, trigger_tokens_ht_element->trigger)) {
+ continue;
+ }
- /* Put new reference and the hashtable's reference. */
- notification_client_list_put(client_list);
- notification_client_list_put(client_list);
- client_list = NULL;
+ trigger_error_accounting_unregister_trigger(trigger_tokens_ht_element->trigger);
+
+ /* TODO talk to all app and remove it */
+ DBG("[notification-thread] Removed trigger from tokens_ht");
+ cds_lfht_del(state->trigger_tokens_ht,
+ &trigger_tokens_ht_element->node);
+ call_rcu(&trigger_tokens_ht_element->rcu_node, free_notification_trigger_tokens_ht_element_rcu);
+
+ break;
+ }
+ }
+
+ if (is_trigger_action_notify(trigger)) {
+ /*
+ * Remove and release the client list from
+ * notification_trigger_clients_ht.
+ */
+ client_list = get_client_list_from_condition(state, condition);
+ assert(client_list);
+
+ /* Put new reference and the hashtable's reference. */
+ notification_client_list_put(client_list);
+ notification_client_list_put(client_list);
+ client_list = NULL;
+ }
/* Remove trigger from triggers_ht. */
trigger_ht_element = caa_container_of(triggers_ht_node,
struct lttng_trigger_ht_element, node);
+ cds_lfht_del(state->triggers_by_name_uid_ht, &trigger_ht_element->node_by_name_uid);
cds_lfht_del(state->triggers_ht, triggers_ht_node);
/* Release the ownership of the trigger. */
cmd->parameters.session_rotation.location,
&cmd->reply_code);
break;
+ case NOTIFICATION_COMMAND_TYPE_ADD_APPLICATION:
+ ret = handle_notification_thread_command_add_application(
+ handle,
+ state,
+ cmd->parameters.application.read_side_trigger_event_application_pipe,
+ cmd->parameters.application.domain,
+ &cmd->reply_code);
+ break;
+ case NOTIFICATION_COMMAND_TYPE_REMOVE_APPLICATION:
+ ret = handle_notification_thread_command_remove_application(
+ handle,
+ state,
+ cmd->parameters.application.read_side_trigger_event_application_pipe,
+ &cmd->reply_code);
+ break;
+ case NOTIFICATION_COMMAND_TYPE_GET_TOKENS:
+ {
+ struct lttng_triggers *triggers = NULL;
+ ret = handle_notification_thread_command_get_tokens(
+ handle, state, &triggers, &cmd->reply_code);
+ cmd->reply.get_tokens.triggers = triggers;
+ ret = 0;
+ break;
+ }
+ case NOTIFICATION_COMMAND_TYPE_LIST_TRIGGERS:
+ {
+ struct lttng_triggers *triggers = NULL;
+ ret = handle_notification_thread_command_list_triggers(
+ handle,
+ state,
+ cmd->parameters.list_triggers.uid,
+ &triggers,
+ &cmd->reply_code);
+ cmd->reply.list_triggers.triggers = triggers;
+ ret = 0;
+ break;
+ }
case NOTIFICATION_COMMAND_TYPE_QUIT:
DBG("[notification-thread] Received quit command");
cmd->reply_code = LTTNG_OK;
struct notification_thread_state *state,
uid_t object_uid, gid_t object_gid)
{
+ const struct lttng_credentials creds = {
+ .uid = LTTNG_OPTIONAL_INIT_VALUE(object_uid),
+ .gid = LTTNG_OPTIONAL_INIT_VALUE(object_gid),
+ };
+
return notification_client_list_send_evaluation(client_list,
lttng_trigger_get_const_condition(trigger), evaluation,
lttng_trigger_get_credentials(trigger),
- &(struct lttng_credentials){
- .uid = object_uid, .gid = object_gid},
+ &creds,
client_handle_transmission_status_wrapper, state);
}
}
if (source_object_creds) {
- if (client->uid != source_object_creds->uid &&
- client->gid != source_object_creds->gid &&
+ if (client->uid != lttng_credentials_get_uid(source_object_creds) &&
+ client->gid != lttng_credentials_get_gid(source_object_creds) &&
client->uid != 0) {
/*
* Client is not allowed to monitor this
}
}
- if (client->uid != trigger_creds->uid && client->gid != trigger_creds->gid) {
+ if (client->uid != lttng_credentials_get_uid(trigger_creds) && client->gid != lttng_credentials_get_gid(trigger_creds)) {
DBG("[notification-thread] Skipping client at it does not have the permission to receive notification for this trigger");
goto skip_client;
}
return ret;
}
+/*
+ * Read one tracer notification (and its optional capture payload) from
+ * an event source pipe.
+ *
+ * Returns a new lttng_trigger_notification (owned by the caller) on
+ * success, or NULL on error.
+ */
+static struct lttng_trigger_notification *receive_notification(int pipe,
+		enum lttng_domain_type domain)
+{
+	int ret;
+	uint64_t id;
+	struct lttng_trigger_notification *notification = NULL;
+	char *capture_buffer = NULL;
+	size_t capture_buffer_size;
+	void *reception_buffer;
+	size_t reception_size;
+
+	struct lttng_ust_trigger_notification ust_notification;
+	struct lttng_kernel_trigger_notification kernel_notification;
+
+	/* The notification wire format depends on the tracer domain. */
+	switch (domain) {
+	case LTTNG_DOMAIN_UST:
+		reception_buffer = (void *) &ust_notification;
+		reception_size = sizeof(ust_notification);
+		break;
+	case LTTNG_DOMAIN_KERNEL:
+		reception_buffer = (void *) &kernel_notification;
+		reception_size = sizeof(kernel_notification);
+		break;
+	default:
+		/*
+		 * Unsupported domain: abort() rather than assert(0) so that
+		 * NDEBUG builds cannot fall through and use the
+		 * uninitialized reception buffer.
+		 */
+		abort();
+	}
+
+	/*
+	 * The monitoring pipe only holds messages smaller than PIPE_BUF,
+	 * ensuring that read/write of sampling messages are atomic.
+	 */
+	ret = lttng_read(pipe, reception_buffer, reception_size);
+	if (ret != reception_size) {
+		/*
+		 * TODO(review): a short read can occur when a traced
+		 * application is killed; returning NULL lets the caller
+		 * decide whether this should tear down the thread. The
+		 * error-handling policy here should be revisited.
+		 */
+		PERROR("Failed to read from event source pipe (fd = %i, size to read=%zu, ret=%d)",
+				pipe, reception_size, ret);
+		goto end;
+	}
+
+	switch (domain) {
+	case LTTNG_DOMAIN_UST:
+		id = ust_notification.id;
+		capture_buffer_size =
+				ust_notification.capture_buf_size;
+		break;
+	case LTTNG_DOMAIN_KERNEL:
+		id = kernel_notification.id;
+		capture_buffer_size =
+				kernel_notification.capture_buf_size;
+		break;
+	default:
+		/* Unreachable: domain was validated above. */
+		abort();
+	}
+
+	if (capture_buffer_size == 0) {
+		/* No captured fields accompany this notification. */
+		capture_buffer = NULL;
+		goto skip_capture;
+	}
+
+	capture_buffer = zmalloc(capture_buffer_size);
+	if (!capture_buffer) {
+		ERR("[notification-thread] Failed to allocate capture buffer");
+		goto end;
+	}
+
+	/* Fetch the additional payload (captured fields). */
+	ret = lttng_read(pipe, capture_buffer, capture_buffer_size);
+	if (ret != capture_buffer_size) {
+		/* See the short-read note on the first read above. */
+		ERR("[notification-thread] Failed to read from event source pipe (fd = %i)",
+				pipe);
+		goto end;
+	}
+
+skip_capture:
+	notification = lttng_trigger_notification_create(
+			id, domain, capture_buffer, capture_buffer_size);
+	if (notification == NULL) {
+		goto end;
+	}
+
+	/*
+	 * Ownership of the capture buffer was transferred to the
+	 * lttng_trigger_notification object.
+	 */
+	capture_buffer = NULL;
+
+end:
+	free(capture_buffer);
+	return notification;
+}
+
+/*
+ * Handle a notification emitted by a tracer (kernel or user space) on an
+ * event source pipe: read the notification, look up the trigger owning
+ * the token, build an event-rule evaluation and enqueue the trigger's
+ * actions for execution.
+ *
+ * Returns 0 on success (including the benign case where no trigger is
+ * registered for the token anymore), -1 on fatal error.
+ */
+int handle_notification_thread_event(struct notification_thread_state *state,
+		int pipe,
+		enum lttng_domain_type domain)
+{
+	int ret;
+	enum lttng_trigger_status trigger_status;
+	struct cds_lfht_node *node;
+	struct cds_lfht_iter iter;
+	struct notification_trigger_tokens_ht_element *element;
+	struct lttng_evaluation *evaluation = NULL;
+	struct lttng_trigger_notification *notification = NULL;
+	enum action_executor_status executor_status;
+	struct notification_client_list *client_list = NULL;
+	const char *trigger_name;
+	unsigned int capture_count = 0;
+
+	notification = receive_notification(pipe, domain);
+	if (notification == NULL) {
+		ERR("[notification-thread] Error receiving notification from tracer (fd = %i, domain = %s)",
+				pipe, lttng_domain_type_str(domain));
+		ret = -1;
+		goto end;
+	}
+
+	/* Find triggers associated with this token. */
+	rcu_read_lock();
+	cds_lfht_lookup(state->trigger_tokens_ht,
+			hash_key_u64(&notification->id, lttng_ht_seed),
+			match_trigger_token, &notification->id, &iter);
+	node = cds_lfht_iter_get_node(&iter);
+	if (caa_unlikely(!node)) {
+		/*
+		 * Not an error: this happens when a trigger is unregistered
+		 * while the tracer still has notifications bearing its token
+		 * in flight (observed with high-frequency event rules such
+		 * as sched_switch). Simply discard the notification.
+		 */
+		ret = 0;
+		goto end_unlock;
+	}
+	element = caa_container_of(node,
+			struct notification_trigger_tokens_ht_element,
+			node);
+
+	if (!lttng_trigger_should_fire(element->trigger)) {
+		ret = 0;
+		goto end_unlock;
+	}
+
+	lttng_trigger_fire(element->trigger);
+
+	trigger_status = lttng_trigger_get_name(element->trigger, &trigger_name);
+	assert(trigger_status == LTTNG_TRIGGER_STATUS_OK);
+
+	/*
+	 * Error paths below must exit through end_unlock (not end) so that
+	 * the RCU read-side lock is released and the notification freed.
+	 */
+	if (LTTNG_CONDITION_STATUS_OK !=
+			lttng_condition_event_rule_get_capture_descriptor_count(
+					lttng_trigger_get_const_condition(
+							element->trigger),
+					&capture_count)) {
+		ERR("Get capture count");
+		ret = -1;
+		goto end_unlock;
+	}
+
+	if (!notification->capture_buffer && capture_count != 0) {
+		ERR("Expected capture but capture buffer is null");
+		ret = -1;
+		goto end_unlock;
+	}
+
+	evaluation = lttng_evaluation_event_rule_create(
+			container_of(lttng_trigger_get_const_condition(
+						element->trigger),
+					struct lttng_condition_event_rule,
+					parent),
+			trigger_name,
+			notification->capture_buffer,
+			notification->capture_buf_size, false);
+	if (evaluation == NULL) {
+		ERR("[notification-thread] Failed to create event rule hit evaluation");
+		ret = -1;
+		goto end_unlock;
+	}
+
+	client_list = get_client_list_from_condition(state,
+			lttng_trigger_get_const_condition(element->trigger));
+	/*
+	 * NOTE(review): the evaluation is never explicitly destroyed on the
+	 * paths below; this assumes action_executor_enqueue() takes
+	 * ownership of it in all cases. Confirm against the executor's
+	 * contract.
+	 */
+	executor_status = action_executor_enqueue(state->executor,
+			element->trigger, evaluation, NULL, client_list);
+	switch (executor_status) {
+	case ACTION_EXECUTOR_STATUS_OK:
+		ret = 0;
+		break;
+	case ACTION_EXECUTOR_STATUS_OVERFLOW:
+	{
+		struct notification_client_list_element *client_list_element,
+				*tmp;
+
+		/*
+		 * Not a fatal error; this is expected and simply means the
+		 * executor has too much work queued already.
+		 */
+		ret = 0;
+
+		if (!client_list) {
+			break;
+		}
+
+		/* Warn clients that a notification (or more) was dropped. */
+		pthread_mutex_lock(&client_list->lock);
+		cds_list_for_each_entry_safe(client_list_element, tmp,
+				&client_list->list, node) {
+			enum client_transmission_status transmission_status;
+			struct notification_client *client =
+					client_list_element->client;
+
+			pthread_mutex_lock(&client->lock);
+			ret = client_notification_overflow(client);
+			if (ret) {
+				/* Fatal error. */
+				goto next_client;
+			}
+
+			transmission_status =
+					client_flush_outgoing_queue(client);
+			ret = client_handle_transmission_status(
+					client, transmission_status, state);
+			if (ret) {
+				/* Fatal error. */
+				goto next_client;
+			}
+next_client:
+			pthread_mutex_unlock(&client->lock);
+			if (ret) {
+				break;
+			}
+		}
+		pthread_mutex_unlock(&client_list->lock);
+		break;
+	}
+	case ACTION_EXECUTOR_STATUS_INVALID:
+	case ACTION_EXECUTOR_STATUS_ERROR:
+		/* Fatal error, shut down everything. */
+		ERR("Fatal error encountered while enqueuing action");
+		ret = -1;
+		goto end_unlock;
+	default:
+		/* Unhandled error. */
+		abort();
+	}
+
+end_unlock:
+	lttng_trigger_notification_destroy(notification);
+	notification_client_list_put(client_list);
+	rcu_read_unlock();
+end:
+	return ret;
+}
+
int handle_notification_thread_channel_sample(
struct notification_thread_state *state, int pipe,
enum lttng_domain_type domain)
*/
DBG("[notification-thread] Received a sample for an unknown channel from consumerd, key = %" PRIu64 " in %s domain",
latest_sample.key.key,
- domain == LTTNG_DOMAIN_KERNEL ? "kernel" :
- "user space");
+ lttng_domain_type_str(domain));
goto end_unlock;
}
channel_info = caa_container_of(node, struct channel_info,
}
channel_creds = (typeof(channel_creds)) {
- .uid = channel_info->session_info->uid,
- .gid = channel_info->session_info->gid,
+ .uid = LTTNG_OPTIONAL_INIT_VALUE(channel_info->session_info->uid),
+ .gid = LTTNG_OPTIONAL_INIT_VALUE(channel_info->session_info->gid),
};
trigger_list = caa_container_of(node, struct lttng_channel_trigger_list,
cds_list_for_each_entry(trigger_list_element, &trigger_list->list,
node) {
const struct lttng_condition *condition;
+ const struct lttng_action *action;
struct lttng_trigger *trigger;
struct notification_client_list *client_list = NULL;
struct lttng_evaluation *evaluation = NULL;
trigger = trigger_list_element->trigger;
condition = lttng_trigger_get_const_condition(trigger);
assert(condition);
+ action = lttng_trigger_get_const_action(trigger);
+
+ if (!lttng_trigger_should_fire(trigger)) {
+ goto put_list;
+ }
+
+ lttng_trigger_fire(trigger);
+
+ /* Notify actions are the only type currently supported. */
+ /* TODO support other type of action */
+ assert(lttng_action_get_type(action) ==
+ LTTNG_ACTION_TYPE_NOTIFY);
/*
* Check if any client is subscribed to the result of this