/*
- * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
- * Copyright (C) 2016 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
+ * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2 only,
- * as published by the Free Software Foundation.
+ * SPDX-License-Identifier: GPL-2.0-only
*
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#define _LGPL_SOURCE
#include <errno.h>
+#include <fcntl.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <urcu/compiler.h>
#include <signal.h>
+#include <common/bytecode/bytecode.h>
#include <common/common.h>
+#include <common/hashtable/utils.h>
+#include <lttng/event-rule/event-rule.h>
+#include <lttng/event-rule/event-rule-internal.h>
+#include <lttng/event-rule/tracepoint.h>
+#include <lttng/condition/condition.h>
+#include <lttng/condition/event-rule-internal.h>
+#include <lttng/condition/event-rule.h>
+#include <lttng/trigger/trigger-internal.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include "buffer-registry.h"
+#include "condition-internal.h"
#include "fd-limit.h"
#include "health-sessiond.h"
#include "ust-app.h"
#include "lttng-sessiond.h"
#include "notification-thread-commands.h"
#include "rotate.h"
+#include "event.h"
+#include "trigger-error-accounting.h"
+
+
+struct lttng_ht *ust_app_ht;
+struct lttng_ht *ust_app_ht_by_sock;
+struct lttng_ht *ust_app_ht_by_notify_sock;
static
int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);
{
struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
ua_sess->tracing_id, ua_sess->bits_per_long,
- ua_sess->real_credentials.uid);
+ lttng_credentials_get_uid(&ua_sess->real_credentials));
if (!reg_uid) {
goto error;
}
free(ua_event);
}
+/*
+ * Delete an ust app token event_rule safely.
+ *
+ * The RCU read lock must be held by the caller while the rule is still
+ * reachable from the app's tokens_ht hash table. TODO(review): confirm the
+ * RCU requirement against every call site.
+ */
+static
+void delete_ust_app_token_event_rule(int sock, struct ust_app_token_event_rule *ua_token,
+		struct ust_app *app)
+{
+	int ret;
+
+	assert(ua_token);
+
+	/* free(NULL) is a no-op; no need to guard the exclusion pointer. */
+	free(ua_token->exclusion);
+	if (ua_token->obj != NULL) {
+		pthread_mutex_lock(&app->sock_lock);
+		ret = ustctl_release_object(sock, ua_token->obj);
+		pthread_mutex_unlock(&app->sock_lock);
+		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+			ERR("UST app sock %d release event obj failed with ret %d",
+					sock, ret);
+		}
+		free(ua_token->obj);
+	}
+	/* Drop the trigger reference taken at allocation time. */
+	lttng_trigger_put(ua_token->trigger);
+	free(ua_token);
+}
+
/*
* Release ust data object of the given stream.
*
{
int ret, sock;
struct ust_app_session *ua_sess, *tmp_ua_sess;
+ struct lttng_ht_iter iter;
+ struct ust_app_token_event_rule *token;
/*
* The session list lock must be held during this function to guarantee
rcu_read_unlock();
}
+	/* Wipe all tokens associated with the app. */
+ cds_lfht_for_each_entry(app->tokens_ht->ht, &iter.iter, token,
+ node.node) {
+ ret = lttng_ht_del(app->tokens_ht, &iter);
+ assert(!ret);
+ delete_ust_app_token_event_rule(app->sock, token, app);
+ }
+
ht_cleanup_push(app->sessions);
ht_cleanup_push(app->ust_sessions_objd);
ht_cleanup_push(app->ust_objd);
+ ht_cleanup_push(app->tokens_ht);
+
+	/* This can happen if the trigger setup failed, e.g. for a killed app. */
+ if (app->token_communication.handle) {
+ ustctl_release_object(sock, app->token_communication.handle);
+ free(app->token_communication.handle);
+ }
+
+ lttng_pipe_destroy(app->token_communication.trigger_event_pipe);
/*
* Wait until we have deleted the application from the sock hash table
* Alloc new UST app channel.
*/
static
-struct ust_app_channel *alloc_ust_app_channel(char *name,
+struct ust_app_channel *alloc_ust_app_channel(const char *name,
struct ust_app_session *ua_sess,
struct lttng_ust_channel_attr *attr)
{
return NULL;
}
+/*
+ * Alloc new UST app token event rule.
+ *
+ * Takes a reference on the trigger; it is released by
+ * delete_ust_app_token_event_rule().
+ *
+ * Return the allocated object or NULL on error.
+ */
+static struct ust_app_token_event_rule *alloc_ust_app_token_event_rule(
+		struct lttng_trigger *trigger)
+{
+	struct ust_app_token_event_rule *ua_token;
+	struct lttng_condition *condition = NULL;
+	struct lttng_event_rule *event_rule = NULL;
+	enum lttng_condition_status c_status;
+
+	ua_token = zmalloc(sizeof(struct ust_app_token_event_rule));
+	if (ua_token == NULL) {
+		PERROR("Failed to allocate ust_app_token_event_rule structure");
+		goto error;
+	}
+
+	/* Get reference of the trigger */
+	/* TODO should this be like lttng_event_rule_get with a returned bool? */
+	lttng_trigger_get(trigger);
+
+	ua_token->enabled = 1;
+	ua_token->token = lttng_trigger_get_tracer_token(trigger);
+	lttng_ht_node_init_u64(&ua_token->node, ua_token->token);
+
+	condition = lttng_trigger_get_condition(trigger);
+	assert(condition);
+	assert(lttng_condition_get_type(condition) == LTTNG_CONDITION_TYPE_EVENT_RULE_HIT);
+
+	/*
+	 * Perform the lookup outside of the assert() expression: with NDEBUG,
+	 * asserts are compiled out and the call would otherwise be skipped,
+	 * leaving event_rule NULL.
+	 */
+	c_status = lttng_condition_event_rule_get_rule_mutable(condition, &event_rule);
+	assert(c_status == LTTNG_CONDITION_STATUS_OK);
+	assert(event_rule);
+
+	ua_token->trigger = trigger;
+	ua_token->filter = lttng_event_rule_get_filter_bytecode(event_rule);
+	ua_token->exclusion = lttng_event_rule_generate_exclusions(event_rule);
+	ua_token->error_counter_index = lttng_trigger_get_error_counter_index(trigger);
+
+	/* TODO put capture here? or later*/
+
+	DBG3("UST app token event rule %" PRIu64 " allocated", ua_token->token);
+
+	return ua_token;
+
+error:
+	return NULL;
+}
+
/*
* Alloc new UST app context.
*/
if (uctx) {
memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
if (uctx->ctx == LTTNG_UST_CONTEXT_APP_CONTEXT) {
- char *provider_name = NULL, *ctx_name = NULL;
+ char *provider_name = NULL, *ctx_name = NULL;
provider_name = strdup(uctx->u.app_ctx.provider_name);
ctx_name = strdup(uctx->u.app_ctx.ctx_name);
}
/*
- * Allocate a filter and copy the given original filter.
+ * Create a liblttng-ust filter bytecode from given bytecode.
*
* Return allocated filter or NULL on error.
*/
-static struct lttng_filter_bytecode *copy_filter_bytecode(
- struct lttng_filter_bytecode *orig_f)
+static struct lttng_ust_filter_bytecode *
+create_ust_filter_bytecode_from_bytecode(const struct lttng_bytecode *orig_f)
{
- struct lttng_filter_bytecode *filter = NULL;
+ struct lttng_ust_filter_bytecode *filter = NULL;
/* Copy filter bytecode */
filter = zmalloc(sizeof(*filter) + orig_f->len);
if (!filter) {
- PERROR("zmalloc alloc filter bytecode");
+ PERROR("zmalloc alloc ust filter bytecode");
goto error;
}
+ assert(sizeof(struct lttng_bytecode) ==
+ sizeof(struct lttng_ust_filter_bytecode));
memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
-
error:
return filter;
}
/*
- * Create a liblttng-ust filter bytecode from given bytecode.
+ * Create a liblttng-ust capture bytecode from given bytecode.
*
* Return allocated filter or NULL on error.
*/
-static struct lttng_ust_filter_bytecode *create_ust_bytecode_from_bytecode(
- struct lttng_filter_bytecode *orig_f)
+static struct lttng_ust_capture_bytecode *
+create_ust_capture_bytecode_from_bytecode(const struct lttng_bytecode *orig_f)
{
- struct lttng_ust_filter_bytecode *filter = NULL;
+ struct lttng_ust_capture_bytecode *capture = NULL;
- /* Copy filter bytecode */
- filter = zmalloc(sizeof(*filter) + orig_f->len);
- if (!filter) {
- PERROR("zmalloc alloc ust filter bytecode");
+ /* Copy capture bytecode */
+ capture = zmalloc(sizeof(*capture) + orig_f->len);
+ if (!capture) {
+ PERROR("zmalloc alloc ust capture bytecode");
goto error;
}
- assert(sizeof(struct lttng_filter_bytecode) ==
- sizeof(struct lttng_ust_filter_bytecode));
- memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
+ assert(sizeof(struct lttng_bytecode) ==
+ sizeof(struct lttng_ust_capture_bytecode));
+ memcpy(capture, orig_f, sizeof(*capture) + orig_f->len);
error:
- return filter;
+ return capture;
}
/*
* Return an ust_app_event object or NULL on error.
*/
static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
- const char *name, const struct lttng_filter_bytecode *filter,
+ const char *name, const struct lttng_bytecode *filter,
int loglevel_value,
const struct lttng_event_exclusion *exclusion)
{
return event;
}
+/*
+ * Look up an ust app token event rule in a hash table by token id.
+ *
+ * Return the matching ust_app_token_event_rule or NULL if absent.
+ */
+static struct ust_app_token_event_rule *find_ust_app_token_event_rule(struct lttng_ht *ht,
+		uint64_t token)
+{
+	struct ust_app_token_event_rule *rule = NULL;
+	struct lttng_ht_node_u64 *ht_node;
+	struct lttng_ht_iter ht_iter;
+
+	assert(ht);
+
+	lttng_ht_lookup(ht, &token, &ht_iter);
+	ht_node = lttng_ht_iter_get_node_u64(&ht_iter);
+	if (!ht_node) {
+		DBG2("UST app token %" PRIu64 " not found", token);
+		goto end;
+	}
+
+	rule = caa_container_of(ht_node, struct ust_app_token_event_rule, node);
+end:
+	return rule;
+}
+
/*
* Create the channel context on the tracer.
*
/*
* Set the filter on the tracer.
*/
-static
-int set_ust_event_filter(struct ust_app_event *ua_event,
- struct ust_app *app)
+static int set_ust_filter(struct ust_app *app,
+ const struct lttng_bytecode *bytecode,
+ struct lttng_ust_object_data *ust_object)
{
int ret;
struct lttng_ust_filter_bytecode *ust_bytecode = NULL;
health_code_update();
- if (!ua_event->filter) {
- ret = 0;
+ ust_bytecode = create_ust_filter_bytecode_from_bytecode(bytecode);
+ if (!ust_bytecode) {
+ ret = -LTTNG_ERR_NOMEM;
+ goto error;
+ }
+ pthread_mutex_lock(&app->sock_lock);
+ ret = ustctl_set_filter(app->sock, ust_bytecode,
+ ust_object);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (ret < 0) {
+ if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+ ERR("UST app set filter failed for object %p of app (pid: %d) "
+ "with ret %d", ust_object, app->pid, ret);
+ } else {
+ /*
+ * This is normal behavior, an application can die during the
+ * creation process. Don't report an error so the execution can
+ * continue normally.
+ */
+ ret = 0;
+ DBG3("UST app set filter. Application is dead.");
+ }
goto error;
}
- ust_bytecode = create_ust_bytecode_from_bytecode(ua_event->filter);
+ DBG2("UST filter set for object %p successfully", ust_object);
+
+error:
+ health_code_update();
+ free(ust_bytecode);
+ return ret;
+}
+
+/*
+ * Set a capture bytecode for the passed object.
+ * The seqnum enforce the ordering at runtime and on reception.
+ */
+static int set_ust_capture(struct ust_app *app,
+ const struct lttng_bytecode *bytecode,
+ unsigned int seqnum,
+ struct lttng_ust_object_data *ust_object)
+{
+ int ret;
+ struct lttng_ust_capture_bytecode *ust_bytecode = NULL;
+
+ health_code_update();
+
+ ust_bytecode = create_ust_capture_bytecode_from_bytecode(bytecode);
if (!ust_bytecode) {
ret = -LTTNG_ERR_NOMEM;
goto error;
}
+
+ /* Set the seqnum */
+ ust_bytecode->seqnum = seqnum;
+
pthread_mutex_lock(&app->sock_lock);
- ret = ustctl_set_filter(app->sock, ust_bytecode,
- ua_event->obj);
+ ret = ustctl_set_capture(app->sock, ust_bytecode,
+ ust_object);
pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
- ERR("UST app event %s filter failed for app (pid: %d) "
- "with ret %d", ua_event->attr.name, app->pid, ret);
+ ERR("UST app set capture failed for object %p of app (pid: %d) "
+ "with ret %d", ust_object, app->pid, ret);
} else {
/*
* This is normal behavior, an application can die during the
* continue normally.
*/
ret = 0;
- DBG3("UST app filter event failed. Application is dead.");
+ DBG3("UST app set capture. Application is dead.");
}
goto error;
}
- DBG2("UST filter set successfully for event %s", ua_event->name);
+ DBG2("UST capture set for object %p successfully", ust_object);
error:
health_code_update();
/*
* Set event exclusions on the tracer.
*/
-static
-int set_ust_event_exclusion(struct ust_app_event *ua_event,
- struct ust_app *app)
+static int set_ust_exclusions(struct ust_app *app,
+ struct lttng_event_exclusion *exclusions,
+ struct lttng_ust_object_data *ust_object)
{
int ret;
- struct lttng_ust_event_exclusion *ust_exclusion = NULL;
+ struct lttng_ust_event_exclusion *ust_exclusions = NULL;
- health_code_update();
+ assert(exclusions && exclusions->count > 0);
- if (!ua_event->exclusion || !ua_event->exclusion->count) {
- ret = 0;
- goto error;
- }
+ health_code_update();
- ust_exclusion = create_ust_exclusion_from_exclusion(
- ua_event->exclusion);
- if (!ust_exclusion) {
+ ust_exclusions = create_ust_exclusion_from_exclusion(
+ exclusions);
+ if (!ust_exclusions) {
ret = -LTTNG_ERR_NOMEM;
goto error;
}
pthread_mutex_lock(&app->sock_lock);
- ret = ustctl_set_exclusion(app->sock, ust_exclusion, ua_event->obj);
+ ret = ustctl_set_exclusion(app->sock, ust_exclusions, ust_object);
pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
- ERR("UST app event %s exclusions failed for app (pid: %d) "
- "with ret %d", ua_event->attr.name, app->pid, ret);
+ ERR("UST app exclusions failed for object %p of app (pid: %d) "
+ "with ret %d", ust_object, app->pid, ret);
} else {
/*
* This is normal behavior, an application can die during the
* continue normally.
*/
ret = 0;
- DBG3("UST app event exclusion failed. Application is dead.");
+ DBG3("UST app set exclusions failed. Application is dead.");
}
goto error;
}
- DBG2("UST exclusion set successfully for event %s", ua_event->name);
+ DBG2("UST exclusions set successfully for object %p", ust_object);
error:
health_code_update();
- free(ust_exclusion);
+ free(ust_exclusions);
return ret;
}
/*
* Disable the specified event on to UST tracer for the UST session.
*/
-static int disable_ust_event(struct ust_app *app,
- struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
+static int disable_ust_object(struct ust_app *app,
+ struct lttng_ust_object_data *object)
{
int ret;
health_code_update();
pthread_mutex_lock(&app->sock_lock);
- ret = ustctl_disable(app->sock, ua_event->obj);
+ ret = ustctl_disable(app->sock, object);
pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
- ERR("UST app event %s disable failed for app (pid: %d) "
- "and session handle %d with ret %d",
- ua_event->attr.name, app->pid, ua_sess->handle, ret);
+ ERR("UST app disable failed for object %p app (pid: %d) with ret %d",
+ object, app->pid, ret);
} else {
/*
* This is normal behavior, an application can die during the
goto error;
}
- DBG2("UST app event %s disabled successfully for app (pid: %d)",
- ua_event->attr.name, app->pid);
+ DBG2("UST app object %p disabled successfully for app (pid: %d)",
+ object, app->pid);
error:
health_code_update();
/*
* Enable the specified event on to UST tracer for the UST session.
*/
-static int enable_ust_event(struct ust_app *app,
- struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
+static int enable_ust_object(struct ust_app *app, struct lttng_ust_object_data *ust_object)
{
int ret;
health_code_update();
pthread_mutex_lock(&app->sock_lock);
- ret = ustctl_enable(app->sock, ua_event->obj);
+ ret = ustctl_enable(app->sock, ust_object);
pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
- ERR("UST app event %s enable failed for app (pid: %d) "
- "and session handle %d with ret %d",
- ua_event->attr.name, app->pid, ua_sess->handle, ret);
+ ERR("UST app enable failed for object %p app (pid: %d) with ret %d",
+ ust_object, app->pid, ret);
} else {
/*
* This is normal behavior, an application can die during the
* continue normally.
*/
ret = 0;
- DBG3("UST app enable event failed. Application is dead.");
+ DBG3("UST app enable failed. Application is dead.");
}
goto error;
}
- DBG2("UST app event %s enabled successfully for app (pid: %d)",
- ua_event->attr.name, app->pid);
+ DBG2("UST app object %p enabled successfully for app (pid: %d)",
+ ust_object, app->pid);
error:
health_code_update();
ua_event->handle = ua_event->obj->handle;
- DBG2("UST app event %s created successfully for pid:%d",
- ua_event->attr.name, app->pid);
+ DBG2("UST app event %s created successfully for pid:%d object: %p",
+ ua_event->attr.name, app->pid, ua_event->obj);
health_code_update();
/* Set filter if one is present. */
if (ua_event->filter) {
- ret = set_ust_event_filter(ua_event, app);
+ ret = set_ust_filter(app, ua_event->filter, ua_event->obj);
if (ret < 0) {
goto error;
}
/* Set exclusions for the event */
if (ua_event->exclusion) {
- ret = set_ust_event_exclusion(ua_event, app);
+ ret = set_ust_exclusions(app, ua_event->exclusion, ua_event->obj);
if (ret < 0) {
goto error;
}
* We now need to explicitly enable the event, since it
* is now disabled at creation.
*/
- ret = enable_ust_event(app, ua_sess, ua_event);
+ ret = enable_ust_object(app, ua_event->obj);
if (ret < 0) {
/*
* If we hit an EPERM, something is wrong with our enable call. If
return ret;
}
+/*
+ * Translate an lttng_event_rule into the lttng_ust_trigger representation
+ * understood by the tracer. Only tracepoint event rules are supported (see
+ * the assert below); agent rules are mapped to their default UST event name.
+ */
+static
+void init_ust_trigger_from_event_rule(const struct lttng_event_rule *rule, struct lttng_ust_trigger *trigger)
+{
+	enum lttng_event_rule_status status;
+	enum lttng_loglevel_type loglevel_type;
+	enum lttng_ust_loglevel_type ust_loglevel_type = LTTNG_UST_LOGLEVEL_ALL;
+	int loglevel = -1;
+	const char *pattern;
+
+	/* For now only LTTNG_EVENT_RULE_TYPE_TRACEPOINT are supported */
+	assert(lttng_event_rule_get_type(rule) == LTTNG_EVENT_RULE_TYPE_TRACEPOINT);
+
+	/* Zero-fill also guarantees trigger->name stays NUL-terminated below. */
+	memset(trigger, 0, sizeof(*trigger));
+
+	if (lttng_event_rule_is_agent(rule)) {
+		/*
+		 * Special event for agents
+		 * The actual meat of the event is in the filter that will be
+		 * attached later on.
+		 * Set the default values for the agent event.
+		 */
+		pattern = event_get_default_agent_ust_name(lttng_event_rule_get_domain_type(rule));
+		loglevel = 0;
+		ust_loglevel_type = LTTNG_UST_LOGLEVEL_ALL;
+	} else {
+		status = lttng_event_rule_tracepoint_get_pattern(rule, &pattern);
+		if (status != LTTNG_EVENT_RULE_STATUS_OK) {
+			/* At this point this is a fatal error */
+			assert(0);
+		}
+
+		status = lttng_event_rule_tracepoint_get_log_level_type(
+				rule, &loglevel_type);
+		if (status != LTTNG_EVENT_RULE_STATUS_OK) {
+			/* At this point this is a fatal error */
+			assert(0);
+		}
+
+		/* Map the public loglevel type to its UST counterpart. */
+		switch (loglevel_type) {
+		case LTTNG_EVENT_LOGLEVEL_ALL:
+			ust_loglevel_type = LTTNG_UST_LOGLEVEL_ALL;
+			break;
+		case LTTNG_EVENT_LOGLEVEL_RANGE:
+			ust_loglevel_type = LTTNG_UST_LOGLEVEL_RANGE;
+			break;
+		case LTTNG_EVENT_LOGLEVEL_SINGLE:
+			ust_loglevel_type = LTTNG_UST_LOGLEVEL_SINGLE;
+			break;
+		}
+
+		/* The numeric loglevel is only meaningful for RANGE/SINGLE. */
+		if (loglevel_type != LTTNG_EVENT_LOGLEVEL_ALL) {
+			status = lttng_event_rule_tracepoint_get_log_level(
+					rule, &loglevel);
+			assert(status == LTTNG_EVENT_RULE_STATUS_OK);
+		}
+	}
+
+	trigger->instrumentation = LTTNG_UST_TRACEPOINT;
+	strncpy(trigger->name, pattern, LTTNG_UST_SYM_NAME_LEN - 1);
+	trigger->loglevel_type = ust_loglevel_type;
+	trigger->loglevel = loglevel;
+}
+
+/*
+ * Create the specified event rule token onto the UST tracer for a UST app.
+ *
+ * The app's sock_lock is taken around each tracer communication. Return 0 on
+ * success or a negative ustctl/LTTng error code.
+ */
+static
+int create_ust_token_event_rule(struct ust_app *app, struct ust_app_token_event_rule *ua_token)
+{
+	int ret = 0;
+	struct lttng_ust_trigger trigger;
+	struct lttng_condition *condition = NULL;
+	struct lttng_event_rule *event_rule = NULL;
+	unsigned int capture_bytecode_count = 0;
+
+	health_code_update();
+	assert(app->token_communication.handle);
+
+	condition = lttng_trigger_get_condition(ua_token->trigger);
+	assert(condition);
+	assert(lttng_condition_get_type(condition) == LTTNG_CONDITION_TYPE_EVENT_RULE_HIT);
+
+	lttng_condition_event_rule_get_rule_mutable(condition, &event_rule);
+	assert(event_rule);
+	assert(lttng_event_rule_get_type(event_rule) == LTTNG_EVENT_RULE_TYPE_TRACEPOINT);
+	/* Should we also test for UST at this point, or do we trust all the
+	 * upper level? */
+
+	init_ust_trigger_from_event_rule(event_rule, &trigger);
+
+	trigger.id = ua_token->token;
+	trigger.error_counter_index = ua_token->error_counter_index;
+
+	/* Create UST trigger on tracer */
+	pthread_mutex_lock(&app->sock_lock);
+	ret = ustctl_create_trigger(app->sock, &trigger, app->token_communication.handle, &ua_token->obj);
+	pthread_mutex_unlock(&app->sock_lock);
+	if (ret < 0) {
+		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+			/*
+			 * Removed a leftover debug abort() that made this error
+			 * path crash the session daemon and rendered the ERR()
+			 * unreachable.
+			 */
+			ERR("Error ustctl create trigger %s for app pid: %d with ret %d",
+					trigger.name, app->pid, ret);
+		} else {
+			/*
+			 * This is normal behavior, an application can die during the
+			 * creation process. Don't report an error so the execution can
+			 * continue normally.
+			 */
+			ret = 0;
+			DBG3("UST app create event failed. Application is dead.");
+		}
+		goto error;
+	}
+
+	ua_token->handle = ua_token->obj->handle;
+
+	DBG2("UST app event %s created successfully for pid:%d object: %p",
+			trigger.name, app->pid, ua_token->obj);
+
+	health_code_update();
+
+	/* Set filter if one is present. */
+	if (ua_token->filter) {
+		ret = set_ust_filter(app, ua_token->filter, ua_token->obj);
+		if (ret < 0) {
+			goto error;
+		}
+	}
+
+	/* Set exclusions for the event */
+	if (ua_token->exclusion) {
+		ret = set_ust_exclusions(app, ua_token->exclusion, ua_token->obj);
+		if (ret < 0) {
+			goto error;
+		}
+	}
+
+	/*
+	 * Set the capture bytecodes, one tracer call per bytecode.
+	 * TODO: do we want to emulate what is done with exclusions and pass a
+	 * single object carrying a count of capture bytecodes instead of
+	 * multiple calls?
+	 */
+	capture_bytecode_count = lttng_trigger_get_capture_bytecode_count(ua_token->trigger);
+	for (unsigned int i = 0; i < capture_bytecode_count; i++) {
+		const struct lttng_bytecode *capture_bytecode = lttng_trigger_get_capture_bytecode_at_index(ua_token->trigger, i);
+		ret = set_ust_capture(app, capture_bytecode, i, ua_token->obj);
+		if (ret < 0) {
+			goto error;
+		}
+	}
+
+	/*
+	 * We now need to explicitly enable the event, since it
+	 * is disabled at creation.
+	 */
+	ret = enable_ust_object(app, ua_token->obj);
+	if (ret < 0) {
+		/*
+		 * If we hit an EPERM, something is wrong with our enable call. If
+		 * we get an EEXIST, there is a problem on the tracer side since we
+		 * just created it.
+		 */
+		switch (ret) {
+		case -LTTNG_UST_ERR_PERM:
+			/* Code flow problem */
+			assert(0);
+			/* fallthrough */
+		case -LTTNG_UST_ERR_EXIST:
+			/* It's OK for our use case. */
+			ret = 0;
+			break;
+		default:
+			break;
+		}
+		goto error;
+	}
+	ua_token->enabled = true;
+
+error:
+	health_code_update();
+	return ret;
+}
+
/*
* Copy data between an UST app event and a LTT event.
*/
/* Copy filter bytecode */
if (uevent->filter) {
- ua_event->filter = copy_filter_bytecode(uevent->filter);
+ ua_event->filter = bytecode_copy(uevent->filter);
/* Filter might be NULL here in case of ENONEM. */
}
ua_sess->tracing_id = usess->id;
ua_sess->id = get_next_session_id();
- ua_sess->real_credentials.uid = app->uid;
- ua_sess->real_credentials.gid = app->gid;
- ua_sess->effective_credentials.uid = usess->uid;
- ua_sess->effective_credentials.gid = usess->gid;
+ LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.uid, app->uid);
+ LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.gid, app->gid);
+ LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.uid, usess->uid);
+ LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.gid, usess->gid);
ua_sess->buffer_type = usess->buffer_type;
ua_sess->bits_per_long = app->bits_per_long;
case LTTNG_BUFFER_PER_UID:
ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
DEFAULT_UST_TRACE_UID_PATH,
- ua_sess->real_credentials.uid,
+ lttng_credentials_get_uid(&ua_sess->real_credentials),
app->bits_per_long);
break;
default:
app->uint64_t_alignment, app->long_alignment,
app->byte_order, app->version.major, app->version.minor,
reg_pid->root_shm_path, reg_pid->shm_path,
- ua_sess->effective_credentials.uid,
- ua_sess->effective_credentials.gid, ua_sess->tracing_id,
+ lttng_credentials_get_uid(&ua_sess->effective_credentials),
+ lttng_credentials_get_gid(&ua_sess->effective_credentials),
+ ua_sess->tracing_id,
app->uid);
if (ret < 0) {
/*
*/
static
int create_ust_app_channel_context(struct ust_app_channel *ua_chan,
- struct lttng_ust_context_attr *uctx,
+ struct lttng_ust_context_attr *uctx,
struct ust_app *app)
{
int ret = 0;
{
int ret;
- ret = enable_ust_event(app, ua_sess, ua_event);
+ ret = enable_ust_object(app, ua_event->obj);
if (ret < 0) {
goto error;
}
{
int ret;
- ret = disable_ust_event(app, ua_sess, ua_event);
+ ret = disable_ust_object(app, ua_event->obj);
if (ret < 0) {
goto error;
}
notification_ret = notification_thread_command_add_channel(
notification_thread_handle, session->name,
- ua_sess->effective_credentials.uid,
- ua_sess->effective_credentials.gid, ua_chan->name,
+ lttng_credentials_get_uid(&ua_sess->effective_credentials),
+ lttng_credentials_get_gid(&ua_sess->effective_credentials),
+ ua_chan->name,
ua_chan->key, LTTNG_DOMAIN_UST,
ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
if (notification_ret != LTTNG_OK) {
cmd_ret = notification_thread_command_add_channel(
notification_thread_handle, session->name,
- ua_sess->effective_credentials.uid,
- ua_sess->effective_credentials.gid, ua_chan->name,
+ lttng_credentials_get_uid(&ua_sess->effective_credentials),
+ lttng_credentials_get_gid(&ua_sess->effective_credentials),
+ ua_chan->name,
ua_chan->key, LTTNG_DOMAIN_UST,
ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
if (cmd_ret != LTTNG_OK) {
}
/*
- * Create UST metadata and open it on the tracer side.
+ * Create UST app event and create it on the tracer side.
*
- * Called with UST app session lock held and RCU read side lock.
+ * Called with ust app session mutex held.
*/
-static int create_ust_app_metadata(struct ust_app_session *ua_sess,
- struct ust_app *app, struct consumer_output *consumer)
+static
+int create_ust_app_token_event_rule(struct lttng_trigger *trigger,
+ struct ust_app *app)
{
int ret = 0;
- struct ust_app_channel *metadata;
- struct consumer_socket *socket;
- struct ust_registry_session *registry;
- struct ltt_session *session = NULL;
-
- assert(ua_sess);
- assert(app);
- assert(consumer);
-
- registry = get_session_registry(ua_sess);
- /* The UST app session is held registry shall not be null. */
- assert(registry);
+ struct ust_app_token_event_rule *ua_token;
- pthread_mutex_lock(®istry->lock);
-
- /* Metadata already exists for this registry or it was closed previously */
- if (registry->metadata_key || registry->metadata_closed) {
- ret = 0;
- goto error;
- }
-
- /* Allocate UST metadata */
- metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
- if (!metadata) {
- /* malloc() failed */
+ ua_token = alloc_ust_app_token_event_rule(trigger);
+ if (ua_token == NULL) {
ret = -ENOMEM;
- goto error;
+ goto end;
}
- memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));
-
+ /* Create it on the tracer side */
+ ret = create_ust_token_event_rule(app, ua_token);
+ if (ret < 0) {
+ /*
+ * Not found previously means that it does not exist on the
+ * tracer. If the application reports that the event existed,
+ * it means there is a bug in the sessiond or lttng-ust
+ * (or corruption, etc.)
+ */
+ if (ret == -LTTNG_UST_ERR_EXIST) {
+ ERR("Tracer for application reported that a token event rule being created already existed: "
+ "token = \"%" PRIu64 "\", pid = %d, ppid = %d, uid = %d, gid = %d",
+ lttng_trigger_get_tracer_token(trigger),
+ app->pid, app->ppid, app->uid,
+ app->gid);
+ }
+ goto error;
+ }
+
+ lttng_ht_add_unique_u64(app->tokens_ht, &ua_token->node);
+
+ DBG2("UST app create token event rule %" PRIu64 " for PID %d completed", lttng_trigger_get_tracer_token(trigger),
+ app->pid);
+
+ goto end;
+
+error:
+	/* The caller is already within an RCU read side lock, so this is safe. */
+ delete_ust_app_token_event_rule(-1, ua_token, app);
+end:
+ return ret;
+}
+
+/*
+ * Create UST metadata and open it on the tracer side.
+ *
+ * Called with UST app session lock held and RCU read side lock.
+ */
+static int create_ust_app_metadata(struct ust_app_session *ua_sess,
+ struct ust_app *app, struct consumer_output *consumer)
+{
+ int ret = 0;
+ struct ust_app_channel *metadata;
+ struct consumer_socket *socket;
+ struct ust_registry_session *registry;
+ struct ltt_session *session = NULL;
+
+ assert(ua_sess);
+ assert(app);
+ assert(consumer);
+
+ registry = get_session_registry(ua_sess);
+ /* The UST app session is held registry shall not be null. */
+ assert(registry);
+
+ pthread_mutex_lock(®istry->lock);
+
+ /* Metadata already exists for this registry or it was closed previously */
+ if (registry->metadata_key || registry->metadata_closed) {
+ ret = 0;
+ goto error;
+ }
+
+ /* Allocate UST metadata */
+ metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
+ if (!metadata) {
+ /* malloc() failed */
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));
+
/* Need one fd for the channel. */
ret = lttng_fd_get(LTTNG_FD_APPS, 1);
if (ret < 0) {
struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
{
struct ust_app *lta = NULL;
+ struct lttng_pipe *trigger_event_source_pipe = NULL;
assert(msg);
assert(sock >= 0);
goto error;
}
+ trigger_event_source_pipe = lttng_pipe_open(FD_CLOEXEC);
+ if (!trigger_event_source_pipe) {
+ PERROR("Open trigger pipe");
+ goto error;
+ }
+
lta = zmalloc(sizeof(struct ust_app));
if (lta == NULL) {
PERROR("malloc");
goto error;
}
+ lta->token_communication.trigger_event_pipe = trigger_event_source_pipe;
+
lta->ppid = msg->ppid;
lta->uid = msg->uid;
lta->gid = msg->gid;
lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
lta->ust_sessions_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
lta->notify_sock = -1;
+ lta->tokens_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
/* Copy name and make sure it's NULL terminated. */
strncpy(lta->name, msg->name, sizeof(lta->name));
return ret;
}
+/*
+ * Setup the base trigger group.
+ *
+ * Creates the tracer-side trigger group bound to the write end of the app's
+ * trigger event pipe and registers the read end with the notification thread.
+ *
+ * Return 0 on success else a negative value either an errno code or a
+ * LTTng-UST error code.
+ */
+int ust_app_setup_trigger_group(struct ust_app *app)
+{
+	int ret;
+	int writefd;
+	struct lttng_ust_object_data *group = NULL;
+	enum lttng_error_code lttng_ret;
+	enum trigger_error_accounting_status trigger_error_accounting_status;
+
+	assert(app);
+
+	/* Get the write side of the pipe */
+	writefd = lttng_pipe_get_writefd(app->token_communication.trigger_event_pipe);
+
+	pthread_mutex_lock(&app->sock_lock);
+	ret = ustctl_create_trigger_group(app->sock, writefd, &group);
+	pthread_mutex_unlock(&app->sock_lock);
+	if (ret < 0) {
+		if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
+			ERR("UST app %d create_trigger_group failed with ret %d, trigger pipe %d", app->sock, ret, writefd);
+		} else {
+			DBG("UST app %d create trigger group failed. Application is dead", app->sock);
+		}
+		goto end;
+	}
+
+	lttng_ret = notification_thread_command_add_application(
+			notification_thread_handle, lttng_pipe_get_readfd(app->token_communication.trigger_event_pipe), LTTNG_DOMAIN_UST);
+	if (lttng_ret != LTTNG_OK) {
+		/*
+		 * TODO(review): map lttng_ret to a proper error code; 'group'
+		 * is leaked on this path and should be released.
+		 */
+		ret = -1;
+		ERR("Failed to add application to notification thread");
+		goto end;
+	}
+
+	/* Assign handle only when the complete setup is valid */
+	app->token_communication.handle = group;
+
+	trigger_error_accounting_status = trigger_error_accounting_register_app(app);
+	if (trigger_error_accounting_status != TRIGGER_ERROR_ACCOUNTING_STATUS_OK) {
+		ERR("Failed to setup trigger error accounting for app");
+		ret = -1;
+		goto end;
+	}
+
+end:
+	return ret;
+}
+
/*
* Unregister app by removing it from the global traceable app list and freeing
* the data struct.
*/
void ust_app_unregister(int sock)
{
+ enum lttng_error_code ret_code;
struct ust_app *lta;
struct lttng_ht_node_ulong *node;
struct lttng_ht_iter ust_app_sock_iter;
lta->pid);
}
+ /* trigger handle can be null in certain scenario such as a dead app */
+ if (lta->token_communication.handle) {
+ int fd = lttng_pipe_get_readfd(
+ lta->token_communication.trigger_event_pipe);
+
+ ret_code = notification_thread_command_remove_application(
+ notification_thread_handle,
+ fd);
+ if (ret_code != LTTNG_OK) {
+ ERR("Failed to remove application from notification thread");
+ }
+ }
+
/* Free memory */
call_rcu(<a->pid_n.head, delete_ust_app_rcu);
ret = ust_app_channel_allocate(ua_sess, uchan,
LTTNG_UST_CHAN_PER_CPU, usess,
&ua_chan);
- if (ret == 0) {
- ret = ust_app_channel_send(app, usess,
- ua_sess, ua_chan);
- } else {
- goto end;
+ if (ret < 0) {
+ goto error;
+ }
+
+ ret = ust_app_channel_send(app, usess,
+ ua_sess, ua_chan);
+ if (ret) {
+ goto error;
}
/* Add contexts. */
ret = create_ust_app_channel_context(ua_chan,
&uctx->ctx, app);
if (ret) {
- goto end;
+ goto error;
}
}
}
+
+error:
if (ret < 0) {
switch (ret) {
case -ENOTCONN:
break;
}
}
-end:
+
if (ret == 0 && _ua_chan) {
/*
* Only return the application's channel on success. Note
return ret;
}
-/*
- * For a specific UST session, create the channel for all registered apps.
- */
-int ust_app_create_channel_glb(struct ltt_ust_session *usess,
- struct ltt_ust_channel *uchan)
-{
- int ret = 0;
- struct cds_lfht_iter iter;
- struct ust_app *app;
-
- assert(usess);
- assert(usess->active);
- assert(uchan);
-
- DBG2("UST app adding channel %s to UST domain for session id %" PRIu64,
- uchan->name, usess->id);
-
- rcu_read_lock();
- /* For every registered applications */
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter, app, pid_n.node) {
- struct ust_app_session *ua_sess;
- int session_was_created = 0;
-
- if (!app->compatible ||
- !trace_ust_pid_tracker_lookup(usess, app->pid)) {
- goto error_rcu_unlock;
- }
-
- /*
- * Create session on the tracer side and add it to app session HT. Note
- * that if session exist, it will simply return a pointer to the ust
- * app session.
- */
- ret = find_or_create_ust_app_session(usess, app, &ua_sess,
- &session_was_created);
- if (ret < 0) {
- switch (ret) {
- case -ENOTCONN:
- /*
- * The application's socket is not valid. Either a bad
- * socket or a timeout on it. We can't inform the caller
- * that for a specific app, the session failed so lets
- * continue here; it is not an error.
- */
- ret = 0;
- goto error_rcu_unlock;
- case -ENOMEM:
- default:
- goto error_rcu_unlock;
- }
- }
-
- if (ua_sess->deleted) {
- continue;
- }
- ret = ust_app_channel_create(usess, ua_sess, uchan, app, NULL);
- if (ret) {
- if (session_was_created) {
- destroy_app_session(app, ua_sess);
- }
- /* Continue to the next application. */
- }
- }
-
-error_rcu_unlock:
- rcu_read_unlock();
- return ret;
-}
-
/*
* Enable event for a specific session and channel on the tracer.
*/
goto end;
}
+ if (ua_sess->enabled) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ goto end;
+ }
+
/* Upon restart, we skip the setup, already done */
if (ua_sess->started) {
goto skip_setup;
/* Indicate that the session has been started once */
ua_sess->started = 1;
+ ua_sess->enabled = 1;
pthread_mutex_unlock(&ua_sess->lock);
}
health_code_update();
+ ua_sess->enabled = 0;
/* Quiescent wait after stopping trace */
pthread_mutex_lock(&app->sock_lock);
return ret;
}
+static
+/*
+ * Synchronize the set of token event rules known to the app with the
+ * triggers currently registered on the notification thread: create the
+ * missing ones and tear down those that no longer exist.
+ */
+void ust_app_synchronize_tokens(struct ust_app *app)
+{
+	int ret = 0;
+	enum lttng_error_code ret_code;
+	enum lttng_trigger_status t_status;
+	struct lttng_ht_iter app_trigger_iter;
+	/* Must be NULL-initialized: destroyed unconditionally at `end`. */
+	struct lttng_triggers *triggers = NULL;
+	struct ust_app_token_event_rule *token_event_rule_element;
+	unsigned int count;
+
+	rcu_read_lock();
+	/*
+	 * TODO: is this necessary to protect against new triggers being
+	 * added? notification_trigger_tokens_ht is still the backing data
+	 * structure for this listing. Leave it there for now.
+	 */
+	pthread_mutex_lock(&notification_trigger_tokens_ht_lock);
+	ret_code = notification_thread_command_get_tokens(
+			notification_thread_handle, &triggers);
+	if (ret_code != LTTNG_OK) {
+		ret = -1;
+		goto end;
+	}
+
+	assert(triggers);
+
+	t_status = lttng_triggers_get_count(triggers, &count);
+	if (t_status != LTTNG_TRIGGER_STATUS_OK) {
+		ret = -1;
+		goto end;
+	}
+
+	for (unsigned int i = 0; i < count; i++) {
+		struct lttng_condition *condition;
+		struct lttng_event_rule *event_rule;
+		struct lttng_trigger *trigger;
+		struct ust_app_token_event_rule *ua_token;
+		uint64_t token;
+
+		trigger = lttng_triggers_get_pointer_of_index(triggers, i);
+		assert(trigger);
+
+		/* TODO: error checking and type checking */
+		token = lttng_trigger_get_tracer_token(trigger);
+		condition = lttng_trigger_get_condition(trigger);
+		(void) lttng_condition_event_rule_get_rule_mutable(condition, &event_rule);
+
+		if (lttng_event_rule_get_domain_type(event_rule) == LTTNG_DOMAIN_KERNEL) {
+			/* Skip kernel related trigger. */
+			continue;
+		}
+
+		/* Create only the triggers not yet known to this app. */
+		ua_token = find_ust_app_token_event_rule(app->tokens_ht, token);
+		if (!ua_token) {
+			ret = create_ust_app_token_event_rule(trigger, app);
+			if (ret < 0) {
+				goto end;
+			}
+		}
+	}
+
+	/*
+	 * Remove all unknown triggers from the app.
+	 * TODO: find a better way than this; do it on the unregister command
+	 * and be specific about the token to remove instead of going over all
+	 * triggers known to the app. This is sub-optimal.
+	 */
+	cds_lfht_for_each_entry (app->tokens_ht->ht, &app_trigger_iter.iter,
+			token_event_rule_element, node.node) {
+		uint64_t token;
+		bool found = false;
+
+		token = token_event_rule_element->token;
+
+		/*
+		 * Check if the app event trigger still exists on the
+		 * notification side.
+		 * TODO: might want to change the backing data struct of the
+		 * lttng_triggers object to allow quick lookup?
+		 * For kernel mostly all of this can be removed once we delete
+		 * on a per trigger basis.
+		 */
+
+		for (unsigned int i = 0; i < count; i++) {
+			struct lttng_trigger *trigger;
+			uint64_t inner_token;
+
+			trigger = lttng_triggers_get_pointer_of_index(
+					triggers, i);
+			assert(trigger);
+
+			inner_token = lttng_trigger_get_tracer_token(trigger);
+
+			if (inner_token == token) {
+				found = true;
+				break;
+			}
+		}
+
+		if (found) {
+			/* Still valid. */
+			continue;
+		}
+
+		/*
+		 * The deletion must not be performed inside assert(): the
+		 * side effect would be compiled out when NDEBUG is defined.
+		 */
+		ret = lttng_ht_del(app->tokens_ht, &app_trigger_iter);
+		assert(!ret);
+
+		(void) disable_ust_object(app, token_event_rule_element->obj);
+
+		delete_ust_app_token_event_rule(app->sock, token_event_rule_element, app);
+	}
+end:
+	if (triggers) {
+		lttng_triggers_destroy(triggers);
+	}
+	/* Release in reverse order of acquisition. */
+	pthread_mutex_unlock(&notification_trigger_tokens_ht_lock);
+	rcu_read_unlock();
+	return;
+}
+
/*
* The caller must ensure that the application is compatible and is tracked
- * by the PID tracker.
+ * by the process attribute trackers.
*/
static
void ust_app_synchronize(struct ltt_ust_session *usess,
* allocated (if necessary) and sent to the application, and
* all enabled contexts will be added to the channel.
*/
- ret = find_or_create_ust_app_channel(usess, ua_sess,
+ ret = find_or_create_ust_app_channel(usess, ua_sess,
app, uchan, &ua_chan);
if (ret) {
/* Tracer is probably gone or ENOMEM. */
if (!app->compatible) {
return;
}
- if (trace_ust_pid_tracker_lookup(usess, app->pid)) {
+ if (trace_ust_id_tracker_lookup(LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID,
+ usess, app->pid) &&
+ trace_ust_id_tracker_lookup(
+ LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID,
+ usess, app->uid) &&
+ trace_ust_id_tracker_lookup(
+ LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID,
+ usess, app->gid)) {
/*
* Synchronize the application's internal tracing configuration
* and start tracing.
}
}
+/*
+ * Synchronize the trigger (token) configuration of a single application.
+ * Skipped for incompatible apps or apps whose trigger group setup never
+ * completed (NULL communication handle, e.g. a dead application).
+ */
+void ust_app_global_update_tokens(struct ust_app *app)
+{
+	DBG2("UST app global update token for app sock %d", app->sock);
+
+	if (!app->compatible) {
+		return;
+	}
+
+	if (app->token_communication.handle == NULL) {
+		WARN("UST app global update token for app sock %d skipped since communication handle is null", app->sock);
+		return;
+	}
+
+	ust_app_synchronize_tokens(app);
+}
+
/*
* Called with session lock held.
*/
rcu_read_unlock();
}
+/* Walk every registered UST application and synchronize its tokens. */
+void ust_app_global_update_all_tokens(void)
+{
+	struct ust_app *ust_app;
+	struct lttng_ht_iter app_iter;
+
+	rcu_read_lock();
+	cds_lfht_for_each_entry(ust_app_ht->ht, &app_iter.iter, ust_app,
+			pid_n.node) {
+		ust_app_global_update_tokens(ust_app);
+	}
+	rcu_read_unlock();
+}
+
+/*
+ * Refresh a trigger's cached error count from the error accounting
+ * subsystem.
+ */
+void ust_app_update_trigger_error_count(struct lttng_trigger *trigger)
+{
+	enum trigger_error_accounting_status status;
+	uint64_t count = 0;
+
+	status = trigger_error_accounting_get_count(trigger, &count);
+	if (status != TRIGGER_ERROR_ACCOUNTING_STATUS_OK) {
+		ERR("Error getting trigger error count");
+	}
+
+	/* On accounting failure, a count of 0 is still published. */
+	lttng_trigger_set_error_count(trigger, count);
+}
+
/*
* Add context to a specific channel for global UST domain.
*/
status = LTTNG_ERR_INVALID;
goto error;
}
- /* Add the UST default trace dir to path. */
+ /* Add the UST default trace dir to path. */
cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
reg_chan, node.node) {
status = consumer_snapshot_channel(socket,
status = LTTNG_ERR_INVALID;
goto error;
}
- cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
+ cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
ua_chan, node.node) {
status = consumer_snapshot_channel(socket,
ua_chan->key, output, 0,
- ua_sess->effective_credentials
- .uid,
- ua_sess->effective_credentials
- .gid,
+ lttng_credentials_get_uid(&ua_sess->effective_credentials),
+ lttng_credentials_get_gid(&ua_sess->effective_credentials),
&trace_path[consumer_path_offset], wait,
nb_packets_per_stream);
switch (status) {
}
status = consumer_snapshot_channel(socket,
registry->metadata_key, output, 1,
- ua_sess->effective_credentials.uid,
- ua_sess->effective_credentials.gid,
+ lttng_credentials_get_uid(&ua_sess->effective_credentials),
+ lttng_credentials_get_gid(&ua_sess->effective_credentials),
&trace_path[consumer_path_offset], wait, 0);
switch (status) {
case LTTNG_OK:
struct buffer_reg_channel *reg_chan;
struct consumer_socket *socket;
+ if (!reg->registry->reg.ust->metadata_key) {
+ /* Skip since no metadata is present */
+ continue;
+ }
+
/* Get consumer socket to use to push the metadata.*/
socket = consumer_find_socket_by_bitness(reg->bits_per_long,
usess->consumer);
ua_chan, node.node) {
ret = consumer_rotate_channel(socket,
ua_chan->key,
- ua_sess->effective_credentials
- .uid,
- ua_sess->effective_credentials
- .gid,
+ lttng_credentials_get_uid(&ua_sess->effective_credentials),
+ lttng_credentials_get_gid(&ua_sess->effective_credentials),
ua_sess->consumer,
/* is_metadata_channel */ false);
if (ret < 0) {
(void) push_metadata(registry, usess->consumer);
ret = consumer_rotate_channel(socket,
registry->metadata_key,
- ua_sess->effective_credentials.uid,
- ua_sess->effective_credentials.gid,
+ lttng_credentials_get_uid(&ua_sess->effective_credentials),
+ lttng_credentials_get_gid(&ua_sess->effective_credentials),
ua_sess->consumer,
/* is_metadata_channel */ true);
if (ret < 0) {
rcu_read_unlock();
return ret;
}
+
+/*
+ * Clear all the channels of a session.
+ *
+ * The session must be inactive; handles both per-UID and per-PID buffer
+ * allocation schemes. The metadata channel is "cleared" by rotating it
+ * behind the scenes after pushing any pending metadata.
+ *
+ * Return LTTNG_OK on success or else an LTTng error code.
+ */
+enum lttng_error_code ust_app_clear_session(struct ltt_session *session)
+{
+	int ret;
+	enum lttng_error_code cmd_ret = LTTNG_OK;
+	struct lttng_ht_iter iter;
+	struct ust_app *app;
+	struct ltt_ust_session *usess = session->ust_session;
+
+	assert(usess);
+
+	rcu_read_lock();
+
+	if (usess->active) {
+		ERR("Expecting inactive session %s (%" PRIu64 ")", session->name, session->id);
+		cmd_ret = LTTNG_ERR_FATAL;
+		goto end;
+	}
+
+	switch (usess->buffer_type) {
+	case LTTNG_BUFFER_PER_UID:
+	{
+		struct buffer_reg_uid *reg;
+
+		/* One buffer registry per (uid, bitness) pair. */
+		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+			struct buffer_reg_channel *reg_chan;
+			struct consumer_socket *socket;
+
+			/* Get consumer socket to use to push the metadata.*/
+			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
+					usess->consumer);
+			if (!socket) {
+				cmd_ret = LTTNG_ERR_INVALID;
+				goto error_socket;
+			}
+
+			/* Clear the data channels. */
+			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
+					reg_chan, node.node) {
+				ret = consumer_clear_channel(socket,
+						reg_chan->consumer_key);
+				if (ret < 0) {
+					goto error;
+				}
+			}
+
+			(void) push_metadata(reg->registry->reg.ust, usess->consumer);
+
+			/*
+			 * Clear the metadata channel.
+			 * Metadata channel is not cleared per se but we still need to
+			 * perform a rotation operation on it behind the scene.
+			 */
+			ret = consumer_clear_channel(socket,
+					reg->registry->reg.ust->metadata_key);
+			if (ret < 0) {
+				goto error;
+			}
+		}
+		break;
+	}
+	case LTTNG_BUFFER_PER_PID:
+	{
+		/* One app session (and registry) per traced application. */
+		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+			struct consumer_socket *socket;
+			struct lttng_ht_iter chan_iter;
+			struct ust_app_channel *ua_chan;
+			struct ust_app_session *ua_sess;
+			struct ust_registry_session *registry;
+
+			ua_sess = lookup_session_by_app(usess, app);
+			if (!ua_sess) {
+				/* Session not associated with this app. */
+				continue;
+			}
+
+			/* Get the right consumer socket for the application. */
+			socket = consumer_find_socket_by_bitness(app->bits_per_long,
+					usess->consumer);
+			if (!socket) {
+				cmd_ret = LTTNG_ERR_INVALID;
+				goto error_socket;
+			}
+
+			registry = get_session_registry(ua_sess);
+			if (!registry) {
+				DBG("Application session is being torn down. Skip application.");
+				continue;
+			}
+
+			/* Clear the data channels. */
+			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
+					ua_chan, node.node) {
+				ret = consumer_clear_channel(socket, ua_chan->key);
+				if (ret < 0) {
+					/* Per-PID buffer and application going away. */
+					if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
+						continue;
+					}
+					goto error;
+				}
+			}
+
+			(void) push_metadata(registry, usess->consumer);
+
+			/*
+			 * Clear the metadata channel.
+			 * Metadata channel is not cleared per se but we still need to
+			 * perform rotation operation on it behind the scene.
+			 */
+			ret = consumer_clear_channel(socket, registry->metadata_key);
+			if (ret < 0) {
+				/* Per-PID buffer and application going away. */
+				if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
+					continue;
+				}
+				goto error;
+			}
+		}
+		break;
+	}
+	default:
+		assert(0);
+		break;
+	}
+
+	cmd_ret = LTTNG_OK;
+	goto end;
+
+error:
+	/* Map the consumer error code to a user-visible LTTng error. */
+	switch (-ret) {
+	case LTTCOMM_CONSUMERD_RELAYD_CLEAR_DISALLOWED:
+		cmd_ret = LTTNG_ERR_CLEAR_RELAY_DISALLOWED;
+		break;
+	default:
+		cmd_ret = LTTNG_ERR_CLEAR_FAIL_CONSUMER;
+	}
+
+error_socket:
+end:
+	rcu_read_unlock();
+	return cmd_ret;
+}
+
+/*
+ * Open a packet in every stream of every data channel of the session.
+ *
+ * This function skips the metadata channel as the begin/end timestamps of a
+ * metadata packet are useless.
+ *
+ * Moreover, opening a packet after a "clear" will cause problems for live
+ * sessions as it will introduce padding that was not part of the first trace
+ * chunk. The relay daemon expects the content of the metadata stream of
+ * successive metadata trace chunks to be strict supersets of one another.
+ *
+ * For example, flushing a packet at the beginning of the metadata stream of
+ * a trace chunk resulting from a "clear" session command will cause the
+ * size of the metadata stream of the new trace chunk to not match the size of
+ * the metadata stream of the original chunk. This will confuse the relay
+ * daemon as the same "offset" in a metadata stream will no longer point
+ * to the same content.
+ *
+ * Return LTTNG_OK on success or else an LTTng error code.
+ */
+enum lttng_error_code ust_app_open_packets(struct ltt_session *session)
+{
+	enum lttng_error_code ret = LTTNG_OK;
+	struct lttng_ht_iter iter;
+	struct ltt_ust_session *usess = session->ust_session;
+
+	assert(usess);
+
+	rcu_read_lock();
+
+	switch (usess->buffer_type) {
+	case LTTNG_BUFFER_PER_UID:
+	{
+		struct buffer_reg_uid *reg;
+
+		/* One buffer registry per (uid, bitness) pair. */
+		cds_list_for_each_entry (
+				reg, &usess->buffer_reg_uid_list, lnode) {
+			struct buffer_reg_channel *reg_chan;
+			struct consumer_socket *socket;
+
+			socket = consumer_find_socket_by_bitness(
+					reg->bits_per_long, usess->consumer);
+			if (!socket) {
+				ret = LTTNG_ERR_FATAL;
+				goto error;
+			}
+
+			cds_lfht_for_each_entry(reg->registry->channels->ht,
+					&iter.iter, reg_chan, node.node) {
+				const int open_ret =
+						consumer_open_channel_packets(
+							socket,
+							reg_chan->consumer_key);
+
+				if (open_ret < 0) {
+					ret = LTTNG_ERR_UNK;
+					goto error;
+				}
+			}
+		}
+		break;
+	}
+	case LTTNG_BUFFER_PER_PID:
+	{
+		struct ust_app *app;
+
+		/* One app session per traced application. */
+		cds_lfht_for_each_entry (
+				ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+			struct consumer_socket *socket;
+			struct lttng_ht_iter chan_iter;
+			struct ust_app_channel *ua_chan;
+			struct ust_app_session *ua_sess;
+			struct ust_registry_session *registry;
+
+			ua_sess = lookup_session_by_app(usess, app);
+			if (!ua_sess) {
+				/* Session not associated with this app. */
+				continue;
+			}
+
+			/* Get the right consumer socket for the application. */
+			socket = consumer_find_socket_by_bitness(
+					app->bits_per_long, usess->consumer);
+			if (!socket) {
+				ret = LTTNG_ERR_FATAL;
+				goto error;
+			}
+
+			registry = get_session_registry(ua_sess);
+			if (!registry) {
+				DBG("Application session is being torn down. Skip application.");
+				continue;
+			}
+
+			cds_lfht_for_each_entry(ua_sess->channels->ht,
+					&chan_iter.iter, ua_chan, node.node) {
+				const int open_ret =
+						consumer_open_channel_packets(
+							socket,
+							ua_chan->key);
+
+				if (open_ret < 0) {
+					/*
+					 * Per-PID buffer and application going
+					 * away.
+					 */
+					if (open_ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
+						continue;
+					}
+
+					ret = LTTNG_ERR_UNK;
+					goto error;
+				}
+			}
+		}
+		break;
+	}
+	default:
+		/* Unknown buffer allocation scheme: programming error. */
+		abort();
+		break;
+	}
+
+error:
+	rcu_read_unlock();
+	return ret;
+}