/*
- * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
- * Copyright (C) 2016 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
+ * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
+ * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2 only,
- * as published by the Free Software Foundation.
+ * SPDX-License-Identifier: GPL-2.0-only
*
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#define _LGPL_SOURCE
#include <errno.h>
+#include <fcntl.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <urcu/compiler.h>
-#include <lttng/ust-error.h>
#include <signal.h>
+#include <common/bytecode/bytecode.h>
+#include <common/compat/errno.h>
#include <common/common.h>
+#include <common/hashtable/utils.h>
+#include <common/shm.h>
+#include <lttng/lttng-error.h>
+#include <lttng/event-rule/event-rule.h>
+#include <lttng/event-rule/event-rule-internal.h>
+#include <lttng/event-rule/tracepoint.h>
+#include <lttng/condition/condition.h>
+#include <lttng/condition/on-event-internal.h>
+#include <lttng/condition/on-event.h>
+#include <lttng/map/map.h>
+#include <lttng/map/map-internal.h>
+#include <lttng/map/map-query-internal.h>
+#include <lttng/map-key.h>
+#include <lttng/map-key-internal.h>
+#include <lttng/trigger/trigger-internal.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include "buffer-registry.h"
+#include "condition-internal.h"
#include "fd-limit.h"
#include "health-sessiond.h"
#include "ust-app.h"
#include "ust-consumer.h"
-#include "ust-ctl.h"
+#include "lttng-ust-ctl.h"
+#include "lttng-ust-error.h"
#include "utils.h"
#include "session.h"
#include "lttng-sessiond.h"
#include "notification-thread-commands.h"
+#include "rotate.h"
+#include "event.h"
+#include "event-notifier-error-accounting.h"
+#include "map.h"
+
+
+struct lttng_ht *ust_app_ht;
+struct lttng_ht *ust_app_ht_by_sock;
+struct lttng_ht *ust_app_ht_by_notify_sock;
static
int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);
static uint64_t _next_channel_key;
static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;
+/* Next available map key. Access under next_map_key_lock. */
+static uint64_t _next_map_key;
+static pthread_mutex_t next_map_key_lock = PTHREAD_MUTEX_INITIALIZER;
+
/* Next available session ID. Access under next_session_id_lock. */
static uint64_t _next_session_id;
static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
return ret;
}
+/*
+ * Return the incremented value of next_map_key.
+ *
+ * Thread-safe: the increment is serialized by next_map_key_lock, so each
+ * caller obtains a unique, monotonically increasing map key (first key
+ * handed out is 1; 0 is never returned).
+ */
+static uint64_t get_next_map_key(void)
+{
+	uint64_t ret;
+
+	pthread_mutex_lock(&next_map_key_lock);
+	ret = ++_next_map_key;
+	pthread_mutex_unlock(&next_map_key_lock);
+	return ret;
+}
+
/*
* Return the atomically incremented value of next_session_id.
*/
attr->switch_timer_interval = uattr->switch_timer_interval;
attr->read_timer_interval = uattr->read_timer_interval;
attr->output = uattr->output;
+ attr->blocking_timeout = uattr->u.s.blocking_timeout;
}
/*
key = _key;
ev_loglevel_value = event->attr.loglevel;
- /* Match the 4 elements of the key: name, filter, loglevel, exclusions */
+ /*
+ * Match the 5 elements of the key:
+ * tracer token, name, filter, loglevel, exclusions
+ */
+
+ if (event->attr.token != key->tracer_token) {
+ goto no_match;
+ }
/* Event name */
if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
* Unique add of an ust app event in the given ht. This uses the custom
* ht_match_ust_app_event match function and the event name as hash.
*/
-static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
+static void add_unique_ust_app_event(struct lttng_ht *events_ht,
struct ust_app_event *event)
{
struct cds_lfht_node *node_ptr;
struct ust_app_ht_key key;
- struct lttng_ht *ht;
- assert(ua_chan);
- assert(ua_chan->events);
+ assert(events_ht);
assert(event);
- ht = ua_chan->events;
key.name = event->attr.name;
key.filter = event->filter;
key.loglevel_type = event->attr.loglevel;
key.exclusion = event->exclusion;
+ key.tracer_token = event->attr.token;
- node_ptr = cds_lfht_add_unique(ht->ht,
- ht->hash_fct(event->node.key, lttng_ht_seed),
+ node_ptr = cds_lfht_add_unique(events_ht->ht,
+ events_ht->hash_fct(event->node.key, lttng_ht_seed),
ht_match_ust_app_event, &key, &event->node.node);
assert(node_ptr == &event->node.node);
}
case LTTNG_BUFFER_PER_UID:
{
struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
- ua_sess->tracing_id, ua_sess->bits_per_long, ua_sess->uid);
+ ua_sess->tracing_id, ua_sess->bits_per_long,
+ lttng_credentials_get_uid(&ua_sess->real_credentials));
if (!reg_uid) {
goto error;
}
free(ua_event);
}
+/*
+ * Delayed reclaim of a ust_app_event_notifier_rule object. This MUST be called
+ * through a call_rcu().
+ *
+ * Only frees the structure itself: the trigger reference, exclusion and
+ * tracer-side object are released by delete_ust_app_event_notifier_rule()
+ * before this callback is scheduled.
+ */
+static
+void free_ust_app_event_notifier_rule_rcu(struct rcu_head *head)
+{
+	struct ust_app_event_notifier_rule *obj = caa_container_of(
+			head, struct ust_app_event_notifier_rule, rcu_head);
+
+	free(obj);
+}
+
+/*
+ * Delete ust app event notifier rule safely.
+ *
+ * Releases the tracer-side object over the app socket (if one was created),
+ * frees the rule's exclusion, drops the rule's reference on its trigger and
+ * reclaims the structure through call_rcu(). The caller must already have
+ * removed the rule from any hash table referencing it.
+ */
+static void delete_ust_app_event_notifier_rule(int sock,
+		struct ust_app_event_notifier_rule *ua_event_notifier_rule,
+		struct ust_app *app)
+{
+	int ret;
+
+	assert(ua_event_notifier_rule);
+
+	/* free(NULL) is a no-op; no need to guard. */
+	free(ua_event_notifier_rule->exclusion);
+
+	if (ua_event_notifier_rule->obj != NULL) {
+		pthread_mutex_lock(&app->sock_lock);
+		ret = ustctl_release_object(sock, ua_event_notifier_rule->obj);
+		pthread_mutex_unlock(&app->sock_lock);
+		/* EPIPE / EXITING simply mean the app died; not an error. */
+		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+			ERR("Failed to release event notifier object: app = '%s' (ppid %d), ret = %d",
+					app->name, (int) app->ppid, ret);
+		}
+
+		free(ua_event_notifier_rule->obj);
+	}
+
+	/* Drop the reference acquired when the rule was allocated. */
+	lttng_trigger_put(ua_event_notifier_rule->trigger);
+	call_rcu(&ua_event_notifier_rule->rcu_head,
+			free_ust_app_event_notifier_rule_rcu);
+}
+
/*
* Release ust data object of the given stream.
*
return ret;
}
+/*
+ * Release ust data object of the given map_counter.
+ *
+ * Releases the tracer-side counter object over the app socket (serialized by
+ * app->sock_lock), returns its file descriptors to the fd limiter and frees
+ * the local object descriptor. A dead app (EPIPE / EXITING) is not an error.
+ *
+ * Return 0 on success or else a negative value.
+ */
+static int release_ust_app_map_counter(int sock, struct ust_app_map_counter *map_counter,
+		struct ust_app *app)
+{
+	int ret = 0;
+
+	assert(map_counter);
+
+	if (map_counter->obj) {
+		pthread_mutex_lock(&app->sock_lock);
+		ret = ustctl_release_object(sock, map_counter->obj);
+		pthread_mutex_unlock(&app->sock_lock);
+		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+			ERR("UST app sock %d release map_counter obj failed with ret %d",
+					sock, ret);
+		}
+		/*
+		 * NOTE(review): returns two fd slots — presumably shm fd +
+		 * wakeup fd, as for streams; confirm against the counter
+		 * object's actual fd usage (delete_ust_app_map puts 1).
+		 */
+		lttng_fd_put(LTTNG_FD_APPS, 2);
+		free(map_counter->obj);
+	}
+
+	return ret;
+}
+
/*
* Delete ust app stream safely. RCU read lock must be held before calling
* this function.
free(stream);
}
+/*
+ * Delete ust app map_counter safely. RCU read lock must be held before calling
+ * this function.
+ *
+ * Best-effort: the release result is deliberately ignored (already logged by
+ * the callee); the local structure is freed immediately, not RCU-deferred.
+ */
+static
+void delete_ust_app_map_counter(int sock, struct ust_app_map_counter *map_counter,
+		struct ust_app *app)
+{
+	assert(map_counter);
+
+	(void) release_ust_app_map_counter(sock, map_counter, app);
+	free(map_counter);
+}
+
/*
* We need to execute ht_destroy outside of RCU read-side critical
* section and outside of call_rcu thread, so we postpone its execution
free(ua_chan);
}
+/*
+ * We need to execute ht_destroy outside of RCU read-side critical
+ * section and outside of call_rcu thread, so we postpone its execution
+ * using ht_cleanup_push. It is simpler than to change the semantic of
+ * the many callers of delete_ust_app_session().
+ */
+static
+void delete_ust_app_map_rcu(struct rcu_head *head)
+{
+	/* Reclaim the map; its events ht is destroyed by the cleanup thread. */
+	struct ust_app_map *ua_map =
+			caa_container_of(head, struct ust_app_map, rcu_head);
+
+	ht_cleanup_push(ua_map->events);
+	free(ua_map);
+}
+
/*
* Extract the lost packet or discarded events counter when the channel is
* being deleted and store the value in the parent channel so we can
end:
rcu_read_unlock();
+ if (session) {
+ session_put(session);
+ }
}
/*
registry = get_session_registry(ua_chan->session);
if (registry) {
ust_registry_channel_del_free(registry, ua_chan->key,
- true);
+ sock >= 0);
+ }
+ /*
+ * A negative socket can be used by the caller when
+ * cleaning-up a ua_chan in an error path. Skip the
+ * accounting in this case.
+ */
+ if (sock >= 0) {
+ save_per_pid_lost_discarded_counters(ua_chan);
}
- save_per_pid_lost_discarded_counters(ua_chan);
}
if (ua_chan->obj != NULL) {
/* Remove channel from application UST object descriptor. */
iter.iter.node = &ua_chan->ust_objd_node.node;
- ret = lttng_ht_del(app->ust_objd, &iter);
+ ret = lttng_ht_del(app->ust_chan_objd, &iter);
assert(!ret);
pthread_mutex_lock(&app->sock_lock);
ret = ustctl_release_object(sock, ua_chan->obj);
call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}
+/*
+ * Aggregate the per-PID counter values of a dying app's map into the
+ * session's "dead app" key/value store so the values survive the app's exit.
+ * 32-bit and 64-bit apps are aggregated into separate stores.
+ *
+ * Only meaningful with per-PID buffers (asserted). Takes the registry lock
+ * and the kv-pair list lock. The 'sock' parameter is unused; it is kept for
+ * symmetry with the other per-app teardown helpers.
+ */
+static
+void copy_ust_app_map_values(int sock, struct ust_app_map *ua_map,
+		struct ust_app *app)
+{
+	struct ltt_ust_map_dead_pid_kv_values *kv_pair_list = ua_map->dead_app_kv_values;
+	struct ust_registry_session *ust_reg_sess;
+	struct ust_registry_map *ust_reg_map;
+	struct ust_registry_map_index_ht_entry *map_index_entry;
+	struct lttng_ht_iter key_iter;
+	struct lttng_ht *dead_app_kv_values;
+
+	assert(app->buffer_type == LTTNG_BUFFER_PER_PID);
+
+	ust_reg_sess = get_session_registry(ua_map->session);
+	if (!ust_reg_sess) {
+		/* Registry can be null on error path during initialization. */
+		return;
+	}
+
+	pthread_mutex_lock(&ust_reg_sess->lock);
+	ust_reg_map = ust_registry_map_find(ust_reg_sess, ua_map->key);
+	pthread_mutex_unlock(&ust_reg_sess->lock);
+	assert(ust_reg_map);
+
+	DBG("Aggregating dead map values");
+
+	pthread_mutex_lock(&kv_pair_list->lock);
+
+	if (app->bits_per_long == 32) {
+		dead_app_kv_values = kv_pair_list->dead_app_kv_values_32bits;
+	} else {
+		dead_app_kv_values = kv_pair_list->dead_app_kv_values_64bits;
+	}
+
+	/* Iterate over all the formated_key -> counter index */
+	cds_lfht_for_each_entry(ust_reg_map->key_string_to_bucket_index_ht->ht,
+			&key_iter.iter, map_index_entry, node.node) {
+		bool overflow = false, underflow = false;
+		int64_t local_value = 0;
+		int ret;
+		size_t dimension_indexes[1] = {map_index_entry->index};
+
+		ret = ustctl_counter_aggregate(ua_map->map_handle,
+				dimension_indexes, &local_value, &overflow,
+				&underflow);
+		if (ret) {
+			ERR("Error getting counter value from the tracer: key = '%s'",
+					map_index_entry->formated_key);
+			goto end;
+		}
+
+		map_add_or_increment_map_values(dead_app_kv_values,
+				map_index_entry->formated_key, local_value,
+				underflow, overflow);
+	}
+
+end:
+	pthread_mutex_unlock(&kv_pair_list->lock);
+	return;
+}
+/*
+ * Delete ust app map safely. RCU read lock must be held before calling
+ * this function.
+ *
+ * The session list lock must be held by the caller.
+ *
+ * Tears down, in order: the map's counters, its events, its per-PID registry
+ * entry (if applicable) and its tracer-side object; the structure itself is
+ * reclaimed through call_rcu().
+ */
+static
+void delete_ust_app_map(int sock, struct ust_app_map *ua_map,
+		struct ust_app *app)
+{
+	int ret;
+	struct lttng_ht_iter iter;
+	struct ust_app_event *ua_event;
+	struct ust_app_map_counter *map_counter, *ctmp;
+	struct ust_registry_session *registry;
+
+	assert(ua_map);
+
+	DBG3("UST app deleting map %s", ua_map->name);
+
+	/* Wipe counters. */
+	cds_list_for_each_entry_safe(map_counter, ctmp, &ua_map->counters.head, list) {
+		cds_list_del(&map_counter->list);
+		delete_ust_app_map_counter(sock, map_counter, app);
+	}
+
+	/* Wipe events */
+	cds_lfht_for_each_entry(ua_map->events->ht, &iter.iter, ua_event,
+			node.node) {
+		ret = lttng_ht_del(ua_map->events, &iter);
+		assert(!ret);
+		delete_ust_app_event(sock, ua_event, app);
+	}
+
+	if (ua_map->session->buffer_type == LTTNG_BUFFER_PER_PID) {
+		/* Wipe and free registry from session registry. */
+		registry = get_session_registry(ua_map->session);
+		/* Registry can be null on error path during initialization. */
+		if (registry) {
+			ust_registry_map_del_free(registry, ua_map->key);
+		}
+	}
+
+	if (ua_map->obj != NULL) {
+		/* Remove map from application UST object descriptor. */
+		iter.iter.node = &ua_map->ust_objd_node.node;
+		ret = lttng_ht_del(app->ust_map_objd, &iter);
+		assert(!ret);
+		pthread_mutex_lock(&app->sock_lock);
+		ret = ustctl_release_object(sock, ua_map->obj);
+		pthread_mutex_unlock(&app->sock_lock);
+		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+			ERR("UST app sock %d release map obj failed with ret %d",
+					sock, ret);
+		}
+		lttng_fd_put(LTTNG_FD_APPS, 1);
+		free(ua_map->obj);
+	}
+	call_rcu(&ua_map->rcu_head, delete_ust_app_map_rcu);
+}
+
int ust_app_register_done(struct ust_app *app)
{
int ret;
* nullified. The session lock MUST be held unless the application is
* in the destroy path.
*
+ * Do not hold the registry lock while communicating with the consumerd, because
+ * doing so causes inter-process deadlocks between consumerd and sessiond with
+ * the metadata request notification.
+ *
* Return 0 on success else a negative value.
*/
static int close_metadata(struct ust_registry_session *registry,
{
int ret;
struct consumer_socket *socket;
+ uint64_t metadata_key;
+ bool registry_was_already_closed;
assert(registry);
assert(consumer);
rcu_read_lock();
pthread_mutex_lock(®istry->lock);
+ metadata_key = registry->metadata_key;
+ registry_was_already_closed = registry->metadata_closed;
+ if (metadata_key != 0) {
+ /*
+		 * Mark the metadata as closed now. Even if the consumer later
+		 * reports an error, it is either not responding or gone, so a
+		 * second close must NOT be emitted for this registry.
+ */
+ registry->metadata_closed = 1;
+ }
+ pthread_mutex_unlock(®istry->lock);
- if (!registry->metadata_key || registry->metadata_closed) {
+ if (metadata_key == 0 || registry_was_already_closed) {
ret = 0;
goto end;
}
consumer);
if (!socket) {
ret = -1;
- goto error;
+ goto end;
}
- ret = consumer_close_metadata(socket, registry->metadata_key);
+ ret = consumer_close_metadata(socket, metadata_key);
if (ret < 0) {
- goto error;
+ goto end;
}
-error:
- /*
- * Metadata closed. Even on error this means that the consumer is not
- * responding or not found so either way a second close should NOT be emit
- * for this registry.
- */
- registry->metadata_closed = 1;
end:
- pthread_mutex_unlock(®istry->lock);
rcu_read_unlock();
return ret;
}
caa_container_of(head, struct ust_app_session, rcu_head);
ht_cleanup_push(ua_sess->channels);
+ ht_cleanup_push(ua_sess->maps);
free(ua_sess);
}
int ret;
struct lttng_ht_iter iter;
struct ust_app_channel *ua_chan;
+ struct ust_app_map *ua_map;
struct ust_registry_session *registry;
assert(ua_sess);
ua_sess->deleted = true;
registry = get_session_registry(ua_sess);
+ /* Registry can be null on error path during initialization. */
if (registry) {
/* Push metadata for application before freeing the application. */
(void) push_metadata(registry, ua_sess->consumer);
delete_ust_app_channel(sock, ua_chan, app);
}
+ cds_lfht_for_each_entry(ua_sess->maps->ht, &iter.iter, ua_map,
+ node.node) {
+ if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
+ copy_ust_app_map_values(sock, ua_map, app);
+ }
+ ret = lttng_ht_del(ua_sess->maps, &iter);
+ assert(!ret);
+ delete_ust_app_map(sock, ua_map, app);
+ }
+
/* In case of per PID, the registry is kept in the session. */
if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
if (reg_pid) {
+ /*
+ * Registry can be null on error path during
+ * initialization.
+ */
buffer_reg_pid_remove(reg_pid);
buffer_reg_pid_destroy(reg_pid);
}
{
int ret, sock;
struct ust_app_session *ua_sess, *tmp_ua_sess;
+ struct lttng_ht_iter iter;
+ struct ust_app_event_notifier_rule *event_notifier_rule;
+ bool event_notifier_write_fd_is_open;
/*
* The session list lock must be held during this function to guarantee
rcu_read_unlock();
}
+ /* Remove the event notifier rules associated with this app. */
+ rcu_read_lock();
+ cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
+ &iter.iter, event_notifier_rule, node.node) {
+ ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &iter);
+ assert(!ret);
+
+ delete_ust_app_event_notifier_rule(
+ app->sock, event_notifier_rule, app);
+ }
+
+ rcu_read_unlock();
+
ht_cleanup_push(app->sessions);
ht_cleanup_push(app->ust_sessions_objd);
- ht_cleanup_push(app->ust_objd);
+ ht_cleanup_push(app->ust_chan_objd);
+ ht_cleanup_push(app->ust_map_objd);
+ ht_cleanup_push(app->token_to_event_notifier_rule_ht);
+
+ /*
+ * This could be NULL if the event notifier setup failed (e.g the app
+ * was killed or the tracer does not support this feature).
+ */
+ if (app->event_notifier_group.object) {
+ enum lttng_error_code ret_code;
+ enum event_notifier_error_accounting_status status;
+
+ const int event_notifier_read_fd = lttng_pipe_get_readfd(
+ app->event_notifier_group.event_pipe);
+
+ ret_code = notification_thread_command_remove_tracer_event_source(
+ notification_thread_handle,
+ event_notifier_read_fd);
+ if (ret_code != LTTNG_OK) {
+ ERR("Failed to remove application tracer event source from notification thread");
+ }
+
+ status = event_notifier_error_accounting_unregister_app(app);
+ if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
+ ERR("Error unregistering app from event notifier error accounting");
+ }
+
+ ustctl_release_object(sock, app->event_notifier_group.object);
+ free(app->event_notifier_group.object);
+ }
+
+ event_notifier_write_fd_is_open = lttng_pipe_is_write_open(
+ app->event_notifier_group.event_pipe);
+ lttng_pipe_destroy(app->event_notifier_group.event_pipe);
+ /*
+ * Release the file descriptors reserved for the event notifier pipe.
+ * The app could be destroyed before the write end of the pipe could be
+ * passed to the application (and closed). In that case, both file
+ * descriptors must be released.
+ */
+ lttng_fd_put(LTTNG_FD_APPS, event_notifier_write_fd_is_open ? 2 : 1);
/*
* Wait until we have deleted the application from the sock hash table
* Alloc new UST app session.
*/
static
-struct ust_app_session *alloc_ust_app_session(struct ust_app *app)
+struct ust_app_session *alloc_ust_app_session(void)
{
struct ust_app_session *ua_sess;
ua_sess->handle = -1;
ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
ua_sess->metadata_attr.type = LTTNG_UST_CHAN_METADATA;
+ ua_sess->maps = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
pthread_mutex_init(&ua_sess->lock, NULL);
return ua_sess;
* Alloc new UST app channel.
*/
static
-struct ust_app_channel *alloc_ust_app_channel(char *name,
+struct ust_app_channel *alloc_ust_app_channel(const char *name,
struct ust_app_session *ua_sess,
struct lttng_ust_channel_attr *attr)
{
ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
ua_chan->attr.read_timer_interval = attr->read_timer_interval;
ua_chan->attr.output = attr->output;
+ ua_chan->attr.blocking_timeout = attr->u.s.blocking_timeout;
}
/* By default, the channel is a per cpu channel. */
ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
return NULL;
}
+/*
+ * Alloc new UST app map.
+ *
+ * The map is enabled by default, receives a fresh globally-unique key from
+ * get_next_map_key() and an empty events hash table. Returns NULL on
+ * allocation failure.
+ */
+static
+struct ust_app_map *alloc_ust_app_map(const char *name,
+		struct ust_app_session *ua_sess)
+{
+	struct ust_app_map *ua_map;
+
+	/* Init most of the default value by allocating and zeroing */
+	ua_map = zmalloc(sizeof(struct ust_app_map));
+	if (ua_map == NULL) {
+		PERROR("malloc");
+		goto error;
+	}
+
+	/* Setup map name */
+	strncpy(ua_map->name, name, sizeof(ua_map->name));
+	ua_map->name[sizeof(ua_map->name) - 1] = '\0';
+
+	ua_map->enabled = 1;
+	ua_map->handle = -1;
+	ua_map->session = ua_sess;
+	ua_map->key = get_next_map_key();
+	/* NOTE(review): lttng_ht_new() failure is not checked here — verify. */
+	ua_map->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
+	lttng_ht_node_init_str(&ua_map->node, ua_map->name);
+
+	CDS_INIT_LIST_HEAD(&ua_map->counters.head);
+
+	DBG3("UST app map %s allocated", ua_map->name);
+
+	return ua_map;
+
+error:
+	return NULL;
+}
+
/*
* Allocate and initialize a UST app stream.
*
return stream;
}
+/*
+ * Allocate and initialize a UST app map_counter.
+ *
+ * The counter starts with no tracer-side object (obj == NULL); the caller is
+ * responsible for creating it and for eventually releasing the counter with
+ * delete_ust_app_map_counter().
+ *
+ * Return newly allocated map_counter pointer or NULL on error.
+ */
+struct ust_app_map_counter *ust_app_alloc_map_counter(void)
+{
+	struct ust_app_map_counter *map_counter = NULL;
+
+	map_counter = zmalloc(sizeof(*map_counter));
+	if (map_counter == NULL) {
+		PERROR("zmalloc ust app map_counter");
+		goto error;
+	}
+
+	/* Zero could be a valid value for a handle so flag it to -1. */
+	map_counter->handle = -1;
+
+error:
+	return map_counter;
+}
+
/*
* Alloc new UST app event.
*/
/* Init most of the default value by allocating and zeroing */
ua_event = zmalloc(sizeof(struct ust_app_event));
if (ua_event == NULL) {
- PERROR("malloc");
+ PERROR("Failed to allocate ust_app_event structure");
goto error;
}
return NULL;
}
+
+/*
+ * Allocate a new UST app event notifier rule from a trigger.
+ *
+ * Takes a reference on the trigger for the lifetime of the returned rule;
+ * the reference is released when the rule is deleted. Returns NULL on
+ * allocation failure or if exclusions cannot be generated.
+ */
+static struct ust_app_event_notifier_rule *alloc_ust_app_event_notifier_rule(
+		struct lttng_trigger *trigger)
+{
+	enum lttng_event_rule_generate_exclusions_status
+			generate_exclusion_status;
+	enum lttng_condition_status condition_status;
+	struct ust_app_event_notifier_rule *ua_event_notifier_rule;
+	struct lttng_condition *condition = NULL;
+	const struct lttng_event_rule *event_rule = NULL;
+
+	ua_event_notifier_rule = zmalloc(sizeof(struct ust_app_event_notifier_rule));
+	if (ua_event_notifier_rule == NULL) {
+		PERROR("Failed to allocate ust_app_event_notifier_rule structure");
+		goto error;
+	}
+
+	ua_event_notifier_rule->enabled = 1;
+	ua_event_notifier_rule->token = lttng_trigger_get_tracer_token(trigger);
+	lttng_ht_node_init_u64(&ua_event_notifier_rule->node,
+			ua_event_notifier_rule->token);
+
+	condition = lttng_trigger_get_condition(trigger);
+	assert(condition);
+	assert(lttng_condition_get_type(condition) == LTTNG_CONDITION_TYPE_ON_EVENT);
+
+	/*
+	 * Fetch the event rule outside of assert(): a side-effecting call
+	 * placed inside assert() is compiled out under NDEBUG, which would
+	 * leave event_rule NULL.
+	 */
+	condition_status = lttng_condition_on_event_get_rule(condition, &event_rule);
+	assert(condition_status == LTTNG_CONDITION_STATUS_OK);
+	assert(event_rule);
+
+	/* Acquire the event notifier's reference to the trigger. */
+	lttng_trigger_get(trigger);
+
+	ua_event_notifier_rule->trigger = trigger;
+	ua_event_notifier_rule->filter = lttng_event_rule_get_filter_bytecode(event_rule);
+	generate_exclusion_status = lttng_event_rule_generate_exclusions(
+			event_rule, &ua_event_notifier_rule->exclusion);
+	switch (generate_exclusion_status) {
+	case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_OK:
+	case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_NONE:
+		break;
+	default:
+		/* Error occurred. */
+		ERR("Failed to generate exclusions from trigger while allocating an event notifier rule");
+		goto error_put_trigger;
+	}
+
+	DBG3("UST app event notifier rule allocated: token = %" PRIu64,
+			ua_event_notifier_rule->token);
+
+	return ua_event_notifier_rule;
+
+error_put_trigger:
+	lttng_trigger_put(trigger);
+error:
+	free(ua_event_notifier_rule);
+	return NULL;
+}
+
/*
* Alloc new UST app context.
*/
if (uctx) {
memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
if (uctx->ctx == LTTNG_UST_CONTEXT_APP_CONTEXT) {
- char *provider_name = NULL, *ctx_name = NULL;
+ char *provider_name = NULL, *ctx_name = NULL;
provider_name = strdup(uctx->u.app_ctx.provider_name);
ctx_name = strdup(uctx->u.app_ctx.ctx_name);
}
/*
- * Allocate a filter and copy the given original filter.
+ * Create a liblttng-ust filter bytecode from given bytecode.
*
* Return allocated filter or NULL on error.
*/
-static struct lttng_filter_bytecode *copy_filter_bytecode(
- struct lttng_filter_bytecode *orig_f)
+static struct lttng_ust_filter_bytecode *
+create_ust_filter_bytecode_from_bytecode(const struct lttng_bytecode *orig_f)
{
- struct lttng_filter_bytecode *filter = NULL;
+ struct lttng_ust_filter_bytecode *filter = NULL;
- /* Copy filter bytecode */
+ /* Copy filter bytecode. */
filter = zmalloc(sizeof(*filter) + orig_f->len);
if (!filter) {
- PERROR("zmalloc alloc filter bytecode");
+ PERROR("Failed to allocate lttng_ust_filter_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len);
goto error;
}
+ assert(sizeof(struct lttng_bytecode) ==
+ sizeof(struct lttng_ust_filter_bytecode));
memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
-
error:
return filter;
}
/*
- * Create a liblttng-ust filter bytecode from given bytecode.
+ * Create a liblttng-ust capture bytecode from given bytecode.
*
* Return allocated filter or NULL on error.
*/
-static struct lttng_ust_filter_bytecode *create_ust_bytecode_from_bytecode(
- struct lttng_filter_bytecode *orig_f)
+static struct lttng_ust_capture_bytecode *
+create_ust_capture_bytecode_from_bytecode(const struct lttng_bytecode *orig_f)
{
- struct lttng_ust_filter_bytecode *filter = NULL;
+ struct lttng_ust_capture_bytecode *capture = NULL;
- /* Copy filter bytecode */
- filter = zmalloc(sizeof(*filter) + orig_f->len);
- if (!filter) {
- PERROR("zmalloc alloc ust filter bytecode");
+ /* Copy capture bytecode. */
+ capture = zmalloc(sizeof(*capture) + orig_f->len);
+ if (!capture) {
+ PERROR("Failed to allocate lttng_ust_capture_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len);
goto error;
}
- assert(sizeof(struct lttng_filter_bytecode) ==
- sizeof(struct lttng_ust_filter_bytecode));
- memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
+ assert(sizeof(struct lttng_bytecode) ==
+ sizeof(struct lttng_ust_capture_bytecode));
+ memcpy(capture, orig_f, sizeof(*capture) + orig_f->len);
error:
- return filter;
+ return capture;
}
/*
* Return an ust_app_event object or NULL on error.
*/
static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
- char *name, struct lttng_filter_bytecode *filter,
+ const char *name, const struct lttng_bytecode *filter,
int loglevel_value,
- const struct lttng_event_exclusion *exclusion)
+ const struct lttng_event_exclusion *exclusion,
+ uint64_t tracer_token)
{
struct lttng_ht_iter iter;
struct lttng_ht_node_str *node;
key.loglevel_type = loglevel_value;
/* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
key.exclusion = exclusion;
+ key.tracer_token = tracer_token;
/* Lookup using the event name as hash and a custom match fct. */
cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
return event;
}
+/*
+ * Look-up an event notifier rule based on its token id.
+ *
+ * Must be called with the RCU read lock held.
+ * Return an ust_app_event_notifier_rule object or NULL on error.
+ */
+static struct ust_app_event_notifier_rule *find_ust_app_event_notifier_rule(
+		struct lttng_ht *ht, uint64_t token)
+{
+	struct lttng_ht_iter iter;
+	struct lttng_ht_node_u64 *node;
+	struct ust_app_event_notifier_rule *event_notifier_rule = NULL;
+
+	assert(ht);
+
+	/* The hash table is keyed on the 64-bit tracer token. */
+	lttng_ht_lookup(ht, &token, &iter);
+	node = lttng_ht_iter_get_node_u64(&iter);
+	if (node == NULL) {
+		DBG2("UST app event notifier rule token not found: token = %" PRIu64,
+				token);
+		goto end;
+	}
+
+	event_notifier_rule = caa_container_of(
+			node, struct ust_app_event_notifier_rule, node);
+end:
+	return event_notifier_rule;
+}
+
/*
* Create the channel context on the tracer.
*
* continue normally.
*/
ret = 0;
- DBG3("UST app disable event failed. Application is dead.");
+ DBG3("UST app add context failed. Application is dead.");
}
goto error;
}
/*
* Set the filter on the tracer.
*/
-static
-int set_ust_event_filter(struct ust_app_event *ua_event,
- struct ust_app *app)
+static int set_ust_object_filter(struct ust_app *app,
+ const struct lttng_bytecode *bytecode,
+ struct lttng_ust_object_data *ust_object)
{
int ret;
struct lttng_ust_filter_bytecode *ust_bytecode = NULL;
health_code_update();
- if (!ua_event->filter) {
- ret = 0;
+ ust_bytecode = create_ust_filter_bytecode_from_bytecode(bytecode);
+ if (!ust_bytecode) {
+ ret = -LTTNG_ERR_NOMEM;
+ goto error;
+ }
+ pthread_mutex_lock(&app->sock_lock);
+ ret = ustctl_set_filter(app->sock, ust_bytecode,
+ ust_object);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (ret < 0) {
+ if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+ ERR("UST app set object filter failed: object = %p of app pid = %d, ret = %d",
+ ust_object, app->pid, ret);
+ } else {
+ /*
+ * This is normal behavior, an application can die during the
+ * creation process. Don't report an error so the execution can
+ * continue normally.
+ */
+ ret = 0;
+ DBG3("Failed to set UST app object filter. Application is dead.");
+ }
goto error;
}
- ust_bytecode = create_ust_bytecode_from_bytecode(ua_event->filter);
+ DBG2("UST filter successfully set: object = %p", ust_object);
+
+error:
+ health_code_update();
+ free(ust_bytecode);
+ return ret;
+}
+
+/*
+ * Set a capture bytecode for the passed object.
+ * The sequence number enforces the ordering at runtime and on reception of
+ * the captured payloads.
+ */
+static int set_ust_capture(struct ust_app *app,
+ const struct lttng_bytecode *bytecode,
+ unsigned int capture_seqnum,
+ struct lttng_ust_object_data *ust_object)
+{
+ int ret;
+ struct lttng_ust_capture_bytecode *ust_bytecode = NULL;
+
+ health_code_update();
+
+ ust_bytecode = create_ust_capture_bytecode_from_bytecode(bytecode);
if (!ust_bytecode) {
ret = -LTTNG_ERR_NOMEM;
goto error;
}
+
+ /*
+ * Set the sequence number to ensure the capture of fields is ordered.
+ */
+ ust_bytecode->seqnum = capture_seqnum;
+
pthread_mutex_lock(&app->sock_lock);
- ret = ustctl_set_filter(app->sock, ust_bytecode,
- ua_event->obj);
+ ret = ustctl_set_capture(app->sock, ust_bytecode,
+ ust_object);
pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
- ERR("UST app event %s filter failed for app (pid: %d) "
- "with ret %d", ua_event->attr.name, app->pid, ret);
+ ERR("UST app set object capture failed: object = %p of app pid = %d, ret = %d",
+ ust_object, app->pid, ret);
} else {
/*
* This is normal behavior, an application can die during the
* continue normally.
*/
ret = 0;
- DBG3("UST app filter event failed. Application is dead.");
+ DBG3("Failed to set UST app object capture. Application is dead.");
}
+
goto error;
}
- DBG2("UST filter set successfully for event %s", ua_event->name);
+ DBG2("UST capture successfully set: object = %p", ust_object);
error:
health_code_update();
static
struct lttng_ust_event_exclusion *create_ust_exclusion_from_exclusion(
- struct lttng_event_exclusion *exclusion)
+ const struct lttng_event_exclusion *exclusion)
{
struct lttng_ust_event_exclusion *ust_exclusion = NULL;
size_t exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) +
/*
* Set event exclusions on the tracer.
*/
-static
-int set_ust_event_exclusion(struct ust_app_event *ua_event,
- struct ust_app *app)
+static int set_ust_object_exclusions(struct ust_app *app,
+ const struct lttng_event_exclusion *exclusions,
+ struct lttng_ust_object_data *ust_object)
{
int ret;
- struct lttng_ust_event_exclusion *ust_exclusion = NULL;
+ struct lttng_ust_event_exclusion *ust_exclusions = NULL;
- health_code_update();
+ assert(exclusions && exclusions->count > 0);
- if (!ua_event->exclusion || !ua_event->exclusion->count) {
- ret = 0;
- goto error;
- }
+ health_code_update();
- ust_exclusion = create_ust_exclusion_from_exclusion(
- ua_event->exclusion);
- if (!ust_exclusion) {
+ ust_exclusions = create_ust_exclusion_from_exclusion(
+ exclusions);
+ if (!ust_exclusions) {
ret = -LTTNG_ERR_NOMEM;
goto error;
}
pthread_mutex_lock(&app->sock_lock);
- ret = ustctl_set_exclusion(app->sock, ust_exclusion, ua_event->obj);
+ ret = ustctl_set_exclusion(app->sock, ust_exclusions, ust_object);
pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
- ERR("UST app event %s exclusions failed for app (pid: %d) "
- "with ret %d", ua_event->attr.name, app->pid, ret);
+ ERR("Failed to set UST app exclusions for object %p of app (pid: %d) "
+ "with ret %d", ust_object, app->pid, ret);
} else {
/*
* This is normal behavior, an application can die during the
* continue normally.
*/
ret = 0;
- DBG3("UST app event exclusion failed. Application is dead.");
+ DBG3("Failed to set UST app object exclusions. Application is dead.");
}
goto error;
}
- DBG2("UST exclusion set successfully for event %s", ua_event->name);
+ DBG2("UST exclusions set successfully for object %p", ust_object);
error:
health_code_update();
- free(ust_exclusion);
+ free(ust_exclusions);
return ret;
}
/*
* Disable the specified event on to UST tracer for the UST session.
*/
-static int disable_ust_event(struct ust_app *app,
- struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
+static int disable_ust_object(struct ust_app *app,
+ struct lttng_ust_object_data *object)
{
int ret;
health_code_update();
pthread_mutex_lock(&app->sock_lock);
- ret = ustctl_disable(app->sock, ua_event->obj);
+ ret = ustctl_disable(app->sock, object);
pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
- ERR("UST app event %s disable failed for app (pid: %d) "
- "and session handle %d with ret %d",
- ua_event->attr.name, app->pid, ua_sess->handle, ret);
+ ERR("Failed to disable UST app object %p app (pid: %d) with ret %d",
+ object, app->pid, ret);
} else {
/*
 * This is normal behavior, an application can die during the
 * creation process. Don't report an error so the execution can
 * continue normally.
 */
ret = 0;
- DBG3("UST app disable event failed. Application is dead.");
+ DBG3("Failed to disable UST app object. Application is dead.");
}
goto error;
}
- DBG2("UST app event %s disabled successfully for app (pid: %d)",
- ua_event->attr.name, app->pid);
+ DBG2("UST app object %p disabled successfully for app (pid: %d)",
+ object, app->pid);
error:
health_code_update();
return ret;
}
+/*
+ * Disable the specified map on to UST tracer for the UST session.
+ *
+ * Return 0 on success (the death of the application while disabling is not
+ * reported as an error) or a negative UST error code on failure.
+ */
+static int disable_ust_map(struct ust_app *app,
+ struct ust_app_session *ua_sess, struct ust_app_map *ua_map)
+{
+ int ret;
+
+ health_code_update();
+
+ pthread_mutex_lock(&app->sock_lock);
+ ret = ustctl_disable(app->sock, ua_map->obj);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (ret < 0) {
+ if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+ ERR("UST app map %s disable failed for app (pid: %d) "
+ "and session handle %d with ret %d",
+ ua_map->name, app->pid, ua_sess->handle, ret);
+ } else {
+ /*
+ * This is normal behavior, an application can die during the
+ * creation process. Don't report an error so the execution can
+ * continue normally.
+ */
+ ret = 0;
+ DBG3("UST app disable map failed. Application is dead.");
+ }
+ goto error;
+ }
+
+ DBG2("UST app map %s disabled successfully for app (pid: %d)",
+ ua_map->name, app->pid);
+
+error:
+ health_code_update();
+ return ret;
+}
+
/*
* Enable the specified channel on to UST tracer for the UST session.
*/
}
/*
- * Enable the specified event on to UST tracer for the UST session.
+ * Enable the specified map on to UST tracer for the UST session.
*/
-static int enable_ust_event(struct ust_app *app,
- struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
+static int enable_ust_map(struct ust_app *app,
+ struct ust_app_session *ua_sess, struct ust_app_map *ua_map)
{
int ret;
health_code_update();
pthread_mutex_lock(&app->sock_lock);
- ret = ustctl_enable(app->sock, ua_event->obj);
+ ret = ustctl_enable(app->sock, ua_map->obj);
pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
- ERR("UST app event %s enable failed for app (pid: %d) "
+ ERR("UST app map %s enable failed for app (pid: %d) "
"and session handle %d with ret %d",
- ua_event->attr.name, app->pid, ua_sess->handle, ret);
+ ua_map->name, app->pid, ua_sess->handle, ret);
+ } else {
+ /*
+ * This is normal behavior, an application can die during the
+ * creation process. Don't report an error so the execution can
+ * continue normally.
+ */
+ ret = 0;
+ DBG3("UST app enable map failed. Application is dead.");
+ }
+ goto error;
+ }
+
+ ua_map->enabled = 1;
+
+ DBG2("UST app map %s enabled successfully for app (pid: %d)",
+ ua_map->name, app->pid);
+
+error:
+ health_code_update();
+ return ret;
+}
+
+/*
+ * Enable the specified event on to UST tracer for the UST session.
+ */
+static int enable_ust_object(
+ struct ust_app *app, struct lttng_ust_object_data *ust_object)
+{
+ int ret;
+
+ health_code_update();
+
+ pthread_mutex_lock(&app->sock_lock);
+ ret = ustctl_enable(app->sock, ust_object);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (ret < 0) {
+ if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+ ERR("UST app enable failed for object %p app (pid: %d) with ret %d",
+ ust_object, app->pid, ret);
} else {
/*
 * This is normal behavior, an application can die during the
 * creation process. Don't report an error so the execution can
 * continue normally.
 */
ret = 0;
- DBG3("UST app enable event failed. Application is dead.");
+ DBG3("Failed to enable UST app object. Application is dead.");
}
goto error;
}
- DBG2("UST app event %s enabled successfully for app (pid: %d)",
- ua_event->attr.name, app->pid);
+ DBG2("UST app object %p enabled successfully for app (pid: %d)",
+ ust_object, app->pid);
error:
health_code_update();
return ret;
}
+/*
+ * Send the map object and its per-CPU counter objects to the application.
+ *
+ * On success, the counters are consumed (freed) and the map is flagged as
+ * sent. Return 0 on success. On error, a negative value is returned.
+ */
+static int send_map_pid_to_ust(struct ust_app *app,
+ struct ust_app_session *ua_sess, struct ust_app_map *ua_map)
+{
+ int ret;
+ struct ust_app_map_counter *counter, *ctmp;
+
+ assert(app);
+ assert(ua_sess);
+ assert(ua_map);
+
+ health_code_update();
+
+ DBG("UST app sending map %s to UST app sock %d", ua_map->name,
+ app->sock);
+
+ /* Send map to the application. */
+ pthread_mutex_lock(&app->sock_lock);
+ ret = ustctl_send_counter_data_to_ust(app->sock,
+ ua_sess->handle, ua_map->obj);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (ret < 0) {
+ /*
+ * Do not assert/abort here: the application may have died
+ * (EPIPE), which is a normal condition. Propagate the error to
+ * the caller instead of taking down the session daemon.
+ */
+ goto error;
+ }
+
+ ua_map->handle = ua_map->obj->handle;
+
+ health_code_update();
+
+ /* Send all per-CPU counter objects to the application. */
+ cds_list_for_each_entry_safe(counter, ctmp, &ua_map->counters.head, list) {
+ pthread_mutex_lock(&app->sock_lock);
+ ret = ustctl_send_counter_cpu_data_to_ust(app->sock,
+ ua_map->obj, counter->obj);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (ret < 0) {
+ goto error;
+ }
+
+ /* We don't need the counter anymore once sent to the tracer. */
+ cds_list_del(&counter->list);
+ delete_ust_app_map_counter(-1, counter, app);
+ }
+
+ /* Flag the map that it is sent to the application. */
+ ua_map->is_sent = 1;
+
+error:
+ health_code_update();
+ return ret;
+}
+
/*
 * Create the specified event onto the UST tracer for a UST session.
 *
 * Should be called with session mutex held.
 */
static
-int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
+int create_ust_channel_event(struct ust_app *app, struct ust_app_session *ua_sess,
 struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
{
 int ret = 0;
 pthread_mutex_unlock(&app->sock_lock);
 if (ret < 0) {
 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
 ERR("Error ustctl create event %s for app pid: %d with ret %d",
 ua_event->attr.name, app->pid, ret);
 } else {
 ua_event->handle = ua_event->obj->handle;
- DBG2("UST app event %s created successfully for pid:%d",
- ua_event->attr.name, app->pid);
+ DBG2("UST app event %s created successfully for pid:%d object: %p",
+ ua_event->attr.name, app->pid, ua_event->obj);
 health_code_update();
 /* Set filter if one is present. */
 if (ua_event->filter) {
- ret = set_ust_event_filter(ua_event, app);
+ ret = set_ust_object_filter(app, ua_event->filter, ua_event->obj);
 if (ret < 0) {
 goto error;
 }
 /* Set exclusions for the event */
 if (ua_event->exclusion) {
- ret = set_ust_event_exclusion(ua_event, app);
+ ret = set_ust_object_exclusions(app, ua_event->exclusion, ua_event->obj);
 if (ret < 0) {
 goto error;
 }
 * We now need to explicitly enable the event, since it
 * is now disabled at creation.
 */
- ret = enable_ust_event(app, ua_sess, ua_event);
+ ret = enable_ust_object(app, ua_event->obj);
 if (ret < 0) {
 /*
 * If we hit an EPERM, something is wrong with our enable call. If
 return ret;
}
-/*
- * Copy data between an UST app event and a LTT event.
- */
-static void shadow_copy_event(struct ust_app_event *ua_event,
- struct ltt_ust_event *uevent)
+static
+void add_key_token(struct lttng_ust_key_token *ust_key_token,
+ const struct lttng_map_key_token *key_token)
{
- size_t exclusion_alloc_size;
-
- strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
- ua_event->name[sizeof(ua_event->name) - 1] = '\0';
-
- ua_event->enabled = uevent->enabled;
+ switch (key_token->type) {
+ case LTTNG_MAP_KEY_TOKEN_TYPE_STRING:
+ {
+ const struct lttng_map_key_token_string *str_token;
+ str_token = (typeof(str_token)) key_token;
- /* Copy event attributes */
- memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));
+ ust_key_token->type = LTTNG_UST_KEY_TOKEN_STRING;
+ strncpy(ust_key_token->arg.string, str_token->string,
+ LTTNG_UST_KEY_TOKEN_STRING_LEN_MAX);
- /* Copy filter bytecode */
- if (uevent->filter) {
- ua_event->filter = copy_filter_bytecode(uevent->filter);
- /* Filter might be NULL here in case of ENONEM. */
+ break;
}
-
- /* Copy exclusion data */
- if (uevent->exclusion) {
- exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
- LTTNG_UST_SYM_NAME_LEN * uevent->exclusion->count;
- ua_event->exclusion = zmalloc(exclusion_alloc_size);
- if (ua_event->exclusion == NULL) {
- PERROR("malloc");
- } else {
- memcpy(ua_event->exclusion, uevent->exclusion,
- exclusion_alloc_size);
+ case LTTNG_MAP_KEY_TOKEN_TYPE_VARIABLE:
+ {
+ const struct lttng_map_key_token_variable *var_token;
+ var_token = (typeof(var_token)) key_token;
+ switch (var_token->type) {
+ case LTTNG_MAP_KEY_TOKEN_VARIABLE_TYPE_EVENT_NAME:
+ ust_key_token->type = LTTNG_UST_KEY_TOKEN_EVENT_NAME;
+ break;
+ case LTTNG_MAP_KEY_TOKEN_VARIABLE_TYPE_PROVIDER_NAME:
+ ust_key_token->type = LTTNG_UST_KEY_TOKEN_PROVIDER_NAME;
+ break;
+ default:
+ abort();
}
+
+ break;
+ }
+ default:
+ abort();
}
}
/*
- * Copy data between an UST app channel and a LTT channel.
+ * Create the specified event onto the UST tracer for a UST session.
+ *
+ * Should be called with session mutex held.
*/
-static void shadow_copy_channel(struct ust_app_channel *ua_chan,
- struct ltt_ust_channel *uchan)
+static
+int create_ust_map_event(struct ust_app *app, struct ust_app_session *ua_sess,
+ struct ust_app_map *ua_map, const struct lttng_map_key *key,
+ struct ust_app_event *ua_event)
{
- struct lttng_ht_iter iter;
- struct ltt_ust_event *uevent;
- struct ltt_ust_context *uctx;
- struct ust_app_event *ua_event;
-
- DBG2("UST app shadow copy of channel %s started", ua_chan->name);
+ int ret = 0;
+ unsigned int i, key_token_count;
+ enum lttng_map_key_status status;
+ struct lttng_ust_counter_event counter_event = {0};
- strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
- ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
+ health_code_update();
- ua_chan->tracefile_size = uchan->tracefile_size;
- ua_chan->tracefile_count = uchan->tracefile_count;
+ memcpy(&counter_event.event, &ua_event->attr, sizeof(struct lttng_ust_event));
- /* Copy event attributes since the layout is different. */
- ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
- ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
- ua_chan->attr.overwrite = uchan->attr.overwrite;
- ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
- ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
- ua_chan->monitor_timer_interval = uchan->monitor_timer_interval;
- ua_chan->attr.output = uchan->attr.output;
- /*
- * Note that the attribute channel type is not set since the channel on the
- * tracing registry side does not have this information.
- */
+ status = lttng_map_key_get_token_count(key, &key_token_count);
+ if (status != LTTNG_MAP_KEY_STATUS_OK) {
+ ret = LTTNG_ERR_UNK;
+ goto error;
+ }
- ua_chan->enabled = uchan->enabled;
- ua_chan->tracing_channel_id = uchan->id;
+ assert(key_token_count > 0);
- cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
- struct ust_app_ctx *ua_ctx = alloc_ust_app_ctx(&uctx->ctx);
+ counter_event.key.nr_dimensions = 1;
+ counter_event.key.key_dimensions[0].nr_key_tokens = key_token_count;
- if (ua_ctx == NULL) {
- continue;
- }
- lttng_ht_node_init_ulong(&ua_ctx->node,
- (unsigned long) ua_ctx->ctx.ctx);
- lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
- cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
+ if (key_token_count > LTTNG_UST_NR_KEY_TOKEN) {
+ ERR("Too many key tokens for UST tracer: token count = %u token count max =%u",
+ key_token_count, LTTNG_UST_NR_KEY_TOKEN);
+ ret = LTTNG_ERR_INVALID;
+ goto error;
}
- /* Copy all events from ltt ust channel to ust app channel */
- cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
- ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
- uevent->filter, uevent->attr.loglevel, uevent->exclusion);
- if (ua_event == NULL) {
- DBG2("UST event %s not found on shadow copy channel",
- uevent->attr.name);
- ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
- if (ua_event == NULL) {
- continue;
- }
- shadow_copy_event(ua_event, uevent);
- add_unique_ust_app_event(ua_chan, ua_event);
- }
- }
+ for (i = 0; i < key_token_count; i++) {
+ const struct lttng_map_key_token *token =
+ lttng_map_key_get_token_at_index(key, i);
- DBG3("UST app shadow copy of channel %s done", ua_chan->name);
-}
+ add_key_token(&counter_event.key.key_dimensions[0].key_tokens[i],
+ token);
+ }
-/*
- * Copy data between a UST app session and a regular LTT session.
- */
-static void shadow_copy_session(struct ust_app_session *ua_sess,
- struct ltt_ust_session *usess, struct ust_app *app)
-{
- struct lttng_ht_node_str *ua_chan_node;
- struct lttng_ht_iter iter;
- struct ltt_ust_channel *uchan;
- struct ust_app_channel *ua_chan;
- time_t rawtime;
+ /* Create UST event on tracer */
+ pthread_mutex_lock(&app->sock_lock);
+ ret = ustctl_counter_create_event(app->sock, &counter_event, ua_map->obj,
+ &ua_event->obj);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (ret < 0) {
+ if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+ ERR("Error ustctl counter create event %s for app pid: %d with ret %d",
+ ua_event->attr.name, app->pid, ret);
+ } else {
+ /*
+ * This is normal behavior, an application can die during the
+ * creation process. Don't report an error so the execution can
+ * continue normally.
+ */
+ ret = 0;
+ DBG3("UST app counter create event failed. Application is dead.");
+ }
+ goto error;
+ }
+
+ ua_event->handle = ua_event->obj->handle;
+
+ DBG2("UST app map event %s created successfully for pid:%d object: %p",
+ ua_event->attr.name, app->pid, ua_event->obj);
+
+ health_code_update();
+
+ /* Set filter if one is present. */
+ if (ua_event->filter) {
+ ret = set_ust_object_filter(app, ua_event->filter, ua_event->obj);
+ if (ret < 0) {
+ goto error;
+ }
+ }
+
+ /* Set exclusions for the event */
+ if (ua_event->exclusion) {
+ ret = set_ust_object_exclusions(app, ua_event->exclusion, ua_event->obj);
+ if (ret < 0) {
+ goto error;
+ }
+ }
+
+ /* If event not enabled, disable it on the tracer */
+ if (ua_event->enabled) {
+ /*
+ * We now need to explicitly enable the event, since it
+ * is now disabled at creation.
+ */
+ ret = enable_ust_object(app, ua_event->obj);
+ if (ret < 0) {
+ /*
+ * If we hit an EPERM, something is wrong with our enable call. If
+ * we get an EEXIST, there is a problem on the tracer side since we
+ * just created it.
+ */
+ switch (ret) {
+ case -LTTNG_UST_ERR_PERM:
+ /* Code flow problem */
+ assert(0);
+ case -LTTNG_UST_ERR_EXIST:
+ /* It's OK for our use case. */
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+ goto error;
+ }
+ }
+
+error:
+ health_code_update();
+ return ret;
+}
+
+static int init_ust_event_from_event_rule(
+ const struct lttng_event_rule *rule,
+ struct lttng_ust_event *event)
+{
+ enum lttng_event_rule_status status;
+ enum lttng_ust_loglevel_type ust_loglevel_type = LTTNG_UST_LOGLEVEL_ALL;
+ int loglevel = -1, ret = 0;
+ const char *pattern;
+
+ /* For now only LTTNG_EVENT_RULE_TYPE_TRACEPOINT are supported. */
+ assert(lttng_event_rule_get_type(rule) ==
+ LTTNG_EVENT_RULE_TYPE_TRACEPOINT);
+
+ if (lttng_event_rule_targets_agent_domain(rule)) {
+ /*
+ * Special event for agents
+ * The actual meat of the event is in the filter that will be
+ * attached later on.
+ * Set the default values for the agent event.
+ */
+ pattern = event_get_default_agent_ust_name(
+ lttng_event_rule_get_domain_type(rule));
+ loglevel = 0;
+ ust_loglevel_type = LTTNG_UST_LOGLEVEL_ALL;
+ } else {
+ const struct lttng_log_level_rule *log_level_rule;
+
+ status = lttng_event_rule_tracepoint_get_pattern(rule, &pattern);
+ if (status != LTTNG_EVENT_RULE_STATUS_OK) {
+ /* At this point, this is a fatal error. */
+ abort();
+ }
+
+ status = lttng_event_rule_tracepoint_get_log_level_rule(
+ rule, &log_level_rule);
+ if (status == LTTNG_EVENT_RULE_STATUS_UNSET) {
+ ust_loglevel_type = LTTNG_UST_LOGLEVEL_ALL;
+ } else if (status == LTTNG_EVENT_RULE_STATUS_OK) {
+ enum lttng_log_level_rule_status llr_status;
+
+ switch (lttng_log_level_rule_get_type(log_level_rule)) {
+ case LTTNG_LOG_LEVEL_RULE_TYPE_EXACTLY:
+ ust_loglevel_type = LTTNG_UST_LOGLEVEL_SINGLE;
+ llr_status = lttng_log_level_rule_exactly_get_level(log_level_rule, &loglevel);
+ break;
+ case LTTNG_LOG_LEVEL_RULE_TYPE_AT_LEAST_AS_SEVERE_AS:
+ ust_loglevel_type = LTTNG_UST_LOGLEVEL_RANGE;
+ llr_status = lttng_log_level_rule_at_least_as_severe_as_get_level(log_level_rule, &loglevel);
+ break;
+ default:
+ abort();
+ }
+
+ assert(llr_status == LTTNG_LOG_LEVEL_RULE_STATUS_OK);
+ } else {
+ /* At this point this is a fatal error */
+ assert(0);
+ }
+ }
+
+ event->instrumentation = LTTNG_UST_TRACEPOINT;
+ ret = lttng_strncpy(event->name, pattern,
+ LTTNG_UST_SYM_NAME_LEN - 1);
+ if (ret) {
+ ERR("Failed to copy event rule pattern to notifier: pattern = '%s' ",
+ pattern);
+ goto end;
+ }
+
+ event->loglevel_type = ust_loglevel_type;
+ event->loglevel = loglevel;
+end:
+ return ret;
+}
+
+/*
+ * Create the specified event notifier against the user space tracer of a
+ * given application.
+ */
+static int create_ust_event_notifier(struct ust_app *app,
+ struct ust_app_event_notifier_rule *ua_event_notifier_rule)
+{
+ int ret = 0;
+ struct lttng_ust_event_notifier event_notifier = {0};
+ const struct lttng_condition *condition = NULL;
+ const struct lttng_event_rule *event_rule = NULL;
+ unsigned int capture_bytecode_count = 0, i;
+ enum lttng_condition_status cond_status;
+
+ health_code_update();
+ assert(app->event_notifier_group.object);
+
+ condition = lttng_trigger_get_const_condition(
+ ua_event_notifier_rule->trigger);
+ assert(condition);
+ assert(lttng_condition_get_type(condition) == LTTNG_CONDITION_TYPE_ON_EVENT);
+
+ lttng_condition_on_event_get_rule(condition, &event_rule);
+ assert(event_rule);
+ assert(lttng_event_rule_get_type(event_rule) == LTTNG_EVENT_RULE_TYPE_TRACEPOINT);
+
+ /*
+ * A failed conversion (e.g. pattern copy failure) must not result in a
+ * notifier being created with a truncated or empty event name.
+ */
+ ret = init_ust_event_from_event_rule(event_rule, &event_notifier.event);
+ if (ret) {
+ goto error;
+ }
+
+ event_notifier.event.token = ua_event_notifier_rule->token;
+ event_notifier.error_counter_index = ua_event_notifier_rule->error_counter_index;
+
+ /* Create UST event notifier against the tracer. */
+ pthread_mutex_lock(&app->sock_lock);
+ ret = ustctl_create_event_notifier(app->sock, &event_notifier,
+ app->event_notifier_group.object,
+ &ua_event_notifier_rule->obj);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (ret < 0) {
+ if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+ ERR("Error ustctl create event notifier: name = '%s', app = '%s' (ppid: %d), ret = %d",
+ event_notifier.event.name, app->name,
+ app->ppid, ret);
+ } else {
+ /*
+ * This is normal behavior, an application can die
+ * during the creation process. Don't report an error so
+ * the execution can continue normally.
+ */
+ ret = 0;
+ DBG3("UST app create event notifier failed (application is dead): app = '%s' (ppid = %d)",
+ app->name, app->ppid);
+ }
+
+ goto error;
+ }
+
+ ua_event_notifier_rule->handle = ua_event_notifier_rule->obj->handle;
+
+ DBG2("UST app event notifier %s created successfully: app = '%s' (ppid: %d), object: %p",
+ event_notifier.event.name, app->name, app->ppid,
+ ua_event_notifier_rule->obj);
+
+ health_code_update();
+
+ /* Set filter if one is present. */
+ if (ua_event_notifier_rule->filter) {
+ ret = set_ust_object_filter(app, ua_event_notifier_rule->filter,
+ ua_event_notifier_rule->obj);
+ if (ret < 0) {
+ goto error;
+ }
+ }
+
+ /* Set exclusions for the event. */
+ if (ua_event_notifier_rule->exclusion) {
+ ret = set_ust_object_exclusions(app,
+ ua_event_notifier_rule->exclusion,
+ ua_event_notifier_rule->obj);
+ if (ret < 0) {
+ goto error;
+ }
+ }
+
+ /* Set the capture bytecodes. */
+ cond_status = lttng_condition_on_event_get_capture_descriptor_count(
+ condition, &capture_bytecode_count);
+ assert(cond_status == LTTNG_CONDITION_STATUS_OK);
+
+ for (i = 0; i < capture_bytecode_count; i++) {
+ const struct lttng_bytecode *capture_bytecode =
+ lttng_condition_on_event_get_capture_bytecode_at_index(
+ condition, i);
+
+ ret = set_ust_capture(app, capture_bytecode, i,
+ ua_event_notifier_rule->obj);
+ if (ret < 0) {
+ goto error;
+ }
+ }
+
+ /*
+ * We now need to explicitly enable the event, since it
+ * is disabled at creation.
+ */
+ ret = enable_ust_object(app, ua_event_notifier_rule->obj);
+ if (ret < 0) {
+ /*
+ * If we hit an EPERM, something is wrong with our enable call.
+ * If we get an EEXIST, there is a problem on the tracer side
+ * since we just created it.
+ */
+ switch (ret) {
+ case -LTTNG_UST_ERR_PERM:
+ /* Code flow problem. */
+ abort();
+ case -LTTNG_UST_ERR_EXIST:
+ /* It's OK for our use case. */
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+
+ goto error;
+ }
+
+ ua_event_notifier_rule->enabled = true;
+
+error:
+ health_code_update();
+ return ret;
+}
+
+/*
+ * Copy data between an UST app event and a LTT event.
+ */
+static void shadow_copy_event(struct ust_app_event *ua_event,
+ struct ltt_ust_event *uevent)
+{
+ size_t exclusion_alloc_size;
+
+ strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
+ ua_event->name[sizeof(ua_event->name) - 1] = '\0';
+
+ ua_event->enabled = uevent->enabled;
+
+ /* Copy event attributes */
+ memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));
+
+ /* Copy filter bytecode */
+ if (uevent->filter) {
+ ua_event->filter = lttng_bytecode_copy(uevent->filter);
+ /* Filter might be NULL here in case of ENONEM. */
+ }
+
+ /* Copy exclusion data */
+ if (uevent->exclusion) {
+ exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
+ LTTNG_UST_SYM_NAME_LEN * uevent->exclusion->count;
+ ua_event->exclusion = zmalloc(exclusion_alloc_size);
+ if (ua_event->exclusion == NULL) {
+ PERROR("malloc");
+ } else {
+ memcpy(ua_event->exclusion, uevent->exclusion,
+ exclusion_alloc_size);
+ }
+ }
+}
+
+/*
+ * Copy data between an UST app channel and a LTT channel.
+ */
+static void shadow_copy_channel(struct ust_app_channel *ua_chan,
+ struct ltt_ust_channel *uchan)
+{
+ DBG2("UST app shadow copy of channel %s started", ua_chan->name);
+
+ strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
+ ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
+
+ ua_chan->tracefile_size = uchan->tracefile_size;
+ ua_chan->tracefile_count = uchan->tracefile_count;
+
+ /* Copy event attributes since the layout is different. */
+ ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
+ ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
+ ua_chan->attr.overwrite = uchan->attr.overwrite;
+ ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
+ ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
+ ua_chan->monitor_timer_interval = uchan->monitor_timer_interval;
+ ua_chan->attr.output = uchan->attr.output;
+ ua_chan->attr.blocking_timeout = uchan->attr.u.s.blocking_timeout;
+
+ /*
+ * Note that the attribute channel type is not set since the channel on the
+ * tracing registry side does not have this information.
+ */
+
+ ua_chan->enabled = uchan->enabled;
+ ua_chan->tracing_channel_id = uchan->id;
+
+ DBG3("UST app shadow copy of channel %s done", ua_chan->name);
+}
+
+/*
+ * Copy data between a UST app session and a regular LTT session.
+ */
+static void shadow_copy_session(struct ust_app_session *ua_sess,
+ struct ltt_ust_session *usess, struct ust_app *app)
+{
struct tm *timeinfo;
char datetime[16];
int ret;
char tmp_shm_path[PATH_MAX];
- /* Get date and time for unique app path */
- time(&rawtime);
- timeinfo = localtime(&rawtime);
+ timeinfo = localtime(&app->registration_time);
strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
DBG2("Shadow copy of session handle %d", ua_sess->handle);
ua_sess->tracing_id = usess->id;
ua_sess->id = get_next_session_id();
- ua_sess->uid = app->uid;
- ua_sess->gid = app->gid;
- ua_sess->euid = usess->uid;
- ua_sess->egid = usess->gid;
+ LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.uid, app->uid);
+ LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.gid, app->gid);
+ LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.uid, usess->uid);
+ LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.gid, usess->gid);
ua_sess->buffer_type = usess->buffer_type;
ua_sess->bits_per_long = app->bits_per_long;
break;
case LTTNG_BUFFER_PER_UID:
ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
- DEFAULT_UST_TRACE_UID_PATH, ua_sess->uid, app->bits_per_long);
+ DEFAULT_UST_TRACE_UID_PATH,
+ lttng_credentials_get_uid(&ua_sess->real_credentials),
+ app->bits_per_long);
break;
default:
assert(0);
switch (ua_sess->buffer_type) {
case LTTNG_BUFFER_PER_PID:
ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
- DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
+ "/" DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
app->name, app->pid, datetime);
break;
case LTTNG_BUFFER_PER_UID:
ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
- DEFAULT_UST_TRACE_UID_PATH,
+ "/" DEFAULT_UST_TRACE_UID_PATH,
app->uid, app->bits_per_long);
break;
default:
sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
}
-
- /* Iterate over all channels in global domain. */
- cds_lfht_for_each_entry(usess->domain_global.channels->ht, &iter.iter,
- uchan, node.node) {
- struct lttng_ht_iter uiter;
-
- lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
- ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
- if (ua_chan_node != NULL) {
- /* Session exist. Contiuing. */
- continue;
- }
-
- DBG2("Channel %s not found on shadow session copy, creating it",
- uchan->name);
- ua_chan = alloc_ust_app_channel(uchan->name, ua_sess,
- &uchan->attr);
- if (ua_chan == NULL) {
- /* malloc failed FIXME: Might want to do handle ENOMEM .. */
- continue;
- }
- shadow_copy_channel(ua_chan, uchan);
- /*
- * The concept of metadata channel does not exist on the tracing
- * registry side of the session daemon so this can only be a per CPU
- * channel and not metadata.
- */
- ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
-
- lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
- }
return;
error:
* Lookup sesison wrapper.
*/
static
-void __lookup_session_by_app(struct ltt_ust_session *usess,
+void __lookup_session_by_app(const struct ltt_ust_session *usess,
struct ust_app *app, struct lttng_ht_iter *iter)
{
/* Get right UST app session from app */
* id.
*/
static struct ust_app_session *lookup_session_by_app(
- struct ltt_ust_session *usess, struct ust_app *app)
+ const struct ltt_ust_session *usess, struct ust_app *app)
{
struct lttng_ht_iter iter;
struct lttng_ht_node_u64 *node;
app->bits_per_long, app->uint8_t_alignment,
app->uint16_t_alignment, app->uint32_t_alignment,
app->uint64_t_alignment, app->long_alignment,
- app->byte_order, app->version.major,
- app->version.minor, reg_pid->root_shm_path,
- reg_pid->shm_path,
- ua_sess->euid, ua_sess->egid);
+ app->byte_order, app->version.major, app->version.minor,
+ reg_pid->root_shm_path, reg_pid->shm_path,
+ lttng_credentials_get_uid(&ua_sess->effective_credentials),
+ lttng_credentials_get_gid(&ua_sess->effective_credentials),
+ ua_sess->tracing_id,
+ app->uid);
if (ret < 0) {
/*
* reg_pid->registry->reg.ust is NULL upon error, so we need to
app->uint64_t_alignment, app->long_alignment,
app->byte_order, app->version.major,
app->version.minor, reg_uid->root_shm_path,
- reg_uid->shm_path, usess->uid, usess->gid);
+ reg_uid->shm_path, usess->uid, usess->gid,
+ ua_sess->tracing_id, app->uid);
if (ret < 0) {
/*
* reg_uid->registry->reg.ust is NULL upon error, so we need to
* Returns 0 on success or else a negative code which is either -ENOMEM or
* -ENOTCONN which is the default code if the ustctl_create_session fails.
*/
-static int create_ust_app_session(struct ltt_ust_session *usess,
+static int find_or_create_ust_app_session(struct ltt_ust_session *usess,
struct ust_app *app, struct ust_app_session **ua_sess_ptr,
int *is_created)
{
if (ua_sess == NULL) {
DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
app->pid, usess->id);
- ua_sess = alloc_ust_app_session(app);
+ ua_sess = alloc_ust_app_session();
if (ua_sess == NULL) {
/* Only malloc can failed so something is really wrong */
ret = -ENOMEM;
* Called with UST app session lock held and a RCU read side lock.
*/
static
-int create_ust_app_channel_context(struct ust_app_session *ua_sess,
- struct ust_app_channel *ua_chan,
- struct lttng_ust_context_attr *uctx,
+int create_ust_app_channel_context(struct ust_app_channel *ua_chan,
+ struct lttng_ust_context_attr *uctx,
struct ust_app *app)
{
int ret = 0;
ua_ctx = alloc_ust_app_ctx(uctx);
if (ua_ctx == NULL) {
/* malloc failed */
- ret = -1;
+ ret = -ENOMEM;
goto error;
}
{
int ret;
- ret = enable_ust_event(app, ua_sess, ua_event);
+ ret = enable_ust_object(app, ua_event->obj);
if (ret < 0) {
goto error;
}
{
int ret;
- ret = disable_ust_event(app, ua_sess, ua_event);
+ ret = disable_ust_object(app, ua_event->obj);
if (ret < 0) {
goto error;
}
return ret;
}
+/*
+ * Lookup ust app map for session and disable it on the tracer side.
+ *
+ * On success, the map's enabled flag is cleared. Return 0 on success or a
+ * negative value on error.
+ */
+static
+int disable_ust_app_map(struct ust_app_session *ua_sess,
+ struct ust_app_map *ua_map, struct ust_app *app)
+{
+ int ret;
+
+ ret = disable_ust_map(app, ua_sess, ua_map);
+ if (ret < 0) {
+ goto error;
+ }
+
+ ua_map->enabled = 0;
+
+error:
+ return ret;
+}
+
/*
* Lookup ust app channel for session and enable it on the tracer side. This
* MUST be called with a RCU read side lock acquired.
return ret;
}
+/*
+ * Lookup ust app map for session and enable it on the tracer side. This
+ * MUST be called with a RCU read side lock acquired.
+ */
+static int enable_ust_app_map(struct ust_app_session *ua_sess,
+ struct ltt_ust_map *umap, struct ust_app *app)
+{
+ int ret = 0;
+ struct lttng_ht_iter iter;
+ struct lttng_ht_node_str *ua_map_node;
+ struct ust_app_map *ua_map;
+
+ lttng_ht_lookup(ua_sess->maps, (void *)umap->name, &iter);
+ ua_map_node = lttng_ht_iter_get_node_str(&iter);
+ if (ua_map_node == NULL) {
+ DBG2("Unable to find map %s in ust session id %" PRIu64,
+ umap->name, ua_sess->tracing_id);
+ goto error;
+ }
+
+ ua_map = caa_container_of(ua_map_node, struct ust_app_map, node);
+
+ ret = enable_ust_map(app, ua_sess, ua_map);
+ if (ret < 0) {
+ goto error;
+ }
+
+error:
+ return ret;
+}
+
/*
* Ask the consumer to create a channel and get it if successful.
*
+ * Called with UST app session lock held.
+ *
* Return 0 on success or else a negative value.
*/
static int do_consumer_create_channel(struct ltt_ust_session *usess,
struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
- int bitness, struct ust_registry_session *registry)
+ int bitness, struct ust_registry_session *registry,
+ uint64_t trace_archive_id)
{
int ret;
unsigned int nb_fd = 0;
* stream we have to expect.
*/
ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
- registry);
+ registry, usess->current_trace_chunk);
if (ret < 0) {
goto error_ask;
}
return ret;
}
+static int create_map_object(struct ltt_ust_session *usess,
+ struct ust_app_session *ua_sess, struct ust_app_map *ua_map)
+{
+ int i, ret, nr_counter_cpu;
+ struct ustctl_counter_dimension dimension[1] = {0};
+ struct ustctl_daemon_counter *daemon_counter;
+ struct lttng_ust_object_data **counter_cpus;
+ enum ustctl_counter_bitness bitness;
+ int *counter_cpu_fds;
+
+ assert(usess);
+ assert(ua_sess);
+ assert(ua_map);
+ assert(ua_map->bucket_count > 0);
+
+ DBG("Creating UST map \"%s\"", ua_map->name);
+
+ if (ua_map->bitness == LTTNG_MAP_BITNESS_32BITS) {
+ bitness = USTCTL_COUNTER_BITNESS_32;
+ } else {
+ bitness = USTCTL_COUNTER_BITNESS_64;
+ }
+
+ nr_counter_cpu = ustctl_get_nr_cpu_per_counter();
+ counter_cpu_fds = zmalloc(nr_counter_cpu * sizeof(*counter_cpu_fds));
+ if (!counter_cpu_fds) {
+ ret = -1;
+ goto end;
+ }
+
+ counter_cpus = zmalloc(nr_counter_cpu * sizeof(**counter_cpus));
+ if (!counter_cpus) {
+ ret = -1;
+ goto free_cpu_fds;
+ }
+
+ /* Need one fd for each cpu counter of the map. */
+ ret = lttng_fd_get(LTTNG_FD_APPS, nr_counter_cpu);
+ if (ret < 0) {
+ ERR("Exhausted number of available FD upon create map");
+ goto free_cpu_counters;
+ }
+
+ for (i = 0; i < nr_counter_cpu; i++) {
+ counter_cpu_fds[i] = shm_create_anonymous("ust-map-counter");
+ if (counter_cpu_fds[i] < 0) {
+ ERR("Error creating anonymous shared memory object");
+ ret = -1;
+ goto error;
+ }
+ }
+
+ dimension[0].size = ua_map->bucket_count;
+ dimension[0].has_underflow = false;
+ dimension[0].has_overflow = false;
+
+ daemon_counter = ustctl_create_counter(1, dimension, 0, -1,
+ nr_counter_cpu, counter_cpu_fds,
+ bitness,
+ USTCTL_COUNTER_ARITHMETIC_MODULAR,
+ USTCTL_COUNTER_ALLOC_PER_CPU,
+ ua_map->coalesce_hits);
+ assert(daemon_counter);
+
+ DBG("Created daemon counter succesfully");
+
+ ua_map->map_handle = daemon_counter;
+
+ ret = ustctl_create_counter_data(daemon_counter, &ua_map->obj);
+ assert(ret == 0);
+ DBG("Created counter data succesfully");
+
+ for (i = 0; i < nr_counter_cpu; i++) {
+ struct ust_app_map_counter *counter;
+
+ /* Create UST counter */
+ counter = ust_app_alloc_map_counter();
+ if (counter == NULL) {
+ ret = -ENOMEM;
+ goto release_counters;
+ }
+
+ ret = ustctl_create_counter_cpu_data(daemon_counter, i,
+ &counter->obj);
+ if (ret < 0) {
+ ERR("Creating map counter cpu data");
+ free(counter);
+ goto error;
+ }
+
+ cds_list_add_tail(&counter->list, &ua_map->counters.head);
+ ua_map->counters.count++;
+
+ DBG2("UST app map counter %d created successfully",
+ ua_map->counters.count);
+ }
+
+ ret = 0;
+ goto end;
+
+error:
+release_counters:
+ //TODO
+free_cpu_counters:
+ free(counter_cpus);
+
+free_cpu_fds:
+ free(counter_cpu_fds);
+end:
+ return ret;
+}
+
/*
* Duplicate the ust data object of the ust app stream and save it in the
* buffer registry stream.
assert(reg_stream);
assert(stream);
- /* Reserve the amount of file descriptor we need. */
+ /* Duplicating a stream requires 2 new fds. Reserve them. */
ret = lttng_fd_get(LTTNG_FD_APPS, 2);
if (ret < 0) {
ERR("Exhausted number of available FD upon duplicate stream");
return ret;
}
+/*
+ * Duplicate the ust data object of the ust app map_counter and save it in the
+ * buffer registry map_counter.
+ *
+ * Return 0 on success or else a negative value.
+ */
+static int duplicate_map_counter_object(struct buffer_reg_map_counter *reg_map_counter,
+ struct ust_app_map_counter *map_counter)
+{
+ int ret;
+
+ assert(reg_map_counter);
+ assert(map_counter);
+
+ /* Duplicating a map_counter requires 2 new fds. Reserve them. */
+ ret = lttng_fd_get(LTTNG_FD_APPS, 2);
+ if (ret < 0) {
+ ERR("Exhausted number of available FD upon duplicate map_counter");
+ goto error;
+ }
+
+ /* Duplicate object for map_counter once the original is in the registry. */
+ ret = ustctl_duplicate_ust_object_data(&map_counter->obj,
+ reg_map_counter->obj.ust);
+ if (ret < 0) {
+ ERR("Duplicate map_counter obj from %p to %p failed with ret %d",
+ reg_map_counter->obj.ust, map_counter->obj, ret);
+ lttng_fd_put(LTTNG_FD_APPS, 2);
+ goto error;
+ }
+ map_counter->handle = map_counter->obj->handle;
+
+error:
+ return ret;
+}
+
/*
* Duplicate the ust data object of the ust app. channel and save it in the
* buffer registry channel.
*
* Return 0 on success or else a negative value.
*/
-static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
+static int duplicate_channel_object(struct buffer_reg_channel *buf_reg_chan,
struct ust_app_channel *ua_chan)
{
int ret;
- assert(reg_chan);
+ assert(buf_reg_chan);
assert(ua_chan);
- /* Need two fds for the channel. */
+ /* Duplicating a channel requires 1 new fd. Reserve it. */
ret = lttng_fd_get(LTTNG_FD_APPS, 1);
if (ret < 0) {
ERR("Exhausted number of available FD upon duplicate channel");
}
/* Duplicate object for stream once the original is in the registry. */
- ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
+ ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, buf_reg_chan->obj.ust);
if (ret < 0) {
ERR("Duplicate channel obj from %p to %p failed with ret: %d",
- reg_chan->obj.ust, ua_chan->obj, ret);
+ buf_reg_chan->obj.ust, ua_chan->obj, ret);
goto error;
}
ua_chan->handle = ua_chan->obj->handle;
return ret;
}
+/*
+ * Duplicate the ust data object of the ust app. map and save it in the
+ * buffer registry map.
+ *
+ * Return 0 on success or else a negative value.
+ */
+static int duplicate_map_object(struct buffer_reg_map *buf_reg_map,
+ struct ust_app_map *ua_map)
+{
+ int ret;
+
+ assert(buf_reg_map);
+ assert(ua_map);
+
+ /* Duplicating a map requires 1 new fd. Reserve it. */
+ ret = lttng_fd_get(LTTNG_FD_APPS, 1);
+ if (ret < 0) {
+ ERR("Exhausted number of available FD upon duplicate map");
+ goto error_fd_get;
+ }
+
+ /* Duplicate object for stream once the original is in the registry. */
+ ret = ustctl_duplicate_ust_object_data(&ua_map->obj, buf_reg_map->obj.ust);
+ if (ret < 0) {
+ ERR("Duplicate map obj from %p to %p failed with ret: %d",
+ buf_reg_map->obj.ust, ua_map->obj, ret);
+ goto error;
+ }
+ ua_map->handle = ua_map->obj->handle;
+
+ return 0;
+
+error:
+ lttng_fd_put(LTTNG_FD_APPS, 1);
+error_fd_get:
+ return ret;
+}
+
/*
* For a given channel buffer registry, setup all streams of the given ust
* application channel.
*
* Return 0 on success or else a negative value.
*/
-static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
+static int setup_buffer_reg_streams(struct buffer_reg_channel *buf_reg_chan,
struct ust_app_channel *ua_chan,
struct ust_app *app)
{
int ret = 0;
struct ust_app_stream *stream, *stmp;
- assert(reg_chan);
+ assert(buf_reg_chan);
assert(ua_chan);
DBG2("UST app setup buffer registry stream");
*/
reg_stream->obj.ust = stream->obj;
stream->obj = NULL;
- buffer_reg_stream_add(reg_stream, reg_chan);
+ buffer_reg_stream_add(reg_stream, buf_reg_chan);
/* We don't need the streams anymore. */
cds_list_del(&stream->list);
return ret;
}
+/*
+ * For a given map buffer registry, setup all counters of the given ust
+ * application map.
+ *
+ * Return 0 on success or else a negative value.
+ */
+static int setup_buffer_reg_map_counters(struct buffer_reg_map *buf_reg_map,
+ struct ust_app_map *ua_map,
+ struct ust_app *app)
+{
+ int ret = 0;
+ struct ust_app_map_counter *counter, *stmp;
+
+ assert(buf_reg_map);
+ assert(ua_map);
+
+ DBG2("UST app setup buffer registry counter");
+
+ /* Send all counters to application. */
+ cds_list_for_each_entry_safe(counter, stmp, &ua_map->counters.head, list) {
+ struct buffer_reg_map_counter *reg_counter;
+
+ ret = buffer_reg_map_counter_create(®_counter);
+ if (ret < 0) {
+ goto error;
+ }
+
+ /*
+ * Keep original pointer and nullify it in the counter so the delete
+ * counter call does not release the object.
+ */
+ reg_counter->obj.ust = counter->obj;
+ counter->obj = NULL;
+ buffer_reg_map_counter_add(reg_counter, buf_reg_map);
+
+ /* We don't need the counters anymore. */
+ cds_list_del(&counter->list);
+ delete_ust_app_map_counter(-1, counter, app);
+ }
+
+error:
+ return ret;
+}
+
/*
* Create a buffer registry channel for the given session registry and
* application channel object. If regp pointer is valid, it's set with the
struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
{
int ret;
- struct buffer_reg_channel *reg_chan = NULL;
+ struct buffer_reg_channel *buf_reg_chan = NULL;
assert(reg_sess);
assert(ua_chan);
DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
/* Create buffer registry channel. */
- ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
+ ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &buf_reg_chan);
if (ret < 0) {
goto error_create;
}
- assert(reg_chan);
- reg_chan->consumer_key = ua_chan->key;
- reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
- reg_chan->num_subbuf = ua_chan->attr.num_subbuf;
+ assert(buf_reg_chan);
+ buf_reg_chan->consumer_key = ua_chan->key;
+ buf_reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
+ buf_reg_chan->num_subbuf = ua_chan->attr.num_subbuf;
/* Create and add a channel registry to session. */
ret = ust_registry_channel_add(reg_sess->reg.ust,
if (ret < 0) {
goto error;
}
- buffer_reg_channel_add(reg_sess, reg_chan);
+ buffer_reg_channel_add(reg_sess, buf_reg_chan);
if (regp) {
- *regp = reg_chan;
+ *regp = buf_reg_chan;
}
return 0;
error:
/* Safe because the registry channel object was not added to any HT. */
- buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
+ buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
+error_create:
+ return ret;
+}
+
+/*
+ * Create a buffer registry map for the given session registry and
+ * application map object. If regp pointer is valid, it's set with the
+ * created object. Important, the created object is NOT added to the session
+ * registry hash table.
+ *
+ * Return 0 on success else a negative value.
+ */
+static int create_buffer_reg_map(struct buffer_reg_session *reg_sess,
+ struct ust_app_map *ua_map, struct buffer_reg_map **regp)
+{
+ int ret;
+ struct buffer_reg_map *buf_reg_map = NULL;
+
+ assert(reg_sess);
+ assert(ua_map);
+
+ DBG2("UST app creating buffer registry map for %s", ua_map->name);
+
+ /* Create buffer registry map. */
+ ret = buffer_reg_map_create(ua_map->tracing_map_id, &buf_reg_map);
+ if (ret < 0) {
+ goto error_create;
+ }
+ assert(buf_reg_map);
+
+ /* Create and add a map registry to session. */
+ ret = ust_registry_map_add(reg_sess->reg.ust, ua_map->tracing_map_id);
+ if (ret < 0) {
+ goto error;
+ }
+ buffer_reg_map_add(reg_sess, buf_reg_map);
+
+ if (regp) {
+ *regp = buf_reg_map;
+ }
+
+ return 0;
+
+error:
+ /* Safe because the registry map object was not added to any HT. */
+ buffer_reg_map_destroy(buf_reg_map, LTTNG_DOMAIN_UST);
error_create:
return ret;
}
* Return 0 on success else a negative value.
*/
static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
- struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan,
+ struct ust_app_channel *ua_chan, struct buffer_reg_channel *buf_reg_chan,
struct ust_app *app)
{
int ret;
assert(reg_sess);
- assert(reg_chan);
+ assert(buf_reg_chan);
assert(ua_chan);
assert(ua_chan->obj);
DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
/* Setup all streams for the registry. */
- ret = setup_buffer_reg_streams(reg_chan, ua_chan, app);
+ ret = setup_buffer_reg_streams(buf_reg_chan, ua_chan, app);
if (ret < 0) {
goto error;
}
- reg_chan->obj.ust = ua_chan->obj;
+ buf_reg_chan->obj.ust = ua_chan->obj;
ua_chan->obj = NULL;
return 0;
error:
- buffer_reg_channel_remove(reg_sess, reg_chan);
- buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
+ buffer_reg_channel_remove(reg_sess, buf_reg_chan);
+ buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
+ return ret;
+}
+
+/*
+ * Setup buffer registry map for the given session registry and application
+ * map object. If regp pointer is valid, it's set with the created object.
+ *
+ * Return 0 on success else a negative value.
+ */
+static int setup_buffer_reg_map(struct buffer_reg_session *reg_sess,
+ struct ust_app_map *ua_map, struct buffer_reg_map *buf_reg_map,
+ struct ust_app *app)
+{
+ int ret;
+
+ assert(reg_sess);
+ assert(buf_reg_map);
+ assert(ua_map);
+ assert(ua_map->obj);
+
+ DBG2("UST app setup buffer registry map for %s", ua_map->name);
+
+ /* Setup all counters for the registry. */
+ ret = setup_buffer_reg_map_counters(buf_reg_map, ua_map, app);
+ if (ret < 0) {
+ goto error;
+ }
+
+ buf_reg_map->obj.ust = ua_map->obj;
+ ua_map->obj = NULL;
+ buf_reg_map->daemon_counter = ua_map->map_handle;
+ ua_map->map_handle = NULL;
+
+ return 0;
+
+error:
+ buffer_reg_map_remove(reg_sess, buf_reg_map);
+ buffer_reg_map_destroy(buf_reg_map, LTTNG_DOMAIN_UST);
return ret;
}
*
* Return 0 on success else a negative value.
*/
-static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
+static int send_channel_uid_to_ust(struct buffer_reg_channel *buf_reg_chan,
struct ust_app *app, struct ust_app_session *ua_sess,
struct ust_app_channel *ua_chan)
{
int ret;
struct buffer_reg_stream *reg_stream;
- assert(reg_chan);
+ assert(buf_reg_chan);
assert(app);
assert(ua_sess);
assert(ua_chan);
DBG("UST app sending buffer registry channel to ust sock %d", app->sock);
- ret = duplicate_channel_object(reg_chan, ua_chan);
+ ret = duplicate_channel_object(buf_reg_chan, ua_chan);
if (ret < 0) {
goto error;
}
health_code_update();
/* Send all streams to application. */
- pthread_mutex_lock(&reg_chan->stream_list_lock);
- cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
+ pthread_mutex_lock(&buf_reg_chan->stream_list_lock);
+ cds_list_for_each_entry(reg_stream, &buf_reg_chan->streams, lnode) {
struct ust_app_stream stream;
ret = duplicate_stream_object(reg_stream, &stream);
ua_chan->is_sent = 1;
error_stream_unlock:
- pthread_mutex_unlock(&reg_chan->stream_list_lock);
+ pthread_mutex_unlock(&buf_reg_chan->stream_list_lock);
error:
return ret;
}
/*
- * Create and send to the application the created buffers with per UID buffers.
+ * Send buffer registry map to the application.
*
* Return 0 on success else a negative value.
*/
-static int create_channel_per_uid(struct ust_app *app,
- struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
- struct ust_app_channel *ua_chan)
+static int send_map_uid_to_ust(struct buffer_reg_map *buf_reg_map,
+ struct ust_app *app, struct ust_app_session *ua_sess,
+ struct ust_app_map *ua_map)
{
int ret;
- struct buffer_reg_uid *reg_uid;
- struct buffer_reg_channel *reg_chan;
- bool created = false;
+ struct buffer_reg_map_counter *reg_map_counter;
+ assert(buf_reg_map);
assert(app);
- assert(usess);
assert(ua_sess);
- assert(ua_chan);
+ assert(ua_map);
- DBG("UST app creating channel %s with per UID buffers", ua_chan->name);
+ DBG("UST app sending buffer registry map to ust sock %d", app->sock);
- reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
- /*
- * The session creation handles the creation of this global registry
- * object. If none can be find, there is a code flow problem or a
- * teardown race.
- */
- assert(reg_uid);
+ ret = duplicate_map_object(buf_reg_map, ua_map);
+ if (ret < 0) {
+ goto error;
+ }
- reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
- reg_uid);
- if (!reg_chan) {
- /* Create the buffer registry channel object. */
- ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, ®_chan);
- if (ret < 0) {
- ERR("Error creating the UST channel \"%s\" registry instance",
- ua_chan->name);
- goto error;
+ /* Send map to the application. */
+
+ pthread_mutex_lock(&app->sock_lock);
+ ret = ustctl_send_counter_data_to_ust(app->sock,
+ ua_sess->handle, ua_map->obj);
+ pthread_mutex_unlock(&app->sock_lock);
+ assert(ret == 0);
+ if (ret < 0) {
+ if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+ ret = -ENOTCONN; /* Caused by app exiting. */
}
- assert(reg_chan);
+ goto error_map_counter_unlock;
+ }
- /*
- * Create the buffers on the consumer side. This call populates the
- * ust app channel object with all streams and data object.
- */
- ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
- app->bits_per_long, reg_uid->registry->reg.ust);
- if (ret < 0) {
- ERR("Error creating UST channel \"%s\" on the consumer daemon",
- ua_chan->name);
+ ua_map->handle = ua_map->obj->handle;
- /*
- * Let's remove the previously created buffer registry channel so
- * it's not visible anymore in the session registry.
- */
- ust_registry_channel_del_free(reg_uid->registry->reg.ust,
- ua_chan->tracing_channel_id, false);
- buffer_reg_channel_remove(reg_uid->registry, reg_chan);
- buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
- goto error;
- }
+ health_code_update();
- /*
- * Setup the streams and add it to the session registry.
- */
- ret = setup_buffer_reg_channel(reg_uid->registry,
- ua_chan, reg_chan, app);
+ /* Send all map_counters to application. */
+ pthread_mutex_lock(&buf_reg_map->counter_list_lock);
+ cds_list_for_each_entry(reg_map_counter, &buf_reg_map->counters, lnode) {
+ struct ust_app_map_counter map_counter;
+
+ ret = duplicate_map_counter_object(reg_map_counter, &map_counter);
if (ret < 0) {
- ERR("Error setting up UST channel \"%s\"",
- ua_chan->name);
- goto error;
+ goto error_map_counter_unlock;
}
- created = true;
- }
+ pthread_mutex_lock(&app->sock_lock);
+ // Do send the per cpu counter here
+ ret = ustctl_send_counter_cpu_data_to_ust(app->sock,
+ ua_map->obj, map_counter.obj);
+ pthread_mutex_unlock(&app->sock_lock);
+ assert(ret == 0);
+ if (ret < 0) {
+ (void) release_ust_app_map_counter(-1, &map_counter, app);
+ if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+ ret = -ENOTCONN; /* Caused by app exiting. */
+ }
+ goto error_map_counter_unlock;
+ }
+
+ /*
+ * The return value is not important here. This function will
+ * output an error if needed.
+ */
+ (void) release_ust_app_map_counter(-1, &map_counter, app);
+ }
+ ua_map->is_sent = 1;
+
+error_map_counter_unlock:
+ pthread_mutex_unlock(&buf_reg_map->counter_list_lock);
+error:
+ return ret;
+}
+
/*
 * Create and send to the application the created buffers with per UID buffers.
 *
 * This MUST be called with a RCU read side lock acquired.
 * The session list lock and the session's lock must be acquired.
 *
 * Return 0 on success else a negative value.
 */
static int create_channel_per_uid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_uid *reg_uid;
	struct buffer_reg_channel *buf_reg_chan;
	struct ltt_session *session = NULL;
	enum lttng_error_code notification_ret;
	struct ust_registry_channel *ust_reg_chan;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per UID buffers", ua_chan->name);

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	/*
	 * The session creation handles the creation of this global registry
	 * object. If none can be find, there is a code flow problem or a
	 * teardown race.
	 */
	assert(reg_uid);

	buf_reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
			reg_uid);
	if (buf_reg_chan) {
		/* Per-UID buffers already exist; only send them to this app. */
		goto send_channel;
	}

	/* Create the buffer registry channel object. */
	ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &buf_reg_chan);
	if (ret < 0) {
		ERR("Error creating the UST channel \"%s\" registry instance",
				ua_chan->name);
		goto error;
	}

	session = session_find_by_id(ua_sess->tracing_id);
	assert(session);
	/* Both locks are expected to be held by the caller. */
	assert(pthread_mutex_trylock(&session->lock));
	assert(session_trylock_list());

	/*
	 * Create the buffers on the consumer side. This call populates the
	 * ust app channel object with all streams and data object.
	 */
	ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
			app->bits_per_long, reg_uid->registry->reg.ust,
			session->most_recent_chunk_id.value);
	if (ret < 0) {
		ERR("Error creating UST channel \"%s\" on the consumer daemon",
				ua_chan->name);

		/*
		 * Let's remove the previously created buffer registry channel so
		 * it's not visible anymore in the session registry.
		 */
		ust_registry_channel_del_free(reg_uid->registry->reg.ust,
				ua_chan->tracing_channel_id, false);
		buffer_reg_channel_remove(reg_uid->registry, buf_reg_chan);
		buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
		goto error;
	}

	/*
	 * Setup the streams and add it to the session registry.
	 */
	ret = setup_buffer_reg_channel(reg_uid->registry,
			ua_chan, buf_reg_chan, app);
	if (ret < 0) {
		ERR("Error setting up UST channel \"%s\"", ua_chan->name);
		goto error;
	}

	/* Notify the notification subsystem of the channel's creation. */
	pthread_mutex_lock(&reg_uid->registry->reg.ust->lock);
	ust_reg_chan = ust_registry_channel_find(reg_uid->registry->reg.ust,
			ua_chan->tracing_channel_id);
	assert(ust_reg_chan);
	ust_reg_chan->consumer_key = ua_chan->key;
	ust_reg_chan = NULL;
	pthread_mutex_unlock(&reg_uid->registry->reg.ust->lock);

	notification_ret = notification_thread_command_add_channel(
			notification_thread_handle, session->name,
			lttng_credentials_get_uid(&ua_sess->effective_credentials),
			lttng_credentials_get_gid(&ua_sess->effective_credentials),
			ua_chan->name,
			ua_chan->key, LTTNG_DOMAIN_UST,
			ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
	if (notification_ret != LTTNG_OK) {
		ret = - (int) notification_ret;
		ERR("Failed to add channel to notification thread");
		goto error;
	}

send_channel:
	/* Send buffers to the application. */
	ret = send_channel_uid_to_ust(buf_reg_chan, app, ua_sess, ua_chan);
	if (ret < 0) {
		if (ret != -ENOTCONN) {
			ERR("Error sending channel to application");
		}
		goto error;
	}

error:
	if (session) {
		session_put(session);
	}
	return ret;
}
- rcu_read_lock();
- chan_reg_key = ua_chan->tracing_channel_id;
+/*
+ * Create and send to the application the created buffers with per UID buffers.
+ *
+ * This MUST be called with a RCU read side lock acquired.
+ * The session list lock and the session's lock must be acquired.
+ *
+ * Return 0 on success else a negative value.
+ */
+static int create_map_per_uid(struct ust_app *app,
+ struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
+ struct ust_app_map *ua_map)
+{
+ int ret;
+ struct buffer_reg_uid *buffer_reg_uid;
+ struct buffer_reg_map *buffer_reg_map;
+ struct ltt_session *session = NULL;
+ struct ust_registry_map *ust_reg_map;
- pthread_mutex_lock(®_uid->registry->reg.ust->lock);
- chan_reg = ust_registry_channel_find(reg_uid->registry->reg.ust,
- chan_reg_key);
- assert(chan_reg);
- chan_reg->consumer_key = ua_chan->key;
- chan_reg = NULL;
- pthread_mutex_unlock(®_uid->registry->reg.ust->lock);
-
- session = session_find_by_id(ua_sess->tracing_id);
- assert(session);
-
- cmd_ret = notification_thread_command_add_channel(
- notification_thread_handle, session->name,
- ua_sess->euid, ua_sess->egid,
- ua_chan->name,
- ua_chan->key,
- LTTNG_DOMAIN_UST,
- ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
- rcu_read_unlock();
- if (cmd_ret != LTTNG_OK) {
- ret = - (int) cmd_ret;
- ERR("Failed to add channel to notification thread");
- goto error;
+ assert(app);
+ assert(usess);
+ assert(ua_sess);
+ assert(ua_map);
+
+ DBG("UST app creating map %s with per UID buffers", ua_map->name);
+
+ buffer_reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
+ /*
+ * The session creation handles the creation of this global registry
+ * object. If none can be find, there is a code flow problem or a
+ * teardown race.
+ */
+ assert(buffer_reg_uid);
+
+ buffer_reg_map = buffer_reg_map_find(ua_map->tracing_map_id,
+ buffer_reg_uid);
+ if (buffer_reg_map) {
+ goto send_map;
+ }
+
+ /* Create the buffer registry map object. */
+ ret = create_buffer_reg_map(buffer_reg_uid->registry, ua_map,
+ &buffer_reg_map);
+ if (ret < 0) {
+ ERR("Error creating the UST map \"%s\" registry instance",
+ ua_map->name);
+ goto error;
+ }
+
+ session = session_find_by_id(ua_sess->tracing_id);
+ assert(session);
+ assert(pthread_mutex_trylock(&session->lock));
+ assert(session_trylock_list());
+
+ /*
+ */
+ ret = create_map_object(usess, ua_sess, ua_map);
+ assert(ret == 0);
+ if (ret < 0) {
+ ERR("Error creating UST map object: map_name = \"%s\"", ua_map->name);
+ goto error;
+ }
+
+ /*
+ * Setup the streams and add it to the session registry.
+ */
+ ret = setup_buffer_reg_map(buffer_reg_uid->registry, ua_map,
+ buffer_reg_map, app);
+ if (ret < 0) {
+ ERR("Error setting up UST map \"%s\"", ua_map->name);
+ goto error;
+ }
+
+ /* Notify the notification subsystem of the map's creation. */
+ pthread_mutex_lock(&buffer_reg_uid->registry->reg.ust->lock);
+ ust_reg_map = ust_registry_map_find(buffer_reg_uid->registry->reg.ust,
+ ua_map->tracing_map_id);
+ assert(ust_reg_map);
+ ust_reg_map = NULL;
+ pthread_mutex_unlock(&buffer_reg_uid->registry->reg.ust->lock);
+
+send_map:
+ /* Send buffers to the application. */
+ ret = send_map_uid_to_ust(buffer_reg_map, app, ua_sess, ua_map);
+ if (ret < 0) {
+ if (ret != -ENOTCONN) {
+ ERR("Error sending map to application");
}
+ goto error;
}
error:
+ if (session) {
+ session_put(session);
+ }
return ret;
}
+//static int destroy_map_per_uid(struct ust_app *app,
+// struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
+// struct ust_app_map *ua_map)
+//{
+// int ret;
+// struct buffer_reg_uid *buffer_reg_uid;
+// struct buffer_reg_map *buffer_reg_map;
+//
+// assert(app);
+// assert(usess);
+// assert(ua_sess);
+// assert(ua_map);
+//
+// DBG("UST app destroy map %s with per UID buffers", ua_map->name);
+//
+// buffer_reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
+// /*
+// * The session creation handles the creation of this global registry
+// * object. If none can be find, there is a code flow problem or a
+// * teardown race.
+// */
+// assert(buffer_reg_uid);
+//
+// buffer_reg_map = buffer_reg_map_find(ua_map->tracing_map_id,
+// buffer_reg_uid);
+// if (!buffer_reg_map) {
+// ERR("Can't find map in buffer registry: map-name = '%s', uid = %d",
+// ua_map->name, app->uid);
+// ret = -1;
+// goto end;
+// }
+//
+// buffer_reg_map_destroy(buffer_reg_map, LTTNG_DOMAIN_UST);
+//
+// ret = 0;
+//end:
+// return ret;
+//}
+
/*
* Create and send to the application the created buffers with per PID buffers.
*
+ * Called with UST app session lock held.
+ * The session list lock and the session's lock must be acquired.
+ *
* Return 0 on success else a negative value.
*/
static int create_channel_per_pid(struct ust_app *app,
int ret;
struct ust_registry_session *registry;
enum lttng_error_code cmd_ret;
- struct ltt_session *session;
+ struct ltt_session *session = NULL;
uint64_t chan_reg_key;
- struct ust_registry_channel *chan_reg;
+ struct ust_registry_channel *ust_reg_chan;
assert(app);
assert(usess);
rcu_read_lock();
registry = get_session_registry(ua_sess);
+ /* The UST app session lock is held, registry shall not be null. */
assert(registry);
/* Create and add a new channel registry to session. */
goto error;
}
+ session = session_find_by_id(ua_sess->tracing_id);
+ assert(session);
+
+ assert(pthread_mutex_trylock(&session->lock));
+ assert(session_trylock_list());
+
/* Create and get channel on the consumer side. */
ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
- app->bits_per_long, registry);
+ app->bits_per_long, registry,
+ session->most_recent_chunk_id.value);
if (ret < 0) {
ERR("Error creating UST channel \"%s\" on the consumer daemon",
ua_chan->name);
- goto error;
+ goto error_remove_from_registry;
}
ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
if (ret != -ENOTCONN) {
ERR("Error sending channel to application");
}
- goto error;
+ goto error_remove_from_registry;
}
- session = session_find_by_id(ua_sess->tracing_id);
- assert(session);
-
chan_reg_key = ua_chan->key;
 pthread_mutex_lock(&registry->lock);
- chan_reg = ust_registry_channel_find(registry, chan_reg_key);
- assert(chan_reg);
- chan_reg->consumer_key = ua_chan->key;
+ ust_reg_chan = ust_registry_channel_find(registry, chan_reg_key);
+ assert(ust_reg_chan);
+ ust_reg_chan->consumer_key = ua_chan->key;
 pthread_mutex_unlock(&registry->lock);
cmd_ret = notification_thread_command_add_channel(
notification_thread_handle, session->name,
- ua_sess->euid, ua_sess->egid,
+ lttng_credentials_get_uid(&ua_sess->effective_credentials),
+ lttng_credentials_get_gid(&ua_sess->effective_credentials),
ua_chan->name,
- ua_chan->key,
- LTTNG_DOMAIN_UST,
+ ua_chan->key, LTTNG_DOMAIN_UST,
ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
if (cmd_ret != LTTNG_OK) {
ret = - (int) cmd_ret;
ERR("Failed to add channel to notification thread");
+ goto error_remove_from_registry;
+ }
+
+error_remove_from_registry:
+ if (ret) {
+ ust_registry_channel_del_free(registry, ua_chan->key, false);
+ }
+error:
+ rcu_read_unlock();
+ if (session) {
+ session_put(session);
+ }
+ return ret;
+}
+
/*
 * Create and send to the application the created buffers with per PID buffers.
 *
 * Called with UST app session lock held.
 * The session list lock and the session's lock must be acquired.
 *
 * Return 0 on success else a negative value.
 */
static int create_map_per_pid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_map *ua_map)
{
	int ret;
	struct ust_registry_session *registry;
	struct ltt_session *session = NULL;
	uint64_t map_reg_key;
	struct ust_registry_map *ust_reg_map;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_map);

	DBG("UST app creating map %s with per PID buffers", ua_map->name);

	rcu_read_lock();

	registry = get_session_registry(ua_sess);
	/* The UST app session lock is held, registry shall not be null. */
	assert(registry);

	/* Create and add a new map registry to session. */
	ret = ust_registry_map_add(registry, ua_map->key);
	if (ret < 0) {
		ERR("Error creating the UST map \"%s\" registry instance",
				ua_map->name);
		goto error;
	}

	session = session_find_by_id(ua_sess->tracing_id);
	assert(session);

	/* Both locks are expected to be held by the caller. */
	assert(pthread_mutex_trylock(&session->lock));
	assert(session_trylock_list());

	/* Create and get map. */
	ret = create_map_object(usess, ua_sess, ua_map);
	if (ret < 0) {
		ERR("Error creating UST map object: map_name = \"%s\" ",
				ua_map->name);
		goto error_remove_from_registry;
	}

	ret = send_map_pid_to_ust(app, ua_sess, ua_map);
	if (ret < 0) {
		if (ret != -ENOTCONN) {
			ERR("Error sending map to application");
		}
		goto error_remove_from_registry;
	}

	map_reg_key = ua_map->key;
	pthread_mutex_lock(&registry->lock);
	/* Sanity check: the new map must be present in the registry. */
	ust_reg_map = ust_registry_map_find(registry, map_reg_key);
	assert(ust_reg_map);
	pthread_mutex_unlock(&registry->lock);

error_remove_from_registry:
	/* On failure, undo the registry add above; no-op when ret == 0. */
	if (ret) {
		ust_registry_map_del_free(registry, ua_map->key);
	}
error:
	rcu_read_unlock();
	if (session) {
		session_put(session);
	}
	return ret;
}
/*
* From an already allocated ust app channel, create the channel buffers if
- * need and send it to the application. This MUST be called with a RCU read
+ * needed and send them to the application. This MUST be called with a RCU read
* side lock acquired.
*
+ * Called with UST app session lock held.
+ *
* Return 0 on success or else a negative value. Returns -ENOTCONN if
* the application exited concurrently.
*/
-static int do_create_channel(struct ust_app *app,
+static int ust_app_channel_send(struct ust_app *app,
struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
struct ust_app_channel *ua_chan)
{
assert(app);
assert(usess);
+ assert(usess->active);
assert(ua_sess);
assert(ua_chan);
/* Initialize ust objd object using the received handle and add it. */
lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
- lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
+ lttng_ht_add_unique_ulong(app->ust_chan_objd, &ua_chan->ust_objd_node);
/* If channel is not enabled, disable it on the tracer */
if (!ua_chan->enabled) {
}
/*
- * Create UST app channel and create it on the tracer. Set ua_chanp of the
- * newly created channel if not NULL.
+ * From an already allocated ust app map, create the map buffers if
+ * needed and send them to the application. This MUST be called with a RCU read
+ * side lock acquired.
*
- * Called with UST app session lock and RCU read-side lock held.
+ * Called with UST app session lock held.
*
* Return 0 on success or else a negative value. Returns -ENOTCONN if
* the application exited concurrently.
*/
-static int create_ust_app_channel(struct ust_app_session *ua_sess,
- struct ltt_ust_channel *uchan, struct ust_app *app,
+static int ust_app_map_send(struct ust_app *app,
+ struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
+ struct ust_app_map *ua_map)
+{
+ int ret;
+
+ assert(app);
+ assert(usess);
+ assert(usess->active);
+ assert(ua_sess);
+ assert(ua_map);
+
+ /* Handle buffer type before sending the map to the application. */
+ switch (usess->buffer_type) {
+ case LTTNG_BUFFER_PER_UID:
+ {
+ ret = create_map_per_uid(app, usess, ua_sess, ua_map);
+ if (ret < 0) {
+ goto error;
+ }
+ break;
+ }
+ case LTTNG_BUFFER_PER_PID:
+ {
+ ret = create_map_per_pid(app, usess, ua_sess, ua_map);
+ if (ret < 0) {
+ goto error;
+ }
+ break;
+ }
+ default:
+ assert(0);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* Initialize ust objd object using the received handle and add it. */
+ lttng_ht_node_init_ulong(&ua_map->ust_objd_node, ua_map->handle);
+ lttng_ht_add_unique_ulong(app->ust_map_objd, &ua_map->ust_objd_node);
+
+ /* If map is not enabled, disable it on the tracer */
+ if (!ua_map->enabled) {
+ ret = disable_ust_map(app, ua_sess, ua_map);
+ if (ret < 0) {
+ goto error;
+ }
+ }
+
+error:
+ return ret;
+}
+
+/*
+ * Create UST app channel and return it through ua_chanp if not NULL.
+ *
+ * Called with UST app session lock and RCU read-side lock held.
+ *
+ * Return 0 on success or else a negative value.
+ */
+static int ust_app_channel_allocate(struct ust_app_session *ua_sess,
+ struct ltt_ust_channel *uchan,
enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
struct ust_app_channel **ua_chanp)
{
if (ua_chan == NULL) {
/* Only malloc can fail here */
ret = -ENOMEM;
- goto error_alloc;
+ goto error;
}
shadow_copy_channel(ua_chan, uchan);
/* Set channel type. */
ua_chan->attr.type = type;
- ret = do_create_channel(app, usess, ua_sess, ua_chan);
- if (ret < 0) {
- goto error;
- }
-
- DBG2("UST app create channel %s for PID %d completed", ua_chan->name,
- app->pid);
-
/* Only add the channel if successful on the tracer side. */
lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
end:
return 0;
error:
- delete_ust_app_channel(ua_chan->is_sent ? app->sock : -1, ua_chan, app);
-error_alloc:
+ return ret;
+}
+
+/*
+ * Create UST app map and return it through ua_mapp if not NULL.
+ *
+ * Called with UST app session lock and RCU read-side lock held.
+ *
+ * Return 0 on success or else a negative value.
+ */
+static int ust_app_map_allocate(struct ust_app_session *ua_sess,
+ struct ltt_ust_map *umap,
+ enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
+ struct ust_app_map **ua_mapp)
+{
+ int ret = 0;
+ struct lttng_ht_iter iter;
+ struct lttng_ht_node_str *ua_map_node;
+ struct ust_app_map *ua_map;
+
+ DBG("Allocating map id = %"PRIu64, umap->id);
+
+ /* Lookup map in the ust app session */
+ lttng_ht_lookup(ua_sess->maps, (void *)umap->name, &iter);
+ ua_map_node = lttng_ht_iter_get_node_str(&iter);
+ if (ua_map_node != NULL) {
+ ua_map = caa_container_of(ua_map_node, struct ust_app_map, node);
+ goto end;
+ }
+
+ ua_map = alloc_ust_app_map(umap->name, ua_sess);
+ if (ua_map == NULL) {
+ /* Only malloc can fail here */
+ ret = -ENOMEM;
+ goto error;
+ }
+ //shadow_copy_map(ua_map, umap);
+ ua_map->tracing_map_id = umap->id;
+ ua_map->coalesce_hits = umap->coalesce_hits;
+ ua_map->dead_app_kv_values = &umap->dead_app_kv_values;
+ ua_map->bitness = umap->bitness;
+
+ /* Set map type. */
+ //ua_map->attr.type = type;
+ ua_map->bucket_count = umap->bucket_count;
+
+ /* Only add the map if successful on the tracer side. */
+ lttng_ht_add_unique_str(ua_sess->maps, &ua_map->node);
+end:
+ if (ua_mapp) {
+ *ua_mapp = ua_map;
+ }
+
+ /* Everything went well. */
+ return 0;
+
+error:
return ret;
}
/*
* Create UST app event and create it on the tracer side.
*
+ * Must be called with the RCU read side lock held.
* Called with ust app session mutex held.
*/
static
-int create_ust_app_event(struct ust_app_session *ua_sess,
+int create_ust_app_channel_event(struct ust_app_session *ua_sess,
struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
struct ust_app *app)
{
int ret = 0;
struct ust_app_event *ua_event;
- /* Get event node */
- ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
- uevent->filter, uevent->attr.loglevel, uevent->exclusion);
- if (ua_event != NULL) {
- ret = -EEXIST;
+ ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
+ if (ua_event == NULL) {
+ /* Only failure mode of alloc_ust_app_event(). */
+ ret = -ENOMEM;
goto end;
}
+ shadow_copy_event(ua_event, uevent);
+
+ /* Create it on the tracer side */
+ ret = create_ust_channel_event(app, ua_sess, ua_chan, ua_event);
+ if (ret < 0) {
+ /*
+ * Not found previously means that it does not exist on the
+ * tracer. If the application reports that the event existed,
+ * it means there is a bug in the sessiond or lttng-ust
+ * (or corruption, etc.)
+ */
+ if (ret == -LTTNG_UST_ERR_EXIST) {
+ ERR("Tracer for application reported that an event being created already existed: "
+ "event_name = \"%s\", pid = %d, ppid = %d, uid = %d, gid = %d",
+ uevent->attr.name,
+ app->pid, app->ppid, app->uid,
+ app->gid);
+ }
+ goto error;
+ }
+
+ add_unique_ust_app_event(ua_chan->events, ua_event);
+
+ DBG2("UST app create event completed: app = '%s' (ppid: %d)",
+ app->name, app->ppid);
+
+end:
+ return ret;
+
+error:
+ /* Valid. Calling here is already in a read side lock */
+ delete_ust_app_event(-1, ua_event, app);
+ return ret;
+}
+
+/*
+ * Create UST app event and create it on the tracer side.
+ *
+ * Must be called with the RCU read side lock held.
+ * Called with ust app session mutex held.
+ */
+static
+int create_ust_app_map_event(struct ust_app_session *ua_sess,
+ struct ust_app_map *ua_map, struct ltt_ust_event *uevent,
+ struct ust_app *app)
+{
+ int ret = 0;
+ uint64_t map_reg_key;
+ struct ust_app_event *ua_event;
+ struct ust_registry_session *registry;
- /* Does not exist so create one */
ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
if (ua_event == NULL) {
- /* Only malloc can failed so something is really wrong */
+ /* Only failure mode of alloc_ust_app_event(). */
ret = -ENOMEM;
goto end;
}
shadow_copy_event(ua_event, uevent);
+ registry = get_session_registry(ua_sess);
+ if (!registry) {
+ DBG("Application session is being torn down. Abort event notify");
+ ret = 0;
+ goto error;
+ }
+
+ if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
+ map_reg_key = ua_map->tracing_map_id;
+ } else {
+ map_reg_key = ua_map->key;
+ }
+
+	pthread_mutex_lock(&registry->lock);
+ ret = ust_registry_map_add_token_key_mapping(registry, map_reg_key,
+ uevent->attr.token, uevent->key);
+ assert(ret == 0);
+	pthread_mutex_unlock(&registry->lock);
+
/* Create it on the tracer side */
- ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
+ ret = create_ust_map_event(app, ua_sess, ua_map, uevent->key, ua_event);
if (ret < 0) {
- /* Not found previously means that it does not exist on the tracer */
- assert(ret != -LTTNG_UST_ERR_EXIST);
+ /*
+ * Not found previously means that it does not exist on the
+ * tracer. If the application reports that the event existed,
+ * it means there is a bug in the sessiond or lttng-ust
+ * (or corruption, etc.)
+ */
+ if (ret == -LTTNG_UST_ERR_EXIST) {
+ ERR("Tracer for application reported that an event being created already existed: "
+ "event_name = \"%s\", pid = %d, ppid = %d, uid = %d, gid = %d",
+ uevent->attr.name,
+ app->pid, app->ppid, app->uid,
+ app->gid);
+ }
+
+ /*
+ * FIXME: frdeso: remove key from tokey->key mapping.
+ */
goto error;
}
- add_unique_ust_app_event(ua_chan, ua_event);
+ add_unique_ust_app_event(ua_map->events, ua_event);
- DBG2("UST app create event %s for PID %d completed", ua_event->name,
- app->pid);
+ DBG2("UST app create event completed: app = '%s', tracer token = %"PRIu64" (ppid: %d)",
+ app->name, uevent->attr.token, app->ppid);
end:
	return ret;

+error:
+	/* Valid. Calling here is already in a read side lock */
+	delete_ust_app_event(-1, ua_event, app);
	return ret;
}
+/*
+ * Create UST app event notifier rule and create it on the tracer side.
+ *
+ * Must be called with the RCU read side lock held.
+ * Called with ust app session mutex held.
+ */
+static
+int create_ust_app_event_notifier_rule(struct lttng_trigger *trigger,
+ struct ust_app *app)
+{
+ int ret = 0;
+ struct ust_app_event_notifier_rule *ua_event_notifier_rule;
+
+ ua_event_notifier_rule = alloc_ust_app_event_notifier_rule(trigger);
+ if (ua_event_notifier_rule == NULL) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ /* Create it on the tracer side. */
+ ret = create_ust_event_notifier(app, ua_event_notifier_rule);
+ if (ret < 0) {
+ /*
+ * Not found previously means that it does not exist on the
+ * tracer. If the application reports that the event existed,
+ * it means there is a bug in the sessiond or lttng-ust
+ * (or corruption, etc.)
+ */
+ if (ret == -LTTNG_UST_ERR_EXIST) {
+ ERR("Tracer for application reported that an event notifier being created already exists: "
+ "token = \"%" PRIu64 "\", pid = %d, ppid = %d, uid = %d, gid = %d",
+ lttng_trigger_get_tracer_token(trigger),
+ app->pid, app->ppid, app->uid,
+ app->gid);
+ }
+ goto error;
+ }
+
+ lttng_ht_add_unique_u64(app->token_to_event_notifier_rule_ht,
+ &ua_event_notifier_rule->node);
+
+ DBG2("UST app create token event rule completed: app = '%s' (ppid: %d), token = %" PRIu64,
+ app->name, app->ppid, lttng_trigger_get_tracer_token(trigger));
+
+ goto end;
+
+error:
+ /* The RCU read side lock is already being held by the caller. */
+ delete_ust_app_event_notifier_rule(-1, ua_event_notifier_rule, app);
+end:
+ return ret;
+}
+
/*
* Create UST metadata and open it on the tracer side.
*
struct ust_app_channel *metadata;
struct consumer_socket *socket;
struct ust_registry_session *registry;
+ struct ltt_session *session = NULL;
assert(ua_sess);
assert(app);
assert(consumer);
registry = get_session_registry(ua_sess);
+ /* The UST app session is held registry shall not be null. */
assert(registry);
	pthread_mutex_lock(&registry->lock);
*/
registry->metadata_key = metadata->key;
+ session = session_find_by_id(ua_sess->tracing_id);
+ assert(session);
+
+ assert(pthread_mutex_trylock(&session->lock));
+ assert(session_trylock_list());
+
/*
* Ask the metadata channel creation to the consumer. The metadata object
* will be created by the consumer and kept their. However, the stream is
* consumer.
*/
ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
- registry);
+ registry, session->current_trace_chunk);
if (ret < 0) {
/* Nullify the metadata key so we don't try to close it later on. */
registry->metadata_key = 0;
delete_ust_app_channel(-1, metadata, app);
error:
	pthread_mutex_unlock(&registry->lock);
+ if (session) {
+ session_put(session);
+ }
return ret;
}
*/
struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
{
+ int ret;
struct ust_app *lta = NULL;
+ struct lttng_pipe *event_notifier_event_source_pipe = NULL;
assert(msg);
assert(sock >= 0);
goto error;
}
+ /*
+ * Reserve the two file descriptors of the event source pipe. The write
+ * end will be closed once it is passed to the application, at which
+ * point a single 'put' will be performed.
+ */
+ ret = lttng_fd_get(LTTNG_FD_APPS, 2);
+ if (ret) {
+ ERR("Failed to reserve two file descriptors for the event source pipe while creating a new application instance: app = '%s' (ppid: %d)",
+ msg->name, (int) msg->ppid);
+ goto error;
+ }
+
+ event_notifier_event_source_pipe = lttng_pipe_open(FD_CLOEXEC);
+ if (!event_notifier_event_source_pipe) {
+ PERROR("Failed to open application event source pipe: '%s' (ppid = %d)",
+ msg->name, msg->ppid);
+ goto error;
+ }
+
lta = zmalloc(sizeof(struct ust_app));
if (lta == NULL) {
PERROR("malloc");
- goto error;
+ goto error_free_pipe;
}
+ lta->event_notifier_group.event_pipe = event_notifier_event_source_pipe;
+
lta->ppid = msg->ppid;
lta->uid = msg->uid;
lta->gid = msg->gid;
lta->v_major = msg->major;
lta->v_minor = msg->minor;
lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
- lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
+ lta->ust_chan_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
+ lta->ust_map_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
lta->ust_sessions_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
lta->notify_sock = -1;
+ lta->token_to_event_notifier_rule_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
/* Copy name and make sure it's NULL terminated. */
strncpy(lta->name, msg->name, sizeof(lta->name));
	lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
	CDS_INIT_LIST_HEAD(&lta->teardown_head);
-error:
return lta;
+
+error_free_pipe:
+ lttng_pipe_destroy(event_notifier_event_source_pipe);
+ lttng_fd_put(LTTNG_FD_APPS, 2);
+error:
+ return NULL;
}
/*
assert(app);
assert(app->notify_sock >= 0);
+ app->registration_time = time(NULL);
+
rcu_read_lock();
/*
return ret;
}
+/*
+ * Setup the base event notifier group.
+ *
+ * Return 0 on success else a negative value either an errno code or a
+ * LTTng-UST error code.
+ */
+int ust_app_setup_event_notifier_group(struct ust_app *app)
+{
+ int ret;
+ int event_pipe_write_fd;
+ struct lttng_ust_object_data *event_notifier_group = NULL;
+ enum lttng_error_code lttng_ret;
+ enum event_notifier_error_accounting_status event_notifier_error_accounting_status;
+
+ assert(app);
+
+ /* Get the write side of the pipe. */
+ event_pipe_write_fd = lttng_pipe_get_writefd(
+ app->event_notifier_group.event_pipe);
+
+ pthread_mutex_lock(&app->sock_lock);
+ ret = ustctl_create_event_notifier_group(app->sock,
+ event_pipe_write_fd, &event_notifier_group);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (ret < 0) {
+ if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
+ ERR("Failed to create application event notifier group: ret = %d, app socket fd = %d, event_pipe_write_fd = %d",
+ ret, app->sock, event_pipe_write_fd);
+ } else {
+ DBG("Failed to create application event notifier group (application is dead): app socket fd = %d",
+ app->sock);
+ }
+
+ goto error;
+ }
+
+ ret = lttng_pipe_write_close(app->event_notifier_group.event_pipe);
+ if (ret) {
+ ERR("Failed to close write end of the application's event source pipe: app = '%s' (ppid = %d)",
+ app->name, app->ppid);
+ goto error;
+ }
+
+ /*
+ * Release the file descriptor that was reserved for the write-end of
+ * the pipe.
+ */
+ lttng_fd_put(LTTNG_FD_APPS, 1);
+
+ lttng_ret = notification_thread_command_add_tracer_event_source(
+ notification_thread_handle,
+ lttng_pipe_get_readfd(app->event_notifier_group.event_pipe),
+ LTTNG_DOMAIN_UST);
+ if (lttng_ret != LTTNG_OK) {
+ ERR("Failed to add tracer event source to notification thread");
+		ret = -1;
+ goto error;
+ }
+
+ /* Assign handle only when the complete setup is valid. */
+ app->event_notifier_group.object = event_notifier_group;
+
+ event_notifier_error_accounting_status = event_notifier_error_accounting_register_app(app);
+ if (event_notifier_error_accounting_status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
+ ERR("Failed to setup event notifier error accounting for app");
+ ret = -1;
+ goto error;
+ }
+
+ return ret;
+
+error:
+ ustctl_release_object(app->sock, app->event_notifier_group.object);
+ free(app->event_notifier_group.object);
+ return ret;
+}
+
/*
* Unregister app by removing it from the global traceable app list and freeing
* the data struct.
/*
* Remove application from notify hash table. The thread handling the
* notify socket could have deleted the node so ignore on error because
- * either way it's valid. The close of that socket is handled by the other
- * thread.
+ * either way it's valid. The close of that socket is handled by the
+ * apps_notify_thread.
*/
	iter.iter.node = &lta->notify_sock_n.node;
(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
rcu_read_lock();
+ /* Cleanup notify socket hash table */
+ if (ust_app_ht_by_notify_sock) {
+ cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
+ notify_sock_n.node) {
+ /*
+ * Assert that all notifiers are gone as all triggers
+ * are unregistered prior to this clean-up.
+ */
+ assert(lttng_ht_get_count(app->token_to_event_notifier_rule_ht) == 0);
+
+ ust_app_notify_sock_unregister(app->notify_sock);
+ }
+ }
+
if (ust_app_ht) {
cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
ret = lttng_ht_del(ust_app_ht, &iter);
}
}
- /* Cleanup notify socket hash table */
- if (ust_app_ht_by_notify_sock) {
- cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
- notify_sock_n.node) {
- ret = lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
- assert(!ret);
- }
- }
rcu_read_unlock();
/* Destroy is done only when the ht is empty */
struct ust_app_session *ua_sess;
struct ust_app_channel *ua_chan;
- if (usess == NULL || uchan == NULL) {
- ERR("Disabling UST global channel with NULL values");
- ret = -1;
- goto error;
- }
-
+ assert(usess->active);
DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
uchan->name, usess->id);
}
rcu_read_unlock();
-
-error:
return ret;
}
/*
- * For a specific UST session, enable the channel for all registered apps.
+ * For a specific UST session, disable the channel for all registered apps.
*/
-int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
- struct ltt_ust_channel *uchan)
+int ust_app_disable_map_glb(struct ltt_ust_session *usess,
+ struct ltt_ust_map *umap)
{
int ret = 0;
struct lttng_ht_iter iter;
+ struct lttng_ht_node_str *ua_map_node;
struct ust_app *app;
struct ust_app_session *ua_sess;
+ struct ust_app_map *ua_map;
- if (usess == NULL || uchan == NULL) {
- ERR("Adding UST global channel to NULL values");
- ret = -1;
- goto error;
- }
-
- DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
- uchan->name, usess->id);
+ assert(usess->active);
+ DBG2("UST app disabling map %s from global domain for session id %" PRIu64,
+ umap->name, usess->id);
rcu_read_lock();
/* For every registered applications */
cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ struct lttng_ht_iter uiter;
if (!app->compatible) {
/*
* TODO: In time, we should notice the caller of this error by
continue;
}
- /* Enable channel onto application */
- ret = enable_ust_app_channel(ua_sess, uchan, app);
+ /* Get map */
+ lttng_ht_lookup(ua_sess->maps, (void *)umap->name, &uiter);
+ ua_map_node = lttng_ht_iter_get_node_str(&uiter);
+ /* If the session if found for the app, the map must be there */
+ assert(ua_map_node);
+
+ ua_map = caa_container_of(ua_map_node, struct ust_app_map, node);
+ /* The map must not be already disabled */
+ assert(ua_map->enabled == 1);
+
+ /* Disable map onto application */
+ ret = disable_ust_app_map(ua_sess, ua_map, app);
if (ret < 0) {
/* XXX: We might want to report this error at some point... */
continue;
}
rcu_read_unlock();
-
-error:
return ret;
}
/*
- * Disable an event in a channel and for a specific session.
+ * For a specific UST session, enable the channel for all registered apps.
*/
-int ust_app_disable_event_glb(struct ltt_ust_session *usess,
- struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
+int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
+ struct ltt_ust_channel *uchan)
{
int ret = 0;
- struct lttng_ht_iter iter, uiter;
- struct lttng_ht_node_str *ua_chan_node;
+ struct lttng_ht_iter iter;
struct ust_app *app;
struct ust_app_session *ua_sess;
- struct ust_app_channel *ua_chan;
- struct ust_app_event *ua_event;
- DBG("UST app disabling event %s for all apps in channel "
- "%s for session id %" PRIu64,
- uevent->attr.name, uchan->name, usess->id);
+ assert(usess->active);
+ DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
+ uchan->name, usess->id);
rcu_read_lock();
- /* For all registered applications */
+ /* For every registered applications */
cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
if (!app->compatible) {
/*
}
ua_sess = lookup_session_by_app(usess, app);
if (ua_sess == NULL) {
- /* Next app */
continue;
}
- /* Lookup channel in the ust app session */
- lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
- ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
- if (ua_chan_node == NULL) {
- DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
- "Skipping", uchan->name, usess->id, app->pid);
+ /* Enable channel onto application */
+ ret = enable_ust_app_channel(ua_sess, uchan, app);
+ if (ret < 0) {
+ /* XXX: We might want to report this error at some point... */
continue;
}
- ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
+ }
- ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
+ rcu_read_unlock();
+ return ret;
+}
+
+/*
+ * For a specific UST session, enable the map for all registered apps.
+ */
+int ust_app_enable_map_glb(struct ltt_ust_session *usess,
+ struct ltt_ust_map *umap)
+{
+ int ret = 0;
+ struct lttng_ht_iter iter;
+ struct ust_app *app;
+ struct ust_app_session *ua_sess;
+
+ assert(usess->active);
+ DBG2("UST app enabling map %s to global domain for session id %" PRIu64,
+ umap->name, usess->id);
+
+ rcu_read_lock();
+
+ /* For every registered applications */
+ cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ if (!app->compatible) {
+ /*
+ * TODO: In time, we should notice the caller of this error by
+ * telling him that this is a version error.
+ */
+ continue;
+ }
+ ua_sess = lookup_session_by_app(usess, app);
+ if (ua_sess == NULL) {
+ continue;
+ }
+
+ /* Enable map onto application */
+ ret = enable_ust_app_map(ua_sess, umap, app);
+ if (ret < 0) {
+ /* XXX: We might want to report this error at some point... */
+ continue;
+ }
+ }
+
+ rcu_read_unlock();
+ return ret;
+}
+
+/*
+ * Disable an event in a channel and for a specific session.
+ */
+int ust_app_disable_channel_event_glb(struct ltt_ust_session *usess,
+ struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
+{
+ int ret = 0;
+ struct lttng_ht_iter iter, uiter;
+ struct lttng_ht_node_str *ua_chan_node;
+ struct ust_app *app;
+ struct ust_app_session *ua_sess;
+ struct ust_app_channel *ua_chan;
+ struct ust_app_event *ua_event;
+
+ assert(usess->active);
+ DBG("UST app disabling event %s for all apps in channel "
+ "%s for session id %" PRIu64,
+ uevent->attr.name, uchan->name, usess->id);
+
+ rcu_read_lock();
+
+ /* For all registered applications */
+ cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ if (!app->compatible) {
+ /*
+ * TODO: In time, we should notice the caller of this error by
+ * telling him that this is a version error.
+ */
+ continue;
+ }
+ ua_sess = lookup_session_by_app(usess, app);
+ if (ua_sess == NULL) {
+ /* Next app */
+ continue;
+ }
+
+ /* Lookup channel in the ust app session */
+ lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
+ ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
+ if (ua_chan_node == NULL) {
+ DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
+ "Skipping", uchan->name, usess->id, app->pid);
+ continue;
+ }
+ ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
+
+ ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
uevent->filter, uevent->attr.loglevel,
- uevent->exclusion);
+ uevent->exclusion, uevent->attr.token);
if (ua_event == NULL) {
DBG2("Event %s not found in channel %s for app pid %d."
"Skipping", uevent->attr.name, uchan->name, app->pid);
}
rcu_read_unlock();
-
return ret;
}
/*
- * For a specific UST session, create the channel for all registered apps.
+ * Disable an event in a map and for a specific session.
*/
-int ust_app_create_channel_glb(struct ltt_ust_session *usess,
- struct ltt_ust_channel *uchan)
+int ust_app_disable_map_event_glb(struct ltt_ust_session *usess,
+ struct ltt_ust_map *umap, struct ltt_ust_event *uevent)
{
- int ret = 0, created;
- struct lttng_ht_iter iter;
+ int ret = 0;
+ struct lttng_ht_iter iter, uiter;
+ struct lttng_ht_node_str *ua_map_node;
struct ust_app *app;
- struct ust_app_session *ua_sess = NULL;
-
- /* Very wrong code flow */
- assert(usess);
- assert(uchan);
+ struct ust_app_session *ua_sess;
+ struct ust_app_map *ua_map;
+ struct ust_app_event *ua_event;
- DBG2("UST app adding channel %s to UST domain for session id %" PRIu64,
- uchan->name, usess->id);
+ assert(usess->active);
+ DBG("UST app disabling event %s for all apps in map "
+ "%s for session id %" PRIu64,
+ uevent->attr.name, umap->name, usess->id);
rcu_read_lock();
- /* For every registered applications */
+ /* For all registered applications */
cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
if (!app->compatible) {
/*
*/
continue;
}
- if (!trace_ust_pid_tracker_lookup(usess, app->pid)) {
- /* Skip. */
+ ua_sess = lookup_session_by_app(usess, app);
+ if (ua_sess == NULL) {
+ /* Next app */
+ continue;
+ }
+
+ /* Lookup map in the ust app session */
+ lttng_ht_lookup(ua_sess->maps, (void *)umap->name, &uiter);
+ ua_map_node = lttng_ht_iter_get_node_str(&uiter);
+ if (ua_map_node == NULL) {
+ DBG2("map %s not found in session id %" PRIu64 " for app pid %d."
+ "Skipping", umap->name, usess->id, app->pid);
+ continue;
+ }
+ ua_map = caa_container_of(ua_map_node, struct ust_app_map, node);
+
+ ua_event = find_ust_app_event(ua_map->events, uevent->attr.name,
+ uevent->filter, uevent->attr.loglevel,
+ uevent->exclusion, uevent->attr.token);
+ if (ua_event == NULL) {
+ DBG2("Event %s not found in map %s for app pid %d."
+ "Skipping", uevent->attr.name, umap->name, app->pid);
+ continue;
+ }
+
+ ret = disable_ust_app_event(ua_sess, ua_event, app);
+ if (ret < 0) {
+ /* XXX: Report error someday... */
continue;
}
+ }
+
+ rcu_read_unlock();
+ return ret;
+}
+
+/* The ua_sess lock must be held by the caller. */
+static
+int ust_app_channel_create(struct ltt_ust_session *usess,
+ struct ust_app_session *ua_sess,
+ struct ltt_ust_channel *uchan, struct ust_app *app,
+ struct ust_app_channel **_ua_chan)
+{
+ int ret = 0;
+ struct ust_app_channel *ua_chan = NULL;
+
+ assert(ua_sess);
+ ASSERT_LOCKED(ua_sess->lock);
+
+ if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
+ sizeof(uchan->name))) {
+ copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
+ &uchan->attr);
+ ret = 0;
+ } else {
+ struct ltt_ust_context *uctx = NULL;
/*
- * Create session on the tracer side and add it to app session HT. Note
- * that if session exist, it will simply return a pointer to the ust
- * app session.
+ * Create channel onto application and synchronize its
+ * configuration.
*/
- ret = create_ust_app_session(usess, app, &ua_sess, &created);
+ ret = ust_app_channel_allocate(ua_sess, uchan,
+ LTTNG_UST_CHAN_PER_CPU, usess,
+ &ua_chan);
if (ret < 0) {
- switch (ret) {
- case -ENOTCONN:
- /*
- * The application's socket is not valid. Either a bad socket
- * or a timeout on it. We can't inform the caller that for a
- * specific app, the session failed so lets continue here.
- */
- ret = 0; /* Not an error. */
- continue;
- case -ENOMEM:
- default:
- goto error_rcu_unlock;
- }
+ goto error;
}
- assert(ua_sess);
- pthread_mutex_lock(&ua_sess->lock);
+ ret = ust_app_channel_send(app, usess,
+ ua_sess, ua_chan);
+ if (ret) {
+ goto error;
+ }
- if (ua_sess->deleted) {
- pthread_mutex_unlock(&ua_sess->lock);
- continue;
+ /* Add contexts. */
+ cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
+ ret = create_ust_app_channel_context(ua_chan,
+ &uctx->ctx, app);
+ if (ret) {
+ goto error;
+ }
}
+ }
- if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
- sizeof(uchan->name))) {
- copy_channel_attr_to_ustctl(&ua_sess->metadata_attr, &uchan->attr);
- ret = 0;
- } else {
- /* Create channel onto application. We don't need the chan ref. */
- ret = create_ust_app_channel(ua_sess, uchan, app,
- LTTNG_UST_CHAN_PER_CPU, usess, NULL);
+error:
+ if (ret < 0) {
+ switch (ret) {
+ case -ENOTCONN:
+ /*
+ * The application's socket is not valid. Either a bad socket
+ * or a timeout on it. We can't inform the caller that for a
+ * specific app, the session failed so lets continue here.
+ */
+ ret = 0; /* Not an error. */
+ break;
+ case -ENOMEM:
+ default:
+ break;
}
- pthread_mutex_unlock(&ua_sess->lock);
- if (ret < 0) {
- /* Cleanup the created session if it's the case. */
- if (created) {
- destroy_app_session(app, ua_sess);
- }
- switch (ret) {
- case -ENOTCONN:
- /*
- * The application's socket is not valid. Either a bad socket
- * or a timeout on it. We can't inform the caller that for a
- * specific app, the session failed so lets continue here.
- */
- ret = 0; /* Not an error. */
- continue;
- case -ENOMEM:
- default:
- goto error_rcu_unlock;
- }
+ }
+
+ if (ret == 0 && _ua_chan) {
+ /*
+ * Only return the application's channel on success. Note
+ * that the channel can still be part of the application's
+ * channel hashtable on error.
+ */
+ *_ua_chan = ua_chan;
+ }
+ return ret;
+}
+
+/* The ua_sess lock must be held by the caller. */
+static
+int ust_app_map_create(struct ltt_ust_session *usess,
+ struct ust_app_session *ua_sess,
+ struct ltt_ust_map *umap, struct ust_app *app,
+ struct ust_app_map **_ua_map)
+{
+ int ret = 0;
+ struct ust_app_map *ua_map = NULL;
+
+ assert(ua_sess);
+ ASSERT_LOCKED(ua_sess->lock);
+
+ /*
+ * Create map onto application and synchronize its
+ * configuration.
+ */
+ ret = ust_app_map_allocate(ua_sess, umap,
+ LTTNG_UST_CHAN_PER_CPU, usess,
+ &ua_map);
+ if (ret < 0) {
+ goto error;
+ }
+
+ ret = ust_app_map_send(app, usess, ua_sess, ua_map);
+ if (ret) {
+ goto error;
+ }
+
+error:
+ if (ret < 0) {
+ switch (ret) {
+ case -ENOTCONN:
+ /*
+ * The application's socket is not valid. Either a bad socket
+ * or a timeout on it. We can't inform the caller that for a
+ * specific app, the session failed so lets continue here.
+ */
+ ret = 0; /* Not an error. */
+ break;
+ case -ENOMEM:
+ default:
+ break;
}
}
-error_rcu_unlock:
- rcu_read_unlock();
+ if (ret == 0 && _ua_map) {
+ /*
+ * Only return the application's map on success. Note
+ * that the map can still be part of the application's
+ * map hashtable on error.
+ */
+ *_ua_map = ua_map;
+ }
return ret;
}
/*
* Enable event for a specific session and channel on the tracer.
*/
-int ust_app_enable_event_glb(struct ltt_ust_session *usess,
+int ust_app_enable_channel_event_glb(struct ltt_ust_session *usess,
struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
int ret = 0;
struct ust_app_channel *ua_chan;
struct ust_app_event *ua_event;
+ assert(usess->active);
DBG("UST app enabling event %s for all apps for session id %" PRIu64,
uevent->attr.name, usess->id);
/* Get event node */
ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
- uevent->filter, uevent->attr.loglevel, uevent->exclusion);
+ uevent->filter, uevent->attr.loglevel, uevent->exclusion,
+ uevent->attr.token);
if (ua_event == NULL) {
DBG3("UST app enable event %s not found for app PID %d."
"Skipping app", uevent->attr.name, app->pid);
}
/*
- * For a specific existing UST session and UST channel, creates the event for
- * all registered apps.
+ * Enable event for a specific session and map on the tracer.
*/
-int ust_app_create_event_glb(struct ltt_ust_session *usess,
- struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
+int ust_app_enable_map_event_glb(struct ltt_ust_session *usess,
+ struct ltt_ust_map *umap, struct ltt_ust_event *uevent)
{
int ret = 0;
struct lttng_ht_iter iter, uiter;
- struct lttng_ht_node_str *ua_chan_node;
+ struct lttng_ht_node_str *ua_map_node;
struct ust_app *app;
struct ust_app_session *ua_sess;
- struct ust_app_channel *ua_chan;
+ struct ust_app_map *ua_map;
+ struct ust_app_event *ua_event;
- DBG("UST app creating event %s for all apps for session id %" PRIu64,
+ assert(usess->active);
+ DBG("UST app enabling event %s for all apps for session id %" PRIu64,
uevent->attr.name, usess->id);
+ /*
+ * NOTE: At this point, this function is called only if the session and
+	 * map passed are already created for all apps and enabled on the
+ * tracer also.
+ */
+
rcu_read_lock();
/* For all registered applications */
continue;
}
- /* Lookup channel in the ust app session */
- lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
- ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
- /* If the channel is not found, there is a code flow error */
- assert(ua_chan_node);
+ /* Lookup map in the ust app session */
+ lttng_ht_lookup(ua_sess->maps, (void *)umap->name, &uiter);
+ ua_map_node = lttng_ht_iter_get_node_str(&uiter);
+ /*
+		 * It is possible that the map cannot be found if
+ * the map/event creation occurs concurrently with
+ * an application exit.
+ */
+ if (!ua_map_node) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ continue;
+ }
- ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
+ ua_map = caa_container_of(ua_map_node, struct ust_app_map, node);
- ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
- pthread_mutex_unlock(&ua_sess->lock);
+ /* Get event node */
+ ua_event = find_ust_app_event(ua_map->events, uevent->attr.name,
+ uevent->filter, uevent->attr.loglevel, uevent->exclusion,
+ uevent->attr.token);
+ if (ua_event == NULL) {
+ DBG3("UST app enable event %s not found for app PID %d."
+ "Skipping app", uevent->attr.name, app->pid);
+ goto next_app;
+ }
+
+ ret = enable_ust_app_event(ua_sess, ua_event, app);
if (ret < 0) {
- if (ret != -LTTNG_UST_ERR_EXIST) {
- /* Possible value at this point: -ENOMEM. If so, we stop! */
- break;
- }
- DBG2("UST app event %s already exist on app PID %d",
- uevent->attr.name, app->pid);
- continue;
+ pthread_mutex_unlock(&ua_sess->lock);
+ goto error;
}
+ next_app:
+ pthread_mutex_unlock(&ua_sess->lock);
}
+error:
rcu_read_unlock();
-
return ret;
}
/*
- * Start tracing for a specific UST session and app.
+ * For a specific existing UST session and UST channel, creates the event for
+ * all registered apps.
*/
-static
-int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
+int ust_app_create_channel_event_glb(struct ltt_ust_session *usess,
+ struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
int ret = 0;
+ struct lttng_ht_iter iter, uiter;
+ struct lttng_ht_node_str *ua_chan_node;
+ struct ust_app *app;
struct ust_app_session *ua_sess;
+ struct ust_app_channel *ua_chan;
- DBG("Starting tracing for ust app pid %d", app->pid);
+ assert(usess->active);
+ DBG("UST app creating event %s for all apps for session id %" PRIu64,
+ uevent->attr.name, usess->id);
rcu_read_lock();
- if (!app->compatible) {
- goto end;
- }
+ /* For all registered applications */
+ cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ if (!app->compatible) {
+ /*
+ * TODO: In time, we should notice the caller of this error by
+ * telling him that this is a version error.
+ */
+ continue;
+ }
+ ua_sess = lookup_session_by_app(usess, app);
+ if (!ua_sess) {
+ /* The application has problem or is probably dead. */
+ continue;
+ }
- ua_sess = lookup_session_by_app(usess, app);
- if (ua_sess == NULL) {
- /* The session is in teardown process. Ignore and continue. */
- goto end;
- }
+ pthread_mutex_lock(&ua_sess->lock);
- pthread_mutex_lock(&ua_sess->lock);
+ if (ua_sess->deleted) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ continue;
+ }
- if (ua_sess->deleted) {
- pthread_mutex_unlock(&ua_sess->lock);
- goto end;
- }
+ /* Lookup channel in the ust app session */
+ lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
+ ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
+ /* If the channel is not found, there is a code flow error */
+ assert(ua_chan_node);
- /* Upon restart, we skip the setup, already done */
- if (ua_sess->started) {
- goto skip_setup;
- }
+ ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
- /* Create directories if consumer is LOCAL and has a path defined. */
- if (usess->consumer->type == CONSUMER_DST_LOCAL &&
- strlen(usess->consumer->dst.trace_path) > 0) {
- ret = run_as_mkdir_recursive(usess->consumer->dst.trace_path,
- S_IRWXU | S_IRWXG, ua_sess->euid, ua_sess->egid);
+ ret = create_ust_app_channel_event(ua_sess, ua_chan, uevent, app);
+ pthread_mutex_unlock(&ua_sess->lock);
if (ret < 0) {
- if (errno != EEXIST) {
- ERR("Trace directory creation error");
- goto error_unlock;
+ if (ret != -LTTNG_UST_ERR_EXIST) {
+ /* Possible value at this point: -ENOMEM. If so, we stop! */
+ break;
}
+ DBG2("UST app event %s already exist on app PID %d",
+ uevent->attr.name, app->pid);
+ continue;
}
}
- /*
- * Create the metadata for the application. This returns gracefully if a
- * metadata was already set for the session.
- */
- ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
- if (ret < 0) {
- goto error_unlock;
- }
+ rcu_read_unlock();
+ return ret;
+}
- health_code_update();
+static
+int snapshot_key_values(struct ustctl_daemon_counter *map_handle,
+ struct lttng_ht *key_to_bucket_index_ht, int cpu,
+ const char *key_filter, struct lttng_ht *values)
+{
+ int ret;
+ struct lttng_ht_iter key_iter;
+ struct ust_registry_map_index_ht_entry *map_index_entry;
+
+ /* Iterate over all the formated_key -> counter index */
+ cds_lfht_for_each_entry(key_to_bucket_index_ht->ht,
+ &key_iter.iter, map_index_entry, node.node) {
+ bool overflow = 0, underflow = 0;
+ int64_t local_value = 0;
+ size_t dimension_indexes[1] = {map_index_entry->index};
+
+ if (key_filter && strcmp(key_filter,
+ map_index_entry->formated_key) != 0) {
+ continue;
+ }
-skip_setup:
- /* This start the UST tracing */
- pthread_mutex_lock(&app->sock_lock);
- ret = ustctl_start_session(app->sock, ua_sess->handle);
- pthread_mutex_unlock(&app->sock_lock);
- if (ret < 0) {
- if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
- ERR("Error starting tracing for app pid: %d (ret: %d)",
- app->pid, ret);
- } else {
- DBG("UST app start session failed. Application is dead.");
- /*
- * This is normal behavior, an application can die during the
- * creation process. Don't report an error so the execution can
- * continue normally.
- */
- pthread_mutex_unlock(&ua_sess->lock);
+ ret = ustctl_counter_read(map_handle,
+ dimension_indexes, cpu, &local_value,
+ &overflow, &underflow);
+ if (ret) {
+ ERR("Error getting counter value from the tracer: key = '%s'",
+ map_index_entry->formated_key);
+ ret = -1;
goto end;
}
- goto error_unlock;
- }
-
- /* Indicate that the session has been started once */
- ua_sess->started = 1;
-
- pthread_mutex_unlock(&ua_sess->lock);
-
- health_code_update();
- /* Quiescent wait after starting trace */
- pthread_mutex_lock(&app->sock_lock);
- ret = ustctl_wait_quiescent(app->sock);
- pthread_mutex_unlock(&app->sock_lock);
- if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
- ERR("UST app wait quiescent failed for app pid %d ret %d",
- app->pid, ret);
+ map_add_or_increment_map_values(values,
+ map_index_entry->formated_key, local_value,
+ underflow, overflow);
}
-
+ ret = 0;
end:
- rcu_read_unlock();
- health_code_update();
- return 0;
-
-error_unlock:
- pthread_mutex_unlock(&ua_sess->lock);
- rcu_read_unlock();
- health_code_update();
- return -1;
+ return ret;
}
-/*
- * Stop tracing for a specific UST session and app.
- */
static
-int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
+int ust_app_map_list_values_per_uid_with_bitness_and_cpu(
+ const struct ltt_ust_session *usess,
+ const struct ltt_ust_map *umap,
+ uint32_t app_bitness,
+ uint32_t cpu,
+ const char *key_filter,
+ struct lttng_ht *values)
{
int ret = 0;
- struct ust_app_session *ua_sess;
- struct ust_registry_session *registry;
+ struct lttng_ht_iter iter;
+ struct buffer_reg_uid *buf_reg_uid;
+ struct buffer_reg_map *buf_reg_map;
+ struct ust_registry_session *ust_reg_sess;
+ struct lttng_ht_node_u64 *ust_reg_map_node;
+ struct ust_registry_map *ust_reg_map;
+
+ buf_reg_uid = buffer_reg_uid_find(usess->id, app_bitness, usess->uid);
+ if (!buf_reg_uid) {
+ /*
+ * Buffer registry entry for uid not found. Probably no app for
+ * this UID at the moment.
+ */
+ DBG("No buffer registry entry found for uid: ust-sess-id = %"PRIu64", bitness = %"PRIu32", uid = %d",
+ usess->id, app_bitness, usess->uid);
+ /*
+ * Not an error. Leave the key value pair unchanged and return.
+ */
+ ret = 0;
+ goto end;
+ }
- DBG("Stopping tracing for ust app pid %d", app->pid);
+ buf_reg_map = buffer_reg_map_find(umap->id, buf_reg_uid);
+	if (!buf_reg_map) {
+ ERR("Error getting per-uid map buffer registry entry: map-id = %"PRIu64,
+ umap->id);
+ ret = -1;
+ goto end;
+ }
- rcu_read_lock();
+ ust_reg_sess = buf_reg_uid->registry->reg.ust;
- if (!app->compatible) {
- goto end_no_session;
+ /* Get the ust_reg map object from the registry */
+ // FIXME: frdeso: This can be changed to ust_registry_map_find() right?
+
+ lttng_ht_lookup(ust_reg_sess->maps, (void *) &umap->id, &iter);
+ ust_reg_map_node = lttng_ht_iter_get_node_u64(&iter);
+ if (!ust_reg_map_node) {
+ ERR("Error getting per-uid map buffer registry entry: map-id = %"PRIu64,
+ umap->id);
+ ret = -1;
+ goto end;
}
+ ust_reg_map = caa_container_of(ust_reg_map_node,
+ struct ust_registry_map, node);
- ua_sess = lookup_session_by_app(usess, app);
- if (ua_sess == NULL) {
- goto end_no_session;
+ ret = snapshot_key_values(buf_reg_map->daemon_counter,
+ ust_reg_map->key_string_to_bucket_index_ht,
+ cpu, key_filter, values);
+ if (ret) {
+ abort();
}
- pthread_mutex_lock(&ua_sess->lock);
- if (ua_sess->deleted) {
- pthread_mutex_unlock(&ua_sess->lock);
- goto end_no_session;
+ ret = 0;
+end:
+ return ret;
+}
+
+static
+int ust_app_map_list_values_per_uid(const struct ltt_ust_session *usess,
+ const struct ltt_ust_map *umap,
+ const struct lttng_map_query *query,
+ struct lttng_map_content *map_content)
+{
+ int i, ret = 0;
+ enum lttng_map_query_status map_query_status;
+ const char *key_filter;
+ struct lttng_ht *values = NULL;
+ bool sum_cpus = lttng_map_query_get_config_sum_by_cpu(query);
+ enum lttng_map_query_config_buffer config_buffer;
+ enum lttng_map_query_config_cpu config_cpu;
+ int selected_cpu;
+
+ map_query_status = lttng_map_query_get_key_filter(query, &key_filter);
+ if (map_query_status == LTTNG_MAP_QUERY_STATUS_NONE) {
+ key_filter = NULL;
+ } else if (map_query_status != LTTNG_MAP_QUERY_STATUS_OK) {
+ ret = -1;
+ goto end;
}
- /*
- * If started = 0, it means that stop trace has been called for a session
- * that was never started. It's possible since we can have a fail start
- * from either the application manager thread or the command thread. Simply
- * indicate that this is a stop error.
- */
- if (!ua_sess->started) {
- goto error_rcu_unlock;
+ config_cpu = lttng_map_query_get_config_cpu(query);
+ if (config_cpu == LTTNG_MAP_QUERY_CONFIG_CPU_SUBSET) {
+ unsigned int count;
+ map_query_status = lttng_map_query_get_cpu_count(query, &count);
+ assert(map_query_status == LTTNG_MAP_QUERY_STATUS_OK);
+ assert(count == 1);
+
+ map_query_status = lttng_map_query_get_cpu_at_index(query, 0,
+ &selected_cpu);
+ assert(map_query_status == LTTNG_MAP_QUERY_STATUS_OK);
}
- health_code_update();
+ config_buffer = lttng_map_query_get_config_buffer(query);
+ if (config_buffer == LTTNG_MAP_QUERY_CONFIG_BUFFER_UST_UID_SUBSET) {
+ unsigned int count;
+ uid_t selected_uid;
- /* This inhibits UST tracing */
- pthread_mutex_lock(&app->sock_lock);
- ret = ustctl_stop_session(app->sock, ua_sess->handle);
- pthread_mutex_unlock(&app->sock_lock);
- if (ret < 0) {
- if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
- ERR("Error stopping tracing for app pid: %d (ret: %d)",
- app->pid, ret);
- } else {
- DBG("UST app stop session failed. Application is dead.");
- /*
- * This is normal behavior, an application can die during the
- * creation process. Don't report an error so the execution can
- * continue normally.
- */
- goto end_unlock;
+ map_query_status = lttng_map_query_get_uid_count(query, &count);
+ assert(map_query_status == LTTNG_MAP_QUERY_STATUS_OK);
+ assert(count == 1);
+
+ map_query_status = lttng_map_query_get_uid_at_index(query, 0,
+ &selected_uid);
+ assert(map_query_status == LTTNG_MAP_QUERY_STATUS_OK);
+
+ if (selected_uid != usess->uid) {
+ ret = 0;
+ goto end;
}
- goto error_rcu_unlock;
}
- health_code_update();
-
- /* Quiescent wait after stopping trace */
- pthread_mutex_lock(&app->sock_lock);
- ret = ustctl_wait_quiescent(app->sock);
- pthread_mutex_unlock(&app->sock_lock);
- if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
- ERR("UST app wait quiescent failed for app pid %d ret %d",
- app->pid, ret);
+ if (sum_cpus) {
+ values = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
}
- health_code_update();
+ for (i = 0; i < umap->nr_cpu; i++) {
+ if (config_cpu == LTTNG_MAP_QUERY_CONFIG_CPU_SUBSET) {
+ if (selected_cpu != i) {
+ continue;
+ }
+ }
- registry = get_session_registry(ua_sess);
- assert(registry);
+ if (!sum_cpus) {
+ values = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
+ }
- /* Push metadata for application before freeing the application. */
- (void) push_metadata(registry, ua_sess->consumer);
+ ret = ust_app_map_list_values_per_uid_with_bitness_and_cpu(
+ usess, umap, 32, i, key_filter,
+ values);
+ if (ret) {
+ abort();
+ }
-end_unlock:
- pthread_mutex_unlock(&ua_sess->lock);
-end_no_session:
- rcu_read_unlock();
- health_code_update();
- return 0;
+ ret = ust_app_map_list_values_per_uid_with_bitness_and_cpu(
+ usess, umap, 64, i, key_filter,
+ values);
+ if (ret) {
+ abort();
+ }
+ if (!sum_cpus) {
+ ret = map_new_content_section(map_content,
+ LTTNG_MAP_KEY_VALUE_PAIR_LIST_TYPE_UST_PER_UID,
+ sum_cpus, usess->uid, i, values);
+ if (ret) {
+ abort();
+ }
-error_rcu_unlock:
- pthread_mutex_unlock(&ua_sess->lock);
- rcu_read_unlock();
- health_code_update();
- return -1;
+ lttng_ht_destroy(values);
+ }
+ }
+
+ if (sum_cpus) {
+ ret = map_new_content_section(map_content,
+ LTTNG_MAP_KEY_VALUE_PAIR_LIST_TYPE_UST_PER_UID,
+ sum_cpus, usess->uid, 0, values);
+ if (ret) {
+ abort();
+ }
+ lttng_ht_destroy(values);
+ }
+
+end:
+ return ret;
}
static
-int ust_app_flush_app_session(struct ust_app *app,
- struct ust_app_session *ua_sess)
+int append_dead_app_kv(struct ltt_ust_map *umap,
+ const char *key_filter,
+ struct lttng_map_content *map_content)
{
- int ret, retval = 0;
- struct lttng_ht_iter iter;
- struct ust_app_channel *ua_chan;
- struct consumer_socket *socket;
-
- DBG("Flushing app session buffers for ust app pid %d", app->pid);
+ int ret;
+ struct lttng_ht *dead_app_kv_ht;
+ struct map_kv_ht_entry *kv_entry;
+ struct lttng_ht_iter key_iter;
- rcu_read_lock();
+ struct lttng_ht *values = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
- if (!app->compatible) {
- goto end_not_compatible;
- }
+ pthread_mutex_lock(&(umap->dead_app_kv_values.lock));
- pthread_mutex_lock(&ua_sess->lock);
+ assert(umap->dead_app_kv_values.dead_app_kv_values_64bits);
+ dead_app_kv_ht = umap->dead_app_kv_values.dead_app_kv_values_64bits;
- if (ua_sess->deleted) {
- goto end_deleted;
+ cds_lfht_for_each_entry(dead_app_kv_ht->ht, &key_iter.iter, kv_entry,
+ node.node) {
+ if (key_filter && strcmp(key_filter, kv_entry->key) != 0) {
+ continue;
+ }
+ map_add_or_increment_map_values(values, kv_entry->key,
+ kv_entry->value, kv_entry->has_underflowed,
+ kv_entry->has_overflowed);
}
- health_code_update();
+ assert(umap->dead_app_kv_values.dead_app_kv_values_32bits);
- /* Flushing buffers */
- socket = consumer_find_socket_by_bitness(app->bits_per_long,
- ua_sess->consumer);
-
- /* Flush buffers and push metadata. */
- switch (ua_sess->buffer_type) {
- case LTTNG_BUFFER_PER_PID:
- cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
- node.node) {
- health_code_update();
- ret = consumer_flush_channel(socket, ua_chan->key);
- if (ret) {
- ERR("Error flushing consumer channel");
- retval = -1;
- continue;
- }
+ dead_app_kv_ht = umap->dead_app_kv_values.dead_app_kv_values_32bits;
+ cds_lfht_for_each_entry(dead_app_kv_ht->ht, &key_iter.iter, kv_entry,
+ node.node) {
+ if (key_filter && strcmp(key_filter, kv_entry->key) != 0) {
+ continue;
}
- break;
- case LTTNG_BUFFER_PER_UID:
- default:
- assert(0);
- break;
+ map_add_or_increment_map_values(values, kv_entry->key,
+ kv_entry->value, kv_entry->has_underflowed,
+ kv_entry->has_overflowed);
}
- health_code_update();
+ pthread_mutex_unlock(&umap->dead_app_kv_values.lock);
-end_deleted:
- pthread_mutex_unlock(&ua_sess->lock);
+ ret = map_new_content_section(map_content,
+ LTTNG_MAP_KEY_VALUE_PAIR_LIST_TYPE_UST_PER_PID_AGGREGATED,
+ true, 0, 0, values);
-end_not_compatible:
- rcu_read_unlock();
- health_code_update();
- return retval;
+ lttng_ht_destroy(values);
+ if (ret) {
+ ERR("Error appending deadapp kv");
+ goto end;
+ }
+
+
+ ret = 0;
+
+end:
+ return ret;
}
-/*
- * Flush buffers for all applications for a specific UST session.
- * Called with UST session lock held.
- */
static
-int ust_app_flush_session(struct ltt_ust_session *usess)
-
+int ust_app_map_list_values_per_pid_with_bitness_and_cpu(
+ const struct ltt_ust_session *usess,
+ struct ust_app *app,
+ struct ltt_ust_map *umap,
+ uint32_t app_bitness,
+ uint32_t cpu,
+ const char *key_filter,
+ struct lttng_ht *values)
{
int ret = 0;
- DBG("Flushing session buffers for all ust apps");
+ struct lttng_ht_iter map_iter;
+ struct lttng_ht_node_str *ua_map_node;
+ struct ust_app_map *ua_map;
+ struct ust_app_session *ua_sess;
+ struct ust_registry_session *ust_reg_sess;
+ struct ust_registry_map *ust_reg_map;
- rcu_read_lock();
+ if (app->bits_per_long != app_bitness) {
+ ret = 0;
+		goto end;
+ }
- /* Flush buffers and push metadata. */
- switch (usess->buffer_type) {
- case LTTNG_BUFFER_PER_UID:
- {
- struct buffer_reg_uid *reg;
- struct lttng_ht_iter iter;
+ ua_sess = lookup_session_by_app(usess, app);
+ if (!ua_sess) {
+ /* Session not associated with this app. */
+ ret = 0;
+		goto end;
+ }
- /* Flush all per UID buffers associated to that session. */
- cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
- struct ust_registry_session *ust_session_reg;
- struct buffer_reg_channel *reg_chan;
- struct consumer_socket *socket;
+ ust_reg_sess = get_session_registry(ua_sess);
+ if (!ust_reg_sess) {
+ DBG("Application session is being torn down. Skip application.");
+ ret = 0;
+		goto end;
+ }
- /* Get consumer socket to use to push the metadata.*/
- socket = consumer_find_socket_by_bitness(reg->bits_per_long,
- usess->consumer);
- if (!socket) {
- /* Ignore request if no consumer is found for the session. */
- continue;
- }
+ /* Lookup map in the ust app session */
+ lttng_ht_lookup(ua_sess->maps, (void *)umap->name, &map_iter);
+ ua_map_node = lttng_ht_iter_get_node_str(&map_iter);
- cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
- reg_chan, node.node) {
- /*
- * The following call will print error values so the return
- * code is of little importance because whatever happens, we
- * have to try them all.
- */
- (void) consumer_flush_channel(socket, reg_chan->consumer_key);
- }
+ assert(ua_map_node != NULL);
+ ua_map = caa_container_of(ua_map_node, struct ust_app_map, node);
- ust_session_reg = reg->registry->reg.ust;
- /* Push metadata. */
- (void) push_metadata(ust_session_reg, usess->consumer);
- }
- break;
- }
- case LTTNG_BUFFER_PER_PID:
- {
- struct ust_app_session *ua_sess;
- struct lttng_ht_iter iter;
- struct ust_app *app;
+ pthread_mutex_lock(&ust_reg_sess->lock);
+ ust_reg_map = ust_registry_map_find(ust_reg_sess, ua_map->key);
+ pthread_mutex_unlock(&ust_reg_sess->lock);
+ assert(ust_reg_map);
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- ua_sess = lookup_session_by_app(usess, app);
- if (ua_sess == NULL) {
- continue;
- }
- (void) ust_app_flush_app_session(app, ua_sess);
- }
- break;
- }
- default:
- ret = -1;
- assert(0);
- break;
+ ret = snapshot_key_values(ua_map->map_handle,
+ ust_reg_map->key_string_to_bucket_index_ht,
+ cpu, key_filter, values);
+ if (ret) {
+ ERR("Error snapshoting the content of map");
+ goto end;
}
- rcu_read_unlock();
- health_code_update();
+end:
return ret;
}
static
-int ust_app_clear_quiescent_app_session(struct ust_app *app,
- struct ust_app_session *ua_sess)
+int ust_app_map_list_values_per_pid(const struct ltt_ust_session *usess,
+ struct ltt_ust_map *umap,
+ const struct lttng_map_query *query,
+ struct lttng_map_content *map_content)
{
- int ret = 0;
- struct lttng_ht_iter iter;
- struct ust_app_channel *ua_chan;
- struct consumer_socket *socket;
-
- DBG("Clearing stream quiescent state for ust app pid %d", app->pid);
+ enum lttng_map_query_status map_query_status;
+ const char *key_filter;
+ struct lttng_ht *values;
+ bool sum_cpus = lttng_map_query_get_config_sum_by_cpu(query);
+ bool sum_pids = lttng_map_query_get_config_sum_by_pid(query);
+ enum lttng_map_query_config_cpu config_cpu;
+ int selected_cpu, i, ret = 0;
+ struct lttng_ht_iter app_iter;
+ struct ust_app *app;
- rcu_read_lock();
+	values = NULL;
- if (!app->compatible) {
- goto end_not_compatible;
+ map_query_status = lttng_map_query_get_key_filter(query, &key_filter);
+ if (map_query_status == LTTNG_MAP_QUERY_STATUS_NONE) {
+ key_filter = NULL;
+ } else if (map_query_status != LTTNG_MAP_QUERY_STATUS_OK) {
+ ret = -1;
+ goto end;
}
- pthread_mutex_lock(&ua_sess->lock);
+ config_cpu = lttng_map_query_get_config_cpu(query);
+ if (config_cpu == LTTNG_MAP_QUERY_CONFIG_CPU_SUBSET) {
+ unsigned int count;
+ map_query_status = lttng_map_query_get_cpu_count(query, &count);
+ assert(map_query_status == LTTNG_MAP_QUERY_STATUS_OK);
+ assert(count == 1);
- if (ua_sess->deleted) {
- goto end_unlock;
+ map_query_status = lttng_map_query_get_cpu_at_index(query, 0,
+ &selected_cpu);
+ assert(map_query_status == LTTNG_MAP_QUERY_STATUS_OK);
}
- health_code_update();
-
- socket = consumer_find_socket_by_bitness(app->bits_per_long,
- ua_sess->consumer);
- if (!socket) {
- ERR("Failed to find consumer (%" PRIu32 ") socket",
- app->bits_per_long);
- ret = -1;
- goto end_unlock;
+ /* Sum all cpus and pids on the same table. */
+ if (sum_cpus && sum_pids) {
+ values = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
}
- /* Clear quiescent state. */
- switch (ua_sess->buffer_type) {
- case LTTNG_BUFFER_PER_PID:
- cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter,
- ua_chan, node.node) {
- health_code_update();
- ret = consumer_clear_quiescent_channel(socket,
- ua_chan->key);
+ if (!sum_cpus && sum_pids) {
+ /* Iterate over all currently registered apps. */
+ for (i = 0; i < umap->nr_cpu; i++) {
+ values = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
+ cds_lfht_for_each_entry(ust_app_ht->ht, &app_iter.iter, app, pid_n.node) {
+ ret = ust_app_map_list_values_per_pid_with_bitness_and_cpu(
+ usess, app, umap, 32, i, key_filter, values);
+ if (ret) {
+ abort();
+ }
+ ret = ust_app_map_list_values_per_pid_with_bitness_and_cpu(
+ usess, app, umap, 64, i, key_filter, values);
+ if (ret) {
+ abort();
+ }
+ }
+ ret = map_new_content_section(map_content,
+ LTTNG_MAP_KEY_VALUE_PAIR_LIST_TYPE_UST_PER_PID,
+				sum_cpus, 0, i, values);
if (ret) {
- ERR("Error clearing quiescent state for consumer channel");
- ret = -1;
- continue;
+ abort();
}
- }
- break;
- case LTTNG_BUFFER_PER_UID:
- default:
- assert(0);
- ret = -1;
- break;
- }
-
- health_code_update();
-
-end_unlock:
- pthread_mutex_unlock(&ua_sess->lock);
-end_not_compatible:
- rcu_read_unlock();
- health_code_update();
- return ret;
-}
+ lttng_ht_destroy(values);
+ }
+ } else {
+ /* Iterate over all currently registered apps. */
+ cds_lfht_for_each_entry(ust_app_ht->ht, &app_iter.iter, app, pid_n.node) {
-/*
- * Clear quiescent state in each stream for all applications for a
- * specific UST session.
- * Called with UST session lock held.
- */
-static
-int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)
+ if (sum_cpus && !sum_pids) {
+ values = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
+ }
-{
- int ret = 0;
+ for (i = 0; i < umap->nr_cpu; i++) {
- DBG("Clearing stream quiescent state for all ust apps");
+ if (config_cpu == LTTNG_MAP_QUERY_CONFIG_CPU_SUBSET) {
+ if (selected_cpu != i) {
+ continue;
+ }
+ }
- rcu_read_lock();
+ if (!sum_cpus && !sum_pids) {
+ values = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
+ }
- switch (usess->buffer_type) {
- case LTTNG_BUFFER_PER_UID:
- {
- struct lttng_ht_iter iter;
- struct buffer_reg_uid *reg;
+ ret = ust_app_map_list_values_per_pid_with_bitness_and_cpu(
+ usess, app, umap, 32, i, key_filter, values);
+ if (ret) {
+ abort();
+ }
+ ret = ust_app_map_list_values_per_pid_with_bitness_and_cpu(
+ usess, app, umap, 64, i, key_filter, values);
+ if (ret) {
+ abort();
+ }
- /*
- * Clear quiescent for all per UID buffers associated to
- * that session.
- */
- cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
- struct consumer_socket *socket;
- struct buffer_reg_channel *reg_chan;
+ if (!sum_cpus && !sum_pids) {
+ ret = map_new_content_section(map_content,
+ LTTNG_MAP_KEY_VALUE_PAIR_LIST_TYPE_UST_PER_PID,
+ sum_cpus, app->pid, i, values);
+ if (ret) {
+ abort();
+ }
- /* Get associated consumer socket.*/
- socket = consumer_find_socket_by_bitness(
- reg->bits_per_long, usess->consumer);
- if (!socket) {
- /*
- * Ignore request if no consumer is found for
- * the session.
- */
- continue;
+ lttng_ht_destroy(values);
+ }
}
+ if (sum_cpus && !sum_pids) {
+ ret = map_new_content_section(map_content,
+ LTTNG_MAP_KEY_VALUE_PAIR_LIST_TYPE_UST_PER_PID,
+					sum_cpus, app->pid, 0, values);
+ if (ret) {
+ abort();
+ }
- cds_lfht_for_each_entry(reg->registry->channels->ht,
- &iter.iter, reg_chan, node.node) {
- /*
- * The following call will print error values so
- * the return code is of little importance
- * because whatever happens, we have to try them
- * all.
- */
- (void) consumer_clear_quiescent_channel(socket,
- reg_chan->consumer_key);
+ lttng_ht_destroy(values);
}
}
- break;
}
- case LTTNG_BUFFER_PER_PID:
- {
- struct ust_app_session *ua_sess;
- struct lttng_ht_iter iter;
- struct ust_app *app;
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
- pid_n.node) {
- ua_sess = lookup_session_by_app(usess, app);
- if (ua_sess == NULL) {
- continue;
- }
- (void) ust_app_clear_quiescent_app_session(app,
- ua_sess);
+ if (sum_cpus && sum_pids) {
+ ret = map_new_content_section(map_content,
+ LTTNG_MAP_KEY_VALUE_PAIR_LIST_TYPE_UST_PER_PID,
+ sum_cpus, 0, 0, values);
+ if (ret) {
+ abort();
}
- break;
+ lttng_ht_destroy(values);
}
- default:
- ret = -1;
- assert(0);
- break;
+
+ /* Append dead app aggregated key-value pairs. */
+ ret = append_dead_app_kv(umap, key_filter, map_content);
+ if (ret) {
+ ERR("Error appending values from dead apps map");
+ goto end;
}
- rcu_read_unlock();
- health_code_update();
+end:
return ret;
}
-/*
- * Destroy a specific UST session in apps.
- */
-static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
+int ust_app_map_list_values(const struct ltt_ust_session *usess,
+ struct ltt_ust_map *umap,
+ const struct lttng_map_query *query,
+ struct lttng_map_content **map_content)
{
int ret;
- struct ust_app_session *ua_sess;
- struct lttng_ht_iter iter;
- struct lttng_ht_node_u64 *node;
-
- DBG("Destroy tracing for ust app pid %d", app->pid);
+ struct lttng_map_content *local_map_content = NULL;
- rcu_read_lock();
-
- if (!app->compatible) {
+ local_map_content = lttng_map_content_create(usess->buffer_type);
+ if (!local_map_content) {
+ ERR("Error creating a map content list");
+ ret = -1;
goto end;
}
-
- __lookup_session_by_app(usess, app, &iter);
- node = lttng_ht_iter_get_node_u64(&iter);
- if (node == NULL) {
- /* Session is being or is deleted. */
- goto end;
- }
- ua_sess = caa_container_of(node, struct ust_app_session, node);
-
- health_code_update();
- destroy_app_session(app, ua_sess);
-
- health_code_update();
-
- /* Quiescent wait after stopping trace */
- pthread_mutex_lock(&app->sock_lock);
- ret = ustctl_wait_quiescent(app->sock);
- pthread_mutex_unlock(&app->sock_lock);
- if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
- ERR("UST app wait quiescent failed for app pid %d ret %d",
- app->pid, ret);
+ rcu_read_lock();
+ if (usess->buffer_type == LTTNG_BUFFER_PER_UID) {
+ ret = ust_app_map_list_values_per_uid(usess, umap, query,
+ local_map_content);
+ if (ret) {
+ ERR("Error adding per-uid map value");
+ ret = -1;
+ goto end;
+ }
+ } else {
+ ret = ust_app_map_list_values_per_pid(usess, umap, query,
+ local_map_content);
+ if (ret) {
+ ERR("Error adding per-pid map value");
+ ret = -1;
+ goto end;
+ }
}
+ *map_content = local_map_content;
+ local_map_content = NULL;
+ ret = 0;
end:
rcu_read_unlock();
- health_code_update();
- return 0;
+
+ lttng_map_content_destroy(local_map_content);
+ return ret;
}
/*
- * Start tracing for the UST session.
+ * For a specific existing UST session and UST map, creates the event for
+ * all registered apps.
*/
-int ust_app_start_trace_all(struct ltt_ust_session *usess)
+int ust_app_create_map_event_glb(struct ltt_ust_session *usess,
+ struct ltt_ust_map *umap, struct ltt_ust_event *uevent)
{
int ret = 0;
- struct lttng_ht_iter iter;
+ struct lttng_ht_iter iter, uiter;
+ struct lttng_ht_node_str *ua_map_node;
struct ust_app *app;
+ struct ust_app_session *ua_sess;
+ struct ust_app_map *ua_map;
- DBG("Starting all UST traces");
+ assert(usess->active);
+ DBG("UST app creating event %s in map %s for all apps for session id %" PRIu64,
+ uevent->attr.name, umap->name, usess->id);
rcu_read_lock();
- /*
- * In a start-stop-start use-case, we need to clear the quiescent state
- * of each channel set by the prior stop command, thus ensuring that a
- * following stop or destroy is sure to grab a timestamp_end near those
- * operations, even if the packet is empty.
- */
- (void) ust_app_clear_quiescent_session(usess);
-
+ /* For all registered applications */
cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- ret = ust_app_start_trace(usess, app);
- if (ret < 0) {
- /* Continue to next apps even on error */
+ if (!app->compatible) {
+ /*
+ * TODO: In time, we should notice the caller of this error by
+ * telling him that this is a version error.
+ */
+ continue;
+ }
+ ua_sess = lookup_session_by_app(usess, app);
+ if (!ua_sess) {
+ /* The application has problem or is probably dead. */
continue;
}
- }
-
- rcu_read_unlock();
- return 0;
-}
+ pthread_mutex_lock(&ua_sess->lock);
-/*
- * Start tracing for the UST session.
- * Called with UST session lock held.
- */
-int ust_app_stop_trace_all(struct ltt_ust_session *usess)
-{
- int ret = 0;
- struct lttng_ht_iter iter;
- struct ust_app *app;
+ if (ua_sess->deleted) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ continue;
+ }
- DBG("Stopping all UST traces");
+ /* Lookup map in the ust app session */
+ lttng_ht_lookup(ua_sess->maps, (void *)umap->name, &uiter);
+ ua_map_node = lttng_ht_iter_get_node_str(&uiter);
+ /* If the map is not found, there is a code flow error */
+ assert(ua_map_node);
- rcu_read_lock();
+ ua_map = caa_container_of(ua_map_node, struct ust_app_map, node);
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- ret = ust_app_stop_trace(usess, app);
+ ret = create_ust_app_map_event(ua_sess, ua_map, uevent, app);
+		/* Non-zero returns (e.g. -LTTNG_UST_ERR_EXIST) are handled below. */
+ pthread_mutex_unlock(&ua_sess->lock);
if (ret < 0) {
- /* Continue to next apps even on error */
+ if (ret != -LTTNG_UST_ERR_EXIST) {
+ /* Possible value at this point: -ENOMEM. If so, we stop! */
+ break;
+ }
+ DBG2("UST app event %s already exist on app PID %d",
+ uevent->attr.name, app->pid);
continue;
}
}
- (void) ust_app_flush_session(usess);
-
rcu_read_unlock();
-
- return 0;
+ return ret;
}
/*
- * Destroy app UST session.
+ * Start tracing for a specific UST session and app.
+ *
+ * Called with UST app session lock held.
+ *
*/
-int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
+static
+int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
int ret = 0;
- struct lttng_ht_iter iter;
- struct ust_app *app;
+ struct ust_app_session *ua_sess;
- DBG("Destroy all UST traces");
+ DBG("Starting tracing for ust app pid %d", app->pid);
rcu_read_lock();
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- ret = destroy_trace(usess, app);
- if (ret < 0) {
- /* Continue to next apps even on error */
- continue;
- }
+ if (!app->compatible) {
+ goto end;
}
- rcu_read_unlock();
-
- return 0;
-}
-
-static
-void ust_app_global_create(struct ltt_ust_session *usess, struct ust_app *app)
-{
- int ret = 0;
- struct lttng_ht_iter iter, uiter;
- struct ust_app_session *ua_sess = NULL;
- struct ust_app_channel *ua_chan;
- struct ust_app_event *ua_event;
- struct ust_app_ctx *ua_ctx;
- int is_created = 0;
-
- ret = create_ust_app_session(usess, app, &ua_sess, &is_created);
- if (ret < 0) {
- /* Tracer is probably gone or ENOMEM. */
- goto error;
- }
- if (!is_created) {
- /* App session already created. */
+ ua_sess = lookup_session_by_app(usess, app);
+ if (ua_sess == NULL) {
+ /* The session is in teardown process. Ignore and continue. */
goto end;
}
- assert(ua_sess);
pthread_mutex_lock(&ua_sess->lock);
goto end;
}
- /*
- * We can iterate safely here over all UST app session since the create ust
- * app session above made a shadow copy of the UST global domain from the
- * ltt ust session.
- */
- cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
- node.node) {
- ret = do_create_channel(app, usess, ua_sess, ua_chan);
- if (ret < 0 && ret != -ENOTCONN) {
- /*
- * Stop everything. On error, the application
- * failed, no more file descriptor are available
- * or ENOMEM so stopping here is the only thing
- * we can do for now. The only exception is
- * -ENOTCONN, which indicates that the application
- * has exit.
- */
- goto error_unlock;
- }
+ if (ua_sess->enabled) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ goto end;
+ }
- /*
- * Add context using the list so they are enabled in the same order the
- * user added them.
- */
- cds_list_for_each_entry(ua_ctx, &ua_chan->ctx_list, list) {
- ret = create_ust_channel_context(ua_chan, ua_ctx, app);
- if (ret < 0) {
- goto error_unlock;
- }
- }
+ /* Upon restart, we skip the setup, already done */
+ if (ua_sess->started) {
+ goto skip_setup;
+ }
+ health_code_update();
- /* For each events */
- cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
- node.node) {
- ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
- if (ret < 0) {
- goto error_unlock;
- }
+skip_setup:
+ /* This starts the UST tracing */
+ pthread_mutex_lock(&app->sock_lock);
+ ret = ustctl_start_session(app->sock, ua_sess->handle);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (ret < 0) {
+ if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+ ERR("Error starting tracing for app pid: %d (ret: %d)",
+ app->pid, ret);
+ } else {
+ DBG("UST app start session failed. Application is dead.");
+ /*
+ * This is normal behavior, an application can die during the
+ * creation process. Don't report an error so the execution can
+ * continue normally.
+ */
+ pthread_mutex_unlock(&ua_sess->lock);
+ goto end;
}
+ goto error_unlock;
}
+ /* Indicate that the session has been started once */
+ ua_sess->started = 1;
+ ua_sess->enabled = 1;
+
pthread_mutex_unlock(&ua_sess->lock);
- if (usess->active) {
- ret = ust_app_start_trace(usess, app);
- if (ret < 0) {
- goto error;
- }
+ health_code_update();
- DBG2("UST trace started for app pid %d", app->pid);
+ /* Quiescent wait after starting trace */
+ pthread_mutex_lock(&app->sock_lock);
+ ret = ustctl_wait_quiescent(app->sock);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+ ERR("UST app wait quiescent failed for app pid %d ret %d",
+ app->pid, ret);
}
+
end:
- /* Everything went well at this point. */
- return;
+ rcu_read_unlock();
+ health_code_update();
+ return 0;
error_unlock:
pthread_mutex_unlock(&ua_sess->lock);
-error:
- if (ua_sess) {
- destroy_app_session(app, ua_sess);
- }
- return;
+ rcu_read_unlock();
+ health_code_update();
+ return -1;
}
+/*
+ * Stop tracing for a specific UST session and app.
+ */
static
-void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
+int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
+ int ret = 0;
struct ust_app_session *ua_sess;
+ struct ust_registry_session *registry;
- ua_sess = lookup_session_by_app(usess, app);
- if (ua_sess == NULL) {
- return;
- }
- destroy_app_session(app, ua_sess);
-}
-
-/*
- * Add channels/events from UST global domain to registered apps at sock.
- *
- * Called with session lock held.
- * Called with RCU read-side lock held.
- */
-void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
-{
- assert(usess);
+ DBG("Stopping tracing for ust app pid %d", app->pid);
- DBG2("UST app global update for app sock %d for session id %" PRIu64,
- app->sock, usess->id);
+ rcu_read_lock();
if (!app->compatible) {
- return;
+ goto end_no_session;
}
- if (trace_ust_pid_tracker_lookup(usess, app->pid)) {
- ust_app_global_create(usess, app);
- } else {
- ust_app_global_destroy(usess, app);
+ ua_sess = lookup_session_by_app(usess, app);
+ if (ua_sess == NULL) {
+ goto end_no_session;
}
-}
-/*
- * Called with session lock held.
- */
-void ust_app_global_update_all(struct ltt_ust_session *usess)
-{
- struct lttng_ht_iter iter;
- struct ust_app *app;
+ pthread_mutex_lock(&ua_sess->lock);
- rcu_read_lock();
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- ust_app_global_update(usess, app);
+ if (ua_sess->deleted) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ goto end_no_session;
}
- rcu_read_unlock();
-}
-/*
- * Add context to a specific channel for global UST domain.
- */
-int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
- struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
-{
- int ret = 0;
- struct lttng_ht_node_str *ua_chan_node;
- struct lttng_ht_iter iter, uiter;
- struct ust_app_channel *ua_chan = NULL;
- struct ust_app_session *ua_sess;
- struct ust_app *app;
+ /*
+ * If started = 0, it means that stop trace has been called for a session
+	 * that was never started. It's possible since we can have a failed start
+ * from either the application manager thread or the command thread. Simply
+ * indicate that this is a stop error.
+ */
+ if (!ua_sess->started) {
+ goto error_rcu_unlock;
+ }
- rcu_read_lock();
+ health_code_update();
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- if (!app->compatible) {
+ /* This inhibits UST tracing */
+ pthread_mutex_lock(&app->sock_lock);
+ ret = ustctl_stop_session(app->sock, ua_sess->handle);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (ret < 0) {
+ if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+ ERR("Error stopping tracing for app pid: %d (ret: %d)",
+ app->pid, ret);
+ } else {
+ DBG("UST app stop session failed. Application is dead.");
/*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
+ * This is normal behavior, an application can die during the
+ * creation process. Don't report an error so the execution can
+ * continue normally.
*/
- continue;
- }
- ua_sess = lookup_session_by_app(usess, app);
- if (ua_sess == NULL) {
- continue;
+ goto end_unlock;
}
+ goto error_rcu_unlock;
+ }
- pthread_mutex_lock(&ua_sess->lock);
-
- if (ua_sess->deleted) {
- pthread_mutex_unlock(&ua_sess->lock);
- continue;
- }
+ health_code_update();
+ ua_sess->enabled = 0;
- /* Lookup channel in the ust app session */
- lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
- ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
- if (ua_chan_node == NULL) {
- goto next_app;
- }
- ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
- node);
- ret = create_ust_app_channel_context(ua_sess, ua_chan, &uctx->ctx, app);
- if (ret < 0) {
- goto next_app;
- }
- next_app:
- pthread_mutex_unlock(&ua_sess->lock);
+ /* Quiescent wait after stopping trace */
+ pthread_mutex_lock(&app->sock_lock);
+ ret = ustctl_wait_quiescent(app->sock);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+ ERR("UST app wait quiescent failed for app pid %d ret %d",
+ app->pid, ret);
}
+ health_code_update();
+
+ registry = get_session_registry(ua_sess);
+
+	/* The UST app session is held; the registry shall not be null. */
+ assert(registry);
+
+ /* Push metadata for application before freeing the application. */
+ (void) push_metadata(registry, ua_sess->consumer);
+
+end_unlock:
+ pthread_mutex_unlock(&ua_sess->lock);
+end_no_session:
rcu_read_unlock();
- return ret;
+ health_code_update();
+ return 0;
+
+error_rcu_unlock:
+ pthread_mutex_unlock(&ua_sess->lock);
+ rcu_read_unlock();
+ health_code_update();
+ return -1;
}
-/*
- * Enable event for a channel from a UST session for a specific PID.
- */
-int ust_app_enable_event_pid(struct ltt_ust_session *usess,
- struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
+static
+int ust_app_flush_app_session(struct ust_app *app,
+ struct ust_app_session *ua_sess)
{
- int ret = 0;
+ int ret, retval = 0;
struct lttng_ht_iter iter;
- struct lttng_ht_node_str *ua_chan_node;
- struct ust_app *app;
- struct ust_app_session *ua_sess;
struct ust_app_channel *ua_chan;
- struct ust_app_event *ua_event;
+ struct consumer_socket *socket;
- DBG("UST app enabling event %s for PID %d", uevent->attr.name, pid);
+ DBG("Flushing app session buffers for ust app pid %d", app->pid);
rcu_read_lock();
- app = ust_app_find_by_pid(pid);
- if (app == NULL) {
- ERR("UST app enable event per PID %d not found", pid);
- ret = -1;
- goto end;
- }
-
if (!app->compatible) {
- ret = 0;
- goto end;
- }
-
- ua_sess = lookup_session_by_app(usess, app);
- if (!ua_sess) {
- /* The application has problem or is probably dead. */
- ret = 0;
- goto end;
+ goto end_not_compatible;
}
pthread_mutex_lock(&ua_sess->lock);
if (ua_sess->deleted) {
- ret = 0;
- goto end_unlock;
+ goto end_deleted;
}
- /* Lookup channel in the ust app session */
- lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
- ua_chan_node = lttng_ht_iter_get_node_str(&iter);
- /* If the channel is not found, there is a code flow error */
- assert(ua_chan_node);
+ health_code_update();
- ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
+ /* Flushing buffers */
+ socket = consumer_find_socket_by_bitness(app->bits_per_long,
+ ua_sess->consumer);
- ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
- uevent->filter, uevent->attr.loglevel, uevent->exclusion);
- if (ua_event == NULL) {
- ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
- if (ret < 0) {
- goto end_unlock;
- }
- } else {
- ret = enable_ust_app_event(ua_sess, ua_event, app);
- if (ret < 0) {
- goto end_unlock;
+ /* Flush buffers and push metadata. */
+ switch (ua_sess->buffer_type) {
+ case LTTNG_BUFFER_PER_PID:
+ cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
+ node.node) {
+ health_code_update();
+ ret = consumer_flush_channel(socket, ua_chan->key);
+ if (ret) {
+ ERR("Error flushing consumer channel");
+ retval = -1;
+ continue;
+ }
}
+ break;
+ case LTTNG_BUFFER_PER_UID:
+ default:
+ assert(0);
+ break;
}
-end_unlock:
+ health_code_update();
+
+end_deleted:
pthread_mutex_unlock(&ua_sess->lock);
-end:
+
+end_not_compatible:
rcu_read_unlock();
- return ret;
+ health_code_update();
+ return retval;
}
/*
- * Receive registration and populate the given msg structure.
- *
- * On success return 0 else a negative value returned by the ustctl call.
+ * Flush buffers for all applications for a specific UST session.
+ * Called with UST session lock held.
*/
-int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
+static
+int ust_app_flush_session(struct ltt_ust_session *usess)
+
{
- int ret;
- uint32_t pid, ppid, uid, gid;
+ int ret = 0;
- assert(msg);
+ DBG("Flushing session buffers for all ust apps");
- ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
- &pid, &ppid, &uid, &gid,
- &msg->bits_per_long,
- &msg->uint8_t_alignment,
- &msg->uint16_t_alignment,
- &msg->uint32_t_alignment,
- &msg->uint64_t_alignment,
- &msg->long_alignment,
- &msg->byte_order,
- msg->name);
- if (ret < 0) {
- switch (-ret) {
- case EPIPE:
- case ECONNRESET:
- case LTTNG_UST_ERR_EXITING:
- DBG3("UST app recv reg message failed. Application died");
- break;
- case LTTNG_UST_ERR_UNSUP_MAJOR:
- ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
- msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
- LTTNG_UST_ABI_MINOR_VERSION);
- break;
- default:
- ERR("UST app recv reg message failed with ret %d", ret);
- break;
- }
- goto error;
- }
- msg->pid = (pid_t) pid;
- msg->ppid = (pid_t) ppid;
- msg->uid = (uid_t) uid;
- msg->gid = (gid_t) gid;
+ rcu_read_lock();
-error:
- return ret;
-}
+ /* Flush buffers and push metadata. */
+ switch (usess->buffer_type) {
+ case LTTNG_BUFFER_PER_UID:
+ {
+ struct buffer_reg_uid *reg;
+ struct lttng_ht_iter iter;
-/*
- * Return a ust app session object using the application object and the
- * session object descriptor has a key. If not found, NULL is returned.
- * A RCU read side lock MUST be acquired when calling this function.
-*/
-static struct ust_app_session *find_session_by_objd(struct ust_app *app,
- int objd)
-{
- struct lttng_ht_node_ulong *node;
- struct lttng_ht_iter iter;
- struct ust_app_session *ua_sess = NULL;
+ /* Flush all per UID buffers associated to that session. */
+ cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+ struct ust_registry_session *ust_session_reg;
+ struct buffer_reg_channel *buf_reg_chan;
+ struct consumer_socket *socket;
- assert(app);
+			/* Get consumer socket to use to push the metadata. */
+ socket = consumer_find_socket_by_bitness(reg->bits_per_long,
+ usess->consumer);
+ if (!socket) {
+ /* Ignore request if no consumer is found for the session. */
+ continue;
+ }
- lttng_ht_lookup(app->ust_sessions_objd, (void *)((unsigned long) objd), &iter);
- node = lttng_ht_iter_get_node_ulong(&iter);
- if (node == NULL) {
- DBG2("UST app session find by objd %d not found", objd);
- goto error;
- }
-
- ua_sess = caa_container_of(node, struct ust_app_session, ust_objd_node);
-
-error:
- return ua_sess;
-}
-
-/*
- * Return a ust app channel object using the application object and the channel
- * object descriptor has a key. If not found, NULL is returned. A RCU read side
- * lock MUST be acquired before calling this function.
- */
-static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
- int objd)
-{
- struct lttng_ht_node_ulong *node;
- struct lttng_ht_iter iter;
- struct ust_app_channel *ua_chan = NULL;
-
- assert(app);
+ cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
+ buf_reg_chan, node.node) {
+ /*
+ * The following call will print error values so the return
+ * code is of little importance because whatever happens, we
+ * have to try them all.
+ */
+ (void) consumer_flush_channel(socket, buf_reg_chan->consumer_key);
+ }
- lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
- node = lttng_ht_iter_get_node_ulong(&iter);
- if (node == NULL) {
- DBG2("UST app channel find by objd %d not found", objd);
- goto error;
+ ust_session_reg = reg->registry->reg.ust;
+ /* Push metadata. */
+ (void) push_metadata(ust_session_reg, usess->consumer);
+ }
+ break;
}
+ case LTTNG_BUFFER_PER_PID:
+ {
+ struct ust_app_session *ua_sess;
+ struct lttng_ht_iter iter;
+ struct ust_app *app;
- ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
+ cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ ua_sess = lookup_session_by_app(usess, app);
+ if (ua_sess == NULL) {
+ continue;
+ }
+ (void) ust_app_flush_app_session(app, ua_sess);
+ }
+ break;
+ }
+ default:
+ ret = -1;
+ assert(0);
+ break;
+ }
-error:
- return ua_chan;
+ rcu_read_unlock();
+ health_code_update();
+ return ret;
}
-/*
- * Reply to a register channel notification from an application on the notify
- * socket. The channel metadata is also created.
- *
- * The session UST registry lock is acquired in this function.
- *
- * On success 0 is returned else a negative value.
- */
-static int reply_ust_register_channel(int sock, int sobjd, int cobjd,
- size_t nr_fields, struct ustctl_field *fields)
+static
+int ust_app_clear_quiescent_app_session(struct ust_app *app,
+ struct ust_app_session *ua_sess)
{
- int ret, ret_code = 0;
- uint32_t chan_id, reg_count;
- uint64_t chan_reg_key;
- enum ustctl_channel_header type;
- struct ust_app *app;
+ int ret = 0;
+ struct lttng_ht_iter iter;
struct ust_app_channel *ua_chan;
- struct ust_app_session *ua_sess;
- struct ust_registry_session *registry;
- struct ust_registry_channel *chan_reg;
+ struct consumer_socket *socket;
- rcu_read_lock();
+ DBG("Clearing stream quiescent state for ust app pid %d", app->pid);
- /* Lookup application. If not found, there is a code flow error. */
- app = find_app_by_notify_sock(sock);
- if (!app) {
- DBG("Application socket %d is being teardown. Abort event notify",
- sock);
- ret = 0;
- free(fields);
- goto error_rcu_unlock;
- }
+ rcu_read_lock();
- /* Lookup channel by UST object descriptor. */
- ua_chan = find_channel_by_objd(app, cobjd);
- if (!ua_chan) {
- DBG("Application channel is being teardown. Abort event notify");
- ret = 0;
- free(fields);
- goto error_rcu_unlock;
+ if (!app->compatible) {
+ goto end_not_compatible;
}
- assert(ua_chan->session);
- ua_sess = ua_chan->session;
-
- /* Get right session registry depending on the session buffer type. */
- registry = get_session_registry(ua_sess);
- assert(registry);
+ pthread_mutex_lock(&ua_sess->lock);
- /* Depending on the buffer type, a different channel key is used. */
- if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
- chan_reg_key = ua_chan->tracing_channel_id;
- } else {
- chan_reg_key = ua_chan->key;
+ if (ua_sess->deleted) {
+ goto end_unlock;
}
- pthread_mutex_lock(®istry->lock);
-
- chan_reg = ust_registry_channel_find(registry, chan_reg_key);
- assert(chan_reg);
-
- if (!chan_reg->register_done) {
- reg_count = ust_registry_get_event_count(chan_reg);
- if (reg_count < 31) {
- type = USTCTL_CHANNEL_HEADER_COMPACT;
- } else {
- type = USTCTL_CHANNEL_HEADER_LARGE;
- }
+ health_code_update();
- chan_reg->nr_ctx_fields = nr_fields;
- chan_reg->ctx_fields = fields;
- chan_reg->header_type = type;
- } else {
- /* Get current already assigned values. */
- type = chan_reg->header_type;
- free(fields);
- /* Set to NULL so the error path does not do a double free. */
- fields = NULL;
+ socket = consumer_find_socket_by_bitness(app->bits_per_long,
+ ua_sess->consumer);
+ if (!socket) {
+ ERR("Failed to find consumer (%" PRIu32 ") socket",
+ app->bits_per_long);
+ ret = -1;
+ goto end_unlock;
}
- /* Channel id is set during the object creation. */
- chan_id = chan_reg->chan_id;
- /* Append to metadata */
- if (!chan_reg->metadata_dumped) {
- ret_code = ust_metadata_channel_statedump(registry, chan_reg);
- if (ret_code) {
- ERR("Error appending channel metadata (errno = %d)", ret_code);
- goto reply;
+ /* Clear quiescent state. */
+ switch (ua_sess->buffer_type) {
+ case LTTNG_BUFFER_PER_PID:
+ cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter,
+ ua_chan, node.node) {
+ health_code_update();
+ ret = consumer_clear_quiescent_channel(socket,
+ ua_chan->key);
+ if (ret) {
+ ERR("Error clearing quiescent state for consumer channel");
+ ret = -1;
+ continue;
+ }
}
+ break;
+ case LTTNG_BUFFER_PER_UID:
+ default:
+ assert(0);
+ ret = -1;
+ break;
}
-reply:
- DBG3("UST app replying to register channel key %" PRIu64
- " with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
- ret_code);
-
- ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
- if (ret < 0) {
- if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
- ERR("UST app reply channel failed with ret %d", ret);
- } else {
- DBG3("UST app reply channel failed. Application died");
- }
- goto error;
- }
+ health_code_update();
- /* This channel registry registration is completed. */
- chan_reg->register_done = 1;
+end_unlock:
+ pthread_mutex_unlock(&ua_sess->lock);
-error:
- pthread_mutex_unlock(®istry->lock);
-error_rcu_unlock:
+end_not_compatible:
rcu_read_unlock();
- if (ret) {
- free(fields);
- }
+ health_code_update();
return ret;
}
/*
- * Add event to the UST channel registry. When the event is added to the
- * registry, the metadata is also created. Once done, this replies to the
- * application with the appropriate error code.
- *
- * The session UST registry lock is acquired in the function.
- *
- * On success 0 is returned else a negative value.
+ * Clear quiescent state in each stream for all applications for a
+ * specific UST session.
+ * Called with UST session lock held.
*/
-static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
- char *sig, size_t nr_fields, struct ustctl_field *fields,
- int loglevel_value, char *model_emf_uri)
-{
- int ret, ret_code;
- uint32_t event_id = 0;
- uint64_t chan_reg_key;
- struct ust_app *app;
- struct ust_app_channel *ua_chan;
- struct ust_app_session *ua_sess;
- struct ust_registry_session *registry;
-
- rcu_read_lock();
+static
+int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)
- /* Lookup application. If not found, there is a code flow error. */
- app = find_app_by_notify_sock(sock);
- if (!app) {
- DBG("Application socket %d is being teardown. Abort event notify",
- sock);
- ret = 0;
- free(sig);
- free(fields);
- free(model_emf_uri);
- goto error_rcu_unlock;
- }
+{
+ int ret = 0;
- /* Lookup channel by UST object descriptor. */
- ua_chan = find_channel_by_objd(app, cobjd);
- if (!ua_chan) {
- DBG("Application channel is being teardown. Abort event notify");
- ret = 0;
- free(sig);
- free(fields);
- free(model_emf_uri);
- goto error_rcu_unlock;
- }
+ DBG("Clearing stream quiescent state for all ust apps");
- assert(ua_chan->session);
- ua_sess = ua_chan->session;
+ rcu_read_lock();
- registry = get_session_registry(ua_sess);
- assert(registry);
+ switch (usess->buffer_type) {
+ case LTTNG_BUFFER_PER_UID:
+ {
+ struct lttng_ht_iter iter;
+ struct buffer_reg_uid *reg;
- if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
- chan_reg_key = ua_chan->tracing_channel_id;
- } else {
- chan_reg_key = ua_chan->key;
- }
+ /*
+ * Clear quiescent for all per UID buffers associated to
+ * that session.
+ */
+ cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+ struct consumer_socket *socket;
+ struct buffer_reg_channel *buf_reg_chan;
- pthread_mutex_lock(®istry->lock);
+			/* Get associated consumer socket. */
+ socket = consumer_find_socket_by_bitness(
+ reg->bits_per_long, usess->consumer);
+ if (!socket) {
+ /*
+ * Ignore request if no consumer is found for
+ * the session.
+ */
+ continue;
+ }
- /*
- * From this point on, this call acquires the ownership of the sig, fields
- * and model_emf_uri meaning any free are done inside it if needed. These
- * three variables MUST NOT be read/write after this.
- */
- ret_code = ust_registry_create_event(registry, chan_reg_key,
- sobjd, cobjd, name, sig, nr_fields, fields,
- loglevel_value, model_emf_uri, ua_sess->buffer_type,
- &event_id, app);
-
- /*
- * The return value is returned to ustctl so in case of an error, the
- * application can be notified. In case of an error, it's important not to
- * return a negative error or else the application will get closed.
- */
- ret = ustctl_reply_register_event(sock, event_id, ret_code);
- if (ret < 0) {
- if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
- ERR("UST app reply event failed with ret %d", ret);
- } else {
- DBG3("UST app reply event failed. Application died");
+ cds_lfht_for_each_entry(reg->registry->channels->ht,
+ &iter.iter, buf_reg_chan, node.node) {
+ /*
+ * The following call will print error values so
+ * the return code is of little importance
+ * because whatever happens, we have to try them
+ * all.
+ */
+ (void) consumer_clear_quiescent_channel(socket,
+ buf_reg_chan->consumer_key);
+ }
}
- /*
- * No need to wipe the create event since the application socket will
- * get close on error hence cleaning up everything by itself.
- */
- goto error;
+ break;
}
+ case LTTNG_BUFFER_PER_PID:
+ {
+ struct ust_app_session *ua_sess;
+ struct lttng_ht_iter iter;
+ struct ust_app *app;
- DBG3("UST registry event %s with id %" PRId32 " added successfully",
- name, event_id);
+ cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
+ pid_n.node) {
+ ua_sess = lookup_session_by_app(usess, app);
+ if (ua_sess == NULL) {
+ continue;
+ }
+ (void) ust_app_clear_quiescent_app_session(app,
+ ua_sess);
+ }
+ break;
+ }
+ default:
+ ret = -1;
+ assert(0);
+ break;
+ }
-error:
- pthread_mutex_unlock(®istry->lock);
-error_rcu_unlock:
rcu_read_unlock();
+ health_code_update();
return ret;
}
/*
- * Add enum to the UST session registry. Once done, this replies to the
- * application with the appropriate error code.
- *
- * The session UST registry lock is acquired within this function.
- *
- * On success 0 is returned else a negative value.
+ * Destroy a specific UST session in apps.
*/
-static int add_enum_ust_registry(int sock, int sobjd, char *name,
- struct ustctl_enum_entry *entries, size_t nr_entries)
+static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
- int ret = 0, ret_code;
- struct ust_app *app;
+ int ret;
struct ust_app_session *ua_sess;
- struct ust_registry_session *registry;
- uint64_t enum_id = -1ULL;
+ struct lttng_ht_iter iter;
+ struct lttng_ht_node_u64 *node;
+
+ DBG("Destroy tracing for ust app pid %d", app->pid);
rcu_read_lock();
- /* Lookup application. If not found, there is a code flow error. */
- app = find_app_by_notify_sock(sock);
- if (!app) {
- /* Return an error since this is not an error */
- DBG("Application socket %d is being torn down. Aborting enum registration",
- sock);
- free(entries);
- goto error_rcu_unlock;
+ if (!app->compatible) {
+ goto end;
}
- /* Lookup session by UST object descriptor. */
- ua_sess = find_session_by_objd(app, sobjd);
- if (!ua_sess) {
- /* Return an error since this is not an error */
- DBG("Application session is being torn down. Aborting enum registration.");
- free(entries);
- goto error_rcu_unlock;
+ __lookup_session_by_app(usess, app, &iter);
+ node = lttng_ht_iter_get_node_u64(&iter);
+ if (node == NULL) {
+ /* Session is being or is deleted. */
+ goto end;
}
+ ua_sess = caa_container_of(node, struct ust_app_session, node);
- registry = get_session_registry(ua_sess);
- assert(registry);
+ health_code_update();
+ destroy_app_session(app, ua_sess);
- pthread_mutex_lock(®istry->lock);
+ health_code_update();
+
+ /* Quiescent wait after stopping trace */
+ pthread_mutex_lock(&app->sock_lock);
+ ret = ustctl_wait_quiescent(app->sock);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+ ERR("UST app wait quiescent failed for app pid %d ret %d",
+ app->pid, ret);
+ }
+end:
+ rcu_read_unlock();
+ health_code_update();
+ return 0;
+}
+
+/*
+ * Start tracing for the UST session.
+ */
+int ust_app_start_trace_all(struct ltt_ust_session *usess)
+{
+ struct lttng_ht_iter iter;
+ struct ust_app *app;
+
+ DBG("Starting all UST traces");
/*
- * From this point on, the callee acquires the ownership of
- * entries. The variable entries MUST NOT be read/written after
- * call.
+ * Even though the start trace might fail, flag this session active so
+	 * other applications coming in are started by default.
*/
- ret_code = ust_registry_create_or_find_enum(registry, sobjd, name,
- entries, nr_entries, &enum_id);
- entries = NULL;
+ usess->active = 1;
+
+ rcu_read_lock();
/*
- * The return value is returned to ustctl so in case of an error, the
- * application can be notified. In case of an error, it's important not to
- * return a negative error or else the application will get closed.
+ * In a start-stop-start use-case, we need to clear the quiescent state
+ * of each channel set by the prior stop command, thus ensuring that a
+ * following stop or destroy is sure to grab a timestamp_end near those
+ * operations, even if the packet is empty.
*/
- ret = ustctl_reply_register_enum(sock, enum_id, ret_code);
- if (ret < 0) {
- if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
- ERR("UST app reply enum failed with ret %d", ret);
- } else {
- DBG3("UST app reply enum failed. Application died");
- }
- /*
- * No need to wipe the create enum since the application socket will
- * get close on error hence cleaning up everything by itself.
- */
- goto error;
- }
+ (void) ust_app_clear_quiescent_session(usess);
- DBG3("UST registry enum %s added successfully or already found", name);
+ cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ ust_app_global_update(usess, app);
+ }
-error:
- pthread_mutex_unlock(®istry->lock);
-error_rcu_unlock:
rcu_read_unlock();
- return ret;
+
+ return 0;
}
/*
- * Handle application notification through the given notify socket.
- *
- * Return 0 on success or else a negative value.
+ * Stop tracing for the UST session.
+ * Called with UST session lock held.
*/
-int ust_app_recv_notify(int sock)
+int ust_app_stop_trace_all(struct ltt_ust_session *usess)
{
- int ret;
- enum ustctl_notify_cmd cmd;
+ int ret = 0;
+ struct lttng_ht_iter iter;
+ struct ust_app *app;
- DBG3("UST app receiving notify from sock %d", sock);
+ DBG("Stopping all UST traces");
- ret = ustctl_recv_notify(sock, &cmd);
- if (ret < 0) {
- if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
- ERR("UST app recv notify failed with ret %d", ret);
- } else {
- DBG3("UST app recv notify failed. Application died");
+ /*
+ * Even though the stop trace might fail, flag this session inactive so
+	 * other applications coming in are not started by default.
+ */
+ usess->active = 0;
+
+ rcu_read_lock();
+
+ cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ ret = ust_app_stop_trace(usess, app);
+ if (ret < 0) {
+ /* Continue to next apps even on error */
+ continue;
}
- goto error;
}
- switch (cmd) {
- case USTCTL_NOTIFY_CMD_EVENT:
- {
- int sobjd, cobjd, loglevel_value;
- char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
- size_t nr_fields;
- struct ustctl_field *fields;
+ (void) ust_app_flush_session(usess);
- DBG2("UST app ustctl register event received");
+ rcu_read_unlock();
- ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name,
- &loglevel_value, &sig, &nr_fields, &fields,
- &model_emf_uri);
- if (ret < 0) {
- if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
- ERR("UST app recv event failed with ret %d", ret);
- } else {
- DBG3("UST app recv event failed. Application died");
- }
- goto error;
- }
+ return 0;
+}
- /*
- * Add event to the UST registry coming from the notify socket. This
- * call will free if needed the sig, fields and model_emf_uri. This
- * code path loses the ownsership of these variables and transfer them
- * to the this function.
- */
- ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
- fields, loglevel_value, model_emf_uri);
- if (ret < 0) {
- goto error;
- }
+/*
+ * Destroy app UST session.
+ */
+int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
+{
+ int ret = 0;
+ struct lttng_ht_iter iter;
+ struct ust_app *app;
- break;
- }
- case USTCTL_NOTIFY_CMD_CHANNEL:
- {
- int sobjd, cobjd;
- size_t nr_fields;
- struct ustctl_field *fields;
+ DBG("Destroy all UST traces");
- DBG2("UST app ustctl register channel received");
+ rcu_read_lock();
- ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
- &fields);
+ cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ ret = destroy_trace(usess, app);
if (ret < 0) {
- if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
- ERR("UST app recv channel failed with ret %d", ret);
- } else {
- DBG3("UST app recv channel failed. Application died");
+ /* Continue to next apps even on error */
+ continue;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return 0;
+}
+
+/* The ua_sess lock must be held by the caller. */
+static
+int find_or_create_ust_app_channel(
+ struct ltt_ust_session *usess,
+ struct ust_app_session *ua_sess,
+ struct ust_app *app,
+ struct ltt_ust_channel *uchan,
+ struct ust_app_channel **ua_chan)
+{
+ int ret = 0;
+ struct lttng_ht_iter iter;
+ struct lttng_ht_node_str *ua_chan_node;
+
+ lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &iter);
+ ua_chan_node = lttng_ht_iter_get_node_str(&iter);
+ if (ua_chan_node) {
+ *ua_chan = caa_container_of(ua_chan_node,
+ struct ust_app_channel, node);
+ goto end;
+ }
+
+ ret = ust_app_channel_create(usess, ua_sess, uchan, app, ua_chan);
+ if (ret) {
+ goto end;
+ }
+end:
+ return ret;
+}
+
+/* The ua_sess lock must be held by the caller. */
+static
+int find_or_create_ust_app_map(
+ struct ltt_ust_session *usess,
+ struct ust_app_session *ua_sess,
+ struct ust_app *app,
+ struct ltt_ust_map *umap,
+ struct ust_app_map **ua_map)
+{
+ int ret = 0;
+ struct lttng_ht_iter iter;
+ struct lttng_ht_node_str *ua_map_node;
+
+ lttng_ht_lookup(ua_sess->maps, (void *) umap->name, &iter);
+ ua_map_node = lttng_ht_iter_get_node_str(&iter);
+ if (ua_map_node) {
+ *ua_map = caa_container_of(ua_map_node,
+ struct ust_app_map, node);
+ goto end;
+ }
+
+ DBG("UST map id = %"PRIu64" not found. Creating it.", umap->id);
+ ret = ust_app_map_create(usess, ua_sess, umap, app, ua_map);
+ if (ret) {
+ goto end;
+ }
+end:
+ return ret;
+}
+
+static
+int ust_app_channel_synchronize_event(struct ust_app_channel *ua_chan,
+ struct ltt_ust_event *uevent, struct ust_app_session *ua_sess,
+ struct ust_app *app)
+{
+ int ret = 0;
+ struct ust_app_event *ua_event = NULL;
+
+ ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
+ uevent->filter, uevent->attr.loglevel, uevent->exclusion,
+ uevent->attr.token);
+ if (!ua_event) {
+ ret = create_ust_app_channel_event(ua_sess, ua_chan, uevent, app);
+
+ if (ret < 0) {
+ goto end;
+ }
+ } else {
+ if (ua_event->enabled != uevent->enabled) {
+ ret = uevent->enabled ?
+ enable_ust_app_event(ua_sess, ua_event, app) :
+ disable_ust_app_event(ua_sess, ua_event, app);
+ }
+ }
+
+end:
+ return ret;
+}
+
+/* Called with RCU read-side lock held. */
+static
+int ust_app_map_synchronize_event(struct ust_app_map *ua_map,
+ struct ltt_ust_event *uevent, struct ust_app_session *ua_sess,
+ struct ust_app *app)
+{
+ int ret = 0;
+ struct ust_app_event *ua_event = NULL;
+
+ ua_event = find_ust_app_event(ua_map->events, uevent->attr.name,
+ uevent->filter, uevent->attr.loglevel, uevent->exclusion,
+ uevent->attr.token);
+ if (!ua_event) {
+ ret = create_ust_app_map_event(ua_sess, ua_map, uevent, app);
+ if (ret < 0) {
+ goto end;
+ }
+ } else {
+ if (ua_event->enabled != uevent->enabled) {
+ ret = uevent->enabled ?
+ enable_ust_app_event(ua_sess, ua_event, app) :
+ disable_ust_app_event(ua_sess, ua_event, app);
+ }
+ }
+
+end:
+ return ret;
+}
+
+static
+void ust_app_synchronize_event_notifier_rules(struct ust_app *app)
+{
+ int ret = 0;
+ enum lttng_error_code ret_code;
+ enum lttng_trigger_status t_status;
+ struct lttng_ht_iter app_trigger_iter;
+ struct lttng_triggers *triggers = NULL;
+ struct ust_app_event_notifier_rule *event_notifier_rule;
+ unsigned int count, i;
+
+ /*
+	 * Currently, registering or unregistering a trigger with an
+ * event rule condition causes a full synchronization of the event
+ * notifiers.
+ *
+ * The first step attempts to add an event notifier for all registered
+ * triggers that apply to the user space tracers. Then, the
+ * application's event notifiers rules are all checked against the list
+ * of registered triggers. Any event notifier that doesn't have a
+ * matching trigger can be assumed to have been disabled.
+ *
+ * All of this is inefficient, but is put in place to get the feature
+ * rolling as it is simpler at this moment. It will be optimized Soon™
+ * to allow the state of enabled
+ * event notifiers to be synchronized in a piece-wise way.
+ */
+
+ /* Get all triggers using uid 0 (root) */
+ ret_code = notification_thread_command_list_triggers(
+ notification_thread_handle, 0, &triggers);
+ if (ret_code != LTTNG_OK) {
+ ret = -1;
+ goto end;
+ }
+
+ assert(triggers);
+
+ t_status = lttng_triggers_get_count(triggers, &count);
+ if (t_status != LTTNG_TRIGGER_STATUS_OK) {
+ ret = -1;
+ goto end;
+ }
+
+ for (i = 0; i < count; i++) {
+ const struct lttng_condition *condition;
+ const struct lttng_event_rule *event_rule;
+ struct lttng_trigger *trigger;
+ const struct ust_app_event_notifier_rule *looked_up_event_notifier_rule;
+ enum lttng_condition_status condition_status;
+ uint64_t token;
+
+ trigger = lttng_triggers_borrow_mutable_at_index(triggers, i);
+ assert(trigger);
+
+ token = lttng_trigger_get_tracer_token(trigger);
+ condition = lttng_trigger_get_const_condition(trigger);
+
+ if (!lttng_trigger_needs_tracer_notifier(trigger)) {
+ continue;
+ }
+
+ condition_status = lttng_condition_on_event_get_rule(condition, &event_rule);
+ assert(condition_status == LTTNG_CONDITION_STATUS_OK);
+
+ if (lttng_event_rule_get_domain_type(event_rule) == LTTNG_DOMAIN_KERNEL) {
+ /* Skip kernel related triggers. */
+ continue;
+ }
+
+ /*
+ * Find or create the associated token event rule. The caller
+ * holds the RCU read lock, so this is safe to call without
+ * explicitly acquiring it here.
+ */
+ looked_up_event_notifier_rule = find_ust_app_event_notifier_rule(
+ app->token_to_event_notifier_rule_ht, token);
+ if (!looked_up_event_notifier_rule) {
+ ret = create_ust_app_event_notifier_rule(trigger, app);
+ if (ret < 0) {
+ goto end;
}
- goto error;
}
+ }
+
+ rcu_read_lock();
+ /* Remove all unknown event sources from the app. */
+ cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
+ &app_trigger_iter.iter, event_notifier_rule,
+ node.node) {
+ const uint64_t app_token = event_notifier_rule->token;
+ bool found = false;
/*
- * The fields ownership are transfered to this function call meaning
- * that if needed it will be freed. After this, it's invalid to access
- * fields or clean it up.
+ * Check if the app event trigger still exists on the
+ * notification side.
*/
- ret = reply_ust_register_channel(sock, sobjd, cobjd, nr_fields,
- fields);
- if (ret < 0) {
- goto error;
+ for (i = 0; i < count; i++) {
+ uint64_t notification_thread_token;
+ const struct lttng_trigger *trigger =
+ lttng_triggers_get_at_index(
+ triggers, i);
+
+ assert(trigger);
+
+ notification_thread_token =
+ lttng_trigger_get_tracer_token(trigger);
+
+ if (notification_thread_token == app_token) {
+ found = true;
+ break;
+ }
}
- break;
+ if (found) {
+ /* Still valid. */
+ continue;
+ }
+
+ /*
+ * This trigger was unregistered, disable it on the tracer's
+ * side.
+ */
+ ret = lttng_ht_del(app->token_to_event_notifier_rule_ht,
+ &app_trigger_iter);
+ assert(ret == 0);
+
+ /* Callee logs errors. */
+ (void) disable_ust_object(app, event_notifier_rule->obj);
+
+ delete_ust_app_event_notifier_rule(
+ app->sock, event_notifier_rule, app);
}
- case USTCTL_NOTIFY_CMD_ENUM:
- {
- int sobjd;
- char name[LTTNG_UST_SYM_NAME_LEN];
- size_t nr_entries;
- struct ustctl_enum_entry *entries;
- DBG2("UST app ustctl register enum received");
+ rcu_read_unlock();
- ret = ustctl_recv_register_enum(sock, &sobjd, name,
- &entries, &nr_entries);
- if (ret < 0) {
- if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
- ERR("UST app recv enum failed with ret %d", ret);
+end:
+ lttng_triggers_destroy(triggers);
+ return;
+}
+
+/*
+ * Called with RCU read-side lock held.
+ */
+static
+void ust_app_synchronize_all_channels(struct ltt_ust_session *usess,
+ struct ust_app_session *ua_sess,
+ struct ust_app *app)
+{
+ int ret = 0;
+ struct cds_lfht_iter uchan_iter;
+ struct ltt_ust_channel *uchan;
+
+ assert(usess);
+ assert(ua_sess);
+ assert(app);
+
+ cds_lfht_for_each_entry(usess->domain_global.channels->ht, &uchan_iter,
+ uchan, node.node) {
+ struct ust_app_channel *ua_chan;
+ struct cds_lfht_iter uevent_iter;
+ struct ltt_ust_event *uevent;
+
+ /*
+ * Search for a matching ust_app_channel. If none is found,
+ * create it. Creating the channel will cause the ua_chan
+ * structure to be allocated, the channel buffers to be
+ * allocated (if necessary) and sent to the application, and
+ * all enabled contexts will be added to the channel.
+ */
+ ret = find_or_create_ust_app_channel(usess, ua_sess,
+ app, uchan, &ua_chan);
+ if (ret) {
+ /* Tracer is probably gone or ENOMEM. */
+ goto end;
+ }
+
+ if (!ua_chan) {
+ /* ua_chan will be NULL for the metadata channel */
+ continue;
+ }
+
+ cds_lfht_for_each_entry(uchan->events->ht, &uevent_iter, uevent,
+ node.node) {
+ ret = ust_app_channel_synchronize_event(ua_chan,
+ uevent, ua_sess, app);
+ if (ret) {
+ goto end;
+ }
+ }
+
+ if (ua_chan->enabled != uchan->enabled) {
+ ret = uchan->enabled ?
+ enable_ust_app_channel(ua_sess, uchan, app) :
+ disable_ust_app_channel(ua_sess, ua_chan, app);
+ if (ret) {
+ goto end;
+ }
+ }
+ }
+end:
+ return;
+}
+
+/*
+ * Called with RCU read-side lock held.
+ */
+static
+void ust_app_synchronize_all_maps(struct ltt_ust_session *usess,
+ struct ust_app_session *ua_sess,
+ struct ust_app *app)
+{
+ int ret = 0;
+ struct cds_lfht_iter umap_iter;
+ struct ltt_ust_map *umap;
+
+ assert(usess);
+ assert(ua_sess);
+ assert(app);
+
+ cds_lfht_for_each_entry(usess->domain_global.maps->ht, &umap_iter,
+ umap, node.node) {
+ struct ust_app_map *ua_map;
+ struct cds_lfht_iter uevent_iter;
+ struct ltt_ust_event *uevent;
+
+ DBG("Synchronizing UST map id = %"PRIu64, umap->id);
+
+ ret = find_or_create_ust_app_map(usess, ua_sess,
+ app, umap, &ua_map);
+ if (ret) {
+ /* Tracer is probably gone or ENOMEM. */
+ goto end;
+ }
+
+ DBG("Synchronizing all events of UST map id = %"PRIu64, umap->id);
+ cds_lfht_for_each_entry(umap->events->ht, &uevent_iter, uevent,
+ node.node) {
+ ret = ust_app_map_synchronize_event(ua_map,
+ uevent, ua_sess, app);
+ if (ret) {
+ goto end;
+ }
+ }
+
+ if (ua_map->enabled != umap->enabled) {
+ if (umap->enabled) {
+ DBG("Map disabled on the tracer side but shouldn't");
+ ret = enable_ust_app_map(ua_sess, umap, app);
} else {
- DBG3("UST app recv enum failed. Application died");
+ DBG("Map enabled on the tracer side but shouldn't");
+ ret = disable_ust_app_map(ua_sess, ua_map, app);
+ }
+ if (ret) {
+ goto end;
+ }
+ }
+ }
+end:
+ return;
+}
+
+/*
+ * The caller must ensure that the application is compatible and is tracked
+ * by the process attribute trackers.
+ */
+static
+void ust_app_synchronize(struct ltt_ust_session *usess,
+ struct ust_app *app)
+{
+ int ret = 0;
+ struct ust_app_session *ua_sess = NULL;
+
+ /*
+ * The application's configuration should only be synchronized for
+ * active sessions.
+ */
+ assert(usess->active);
+
+ ret = find_or_create_ust_app_session(usess, app, &ua_sess, NULL);
+ if (ret < 0) {
+ /* Tracer is probably gone or ENOMEM. */
+ goto error;
+ }
+ assert(ua_sess);
+
+
+ rcu_read_lock();
+
+ pthread_mutex_lock(&ua_sess->lock);
+ if (ua_sess->deleted) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ goto end;
+ }
+ ust_app_synchronize_all_channels(usess, ua_sess, app);
+ ust_app_synchronize_all_maps(usess, ua_sess, app);
+
+ /*
+ * Create the metadata for the application. This returns gracefully if a
+ * metadata was already set for the session.
+ *
+ * The metadata channel must be created after the data channels as the
+ * consumer daemon assumes this ordering. When interacting with a relay
+ * daemon, the consumer will use this assumption to send the
+ * "STREAMS_SENT" message to the relay daemon.
+ */
+ ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
+ if (ret < 0) {
+ goto error_unlock;
+ }
+
+ rcu_read_unlock();
+
+end:
+ pthread_mutex_unlock(&ua_sess->lock);
+ /* Everything went well at this point. */
+ return;
+
+error_unlock:
+ rcu_read_unlock();
+ pthread_mutex_unlock(&ua_sess->lock);
+error:
+ if (ua_sess) {
+ destroy_app_session(app, ua_sess);
+ }
+ return;
+}
+
+static
+void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
+{
+ struct ust_app_session *ua_sess;
+
+ ua_sess = lookup_session_by_app(usess, app);
+ if (ua_sess == NULL) {
+ return;
+ }
+ destroy_app_session(app, ua_sess);
+}
+
+/*
+ * Add channels/events from UST global domain to registered apps at sock.
+ *
+ * Called with session lock held.
+ * Called with RCU read-side lock held.
+ */
+void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
+{
+ assert(usess);
+ assert(usess->active);
+
+ DBG2("UST app global update for app sock %d for session id %" PRIu64,
+ app->sock, usess->id);
+
+ if (!app->compatible) {
+ return;
+ }
+ if (trace_ust_id_tracker_lookup(LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID,
+ usess, app->pid) &&
+ trace_ust_id_tracker_lookup(
+ LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID,
+ usess, app->uid) &&
+ trace_ust_id_tracker_lookup(
+ LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID,
+ usess, app->gid)) {
+ /*
+ * Synchronize the application's internal tracing configuration
+ * and start tracing.
+ */
+ ust_app_synchronize(usess, app);
+ ust_app_start_trace(usess, app);
+ } else {
+ ust_app_global_destroy(usess, app);
+ }
+}
+
+/*
+ * Add all event notifiers to an application.
+ *
+ * Called with session lock held.
+ * Called with RCU read-side lock held.
+ */
+void ust_app_global_update_event_notifier_rules(struct ust_app *app)
+{
+ DBG2("UST application global event notifier rules update: app = '%s' (ppid: %d)",
+ app->name, app->ppid);
+
+ if (!app->compatible) {
+ return;
+ }
+
+ if (app->event_notifier_group.object == NULL) {
+ WARN("UST app global update of event notifiers for app skipped since communication handle is null: app = '%s' (ppid: %d)",
+ app->name, app->ppid);
+ return;
+ }
+
+ ust_app_synchronize_event_notifier_rules(app);
+}
+
+/*
+ * Called with session lock held.
+ */
+void ust_app_global_update_all(struct ltt_ust_session *usess)
+{
+ struct lttng_ht_iter iter;
+ struct ust_app *app;
+
+ rcu_read_lock();
+ cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ ust_app_global_update(usess, app);
+ }
+ rcu_read_unlock();
+}
+
+void ust_app_global_update_all_event_notifier_rules(void)
+{
+ struct lttng_ht_iter iter;
+ struct ust_app *app;
+
+ rcu_read_lock();
+ cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ ust_app_global_update_event_notifier_rules(app);
+ }
+
+ rcu_read_unlock();
+}
+
+void ust_app_update_event_notifier_error_count(struct lttng_trigger *trigger)
+{
+ uint64_t error_count = 0;
+ enum event_notifier_error_accounting_status status;
+ struct lttng_condition *condition = lttng_trigger_get_condition(trigger);
+
+ status = event_notifier_error_accounting_get_count(trigger, &error_count);
+ if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
+ ERR("Error getting trigger error count.");
+ }
+
+ lttng_condition_on_event_set_error_count(condition, error_count);
+}
+
+/*
+ * Add context to a specific channel for global UST domain.
+ */
+int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
+ struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
+{
+ int ret = 0;
+ struct lttng_ht_node_str *ua_chan_node;
+ struct lttng_ht_iter iter, uiter;
+ struct ust_app_channel *ua_chan = NULL;
+ struct ust_app_session *ua_sess;
+ struct ust_app *app;
+
+ assert(usess->active);
+
+ rcu_read_lock();
+ cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ if (!app->compatible) {
+ /*
+ * TODO: In time, we should notice the caller of this error by
+ * telling him that this is a version error.
+ */
+ continue;
+ }
+ ua_sess = lookup_session_by_app(usess, app);
+ if (ua_sess == NULL) {
+ continue;
+ }
+
+ pthread_mutex_lock(&ua_sess->lock);
+
+ if (ua_sess->deleted) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ continue;
+ }
+
+ /* Lookup channel in the ust app session */
+ lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
+ ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
+ if (ua_chan_node == NULL) {
+ goto next_app;
+ }
+ ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
+ node);
+ ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
+ if (ret < 0) {
+ goto next_app;
+ }
+ next_app:
+ pthread_mutex_unlock(&ua_sess->lock);
+ }
+
+ rcu_read_unlock();
+ return ret;
+}
+
+/*
+ * Receive registration and populate the given msg structure.
+ *
+ * On success return 0 else a negative value returned by the ustctl call.
+ */
+int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
+{
+ int ret;
+ uint32_t pid, ppid, uid, gid;
+
+ assert(msg);
+
+ ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
+ &pid, &ppid, &uid, &gid,
+ &msg->bits_per_long,
+ &msg->uint8_t_alignment,
+ &msg->uint16_t_alignment,
+ &msg->uint32_t_alignment,
+ &msg->uint64_t_alignment,
+ &msg->long_alignment,
+ &msg->byte_order,
+ msg->name);
+ if (ret < 0) {
+ switch (-ret) {
+ case EPIPE:
+ case ECONNRESET:
+ case LTTNG_UST_ERR_EXITING:
+ DBG3("UST app recv reg message failed. Application died");
+ break;
+ case LTTNG_UST_ERR_UNSUP_MAJOR:
+ ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
+ msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
+ LTTNG_UST_ABI_MINOR_VERSION);
+ break;
+ default:
+ ERR("UST app recv reg message failed with ret %d", ret);
+ break;
+ }
+ goto error;
+ }
+ msg->pid = (pid_t) pid;
+ msg->ppid = (pid_t) ppid;
+ msg->uid = (uid_t) uid;
+ msg->gid = (gid_t) gid;
+
+error:
+ return ret;
+}
+
+/*
+ * Return a ust app session object using the application object and the
+ * session object descriptor as a key. If not found, NULL is returned.
+ * A RCU read side lock MUST be acquired when calling this function.
+*/
+static struct ust_app_session *find_session_by_objd(struct ust_app *app,
+ int objd)
+{
+ struct lttng_ht_node_ulong *node;
+ struct lttng_ht_iter iter;
+ struct ust_app_session *ua_sess = NULL;
+
+ assert(app);
+
+ lttng_ht_lookup(app->ust_sessions_objd, (void *)((unsigned long) objd), &iter);
+ node = lttng_ht_iter_get_node_ulong(&iter);
+ if (node == NULL) {
+ DBG2("UST app session find by objd %d not found", objd);
+ goto error;
+ }
+
+ ua_sess = caa_container_of(node, struct ust_app_session, ust_objd_node);
+
+error:
+ return ua_sess;
+}
+
+/*
+ * Return a ust app channel object using the application object and the channel
+ * object descriptor as a key. If not found, NULL is returned. A RCU read side
+ * lock MUST be acquired before calling this function.
+ */
+static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
+ int objd)
+{
+ struct lttng_ht_node_ulong *node;
+ struct lttng_ht_iter iter;
+ struct ust_app_channel *ua_chan = NULL;
+
+ assert(app);
+
+ lttng_ht_lookup(app->ust_chan_objd, (void *)((unsigned long) objd), &iter);
+ node = lttng_ht_iter_get_node_ulong(&iter);
+ if (node == NULL) {
+ DBG2("UST app channel find by objd %d not found", objd);
+ goto error;
+ }
+
+ ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
+
+error:
+ return ua_chan;
+}
+
+/*
+ * Return a ust app map object using the application object and the map
+ * object descriptor as a key. If not found, NULL is returned. A RCU read side
+ * lock MUST be acquired before calling this function.
+ */
+static struct ust_app_map *find_map_by_objd(struct ust_app *app,
+ int objd)
+{
+ struct lttng_ht_node_ulong *node;
+ struct lttng_ht_iter iter;
+ struct ust_app_map *ua_map = NULL;
+
+ assert(app);
+
+ lttng_ht_lookup(app->ust_map_objd, (void *)((unsigned long) objd), &iter);
+ node = lttng_ht_iter_get_node_ulong(&iter);
+ if (node == NULL) {
+ DBG2("UST app map find by objd %d not found", objd);
+ goto error;
+ }
+
+ ua_map = caa_container_of(node, struct ust_app_map, ust_objd_node);
+
+error:
+ return ua_map;
+}
+
+/*
+ * Reply to a register channel notification from an application on the notify
+ * socket. The channel metadata is also created.
+ *
+ * The session UST registry lock is acquired in this function.
+ *
+ * On success 0 is returned else a negative value.
+ */
+static int reply_ust_register_channel(int sock, int cobjd,
+ size_t nr_fields, struct ustctl_field *fields)
+{
+ int ret, ret_code = 0;
+ uint32_t chan_id;
+ uint64_t chan_reg_key;
+ enum ustctl_channel_header type;
+ struct ust_app *app;
+ struct ust_app_channel *ua_chan;
+ struct ust_app_session *ua_sess;
+ struct ust_registry_session *registry;
+ struct ust_registry_channel *ust_reg_chan;
+
+ rcu_read_lock();
+
+ /* Lookup application. If not found, there is a code flow error. */
+ app = find_app_by_notify_sock(sock);
+ if (!app) {
+ DBG("Application socket %d is being torn down. Abort event notify",
+ sock);
+ ret = 0;
+ goto error_rcu_unlock;
+ }
+
+ /* Lookup channel by UST object descriptor. */
+ ua_chan = find_channel_by_objd(app, cobjd);
+ if (!ua_chan) {
+ DBG("Application channel is being torn down. Abort event notify");
+ ret = 0;
+ goto error_rcu_unlock;
+ }
+
+ assert(ua_chan->session);
+ ua_sess = ua_chan->session;
+
+ /* Get right session registry depending on the session buffer type. */
+ registry = get_session_registry(ua_sess);
+ if (!registry) {
+ DBG("Application session is being torn down. Abort event notify");
+ ret = 0;
+ goto error_rcu_unlock;
+ };
+
+ /* Depending on the buffer type, a different channel key is used. */
+ if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
+ chan_reg_key = ua_chan->tracing_channel_id;
+ } else {
+ chan_reg_key = ua_chan->key;
+ }
+
+ pthread_mutex_lock(®istry->lock);
+
+ ust_reg_chan = ust_registry_channel_find(registry, chan_reg_key);
+ assert(ust_reg_chan);
+
+ if (!ust_reg_chan->register_done) {
+ /*
+ * TODO: eventually use the registry event count for
+ * this channel to better guess header type for per-pid
+ * buffers.
+ */
+ type = USTCTL_CHANNEL_HEADER_LARGE;
+ ust_reg_chan->nr_ctx_fields = nr_fields;
+ ust_reg_chan->ctx_fields = fields;
+ fields = NULL;
+ ust_reg_chan->header_type = type;
+ } else {
+ /* Get current already assigned values. */
+ type = ust_reg_chan->header_type;
+ }
+ /* Channel id is set during the object creation. */
+ chan_id = ust_reg_chan->chan_id;
+
+ /* Append to metadata */
+ if (!ust_reg_chan->metadata_dumped) {
+ ret_code = ust_metadata_channel_statedump(registry, ust_reg_chan);
+ if (ret_code) {
+ ERR("Error appending channel metadata (errno = %d)", ret_code);
+ goto reply;
+ }
+ }
+
+reply:
+ DBG3("UST app replying to register channel key %" PRIu64
+ " with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
+ ret_code);
+
+ ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
+ if (ret < 0) {
+ if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+ ERR("UST app reply channel failed with ret %d", ret);
+ } else {
+ DBG3("UST app reply channel failed. Application died");
+ }
+ goto error;
+ }
+
+ /* This channel registry registration is completed. */
+ ust_reg_chan->register_done = 1;
+
+error:
+ pthread_mutex_unlock(®istry->lock);
+error_rcu_unlock:
+ rcu_read_unlock();
+ free(fields);
+ return ret;
+}
+
+static int add_event_ust_chan_registry(int sock, struct ust_app *ua,
+ struct ust_app_channel *ua_chan, int sobjd, int cobjd, char *name,
+ char *sig, size_t nr_fields, struct ustctl_field *fields,
+ int loglevel_value, char *model_emf_uri)
+{
+ int ret, ret_code;
+ uint32_t event_id = 0;
+ uint64_t chan_reg_key;
+ struct ust_app_session *ua_sess;
+ struct ust_registry_session *registry;
+ /*
+ * The counter index is unused for channel events. It's only used for
+ * map events.
+ */
+ uint64_t counter_index = 0;
+
+ assert(ua_chan->session);
+ ua_sess = ua_chan->session;
+
+ registry = get_session_registry(ua_sess);
+ if (!registry) {
+ DBG("Application session is being torn down. Abort event notify");
+ ret = 0;
+ goto error;
+ }
+
+ if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
+ chan_reg_key = ua_chan->tracing_channel_id;
+ } else {
+ chan_reg_key = ua_chan->key;
+ }
+
+ pthread_mutex_lock(®istry->lock);
+
+ /*
+ * From this point on, this call acquires the ownership of the sig, fields
+ * and model_emf_uri meaning any free are done inside it if needed. These
+ * three variables MUST NOT be read/write after this.
+ */
+ ret_code = ust_registry_chan_create_event(registry, chan_reg_key,
+ sobjd, cobjd, name, sig, nr_fields, fields,
+ loglevel_value, model_emf_uri, ua_sess->buffer_type,
+ &event_id, ua);
+ sig = NULL;
+ fields = NULL;
+ model_emf_uri = NULL;
+
+ /*
+ * The return value is returned to ustctl so in case of an error, the
+ * application can be notified. In case of an error, it's important not to
+ * return a negative error or else the application will get closed.
+ */
+ ret = ustctl_reply_register_event(sock, event_id, counter_index, ret_code);
+ if (ret < 0) {
+ if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+ ERR("UST app reply event failed with ret %d", ret);
+ } else {
+ DBG3("UST app reply event failed. Application died");
+ }
+ /*
+ * No need to wipe the create event since the application socket will
+ * get close on error hence cleaning up everything by itself.
+ */
+ goto error;
+ }
+
+ DBG3("UST registry event %s with id %" PRId32 " added successfully",
+ name, event_id);
+
+error:
+ pthread_mutex_unlock(®istry->lock);
+ return ret;
+}
+
+/*
+ * Add an event to the given UST app map registry and reply the assigned
+ * counter index to the application through the notify socket.
+ *
+ * The session UST registry lock is acquired in this function.
+ *
+ * On success 0 is returned else a negative value.
+ */
+static int add_event_ust_map_registry(int sock, struct ust_app *ua,
+		struct ust_app_map *ua_map, int sobjd, int cobjd, char *name,
+		char *sig, size_t nr_fields, struct ustctl_field *fields,
+		int loglevel_value, char *model_emf_uri, uint64_t tracer_token)
+{
+	int ret, ret_code;
+	uint64_t map_reg_key, counter_index;
+	struct ust_app_session *ua_sess;
+	struct ust_registry_session *registry;
+
+	assert(ua_map->session);
+	ua_sess = ua_map->session;
+
+	registry = get_session_registry(ua_sess);
+	if (!registry) {
+		DBG("Application session is being torn down. Abort event notify");
+		/*
+		 * Not an error. The registry lock was never taken, so bail out
+		 * without going through the unlock path.
+		 *
+		 * NOTE(review): sig, fields and model_emf_uri were handed to us
+		 * by the caller and are leaked on this path — confirm whether
+		 * they should be freed here.
+		 */
+		ret = 0;
+		goto end;
+	}
+
+	/* Per-UID buffers share a registry keyed by the tracing map id. */
+	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
+		map_reg_key = ua_map->tracing_map_id;
+	} else {
+		map_reg_key = ua_map->key;
+	}
+
+	pthread_mutex_lock(&registry->lock);
+
+	/*
+	 * From this point on, this call acquires the ownership of the sig, fields
+	 * and model_emf_uri meaning any free are done inside it if needed. These
+	 * three variables MUST NOT be read/write after this.
+	 */
+	DBG("Registry_map_create_event on map=%"PRIu64" with token=%"PRIu64,
+			map_reg_key, tracer_token);
+	ret_code = ust_registry_map_create_event(registry, map_reg_key,
+			sobjd, cobjd, name, sig, nr_fields, fields,
+			loglevel_value, model_emf_uri, ua_sess->buffer_type,
+			tracer_token, &counter_index, ua);
+	assert(!ret_code);
+
+	sig = NULL;
+	fields = NULL;
+	model_emf_uri = NULL;
+
+	/*
+	 * The return value is returned to ustctl so in case of an error, the
+	 * application can be notified. In case of an error, it's important not to
+	 * return a negative error or else the application will get closed.
+	 *
+	 * NOTE(review): counter_index is passed both as the event id and as the
+	 * counter index; presumably the counter index doubles as the map event
+	 * id — confirm against the ustctl notify protocol.
+	 */
+	ret = ustctl_reply_register_event(sock, counter_index, counter_index,
+			ret_code);
+	if (ret < 0) {
+		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+			ERR("UST app reply event failed with ret %d", ret);
+		} else {
+			DBG3("UST app reply event failed. Application died");
+		}
+		/*
+		 * No need to wipe the create event since the application socket will
+		 * get closed on error hence cleaning up everything by itself.
+		 */
+		goto error;
+	}
+
+	DBG3("UST registry map event %s with counter index %" PRIu64 " added successfully",
+			name, counter_index);
+
+error:
+	pthread_mutex_unlock(&registry->lock);
+end:
+	return ret;
+}
+
+
+/*
+ * Add event to the UST channel or map registry. When added to a channel
+ * registry, the matching metadata is also created. Once done, this
+ * replies to the application with the appropriate error code.
+ *
+ * The session UST registry lock is acquired in the function.
+ *
+ * On success 0 is returned else a negative value.
+ */
+static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
+		char *sig, size_t nr_fields, struct ustctl_field *fields,
+		int loglevel_value, char *model_emf_uri, uint64_t tracer_token)
+{
+	int ret = 0;
+	struct ust_app *app;
+	struct ust_app_channel *ua_chan;
+	struct ust_app_map *ua_map;
+
+	rcu_read_lock();
+
+	/* Lookup application. If not found, there is a code flow error. */
+	app = find_app_by_notify_sock(sock);
+	if (!app) {
+		DBG("Application socket %d is being torn down. Abort event notify",
+				sock);
+		/* Not an error; the app is simply going away. */
+		goto end;
+	}
+
+	/* Lookup channel by UST object descriptor. */
+	ua_chan = find_channel_by_objd(app, cobjd);
+	if (ua_chan) {
+		/*
+		 * Ownership of sig, fields and model_emf_uri is transferred to
+		 * the callee; they must not be touched afterwards.
+		 */
+		ret = add_event_ust_chan_registry(sock, app, ua_chan, sobjd, cobjd,
+				name, sig, nr_fields, fields, loglevel_value,
+				model_emf_uri);
+		if (ret) {
+			ERR("Error adding channel event to registry: event_name = '%s'", name);
+		}
+		/* Propagate the result, matching the map branch below. */
+		goto end;
+	}
+
+	/* Lookup map by UST object descriptor. */
+	ua_map = find_map_by_objd(app, cobjd);
+	if (ua_map) {
+		ret = add_event_ust_map_registry(sock, app, ua_map, sobjd, cobjd,
+				name, sig, nr_fields, fields, loglevel_value,
+				model_emf_uri, tracer_token);
+		if (ret) {
+			ERR("Error adding map event to registry: event_name = '%s'", name);
+		}
+		goto end;
+	}
+
+	/*
+	 * Neither a channel nor a map owns this object descriptor: the
+	 * container is being torn down. Not an error (ret stays 0).
+	 */
+	DBG("Application event container is being torn down. Abort event notify");
+
+end:
+	rcu_read_unlock();
+	return ret;
+}
+
+/*
+ * Add enum to the UST session registry. Once done, this replies to the
+ * application with the appropriate error code.
+ *
+ * The session UST registry lock is acquired within this function.
+ *
+ * On success 0 is returned else a negative value.
+ */
+static int add_enum_ust_registry(int sock, int sobjd, char *name,
+		struct ustctl_enum_entry *entries, size_t nr_entries)
+{
+	int ret = 0, ret_code;
+	struct ust_app *app;
+	struct ust_app_session *ua_sess;
+	struct ust_registry_session *registry;
+	uint64_t enum_id = -1ULL;
+
+	rcu_read_lock();
+
+	/* Lookup application. If not found, there is a code flow error. */
+	app = find_app_by_notify_sock(sock);
+	if (!app) {
+		/* Return 0 since this is not an error. */
+		DBG("Application socket %d is being torn down. Aborting enum registration",
+				sock);
+		free(entries);
+		goto error_rcu_unlock;
+	}
+
+	/* Lookup session by UST object descriptor. */
+	ua_sess = find_session_by_objd(app, sobjd);
+	if (!ua_sess) {
+		/* Return 0 since this is not an error. */
+		DBG("Application session is being torn down (session not found). Aborting enum registration.");
+		free(entries);
+		goto error_rcu_unlock;
+	}
+
+	registry = get_session_registry(ua_sess);
+	if (!registry) {
+		/* Return 0 since this is not an error. */
+		DBG("Application session is being torn down (registry not found). Aborting enum registration.");
+		free(entries);
+		goto error_rcu_unlock;
+	}
+
+	pthread_mutex_lock(&registry->lock);
+
+	/*
+	 * From this point on, the callee acquires the ownership of
+	 * entries. The variable entries MUST NOT be read/written after
+	 * call.
+	 */
+	ret_code = ust_registry_create_or_find_enum(registry, sobjd, name,
+			entries, nr_entries, &enum_id);
+	entries = NULL;
+
+	/*
+	 * The return value is returned to ustctl so in case of an error, the
+	 * application can be notified. In case of an error, it's important not to
+	 * return a negative error or else the application will get closed.
+	 */
+	ret = ustctl_reply_register_enum(sock, enum_id, ret_code);
+	if (ret < 0) {
+		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+			ERR("UST app reply enum failed with ret %d", ret);
+		} else {
+			DBG3("UST app reply enum failed. Application died");
+		}
+		/*
+		 * No need to wipe the create enum since the application socket will
+		 * get closed on error hence cleaning up everything by itself.
+		 */
+		goto error;
+	}
+
+	DBG3("UST registry enum %s added successfully or already found", name);
+
+error:
+	pthread_mutex_unlock(&registry->lock);
+error_rcu_unlock:
+	rcu_read_unlock();
+	return ret;
+}
+
+/*
+ * Handle application notification through the given notify socket.
+ *
+ * Return 0 on success or else a negative value.
+ */
+int ust_app_recv_notify(int sock)
+{
+	int ret;
+	enum ustctl_notify_cmd cmd;
+
+	DBG3("UST app receiving notify from sock %d", sock);
+
+	ret = ustctl_recv_notify(sock, &cmd);
+	if (ret < 0) {
+		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+			ERR("UST app recv notify failed with ret %d", ret);
+		} else {
+			DBG3("UST app recv notify failed. Application died");
+		}
+		goto error;
+	}
+
+	switch (cmd) {
+	case USTCTL_NOTIFY_CMD_EVENT:
+	{
+		int sobjd, cobjd, loglevel_value;
+		char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
+		size_t nr_fields;
+		uint64_t tracer_token = 0;
+		struct ustctl_field *fields;
+
+		DBG2("UST app ustctl register event received");
+
+		ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name,
+				&loglevel_value, &sig, &nr_fields, &fields,
+				&model_emf_uri, &tracer_token);
+		if (ret < 0) {
+			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+				ERR("UST app recv event failed with ret %d", ret);
+			} else {
+				DBG3("UST app recv event failed. Application died");
+			}
+			goto error;
+		}
+
+		/*
+		 * Add event to the UST registry coming from the notify socket. This
+		 * call will free the sig, fields and model_emf_uri if needed. This
+		 * code path loses the ownership of these variables and transfers
+		 * it to the callee.
+		 */
+		ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
+				fields, loglevel_value, model_emf_uri, tracer_token);
+		if (ret < 0) {
+			goto error;
+		}
+
+		break;
+	}
+	case USTCTL_NOTIFY_CMD_CHANNEL:
+	{
+		int sobjd, cobjd;
+		size_t nr_fields;
+		struct ustctl_field *fields;
+
+		DBG2("UST app ustctl register channel received");
+
+		ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
+				&fields);
+		if (ret < 0) {
+			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+				ERR("UST app recv channel failed with ret %d", ret);
+			} else {
+				DBG3("UST app recv channel failed. Application died");
+			}
+			goto error;
+		}
+
+		/*
+		 * Ownership of the fields is transferred to this function call,
+		 * meaning that if needed they will be freed. After this, it's
+		 * invalid to access fields or clean it up.
+		 */
+		ret = reply_ust_register_channel(sock, cobjd, nr_fields,
+				fields);
+		if (ret < 0) {
+			goto error;
+		}
+
+		break;
+	}
+	case USTCTL_NOTIFY_CMD_ENUM:
+	{
+		int sobjd;
+		char name[LTTNG_UST_SYM_NAME_LEN];
+		size_t nr_entries;
+		struct ustctl_enum_entry *entries;
+
+		DBG2("UST app ustctl register enum received");
+
+		ret = ustctl_recv_register_enum(sock, &sobjd, name,
+				&entries, &nr_entries);
+		if (ret < 0) {
+			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+				ERR("UST app recv enum failed with ret %d", ret);
+			} else {
+				DBG3("UST app recv enum failed. Application died");
+			}
+			goto error;
+		}
+
+		/* Callee assumes ownership of entries */
+		ret = add_enum_ust_registry(sock, sobjd, name,
+				entries, nr_entries);
+		if (ret < 0) {
+			goto error;
+		}
+
+		break;
+	}
+	default:
+		/* Should NEVER happen. */
+		assert(0);
+	}
+
+error:
+	return ret;
+}
+
+/*
+ * Once the notify socket hangs up, this is called. First, it tries to find the
+ * corresponding application. On failure, the call_rcu to close the socket is
+ * executed. If an application is found, it tries to delete it from the notify
+ * socket hash table. Whatever the result, it proceeds to the call_rcu.
+ *
+ * Note that an object needs to be allocated here so on ENOMEM failure, the
+ * call RCU is not done but the rest of the cleanup is.
+ */
+void ust_app_notify_sock_unregister(int sock)
+{
+	int err_enomem = 0;
+	struct lttng_ht_iter iter;
+	struct ust_app *app;
+	struct ust_app_notify_sock_obj *obj;
+
+	assert(sock >= 0);
+
+	rcu_read_lock();
+
+	obj = zmalloc(sizeof(*obj));
+	if (!obj) {
+		/*
+		 * An ENOMEM is kind of uncool. If this strikes we continue the
+		 * procedure but the call_rcu will not be called. In this case, we
+		 * accept the fd leak rather than possibly creating an unsynchronized
+		 * state between threads.
+		 *
+		 * TODO: The notify object should be created once the notify socket is
+		 * registered and stored independently from the ust app object. The
+		 * tricky part is to synchronize the teardown of the application and
+		 * this notify object. Let's keep that in mind so we can avoid this
+		 * kind of shenanigans with ENOMEM in the teardown path.
+		 */
+		err_enomem = 1;
+	} else {
+		obj->fd = sock;
+	}
+
+	DBG("UST app notify socket unregister %d", sock);
+
+	/*
+	 * Lookup application by notify socket. If this fails, this means that the
+	 * hash table delete has already been done by the application
+	 * unregistration process so we can safely close the notify socket in a
+	 * call RCU.
+	 */
+	app = find_app_by_notify_sock(sock);
+	if (!app) {
+		goto close_socket;
+	}
+
+	iter.iter.node = &app->notify_sock_n.node;
+
+	/*
+	 * Whatever happens here either we fail or succeed, in both cases we have
+	 * to close the socket after a grace period to continue to the call RCU
+	 * here. If the deletion is successful, the application is not visible
+	 * anymore by other threads and if it fails it means that it was already
+	 * deleted from the hash table so either way we just have to close the
+	 * socket.
+	 */
+	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
+
+close_socket:
+	rcu_read_unlock();
+
+	/*
+	 * Close socket after a grace period to avoid for the socket to be reused
+	 * before the application object is freed creating potential race between
+	 * threads trying to add unique in the global hash table.
+	 */
+	if (!err_enomem) {
+		call_rcu(&obj->head, close_notify_sock_rcu);
+	}
+}
+
+/*
+ * Destroy a ust app data structure and free its memory.
+ */
+void ust_app_destroy(struct ust_app *app)
+{
+	/* Deferred reclaim: free only after an RCU grace period elapses. */
+	if (app) {
+		call_rcu(&app->pid_n.head, delete_ust_app_rcu);
+	}
+}
+
+/*
+ * Take a snapshot for a given UST session. The snapshot is sent to the given
+ * output.
+ *
+ * Returns LTTNG_OK on success or a LTTNG_ERR error code.
+ */
+enum lttng_error_code ust_app_snapshot_record(
+		const struct ltt_ust_session *usess,
+		const struct consumer_output *output, int wait,
+		uint64_t nb_packets_per_stream)
+{
+	int ret = 0;
+	enum lttng_error_code status = LTTNG_OK;
+	struct lttng_ht_iter iter;
+	struct ust_app *app;
+	char *trace_path = NULL;
+
+	assert(usess);
+	assert(output);
+
+	rcu_read_lock();
+
+	switch (usess->buffer_type) {
+	case LTTNG_BUFFER_PER_UID:
+	{
+		struct buffer_reg_uid *reg;
+
+		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+			struct buffer_reg_channel *buf_reg_chan;
+			struct consumer_socket *socket;
+			char pathname[PATH_MAX];
+			size_t consumer_path_offset = 0;
+
+			if (!reg->registry->reg.ust->metadata_key) {
+				/* Skip since no metadata is present */
+				continue;
+			}
+
+			/* Get the consumer socket matching the registry's bitness. */
+			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
+					usess->consumer);
+			if (!socket) {
+				status = LTTNG_ERR_INVALID;
+				goto error;
+			}
+
+			memset(pathname, 0, sizeof(pathname));
+			ret = snprintf(pathname, sizeof(pathname),
+					DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
+					reg->uid, reg->bits_per_long);
+			if (ret < 0) {
+				PERROR("snprintf snapshot path");
+				status = LTTNG_ERR_INVALID;
+				goto error;
+			}
+			/* Free path allocated by the previous iteration. */
+			free(trace_path);
+			trace_path = setup_channel_trace_path(usess->consumer, pathname,
+					&consumer_path_offset);
+			if (!trace_path) {
+				status = LTTNG_ERR_INVALID;
+				goto error;
+			}
+			/* Snapshot each data channel of this per-UID registry. */
+			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
+					buf_reg_chan, node.node) {
+				status = consumer_snapshot_channel(socket,
+						buf_reg_chan->consumer_key,
+						output, 0, usess->uid,
+						usess->gid, &trace_path[consumer_path_offset], wait,
+						nb_packets_per_stream);
+				if (status != LTTNG_OK) {
+					goto error;
+				}
+			}
+			status = consumer_snapshot_channel(socket,
+					reg->registry->reg.ust->metadata_key, output, 1,
+					usess->uid, usess->gid, &trace_path[consumer_path_offset],
+					wait, 0);
+			if (status != LTTNG_OK) {
+				goto error;
+			}
+		}
+		break;
+	}
+	case LTTNG_BUFFER_PER_PID:
+	{
+		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+			struct consumer_socket *socket;
+			struct lttng_ht_iter chan_iter;
+			struct ust_app_channel *ua_chan;
+			struct ust_app_session *ua_sess;
+			struct ust_registry_session *registry;
+			char pathname[PATH_MAX];
+			size_t consumer_path_offset = 0;
+
+			ua_sess = lookup_session_by_app(usess, app);
+			if (!ua_sess) {
+				/* Session not associated with this app. */
+				continue;
+			}
+
+			/* Get the right consumer socket for the application. */
+			socket = consumer_find_socket_by_bitness(app->bits_per_long,
+					output);
+			if (!socket) {
+				status = LTTNG_ERR_INVALID;
+				goto error;
+			}
+
+			/* Add the UST default trace dir to path. */
+			memset(pathname, 0, sizeof(pathname));
+			ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
+					ua_sess->path);
+			if (ret < 0) {
+				status = LTTNG_ERR_INVALID;
+				PERROR("snprintf snapshot path");
+				goto error;
+			}
+			/* Free path allocated by the previous iteration. */
+			free(trace_path);
+			trace_path = setup_channel_trace_path(usess->consumer, pathname,
+					&consumer_path_offset);
+			if (!trace_path) {
+				status = LTTNG_ERR_INVALID;
+				goto error;
+			}
+			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
+					ua_chan, node.node) {
+				status = consumer_snapshot_channel(socket,
+						ua_chan->key, output, 0,
+						lttng_credentials_get_uid(&ua_sess->effective_credentials),
+						lttng_credentials_get_gid(&ua_sess->effective_credentials),
+						&trace_path[consumer_path_offset], wait,
+						nb_packets_per_stream);
+				switch (status) {
+				case LTTNG_OK:
+					break;
+				case LTTNG_ERR_CHAN_NOT_FOUND:
+					/* Per-PID channel may vanish with its app; not fatal. */
+					continue;
+				default:
+					goto error;
+				}
+			}
+
+			registry = get_session_registry(ua_sess);
+			if (!registry) {
+				DBG("Application session is being torn down. Skip application.");
+				continue;
+			}
+			status = consumer_snapshot_channel(socket,
+					registry->metadata_key, output, 1,
+					lttng_credentials_get_uid(&ua_sess->effective_credentials),
+					lttng_credentials_get_gid(&ua_sess->effective_credentials),
+					&trace_path[consumer_path_offset], wait, 0);
+			switch (status) {
+			case LTTNG_OK:
+				break;
+			case LTTNG_ERR_CHAN_NOT_FOUND:
+				continue;
+			default:
+				goto error;
+			}
+		}
+		break;
+	}
+	default:
+		assert(0);
+		break;
+	}
+
+error:
+	free(trace_path);
+	rcu_read_unlock();
+	return status;
+}
+
+/*
+ * Return the size taken by one more packet per stream.
+ */
+uint64_t ust_app_get_size_one_more_packet_per_stream(
+		const struct ltt_ust_session *usess, uint64_t cur_nr_packets)
+{
+	uint64_t tot_size = 0;
+	struct ust_app *app;
+	struct lttng_ht_iter iter;
+
+	assert(usess);
+
+	switch (usess->buffer_type) {
+	case LTTNG_BUFFER_PER_UID:
+	{
+		struct buffer_reg_uid *reg;
+
+		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+			struct buffer_reg_channel *buf_reg_chan;
+
+			rcu_read_lock();
+			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
+					buf_reg_chan, node.node) {
+				if (cur_nr_packets >= buf_reg_chan->num_subbuf) {
+					/*
+					 * Don't take channel into account if we
+					 * already grabbed all its packets.
+					 */
+					continue;
+				}
+				tot_size += buf_reg_chan->subbuf_size * buf_reg_chan->stream_count;
+			}
+			rcu_read_unlock();
+		}
+		break;
+	}
+	case LTTNG_BUFFER_PER_PID:
+	{
+		rcu_read_lock();
+		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+			struct ust_app_channel *ua_chan;
+			struct ust_app_session *ua_sess;
+			struct lttng_ht_iter chan_iter;
+
+			ua_sess = lookup_session_by_app(usess, app);
+			if (!ua_sess) {
+				/* Session not associated with this app. */
+				continue;
+			}
+
+			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
+					ua_chan, node.node) {
+				if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
+					/*
+					 * Don't take channel into account if we
+					 * already grabbed all its packets.
+					 */
+					continue;
+				}
+				tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
+			}
+		}
+		rcu_read_unlock();
+		break;
+	}
+	default:
+		assert(0);
+		break;
+	}
+
+	return tot_size;
+}
+
+int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id,
+		struct cds_list_head *buffer_reg_uid_list,
+		struct consumer_output *consumer, uint64_t uchan_id,
+		int overwrite, uint64_t *discarded, uint64_t *lost)
+{
+	uint64_t chan_key;
+
+	/* Report zeroed stats when the channel cannot be resolved. */
+	*discarded = 0;
+	*lost = 0;
+
+	if (buffer_reg_uid_consumer_channel_key(buffer_reg_uid_list,
+			uchan_id, &chan_key) < 0) {
+		/* Not found: not an error from the caller's perspective. */
+		return 0;
+	}
+
+	/*
+	 * Overwrite-mode channels report lost packets; discard-mode channels
+	 * report discarded events.
+	 */
+	return overwrite ?
+			consumer_get_lost_packets(ust_session_id, chan_key,
+					consumer, lost) :
+			consumer_get_discarded_events(ust_session_id, chan_key,
+					consumer, discarded);
+}
+
+int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess,
+ struct ltt_ust_channel *uchan,
+ struct consumer_output *consumer, int overwrite,
+ uint64_t *discarded, uint64_t *lost)
+{
+ int ret = 0;
+ struct lttng_ht_iter iter;
+ struct lttng_ht_node_str *ua_chan_node;
+ struct ust_app *app;
+ struct ust_app_session *ua_sess;
+ struct ust_app_channel *ua_chan;
+
+ *discarded = 0;
+ *lost = 0;
+
+ rcu_read_lock();
+ /*
+ * Iterate over every registered applications. Sum counters for
+ * all applications containing requested session and channel.
+ */
+ cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ struct lttng_ht_iter uiter;
+
+ ua_sess = lookup_session_by_app(usess, app);
+ if (ua_sess == NULL) {
+ continue;
+ }
+
+ /* Get channel */
+ lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
+ ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
+ /* If the session is found for the app, the channel must be there */
+ assert(ua_chan_node);
+
+ ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
+
+ if (overwrite) {
+ uint64_t _lost;
+
+ ret = consumer_get_lost_packets(usess->id, ua_chan->key,
+ consumer, &_lost);
+ if (ret < 0) {
+ break;
}
- goto error;
- }
+ (*lost) += _lost;
+ } else {
+ uint64_t _discarded;
- /* Callee assumes ownership of entries */
- ret = add_enum_ust_registry(sock, sobjd, name,
- entries, nr_entries);
- if (ret < 0) {
- goto error;
+ ret = consumer_get_discarded_events(usess->id,
+ ua_chan->key, consumer, &_discarded);
+ if (ret < 0) {
+ break;
+ }
+ (*discarded) += _discarded;
}
-
- break;
- }
- default:
- /* Should NEVER happen. */
- assert(0);
}
-error:
+ rcu_read_unlock();
return ret;
}
-/*
- * Once the notify socket hangs up, this is called. First, it tries to find the
- * corresponding application. On failure, the call_rcu to close the socket is
- * executed. If an application is found, it tries to delete it from the notify
- * socket hash table. Whathever the result, it proceeds to the call_rcu.
- *
- * Note that an object needs to be allocated here so on ENOMEM failure, the
- * call RCU is not done but the rest of the cleanup is.
- */
-void ust_app_notify_sock_unregister(int sock)
+static
+int ust_app_regenerate_statedump(struct ltt_ust_session *usess,
+ struct ust_app *app)
{
- int err_enomem = 0;
- struct lttng_ht_iter iter;
- struct ust_app *app;
- struct ust_app_notify_sock_obj *obj;
+ int ret = 0;
+ struct ust_app_session *ua_sess;
- assert(sock >= 0);
+ DBG("Regenerating the metadata for ust app pid %d", app->pid);
rcu_read_lock();
- obj = zmalloc(sizeof(*obj));
- if (!obj) {
- /*
- * An ENOMEM is kind of uncool. If this strikes we continue the
- * procedure but the call_rcu will not be called. In this case, we
- * accept the fd leak rather than possibly creating an unsynchronized
- * state between threads.
- *
- * TODO: The notify object should be created once the notify socket is
- * registered and stored independantely from the ust app object. The
- * tricky part is to synchronize the teardown of the application and
- * this notify object. Let's keep that in mind so we can avoid this
- * kind of shenanigans with ENOMEM in the teardown path.
- */
- err_enomem = 1;
- } else {
- obj->fd = sock;
+ ua_sess = lookup_session_by_app(usess, app);
+ if (ua_sess == NULL) {
+ /* The session is in teardown process. Ignore and continue. */
+ goto end;
}
- DBG("UST app notify socket unregister %d", sock);
+ pthread_mutex_lock(&ua_sess->lock);
- /*
- * Lookup application by notify socket. If this fails, this means that the
- * hash table delete has already been done by the application
- * unregistration process so we can safely close the notify socket in a
- * call RCU.
- */
- app = find_app_by_notify_sock(sock);
- if (!app) {
- goto close_socket;
+ if (ua_sess->deleted) {
+ goto end_unlock;
}
- iter.iter.node = &app->notify_sock_n.node;
+ pthread_mutex_lock(&app->sock_lock);
+ ret = ustctl_regenerate_statedump(app->sock, ua_sess->handle);
+ pthread_mutex_unlock(&app->sock_lock);
- /*
- * Whatever happens here either we fail or succeed, in both cases we have
- * to close the socket after a grace period to continue to the call RCU
- * here. If the deletion is successful, the application is not visible
- * anymore by other threads and is it fails it means that it was already
- * deleted from the hash table so either way we just have to close the
- * socket.
- */
- (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
+end_unlock:
+ pthread_mutex_unlock(&ua_sess->lock);
-close_socket:
+end:
rcu_read_unlock();
-
- /*
- * Close socket after a grace period to avoid for the socket to be reused
- * before the application object is freed creating potential race between
- * threads trying to add unique in the global hash table.
- */
- if (!err_enomem) {
- call_rcu(&obj->head, close_notify_sock_rcu);
- }
+ health_code_update();
+ return ret;
}
/*
- * Destroy a ust app data structure and free its memory.
+ * Regenerate the statedump for each app in the session.
*/
-void ust_app_destroy(struct ust_app *app)
+int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess)
{
- if (!app) {
- return;
+ int ret = 0;
+ struct lttng_ht_iter iter;
+ struct ust_app *app;
+
+ DBG("Regenerating the metadata for all UST apps");
+
+ rcu_read_lock();
+
+ cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ if (!app->compatible) {
+ continue;
+ }
+
+ ret = ust_app_regenerate_statedump(usess, app);
+ if (ret < 0) {
+ /* Continue to the next app even on error */
+ continue;
+ }
}
- call_rcu(&app->pid_n.head, delete_ust_app_rcu);
+ rcu_read_unlock();
+
+ return 0;
}
/*
- * Take a snapshot for a given UST session. The snapshot is sent to the given
- * output.
+ * Rotate all the channels of a session.
*
- * Return 0 on success or else a negative value.
+ * Return LTTNG_OK on success or else an LTTng error code.
*/
-int ust_app_snapshot_record(struct ltt_ust_session *usess,
- struct snapshot_output *output, int wait,
- uint64_t nb_packets_per_stream)
+enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
{
- int ret = 0;
+ int ret;
+ enum lttng_error_code cmd_ret = LTTNG_OK;
struct lttng_ht_iter iter;
struct ust_app *app;
- char pathname[PATH_MAX];
+ struct ltt_ust_session *usess = session->ust_session;
assert(usess);
- assert(output);
rcu_read_lock();
struct buffer_reg_uid *reg;
cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
- struct buffer_reg_channel *reg_chan;
+ struct buffer_reg_channel *buf_reg_chan;
struct consumer_socket *socket;
+ if (!reg->registry->reg.ust->metadata_key) {
+ /* Skip since no metadata is present */
+ continue;
+ }
+
/* Get consumer socket to use to push the metadata.*/
socket = consumer_find_socket_by_bitness(reg->bits_per_long,
usess->consumer);
if (!socket) {
- ret = -EINVAL;
- goto error;
- }
-
- memset(pathname, 0, sizeof(pathname));
- ret = snprintf(pathname, sizeof(pathname),
- DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
- reg->uid, reg->bits_per_long);
- if (ret < 0) {
- PERROR("snprintf snapshot path");
+ cmd_ret = LTTNG_ERR_INVALID;
goto error;
}
- /* Add the UST default trace dir to path. */
+ /* Rotate the data channels. */
cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
- reg_chan, node.node) {
- ret = consumer_snapshot_channel(socket, reg_chan->consumer_key,
- output, 0, usess->uid, usess->gid, pathname, wait,
- nb_packets_per_stream);
+ buf_reg_chan, node.node) {
+ ret = consumer_rotate_channel(socket,
+ buf_reg_chan->consumer_key,
+ usess->uid, usess->gid,
+ usess->consumer,
+ /* is_metadata_channel */ false);
if (ret < 0) {
+ cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
goto error;
}
}
- ret = consumer_snapshot_channel(socket,
- reg->registry->reg.ust->metadata_key, output, 1,
- usess->uid, usess->gid, pathname, wait, 0);
+
+ (void) push_metadata(reg->registry->reg.ust, usess->consumer);
+
+ ret = consumer_rotate_channel(socket,
+ reg->registry->reg.ust->metadata_key,
+ usess->uid, usess->gid,
+ usess->consumer,
+ /* is_metadata_channel */ true);
if (ret < 0) {
+ cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
goto error;
}
}
/* Get the right consumer socket for the application. */
socket = consumer_find_socket_by_bitness(app->bits_per_long,
- output->consumer);
+ usess->consumer);
if (!socket) {
- ret = -EINVAL;
+ cmd_ret = LTTNG_ERR_INVALID;
goto error;
}
- /* Add the UST default trace dir to path. */
- memset(pathname, 0, sizeof(pathname));
- ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
- ua_sess->path);
- if (ret < 0) {
- PERROR("snprintf snapshot path");
- goto error;
+ registry = get_session_registry(ua_sess);
+ if (!registry) {
+ DBG("Application session is being torn down. Skip application.");
+ continue;
}
+ /* Rotate the data channels. */
cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
ua_chan, node.node) {
- ret = consumer_snapshot_channel(socket, ua_chan->key, output,
- 0, ua_sess->euid, ua_sess->egid, pathname, wait,
- nb_packets_per_stream);
+ ret = consumer_rotate_channel(socket,
+ ua_chan->key,
+ lttng_credentials_get_uid(&ua_sess->effective_credentials),
+ lttng_credentials_get_gid(&ua_sess->effective_credentials),
+ ua_sess->consumer,
+ /* is_metadata_channel */ false);
if (ret < 0) {
+ /* Per-PID buffer and application going away. */
+ if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
+ continue;
+ cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
goto error;
}
}
- registry = get_session_registry(ua_sess);
- assert(registry);
- ret = consumer_snapshot_channel(socket, registry->metadata_key, output,
- 1, ua_sess->euid, ua_sess->egid, pathname, wait, 0);
+ /* Rotate the metadata channel. */
+ (void) push_metadata(registry, usess->consumer);
+ ret = consumer_rotate_channel(socket,
+ registry->metadata_key,
+ lttng_credentials_get_uid(&ua_sess->effective_credentials),
+ lttng_credentials_get_gid(&ua_sess->effective_credentials),
+ ua_sess->consumer,
+ /* is_metadata_channel */ true);
if (ret < 0) {
+ /* Per-PID buffer and application going away. */
+ if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
+ continue;
+ cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
goto error;
}
}
break;
}
+ cmd_ret = LTTNG_OK;
+
+error:
+ rcu_read_unlock();
+ return cmd_ret;
+}
+
+enum lttng_error_code ust_app_create_channel_subdirectories(
+ const struct ltt_ust_session *usess)
+{
+ enum lttng_error_code ret = LTTNG_OK;
+ struct lttng_ht_iter iter;
+ enum lttng_trace_chunk_status chunk_status;
+ char *pathname_index;
+ int fmt_ret;
+
+ assert(usess->current_trace_chunk);
+ rcu_read_lock();
+
+ switch (usess->buffer_type) {
+ case LTTNG_BUFFER_PER_UID:
+ {
+ struct buffer_reg_uid *reg;
+
+ cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+ fmt_ret = asprintf(&pathname_index,
+ DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH "/" DEFAULT_INDEX_DIR,
+ reg->uid, reg->bits_per_long);
+ if (fmt_ret < 0) {
+ ERR("Failed to format channel index directory");
+ ret = LTTNG_ERR_CREATE_DIR_FAIL;
+ goto error;
+ }
+
+ /*
+ * Create the index subdirectory which will take care
+ * of implicitly creating the channel's path.
+ */
+ chunk_status = lttng_trace_chunk_create_subdirectory(
+ usess->current_trace_chunk,
+ pathname_index);
+ free(pathname_index);
+ if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
+ ret = LTTNG_ERR_CREATE_DIR_FAIL;
+ goto error;
+ }
+ }
+ break;
+ }
+ case LTTNG_BUFFER_PER_PID:
+ {
+ struct ust_app *app;
+
+ /*
+ * Create the toplevel ust/ directory in case no apps are running.
+ */
+ chunk_status = lttng_trace_chunk_create_subdirectory(
+ usess->current_trace_chunk,
+ DEFAULT_UST_TRACE_DIR);
+ if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
+ ret = LTTNG_ERR_CREATE_DIR_FAIL;
+ goto error;
+ }
+
+ cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
+ pid_n.node) {
+ struct ust_app_session *ua_sess;
+ struct ust_registry_session *registry;
+
+ ua_sess = lookup_session_by_app(usess, app);
+ if (!ua_sess) {
+ /* Session not associated with this app. */
+ continue;
+ }
+
+ registry = get_session_registry(ua_sess);
+ if (!registry) {
+ DBG("Application session is being torn down. Skip application.");
+ continue;
+ }
+
+ fmt_ret = asprintf(&pathname_index,
+ DEFAULT_UST_TRACE_DIR "/%s/" DEFAULT_INDEX_DIR,
+ ua_sess->path);
+ if (fmt_ret < 0) {
+ ERR("Failed to format channel index directory");
+ ret = LTTNG_ERR_CREATE_DIR_FAIL;
+ goto error;
+ }
+ /*
+ * Create the index subdirectory which will take care
+ * of implicitly creating the channel's path.
+ */
+ chunk_status = lttng_trace_chunk_create_subdirectory(
+ usess->current_trace_chunk,
+ pathname_index);
+ free(pathname_index);
+ if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
+ ret = LTTNG_ERR_CREATE_DIR_FAIL;
+ goto error;
+ }
+ }
+ break;
+ }
+ default:
+ abort();
+ }
+
+ ret = LTTNG_OK;
error:
rcu_read_unlock();
return ret;
}
 /*
- * Return the size taken by one more packet per stream.
+ * Clear all the channels of a session.
+ *
+ * Return LTTNG_OK on success or else an LTTng error code.
  */
-uint64_t ust_app_get_size_one_more_packet_per_stream(struct ltt_ust_session *usess,
-		uint64_t cur_nr_packets)
+static
+enum lttng_error_code ust_app_clear_session_channels(struct ltt_session *session)
 {
-	uint64_t tot_size = 0;
-	struct ust_app *app;
+	int ret;
+	enum lttng_error_code cmd_ret = LTTNG_OK;
 	struct lttng_ht_iter iter;
+	struct ust_app *app;
+	struct ltt_ust_session *usess = session->ust_session;
 	assert(usess);
+	rcu_read_lock();
+
+	if (usess->active) {
+		ERR("Expecting inactive session %s (%" PRIu64 ")", session->name, session->id);
+		cmd_ret = LTTNG_ERR_FATAL;
+		goto end;
+	}
+
 	switch (usess->buffer_type) {
 	case LTTNG_BUFFER_PER_UID:
 	{
 		struct buffer_reg_uid *reg;
 		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
-			struct buffer_reg_channel *reg_chan;
+			struct buffer_reg_channel *buf_reg_chan;
+			struct consumer_socket *socket;
+
+			/* Get consumer socket to use to push the metadata.*/
+			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
+					usess->consumer);
+			if (!socket) {
+				cmd_ret = LTTNG_ERR_INVALID;
+				goto error_socket;
+			}
-			rcu_read_lock();
+			/* Clear the data channels. */
 			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
-					reg_chan, node.node) {
-				if (cur_nr_packets >= reg_chan->num_subbuf) {
-					/*
-					 * Don't take channel into account if we
-					 * already grab all its packets.
-					 */
-					continue;
+					buf_reg_chan, node.node) {
+				ret = consumer_clear_channel(socket,
+						buf_reg_chan->consumer_key);
+				if (ret < 0) {
+					goto error;
 				}
-				tot_size += reg_chan->subbuf_size * reg_chan->stream_count;
 			}
-			rcu_read_unlock();
+
+			(void) push_metadata(reg->registry->reg.ust, usess->consumer); /* best-effort; result deliberately ignored */
+
+			/*
+			 * Clear the metadata channel.
+			 * Metadata channel is not cleared per se but we still need to
+			 * perform a rotation operation on it behind the scene.
+			 */
+			ret = consumer_clear_channel(socket,
+					reg->registry->reg.ust->metadata_key);
+			if (ret < 0) {
+				goto error;
+			}
 		}
 		break;
 	}
 	case LTTNG_BUFFER_PER_PID:
 	{
-		rcu_read_lock();
 		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+			struct consumer_socket *socket;
+			struct lttng_ht_iter chan_iter;
 			struct ust_app_channel *ua_chan;
 			struct ust_app_session *ua_sess;
-			struct lttng_ht_iter chan_iter;
+			struct ust_registry_session *registry;
 			ua_sess = lookup_session_by_app(usess, app);
 			if (!ua_sess) {
 				continue;
 			}
+			/* Get the right consumer socket for the application. */
+			socket = consumer_find_socket_by_bitness(app->bits_per_long,
+					usess->consumer);
+			if (!socket) {
+				cmd_ret = LTTNG_ERR_INVALID;
+				goto error_socket;
+			}
+
+			registry = get_session_registry(ua_sess);
+			if (!registry) {
+				DBG("Application session is being torn down. Skip application.");
+				continue;
+			}
+
+			/* Clear the data channels. */
 			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
 					ua_chan, node.node) {
-				if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
-					/*
-					 * Don't take channel into account if we
-					 * already grab all its packets.
-					 */
+				ret = consumer_clear_channel(socket, ua_chan->key);
+				if (ret < 0) {
+					/* Per-PID buffer and application going away. */
+					if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
+						continue;
+					}
+					goto error;
+				}
+			}
+
+			(void) push_metadata(registry, usess->consumer); /* best-effort; result deliberately ignored */
+
+			/*
+			 * Clear the metadata channel.
+			 * Metadata channel is not cleared per se but we still need to
+			 * perform rotation operation on it behind the scene.
+			 */
+			ret = consumer_clear_channel(socket, registry->metadata_key);
+			if (ret < 0) {
+				/* Per-PID buffer and application going away. */
+				if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
 					continue;
 				}
-				tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
+				goto error;
 			}
 		}
-		rcu_read_unlock();
 		break;
 	}
 	default:
 		break;
 	}
-	return tot_size;
+	cmd_ret = LTTNG_OK;
+	goto end;
+
+error:
+	switch (-ret) { /* ret holds a negative LTTCOMM_* consumer error code here */
+	case LTTCOMM_CONSUMERD_RELAYD_CLEAR_DISALLOWED:
+		cmd_ret = LTTNG_ERR_CLEAR_RELAY_DISALLOWED;
+		break;
+	default:
+		cmd_ret = LTTNG_ERR_CLEAR_FAIL_CONSUMER;
+	}
+
+error_socket: /* cmd_ret already set by the failing lookup; fall through to cleanup */
+end:
+	rcu_read_unlock();
+	return cmd_ret;
 }
-int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id,
-		struct cds_list_head *buffer_reg_uid_list,
-		struct consumer_output *consumer, uint64_t uchan_id,
-		int overwrite, uint64_t *discarded, uint64_t *lost)
+static
+enum lttng_error_code ust_app_clear_session_maps_per_uid(
+		struct ltt_ust_session *usess, struct ltt_ust_map *umap,
+		uint32_t app_bitness) /* Clears every counter bucket of the per-UID map; returns LTTNG_OK or an LTTng error. */
 {
-	int ret;
-	uint64_t consumer_chan_key;
+	struct lttng_ht_iter iter;
+	struct buffer_reg_uid *buf_reg_uid;
+	struct buffer_reg_map *buf_reg_map;
+	struct ust_registry_session *ust_reg_sess;
+	struct lttng_ht_node_u64 *ust_reg_map_node;
+	struct ust_registry_map *ust_reg_map;
+	struct ust_registry_map_index_ht_entry *map_index_entry;
+	enum lttng_error_code status;
+
+	buf_reg_uid = buffer_reg_uid_find(usess->id, app_bitness, usess->uid);
+	if (!buf_reg_uid) {
+		/*
+		 * Buffer registry entry for uid not found. Probably no app for
+		 * this UID at the moment.
+		 */
+		DBG("No buffer registry entry found for uid: ust-sess-id = %"PRIu64", bitness = %"PRIu32", uid = %d",
+				usess->id, app_bitness, usess->uid);
+		/*
+		 * Not an error. Leave the key value pair unchanged and return.
+		 */
+		status = LTTNG_OK;
+		goto end;
+	}
-	ret = buffer_reg_uid_consumer_channel_key(
-			buffer_reg_uid_list, ust_session_id,
-			uchan_id, &consumer_chan_key);
-	if (ret < 0) {
+	buf_reg_map = buffer_reg_map_find(umap->id, buf_reg_uid);
+	if (!buf_reg_map) { /* Fix: test the map lookup result (was !buf_reg_uid, a NULL deref on failure below). */
+		ERR("Error getting per-uid map buffer registry entry: map-id = %"PRIu64,
+				umap->id);
+		status = LTTNG_ERR_UNK;
 		goto end;
 	}
-	if (overwrite) {
-		ret = consumer_get_lost_packets(ust_session_id,
-				consumer_chan_key, consumer, lost);
-		*discarded = 0;
-	} else {
-		ret = consumer_get_discarded_events(ust_session_id,
-				consumer_chan_key, consumer, discarded);
-		*lost = 0;
+	ust_reg_sess = buf_reg_uid->registry->reg.ust;
+
+	/* Get the ust_reg map object from the registry */
+	// FIXME: frdeso: This can be changed to ust_registry_map_find() right?
+
+	lttng_ht_lookup(ust_reg_sess->maps, (void *) &umap->id, &iter);
+	ust_reg_map_node = lttng_ht_iter_get_node_u64(&iter);
+	if (!ust_reg_map_node) {
+		ERR("Error getting per-uid map buffer registry entry: map-id = %"PRIu64,
+				umap->id);
+		status = LTTNG_ERR_UNK;
+		goto end;
+	}
+	ust_reg_map = caa_container_of(ust_reg_map_node,
+			struct ust_registry_map, node);
+
+	cds_lfht_for_each_entry(ust_reg_map->key_string_to_bucket_index_ht->ht,
+			&iter.iter, map_index_entry, node.node) {
+		int ret;
+		size_t dimension_indexes[1] = {map_index_entry->index};
+
+		ret = ustctl_counter_clear(buf_reg_map->daemon_counter, dimension_indexes);
+		if (ret) {
+			ERR("clearing counter index %"PRIu64, map_index_entry->index);
+			//fixme: frdeso: convert ust errors to tools errors
+			status = LTTNG_ERR_UNK;
+			goto end;
+		}
 	}
+	status = LTTNG_OK;
+
 end:
-	return ret;
+	return status;
 }
-int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess,
-		struct ltt_ust_channel *uchan,
-		struct consumer_output *consumer, int overwrite,
-		uint64_t *discarded, uint64_t *lost)
+static
+enum lttng_error_code ust_app_clear_session_maps_per_pid(
+		struct ltt_ust_session *usess, struct ltt_ust_map *umap,
+		uint32_t app_bitness)
 {
-	int ret = 0;
-	struct lttng_ht_iter iter;
-	struct lttng_ht_node_str *ua_chan_node;
+	struct lttng_ht_iter app_iter;
+	enum lttng_error_code status;
 	struct ust_app *app;
-	struct ust_app_session *ua_sess;
-	struct ust_app_channel *ua_chan;
+	struct map_kv_ht_entry *kv_entry;
+	struct lttng_ht_iter iter;
-	rcu_read_lock();
-	/*
-	 * Iterate over every registered applications, return when we
-	 * found one in the right session and channel.
-	 */
-	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-		struct lttng_ht_iter uiter;
+	cds_lfht_for_each_entry(ust_app_ht->ht, &app_iter.iter, app, pid_n.node) { /* NOTE(review): removed code held the RCU read lock for this iteration — confirm the caller now does */
+		struct lttng_ht_iter map_iter, key_iter;
+		struct lttng_ht_node_str *ua_map_node;
+		struct ust_app_map *ua_map;
+		struct ust_app_session *ua_sess;
+		struct ust_registry_session *ust_reg_sess;
+		struct ust_registry_map *ust_reg_map;
+		struct ust_registry_map_index_ht_entry *map_index_entry;
+
+		if (app->bits_per_long != app_bitness) {
+			continue;
+		}
 		ua_sess = lookup_session_by_app(usess, app);
-		if (ua_sess == NULL) {
+		if (!ua_sess) {
+			/* Session not associated with this app. */
 			continue;
 		}
-		/* Get channel */
-		lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
-		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
-		/* If the session is found for the app, the channel must be there */
-		assert(ua_chan_node);
+		ust_reg_sess = get_session_registry(ua_sess);
+		if (!ust_reg_sess) {
+			DBG("Application session is being torn down. Skip application.");
+			continue;
+		}
-		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
+		/* Lookup map in the ust app session */
+		lttng_ht_lookup(ua_sess->maps, (void *)umap->name, &map_iter);
+		ua_map_node = lttng_ht_iter_get_node_str(&map_iter);
-		if (overwrite) {
-			ret = consumer_get_lost_packets(usess->id, ua_chan->key,
-					consumer, lost);
-			*discarded = 0;
-			goto end;
-		} else {
-			ret = consumer_get_discarded_events(usess->id,
-					ua_chan->key, consumer, discarded);
-			*lost = 0;
-			goto end;
+		assert(ua_map_node != NULL);
+		ua_map = caa_container_of(ua_map_node, struct ust_app_map, node);
+
+		pthread_mutex_lock(&ust_reg_sess->lock);
+		ust_reg_map = ust_registry_map_find(ust_reg_sess, ua_map->key);
+		pthread_mutex_unlock(&ust_reg_sess->lock);
+		assert(ust_reg_map);
+
+		/* Iterate over all the formated_key -> counter index */
+		cds_lfht_for_each_entry(ust_reg_map->key_string_to_bucket_index_ht->ht,
+				&key_iter.iter, map_index_entry, node.node) {
+
+			int ret;
+			size_t dimension_indexes[1] = {map_index_entry->index};
+
+			ret = ustctl_counter_clear(ua_map->map_handle,
+					dimension_indexes);
+			if (ret) {
+				ERR("clearing counter index %"PRIu64, map_index_entry->index);
+				//fixme: frdeso: convert ust errors to tools errors
+				status = LTTNG_ERR_UNK;
+				goto end; /* no locks held at this point */
+			}
+		}
+	}
+
+	/*
+	 * Emptying the dead app key values.
+	 */
+	pthread_mutex_lock(&umap->dead_app_kv_values.lock);
+
+	if (app_bitness == 32) {
+		cds_lfht_for_each_entry(umap->dead_app_kv_values.dead_app_kv_values_32bits->ht,
+				&iter.iter, kv_entry, node.node) {
+			kv_entry->value = 0;
+		}
+	} else {
+
+		cds_lfht_for_each_entry(umap->dead_app_kv_values.dead_app_kv_values_64bits->ht,
+				&iter.iter, kv_entry, node.node) {
+			kv_entry->value = 0;
 		}
 	}
+	pthread_mutex_unlock(&umap->dead_app_kv_values.lock);
+
+	status = LTTNG_OK;
 end:
-	rcu_read_unlock();
-	return ret;
+	return status;
 }
 static
-int ust_app_regenerate_statedump(struct ltt_ust_session *usess,
-		struct ust_app *app)
+enum lttng_error_code ust_app_clear_session_maps(struct ltt_session *session)
 {
-	int ret = 0;
-	struct ust_app_session *ua_sess;
+	struct ltt_ust_session *usess = session->ust_session;
+	enum lttng_error_code status;
+	struct lttng_ht_iter iter;
+	struct ltt_ust_map *umap;
-	DBG("Regenerating the metadata for ust app pid %d", app->pid);
+	cds_lfht_for_each_entry(usess->domain_global.maps->ht, &iter.iter,
+			umap, node.node) {
-	rcu_read_lock();
+		if (usess->buffer_type == LTTNG_BUFFER_PER_UID) {
+			status = ust_app_clear_session_maps_per_uid(session->ust_session,
+					umap, 32);
+			assert(status == LTTNG_OK); /* NOTE(review): no effect under NDEBUG, status then unused — see fixme below */
-	ua_sess = lookup_session_by_app(usess, app);
-	if (ua_sess == NULL) {
-		/* The session is in teardown process. Ignore and continue. */
-		goto end;
-	}
+			status = ust_app_clear_session_maps_per_uid(session->ust_session,
+					umap, 64);
+			assert(status == LTTNG_OK);
+			//fixme:frdeso:error handling
+		} else {
+			status = ust_app_clear_session_maps_per_pid(session->ust_session,
+					umap, 32);
+			assert(status == LTTNG_OK);
-	pthread_mutex_lock(&ua_sess->lock);
+			status = ust_app_clear_session_maps_per_pid(session->ust_session,
+					umap, 64);
+			assert(status == LTTNG_OK);
+			//fixme:frdeso:error handling
+			//
+			//
+		}
-	if (ua_sess->deleted) {
-		goto end_unlock;
-	}
-	pthread_mutex_lock(&app->sock_lock);
-	ret = ustctl_regenerate_statedump(app->sock, ua_sess->handle);
-	pthread_mutex_unlock(&app->sock_lock);
+	return LTTNG_OK;
+}
-end_unlock:
-	pthread_mutex_unlock(&ua_sess->lock);
+enum lttng_error_code ust_app_clear_session(struct ltt_session *session) /* Clear the session's channels, then its maps; LTTNG_OK or error. */
+{
+	enum lttng_error_code cmd_ret;
+
+
+	cmd_ret = ust_app_clear_session_channels(session);
+	if (cmd_ret != LTTNG_OK) {
+		ERR("Clearing session's channels");
+		goto end;
+	}
+	cmd_ret = ust_app_clear_session_maps(session);
+	if (cmd_ret != LTTNG_OK) {
+		ERR("Clearing session's maps");
+		goto end;
+	}
 end:
-	rcu_read_unlock();
-	health_code_update();
-	return ret;
+	return cmd_ret;
 }
 /*
- * Regenerate the statedump for each app in the session.
+ * This function skips the metadata channel as the begin/end timestamps of a
+ * metadata packet are useless.
+ *
+ * Moreover, opening a packet after a "clear" will cause problems for live
+ * sessions as it will introduce padding that was not part of the first trace
+ * chunk. The relay daemon expects the content of the metadata stream of
+ * successive metadata trace chunks to be strict supersets of one another.
+ *
+ * For example, flushing a packet at the beginning of the metadata stream of
+ * a trace chunk resulting from a "clear" session command will cause the
+ * size of the metadata stream of the new trace chunk to not match the size of
+ * the metadata stream of the original chunk. This will confuse the relay
+ * daemon as the same "offset" in a metadata stream will no longer point
+ * to the same content.
  */
-int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess)
+enum lttng_error_code ust_app_open_packets(struct ltt_session *session)
 {
-	int ret = 0;
+	enum lttng_error_code ret = LTTNG_OK;
 	struct lttng_ht_iter iter;
-	struct ust_app *app;
+	struct ltt_ust_session *usess = session->ust_session;
-	DBG("Regenerating the metadata for all UST apps");
+	assert(usess);
 	rcu_read_lock();
-	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-		if (!app->compatible) {
-			continue;
+	switch (usess->buffer_type) {
+	case LTTNG_BUFFER_PER_UID:
+	{
+		struct buffer_reg_uid *reg;
+
+		cds_list_for_each_entry (
+				reg, &usess->buffer_reg_uid_list, lnode) {
+			struct buffer_reg_channel *buf_reg_chan;
+			struct consumer_socket *socket;
+
+			socket = consumer_find_socket_by_bitness(
+					reg->bits_per_long, usess->consumer);
+			if (!socket) {
+				ret = LTTNG_ERR_FATAL;
+				goto error;
+			}
+
+			cds_lfht_for_each_entry(reg->registry->channels->ht,
+					&iter.iter, buf_reg_chan, node.node) {
+				const int open_ret =
+						consumer_open_channel_packets(
+							socket,
+							buf_reg_chan->consumer_key);
+
+				if (open_ret < 0) {
+					ret = LTTNG_ERR_UNK;
+					goto error;
+				}
+			}
 		}
+		break;
+	}
+	case LTTNG_BUFFER_PER_PID:
+	{
+		struct ust_app *app;
-		ret = ust_app_regenerate_statedump(usess, app);
-		if (ret < 0) {
-			/* Continue to the next app even on error */
-			continue;
+		cds_lfht_for_each_entry (
+				ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+			struct consumer_socket *socket;
+			struct lttng_ht_iter chan_iter;
+			struct ust_app_channel *ua_chan;
+			struct ust_app_session *ua_sess;
+			struct ust_registry_session *registry;
+
+			ua_sess = lookup_session_by_app(usess, app);
+			if (!ua_sess) {
+				/* Session not associated with this app. */
+				continue;
+			}
+
+			/* Get the right consumer socket for the application. */
+			socket = consumer_find_socket_by_bitness(
+					app->bits_per_long, usess->consumer);
+			if (!socket) {
+				ret = LTTNG_ERR_FATAL;
+				goto error;
+			}
+
+			registry = get_session_registry(ua_sess);
+			if (!registry) {
+				DBG("Application session is being torn down. Skip application.");
+				continue;
+			}
+
+			cds_lfht_for_each_entry(ua_sess->channels->ht,
+					&chan_iter.iter, ua_chan, node.node) {
+				const int open_ret =
+						consumer_open_channel_packets(
+							socket,
+							ua_chan->key);
+
+				if (open_ret < 0) {
+					/*
+					 * Per-PID buffer and application going
+					 * away.
+					 */
+					if (open_ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
+						continue;
+					}
+
+					ret = LTTNG_ERR_UNK;
+					goto error;
+				}
+			}
 		}
+		break;
+	}
+	default:
+		abort();
+		break;
 	}
+error: /* also reached on success: ret stays LTTNG_OK unless set above */
 	rcu_read_unlock();
-
-	return 0;
+	return ret;
 }