#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <wrapper/file.h>
-#include <linux/jhash.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/dmi.h>
#include <wrapper/tracepoint.h>
#include <wrapper/list.h>
#include <wrapper/types.h>
+#include <wrapper/barrier.h>
#include <lttng/kernel-version.h>
#include <lttng/events.h>
+#include <lttng/lttng-bytecode.h>
#include <lttng/tracer.h>
+#include <lttng/event-notifier-notification.h>
#include <lttng/abi-old.h>
#include <lttng/endian.h>
#include <lttng/string-utils.h>
+#include <lttng/utils.h>
#include <ringbuffer/backend.h>
#include <ringbuffer/frontend.h>
+#include <counter/counter.h>
#include <wrapper/time.h>
#define METADATA_CACHE_DEFAULT_SIZE 4096
static LIST_HEAD(sessions);
+static LIST_HEAD(event_notifier_groups);
static LIST_HEAD(lttng_transport_list);
+static LIST_HEAD(lttng_counter_transport_list);
/*
* Protect the sessions and metadata caches.
*/
static DEFINE_MUTEX(sessions_mutex);
static struct kmem_cache *event_cache;
+static struct kmem_cache *event_notifier_cache;
-static void lttng_session_lazy_sync_enablers(struct lttng_session *session);
-static void lttng_session_sync_enablers(struct lttng_session *session);
-static void lttng_enabler_destroy(struct lttng_enabler *enabler);
+static void lttng_session_lazy_sync_event_enablers(struct lttng_session *session);
+static void lttng_session_sync_event_enablers(struct lttng_session *session);
+static void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler);
+static void lttng_event_notifier_enabler_destroy(struct lttng_event_notifier_enabler *event_notifier_enabler);
+static void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group);
static void _lttng_event_destroy(struct lttng_event *event);
-static void _lttng_channel_destroy(struct lttng_channel *chan);
+static void _lttng_event_notifier_destroy(struct lttng_event_notifier *event_notifier);
+static void _lttng_channel_destroy(struct lttng_channel *channel);
+static void _lttng_session_counter_destroy(struct lttng_counter *counter);
static int _lttng_event_unregister(struct lttng_event *event);
+static int _lttng_event_notifier_unregister(struct lttng_event_notifier *event_notifier);
static
int _lttng_event_metadata_statedump(struct lttng_session *session,
- struct lttng_channel *chan,
struct lttng_event *event);
static
int _lttng_session_metadata_statedump(struct lttng_session *session);
const struct lttng_event_field *field,
size_t nesting);
+/*
+ * Return true when the event container is a metadata channel. Only
+ * ring-buffer channels can be metadata channels; counters never are.
+ */
+static bool lttng_event_container_is_metadata_channel(struct lttng_event_container *container)
+{
+	switch (container->type) {
+	case LTTNG_EVENT_CONTAINER_CHANNEL:
+	{
+		struct lttng_channel *chan = lttng_event_container_get_channel(container);
+
+		return chan->channel_type == METADATA_CHANNEL;
+	}
+	case LTTNG_EVENT_CONTAINER_COUNTER:
+		return false;
+	default:
+		return false;
+	}
+}
+
+/* True when the event's container is the session's metadata channel. */
+static bool lttng_event_within_metadata_channel(struct lttng_event *event)
+{
+	return lttng_event_container_is_metadata_channel(event->container);
+}
+
+/*
+ * Wait for a tracing grace period: all in-flight probe callers have
+ * completed. Kernels >= 5.1 only provide regular RCU; older kernels use
+ * sched-RCU, with an additional regular RCU grace period when
+ * PREEMPT_RT(_FULL) is configured.
+ */
void synchronize_trace(void)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,1,0))
	synchronize_rcu();
#else
	synchronize_sched();
#endif
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0))
#ifdef CONFIG_PREEMPT_RT_FULL
	synchronize_rcu();
#endif
-#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
+#else /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
#ifdef CONFIG_PREEMPT_RT
	synchronize_rcu();
#endif
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
+#endif /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
}
void lttng_lock_sessions(void)
mutex_unlock(&sessions_mutex);
}
+/*
+ * Look up a ring-buffer transport by name in the global transport list.
+ * Returns NULL when no transport with that name is registered.
+ */
+static struct lttng_transport *lttng_transport_find(const char *name)
+{
+	struct lttng_transport *transport;
+
+	list_for_each_entry(transport, &lttng_transport_list, node) {
+		if (!strcmp(transport->name, name))
+			return transport;
+	}
+	return NULL;
+}
+
/*
* Called with sessions lock held.
*/
goto err;
INIT_LIST_HEAD(&session->chan);
INIT_LIST_HEAD(&session->events);
+ INIT_LIST_HEAD(&session->counters);
lttng_guid_gen(&session->uuid);
metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
sizeof(metadata_cache->uuid));
INIT_LIST_HEAD(&session->enablers_head);
for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
- INIT_HLIST_HEAD(&session->events_ht.table[i]);
+ INIT_HLIST_HEAD(&session->events_name_ht.table[i]);
+ for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
+ INIT_HLIST_HEAD(&session->events_key_ht.table[i]);
list_add(&session->list, &sessions);
session->pid_tracker.session = session;
session->pid_tracker.tracker_type = TRACKER_PID;
return NULL;
}
+/*
+ * Look up a counter transport by name in the global counter transport
+ * list. Returns NULL when no transport with that name is registered.
+ */
+static
+struct lttng_counter_transport *lttng_counter_transport_find(const char *name)
+{
+	struct lttng_counter_transport *transport;
+
+	list_for_each_entry(transport, &lttng_counter_transport_list, node) {
+		if (!strcmp(transport->name, name))
+			return transport;
+	}
+	return NULL;
+}
+
+/*
+ * Create a counter backed by the named counter transport. Takes a
+ * reference on the transport module, released on destroy or on the
+ * error paths below. Returns NULL on unknown transport, allocation
+ * failure, or backend counter creation failure.
+ */
+static
+struct lttng_counter *lttng_kernel_counter_create(
+		const char *counter_transport_name,
+		size_t number_dimensions, const size_t *dimensions_sizes,
+		bool coalesce_hits)
+{
+	struct lttng_counter_transport *counter_transport = NULL;
+	struct lttng_counter *counter = NULL;
+	struct lttng_event_container *container;
+
+	counter_transport = lttng_counter_transport_find(counter_transport_name);
+	if (!counter_transport) {
+		printk(KERN_WARNING "LTTng: counter transport %s not found.\n",
+				counter_transport_name);
+		goto notransport;
+	}
+	if (!try_module_get(counter_transport->owner)) {
+		printk(KERN_WARNING "LTTng: Can't lock counter transport module.\n");
+		goto notransport;
+	}
+
+	counter = lttng_kvzalloc(sizeof(struct lttng_counter), GFP_KERNEL);
+	if (!counter)
+		goto nomem;
+	container = lttng_counter_get_event_container(counter);
+	container->type = LTTNG_EVENT_CONTAINER_COUNTER;
+	container->coalesce_hits = coalesce_hits;
+	/* Wire the counter to its transport operations. */
+	counter->ops = &counter_transport->ops;
+	counter->transport = counter_transport;
+	mutex_init(&counter->map.lock);
+
+	counter->counter = counter->ops->counter_create(
+			number_dimensions, dimensions_sizes, 0);
+	if (!counter->counter) {
+		printk(KERN_WARNING "LTTng: Error creating counter");
+		goto create_error;
+	}
+
+	return counter;
+
+	/* Error unwind: falls through to release the module reference. */
+create_error:
+	lttng_kvfree(counter);
+nomem:
+	if (counter_transport)
+		module_put(counter_transport->owner);
+notransport:
+	return NULL;
+}
+
+/*
+ * Destroy a counter: tear down the backend counter, drop the transport
+ * module reference, and free the descriptor map and counter memory.
+ */
+static
+void lttng_kernel_counter_destroy(struct lttng_counter *counter)
+{
+	counter->ops->counter_destroy(counter->counter);
+	module_put(counter->transport->owner);
+	lttng_kvfree(counter->map.descriptors);
+	lttng_kvfree(counter);
+}
+
+/*
+ * Set the error counter of an event notifier group. May only be done
+ * once per group: returns -EBUSY when a counter is already set, and
+ * -EINVAL when the counter cannot be created.
+ */
+int lttng_event_notifier_group_set_error_counter(
+		struct lttng_event_notifier_group *event_notifier_group,
+		const char *counter_transport_name,
+		size_t counter_len)
+{
+	struct lttng_counter *counter;
+	int ret;
+
+	/*
+	 * Lock sessions to provide mutual exclusion against concurrent
+	 * modification of the event notifier group, which would result in
+	 * overwriting the error counter if set concurrently.
+	 */
+	mutex_lock(&sessions_mutex);
+
+	if (event_notifier_group->error_counter) {
+		printk(KERN_ERR "Error counter already set in event notifier group\n");
+		ret = -EBUSY;
+		goto error;
+	}
+
+	/* The error counter is a single dimension of counter_len slots. */
+	counter = lttng_kernel_counter_create(counter_transport_name,
+			1, &counter_len, false);
+	if (!counter) {
+		ret = -EINVAL;
+		goto error;
+	}
+
+	event_notifier_group->error_counter_len = counter_len;
+	/*
+	 * store-release to publish error counter matches load-acquire
+	 * in record_error. Ensures the counter is created and the
+	 * error_counter_len is set before they are used.
+	 */
+	lttng_smp_store_release(&event_notifier_group->error_counter,
+			counter);
+
+	mutex_unlock(&sessions_mutex);
+	return 0;
+
+error:
+	mutex_unlock(&sessions_mutex);
+	return ret;
+}
+
+/*
+ * Create an event notifier group, along with the ring buffer used to
+ * transport its notifications. Takes a reference on the ring-buffer
+ * transport module and links the group into the global group list.
+ * Returns NULL on error.
+ */
+struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
+{
+	struct lttng_transport *transport = NULL;
+	struct lttng_event_notifier_group *event_notifier_group;
+	const char *transport_name = "relay-event-notifier";
+	size_t subbuf_size = 4096; //TODO
+	size_t num_subbuf = 16; //TODO
+	unsigned int switch_timer_interval = 0;
+	unsigned int read_timer_interval = 0;
+	int i;
+
+	mutex_lock(&sessions_mutex);
+
+	transport = lttng_transport_find(transport_name);
+	if (!transport) {
+		printk(KERN_WARNING "LTTng: transport %s not found\n",
+				transport_name);
+		goto notransport;
+	}
+	if (!try_module_get(transport->owner)) {
+		printk(KERN_WARNING "LTTng: Can't lock transport %s module.\n",
+				transport_name);
+		goto notransport;
+	}
+
+	event_notifier_group = lttng_kvzalloc(sizeof(struct lttng_event_notifier_group),
+			GFP_KERNEL);
+	if (!event_notifier_group)
+		goto nomem;
+
+	/*
+	 * Initialize the ring buffer used to store event notifier
+	 * notifications.
+	 */
+	event_notifier_group->ops = &transport->ops;
+	event_notifier_group->chan = transport->ops.channel_create(
+			transport_name, event_notifier_group, NULL,
+			subbuf_size, num_subbuf, switch_timer_interval,
+			read_timer_interval);
+	if (!event_notifier_group->chan)
+		goto create_error;
+
+	event_notifier_group->transport = transport;
+
+	INIT_LIST_HEAD(&event_notifier_group->enablers_head);
+	INIT_LIST_HEAD(&event_notifier_group->event_notifiers_head);
+	for (i = 0; i < LTTNG_EVENT_NOTIFIER_HT_SIZE; i++)
+		INIT_HLIST_HEAD(&event_notifier_group->event_notifiers_ht.table[i]);
+
+	list_add(&event_notifier_group->node, &event_notifier_groups);
+
+	mutex_unlock(&sessions_mutex);
+
+	return event_notifier_group;
+
+create_error:
+	lttng_kvfree(event_notifier_group);
+nomem:
+	if (transport)
+		module_put(transport->owner);
+notransport:
+	mutex_unlock(&sessions_mutex);
+	return NULL;
+}
+
+/*
+ * Create a counter owned by a session and link it into the session's
+ * counter list under the sessions mutex. Returns NULL on error.
+ */
+struct lttng_counter *lttng_session_create_counter(
+	struct lttng_session *session,
+	const char *counter_transport_name,
+	size_t number_dimensions, const size_t *dimensions_sizes,
+	bool coalesce_hits)
+{
+	struct lttng_counter *counter;
+	struct lttng_event_container *container;
+
+	counter = lttng_kernel_counter_create(counter_transport_name,
+			number_dimensions, dimensions_sizes,
+			coalesce_hits);
+	if (!counter) {
+		goto counter_error;
+	}
+	container = lttng_counter_get_event_container(counter);
+
+	mutex_lock(&sessions_mutex);
+	container->session = session;
+	list_add(&counter->node, &session->counters);
+	mutex_unlock(&sessions_mutex);
+
+	return counter;
+
+counter_error:
+	return NULL;
+}
+
void metadata_cache_destroy(struct kref *kref)
{
struct lttng_metadata_cache *cache =
struct lttng_channel *chan, *tmpchan;
struct lttng_event *event, *tmpevent;
struct lttng_metadata_stream *metadata_stream;
- struct lttng_enabler *enabler, *tmpenabler;
+ struct lttng_event_enabler *event_enabler, *tmp_event_enabler;
+ struct lttng_counter *counter, *tmpcounter;
int ret;
mutex_lock(&sessions_mutex);
WRITE_ONCE(session->active, 0);
list_for_each_entry(chan, &session->chan, list) {
- ret = lttng_syscalls_unregister(chan);
+ ret = lttng_syscalls_unregister_event_container(lttng_channel_get_event_container(chan));
+ WARN_ON(ret);
+ }
+ list_for_each_entry(counter, &session->counters, node) {
+ ret = lttng_syscalls_unregister_event_container(lttng_counter_get_event_container(counter));
WARN_ON(ret);
}
list_for_each_entry(event, &session->events, list) {
WARN_ON(ret);
}
synchronize_trace(); /* Wait for in-flight events to complete */
- list_for_each_entry_safe(enabler, tmpenabler,
+ list_for_each_entry(chan, &session->chan, list) {
+ ret = lttng_syscalls_destroy_event_container(lttng_channel_get_event_container(chan));
+ WARN_ON(ret);
+ }
+ list_for_each_entry(counter, &session->counters, node) {
+ ret = lttng_syscalls_destroy_event_container(lttng_counter_get_event_container(counter));
+ WARN_ON(ret);
+ }
+ list_for_each_entry_safe(event_enabler, tmp_event_enabler,
&session->enablers_head, node)
- lttng_enabler_destroy(enabler);
+ lttng_event_enabler_destroy(event_enabler);
list_for_each_entry_safe(event, tmpevent, &session->events, list)
_lttng_event_destroy(event);
list_for_each_entry_safe(chan, tmpchan, &session->chan, list) {
BUG_ON(chan->channel_type == METADATA_CHANNEL);
_lttng_channel_destroy(chan);
}
+ list_for_each_entry_safe(counter, tmpcounter, &session->counters, node)
+ _lttng_session_counter_destroy(counter);
+ mutex_lock(&session->metadata_cache->lock);
list_for_each_entry(metadata_stream, &session->metadata_cache->metadata_stream, list)
_lttng_metadata_channel_hangup(metadata_stream);
+ mutex_unlock(&session->metadata_cache->lock);
lttng_id_tracker_destroy(&session->pid_tracker, false);
lttng_id_tracker_destroy(&session->vpid_tracker, false);
lttng_id_tracker_destroy(&session->uid_tracker, false);
lttng_kvfree(session);
}
+/*
+ * Destroy an event notifier group: unregister its syscall
+ * instrumentation and all notifiers, wait for in-flight probes to
+ * drain, then release enablers, notifiers, the optional error counter,
+ * the notification channel, and the group itself. NULL is a no-op.
+ */
+void lttng_event_notifier_group_destroy(
+		struct lttng_event_notifier_group *event_notifier_group)
+{
+	struct lttng_event_notifier_enabler *event_notifier_enabler, *tmp_event_notifier_enabler;
+	struct lttng_event_notifier *event_notifier, *tmpevent_notifier;
+	int ret;
+
+	if (!event_notifier_group)
+		return;
+
+	mutex_lock(&sessions_mutex);
+
+	ret = lttng_syscalls_unregister_event_notifier_group(event_notifier_group);
+	WARN_ON(ret);
+
+	list_for_each_entry_safe(event_notifier, tmpevent_notifier,
+			&event_notifier_group->event_notifiers_head, list) {
+		ret = _lttng_event_notifier_unregister(event_notifier);
+		WARN_ON(ret);
+	}
+
+	/* Wait for in-flight event notifier to complete */
+	synchronize_trace();
+
+	irq_work_sync(&event_notifier_group->wakeup_pending);
+
+	kfree(event_notifier_group->sc_filter);
+
+	list_for_each_entry_safe(event_notifier_enabler, tmp_event_notifier_enabler,
+			&event_notifier_group->enablers_head, node)
+		lttng_event_notifier_enabler_destroy(event_notifier_enabler);
+
+	list_for_each_entry_safe(event_notifier, tmpevent_notifier,
+			&event_notifier_group->event_notifiers_head, list)
+		_lttng_event_notifier_destroy(event_notifier);
+
+	/* Notifiers are gone; safe to tear down the error counter. */
+	if (event_notifier_group->error_counter) {
+		struct lttng_counter *error_counter = event_notifier_group->error_counter;
+
+		lttng_kernel_counter_destroy(error_counter);
+		event_notifier_group->error_counter = NULL;
+	}
+
+	event_notifier_group->ops->channel_destroy(event_notifier_group->chan);
+	module_put(event_notifier_group->transport->owner);
+	list_del(&event_notifier_group->node);
+
+	mutex_unlock(&sessions_mutex);
+	lttng_kvfree(event_notifier_group);
+}
+
int lttng_session_statedump(struct lttng_session *session)
{
int ret;
session->tstate = 1;
/* We need to sync enablers with session before activation. */
- lttng_session_sync_enablers(session);
+ lttng_session_sync_event_enablers(session);
/*
* Snapshot the number of events per channel to know the type of header
/* Set transient enabler state to "disabled" */
session->tstate = 0;
- lttng_session_sync_enablers(session);
+ lttng_session_sync_event_enablers(session);
/* Set each stream's quiescent state. */
list_for_each_entry(chan, &session->chan, list) {
return ret;
}
-int lttng_channel_enable(struct lttng_channel *channel)
+/*
+ * Enable an event container. Refused on the metadata channel (-EPERM)
+ * and when already enabled (-EEXIST). Event enablers are synced before
+ * the enabled state is published.
+ */
+int lttng_event_container_enable(struct lttng_event_container *container)
{
	int ret = 0;
	mutex_lock(&sessions_mutex);
-	if (channel->channel_type == METADATA_CHANNEL) {
+	if (lttng_event_container_is_metadata_channel(container)) {
		ret = -EPERM;
		goto end;
	}
-	if (channel->enabled) {
+	if (container->enabled) {
		ret = -EEXIST;
		goto end;
	}
	/* Set transient enabler state to "enabled" */
-	channel->tstate = 1;
-	lttng_session_sync_enablers(channel->session);
+	container->tstate = 1;
+	lttng_session_sync_event_enablers(container->session);
	/* Set atomically the state to "enabled" */
-	WRITE_ONCE(channel->enabled, 1);
+	WRITE_ONCE(container->enabled, 1);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
-int lttng_channel_disable(struct lttng_channel *channel)
+int lttng_event_container_disable(struct lttng_event_container *container)
{
int ret = 0;
mutex_lock(&sessions_mutex);
- if (channel->channel_type == METADATA_CHANNEL) {
+ if (lttng_event_container_is_metadata_channel(container)) {
ret = -EPERM;
goto end;
}
- if (!channel->enabled) {
+ if (!container->enabled) {
ret = -EEXIST;
goto end;
}
/* Set atomically the state to "disabled" */
- WRITE_ONCE(channel->enabled, 0);
+ WRITE_ONCE(container->enabled, 0);
/* Set transient enabler state to "enabled" */
- channel->tstate = 0;
- lttng_session_sync_enablers(channel->session);
+ container->tstate = 0;
+ lttng_session_sync_event_enablers(container->session);
end:
mutex_unlock(&sessions_mutex);
return ret;
int ret = 0;
mutex_lock(&sessions_mutex);
- if (event->chan->channel_type == METADATA_CHANNEL) {
+ if (lttng_event_within_metadata_channel(event)) {
ret = -EPERM;
goto end;
}
int ret = 0;
mutex_lock(&sessions_mutex);
- if (event->chan->channel_type == METADATA_CHANNEL) {
+ if (lttng_event_within_metadata_channel(event)) {
ret = -EPERM;
goto end;
}
return ret;
}
-static struct lttng_transport *lttng_transport_find(const char *name)
+/*
+ * Enable an event notifier. Only kprobe and uprobe notifiers can be
+ * toggled directly; tracepoint and syscall notifiers return -EINVAL
+ * (presumably managed through their enablers -- TODO confirm).
+ * Returns -EEXIST when already enabled.
+ */
+int lttng_event_notifier_enable(struct lttng_event_notifier *event_notifier)
{
-	struct lttng_transport *transport;
+	int ret = 0;
-	list_for_each_entry(transport, &lttng_transport_list, node) {
-		if (!strcmp(transport->name, name))
-			return transport;
+	mutex_lock(&sessions_mutex);
+	if (event_notifier->enabled) {
+		ret = -EEXIST;
+		goto end;
	}
-	return NULL;
+	switch (event_notifier->instrumentation) {
+	case LTTNG_KERNEL_TRACEPOINT:
+	case LTTNG_KERNEL_SYSCALL:
+		ret = -EINVAL;
+		break;
+	case LTTNG_KERNEL_KPROBE:
+	case LTTNG_KERNEL_UPROBE:
+		WRITE_ONCE(event_notifier->enabled, 1);
+		break;
+	case LTTNG_KERNEL_FUNCTION:
+	case LTTNG_KERNEL_NOOP:
+	case LTTNG_KERNEL_KRETPROBE:
+	default:
+		WARN_ON_ONCE(1);
+		ret = -EINVAL;
+	}
+end:
+	mutex_unlock(&sessions_mutex);
+	return ret;
+}
+
+/*
+ * Disable an event notifier. Mirror of lttng_event_notifier_enable():
+ * only kprobe and uprobe notifiers can be toggled directly; tracepoint
+ * and syscall notifiers return -EINVAL. Returns -EEXIST when already
+ * disabled.
+ */
+int lttng_event_notifier_disable(struct lttng_event_notifier *event_notifier)
+{
+	int ret = 0;
+
+	mutex_lock(&sessions_mutex);
+	if (!event_notifier->enabled) {
+		ret = -EEXIST;
+		goto end;
+	}
+	switch (event_notifier->instrumentation) {
+	case LTTNG_KERNEL_TRACEPOINT:
+	case LTTNG_KERNEL_SYSCALL:
+		ret = -EINVAL;
+		break;
+	case LTTNG_KERNEL_KPROBE:
+	case LTTNG_KERNEL_UPROBE:
+		WRITE_ONCE(event_notifier->enabled, 0);
+		break;
+	case LTTNG_KERNEL_FUNCTION:
+	case LTTNG_KERNEL_NOOP:
+	case LTTNG_KERNEL_KRETPROBE:
+	default:
+		WARN_ON_ONCE(1);
+		ret = -EINVAL;
+	}
+end:
+	mutex_unlock(&sessions_mutex);
+	return ret;
+}
struct lttng_channel *lttng_channel_create(struct lttng_session *session,
unsigned int read_timer_interval,
enum channel_type channel_type)
{
- struct lttng_channel *chan;
+ struct lttng_event_container *container;
+ struct lttng_channel *chan = NULL;
struct lttng_transport *transport = NULL;
mutex_lock(&sessions_mutex);
goto active; /* Refuse to add channel to active session */
transport = lttng_transport_find(transport_name);
if (!transport) {
- printk(KERN_WARNING "LTTng transport %s not found\n",
+ printk(KERN_WARNING "LTTng: transport %s not found\n",
transport_name);
goto notransport;
}
if (!try_module_get(transport->owner)) {
- printk(KERN_WARNING "LTT : Can't lock transport module.\n");
+ printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
goto notransport;
}
- chan = kzalloc(sizeof(struct lttng_channel), GFP_KERNEL);
+ chan = lttng_kvzalloc(sizeof(struct lttng_channel), GFP_KERNEL);
if (!chan)
goto nomem;
- chan->session = session;
+ container = lttng_channel_get_event_container(chan);
+ container->type = LTTNG_EVENT_CONTAINER_CHANNEL;
+ container->session = session;
+ container->tstate = 1;
+ container->enabled = 1;
+ /*
+ * The ring buffer always coalesces hits from various event
+ * enablers matching a given event to a single event record within the
+ * ring buffer.
+ */
+ container->coalesce_hits = true;
+
chan->id = session->free_chan_id++;
chan->ops = &transport->ops;
/*
* should be already accessible.
*/
chan->chan = transport->ops.channel_create(transport_name,
- chan, buf_addr, subbuf_size, num_subbuf,
+ container, buf_addr, subbuf_size, num_subbuf,
switch_timer_interval, read_timer_interval);
if (!chan->chan)
goto create_error;
- chan->tstate = 1;
- chan->enabled = 1;
chan->transport = transport;
chan->channel_type = channel_type;
list_add(&chan->list, &session->chan);
return chan;
create_error:
- kfree(chan);
+ lttng_kvfree(chan);
nomem:
if (transport)
module_put(transport->owner);
return NULL;
}
+/*
+ * Unlink a session-owned counter from the session's counter list and
+ * destroy it. Caller holds sessions_mutex.
+ */
+static
+void _lttng_session_counter_destroy(struct lttng_counter *counter)
+{
+	list_del(&counter->node);
+	lttng_kernel_counter_destroy(counter);
+}
+
/*
* Only used internally at session destruction for per-cpu channels, and
* when metadata channel is released.
module_put(chan->transport->owner);
list_del(&chan->list);
lttng_destroy_context(chan->ctx);
- kfree(chan);
+ lttng_kvfree(chan);
}
void lttng_metadata_channel_destroy(struct lttng_channel *chan)
{
BUG_ON(chan->channel_type != METADATA_CHANNEL);
-
/* Protect the metadata cache with the sessions_mutex. */
mutex_lock(&sessions_mutex);
_lttng_channel_destroy(chan);
wake_up_interruptible(&stream->read_wait);
}
+/*
+ * Return true when the container has no free event id/index left.
+ * Channels use -1U as the exhausted sentinel of free_event_id; counters
+ * compare free_index against the backend's maximum number of elements.
+ * Any error querying the counter backend is conservatively treated as
+ * "full".
+ */
+static
+bool lttng_event_container_current_id_full(struct lttng_event_container *container)
+{
+	switch (container->type) {
+	case LTTNG_EVENT_CONTAINER_CHANNEL:
+	{
+		struct lttng_channel *channel = lttng_event_container_get_channel(container);
+
+		return channel->free_event_id == -1U;
+	}
+	case LTTNG_EVENT_CONTAINER_COUNTER:
+	{
+		struct lttng_counter *counter = lttng_event_container_get_counter(container);
+		size_t nr_dimensions, max_nr_elem;
+
+		if (lttng_counter_get_nr_dimensions(&counter->counter->config,
+				counter->counter, &nr_dimensions))
+			return true;
+		/* Event-id allocation only supports one dimension. */
+		WARN_ON_ONCE(nr_dimensions != 1);
+		if (nr_dimensions != 1)
+			return true;
+		if (lttng_counter_get_max_nr_elem(&counter->counter->config,
+				counter->counter, &max_nr_elem))
+			return true;
+		return counter->free_index >= max_nr_elem;
+	}
+	default:
+		WARN_ON_ONCE(1);
+		return true;
+	}
+}
+
+
+/*
+ * Allocate an event id within a container. Events sharing the same
+ * non-empty key string reuse the already-allocated id, so their hits
+ * aggregate into the same slot. Returns -EMFILE when the container has
+ * no ids left, 0 otherwise.
+ */
+static
+int lttng_event_container_allocate_id(struct lttng_event_container *container,
+		const char *key_string, size_t *id)
+{
+	struct lttng_session *session = container->session;
+	struct lttng_event *event;
+
+	if (key_string[0]) {
+		struct hlist_head *head;
+
+		head = utils_borrow_hash_table_bucket(session->events_key_ht.table,
+			LTTNG_EVENT_HT_SIZE, key_string);
+		lttng_hlist_for_each_entry(event, head, key_hlist) {
+			if (!strcmp(key_string, event->key)) {
+				/* Same key, use same id. */
+				*id = event->id;
+				return 0;
+			}
+		}
+	}
+
+	if (lttng_event_container_current_id_full(container)) {
+		return -EMFILE;
+	}
+
+	switch (container->type) {
+	case LTTNG_EVENT_CONTAINER_CHANNEL:
+	{
+		struct lttng_channel *channel = lttng_event_container_get_channel(container);
+		*id = channel->free_event_id++;
+		break;
+	}
+	case LTTNG_EVENT_CONTAINER_COUNTER:
+	{
+		struct lttng_counter *counter = lttng_event_container_get_counter(container);
+		*id = counter->free_index++;
+		break;
+	}
+	default:
+		/* NOTE(review): returns 0 without setting *id -- callers
+		 * would read an uninitialized id; confirm unreachable. */
+		WARN_ON_ONCE(1);
+		return 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Build the key string of an event by concatenating the key tokens of
+ * its (single supported) key dimension into key_string. Produces an
+ * empty string when no key is given. Returns -EINVAL on more than one
+ * dimension, an unknown token type, or when the concatenation would
+ * overflow the LTTNG_KEY_TOKEN_STRING_LEN_MAX buffer.
+ */
+static
+int format_event_key(char *key_string, const struct lttng_counter_key *key,
+		const char *event_name)
+{
+	const struct lttng_counter_key_dimension *dim;
+	size_t i, left = LTTNG_KEY_TOKEN_STRING_LEN_MAX;
+
+	key_string[0] = '\0';
+	if (!key || !key->nr_dimensions)
+		return 0;
+	/* Currently event keys can only be specified on a single dimension. */
+	if (key->nr_dimensions != 1)
+		return -EINVAL;
+	dim = &key->key_dimensions[0];
+	for (i = 0; i < dim->nr_key_tokens; i++) {
+		const struct lttng_key_token *token = &dim->key_tokens[i];
+		size_t token_len;
+		const char *str;
+
+		switch (token->type) {
+		case LTTNG_KEY_TOKEN_STRING:
+			str = token->arg.string;
+			break;
+		case LTTNG_KEY_TOKEN_EVENT_NAME:
+			str = event_name;
+			break;
+		default:
+			return -EINVAL;
+		}
+		/* `left` counts remaining bytes; strict `>=` keeps room for the NUL. */
+		token_len = strlen(str);
+		if (token_len >= left)
+			return -EINVAL;
+		strcat(key_string, str);
+		left -= token_len;
+	}
+	return 0;
+}
+
+/*
+ * Token match for event lookup: containers that coalesce hits match any
+ * token; otherwise the event's user token must equal the requested one.
+ */
+static
+bool match_event_token(struct lttng_event_container *container,
+		struct lttng_event *event, uint64_t token)
+{
+	if (container->coalesce_hits)
+		return true;
+	if (event->user_token == token)
+		return true;
+	return false;
+}
+
+/*
+ * Append a {user_token, array index, key} descriptor to the counter's
+ * map, growing the descriptor table (at least doubling) under map->lock
+ * as needed. Returns -EOVERFLOW when the key does not fit a descriptor
+ * slot, -ENOMEM when the table cannot grow, 0 on success.
+ */
+static
+int lttng_counter_append_descriptor(struct lttng_counter *counter,
+		uint64_t user_token,
+		size_t index,
+		const char *key)
+{
+	struct lttng_counter_map *map = &counter->map;
+	struct lttng_counter_map_descriptor *last;
+	int ret = 0;
+
+	if (strlen(key) >= LTTNG_KERNEL_COUNTER_KEY_LEN) {
+		WARN_ON_ONCE(1);
+		return -EOVERFLOW;
+	}
+	mutex_lock(&map->lock);
+	if (map->nr_descriptors == map->alloc_len) {
+		struct lttng_counter_map_descriptor *new_table, *old_table;
+		size_t old_len = map->nr_descriptors;
+		size_t new_len = max_t(size_t, old_len + 1, map->alloc_len * 2);
+
+		old_table = map->descriptors;
+		new_table = lttng_kvzalloc(sizeof(struct lttng_counter_map_descriptor) * new_len,
+				GFP_KERNEL);
+		if (!new_table) {
+			ret = -ENOMEM;
+			goto unlock;
+		}
+
+		if (old_table)
+			memcpy(new_table, old_table, old_len * sizeof(struct lttng_counter_map_descriptor));
+
+		map->descriptors = new_table;
+		map->alloc_len = new_len;
+		lttng_kvfree(old_table);
+	}
+	last = &map->descriptors[map->nr_descriptors++];
+	last->user_token = user_token;
+	last->array_index = index;
+	/* Length checked against LTTNG_KERNEL_COUNTER_KEY_LEN above. */
+	strcpy(last->key, key);
+unlock:
+	mutex_unlock(&map->lock);
+	return ret;
+}
+
/*
* Supports event creation while tracing session is active.
* Needs to be called with sessions mutex held.
*/
-struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
+struct lttng_event *_lttng_event_create(struct lttng_event_container *container,
struct lttng_kernel_event *event_param,
+ const struct lttng_counter_key *key,
void *filter,
const struct lttng_event_desc *event_desc,
- enum lttng_kernel_instrumentation itype)
+ enum lttng_kernel_instrumentation itype,
+ uint64_t token)
{
- struct lttng_session *session = chan->session;
+ struct lttng_session *session;
struct lttng_event *event;
- const char *event_name;
- struct hlist_head *head;
- size_t name_len;
- uint32_t hash;
+ char event_name[LTTNG_KERNEL_SYM_NAME_LEN];
+ struct hlist_head *name_head, *key_head;
+ char key_string[LTTNG_KEY_TOKEN_STRING_LEN_MAX];
int ret;
- if (chan->free_event_id == -1U) {
- ret = -EMFILE;
- goto full;
- }
-
+ session = container->session;
switch (itype) {
case LTTNG_KERNEL_TRACEPOINT:
- event_name = event_desc->name;
+ if (strlen(event_desc->name) >= LTTNG_KERNEL_SYM_NAME_LEN) {
+ ret = -EINVAL;
+ goto type_error;
+ }
+ strcpy(event_name, event_desc->name);
break;
case LTTNG_KERNEL_KPROBE:
case LTTNG_KERNEL_UPROBE:
- case LTTNG_KERNEL_KRETPROBE:
- case LTTNG_KERNEL_NOOP:
case LTTNG_KERNEL_SYSCALL:
- event_name = event_param->name;
+ if (strlen(event_param->name) >= LTTNG_KERNEL_SYM_NAME_LEN) {
+ ret = -EINVAL;
+ goto type_error;
+ }
+ strcpy(event_name, event_param->name);
+ break;
+ case LTTNG_KERNEL_KRETPROBE:
+ if (strlen(event_param->name) >= LTTNG_KERNEL_SYM_NAME_LEN) {
+ ret = -EINVAL;
+ goto type_error;
+ }
+ strcpy(event_name, event_param->name);
+ if (strlen(event_name) + strlen("_entry") >= LTTNG_KERNEL_SYM_NAME_LEN) {
+ ret = -EINVAL;
+ goto type_error;
+ }
+ strcat(event_name, "_entry");
break;
case LTTNG_KERNEL_FUNCTION: /* Fall-through. */
+ case LTTNG_KERNEL_NOOP: /* Fall-through. */
default:
WARN_ON_ONCE(1);
ret = -EINVAL;
goto type_error;
}
- name_len = strlen(event_name);
- hash = jhash(event_name, name_len, 0);
- head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
- lttng_hlist_for_each_entry(event, head, hlist) {
+
+ if (format_event_key(key_string, key, event_name)) {
+ ret = -EINVAL;
+ goto type_error;
+ }
+
+ name_head = utils_borrow_hash_table_bucket(session->events_name_ht.table,
+ LTTNG_EVENT_HT_SIZE, event_name);
+ lttng_hlist_for_each_entry(event, name_head, name_hlist) {
+ bool same_event = false, same_container = false, same_key = false,
+ same_token = false;
+
WARN_ON_ONCE(!event->desc);
- if (!strncmp(event->desc->name, event_name,
- LTTNG_KERNEL_SYM_NAME_LEN - 1)
- && chan == event->chan) {
+ if (event_desc) {
+ if (event->desc == event_desc)
+ same_event = true;
+ } else {
+ if (!strcmp(event_name, event->desc->name))
+ same_event = true;
+ }
+ if (container == event->container) {
+ same_container = true;
+ if (match_event_token(container, event, token))
+ same_token = true;
+ }
+ if (key_string[0] == '\0' || !strcmp(key_string, event->key))
+ same_key = true;
+ if (same_event && same_container && same_key && same_token) {
ret = -EEXIST;
goto exist;
}
ret = -ENOMEM;
goto cache_error;
}
- event->chan = chan;
+ event->container = container;
event->filter = filter;
- event->id = chan->free_event_id++;
event->instrumentation = itype;
event->evtype = LTTNG_TYPE_EVENT;
- INIT_LIST_HEAD(&event->bytecode_runtime_head);
+ if (!container->coalesce_hits)
+ event->user_token = token;
+ INIT_LIST_HEAD(&event->filter_bytecode_runtime_head);
INIT_LIST_HEAD(&event->enablers_ref_head);
+ if (lttng_event_container_allocate_id(container, key_string,
+ &event->id)) {
+ ret = -EMFILE;
+ goto full;
+ }
+ if (key_string[0]) {
+ key_head = utils_borrow_hash_table_bucket(session->events_key_ht.table,
+ LTTNG_EVENT_HT_SIZE, key_string);
+ hlist_add_head(&event->key_hlist, key_head);
+ }
+ strcpy(event->key, key_string);
switch (itype) {
case LTTNG_KERNEL_TRACEPOINT:
/* Event will be enabled by enabler sync. */
event->enabled = 0;
event->registered = 0;
- event->desc = lttng_event_get(event_name);
+ event->desc = lttng_event_desc_get(event_name);
if (!event->desc) {
ret = -ENOENT;
goto register_error;
* registration.
*/
smp_wmb();
- ret = lttng_kprobes_register(event_name,
+ ret = lttng_kprobes_register_event(event_name,
event_param->u.kprobe.symbol_name,
event_param->u.kprobe.offset,
event_param->u.kprobe.addr,
ret = -EINVAL;
goto register_error;
}
+ event->u.kprobe.user_token = token;
ret = try_module_get(event->desc->owner);
WARN_ON_ONCE(!ret);
+
+ /* Append descriptor to counter. */
+ switch (container->type) {
+ case LTTNG_EVENT_CONTAINER_COUNTER:
+ {
+ struct lttng_counter *counter;
+ const char *name = "<UNKNOWN>";
+ int ret;
+
+ counter = lttng_event_container_get_counter(container);
+ if (event->key[0])
+ name = event->key;
+ else
+ name = event_name;
+ ret = lttng_counter_append_descriptor(counter,
+ token, event->id,
+ name);
+ if (ret) {
+ WARN_ON_ONCE(1);
+ }
+ break;
+ }
+ case LTTNG_EVENT_CONTAINER_CHANNEL:
+ default:
+ break;
+ }
break;
case LTTNG_KERNEL_KRETPROBE:
{
*/
event->enabled = 0;
event->registered = 1;
+ event->u.kretprobe.user_token = token;
+
+ /* Append descriptor to counter. */
+ switch (container->type) {
+ case LTTNG_EVENT_CONTAINER_COUNTER:
+ {
+ struct lttng_counter *counter;
+ const char *name = "<UNKNOWN>";
+ int ret;
+
+ counter = lttng_event_container_get_counter(container);
+ if (event->key[0])
+ name = event->key;
+ else
+ name = event_name;
+ ret = lttng_counter_append_descriptor(counter,
+ token, event->id,
+ name);
+ if (ret) {
+ WARN_ON_ONCE(1);
+ }
+ break;
+ }
+ case LTTNG_EVENT_CONTAINER_CHANNEL:
+ default:
+ break;
+ }
+
event_return =
kmem_cache_zalloc(event_cache, GFP_KERNEL);
if (!event_return) {
ret = -ENOMEM;
goto register_error;
}
- event_return->chan = chan;
+ event_return->container = container;
event_return->filter = filter;
- event_return->id = chan->free_event_id++;
+
+ strcpy(event_name, event_param->name);
+ if (strlen(event_name) + strlen("_return") >= LTTNG_KERNEL_SYM_NAME_LEN) {
+ ret = -EINVAL;
+ goto register_error;
+ }
+ strcat(event_name, "_return");
+ if (format_event_key(key_string, key, event_name)) {
+ ret = -EINVAL;
+ goto register_error;
+ }
+ if (lttng_event_container_allocate_id(container, key_string, &event_return->id)) {
+ kmem_cache_free(event_cache, event_return);
+ ret = -EMFILE;
+ goto register_error;
+ }
+ key_head = utils_borrow_hash_table_bucket(session->events_key_ht.table,
+ LTTNG_EVENT_HT_SIZE, key_string);
+ hlist_add_head(&event_return->key_hlist, key_head);
event_return->enabled = 0;
event_return->registered = 1;
event_return->instrumentation = itype;
+ INIT_LIST_HEAD(&event_return->filter_bytecode_runtime_head);
+ INIT_LIST_HEAD(&event_return->enablers_ref_head);
+ event_return->u.kretprobe.user_token = token;
+ strcpy(event_return->key, key_string);
/*
* Populate lttng_event structure before kretprobe registration.
*/
WARN_ON_ONCE(!ret);
ret = try_module_get(event->desc->owner);
WARN_ON_ONCE(!ret);
- ret = _lttng_event_metadata_statedump(chan->session, chan,
- event_return);
+
+ /* Append exit descriptor to counter. */
+ switch (container->type) {
+ case LTTNG_EVENT_CONTAINER_COUNTER:
+ {
+ struct lttng_counter *counter;
+ const char *name = "<UNKNOWN>";
+ int ret;
+
+ counter = lttng_event_container_get_counter(container);
+ if (event_return->key[0])
+ name = event_return->key;
+ else
+ name = event_name;
+ ret = lttng_counter_append_descriptor(counter,
+ token, event_return->id,
+ name);
+ if (ret) {
+ WARN_ON_ONCE(1);
+ }
+ break;
+ }
+ case LTTNG_EVENT_CONTAINER_CHANNEL:
+ default:
+ break;
+ }
+ switch (container->type) {
+ case LTTNG_EVENT_CONTAINER_CHANNEL:
+ ret = _lttng_event_metadata_statedump(session, event_return);
+ WARN_ON_ONCE(ret > 0);
+ if (ret) {
+ kmem_cache_free(event_cache, event_return);
+ module_put(event->desc->owner);
+ module_put(event->desc->owner);
+ goto statedump_error;
+ }
+ break;
+ case LTTNG_EVENT_CONTAINER_COUNTER:
+ default:
+ break;
+ }
+ list_add(&event_return->list, &session->events);
+ break;
+ }
+ case LTTNG_KERNEL_SYSCALL:
+ /*
+ * Needs to be explicitly enabled after creation, since
+ * we may want to apply filters.
+ */
+ event->enabled = 0;
+ event->registered = 0;
+ event->desc = event_desc;
+ switch (event_param->u.syscall.entryexit) {
+ case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
+ ret = -EINVAL;
+ goto register_error;
+ case LTTNG_KERNEL_SYSCALL_ENTRY:
+ event->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
+ break;
+ case LTTNG_KERNEL_SYSCALL_EXIT:
+ event->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
+ break;
+ }
+ switch (event_param->u.syscall.abi) {
+ case LTTNG_KERNEL_SYSCALL_ABI_ALL:
+ ret = -EINVAL;
+ goto register_error;
+ case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
+ event->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
+ break;
+ case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
+ event->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
+ break;
+ }
+ if (!event->desc) {
+ ret = -EINVAL;
+ goto register_error;
+ }
+ break;
+ case LTTNG_KERNEL_UPROBE:
+ /*
+ * Needs to be explicitly enabled after creation, since
+ * we may want to apply filters.
+ */
+ event->enabled = 0;
+ event->registered = 1;
+ event->u.uprobe.user_token = token;
+
+ /*
+ * Populate lttng_event structure before event
+ * registration.
+ */
+ smp_wmb();
+
+ ret = lttng_uprobes_register_event(event_name,
+ event_param->u.uprobe.fd,
+ event);
+ if (ret)
+ goto register_error;
+ ret = try_module_get(event->desc->owner);
+ WARN_ON_ONCE(!ret);
+
+ /* Append descriptor to counter. */
+ switch (container->type) {
+ case LTTNG_EVENT_CONTAINER_COUNTER:
+ {
+ struct lttng_counter *counter;
+ const char *name = "<UNKNOWN>";
+ int ret;
+
+ counter = lttng_event_container_get_counter(container);
+ if (event->key[0])
+ name = event->key;
+ else
+ name = event_name;
+ ret = lttng_counter_append_descriptor(counter,
+ token, event->id,
+ name);
+ if (ret) {
+ WARN_ON_ONCE(1);
+ }
+ break;
+ }
+ case LTTNG_EVENT_CONTAINER_CHANNEL:
+ default:
+ break;
+ }
+ break;
+ case LTTNG_KERNEL_FUNCTION: /* Fall-through. */
+ case LTTNG_KERNEL_NOOP: /* Fall-through.*/
+ default:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ goto register_error;
+ }
+ switch (container->type) {
+ case LTTNG_EVENT_CONTAINER_CHANNEL:
+ ret = _lttng_event_metadata_statedump(session, event);
WARN_ON_ONCE(ret > 0);
if (ret) {
- kmem_cache_free(event_cache, event_return);
- module_put(event->desc->owner);
- module_put(event->desc->owner);
goto statedump_error;
}
- list_add(&event_return->list, &chan->session->events);
break;
- }
+ case LTTNG_EVENT_CONTAINER_COUNTER:
+ default:
+ break;
+ }
+ hlist_add_head(&event->name_hlist, name_head);
+ list_add(&event->list, &session->events);
+ return event;
+
+statedump_error:
+ /* If a statedump error occurs, events will not be readable. */
+register_error:
+full:
+ kmem_cache_free(event_cache, event);
+cache_error:
+exist:
+type_error:
+ return ERR_PTR(ret);
+}
+
+struct lttng_event_notifier *_lttng_event_notifier_create(
+ const struct lttng_event_desc *event_desc,
+ uint64_t token, uint64_t error_counter_index,
+ struct lttng_event_notifier_group *event_notifier_group,
+ struct lttng_kernel_event_notifier *event_notifier_param,
+ void *filter, enum lttng_kernel_instrumentation itype)
+{
+ struct lttng_event_notifier *event_notifier;
+ struct lttng_counter *error_counter;
+ const char *event_name;
+ struct hlist_head *head;
+ int ret;
+
+ switch (itype) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ event_name = event_desc->name;
+ break;
+ case LTTNG_KERNEL_KPROBE:
+ case LTTNG_KERNEL_UPROBE:
+ case LTTNG_KERNEL_SYSCALL:
+ event_name = event_notifier_param->event.name;
+ break;
+ case LTTNG_KERNEL_KRETPROBE:
+ case LTTNG_KERNEL_FUNCTION:
+ case LTTNG_KERNEL_NOOP:
+ default:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ goto type_error;
+ }
+
+ head = utils_borrow_hash_table_bucket(event_notifier_group->event_notifiers_ht.table,
+ LTTNG_EVENT_NOTIFIER_HT_SIZE, event_name);
+ lttng_hlist_for_each_entry(event_notifier, head, hlist) {
+ WARN_ON_ONCE(!event_notifier->desc);
+ if (!strncmp(event_notifier->desc->name, event_name,
+ LTTNG_KERNEL_SYM_NAME_LEN - 1)
+ && event_notifier_group == event_notifier->group
+ && token == event_notifier->user_token) {
+ ret = -EEXIST;
+ goto exist;
+ }
+ }
+
+ event_notifier = kmem_cache_zalloc(event_notifier_cache, GFP_KERNEL);
+ if (!event_notifier) {
+ ret = -ENOMEM;
+ goto cache_error;
+ }
+
+ event_notifier->group = event_notifier_group;
+ event_notifier->user_token = token;
+ event_notifier->error_counter_index = error_counter_index;
+ event_notifier->num_captures = 0;
+ event_notifier->filter = filter;
+ event_notifier->instrumentation = itype;
+ event_notifier->evtype = LTTNG_TYPE_EVENT;
+ event_notifier->send_notification = lttng_event_notifier_notification_send;
+ INIT_LIST_HEAD(&event_notifier->filter_bytecode_runtime_head);
+ INIT_LIST_HEAD(&event_notifier->capture_bytecode_runtime_head);
+ INIT_LIST_HEAD(&event_notifier->enablers_ref_head);
+
+ switch (itype) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ /* Event will be enabled by enabler sync. */
+ event_notifier->enabled = 0;
+ event_notifier->registered = 0;
+ event_notifier->desc = lttng_event_desc_get(event_name);
+ if (!event_notifier->desc) {
+ ret = -ENOENT;
+ goto register_error;
+ }
+ /* Populate lttng_event_notifier structure before event registration. */
+ smp_wmb();
+ break;
+ case LTTNG_KERNEL_KPROBE:
+ /*
+ * Needs to be explicitly enabled after creation, since
+ * we may want to apply filters.
+ */
+ event_notifier->enabled = 0;
+ event_notifier->registered = 1;
+ /*
+ * Populate lttng_event_notifier structure before event
+ * registration.
+ */
+ smp_wmb();
+ ret = lttng_kprobes_register_event_notifier(
+ event_notifier_param->event.u.kprobe.symbol_name,
+ event_notifier_param->event.u.kprobe.offset,
+ event_notifier_param->event.u.kprobe.addr,
+ event_notifier);
+ if (ret) {
+ ret = -EINVAL;
+ goto register_error;
+ }
+ ret = try_module_get(event_notifier->desc->owner);
+ WARN_ON_ONCE(!ret);
+ break;
case LTTNG_KERNEL_NOOP:
case LTTNG_KERNEL_SYSCALL:
/*
* Needs to be explicitly enabled after creation, since
* we may want to apply filters.
*/
- event->enabled = 0;
- event->registered = 0;
- event->desc = event_desc;
- if (!event->desc) {
+ event_notifier->enabled = 0;
+ event_notifier->registered = 0;
+ event_notifier->desc = event_desc;
+ switch (event_notifier_param->event.u.syscall.entryexit) {
+ case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
+ ret = -EINVAL;
+ goto register_error;
+ case LTTNG_KERNEL_SYSCALL_ENTRY:
+ event_notifier->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
+ break;
+ case LTTNG_KERNEL_SYSCALL_EXIT:
+ event_notifier->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
+ break;
+ }
+ switch (event_notifier_param->event.u.syscall.abi) {
+ case LTTNG_KERNEL_SYSCALL_ABI_ALL:
+ ret = -EINVAL;
+ goto register_error;
+ case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
+ event_notifier->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
+ break;
+ case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
+ event_notifier->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
+ break;
+ }
+
+ if (!event_notifier->desc) {
ret = -EINVAL;
goto register_error;
}
* Needs to be explicitly enabled after creation, since
* we may want to apply filters.
*/
- event->enabled = 0;
- event->registered = 1;
+ event_notifier->enabled = 0;
+ event_notifier->registered = 1;
/*
- * Populate lttng_event structure before event
- * registration.
+ * Populate lttng_event_notifier structure before
+ * event_notifier registration.
*/
smp_wmb();
- ret = lttng_uprobes_register(event_param->name,
- event_param->u.uprobe.fd,
- event);
+ ret = lttng_uprobes_register_event_notifier(
+ event_notifier_param->event.name,
+ event_notifier_param->event.u.uprobe.fd,
+ event_notifier);
if (ret)
goto register_error;
- ret = try_module_get(event->desc->owner);
+ ret = try_module_get(event_notifier->desc->owner);
WARN_ON_ONCE(!ret);
break;
- case LTTNG_KERNEL_FUNCTION: /* Fall-through */
+ case LTTNG_KERNEL_KRETPROBE:
+ case LTTNG_KERNEL_FUNCTION:
default:
WARN_ON_ONCE(1);
ret = -EINVAL;
goto register_error;
}
- ret = _lttng_event_metadata_statedump(chan->session, chan, event);
- WARN_ON_ONCE(ret > 0);
- if (ret) {
- goto statedump_error;
+
+ list_add(&event_notifier->list, &event_notifier_group->event_notifiers_head);
+ hlist_add_head(&event_notifier->hlist, head);
+
+ /*
+ * Clear the error counter bucket. The sessiond keeps track of which
+ * bucket is currently in use. We trust it. The session lock
+ * synchronizes against concurrent creation of the error
+ * counter.
+ */
+ error_counter = event_notifier_group->error_counter;
+ if (error_counter) {
+ size_t dimension_index[1];
+
+ /*
+ * Check that the index is within the boundary of the counter.
+ */
+ if (event_notifier->error_counter_index >= event_notifier_group->error_counter_len) {
+ printk(KERN_INFO "LTTng: event_notifier: Error counter index out-of-bound: counter-len=%zu, index=%llu\n",
+ event_notifier_group->error_counter_len, event_notifier->error_counter_index);
+ ret = -EINVAL;
+ goto register_error;
+ }
+
+ dimension_index[0] = event_notifier->error_counter_index;
+ ret = error_counter->ops->counter_clear(error_counter->counter, dimension_index);
+ if (ret) {
+ printk(KERN_INFO "LTTng: event_notifier: Unable to clear error counter bucket %llu\n",
+ event_notifier->error_counter_index);
+ goto register_error;
+ }
}
- hlist_add_head(&event->hlist, head);
- list_add(&event->list, &chan->session->events);
- return event;
-statedump_error:
- /* If a statedump error occurs, events will not be readable. */
+ return event_notifier;
+
register_error:
- kmem_cache_free(event_cache, event);
+ kmem_cache_free(event_notifier_cache, event_notifier);
cache_error:
exist:
type_error:
-full:
return ERR_PTR(ret);
}
-struct lttng_event *lttng_event_create(struct lttng_channel *chan,
+/*
+ * Read the value of one counter bucket (selected by dim_indexes) for the
+ * given cpu, reporting overflow/underflow state through the out-parameters.
+ * Thin wrapper: delegates entirely to the counter backend's counter_read op.
+ */
+int lttng_kernel_counter_read(struct lttng_counter *counter,
+ const size_t *dim_indexes, int32_t cpu,
+ int64_t *val, bool *overflow, bool *underflow)
+{
+ return counter->ops->counter_read(counter->counter, dim_indexes,
+ cpu, val, overflow, underflow);
+}
+
+/*
+ * Aggregate the value of one counter bucket (no per-cpu selection —
+ * presumably summed across CPUs by the backend; semantics are defined by
+ * the counter_aggregate op). Reports overflow/underflow state.
+ */
+int lttng_kernel_counter_aggregate(struct lttng_counter *counter,
+ const size_t *dim_indexes, int64_t *val,
+ bool *overflow, bool *underflow)
+{
+ return counter->ops->counter_aggregate(counter->counter, dim_indexes,
+ val, overflow, underflow);
+}
+
+/*
+ * Reset one counter bucket (selected by dim_indexes) via the backend's
+ * counter_clear op. Returns the backend's error code (0 on success).
+ */
+int lttng_kernel_counter_clear(struct lttng_counter *counter,
+ const size_t *dim_indexes)
+{
+ return counter->ops->counter_clear(counter->counter, dim_indexes);
+}
+
+/*
+ * Public entry point for event creation: serializes against concurrent
+ * session/enabler updates by holding sessions_mutex around
+ * _lttng_event_create(). Returns the new event or an ERR_PTR.
+ */
+struct lttng_event *lttng_event_create(struct lttng_event_container *container,
		struct lttng_kernel_event *event_param,
+		const struct lttng_counter_key *key,
		void *filter,
		const struct lttng_event_desc *event_desc,
-		enum lttng_kernel_instrumentation itype)
+		enum lttng_kernel_instrumentation itype,
+		uint64_t token)
{
	struct lttng_event *event;
	mutex_lock(&sessions_mutex);
-	event = _lttng_event_create(chan, event_param, filter, event_desc,
-			itype);
+	event = _lttng_event_create(container, event_param, key, filter, event_desc,
+			itype, token);
	mutex_unlock(&sessions_mutex);
	return event;
}
+/*
+ * Public entry point for event notifier creation: holds sessions_mutex
+ * around _lttng_event_notifier_create() to serialize against concurrent
+ * session and notifier-group updates. Returns the new event notifier or
+ * an ERR_PTR.
+ */
+struct lttng_event_notifier *lttng_event_notifier_create(
+ const struct lttng_event_desc *event_desc,
+ uint64_t id, uint64_t error_counter_index,
+ struct lttng_event_notifier_group *event_notifier_group,
+ struct lttng_kernel_event_notifier *event_notifier_param,
+ void *filter, enum lttng_kernel_instrumentation itype)
+{
+ struct lttng_event_notifier *event_notifier;
+
+ mutex_lock(&sessions_mutex);
+ event_notifier = _lttng_event_notifier_create(event_desc, id,
+ error_counter_index, event_notifier_group,
+ event_notifier_param, filter, itype);
+ mutex_unlock(&sessions_mutex);
+ return event_notifier;
+}
+
/* Only used for tracepoints for now. */
static
void register_event(struct lttng_event *event)
switch (event->instrumentation) {
case LTTNG_KERNEL_TRACEPOINT:
ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
- desc->probe_callback,
- event);
+ desc->probe_callback, event);
break;
case LTTNG_KERNEL_SYSCALL:
- ret = lttng_syscall_filter_enable(event->chan,
- desc->name);
+ ret = lttng_syscall_filter_enable_event(event->container, event);
break;
case LTTNG_KERNEL_KPROBE:
case LTTNG_KERNEL_UPROBE:
switch (event->instrumentation) {
case LTTNG_KERNEL_TRACEPOINT:
ret = lttng_wrapper_tracepoint_probe_unregister(event->desc->kname,
- event->desc->probe_callback,
- event);
+ event->desc->probe_callback, event);
break;
case LTTNG_KERNEL_KPROBE:
- lttng_kprobes_unregister(event);
+ lttng_kprobes_unregister_event(event);
ret = 0;
break;
case LTTNG_KERNEL_KRETPROBE:
ret = 0;
break;
case LTTNG_KERNEL_SYSCALL:
- ret = lttng_syscall_filter_disable(event->chan,
- desc->name);
+ ret = lttng_syscall_filter_disable_event(event->container, event);
break;
case LTTNG_KERNEL_NOOP:
ret = 0;
break;
case LTTNG_KERNEL_UPROBE:
- lttng_uprobes_unregister(event);
+ lttng_uprobes_unregister_event(event);
ret = 0;
break;
case LTTNG_KERNEL_FUNCTION: /* Fall-through */
return ret;
}
+/*
+ * Register the instrumentation backing an event notifier (tracepoint or
+ * syscall). Idempotent: returns early if already registered, and sets
+ * event_notifier->registered on success.
+ */
+static
+void register_event_notifier(struct lttng_event_notifier *event_notifier)
+{
+ const struct lttng_event_desc *desc;
+ int ret = -EINVAL;
+
+ if (event_notifier->registered)
+ return;
+
+ desc = event_notifier->desc;
+ switch (event_notifier->instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
+ desc->event_notifier_callback,
+ event_notifier);
+ break;
+ case LTTNG_KERNEL_SYSCALL:
+ ret = lttng_syscall_filter_enable_event_notifier(event_notifier);
+ break;
+ case LTTNG_KERNEL_KPROBE:
+ case LTTNG_KERNEL_UPROBE:
+ /* kprobe/uprobe notifiers are registered at creation time. */
+ ret = 0;
+ break;
+ case LTTNG_KERNEL_KRETPROBE:
+ case LTTNG_KERNEL_FUNCTION:
+ case LTTNG_KERNEL_NOOP:
+ default:
+ /* Unsupported for event notifiers. */
+ WARN_ON_ONCE(1);
+ }
+ if (!ret)
+ event_notifier->registered = 1;
+}
+
+/*
+ * Unregister the instrumentation backing an event notifier. Returns 0 if
+ * the notifier was not registered or on successful unregistration; clears
+ * event_notifier->registered on success.
+ */
+static
+int _lttng_event_notifier_unregister(
+ struct lttng_event_notifier *event_notifier)
+{
+ /* NOTE(review): 'desc' is assigned below but never read. */
+ const struct lttng_event_desc *desc;
+ int ret = -EINVAL;
+
+ if (!event_notifier->registered)
+ return 0;
+
+ desc = event_notifier->desc;
+ switch (event_notifier->instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ ret = lttng_wrapper_tracepoint_probe_unregister(event_notifier->desc->kname,
+ event_notifier->desc->event_notifier_callback,
+ event_notifier);
+ break;
+ case LTTNG_KERNEL_KPROBE:
+ lttng_kprobes_unregister_event_notifier(event_notifier);
+ ret = 0;
+ break;
+ case LTTNG_KERNEL_UPROBE:
+ lttng_uprobes_unregister_event_notifier(event_notifier);
+ ret = 0;
+ break;
+ case LTTNG_KERNEL_SYSCALL:
+ ret = lttng_syscall_filter_disable_event_notifier(event_notifier);
+ break;
+ case LTTNG_KERNEL_KRETPROBE:
+ case LTTNG_KERNEL_FUNCTION:
+ case LTTNG_KERNEL_NOOP:
+ default:
+ /* Unsupported for event notifiers. */
+ WARN_ON_ONCE(1);
+ }
+ if (!ret)
+ event_notifier->registered = 0;
+ return ret;
+}
+
/*
* Only used internally at session destruction.
*/
static
void _lttng_event_destroy(struct lttng_event *event)
{
+ struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;
+
switch (event->instrumentation) {
case LTTNG_KERNEL_TRACEPOINT:
- lttng_event_put(event->desc);
+ lttng_event_desc_put(event->desc);
break;
case LTTNG_KERNEL_KPROBE:
module_put(event->desc->owner);
- lttng_kprobes_destroy_private(event);
+ lttng_kprobes_destroy_event_private(event);
break;
case LTTNG_KERNEL_KRETPROBE:
module_put(event->desc->owner);
break;
case LTTNG_KERNEL_UPROBE:
module_put(event->desc->owner);
- lttng_uprobes_destroy_private(event);
+ lttng_uprobes_destroy_event_private(event);
break;
case LTTNG_KERNEL_FUNCTION: /* Fall-through */
default:
}
list_del(&event->list);
lttng_destroy_context(event->ctx);
+ lttng_free_event_filter_runtime(event);
+ /* Free event enabler refs */
+ list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
+ &event->enablers_ref_head, node)
+ kfree(enabler_ref);
kmem_cache_free(event_cache, event);
}
+/*
+ * Only used internally at session destruction.
+ *
+ * Releases the reference taken at creation (event descriptor ref for
+ * tracepoints, probe module ref for kprobe/uprobe), frees per-probe
+ * private data, unlinks the notifier, frees its filter bytecode runtime
+ * and enabler back-references, then returns it to the slab cache.
+ */
+static
+void _lttng_event_notifier_destroy(struct lttng_event_notifier *event_notifier)
+{
+ struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;
+
+ switch (event_notifier->instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ lttng_event_desc_put(event_notifier->desc);
+ break;
+ case LTTNG_KERNEL_KPROBE:
+ module_put(event_notifier->desc->owner);
+ lttng_kprobes_destroy_event_notifier_private(event_notifier);
+ break;
+ case LTTNG_KERNEL_NOOP:
+ case LTTNG_KERNEL_SYSCALL:
+ /* No extra reference held for these types. */
+ break;
+ case LTTNG_KERNEL_UPROBE:
+ module_put(event_notifier->desc->owner);
+ lttng_uprobes_destroy_event_notifier_private(event_notifier);
+ break;
+ case LTTNG_KERNEL_KRETPROBE:
+ case LTTNG_KERNEL_FUNCTION:
+ default:
+ WARN_ON_ONCE(1);
+ }
+ list_del(&event_notifier->list);
+ lttng_free_event_notifier_filter_runtime(event_notifier);
+ /* Free event enabler refs */
+ list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
+ &event_notifier->enablers_ref_head, node)
+ kfree(enabler_ref);
+ kmem_cache_free(event_notifier_cache, event_notifier);
+}
+
struct lttng_id_tracker *get_tracker(struct lttng_session *session,
enum tracker_type tracker_type)
{
return 1;
}
-static
int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
struct lttng_enabler *enabler)
{
const char *desc_name, *enabler_name;
+ bool compat = false, entry = false;
enabler_name = enabler->event_param.name;
switch (enabler->event_param.instrumentation) {
case LTTNG_KERNEL_TRACEPOINT:
desc_name = desc->name;
+ switch (enabler->format_type) {
+ case LTTNG_ENABLER_FORMAT_STAR_GLOB:
+ return lttng_match_enabler_star_glob(desc_name, enabler_name);
+ case LTTNG_ENABLER_FORMAT_NAME:
+ return lttng_match_enabler_name(desc_name, enabler_name);
+ default:
+ return -EINVAL;
+ }
break;
case LTTNG_KERNEL_SYSCALL:
desc_name = desc->name;
- if (!strncmp(desc_name, "compat_", strlen("compat_")))
+ if (!strncmp(desc_name, "compat_", strlen("compat_"))) {
desc_name += strlen("compat_");
+ compat = true;
+ }
if (!strncmp(desc_name, "syscall_exit_",
strlen("syscall_exit_"))) {
desc_name += strlen("syscall_exit_");
} else if (!strncmp(desc_name, "syscall_entry_",
strlen("syscall_entry_"))) {
desc_name += strlen("syscall_entry_");
+ entry = true;
} else {
WARN_ON_ONCE(1);
return -EINVAL;
}
+ switch (enabler->event_param.u.syscall.entryexit) {
+ case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
+ break;
+ case LTTNG_KERNEL_SYSCALL_ENTRY:
+ if (!entry)
+ return 0;
+ break;
+ case LTTNG_KERNEL_SYSCALL_EXIT:
+ if (entry)
+ return 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ switch (enabler->event_param.u.syscall.abi) {
+ case LTTNG_KERNEL_SYSCALL_ABI_ALL:
+ break;
+ case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
+ if (compat)
+ return 0;
+ break;
+ case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
+ if (!compat)
+ return 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ switch (enabler->event_param.u.syscall.match) {
+ case LTTNG_KERNEL_SYSCALL_MATCH_NAME:
+ switch (enabler->format_type) {
+ case LTTNG_ENABLER_FORMAT_STAR_GLOB:
+ return lttng_match_enabler_star_glob(desc_name, enabler_name);
+ case LTTNG_ENABLER_FORMAT_NAME:
+ return lttng_match_enabler_name(desc_name, enabler_name);
+ default:
+ return -EINVAL;
+ }
+ break;
+ case LTTNG_KERNEL_SYSCALL_MATCH_NR:
+ return -EINVAL; /* Not implemented. */
+ default:
+ return -EINVAL;
+ }
break;
default:
WARN_ON_ONCE(1);
return -EINVAL;
}
- switch (enabler->type) {
- case LTTNG_ENABLER_STAR_GLOB:
- return lttng_match_enabler_star_glob(desc_name, enabler_name);
- case LTTNG_ENABLER_NAME:
- return lttng_match_enabler_name(desc_name, enabler_name);
- default:
- return -EINVAL;
- }
}
static
-int lttng_event_match_enabler(struct lttng_event *event,
- struct lttng_enabler *enabler)
+bool lttng_event_enabler_match_event(struct lttng_event_enabler *event_enabler,
+ struct lttng_event *event)
+{
+ struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(
+ event_enabler);
+
+ if (base_enabler->event_param.instrumentation == event->instrumentation
+ && lttng_desc_match_enabler(event->desc, base_enabler) > 0
+ && event->container == event_enabler->container
+ && match_event_token(event->container, event, event_enabler->base.user_token))
+ return true;
+ else
+ return false;
+}
+
+static
+int lttng_event_notifier_enabler_match_event_notifier(struct lttng_event_notifier_enabler *event_notifier_enabler,
+ struct lttng_event_notifier *event_notifier)
{
- if (enabler->event_param.instrumentation != event->instrumentation)
+ struct lttng_enabler *base_enabler = lttng_event_notifier_enabler_as_enabler(
+ event_notifier_enabler);
+
+ if (base_enabler->event_param.instrumentation != event_notifier->instrumentation)
return 0;
- if (lttng_desc_match_enabler(event->desc, enabler)
- && event->chan == enabler->chan)
+ if (lttng_desc_match_enabler(event_notifier->desc, base_enabler) > 0
+ && event_notifier->group == event_notifier_enabler->group
+ && event_notifier->user_token == event_notifier_enabler->base.user_token)
return 1;
else
return 0;
}
static
-struct lttng_enabler_ref *lttng_event_enabler_ref(struct lttng_event *event,
+struct lttng_enabler_ref *lttng_enabler_ref(
+ struct list_head *enablers_ref_list,
struct lttng_enabler *enabler)
{
struct lttng_enabler_ref *enabler_ref;
- list_for_each_entry(enabler_ref,
- &event->enablers_ref_head, node) {
+ list_for_each_entry(enabler_ref, enablers_ref_list, node) {
if (enabler_ref->ref == enabler)
return enabler_ref;
}
}
static
-void lttng_create_tracepoint_if_missing(struct lttng_enabler *enabler)
+void lttng_create_tracepoint_event_if_missing(struct lttng_event_enabler *event_enabler)
{
- struct lttng_session *session = enabler->chan->session;
struct lttng_probe_desc *probe_desc;
const struct lttng_event_desc *desc;
int i;
* our enabler, create an associated lttng_event if not
* already present.
*/
+ list_for_each_entry(probe_desc, probe_list, head) {
+ for (i = 0; i < probe_desc->nr_events; i++) {
+ struct lttng_event *event;
+
+ desc = probe_desc->event_desc[i];
+ if (lttng_desc_match_enabler(desc,
+ lttng_event_enabler_as_enabler(event_enabler)) <= 0)
+ continue;
+
+ /* Try to create an event for this event probe. */
+ event = _lttng_event_create(event_enabler->container,
+ NULL, &event_enabler->key, NULL, desc,
+ LTTNG_KERNEL_TRACEPOINT,
+ event_enabler->base.user_token);
+ /* Skip if event is already found. */
+ if (IS_ERR(event) && PTR_ERR(event) == -EEXIST)
+ continue;
+ if (IS_ERR(event)) {
+ printk(KERN_INFO "LTTng: Unable to create event %s\n",
+ probe_desc->event_desc[i]->name);
+ }
+ }
+ }
+}
+
+/*
+ * For every known tracepoint probe event matching this enabler, create the
+ * associated event notifier unless one with the same descriptor and user
+ * token already exists in the group's hash table. Creation failures are
+ * only logged, not propagated.
+ */
+static
+void lttng_create_tracepoint_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
+{
+ struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
+ struct lttng_probe_desc *probe_desc;
+ const struct lttng_event_desc *desc;
+ int i;
+ struct list_head *probe_list;
+
+ probe_list = lttng_get_probe_list_head();
+ /*
+ * For each probe event, if we find that a probe event matches
+ * our enabler, create an associated lttng_event_notifier if not
+ * already present.
+ */
+ list_for_each_entry(probe_desc, probe_list, head) {
+ for (i = 0; i < probe_desc->nr_events; i++) {
+ int found = 0;
+ struct hlist_head *head;
- const char *event_name;
- size_t name_len;
- uint32_t hash;
- struct lttng_event *event;
+ struct lttng_event_notifier *event_notifier;
+
+ desc = probe_desc->event_desc[i];
+ if (lttng_desc_match_enabler(desc,
+ lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)) <= 0)
+ continue;
+
+ /*
+ * Check if already created.
+ */
+ head = utils_borrow_hash_table_bucket(
+ event_notifier_group->event_notifiers_ht.table,
+ LTTNG_EVENT_NOTIFIER_HT_SIZE, desc->name);
+ lttng_hlist_for_each_entry(event_notifier, head, hlist) {
+ if (event_notifier->desc == desc
+ && event_notifier->user_token == event_notifier_enabler->base.user_token)
+ found = 1;
+ }
+ if (found)
+ continue;
+
+ /*
+ * We need to create an event_notifier for this event probe.
+ */
+ event_notifier = _lttng_event_notifier_create(desc,
+ event_notifier_enabler->base.user_token,
+ event_notifier_enabler->error_counter_index,
+ event_notifier_group, NULL, NULL,
+ LTTNG_KERNEL_TRACEPOINT);
+ if (IS_ERR(event_notifier)) {
+ /* NOTE(review): message lacks the "LTTng: " prefix used by sibling printk messages. */
+ printk(KERN_INFO "Unable to create event_notifier %s\n",
+ probe_desc->event_desc[i]->name);
+ }
+ }
+ }
+}
+
+/*
+ * Register the syscall event probes needed by this enabler (delegated to
+ * lttng_syscalls_register_event). Failure is only warned about, not
+ * propagated — matching the best-effort semantics of the tracepoint path.
+ */
+static
+void lttng_create_syscall_event_if_missing(struct lttng_event_enabler *event_enabler)
+{
+ int ret;
+
+ ret = lttng_syscalls_register_event(event_enabler, NULL);
+ WARN_ON_ONCE(ret);
+}
+
+/*
+ * Register the syscall instrumentation for this event notifier enabler,
+ * then create the matching event notifiers. Failures are only warned
+ * about, not propagated.
+ */
+static
+void lttng_create_syscall_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
+{
+ int ret;
+
+ ret = lttng_syscalls_register_event_notifier(event_notifier_enabler, NULL);
+ WARN_ON_ONCE(ret);
+ /*
+ * NOTE(review): "syscals" below looks like a misspelling of "syscalls";
+ * verify against the function's declaration before renaming.
+ */
+ ret = lttng_syscals_create_matching_event_notifiers(event_notifier_enabler, NULL);
+ WARN_ON_ONCE(ret);
+}
+
+/*
+ * Create struct lttng_event if it is missing and present in the list of
+ * tracepoint probes.
+ * Should be called with sessions mutex held.
+ *
+ * Only tracepoint and syscall enablers lazily instantiate events here;
+ * any other instrumentation type reaching this path is a bug (WARN).
+ */
+static
+void lttng_create_event_if_missing(struct lttng_event_enabler *event_enabler)
+{
+ switch (event_enabler->base.event_param.instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ lttng_create_tracepoint_event_if_missing(event_enabler);
+ break;
+ case LTTNG_KERNEL_SYSCALL:
+ lttng_create_syscall_event_if_missing(event_enabler);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+}
+
+/*
+ * Create events associated with an event_enabler (if not already present),
+ * and add backward reference from the event to the enabler.
+ * Should be called with sessions mutex held.
+ */
+static
+int lttng_event_enabler_ref_events(struct lttng_event_enabler *event_enabler)
+{
+ struct lttng_event_container *container = event_enabler->container;
+ struct lttng_session *session = container->session;
+ struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(event_enabler);
+ struct lttng_event *event;
- desc = probe_desc->event_desc[i];
- if (!lttng_desc_match_enabler(desc, enabler))
- continue;
- event_name = desc->name;
- name_len = strlen(event_name);
+ if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_SYSCALL &&
+ base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_SYSCALL_ABI_ALL &&
+ base_enabler->event_param.u.syscall.match == LTTNG_KERNEL_SYSCALL_MATCH_NAME &&
+ !strcmp(base_enabler->event_param.name, "*")) {
+ int enabled = base_enabler->enabled;
+ enum lttng_kernel_syscall_entryexit entryexit = base_enabler->event_param.u.syscall.entryexit;
- /*
- * Check if already created.
- */
- hash = jhash(event_name, name_len, 0);
- head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
- lttng_hlist_for_each_entry(event, head, hlist) {
- if (event->desc == desc
- && event->chan == enabler->chan)
- found = 1;
- }
- if (found)
- continue;
+ if (entryexit == LTTNG_KERNEL_SYSCALL_ENTRY || entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT)
+ WRITE_ONCE(container->syscall_all_entry, enabled);
+
+ if (entryexit == LTTNG_KERNEL_SYSCALL_EXIT || entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT)
+ WRITE_ONCE(container->syscall_all_exit, enabled);
+ }
+
+ /* First ensure that probe events are created for this enabler. */
+ lttng_create_event_if_missing(event_enabler);
+
+ /* For each event matching event_enabler in session event list. */
+ list_for_each_entry(event, &session->events, list) {
+ struct lttng_enabler_ref *enabler_ref;
+ if (!lttng_event_enabler_match_event(event_enabler, event))
+ continue;
+ enabler_ref = lttng_enabler_ref(&event->enablers_ref_head,
+ lttng_event_enabler_as_enabler(event_enabler));
+ if (!enabler_ref) {
/*
- * We need to create an event for this
- * event probe.
+ * If no backward ref, create it.
+ * Add backward ref from event to event_enabler.
*/
- event = _lttng_event_create(enabler->chan,
- NULL, NULL, desc,
- LTTNG_KERNEL_TRACEPOINT);
- if (!event) {
- printk(KERN_INFO "Unable to create event %s\n",
- probe_desc->event_desc[i]->name);
+ enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
+ if (!enabler_ref)
+ return -ENOMEM;
+ enabler_ref->ref = lttng_event_enabler_as_enabler(event_enabler);
+ list_add(&enabler_ref->node,
+ &event->enablers_ref_head);
+ /* Append descriptor to counter. */
+ switch (container->type) {
+ case LTTNG_EVENT_CONTAINER_COUNTER:
+ {
+ struct lttng_counter *counter;
+ const char *name = "<UNKNOWN>";
+ int ret;
+
+ counter = lttng_event_container_get_counter(container);
+ if (event->key[0])
+ name = event->key;
+ else if (event->desc && event->desc->name)
+ name = event->desc->name;
+ ret = lttng_counter_append_descriptor(counter,
+ event_enabler->base.user_token, event->id,
+ name);
+ if (ret) {
+ WARN_ON_ONCE(1);
+ return ret;
+ }
+ break;
+ }
+ case LTTNG_EVENT_CONTAINER_CHANNEL:
+ default:
+ break;
}
}
- }
-}
-static
-void lttng_create_syscall_if_missing(struct lttng_enabler *enabler)
-{
- int ret;
+ /*
+ * Link filter bytecodes if not linked yet.
+ */
+ lttng_enabler_link_bytecode(event->desc,
+ lttng_static_ctx,
+ &event->filter_bytecode_runtime_head,
+ <tng_event_enabler_as_enabler(event_enabler)->filter_bytecode_head);
- ret = lttng_syscalls_register(enabler->chan, NULL);
- WARN_ON_ONCE(ret);
+ /* TODO: merge event context. */
+ }
+ return 0;
}
/*
- * Create struct lttng_event if it is missing and present in the list of
+ * Create struct lttng_event_notifier if it is missing and present in the list of
* tracepoint probes.
* Should be called with sessions mutex held.
*/
static
-void lttng_create_event_if_missing(struct lttng_enabler *enabler)
+void lttng_create_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
{
- switch (enabler->event_param.instrumentation) {
+ switch (event_notifier_enabler->base.event_param.instrumentation) {
case LTTNG_KERNEL_TRACEPOINT:
- lttng_create_tracepoint_if_missing(enabler);
+ lttng_create_tracepoint_event_notifier_if_missing(event_notifier_enabler);
break;
case LTTNG_KERNEL_SYSCALL:
- lttng_create_syscall_if_missing(enabler);
+ lttng_create_syscall_event_notifier_if_missing(event_notifier_enabler);
break;
default:
WARN_ON_ONCE(1);
}
/*
- * Create events associated with an enabler (if not already present),
- * and add backward reference from the event to the enabler.
- * Should be called with sessions mutex held.
+ * Create event_notifiers associated with a event_notifier enabler (if not already present).
*/
static
-int lttng_enabler_ref_events(struct lttng_enabler *enabler)
+int lttng_event_notifier_enabler_ref_event_notifiers(
+ struct lttng_event_notifier_enabler *event_notifier_enabler)
{
- struct lttng_session *session = enabler->chan->session;
- struct lttng_event *event;
+ struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
+ struct lttng_enabler *base_enabler = lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
+ struct lttng_event_notifier *event_notifier;
- /* First ensure that probe events are created for this enabler. */
- lttng_create_event_if_missing(enabler);
+ if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_SYSCALL &&
+ base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_SYSCALL_ABI_ALL &&
+ base_enabler->event_param.u.syscall.match == LTTNG_KERNEL_SYSCALL_MATCH_NAME &&
+ !strcmp(base_enabler->event_param.name, "*")) {
- /* For each event matching enabler in session event list. */
- list_for_each_entry(event, &session->events, list) {
+ int enabled = base_enabler->enabled;
+ enum lttng_kernel_syscall_entryexit entryexit = base_enabler->event_param.u.syscall.entryexit;
+
+ if (entryexit == LTTNG_KERNEL_SYSCALL_ENTRY || entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT)
+ WRITE_ONCE(event_notifier_group->syscall_all_entry, enabled);
+
+ if (entryexit == LTTNG_KERNEL_SYSCALL_EXIT || entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT)
+ WRITE_ONCE(event_notifier_group->syscall_all_exit, enabled);
+
+ }
+
+ /* First ensure that probe event_notifiers are created for this enabler. */
+ lttng_create_event_notifier_if_missing(event_notifier_enabler);
+
+ /* Link the created event_notifier with its associated enabler. */
+ list_for_each_entry(event_notifier, &event_notifier_group->event_notifiers_head, list) {
struct lttng_enabler_ref *enabler_ref;
- if (!lttng_event_match_enabler(event, enabler))
+ if (!lttng_event_notifier_enabler_match_event_notifier(event_notifier_enabler, event_notifier))
continue;
- enabler_ref = lttng_event_enabler_ref(event, enabler);
+
+ enabler_ref = lttng_enabler_ref(&event_notifier->enablers_ref_head,
+ lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
if (!enabler_ref) {
/*
* If no backward ref, create it.
- * Add backward ref from event to enabler.
+ * Add backward ref from event_notifier to enabler.
*/
enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
if (!enabler_ref)
return -ENOMEM;
- enabler_ref->ref = enabler;
+
+ enabler_ref->ref = lttng_event_notifier_enabler_as_enabler(
+ event_notifier_enabler);
list_add(&enabler_ref->node,
- &event->enablers_ref_head);
+ &event_notifier->enablers_ref_head);
}
/*
* Link filter bytecodes if not linked yet.
*/
- lttng_enabler_event_link_bytecode(event, enabler);
+ lttng_enabler_link_bytecode(event_notifier->desc,
+ lttng_static_ctx, &event_notifier->filter_bytecode_runtime_head,
+		&lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->filter_bytecode_head);
- /* TODO: merge event context. */
+ /* Link capture bytecodes if not linked yet. */
+ lttng_enabler_link_bytecode(event_notifier->desc,
+ lttng_static_ctx, &event_notifier->capture_bytecode_runtime_head,
+ &event_notifier_enabler->capture_bytecode_head);
+
+ event_notifier->num_captures = event_notifier_enabler->num_captures;
}
return 0;
}
struct lttng_session *session;
list_for_each_entry(session, &sessions, list)
- lttng_session_lazy_sync_enablers(session);
+ lttng_session_lazy_sync_event_enablers(session);
+ return 0;
+}
+
+static bool lttng_event_notifier_group_has_active_event_notifiers(
+ struct lttng_event_notifier_group *event_notifier_group)
+{
+ struct lttng_event_notifier_enabler *event_notifier_enabler;
+
+ list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head,
+ node) {
+ if (event_notifier_enabler->base.enabled)
+ return true;
+ }
+ return false;
+}
+
+bool lttng_event_notifier_active(void)
+{
+ struct lttng_event_notifier_group *event_notifier_group;
+
+ list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
+ if (lttng_event_notifier_group_has_active_event_notifiers(event_notifier_group))
+ return true;
+ }
+ return false;
+}
+
+int lttng_fix_pending_event_notifiers(void)
+{
+ struct lttng_event_notifier_group *event_notifier_group;
+
+ list_for_each_entry(event_notifier_group, &event_notifier_groups, node)
+ lttng_event_notifier_group_sync_enablers(event_notifier_group);
return 0;
}
-struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
+struct lttng_event_enabler *lttng_event_enabler_create(
+ enum lttng_enabler_format_type format_type,
struct lttng_kernel_event *event_param,
- struct lttng_channel *chan)
+ const struct lttng_counter_key *key,
+ struct lttng_event_container *container)
{
- struct lttng_enabler *enabler;
+ struct lttng_event_enabler *event_enabler;
- enabler = kzalloc(sizeof(*enabler), GFP_KERNEL);
- if (!enabler)
+ event_enabler = kzalloc(sizeof(*event_enabler), GFP_KERNEL);
+ if (!event_enabler)
return NULL;
- enabler->type = type;
- INIT_LIST_HEAD(&enabler->filter_bytecode_head);
- memcpy(&enabler->event_param, event_param,
- sizeof(enabler->event_param));
- enabler->chan = chan;
+ event_enabler->base.format_type = format_type;
+ INIT_LIST_HEAD(&event_enabler->base.filter_bytecode_head);
+ memcpy(&event_enabler->base.event_param, event_param,
+ sizeof(event_enabler->base.event_param));
+ event_enabler->container = container;
/* ctx left NULL */
- enabler->enabled = 0;
- enabler->evtype = LTTNG_TYPE_ENABLER;
+ event_enabler->base.enabled = 0;
+ event_enabler->base.evtype = LTTNG_TYPE_ENABLER;
+ event_enabler->base.user_token = event_param->token;
+ if (key)
+ event_enabler->key = *key;
mutex_lock(&sessions_mutex);
- list_add(&enabler->node, &enabler->chan->session->enablers_head);
- lttng_session_lazy_sync_enablers(enabler->chan->session);
+ list_add(&event_enabler->node, &event_enabler->container->session->enablers_head);
+ lttng_session_lazy_sync_event_enablers(event_enabler->container->session);
mutex_unlock(&sessions_mutex);
- return enabler;
+ return event_enabler;
}
-int lttng_enabler_enable(struct lttng_enabler *enabler)
+int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler)
{
mutex_lock(&sessions_mutex);
- enabler->enabled = 1;
- lttng_session_lazy_sync_enablers(enabler->chan->session);
+ lttng_event_enabler_as_enabler(event_enabler)->enabled = 1;
+ lttng_session_lazy_sync_event_enablers(event_enabler->container->session);
mutex_unlock(&sessions_mutex);
return 0;
}
-int lttng_enabler_disable(struct lttng_enabler *enabler)
+int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler)
{
mutex_lock(&sessions_mutex);
- enabler->enabled = 0;
- lttng_session_lazy_sync_enablers(enabler->chan->session);
+ lttng_event_enabler_as_enabler(event_enabler)->enabled = 0;
+ lttng_session_lazy_sync_event_enablers(event_enabler->container->session);
mutex_unlock(&sessions_mutex);
return 0;
}
-int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
+static
+int lttng_enabler_attach_filter_bytecode(struct lttng_enabler *enabler,
struct lttng_kernel_filter_bytecode __user *bytecode)
{
- struct lttng_filter_bytecode_node *bytecode_node;
+ struct lttng_bytecode_node *bytecode_node;
uint32_t bytecode_len;
int ret;
ret = get_user(bytecode_len, &bytecode->len);
if (ret)
return ret;
- bytecode_node = kzalloc(sizeof(*bytecode_node) + bytecode_len,
+ bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
GFP_KERNEL);
if (!bytecode_node)
return -ENOMEM;
sizeof(*bytecode) + bytecode_len);
if (ret)
goto error_free;
+
+ bytecode_node->type = LTTNG_BYTECODE_NODE_TYPE_FILTER;
bytecode_node->enabler = enabler;
/* Enforce length based on allocated size */
bytecode_node->bc.len = bytecode_len;
list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
- lttng_session_lazy_sync_enablers(enabler->chan->session);
+
return 0;
error_free:
- kfree(bytecode_node);
+ lttng_kvfree(bytecode_node);
+ return ret;
+}
+
+int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler *event_enabler,
+ struct lttng_kernel_filter_bytecode __user *bytecode)
+{
+ int ret;
+ ret = lttng_enabler_attach_filter_bytecode(
+ lttng_event_enabler_as_enabler(event_enabler), bytecode);
+ if (ret)
+ goto error;
+
+ lttng_session_lazy_sync_event_enablers(event_enabler->container->session);
+ return 0;
+
+error:
return ret;
}
switch (event->instrumentation) {
case LTTNG_KERNEL_UPROBE:
- return lttng_uprobes_add_callsite(event, callsite);
+ return lttng_uprobes_event_add_callsite(event, callsite);
default:
return -EINVAL;
}
}
-int lttng_enabler_attach_context(struct lttng_enabler *enabler,
+int lttng_event_enabler_attach_context(struct lttng_event_enabler *event_enabler,
struct lttng_kernel_context *context_param)
{
return -ENOSYS;
static
void lttng_enabler_destroy(struct lttng_enabler *enabler)
{
- struct lttng_filter_bytecode_node *filter_node, *tmp_filter_node;
+ struct lttng_bytecode_node *filter_node, *tmp_filter_node;
/* Destroy filter bytecode */
list_for_each_entry_safe(filter_node, tmp_filter_node,
&enabler->filter_bytecode_head, node) {
- kfree(filter_node);
+ lttng_kvfree(filter_node);
}
+}
+
+static
+void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler)
+{
+ lttng_enabler_destroy(lttng_event_enabler_as_enabler(event_enabler));
/* Destroy contexts */
- lttng_destroy_context(enabler->ctx);
+ lttng_destroy_context(event_enabler->ctx);
+
+ list_del(&event_enabler->node);
+ kfree(event_enabler);
+}
+
+struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create(
+ struct lttng_event_notifier_group *event_notifier_group,
+ enum lttng_enabler_format_type format_type,
+ struct lttng_kernel_event_notifier *event_notifier_param)
+{
+ struct lttng_event_notifier_enabler *event_notifier_enabler;
+
+ event_notifier_enabler = kzalloc(sizeof(*event_notifier_enabler), GFP_KERNEL);
+ if (!event_notifier_enabler)
+ return NULL;
+
+ event_notifier_enabler->base.format_type = format_type;
+ INIT_LIST_HEAD(&event_notifier_enabler->base.filter_bytecode_head);
+ INIT_LIST_HEAD(&event_notifier_enabler->capture_bytecode_head);
+
+ event_notifier_enabler->error_counter_index = event_notifier_param->error_counter_index;
+ event_notifier_enabler->num_captures = 0;
+
+ memcpy(&event_notifier_enabler->base.event_param, &event_notifier_param->event,
+ sizeof(event_notifier_enabler->base.event_param));
+ event_notifier_enabler->base.evtype = LTTNG_TYPE_ENABLER;
+
+ event_notifier_enabler->base.enabled = 0;
+ event_notifier_enabler->base.user_token = event_notifier_param->event.token;
+ event_notifier_enabler->group = event_notifier_group;
+
+ mutex_lock(&sessions_mutex);
+ list_add(&event_notifier_enabler->node, &event_notifier_enabler->group->enablers_head);
+ lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
+
+ mutex_unlock(&sessions_mutex);
+
+ return event_notifier_enabler;
+}
+
+int lttng_event_notifier_enabler_enable(
+ struct lttng_event_notifier_enabler *event_notifier_enabler)
+{
+ mutex_lock(&sessions_mutex);
+ lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 1;
+ lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
+ mutex_unlock(&sessions_mutex);
+ return 0;
+}
+
+int lttng_event_notifier_enabler_disable(
+ struct lttng_event_notifier_enabler *event_notifier_enabler)
+{
+ mutex_lock(&sessions_mutex);
+ lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 0;
+ lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
+ mutex_unlock(&sessions_mutex);
+ return 0;
+}
+
+int lttng_event_notifier_enabler_attach_filter_bytecode(
+ struct lttng_event_notifier_enabler *event_notifier_enabler,
+ struct lttng_kernel_filter_bytecode __user *bytecode)
+{
+ int ret;
+
+ ret = lttng_enabler_attach_filter_bytecode(
+ lttng_event_notifier_enabler_as_enabler(event_notifier_enabler),
+ bytecode);
+ if (ret)
+ goto error;
+
+ lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
+ return 0;
+
+error:
+ return ret;
+}
+
+int lttng_event_notifier_enabler_attach_capture_bytecode(
+ struct lttng_event_notifier_enabler *event_notifier_enabler,
+ struct lttng_kernel_capture_bytecode __user *bytecode)
+{
+ struct lttng_bytecode_node *bytecode_node;
+ struct lttng_enabler *enabler =
+ lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
+ uint32_t bytecode_len;
+ int ret;
+
+ ret = get_user(bytecode_len, &bytecode->len);
+ if (ret)
+ return ret;
+
+ bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
+ GFP_KERNEL);
+ if (!bytecode_node)
+ return -ENOMEM;
+
+ ret = copy_from_user(&bytecode_node->bc, bytecode,
+ sizeof(*bytecode) + bytecode_len);
+ if (ret)
+ goto error_free;
+
+ bytecode_node->type = LTTNG_BYTECODE_NODE_TYPE_CAPTURE;
+ bytecode_node->enabler = enabler;
+
+ /* Enforce length based on allocated size */
+ bytecode_node->bc.len = bytecode_len;
+ list_add_tail(&bytecode_node->node, &event_notifier_enabler->capture_bytecode_head);
+
+ event_notifier_enabler->num_captures++;
+
+ lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
+ goto end;
- list_del(&enabler->node);
- kfree(enabler);
+error_free:
+ lttng_kvfree(bytecode_node);
+end:
+ return ret;
+}
+
+int lttng_event_notifier_add_callsite(struct lttng_event_notifier *event_notifier,
+ struct lttng_kernel_event_callsite __user *callsite)
+{
+
+ switch (event_notifier->instrumentation) {
+ case LTTNG_KERNEL_UPROBE:
+ return lttng_uprobes_event_notifier_add_callsite(event_notifier,
+ callsite);
+ default:
+ return -EINVAL;
+ }
+}
+
+int lttng_event_notifier_enabler_attach_context(
+ struct lttng_event_notifier_enabler *event_notifier_enabler,
+ struct lttng_kernel_context *context_param)
+{
+ return -ENOSYS;
+}
+
+static
+void lttng_event_notifier_enabler_destroy(
+ struct lttng_event_notifier_enabler *event_notifier_enabler)
+{
+ if (!event_notifier_enabler) {
+ return;
+ }
+
+ list_del(&event_notifier_enabler->node);
+
+ lttng_enabler_destroy(lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
+ kfree(event_notifier_enabler);
}
/*
- * lttng_session_sync_enablers should be called just before starting a
+ * lttng_session_sync_event_enablers should be called just before starting a
* session.
* Should be called with sessions mutex held.
*/
static
-void lttng_session_sync_enablers(struct lttng_session *session)
+void lttng_session_sync_event_enablers(struct lttng_session *session)
{
- struct lttng_enabler *enabler;
+ struct lttng_event_enabler *event_enabler;
struct lttng_event *event;
- list_for_each_entry(enabler, &session->enablers_head, node)
- lttng_enabler_ref_events(enabler);
+ list_for_each_entry(event_enabler, &session->enablers_head, node)
+ lttng_event_enabler_ref_events(event_enabler);
/*
* For each event, if at least one of its enablers is enabled,
- * and its channel and session transient states are enabled, we
+ * and its event container and session transient states are enabled, we
* enable the event, else we disable it.
*/
list_for_each_entry(event, &session->events, list) {
}
/*
* Enabled state is based on union of enablers, with
- * intesection of session and channel transient enable
+	 * intersection of session and event container transient enable
* states.
*/
- enabled = enabled && session->tstate && event->chan->tstate;
+ enabled = enabled && session->tstate && event->container->tstate;
WRITE_ONCE(event->enabled, enabled);
/*
/* Enable filters */
list_for_each_entry(runtime,
- &event->bytecode_runtime_head, node)
- lttng_filter_sync_state(runtime);
+ &event->filter_bytecode_runtime_head, node)
+ lttng_bytecode_filter_sync_state(runtime);
}
}
* Should be called with sessions mutex held.
*/
static
-void lttng_session_lazy_sync_enablers(struct lttng_session *session)
+void lttng_session_lazy_sync_event_enablers(struct lttng_session *session)
{
/* We can skip if session is not active */
if (!session->active)
return;
- lttng_session_sync_enablers(session);
+ lttng_session_sync_event_enablers(session);
+}
+
+static
+void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group)
+{
+ struct lttng_event_notifier_enabler *event_notifier_enabler;
+ struct lttng_event_notifier *event_notifier;
+
+ list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head, node)
+ lttng_event_notifier_enabler_ref_event_notifiers(event_notifier_enabler);
+
+ /*
+ * For each event_notifier, if at least one of its enablers is enabled,
+ * we enable the event_notifier, else we disable it.
+ */
+ list_for_each_entry(event_notifier, &event_notifier_group->event_notifiers_head, list) {
+ struct lttng_enabler_ref *enabler_ref;
+ struct lttng_bytecode_runtime *runtime;
+ int enabled = 0, has_enablers_without_bytecode = 0;
+
+ switch (event_notifier->instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ case LTTNG_KERNEL_SYSCALL:
+ /* Enable event_notifiers */
+ list_for_each_entry(enabler_ref,
+ &event_notifier->enablers_ref_head, node) {
+ if (enabler_ref->ref->enabled) {
+ enabled = 1;
+ break;
+ }
+ }
+ break;
+ default:
+ /* Not handled with sync. */
+ continue;
+ }
+
+ WRITE_ONCE(event_notifier->enabled, enabled);
+ /*
+ * Sync tracepoint registration with event_notifier enabled
+ * state.
+ */
+ if (enabled) {
+ if (!event_notifier->registered)
+ register_event_notifier(event_notifier);
+ } else {
+ if (event_notifier->registered)
+ _lttng_event_notifier_unregister(event_notifier);
+ }
+
+ /* Check if has enablers without bytecode enabled */
+ list_for_each_entry(enabler_ref,
+ &event_notifier->enablers_ref_head, node) {
+ if (enabler_ref->ref->enabled
+ && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
+ has_enablers_without_bytecode = 1;
+ break;
+ }
+ }
+ event_notifier->has_enablers_without_bytecode =
+ has_enablers_without_bytecode;
+
+ /* Enable filters */
+ list_for_each_entry(runtime,
+ &event_notifier->filter_bytecode_runtime_head, node)
+ lttng_bytecode_filter_sync_state(runtime);
+
+ /* Enable captures */
+ list_for_each_entry(runtime,
+ &event_notifier->capture_bytecode_runtime_head, node)
+ lttng_bytecode_capture_sync_state(runtime);
+
+ WRITE_ONCE(event_notifier->eval_capture, !!event_notifier->num_captures);
+ }
}
/*
size_t len;
va_list ap;
- WARN_ON_ONCE(!READ_ONCE(session->active));
+ WARN_ON_ONCE(!LTTNG_READ_ONCE(session->active));
va_start(ap, fmt);
str = kvasprintf(GFP_KERNEL, fmt, ap);
*/
static
int _lttng_event_metadata_statedump(struct lttng_session *session,
- struct lttng_channel *chan,
struct lttng_event *event)
{
+ struct lttng_channel *chan;
int ret = 0;
- if (event->metadata_dumped || !READ_ONCE(session->active))
+ WARN_ON_ONCE(event->container->type != LTTNG_EVENT_CONTAINER_CHANNEL);
+ chan = lttng_event_container_get_channel(event->container);
+ if (event->metadata_dumped || !LTTNG_READ_ONCE(session->active))
return 0;
if (chan->channel_type == METADATA_CHANNEL)
return 0;
ret = lttng_metadata_printf(session,
"event {\n"
" name = \"%s\";\n"
- " id = %u;\n"
+ " id = %zu;\n"
" stream_id = %u;\n",
event->desc->name,
event->id,
- event->chan->id);
+ chan->id);
if (ret)
goto end;
{
int ret = 0;
- if (chan->metadata_dumped || !READ_ONCE(session->active))
+ if (chan->metadata_dumped || !LTTNG_READ_ONCE(session->active))
return 0;
if (chan->channel_type == METADATA_CHANNEL)
struct lttng_event *event;
int ret = 0;
- if (!READ_ONCE(session->active))
+ if (!LTTNG_READ_ONCE(session->active))
return 0;
lttng_metadata_begin(session);
}
list_for_each_entry(event, &session->events, list) {
- ret = _lttng_event_metadata_statedump(session, event->chan, event);
+ /* Skip counter container. */
+ if (event->container->type != LTTNG_EVENT_CONTAINER_CHANNEL)
+ continue;
+ ret = _lttng_event_metadata_statedump(session, event);
if (ret)
goto end;
}
}
EXPORT_SYMBOL_GPL(lttng_transport_unregister);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+void lttng_counter_transport_register(struct lttng_counter_transport *transport)
+{
+ /*
+ * Make sure no page fault can be triggered by the module about to be
+ * registered. We deal with this here so we don't have to call
+ * vmalloc_sync_mappings() in each module's init.
+ */
+ wrapper_vmalloc_sync_mappings();
+
+ mutex_lock(&sessions_mutex);
+	list_add_tail(&transport->node, &lttng_counter_transport_list);
+ mutex_unlock(&sessions_mutex);
+}
+EXPORT_SYMBOL_GPL(lttng_counter_transport_register);
+
+void lttng_counter_transport_unregister(struct lttng_counter_transport *transport)
+{
+ mutex_lock(&sessions_mutex);
+ list_del(&transport->node);
+ mutex_unlock(&sessions_mutex);
+}
+EXPORT_SYMBOL_GPL(lttng_counter_transport_unregister);
+
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
enum cpuhp_state lttng_hp_prepare;
enum cpuhp_state lttng_hp_online;
cpuhp_remove_multi_state(lttng_hp_prepare);
}
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
static int lttng_init_cpu_hotplug(void)
{
return 0;
static void lttng_exit_cpu_hotplug(void)
{
}
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
static int __init lttng_events_init(void)
event_cache = KMEM_CACHE(lttng_event, 0);
if (!event_cache) {
ret = -ENOMEM;
- goto error_kmem;
+ goto error_kmem_event;
+ }
+ event_notifier_cache = KMEM_CACHE(lttng_event_notifier, 0);
+ if (!event_notifier_cache) {
+ ret = -ENOMEM;
+ goto error_kmem_event_notifier;
}
ret = lttng_abi_init();
if (ret)
error_logger:
lttng_abi_exit();
error_abi:
+ kmem_cache_destroy(event_notifier_cache);
+error_kmem_event_notifier:
kmem_cache_destroy(event_cache);
-error_kmem:
+error_kmem_event:
lttng_tracepoint_exit();
error_tp:
lttng_context_exit();
list_for_each_entry_safe(session, tmpsession, &sessions, list)
lttng_session_destroy(session);
kmem_cache_destroy(event_cache);
+ kmem_cache_destroy(event_notifier_cache);
lttng_tracepoint_exit();
lttng_context_exit();
printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",