obj-$(CONFIG_LTTNG) += lttng-ring-buffer-client-discard.o
obj-$(CONFIG_LTTNG) += lttng-ring-buffer-client-overwrite.o
obj-$(CONFIG_LTTNG) += lttng-ring-buffer-metadata-client.o
+ obj-$(CONFIG_LTTNG) += lttng-ring-buffer-trigger-client.o
obj-$(CONFIG_LTTNG) += lttng-ring-buffer-client-mmap-discard.o
obj-$(CONFIG_LTTNG) += lttng-ring-buffer-client-mmap-overwrite.o
obj-$(CONFIG_LTTNG) += lttng-ring-buffer-metadata-mmap-client.o
#define UNKNOWN_SYSCALL_NRARGS 6
#undef TP_PROBE_CB
-#define TP_PROBE_CB(_template) &syscall_entry_probe
+#define TP_PROBE_CB(_template) &syscall_entry_event_probe
LTTNG_TRACEPOINT_EVENT(syscall_entry_unknown,
TP_PROTO(int id, unsigned long *args),
)
#undef TP_PROBE_CB
-#define TP_PROBE_CB(_template) &syscall_exit_probe
+#define TP_PROBE_CB(_template) &syscall_exit_event_probe
LTTNG_TRACEPOINT_EVENT(syscall_exit_unknown,
TP_PROTO(int id, long ret, unsigned long *args),
sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
v_inc(config, &bufb->array[sb_bindex]->records_commit);
}
-#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
-static inline
-void subbuffer_count_record(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_backend *bufb,
- unsigned long idx)
-{
-}
-#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
/*
* Reader has exclusive subbuffer access for record consumption. No need to
_v_dec(config, &bufb->array[sb_bindex]->records_unread);
v_inc(config, &bufb->records_read);
}
+#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
+/* Record counting compiled out: counting is a no-op. */
+static inline
+void subbuffer_count_record(const struct lib_ring_buffer_config *config,
+ struct lib_ring_buffer_backend *bufb,
+ unsigned long idx)
+{
+}
+/* Record counting compiled out: consume accounting is a no-op. */
+static inline
+void subbuffer_consume_record(const struct lib_ring_buffer_config *config,
+ struct lib_ring_buffer_backend *bufb)
+{
+}
+#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
static inline
unsigned long subbuffer_get_records_count(
*
* RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a subbuffer is
* ready to read. Lower latencies before the reader is woken up. Mainly suitable
- * for drivers.
+ * for drivers. Going through an "irq_work" allows triggering this type of wakeup
+ * even from NMI context: the wakeup will be slightly delayed until the next
+ * interrupts are handled.
*
* RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
* has the responsibility to perform wakeups.
enum {
RING_BUFFER_WAKEUP_BY_TIMER, /* wake up performed by timer */
RING_BUFFER_WAKEUP_BY_WRITER, /*
- * writer wakes up reader,
- * not lock-free
- * (takes spinlock).
+ * writer wakes up reader through
+ * irq_work.
*/
} wakeup;
/*
#define _LIB_RING_BUFFER_FRONTEND_TYPES_H
#include <linux/kref.h>
+#include <linux/irq_work.h>
#include <wrapper/ringbuffer/config.h>
#include <wrapper/ringbuffer/backend_types.h>
#include <lib/prio_heap/lttng_prio_heap.h> /* For per-CPU read-side iterator */
struct notifier_block tick_nohz_notifier; /* CPU nohz notifier */
wait_queue_head_t read_wait; /* reader wait queue */
wait_queue_head_t hp_wait; /* CPU hotplug wait queue */
+ struct irq_work wakeup_pending; /* Pending wakeup irq work */
int finalized; /* Has channel been finalized */
struct channel_iter iter; /* Channel read-side iterator */
struct kref ref; /* Reference count */
union v_atomic records_overrun; /* Number of overwritten records */
wait_queue_head_t read_wait; /* reader buffer-level wait queue */
wait_queue_head_t write_wait; /* writer buffer-level wait queue (for metadata only) */
+ struct irq_work wakeup_pending; /* Pending wakeup irq work */
int finalized; /* buffer has been finalized */
struct timer_list switch_timer; /* timer for periodical switch */
struct timer_list read_timer; /* timer for read poll */
extern ssize_t lib_ring_buffer_get_next_record(struct channel *chan,
struct lib_ring_buffer *buf);
+/*
+ * Ensure that the current subbuffer is put after client code has read the
+ * payload of the current record. Has an effect when the end of subbuffer is
+ * reached. It is not required if get_next_record is called successively.
+ * However, it should be invoked before returning data to user-space to ensure
+ * that the get/put subbuffer state is quiescent.
+ */
+extern void lib_ring_buffer_put_current_record(struct lib_ring_buffer *buf);
+
/*
* channel_get_next_record advances the buffer read position to the next record.
* It returns either the size of the next record, -EAGAIN if there is currently
{
struct channel *chan = buf->backend.chan;
+ irq_work_sync(&buf->wakeup_pending);
+
lib_ring_buffer_print_errors(chan, buf, buf->backend.cpu);
lttng_kvfree(buf->commit_hot);
lttng_kvfree(buf->commit_cold);
}
EXPORT_SYMBOL_GPL(channel_reset);
+/*
+ * irq_work handler: deferred wakeup of the per-buffer reader wait queue.
+ * Queued via irq_work so the wakeup can be requested from contexts where
+ * calling wake_up_interruptible() directly is not allowed (e.g. NMI).
+ */
+static void lib_ring_buffer_pending_wakeup_buf(struct irq_work *entry)
+{
+ struct lib_ring_buffer *buf = container_of(entry, struct lib_ring_buffer,
+ wakeup_pending);
+ wake_up_interruptible(&buf->read_wait);
+}
+
+/*
+ * irq_work handler: deferred wakeup of the channel-level reader wait queue.
+ * Counterpart of lib_ring_buffer_pending_wakeup_buf for the channel.
+ */
+static void lib_ring_buffer_pending_wakeup_chan(struct irq_work *entry)
+{
+ struct channel *chan = container_of(entry, struct channel, wakeup_pending);
+ wake_up_interruptible(&chan->read_wait);
+}
+
/*
* Must be called under cpu hotplug protection.
*/
init_waitqueue_head(&buf->read_wait);
init_waitqueue_head(&buf->write_wait);
+ init_irq_work(&buf->wakeup_pending, lib_ring_buffer_pending_wakeup_buf);
raw_spin_lock_init(&buf->raw_tick_nohz_spinlock);
/*
kref_init(&chan->ref);
init_waitqueue_head(&chan->read_wait);
init_waitqueue_head(&chan->hp_wait);
+ init_irq_work(&chan->wakeup_pending, lib_ring_buffer_pending_wakeup_chan);
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
const struct lib_ring_buffer_config *config = &chan->backend.config;
void *priv;
+ irq_work_sync(&chan->wakeup_pending);
+
channel_unregister_notifiers(chan);
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
commit_count, idx);
/*
- * RING_BUFFER_WAKEUP_BY_WRITER wakeup is not lock-free.
+ * RING_BUFFER_WAKEUP_BY_WRITER uses an irq_work to issue
+ * the wakeups.
*/
if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER
&& atomic_long_read(&buf->active_readers)
&& lib_ring_buffer_poll_deliver(config, buf, chan)) {
- wake_up_interruptible(&buf->read_wait);
- wake_up_interruptible(&chan->read_wait);
+ irq_work_queue(&buf->wakeup_pending);
+ irq_work_queue(&chan->wakeup_pending);
}
}
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_get_next_record);
+/*
+ * Put the current record after its payload has been consumed: advance the
+ * iterator read offset past the payload and return the iterator to the
+ * "test record" state. If the consumed payload reached the end of the
+ * current subbuffer's data, also put the subbuffer so the writer can
+ * reclaim it, and restart at the "get subbuffer" state.
+ *
+ * No-op if buf is NULL or if no record is currently held
+ * (state != ITER_NEXT_RECORD).
+ */
+void lib_ring_buffer_put_current_record(struct lib_ring_buffer *buf)
+{
+ struct lib_ring_buffer_iter *iter;
+
+ if (!buf)
+ return;
+ iter = &buf->iter;
+ if (iter->state != ITER_NEXT_RECORD)
+ return;
+ iter->read_offset += iter->payload_len;
+ iter->state = ITER_TEST_RECORD;
+ if (iter->read_offset - iter->consumed >= iter->data_size) {
+ lib_ring_buffer_put_next_subbuf(buf);
+ iter->state = ITER_GET_SUBBUF;
+ }
+}
+
static int buf_is_higher(void *a, void *b)
{
struct lib_ring_buffer *bufa = a;
return -EFAULT;
}
read_count += copy_len;
- };
- return read_count;
+ }
+ goto put_record;
nodata:
*ppos = 0;
chan->iter.len_left = 0;
+put_record:
+ lib_ring_buffer_put_current_record(buf);
return read_count;
}
#include <lttng-tracer.h>
#include <lttng-tp-mempool.h>
#include <lib/ringbuffer/frontend_types.h>
+#include <lib/ringbuffer/iterator.h>
/*
* This is LTTng's own personal way to create a system call as an external
#endif
static const struct file_operations lttng_session_fops;
+static const struct file_operations lttng_trigger_group_fops;
static const struct file_operations lttng_channel_fops;
static const struct file_operations lttng_metadata_fops;
static const struct file_operations lttng_event_fops;
return ret;
}
+/*
+ * irq_work handler: wake up readers blocked on the trigger group
+ * notification queue. Deferred through irq_work so notifications can be
+ * posted from restricted contexts.
+ */
+static
+void trigger_send_notification_work_wakeup(struct irq_work *entry)
+{
+ struct lttng_trigger_group *trigger_group = container_of(entry,
+ struct lttng_trigger_group, wakeup_pending);
+ wake_up_interruptible(&trigger_group->read_wait);
+}
+
+/*
+ * Create a trigger group object and expose it to user-space through an
+ * anonymous-inode file descriptor bound to lttng_trigger_group_fops.
+ *
+ * Returns the new fd on success, a negative errno on failure. On failure
+ * the partially created resources are released in reverse order of
+ * acquisition (file -> fd -> group).
+ */
+static
+int lttng_abi_create_trigger_group(void)
+{
+ struct lttng_trigger_group *trigger_group;
+ struct file *trigger_group_file;
+ int trigger_group_fd, ret;
+
+ trigger_group = lttng_trigger_group_create();
+ if (!trigger_group)
+ return -ENOMEM;
+
+ trigger_group_fd = lttng_get_unused_fd();
+ if (trigger_group_fd < 0) {
+ ret = trigger_group_fd;
+ goto fd_error;
+ }
+ trigger_group_file = anon_inode_getfile("[lttng_trigger_group]",
+ &lttng_trigger_group_fops,
+ trigger_group, O_RDWR);
+ if (IS_ERR(trigger_group_file)) {
+ ret = PTR_ERR(trigger_group_file);
+ goto file_error;
+ }
+
+ /* Initialize wait/wakeup machinery before the fd becomes visible. */
+ trigger_group->file = trigger_group_file;
+ init_waitqueue_head(&trigger_group->read_wait);
+ init_irq_work(&trigger_group->wakeup_pending,
+ trigger_send_notification_work_wakeup);
+ fd_install(trigger_group_fd, trigger_group_file);
+ return trigger_group_fd;
+
+file_error:
+ put_unused_fd(trigger_group_fd);
+fd_error:
+ lttng_trigger_group_destroy(trigger_group);
+ return ret;
+}
+
static
int lttng_abi_tracepoint_list(void)
{
* Returns after all previously running probes have completed
* LTTNG_KERNEL_TRACER_ABI_VERSION
* Returns the LTTng kernel tracer ABI version
+ * LTTNG_KERNEL_TRIGGER_GROUP_CREATE
+ * Returns a LTTng trigger group file descriptor
*
* The returned session will be deleted when its file descriptor is closed.
*/
case LTTNG_KERNEL_OLD_SESSION:
case LTTNG_KERNEL_SESSION:
return lttng_abi_create_session();
+ case LTTNG_KERNEL_TRIGGER_GROUP_CREATE:
+ return lttng_abi_create_trigger_group();
case LTTNG_KERNEL_OLD_TRACER_VERSION:
{
struct lttng_kernel_tracer_version v;
#endif
};
+/*
+ * When encountering empty buffer, flush current sub-buffer if non-empty
+ * and retry (if new data available to read after flush).
+ */
+/*
+ * read(2) implementation for the trigger notification stream.
+ *
+ * Copies complete or partial notification records from the group's ring
+ * buffer into user_buf. *ppos != 0 means a record was only partially
+ * copied by a previous call: finish it first using chan->iter.len_left
+ * before fetching the next record. Blocks (unless O_NONBLOCK) on
+ * trigger_group->read_wait until data or -ENODATA. Returns the number of
+ * bytes copied, 0 at end of file, or a negative errno.
+ */
+static
+ssize_t lttng_trigger_group_notif_read(struct file *filp, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct lttng_trigger_group *trigger_group = filp->private_data;
+ struct channel *chan = trigger_group->chan;
+ struct lib_ring_buffer *buf = trigger_group->buf;
+ ssize_t read_count = 0, len;
+ size_t read_offset;
+
+ might_sleep();
+ if (!lttng_access_ok(VERIFY_WRITE, user_buf, count))
+ return -EFAULT;
+
+ /* Finish copy of previous record */
+ if (*ppos != 0) {
+ if (read_count < count) {
+ /* Resume from the saved offset of the partial record. */
+ len = chan->iter.len_left;
+ read_offset = *ppos;
+ goto skip_get_next;
+ }
+ }
+
+ while (read_count < count) {
+ size_t copy_len, space_left;
+
+ len = lib_ring_buffer_get_next_record(chan, buf);
+len_test:
+ if (len < 0) {
+ /*
+ * Check if buffer is finalized (end of file).
+ */
+ if (len == -ENODATA) {
+ /* A 0 read_count will tell about end of file */
+ goto nodata;
+ }
+ if (filp->f_flags & O_NONBLOCK) {
+ if (!read_count)
+ read_count = -EAGAIN;
+ goto nodata;
+ } else {
+ int error;
+
+ /*
+ * No data available at the moment, return what
+ * we got.
+ */
+ if (read_count)
+ goto nodata;
+
+ /*
+ * Wait for returned len to be >= 0 or -ENODATA.
+ */
+ error = wait_event_interruptible(
+ trigger_group->read_wait,
+ ((len = lib_ring_buffer_get_next_record(
+ chan, buf)), len != -EAGAIN));
+ CHAN_WARN_ON(chan, len == -EBUSY);
+ if (error) {
+ /* Interrupted by signal: propagate. */
+ read_count = error;
+ goto nodata;
+ }
+ CHAN_WARN_ON(chan, len < 0 && len != -ENODATA);
+ goto len_test;
+ }
+ }
+ read_offset = buf->iter.read_offset;
+skip_get_next:
+ space_left = count - read_count;
+ if (len <= space_left) {
+ /* Whole record fits: clear partial-record state. */
+ copy_len = len;
+ chan->iter.len_left = 0;
+ *ppos = 0;
+ } else {
+ /* Partial copy: remember remaining length and offset. */
+ copy_len = space_left;
+ chan->iter.len_left = len - copy_len;
+ *ppos = read_offset + copy_len;
+ }
+ if (__lib_ring_buffer_copy_to_user(&buf->backend, read_offset,
+ &user_buf[read_count],
+ copy_len)) {
+ /*
+ * Leave the len_left and ppos values at their current
+ * state, as we currently have a valid event to read.
+ */
+ return -EFAULT;
+ }
+ read_count += copy_len;
+ }
+ goto put_record;
+
+nodata:
+ *ppos = 0;
+ chan->iter.len_left = 0;
+
+put_record:
+ /* Release the current record/subbuffer before returning to user. */
+ lib_ring_buffer_put_current_record(buf);
+ return read_count;
+}
+
+/*
+ * If the ring buffer is non empty (even just a partial subbuffer), return that
+ * there is data available. Perform a ring buffer flush if we encounter a
+ * non-empty ring buffer which does not have any consumeable subbuffer available.
+ */
+/*
+ * poll(2) implementation for the trigger notification stream.
+ *
+ * Reports POLLIN/POLLRDNORM when a subbuffer is consumable,
+ * POLLPRI | POLLRDBAND when the buffer has wrapped a full buffer's worth
+ * of data, POLLHUP when finalized and empty, POLLERR when the channel is
+ * disabled. If the buffer is non-empty but no full subbuffer is
+ * consumable, flushes (remote switch) and re-evaluates.
+ */
+static
+unsigned int lttng_trigger_group_notif_poll(struct file *filp,
+ poll_table *wait)
+{
+ unsigned int mask = 0;
+ struct lttng_trigger_group *trigger_group = filp->private_data;
+ struct channel *chan = trigger_group->chan;
+ struct lib_ring_buffer *buf = trigger_group->buf;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
+ int finalized, disabled;
+ unsigned long consumed, offset;
+ size_t subbuffer_header_size = config->cb.subbuffer_header_size();
+
+ if (filp->f_mode & FMODE_READ) {
+ poll_wait_set_exclusive(wait);
+ poll_wait(filp, &trigger_group->read_wait, wait);
+
+ finalized = lib_ring_buffer_is_finalized(config, buf);
+ disabled = lib_ring_buffer_channel_is_disabled(chan);
+
+ /*
+ * lib_ring_buffer_is_finalized() contains a smp_rmb() ordering
+ * finalized load before offsets loads.
+ */
+ WARN_ON(atomic_long_read(&buf->active_readers) != 1);
+retry:
+ if (disabled)
+ return POLLERR;
+
+ offset = lib_ring_buffer_get_offset(config, buf);
+ consumed = lib_ring_buffer_get_consumed(config, buf);
+
+ /*
+ * If there is no buffer available to consume.
+ */
+ if (subbuf_trunc(offset, chan) - subbuf_trunc(consumed, chan) == 0) {
+ /*
+ * If there is a non-empty subbuffer, flush and try again.
+ */
+ if (subbuf_offset(offset, chan) > subbuffer_header_size) {
+ lib_ring_buffer_switch_remote(buf);
+ goto retry;
+ }
+
+ if (finalized)
+ return POLLHUP;
+ else {
+ /*
+ * The memory barriers
+ * __wait_event()/wake_up_interruptible() take
+ * care of "raw_spin_is_locked" memory ordering.
+ */
+ if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
+ goto retry;
+ else
+ return 0;
+ }
+ } else {
+ if (subbuf_trunc(offset, chan) - subbuf_trunc(consumed, chan)
+ >= chan->backend.buf_size)
+ return POLLPRI | POLLRDBAND;
+ else
+ return POLLIN | POLLRDNORM;
+ }
+ }
+
+ return mask;
+}
+
+/**
+ * lttng_trigger_group_notif_open - trigger ring buffer open file operation
+ * @inode: opened inode
+ * @file: opened file
+ *
+ * Open implementation. Makes sure only one open instance of a buffer is
+ * done at a given moment.
+ */
+/**
+ * lttng_trigger_group_notif_open - trigger ring buffer open file operation
+ * @inode: opened inode
+ * @file: opened file
+ *
+ * Open implementation. Makes sure only one open instance of a buffer is
+ * done at a given moment (delegated to lib_ring_buffer_open()).
+ */
+static int lttng_trigger_group_notif_open(struct inode *inode, struct file *file)
+{
+ struct lttng_trigger_group *trigger_group = inode->i_private;
+ struct lib_ring_buffer *buf = trigger_group->buf;
+
+ file->private_data = trigger_group;
+ return lib_ring_buffer_open(inode, file, buf);
+}
+
+/**
+ * lttng_trigger_group_notif_release - trigger ring buffer release file operation
+ * @inode: opened inode
+ * @file: opened file
+ *
+ * Release implementation.
+ */
+/**
+ * lttng_trigger_group_notif_release - trigger ring buffer release file operation
+ * @inode: opened inode
+ * @file: opened file
+ *
+ * Release implementation. Drops the buffer reader reference, then the
+ * reference the notification stream holds on the trigger group file
+ * (taken in lttng_abi_open_trigger_group_stream()).
+ */
+static int lttng_trigger_group_notif_release(struct inode *inode, struct file *file)
+{
+ struct lttng_trigger_group *trigger_group = file->private_data;
+ struct lib_ring_buffer *buf = trigger_group->buf;
+ int ret;
+
+ ret = lib_ring_buffer_release(inode, file, buf);
+ if (ret)
+ return ret;
+ fput(trigger_group->file);
+ return 0;
+}
+
+static const struct file_operations lttng_trigger_group_notif_fops = {
+ .owner = THIS_MODULE,
+ .open = lttng_trigger_group_notif_open,
+ .release = lttng_trigger_group_notif_release,
+ .read = lttng_trigger_group_notif_read,
+ .poll = lttng_trigger_group_notif_poll,
+};
+
/**
* lttng_metadata_ring_buffer_poll - LTTng ring buffer poll file operation
* @filp: the file
static
int lttng_abi_create_stream_fd(struct file *channel_file, void *stream_priv,
- const struct file_operations *fops)
+ const struct file_operations *fops, const char *name)
{
int stream_fd, ret;
struct file *stream_file;
ret = stream_fd;
goto fd_error;
}
- stream_file = anon_inode_getfile("[lttng_stream]", fops,
- stream_priv, O_RDWR);
+ stream_file = anon_inode_getfile(name, fops, stream_priv, O_RDWR);
if (IS_ERR(stream_file)) {
ret = PTR_ERR(stream_file);
goto file_error;
stream_priv = buf;
ret = lttng_abi_create_stream_fd(channel_file, stream_priv,
- <tng_stream_ring_buffer_file_operations);
+ <tng_stream_ring_buffer_file_operations,
+ "[lttng_stream]");
if (ret < 0)
goto fd_error;
}
ret = lttng_abi_create_stream_fd(channel_file, stream_priv,
- <tng_metadata_ring_buffer_file_operations);
+ <tng_metadata_ring_buffer_file_operations,
+ "[lttng_metadata_stream]");
if (ret < 0)
goto fd_error;
return ret;
}
+/*
+ * Open the notification stream of a trigger group: acquire the read side
+ * of the group's ring buffer channel and expose it as a new stream fd.
+ *
+ * The new stream pins the trigger group by elevating the notification
+ * file's refcount; both the refcount and the buffer reader are released
+ * on the error paths, in reverse order.
+ *
+ * Returns the stream fd on success, a negative errno on failure.
+ */
+static
+int lttng_abi_open_trigger_group_stream(struct file *notif_file)
+{
+ struct lttng_trigger_group *trigger_group = notif_file->private_data;
+ struct channel *chan = trigger_group->chan;
+ struct lib_ring_buffer *buf;
+ int ret;
+ void *stream_priv;
+
+ buf = trigger_group->ops->buffer_read_open(chan);
+ if (!buf)
+ return -ENOENT;
+
+ /* The trigger notification fd holds a reference on the trigger group */
+ if (!atomic_long_add_unless(&notif_file->f_count, 1, LONG_MAX)) {
+ ret = -EOVERFLOW;
+ goto refcount_error;
+ }
+ trigger_group->buf = buf;
+ stream_priv = trigger_group;
+ ret = lttng_abi_create_stream_fd(notif_file, stream_priv,
+ &lttng_trigger_group_notif_fops,
+ "[lttng_trigger_stream]");
+ if (ret < 0)
+ goto fd_error;
+
+ return ret;
+
+fd_error:
+ atomic_long_dec(&notif_file->f_count);
+refcount_error:
+ trigger_group->ops->buffer_read_close(buf);
+ return ret;
+}
+
static
int lttng_abi_create_event(struct file *channel_file,
struct lttng_kernel_event *event_param)
}
if (event_param->instrumentation == LTTNG_KERNEL_TRACEPOINT
|| event_param->instrumentation == LTTNG_KERNEL_SYSCALL) {
- struct lttng_enabler *enabler;
+ struct lttng_event_enabler *event_enabler;
if (strutils_is_star_glob_pattern(event_param->name)) {
/*
* If the event name is a star globbing pattern,
* we create the special star globbing enabler.
*/
- enabler = lttng_enabler_create(LTTNG_ENABLER_STAR_GLOB,
+ event_enabler = lttng_event_enabler_create(LTTNG_ENABLER_FORMAT_STAR_GLOB,
event_param, channel);
} else {
- enabler = lttng_enabler_create(LTTNG_ENABLER_NAME,
+ event_enabler = lttng_event_enabler_create(LTTNG_ENABLER_FORMAT_NAME,
event_param, channel);
}
- priv = enabler;
+ priv = event_enabler;
} else {
struct lttng_event *event;
return ret;
}
+/*
+ * ioctl handler for trigger file descriptors.
+ *
+ * file->private_data starts with an enum lttng_event_type discriminator:
+ * the fd either wraps a concrete trigger (LTTNG_TYPE_EVENT) or an enabler
+ * (LTTNG_TYPE_ENABLER); each command dispatches on that type. Commands:
+ * ENABLE, DISABLE, FILTER (enabler only), ADD_CALLSITE (trigger only).
+ * Returns 0/positive on success, negative errno on failure,
+ * -ENOIOCTLCMD for unknown commands.
+ */
+static
+long lttng_trigger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct lttng_trigger *trigger;
+ struct lttng_trigger_enabler *trigger_enabler;
+ enum lttng_event_type *evtype = file->private_data;
+
+ switch (cmd) {
+ case LTTNG_KERNEL_ENABLE:
+ switch (*evtype) {
+ case LTTNG_TYPE_EVENT:
+ trigger = file->private_data;
+ return lttng_trigger_enable(trigger);
+ case LTTNG_TYPE_ENABLER:
+ trigger_enabler = file->private_data;
+ return lttng_trigger_enabler_enable(trigger_enabler);
+ default:
+ WARN_ON_ONCE(1);
+ return -ENOSYS;
+ }
+ case LTTNG_KERNEL_DISABLE:
+ switch (*evtype) {
+ case LTTNG_TYPE_EVENT:
+ trigger = file->private_data;
+ return lttng_trigger_disable(trigger);
+ case LTTNG_TYPE_ENABLER:
+ trigger_enabler = file->private_data;
+ return lttng_trigger_enabler_disable(trigger_enabler);
+ default:
+ WARN_ON_ONCE(1);
+ return -ENOSYS;
+ }
+ case LTTNG_KERNEL_FILTER:
+ switch (*evtype) {
+ case LTTNG_TYPE_EVENT:
+ return -EINVAL;
+ case LTTNG_TYPE_ENABLER:
+ trigger_enabler = file->private_data;
+ return lttng_trigger_enabler_attach_bytecode(trigger_enabler,
+ (struct lttng_kernel_filter_bytecode __user *) arg);
+ default:
+ WARN_ON_ONCE(1);
+ return -ENOSYS;
+ }
+ case LTTNG_KERNEL_ADD_CALLSITE:
+ switch (*evtype) {
+ case LTTNG_TYPE_EVENT:
+ trigger = file->private_data;
+ return lttng_trigger_add_callsite(trigger,
+ (struct lttng_kernel_event_callsite __user *) arg);
+ case LTTNG_TYPE_ENABLER:
+ return -EINVAL;
+ default:
+ WARN_ON_ONCE(1);
+ return -ENOSYS;
+ }
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+/*
+ * release handler for trigger file descriptors: drop the reference the
+ * trigger (or enabler) holds on its trigger group's file. The object
+ * itself is torn down later by lttng_trigger_group_destroy().
+ */
+static
+int lttng_trigger_release(struct inode *inode, struct file *file)
+{
+ struct lttng_trigger *trigger;
+ struct lttng_trigger_enabler *trigger_enabler;
+ enum lttng_event_type *evtype = file->private_data;
+
+ /* private_data may be NULL if setup failed before installation. */
+ if (!evtype)
+ return 0;
+
+ switch (*evtype) {
+ case LTTNG_TYPE_EVENT:
+ trigger = file->private_data;
+ if (trigger)
+ fput(trigger->group->file);
+ break;
+ case LTTNG_TYPE_ENABLER:
+ trigger_enabler = file->private_data;
+ if (trigger_enabler)
+ fput(trigger_enabler->group->file);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct file_operations lttng_trigger_fops = {
+ .owner = THIS_MODULE,
+ .release = lttng_trigger_release,
+ .unlocked_ioctl = lttng_trigger_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = lttng_trigger_ioctl,
+#endif
+};
+
+/*
+ * Create a trigger (or trigger enabler) inside a trigger group and
+ * return a new fd bound to lttng_trigger_fops.
+ *
+ * TRACEPOINT/SYSCALL instrumentation creates an enabler (star-glob or
+ * exact-name); KPROBE/UPROBE create a concrete trigger object.
+ * KRETPROBE, FUNCTION and NOOP are rejected with -EINVAL. The new fd
+ * takes a reference on the trigger group file, dropped by
+ * lttng_trigger_release(). Error paths unwind in reverse order.
+ */
+static
+int lttng_abi_create_trigger(struct file *trigger_group_file,
+ struct lttng_kernel_trigger *trigger_param)
+{
+ struct lttng_trigger_group *trigger_group = trigger_group_file->private_data;
+ int trigger_fd, ret;
+ struct file *trigger_file;
+ void *priv;
+
+ switch (trigger_param->instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ case LTTNG_KERNEL_UPROBE:
+ break;
+ case LTTNG_KERNEL_KPROBE:
+ /* Defensive NUL-termination of user-supplied symbol name. */
+ trigger_param->u.kprobe.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
+ break;
+ case LTTNG_KERNEL_SYSCALL:
+ break;
+ case LTTNG_KERNEL_KRETPROBE:
+ /* Placing a trigger on kretprobe is not supported. */
+ case LTTNG_KERNEL_FUNCTION:
+ case LTTNG_KERNEL_NOOP:
+ default:
+ ret = -EINVAL;
+ goto inval_instr;
+ }
+
+ /* Defensive NUL-termination of user-supplied trigger name. */
+ trigger_param->name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
+
+ trigger_fd = lttng_get_unused_fd();
+ if (trigger_fd < 0) {
+ ret = trigger_fd;
+ goto fd_error;
+ }
+
+ trigger_file = anon_inode_getfile("[lttng_trigger]",
+ &lttng_trigger_fops,
+ NULL, O_RDWR);
+ if (IS_ERR(trigger_file)) {
+ ret = PTR_ERR(trigger_file);
+ goto file_error;
+ }
+
+ /* The trigger holds a reference on the trigger group. */
+ if (!atomic_long_add_unless(&trigger_group_file->f_count, 1, LONG_MAX)) {
+ ret = -EOVERFLOW;
+ goto refcount_error;
+ }
+
+ if (trigger_param->instrumentation == LTTNG_KERNEL_TRACEPOINT
+ || trigger_param->instrumentation == LTTNG_KERNEL_SYSCALL) {
+ struct lttng_trigger_enabler *enabler;
+
+ if (strutils_is_star_glob_pattern(trigger_param->name)) {
+ /*
+ * If the event name is a star globbing pattern,
+ * we create the special star globbing enabler.
+ */
+ enabler = lttng_trigger_enabler_create(trigger_group,
+ LTTNG_ENABLER_FORMAT_STAR_GLOB, trigger_param);
+ } else {
+ enabler = lttng_trigger_enabler_create(trigger_group,
+ LTTNG_ENABLER_FORMAT_NAME, trigger_param);
+ }
+ priv = enabler;
+ } else {
+ struct lttng_trigger *trigger;
+
+ /*
+ * We tolerate no failure path after trigger creation. It
+ * will stay invariant for the rest of the session.
+ */
+ trigger = lttng_trigger_create(NULL, trigger_param->id,
+ trigger_group, trigger_param, NULL,
+ trigger_param->instrumentation);
+ WARN_ON_ONCE(!trigger);
+ if (IS_ERR(trigger)) {
+ ret = PTR_ERR(trigger);
+ goto trigger_error;
+ }
+ priv = trigger;
+ }
+ trigger_file->private_data = priv;
+ fd_install(trigger_fd, trigger_file);
+ return trigger_fd;
+
+trigger_error:
+ atomic_long_dec(&trigger_group_file->f_count);
+refcount_error:
+ fput(trigger_file);
+file_error:
+ put_unused_fd(trigger_fd);
+fd_error:
+inval_instr:
+ return ret;
+}
+
+/*
+ * ioctl handler for trigger group file descriptors.
+ *
+ * NOTIFICATION_FD opens the group's notification stream;
+ * TRIGGER_CREATE copies the trigger description from user-space and
+ * creates a trigger/enabler fd. Returns -ENOIOCTLCMD for unknown
+ * commands.
+ */
+static
+long lttng_trigger_group_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case LTTNG_KERNEL_TRIGGER_GROUP_NOTIFICATION_FD:
+ {
+ return lttng_abi_open_trigger_group_stream(file);
+ }
+ case LTTNG_KERNEL_TRIGGER_CREATE:
+ {
+ struct lttng_kernel_trigger utrigger_param;
+
+ if (copy_from_user(&utrigger_param,
+ (struct lttng_kernel_trigger __user *) arg,
+ sizeof(utrigger_param)))
+ return -EFAULT;
+ return lttng_abi_create_trigger(file, &utrigger_param);
+ }
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+
+/*
+ * release handler for trigger group file descriptors: tear down the
+ * group when the last reference to its file is dropped.
+ */
+static
+int lttng_trigger_group_release(struct inode *inode, struct file *file)
+{
+ struct lttng_trigger_group *trigger_group = file->private_data;
+
+ if (trigger_group)
+ lttng_trigger_group_destroy(trigger_group);
+ return 0;
+}
+
+static const struct file_operations lttng_trigger_group_fops = {
+ .owner = THIS_MODULE,
+ .release = lttng_trigger_group_release,
+ .unlocked_ioctl = lttng_trigger_group_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = lttng_trigger_group_ioctl,
+#endif
+};
+
/**
* lttng_channel_ioctl - lttng syscall through ioctl
*
long lttng_event_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct lttng_event *event;
- struct lttng_enabler *enabler;
+ struct lttng_event_enabler *event_enabler;
enum lttng_event_type *evtype = file->private_data;
switch (cmd) {
event = file->private_data;
return lttng_event_enable(event);
case LTTNG_TYPE_ENABLER:
- enabler = file->private_data;
- return lttng_enabler_enable(enabler);
+ event_enabler = file->private_data;
+ return lttng_event_enabler_enable(event_enabler);
default:
WARN_ON_ONCE(1);
return -ENOSYS;
event = file->private_data;
return lttng_event_disable(event);
case LTTNG_TYPE_ENABLER:
- enabler = file->private_data;
- return lttng_enabler_disable(enabler);
+ event_enabler = file->private_data;
+ return lttng_event_enabler_disable(event_enabler);
default:
WARN_ON_ONCE(1);
return -ENOSYS;
return -EINVAL;
case LTTNG_TYPE_ENABLER:
{
- enabler = file->private_data;
- return lttng_enabler_attach_bytecode(enabler,
+ event_enabler = file->private_data;
+ return lttng_event_enabler_attach_bytecode(event_enabler,
(struct lttng_kernel_filter_bytecode __user *) arg);
}
default:
int lttng_event_release(struct inode *inode, struct file *file)
{
struct lttng_event *event;
- struct lttng_enabler *enabler;
+ struct lttng_event_enabler *event_enabler;
enum lttng_event_type *evtype = file->private_data;
if (!evtype)
fput(event->chan->file);
break;
case LTTNG_TYPE_ENABLER:
- enabler = file->private_data;
- if (enabler)
- fput(enabler->chan->file);
+ event_enabler = file->private_data;
+ if (event_enabler)
+ fput(event_enabler->chan->file);
break;
default:
WARN_ON_ONCE(1);
} u;
} __attribute__((packed));
+/* ABI padding reserved for future extension of the trigger structure. */
+#define LTTNG_KERNEL_TRIGGER_PADDING1 16
+/* Parenthesized so the macro expands safely in any expression context. */
+#define LTTNG_KERNEL_TRIGGER_PADDING2 (LTTNG_KERNEL_SYM_NAME_LEN + 32)
+/*
+ * User-space visible trigger description, passed through the
+ * LTTNG_KERNEL_TRIGGER_CREATE ioctl. Packed: part of the kernel ABI.
+ */
+struct lttng_kernel_trigger {
+ uint64_t id;
+ char name[LTTNG_KERNEL_SYM_NAME_LEN]; /* event name */
+ enum lttng_kernel_instrumentation instrumentation;
+ char padding[LTTNG_KERNEL_TRIGGER_PADDING1];
+
+ /* Per instrumentation type configuration */
+ union {
+ struct lttng_kernel_kretprobe kretprobe;
+ struct lttng_kernel_kprobe kprobe;
+ struct lttng_kernel_function_tracer ftrace;
+ struct lttng_kernel_uprobe uprobe;
+ char padding[LTTNG_KERNEL_TRIGGER_PADDING2];
+ } u;
+} __attribute__((packed));
+
struct lttng_kernel_tracer_version {
uint32_t major;
uint32_t minor;
#define LTTNG_KERNEL_SYSCALL_LIST _IO(0xF6, 0x4A)
#define LTTNG_KERNEL_TRACER_ABI_VERSION \
_IOR(0xF6, 0x4B, struct lttng_kernel_tracer_abi_version)
+#define LTTNG_KERNEL_TRIGGER_GROUP_CREATE _IO(0xF6, 0x4C)
+
+/* Trigger group file descriptor ioctl */
+#define LTTNG_KERNEL_TRIGGER_GROUP_NOTIFICATION_FD \
+ _IO(0xF6, 0x30)
+#define LTTNG_KERNEL_TRIGGER_CREATE \
+ _IOW(0xF6, 0x31, struct lttng_kernel_trigger)
/* Session FD ioctl */
/* lttng-abi-old.h reserve 0x50, 0x51, 0x52, and 0x53. */
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <wrapper/file.h>
-#include <linux/jhash.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/uuid.h>
#include <lttng-abi-old.h>
#include <lttng-endian.h>
#include <lttng-string-utils.h>
+#include <lttng-utils.h>
#include <wrapper/ringbuffer/backend.h>
#include <wrapper/ringbuffer/frontend.h>
#define METADATA_CACHE_DEFAULT_SIZE 4096
static LIST_HEAD(sessions);
+static LIST_HEAD(trigger_groups);
static LIST_HEAD(lttng_transport_list);
/*
* Protect the sessions and metadata caches.
*/
static DEFINE_MUTEX(sessions_mutex);
static struct kmem_cache *event_cache;
+static struct kmem_cache *trigger_cache;
-static void lttng_session_lazy_sync_enablers(struct lttng_session *session);
-static void lttng_session_sync_enablers(struct lttng_session *session);
-static void lttng_enabler_destroy(struct lttng_enabler *enabler);
+static void lttng_session_lazy_sync_event_enablers(struct lttng_session *session);
+static void lttng_session_sync_event_enablers(struct lttng_session *session);
+static void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler);
+static void lttng_trigger_enabler_destroy(struct lttng_trigger_enabler *trigger_enabler);
+static void lttng_trigger_group_sync_enablers(struct lttng_trigger_group *trigger_group);
static void _lttng_event_destroy(struct lttng_event *event);
+static void _lttng_trigger_destroy(struct lttng_trigger *trigger);
static void _lttng_channel_destroy(struct lttng_channel *chan);
static int _lttng_event_unregister(struct lttng_event *event);
+static int _lttng_trigger_unregister(struct lttng_trigger *trigger);
static
int _lttng_event_metadata_statedump(struct lttng_session *session,
struct lttng_channel *chan,
mutex_unlock(&sessions_mutex);
}
+/*
+ * Look up a registered transport by name in lttng_transport_list.
+ * Returns the transport, or NULL if no transport has that name.
+ * NOTE(review): list traversal appears to rely on the caller holding
+ * the appropriate lock (callers hold sessions_mutex) — confirm.
+ */
+static struct lttng_transport *lttng_transport_find(const char *name)
+{
+ struct lttng_transport *transport;
+
+ list_for_each_entry(transport, &lttng_transport_list, node) {
+ if (!strcmp(transport->name, name))
+ return transport;
+ }
+ return NULL;
+}
+
/*
* Called with sessions lock held.
*/
return NULL;
}
+/*
+ * Allocate and initialize a trigger group backed by the "relay-trigger"
+ * transport: pin the transport module, create the notification ring
+ * buffer channel, initialize the enabler/trigger lists and hash table,
+ * and link the group into the global trigger_groups list.
+ *
+ * Returns the new group, or NULL on failure (transport not found,
+ * module pin failure, allocation or channel creation failure).
+ * Serialized by sessions_mutex.
+ */
+struct lttng_trigger_group *lttng_trigger_group_create(void)
+{
+ struct lttng_transport *transport = NULL;
+ struct lttng_trigger_group *trigger_group;
+ const char *transport_name = "relay-trigger";
+ size_t subbuf_size = 4096; //TODO
+ size_t num_subbuf = 16; //TODO
+ unsigned int switch_timer_interval = 0;
+ unsigned int read_timer_interval = 0;
+ int i;
+
+ mutex_lock(&sessions_mutex);
+
+ transport = lttng_transport_find(transport_name);
+ if (!transport) {
+ printk(KERN_WARNING "LTTng transport %s not found\n",
+ transport_name);
+ goto notransport;
+ }
+ if (!try_module_get(transport->owner)) {
+ printk(KERN_WARNING "LTT : Can't lock transport module.\n");
+ goto notransport;
+ }
+
+ trigger_group = lttng_kvzalloc(sizeof(struct lttng_trigger_group),
+ GFP_KERNEL);
+ if (!trigger_group)
+ goto nomem;
+
+ trigger_group->ops = &transport->ops;
+ trigger_group->chan = transport->ops.channel_create(transport_name,
+ trigger_group, NULL, subbuf_size, num_subbuf,
+ switch_timer_interval, read_timer_interval);
+ if (!trigger_group->chan)
+ goto create_error;
+
+ trigger_group->transport = transport;
+ INIT_LIST_HEAD(&trigger_group->enablers_head);
+ INIT_LIST_HEAD(&trigger_group->triggers_head);
+ for (i = 0; i < LTTNG_TRIGGER_HT_SIZE; i++)
+ INIT_HLIST_HEAD(&trigger_group->triggers_ht.table[i]);
+
+ list_add(&trigger_group->node, &trigger_groups);
+ mutex_unlock(&sessions_mutex);
+
+ return trigger_group;
+
+create_error:
+ lttng_kvfree(trigger_group);
+nomem:
+ if (transport)
+ module_put(transport->owner);
+notransport:
+ mutex_unlock(&sessions_mutex);
+ return NULL;
+}
+
void metadata_cache_destroy(struct kref *kref)
{
struct lttng_metadata_cache *cache =
struct lttng_channel *chan, *tmpchan;
struct lttng_event *event, *tmpevent;
struct lttng_metadata_stream *metadata_stream;
- struct lttng_enabler *enabler, *tmpenabler;
+ struct lttng_event_enabler *event_enabler, *tmp_event_enabler;
int ret;
mutex_lock(&sessions_mutex);
WRITE_ONCE(session->active, 0);
list_for_each_entry(chan, &session->chan, list) {
- ret = lttng_syscalls_unregister(chan);
+ ret = lttng_syscalls_unregister_event(chan);
WARN_ON(ret);
}
list_for_each_entry(event, &session->events, list) {
WARN_ON(ret);
}
synchronize_trace(); /* Wait for in-flight events to complete */
- list_for_each_entry_safe(enabler, tmpenabler,
+ list_for_each_entry_safe(event_enabler, tmp_event_enabler,
&session->enablers_head, node)
- lttng_enabler_destroy(enabler);
+ lttng_event_enabler_destroy(event_enabler);
list_for_each_entry_safe(event, tmpevent, &session->events, list)
_lttng_event_destroy(event);
list_for_each_entry_safe(chan, tmpchan, &session->chan, list) {
lttng_kvfree(session);
}
+/*
+ * Tear down a trigger group: unregister syscall triggers and all
+ * individual triggers, wait for in-flight probes (synchronize_trace)
+ * and pending wakeups (irq_work_sync), then free enablers, triggers,
+ * the ring buffer channel, the transport module reference and the
+ * group itself. No-op on NULL. Serialized by sessions_mutex.
+ */
+void lttng_trigger_group_destroy(struct lttng_trigger_group *trigger_group)
+{
+ struct lttng_trigger_enabler *trigger_enabler, *tmp_trigger_enabler;
+ struct lttng_trigger *trigger, *tmptrigger;
+ int ret;
+
+ if (!trigger_group)
+ return;
+
+ mutex_lock(&sessions_mutex);
+
+ ret = lttng_syscalls_unregister_trigger(trigger_group);
+ WARN_ON(ret);
+
+ list_for_each_entry_safe(trigger, tmptrigger,
+ &trigger_group->triggers_head, list) {
+ ret = _lttng_trigger_unregister(trigger);
+ WARN_ON(ret);
+ }
+
+ synchronize_trace(); /* Wait for in-flight triggers to complete */
+
+ irq_work_sync(&trigger_group->wakeup_pending);
+
+ list_for_each_entry_safe(trigger_enabler, tmp_trigger_enabler,
+ &trigger_group->enablers_head, node)
+ lttng_trigger_enabler_destroy(trigger_enabler);
+
+ list_for_each_entry_safe(trigger, tmptrigger,
+ &trigger_group->triggers_head, list)
+ _lttng_trigger_destroy(trigger);
+
+ trigger_group->ops->channel_destroy(trigger_group->chan);
+ module_put(trigger_group->transport->owner);
+ list_del(&trigger_group->node);
+ mutex_unlock(&sessions_mutex);
+ lttng_kvfree(trigger_group);
+}
+
int lttng_session_statedump(struct lttng_session *session)
{
int ret;
session->tstate = 1;
/* We need to sync enablers with session before activation. */
- lttng_session_sync_enablers(session);
+ lttng_session_sync_event_enablers(session);
/*
* Snapshot the number of events per channel to know the type of header
/* Set transient enabler state to "disabled" */
session->tstate = 0;
- lttng_session_sync_enablers(session);
+ lttng_session_sync_event_enablers(session);
/* Set each stream's quiescent state. */
list_for_each_entry(chan, &session->chan, list) {
}
/* Set transient enabler state to "enabled" */
channel->tstate = 1;
- lttng_session_sync_enablers(channel->session);
+ lttng_session_sync_event_enablers(channel->session);
/* Set atomically the state to "enabled" */
WRITE_ONCE(channel->enabled, 1);
end:
WRITE_ONCE(channel->enabled, 0);
/* Set transient enabler state to "enabled" */
channel->tstate = 0;
- lttng_session_sync_enablers(channel->session);
+ lttng_session_sync_event_enablers(channel->session);
end:
mutex_unlock(&sessions_mutex);
return ret;
return ret;
}
-static struct lttng_transport *lttng_transport_find(const char *name)
+int lttng_trigger_enable(struct lttng_trigger *trigger)
{
- struct lttng_transport *transport;
+ int ret = 0;
- list_for_each_entry(transport, &lttng_transport_list, node) {
- if (!strcmp(transport->name, name))
- return transport;
+ mutex_lock(&sessions_mutex);
+ if (trigger->enabled) {
+ ret = -EEXIST;
+ goto end;
}
- return NULL;
+ switch (trigger->instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ case LTTNG_KERNEL_SYSCALL:
+ ret = -EINVAL;
+ break;
+ case LTTNG_KERNEL_KPROBE:
+ case LTTNG_KERNEL_UPROBE:
+ WRITE_ONCE(trigger->enabled, 1);
+ break;
+ case LTTNG_KERNEL_FUNCTION:
+ case LTTNG_KERNEL_NOOP:
+ case LTTNG_KERNEL_KRETPROBE:
+ default:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ }
+end:
+ mutex_unlock(&sessions_mutex);
+ return ret;
+}
+
+int lttng_trigger_disable(struct lttng_trigger *trigger)
+{
+ int ret = 0;
+
+ mutex_lock(&sessions_mutex);
+ if (!trigger->enabled) {
+ ret = -EEXIST;
+ goto end;
+ }
+ switch (trigger->instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ case LTTNG_KERNEL_SYSCALL:
+ ret = -EINVAL;
+ break;
+ case LTTNG_KERNEL_KPROBE:
+ case LTTNG_KERNEL_UPROBE:
+ WRITE_ONCE(trigger->enabled, 0);
+ break;
+ case LTTNG_KERNEL_FUNCTION:
+ case LTTNG_KERNEL_NOOP:
+ case LTTNG_KERNEL_KRETPROBE:
+ default:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ }
+end:
+ mutex_unlock(&sessions_mutex);
+ return ret;
}
struct lttng_channel *lttng_channel_create(struct lttng_session *session,
struct lttng_event *event;
const char *event_name;
struct hlist_head *head;
- size_t name_len;
- uint32_t hash;
int ret;
if (chan->free_event_id == -1U) {
ret = -EINVAL;
goto type_error;
}
- name_len = strlen(event_name);
- hash = jhash(event_name, name_len, 0);
- head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
+
+ head = utils_borrow_hash_table_bucket(session->events_ht.table,
+ LTTNG_EVENT_HT_SIZE, event_name);
lttng_hlist_for_each_entry(event, head, hlist) {
WARN_ON_ONCE(!event->desc);
if (!strncmp(event->desc->name, event_name,
/* Event will be enabled by enabler sync. */
event->enabled = 0;
event->registered = 0;
- event->desc = lttng_event_get(event_name);
+ event->desc = lttng_event_desc_get(event_name);
if (!event->desc) {
ret = -ENOENT;
goto register_error;
* registration.
*/
smp_wmb();
- ret = lttng_kprobes_register(event_name,
+ ret = lttng_kprobes_register_event(event_name,
event_param->u.kprobe.symbol_name,
event_param->u.kprobe.offset,
event_param->u.kprobe.addr,
*/
smp_wmb();
- ret = lttng_uprobes_register(event_param->name,
+ ret = lttng_uprobes_register_event(event_param->name,
event_param->u.uprobe.fd,
event);
if (ret)
return ERR_PTR(ret);
}
+static
+void lttng_trigger_send_notification(struct lttng_trigger *trigger)
+{
+ struct lttng_trigger_group *trigger_group = trigger->group;
+ struct lib_ring_buffer_ctx ctx;
+ int ret;
+
+ if (unlikely(!READ_ONCE(trigger->enabled)))
+ return;
+
+ lib_ring_buffer_ctx_init(&ctx, trigger_group->chan, NULL, sizeof(trigger->id),
+ lttng_alignof(trigger->id), -1);
+ ret = trigger_group->ops->event_reserve(&ctx, 0);
+ if (ret < 0) {
+ //TODO: error handling with counter maps
+ //silently drop for now. WARN_ON_ONCE(1);
+ return;
+ }
+ lib_ring_buffer_align_ctx(&ctx, lttng_alignof(trigger->id));
+ trigger_group->ops->event_write(&ctx, &trigger->id, sizeof(trigger->id));
+ trigger_group->ops->event_commit(&ctx);
+ irq_work_queue(&trigger_group->wakeup_pending);
+}
+
+struct lttng_trigger *_lttng_trigger_create(
+ const struct lttng_event_desc *event_desc,
+ uint64_t id, struct lttng_trigger_group *trigger_group,
+ struct lttng_kernel_trigger *trigger_param, void *filter,
+ enum lttng_kernel_instrumentation itype)
+{
+ struct lttng_trigger *trigger;
+ const char *event_name;
+ struct hlist_head *head;
+ int ret;
+
+ switch (itype) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ event_name = event_desc->name;
+ break;
+ case LTTNG_KERNEL_KPROBE:
+ case LTTNG_KERNEL_UPROBE:
+ case LTTNG_KERNEL_SYSCALL:
+ event_name = trigger_param->name;
+ break;
+ case LTTNG_KERNEL_KRETPROBE:
+ case LTTNG_KERNEL_FUNCTION:
+ case LTTNG_KERNEL_NOOP:
+ default:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ goto type_error;
+ }
+
+ head = utils_borrow_hash_table_bucket(trigger_group->triggers_ht.table,
+ LTTNG_TRIGGER_HT_SIZE, event_name);
+ lttng_hlist_for_each_entry(trigger, head, hlist) {
+ WARN_ON_ONCE(!trigger->desc);
+ if (!strncmp(trigger->desc->name, event_name,
+ LTTNG_KERNEL_SYM_NAME_LEN - 1)
+ && trigger_group == trigger->group
+ && id == trigger->id) {
+ ret = -EEXIST;
+ goto exist;
+ }
+ }
+
+ trigger = kmem_cache_zalloc(trigger_cache, GFP_KERNEL);
+ if (!trigger) {
+ ret = -ENOMEM;
+ goto cache_error;
+ }
+ trigger->group = trigger_group;
+ trigger->id = id;
+ trigger->filter = filter;
+ trigger->instrumentation = itype;
+ trigger->evtype = LTTNG_TYPE_EVENT;
+ trigger->send_notification = lttng_trigger_send_notification;
+ INIT_LIST_HEAD(&trigger->bytecode_runtime_head);
+ INIT_LIST_HEAD(&trigger->enablers_ref_head);
+
+ switch (itype) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ /* Event will be enabled by enabler sync. */
+ trigger->enabled = 0;
+ trigger->registered = 0;
+ trigger->desc = lttng_event_desc_get(event_name);
+ if (!trigger->desc) {
+ ret = -ENOENT;
+ goto register_error;
+ }
+ /* Populate lttng_trigger structure before event registration. */
+ smp_wmb();
+ break;
+ case LTTNG_KERNEL_KPROBE:
+ /*
+ * Needs to be explicitly enabled after creation, since
+ * we may want to apply filters.
+ */
+ trigger->enabled = 0;
+ trigger->registered = 1;
+ /*
+ * Populate lttng_trigger structure before event
+ * registration.
+ */
+ smp_wmb();
+ ret = lttng_kprobes_register_trigger(
+ trigger_param->u.kprobe.symbol_name,
+ trigger_param->u.kprobe.offset,
+ trigger_param->u.kprobe.addr,
+ trigger);
+ if (ret) {
+ ret = -EINVAL;
+ goto register_error;
+ }
+ ret = try_module_get(trigger->desc->owner);
+ WARN_ON_ONCE(!ret);
+ break;
+ case LTTNG_KERNEL_NOOP:
+ case LTTNG_KERNEL_SYSCALL:
+ /*
+ * Needs to be explicitly enabled after creation, since
+ * we may want to apply filters.
+ */
+ trigger->enabled = 0;
+ trigger->registered = 0;
+ trigger->desc = event_desc;
+ if (!trigger->desc) {
+ ret = -EINVAL;
+ goto register_error;
+ }
+ break;
+ case LTTNG_KERNEL_UPROBE:
+ /*
+ * Needs to be explicitly enabled after creation, since
+ * we may want to apply filters.
+ */
+ trigger->enabled = 0;
+ trigger->registered = 1;
+
+ /*
+ * Populate lttng_trigger structure before trigger
+ * registration.
+ */
+ smp_wmb();
+
+ ret = lttng_uprobes_register_trigger(trigger_param->name,
+ trigger_param->u.uprobe.fd,
+ trigger);
+ if (ret)
+ goto register_error;
+ ret = try_module_get(trigger->desc->owner);
+ WARN_ON_ONCE(!ret);
+ break;
+ case LTTNG_KERNEL_KRETPROBE:
+ case LTTNG_KERNEL_FUNCTION:
+ default:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ goto register_error;
+ }
+
+ list_add(&trigger->list, &trigger_group->triggers_head);
+ hlist_add_head(&trigger->hlist, head);
+ return trigger;
+
+register_error:
+ kmem_cache_free(trigger_cache, trigger);
+cache_error:
+exist:
+type_error:
+ return ERR_PTR(ret);
+}
+
struct lttng_event *lttng_event_create(struct lttng_channel *chan,
struct lttng_kernel_event *event_param,
void *filter,
return event;
}
+struct lttng_trigger *lttng_trigger_create(
+ const struct lttng_event_desc *event_desc,
+ uint64_t id, struct lttng_trigger_group *trigger_group,
+ struct lttng_kernel_trigger *trigger_param, void *filter,
+ enum lttng_kernel_instrumentation itype)
+{
+ struct lttng_trigger *trigger;
+
+ mutex_lock(&sessions_mutex);
+ trigger = _lttng_trigger_create(event_desc, id, trigger_group,
+ trigger_param, filter, itype);
+ mutex_unlock(&sessions_mutex);
+ return trigger;
+}
+
/* Only used for tracepoints for now. */
static
void register_event(struct lttng_event *event)
event);
break;
case LTTNG_KERNEL_SYSCALL:
- ret = lttng_syscall_filter_enable(event->chan,
+ ret = lttng_syscall_filter_enable_event(event->chan,
desc->name);
break;
case LTTNG_KERNEL_KPROBE:
event);
break;
case LTTNG_KERNEL_KPROBE:
- lttng_kprobes_unregister(event);
+ lttng_kprobes_unregister_event(event);
ret = 0;
break;
case LTTNG_KERNEL_KRETPROBE:
ret = 0;
break;
case LTTNG_KERNEL_SYSCALL:
- ret = lttng_syscall_filter_disable(event->chan,
+ ret = lttng_syscall_filter_disable_event(event->chan,
desc->name);
break;
case LTTNG_KERNEL_NOOP:
ret = 0;
break;
case LTTNG_KERNEL_UPROBE:
- lttng_uprobes_unregister(event);
+ lttng_uprobes_unregister_event(event);
ret = 0;
break;
default:
return ret;
}
+/* Only used for tracepoints for now. */
+static
+void register_trigger(struct lttng_trigger *trigger)
+{
+ const struct lttng_event_desc *desc;
+ int ret = -EINVAL;
+
+ if (trigger->registered)
+ return;
+
+ desc = trigger->desc;
+ switch (trigger->instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
+ desc->trigger_callback,
+ trigger);
+ break;
+ case LTTNG_KERNEL_SYSCALL:
+ ret = lttng_syscall_filter_enable_trigger(trigger);
+ break;
+ case LTTNG_KERNEL_KPROBE:
+ case LTTNG_KERNEL_UPROBE:
+ ret = 0;
+ break;
+ case LTTNG_KERNEL_KRETPROBE:
+ case LTTNG_KERNEL_FUNCTION:
+ case LTTNG_KERNEL_NOOP:
+ default:
+ WARN_ON_ONCE(1);
+ }
+ if (!ret)
+ trigger->registered = 1;
+}
+
+static
+int _lttng_trigger_unregister(struct lttng_trigger *trigger)
+{
+ const struct lttng_event_desc *desc;
+ int ret = -EINVAL;
+
+ if (!trigger->registered)
+ return 0;
+
+ desc = trigger->desc;
+ switch (trigger->instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ ret = lttng_wrapper_tracepoint_probe_unregister(trigger->desc->kname,
+ trigger->desc->trigger_callback,
+ trigger);
+ break;
+ case LTTNG_KERNEL_KPROBE:
+ lttng_kprobes_unregister_trigger(trigger);
+ ret = 0;
+ break;
+ case LTTNG_KERNEL_UPROBE:
+ lttng_uprobes_unregister_trigger(trigger);
+ ret = 0;
+ break;
+ case LTTNG_KERNEL_SYSCALL:
+ ret = lttng_syscall_filter_disable_trigger(trigger);
+ break;
+ case LTTNG_KERNEL_KRETPROBE:
+ case LTTNG_KERNEL_FUNCTION:
+ case LTTNG_KERNEL_NOOP:
+ default:
+ WARN_ON_ONCE(1);
+ }
+ if (!ret)
+ trigger->registered = 0;
+ return ret;
+}
+
/*
* Only used internally at session destruction.
*/
{
switch (event->instrumentation) {
case LTTNG_KERNEL_TRACEPOINT:
- lttng_event_put(event->desc);
+ lttng_event_desc_put(event->desc);
break;
case LTTNG_KERNEL_KPROBE:
module_put(event->desc->owner);
- lttng_kprobes_destroy_private(event);
+ lttng_kprobes_destroy_event_private(event);
break;
case LTTNG_KERNEL_KRETPROBE:
module_put(event->desc->owner);
lttng_kretprobes_destroy_private(event);
break;
- case LTTNG_KERNEL_FUNCTION:
- module_put(event->desc->owner);
- lttng_ftrace_destroy_private(event);
+ case LTTNG_KERNEL_FUNCTION:
+ module_put(event->desc->owner);
+ lttng_ftrace_destroy_private(event);
+ break;
+ case LTTNG_KERNEL_NOOP:
+ case LTTNG_KERNEL_SYSCALL:
+ break;
+ case LTTNG_KERNEL_UPROBE:
+ module_put(event->desc->owner);
+ lttng_uprobes_destroy_event_private(event);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ list_del(&event->list);
+ lttng_destroy_context(event->ctx);
+ kmem_cache_free(event_cache, event);
+}
+
+/*
+ * Only used internally at session destruction.
+ */
+static
+void _lttng_trigger_destroy(struct lttng_trigger *trigger)
+{
+ switch (trigger->instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ lttng_event_desc_put(trigger->desc);
+ break;
+ case LTTNG_KERNEL_KPROBE:
+ module_put(trigger->desc->owner);
+ lttng_kprobes_destroy_trigger_private(trigger);
break;
case LTTNG_KERNEL_NOOP:
case LTTNG_KERNEL_SYSCALL:
break;
case LTTNG_KERNEL_UPROBE:
- module_put(event->desc->owner);
- lttng_uprobes_destroy_private(event);
+ module_put(trigger->desc->owner);
+ lttng_uprobes_destroy_trigger_private(trigger);
break;
+ case LTTNG_KERNEL_KRETPROBE:
+ case LTTNG_KERNEL_FUNCTION:
default:
WARN_ON_ONCE(1);
}
- list_del(&event->list);
- lttng_destroy_context(event->ctx);
- kmem_cache_free(event_cache, event);
+ list_del(&trigger->list);
+ kmem_cache_free(trigger_cache, trigger);
}
struct lttng_id_tracker *get_tracker(struct lttng_session *session,
return 1;
}
-static
int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
struct lttng_enabler *enabler)
{
WARN_ON_ONCE(1);
return -EINVAL;
}
- switch (enabler->type) {
- case LTTNG_ENABLER_STAR_GLOB:
+ switch (enabler->format_type) {
+ case LTTNG_ENABLER_FORMAT_STAR_GLOB:
return lttng_match_enabler_star_glob(desc_name, enabler_name);
- case LTTNG_ENABLER_NAME:
+ case LTTNG_ENABLER_FORMAT_NAME:
return lttng_match_enabler_name(desc_name, enabler_name);
default:
return -EINVAL;
}
static
-int lttng_event_match_enabler(struct lttng_event *event,
- struct lttng_enabler *enabler)
+int lttng_event_enabler_match_event(struct lttng_event_enabler *event_enabler,
+ struct lttng_event *event)
+{
+ struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(
+ event_enabler);
+
+ if (base_enabler->event_param.instrumentation != event->instrumentation)
+ return 0;
+ if (lttng_desc_match_enabler(event->desc, base_enabler)
+ && event->chan == event_enabler->chan)
+ return 1;
+ else
+ return 0;
+}
+
+static
+int lttng_trigger_enabler_match_trigger(struct lttng_trigger_enabler *trigger_enabler,
+ struct lttng_trigger *trigger)
{
- if (enabler->event_param.instrumentation != event->instrumentation)
+ struct lttng_enabler *base_enabler = lttng_trigger_enabler_as_enabler(
+ trigger_enabler);
+
+ if (base_enabler->event_param.instrumentation != trigger->instrumentation)
return 0;
- if (lttng_desc_match_enabler(event->desc, enabler)
- && event->chan == enabler->chan)
+ if (lttng_desc_match_enabler(trigger->desc, base_enabler)
+ && trigger->group == trigger_enabler->group
+ && trigger->id == trigger_enabler->id)
return 1;
else
return 0;
}
static
-struct lttng_enabler_ref *lttng_event_enabler_ref(struct lttng_event *event,
+struct lttng_enabler_ref *lttng_enabler_ref(
+ struct list_head *enablers_ref_list,
struct lttng_enabler *enabler)
{
struct lttng_enabler_ref *enabler_ref;
- list_for_each_entry(enabler_ref,
- &event->enablers_ref_head, node) {
+ list_for_each_entry(enabler_ref, enablers_ref_list, node) {
if (enabler_ref->ref == enabler)
return enabler_ref;
}
}
static
-void lttng_create_tracepoint_if_missing(struct lttng_enabler *enabler)
+void lttng_create_tracepoint_event_if_missing(struct lttng_event_enabler *event_enabler)
{
- struct lttng_session *session = enabler->chan->session;
+ struct lttng_session *session = event_enabler->chan->session;
struct lttng_probe_desc *probe_desc;
const struct lttng_event_desc *desc;
int i;
for (i = 0; i < probe_desc->nr_events; i++) {
int found = 0;
struct hlist_head *head;
- const char *event_name;
- size_t name_len;
- uint32_t hash;
struct lttng_event *event;
desc = probe_desc->event_desc[i];
- if (!lttng_desc_match_enabler(desc, enabler))
+ if (!lttng_desc_match_enabler(desc,
+ lttng_event_enabler_as_enabler(event_enabler)))
continue;
- event_name = desc->name;
- name_len = strlen(event_name);
/*
* Check if already created.
*/
- hash = jhash(event_name, name_len, 0);
- head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
+ head = utils_borrow_hash_table_bucket(
+ session->events_ht.table, LTTNG_EVENT_HT_SIZE,
+ desc->name);
lttng_hlist_for_each_entry(event, head, hlist) {
if (event->desc == desc
- && event->chan == enabler->chan)
+ && event->chan == event_enabler->chan)
found = 1;
}
if (found)
* We need to create an event for this
* event probe.
*/
- event = _lttng_event_create(enabler->chan,
+ event = _lttng_event_create(event_enabler->chan,
NULL, NULL, desc,
LTTNG_KERNEL_TRACEPOINT);
if (!event) {
}
static
-void lttng_create_syscall_if_missing(struct lttng_enabler *enabler)
+void lttng_create_tracepoint_trigger_if_missing(struct lttng_trigger_enabler *trigger_enabler)
+{
+ struct lttng_trigger_group *trigger_group = trigger_enabler->group;
+ struct lttng_probe_desc *probe_desc;
+ const struct lttng_event_desc *desc;
+ int i;
+ struct list_head *probe_list;
+
+ probe_list = lttng_get_probe_list_head();
+ /*
+ * For each probe event, if we find that a probe event matches
+ * our enabler, create an associated lttng_trigger if not
+ * already present.
+ */
+ list_for_each_entry(probe_desc, probe_list, head) {
+ for (i = 0; i < probe_desc->nr_events; i++) {
+ int found = 0;
+ struct hlist_head *head;
+ struct lttng_trigger *trigger;
+
+ desc = probe_desc->event_desc[i];
+ if (!lttng_desc_match_enabler(desc,
+ lttng_trigger_enabler_as_enabler(trigger_enabler)))
+ continue;
+
+ /*
+ * Check if already created.
+ */
+ head = utils_borrow_hash_table_bucket(
+ trigger_group->triggers_ht.table,
+ LTTNG_TRIGGER_HT_SIZE, desc->name);
+ lttng_hlist_for_each_entry(trigger, head, hlist) {
+ if (trigger->desc == desc
+ && trigger->id == trigger_enabler->id)
+ found = 1;
+ }
+ if (found)
+ continue;
+
+ /*
+ * We need to create a trigger for this event probe.
+ */
+ trigger = _lttng_trigger_create(desc,
+ trigger_enabler->id, trigger_group, NULL, NULL,
+ LTTNG_KERNEL_TRACEPOINT);
+ if (IS_ERR(trigger)) {
+ printk(KERN_INFO "Unable to create trigger %s\n",
+ probe_desc->event_desc[i]->name);
+ }
+ }
+ }
+}
+
+static
+void lttng_create_syscall_event_if_missing(struct lttng_event_enabler *event_enabler)
+{
+ int ret;
+
+ ret = lttng_syscalls_register_event(event_enabler->chan, NULL);
+ WARN_ON_ONCE(ret);
+}
+
+static
+void lttng_create_syscall_trigger_if_missing(struct lttng_trigger_enabler *trigger_enabler)
{
int ret;
- ret = lttng_syscalls_register(enabler->chan, NULL);
+ ret = lttng_syscalls_register_trigger(trigger_enabler, NULL);
+ WARN_ON_ONCE(ret);
+ ret = lttng_syscalls_create_matching_triggers(trigger_enabler, NULL);
WARN_ON_ONCE(ret);
}
* Should be called with sessions mutex held.
*/
static
-void lttng_create_event_if_missing(struct lttng_enabler *enabler)
+void lttng_create_event_if_missing(struct lttng_event_enabler *event_enabler)
{
- switch (enabler->event_param.instrumentation) {
+ switch (event_enabler->base.event_param.instrumentation) {
case LTTNG_KERNEL_TRACEPOINT:
- lttng_create_tracepoint_if_missing(enabler);
+ lttng_create_tracepoint_event_if_missing(event_enabler);
break;
case LTTNG_KERNEL_SYSCALL:
- lttng_create_syscall_if_missing(enabler);
+ lttng_create_syscall_event_if_missing(event_enabler);
break;
default:
WARN_ON_ONCE(1);
}
/*
- * Create events associated with an enabler (if not already present),
+ * Create events associated with an event_enabler (if not already present),
* and add backward reference from the event to the enabler.
* Should be called with sessions mutex held.
*/
static
-int lttng_enabler_ref_events(struct lttng_enabler *enabler)
+int lttng_event_enabler_ref_events(struct lttng_event_enabler *event_enabler)
{
- struct lttng_session *session = enabler->chan->session;
+ struct lttng_session *session = event_enabler->chan->session;
struct lttng_event *event;
/* First ensure that probe events are created for this enabler. */
- lttng_create_event_if_missing(enabler);
+ lttng_create_event_if_missing(event_enabler);
- /* For each event matching enabler in session event list. */
+ /* For each event matching event_enabler in session event list. */
list_for_each_entry(event, &session->events, list) {
struct lttng_enabler_ref *enabler_ref;
- if (!lttng_event_match_enabler(event, enabler))
+ if (!lttng_event_enabler_match_event(event_enabler, event))
continue;
- enabler_ref = lttng_event_enabler_ref(event, enabler);
+ enabler_ref = lttng_enabler_ref(&event->enablers_ref_head,
+ lttng_event_enabler_as_enabler(event_enabler));
if (!enabler_ref) {
/*
* If no backward ref, create it.
- * Add backward ref from event to enabler.
+ * Add backward ref from event to event_enabler.
*/
enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
if (!enabler_ref)
return -ENOMEM;
- enabler_ref->ref = enabler;
+ enabler_ref->ref = lttng_event_enabler_as_enabler(event_enabler);
list_add(&enabler_ref->node,
&event->enablers_ref_head);
}
/*
* Link filter bytecodes if not linked yet.
*/
- lttng_enabler_event_link_bytecode(event, enabler);
+ lttng_enabler_link_bytecode(event->desc,
+ lttng_static_ctx,
+ &event->bytecode_runtime_head,
+ lttng_event_enabler_as_enabler(event_enabler));
/* TODO: merge event context. */
}
return 0;
}
+/*
+ * Create struct lttng_trigger if it is missing and present in the list of
+ * tracepoint probes.
+ * Should be called with sessions mutex held.
+ */
+static
+void lttng_create_trigger_if_missing(struct lttng_trigger_enabler *trigger_enabler)
+{
+ switch (trigger_enabler->base.event_param.instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ lttng_create_tracepoint_trigger_if_missing(trigger_enabler);
+ break;
+ case LTTNG_KERNEL_SYSCALL:
+ lttng_create_syscall_trigger_if_missing(trigger_enabler);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+}
+
+/*
+ * Create triggers associated with a trigger enabler (if not already present).
+ */
+static
+int lttng_trigger_enabler_ref_triggers(struct lttng_trigger_enabler *trigger_enabler)
+{
+ struct lttng_trigger_group *trigger_group = trigger_enabler->group;
+ struct lttng_trigger *trigger;
+
+ /* First ensure that probe triggers are created for this enabler. */
+ lttng_create_trigger_if_missing(trigger_enabler);
+
+ /* Link the created trigger with its associated enabler. */
+ list_for_each_entry(trigger, &trigger_group->triggers_head, list) {
+ struct lttng_enabler_ref *enabler_ref;
+
+ if (!lttng_trigger_enabler_match_trigger(trigger_enabler, trigger))
+ continue;
+
+ enabler_ref = lttng_enabler_ref(&trigger->enablers_ref_head,
+ lttng_trigger_enabler_as_enabler(trigger_enabler));
+ if (!enabler_ref) {
+ /*
+ * If no backward ref, create it.
+ * Add backward ref from trigger to enabler.
+ */
+ enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
+ if (!enabler_ref)
+ return -ENOMEM;
+
+ enabler_ref->ref = lttng_trigger_enabler_as_enabler(
+ trigger_enabler);
+ list_add(&enabler_ref->node,
+ &trigger->enablers_ref_head);
+ }
+
+ /*
+ * Link filter bytecodes if not linked yet.
+ */
+ lttng_enabler_link_bytecode(trigger->desc,
+ trigger_group->ctx, &trigger->bytecode_runtime_head,
+ lttng_trigger_enabler_as_enabler(trigger_enabler));
+ }
+ return 0;
+}
+
/*
* Called at module load: connect the probe on all enablers matching
* this event.
struct lttng_session *session;
list_for_each_entry(session, &sessions, list)
- lttng_session_lazy_sync_enablers(session);
+ lttng_session_lazy_sync_event_enablers(session);
+ return 0;
+}
+
+static bool lttng_trigger_group_has_active_triggers(
+ struct lttng_trigger_group *trigger_group)
+{
+ struct lttng_trigger_enabler *trigger_enabler;
+
+ list_for_each_entry(trigger_enabler, &trigger_group->enablers_head,
+ node) {
+ if (trigger_enabler->base.enabled)
+ return true;
+ }
+ return false;
+}
+
+bool lttng_trigger_active(void)
+{
+ struct lttng_trigger_group *trigger_group;
+
+ list_for_each_entry(trigger_group, &trigger_groups, node) {
+ if (lttng_trigger_group_has_active_triggers(trigger_group))
+ return true;
+ }
+ return false;
+}
+
+int lttng_fix_pending_triggers(void)
+{
+ struct lttng_trigger_group *trigger_group;
+
+ list_for_each_entry(trigger_group, &trigger_groups, node)
+ lttng_trigger_group_sync_enablers(trigger_group);
return 0;
}
-struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
+struct lttng_event_enabler *lttng_event_enabler_create(
+ enum lttng_enabler_format_type format_type,
struct lttng_kernel_event *event_param,
struct lttng_channel *chan)
{
- struct lttng_enabler *enabler;
+ struct lttng_event_enabler *event_enabler;
- enabler = kzalloc(sizeof(*enabler), GFP_KERNEL);
- if (!enabler)
+ event_enabler = kzalloc(sizeof(*event_enabler), GFP_KERNEL);
+ if (!event_enabler)
return NULL;
- enabler->type = type;
- INIT_LIST_HEAD(&enabler->filter_bytecode_head);
- memcpy(&enabler->event_param, event_param,
- sizeof(enabler->event_param));
- enabler->chan = chan;
+ event_enabler->base.format_type = format_type;
+ INIT_LIST_HEAD(&event_enabler->base.filter_bytecode_head);
+ memcpy(&event_enabler->base.event_param, event_param,
+ sizeof(event_enabler->base.event_param));
+ event_enabler->chan = chan;
/* ctx left NULL */
- enabler->enabled = 0;
- enabler->evtype = LTTNG_TYPE_ENABLER;
+ event_enabler->base.enabled = 0;
+ event_enabler->base.evtype = LTTNG_TYPE_ENABLER;
mutex_lock(&sessions_mutex);
- list_add(&enabler->node, &enabler->chan->session->enablers_head);
- lttng_session_lazy_sync_enablers(enabler->chan->session);
+ list_add(&event_enabler->node, &event_enabler->chan->session->enablers_head);
+ lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
mutex_unlock(&sessions_mutex);
- return enabler;
+ return event_enabler;
}
-int lttng_enabler_enable(struct lttng_enabler *enabler)
+int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler)
{
mutex_lock(&sessions_mutex);
- enabler->enabled = 1;
- lttng_session_lazy_sync_enablers(enabler->chan->session);
+ lttng_event_enabler_as_enabler(event_enabler)->enabled = 1;
+ lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
mutex_unlock(&sessions_mutex);
return 0;
}
-int lttng_enabler_disable(struct lttng_enabler *enabler)
+int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler)
{
mutex_lock(&sessions_mutex);
- enabler->enabled = 0;
- lttng_session_lazy_sync_enablers(enabler->chan->session);
+ lttng_event_enabler_as_enabler(event_enabler)->enabled = 0;
+ lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
mutex_unlock(&sessions_mutex);
return 0;
}
+static
int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
struct lttng_kernel_filter_bytecode __user *bytecode)
{
sizeof(*bytecode) + bytecode_len);
if (ret)
goto error_free;
+
bytecode_node->enabler = enabler;
/* Enforce length based on allocated size */
bytecode_node->bc.len = bytecode_len;
list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
- lttng_session_lazy_sync_enablers(enabler->chan->session);
+
return 0;
error_free:
return ret;
}
+int lttng_event_enabler_attach_bytecode(struct lttng_event_enabler *event_enabler,
+ struct lttng_kernel_filter_bytecode __user *bytecode)
+{
+ int ret;
+ ret = lttng_enabler_attach_bytecode(
+ lttng_event_enabler_as_enabler(event_enabler), bytecode);
+ if (ret)
+ goto error;
+
+ lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
+ return 0;
+
+error:
+ return ret;
+}
+
int lttng_event_add_callsite(struct lttng_event *event,
struct lttng_kernel_event_callsite __user *callsite)
{
switch (event->instrumentation) {
case LTTNG_KERNEL_UPROBE:
- return lttng_uprobes_add_callsite(event, callsite);
+ return lttng_uprobes_event_add_callsite(event, callsite);
default:
return -EINVAL;
}
}
-int lttng_enabler_attach_context(struct lttng_enabler *enabler,
+int lttng_event_enabler_attach_context(struct lttng_event_enabler *event_enabler,
struct lttng_kernel_context *context_param)
{
return -ENOSYS;
&enabler->filter_bytecode_head, node) {
kfree(filter_node);
}
+}
+
+static
+void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler)
+{
+ lttng_enabler_destroy(lttng_event_enabler_as_enabler(event_enabler));
/* Destroy contexts */
- lttng_destroy_context(enabler->ctx);
+ lttng_destroy_context(event_enabler->ctx);
+
+ list_del(&event_enabler->node);
+ kfree(event_enabler);
+}
+
+struct lttng_trigger_enabler *lttng_trigger_enabler_create(
+ struct lttng_trigger_group *trigger_group,
+ enum lttng_enabler_format_type format_type,
+ struct lttng_kernel_trigger *trigger_param)
+{
+ struct lttng_trigger_enabler *trigger_enabler;
+
+ trigger_enabler = kzalloc(sizeof(*trigger_enabler), GFP_KERNEL);
+ if (!trigger_enabler)
+ return NULL;
+
+ trigger_enabler->base.format_type = format_type;
+ INIT_LIST_HEAD(&trigger_enabler->base.filter_bytecode_head);
+
+ trigger_enabler->id = trigger_param->id;
+
+ memcpy(&trigger_enabler->base.event_param.name, trigger_param->name,
+ sizeof(trigger_enabler->base.event_param.name));
+ trigger_enabler->base.event_param.instrumentation = trigger_param->instrumentation;
+ trigger_enabler->base.evtype = LTTNG_TYPE_ENABLER;
+
+ trigger_enabler->base.enabled = 0;
+ trigger_enabler->group = trigger_group;
+
+ mutex_lock(&sessions_mutex);
+ list_add(&trigger_enabler->node, &trigger_enabler->group->enablers_head);
+ lttng_trigger_group_sync_enablers(trigger_enabler->group);
+
+ mutex_unlock(&sessions_mutex);
+
+ return trigger_enabler;
+}
+
+int lttng_trigger_enabler_enable(struct lttng_trigger_enabler *trigger_enabler)
+{
+ mutex_lock(&sessions_mutex);
+ lttng_trigger_enabler_as_enabler(trigger_enabler)->enabled = 1;
+ lttng_trigger_group_sync_enablers(trigger_enabler->group);
+ mutex_unlock(&sessions_mutex);
+ return 0;
+}
+
+int lttng_trigger_enabler_disable(struct lttng_trigger_enabler *trigger_enabler)
+{
+ mutex_lock(&sessions_mutex);
+ lttng_trigger_enabler_as_enabler(trigger_enabler)->enabled = 0;
+ lttng_trigger_group_sync_enablers(trigger_enabler->group);
+ mutex_unlock(&sessions_mutex);
+ return 0;
+}
+
+int lttng_trigger_enabler_attach_bytecode(struct lttng_trigger_enabler *trigger_enabler,
+ struct lttng_kernel_filter_bytecode __user *bytecode)
+{
+ int ret;
+
+ ret = lttng_enabler_attach_bytecode(
+ lttng_trigger_enabler_as_enabler(trigger_enabler), bytecode);
+ if (ret)
+ goto error;
+
+ lttng_trigger_group_sync_enablers(trigger_enabler->group);
+ return 0;
+
+error:
+ return ret;
+}
+
+int lttng_trigger_add_callsite(struct lttng_trigger *trigger,
+ struct lttng_kernel_event_callsite __user *callsite)
+{
+
+ switch (trigger->instrumentation) {
+ case LTTNG_KERNEL_UPROBE:
+ return lttng_uprobes_trigger_add_callsite(trigger, callsite);
+ default:
+ return -EINVAL;
+ }
+}
+
+int lttng_trigger_enabler_attach_context(struct lttng_trigger_enabler *trigger_enabler,
+ struct lttng_kernel_context *context_param)
+{
+ return -ENOSYS;
+}
+
+static
+void lttng_trigger_enabler_destroy(struct lttng_trigger_enabler *trigger_enabler)
+{
+ if (!trigger_enabler) {
+ return;
+ }
+
+ list_del(&trigger_enabler->node);
- list_del(&enabler->node);
- kfree(enabler);
+ lttng_enabler_destroy(lttng_trigger_enabler_as_enabler(trigger_enabler));
+ kfree(trigger_enabler);
}
/*
- * lttng_session_sync_enablers should be called just before starting a
+ * lttng_session_sync_event_enablers should be called just before starting a
* session.
* Should be called with sessions mutex held.
*/
static
-void lttng_session_sync_enablers(struct lttng_session *session)
+void lttng_session_sync_event_enablers(struct lttng_session *session)
{
- struct lttng_enabler *enabler;
+ struct lttng_event_enabler *event_enabler;
struct lttng_event *event;
- list_for_each_entry(enabler, &session->enablers_head, node)
- lttng_enabler_ref_events(enabler);
+ list_for_each_entry(event_enabler, &session->enablers_head, node)
+ lttng_event_enabler_ref_events(event_enabler);
/*
* For each event, if at least one of its enablers is enabled,
* and its channel and session transient states are enabled, we
* Should be called with sessions mutex held.
*/
static
-void lttng_session_lazy_sync_enablers(struct lttng_session *session)
+void lttng_session_lazy_sync_event_enablers(struct lttng_session *session)
{
/* We can skip if session is not active */
if (!session->active)
return;
- lttng_session_sync_enablers(session);
+ lttng_session_sync_event_enablers(session);
+}
+
+static
+void lttng_trigger_group_sync_enablers(struct lttng_trigger_group *trigger_group)
+{
+ struct lttng_trigger_enabler *trigger_enabler;
+ struct lttng_trigger *trigger;
+
+ list_for_each_entry(trigger_enabler, &trigger_group->enablers_head, node)
+ lttng_trigger_enabler_ref_triggers(trigger_enabler);
+
+ /*
+ * For each trigger, if at least one of its enablers is enabled,
+ * we enable the trigger, else we disable it.
+ */
+ list_for_each_entry(trigger, &trigger_group->triggers_head, list) {
+ struct lttng_enabler_ref *enabler_ref;
+ struct lttng_bytecode_runtime *runtime;
+ int enabled = 0, has_enablers_without_bytecode = 0;
+
+ switch (trigger->instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ case LTTNG_KERNEL_SYSCALL:
+ /* Enable triggers */
+ list_for_each_entry(enabler_ref,
+ &trigger->enablers_ref_head, node) {
+ if (enabler_ref->ref->enabled) {
+ enabled = 1;
+ break;
+ }
+ }
+ break;
+ default:
+ /* Not handled with sync. */
+ continue;
+ }
+
+ WRITE_ONCE(trigger->enabled, enabled);
+ /*
+ * Sync tracepoint registration with trigger enabled
+ * state.
+ */
+ if (enabled) {
+ if (!trigger->registered)
+ register_trigger(trigger);
+ } else {
+ if (trigger->registered)
+ _lttng_trigger_unregister(trigger);
+ }
+
+ /* Check if has enablers without bytecode enabled */
+ list_for_each_entry(enabler_ref,
+ &trigger->enablers_ref_head, node) {
+ if (enabler_ref->ref->enabled
+ && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
+ has_enablers_without_bytecode = 1;
+ break;
+ }
+ }
+ trigger->has_enablers_without_bytecode =
+ has_enablers_without_bytecode;
+
+ /* Enable filters */
+ list_for_each_entry(runtime,
+ &trigger->bytecode_runtime_head, node)
+ lttng_filter_sync_state(runtime);
+ }
}
/*
event_cache = KMEM_CACHE(lttng_event, 0);
if (!event_cache) {
ret = -ENOMEM;
- goto error_kmem;
+ goto error_kmem_event;
+ }
+ trigger_cache = KMEM_CACHE(lttng_trigger, 0);
+ if (!trigger_cache) {
+ ret = -ENOMEM;
+ goto error_kmem_trigger;
}
ret = lttng_abi_init();
if (ret)
error_logger:
lttng_abi_exit();
error_abi:
+ kmem_cache_destroy(trigger_cache);
+error_kmem_trigger:
kmem_cache_destroy(event_cache);
-error_kmem:
+error_kmem_event:
lttng_tracepoint_exit();
error_tp:
lttng_context_exit();
list_for_each_entry_safe(session, tmpsession, &sessions, list)
lttng_session_destroy(session);
kmem_cache_destroy(event_cache);
+ kmem_cache_destroy(trigger_cache);
lttng_tracepoint_exit();
lttng_context_exit();
printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
#include <lttng-tracer.h>
#include <lttng-abi.h>
#include <lttng-abi-old.h>
+#include <linux/irq_work.h>
#define lttng_is_signed_type(type) (((type)(-1)) < 0)
struct lttng_probe_ctx {
struct lttng_event *event;
+ struct lttng_trigger *trigger; /* Unused for now; unclear if ever needed — TODO confirm. */
uint8_t interruptible;
};
const struct lttng_event_field *fields; /* event payload */
unsigned int nr_fields;
struct module *owner;
+ void *trigger_callback;
};
struct lttng_probe_desc {
const char *filter_stack_data);
int link_failed;
struct list_head node; /* list of bytecode runtime in event */
- struct lttng_event *event;
+ struct lttng_ctx *ctx;
};
/*
};
struct lttng_uprobe_handler {
- struct lttng_event *event;
+ union {
+ struct lttng_event *event;
+ struct lttng_trigger *trigger;
+ } u;
loff_t offset;
struct uprobe_consumer up_consumer;
struct list_head node;
};
+struct lttng_kprobe {
+ struct kprobe kp;
+ char *symbol_name;
+};
+
+struct lttng_uprobe {
+ struct inode *inode;
+ struct list_head head;
+};
+
+struct lttng_syscall {
+ struct list_head node; /* chain registered syscall trigger */
+ unsigned int syscall_id;
+ bool is_compat;
+};
+
/*
* lttng_event structure is referred to by the tracing fast path. It must be
* kept small.
struct lttng_ctx *ctx;
enum lttng_kernel_instrumentation instrumentation;
union {
- struct {
- struct kprobe kp;
- char *symbol_name;
- } kprobe;
+ struct lttng_kprobe kprobe;
struct {
struct lttng_krp *lttng_krp;
char *symbol_name;
struct {
char *symbol_name;
} ftrace;
- struct {
- struct inode *inode;
- struct list_head head;
- } uprobe;
+ struct lttng_uprobe uprobe;
} u;
struct list_head list; /* Event list in session */
unsigned int metadata_dumped:1;
int has_enablers_without_bytecode;
};
-enum lttng_enabler_type {
- LTTNG_ENABLER_STAR_GLOB,
- LTTNG_ENABLER_NAME,
+// FIXME: Really similar to lttng_event above. Could those be merged ?
+struct lttng_trigger {
+ enum lttng_event_type evtype; /* First field. */
+ uint64_t id;
+ int enabled;
+ int registered; /* has reg'd tracepoint probe */
+ const struct lttng_event_desc *desc;
+ void *filter;
+ struct list_head list; /* Trigger list in trigger group */
+
+ enum lttng_kernel_instrumentation instrumentation;
+ union {
+ struct lttng_kprobe kprobe;
+ struct lttng_uprobe uprobe;
+ struct lttng_syscall syscall;
+ } u;
+
+ /* Backward references: list of lttng_enabler_ref (ref to enablers) */
+ struct list_head enablers_ref_head;
+ struct hlist_node hlist; /* session ht of triggers */
+ /* list of struct lttng_bytecode_runtime, sorted by seqnum */
+ struct list_head bytecode_runtime_head;
+ int has_enablers_without_bytecode;
+
+ void (*send_notification)(struct lttng_trigger *trigger);
+ struct lttng_trigger_group *group; /* Weak ref */
+};
+
+enum lttng_enabler_format_type {
+ LTTNG_ENABLER_FORMAT_STAR_GLOB,
+ LTTNG_ENABLER_FORMAT_NAME,
};
/*
struct lttng_enabler {
enum lttng_event_type evtype; /* First field. */
- enum lttng_enabler_type type;
+ enum lttng_enabler_format_type format_type;
- struct list_head node; /* per-session list of enablers */
/* head list of struct lttng_ust_filter_bytecode_node */
struct list_head filter_bytecode_head;
struct lttng_kernel_event event_param;
+ unsigned int enabled:1;
+};
+
+struct lttng_event_enabler {
+ struct lttng_enabler base;
+ struct list_head node; /* per-session list of enablers */
struct lttng_channel *chan;
+ /*
+ * Unused, but kept around to make it explicit that the tracer can do
+ * it.
+ */
struct lttng_ctx *ctx;
- unsigned int enabled:1;
};
+struct lttng_trigger_enabler {
+ struct lttng_enabler base;
+ uint64_t id;
+ struct list_head node; /* List of trigger enablers */
+ struct lttng_trigger_group *group;
+};
+
+static inline
+struct lttng_enabler *lttng_event_enabler_as_enabler(
+ struct lttng_event_enabler *event_enabler)
+{
+ return &event_enabler->base;
+}
+
+static inline
+struct lttng_enabler *lttng_trigger_enabler_as_enabler(
+ struct lttng_trigger_enabler *trigger_enabler)
+{
+ return &trigger_enabler->base;
+}
+
struct lttng_channel_ops {
struct channel *(*channel_create)(const char *name,
- struct lttng_channel *lttng_chan,
+ void *priv,
void *buf_addr,
size_t subbuf_size, size_t num_subbuf,
unsigned int switch_timer_interval,
struct hlist_head table[LTTNG_EVENT_HT_SIZE];
};
+#define LTTNG_TRIGGER_HT_BITS 12
+#define LTTNG_TRIGGER_HT_SIZE (1U << LTTNG_TRIGGER_HT_BITS)
+
+struct lttng_trigger_ht {
+ struct hlist_head table[LTTNG_TRIGGER_HT_SIZE];
+};
+
struct lttng_channel {
unsigned int id;
struct channel *chan; /* Channel buffers */
struct lttng_id_tracker vgid_tracker;
unsigned int metadata_dumped:1,
tstate:1; /* Transient enable state */
- /* List of enablers */
+ /* List of event enablers */
struct list_head enablers_head;
- /* Hash table of events */
+	/* Hash table of events */
struct lttng_event_ht events_ht;
char name[LTTNG_KERNEL_SESSION_NAME_LEN];
char creation_time[LTTNG_KERNEL_SESSION_CREATION_TIME_ISO8601_LEN];
};
+struct lttng_trigger_group {
+ struct file *file; /* File associated to trigger group */
+ struct file *notif_file; /* File used to expose notifications to userspace. */
+ struct list_head node; /* Trigger group list */
+ struct list_head enablers_head; /* List of enablers */
+ struct list_head triggers_head; /* List of triggers */
+ struct lttng_trigger_ht triggers_ht; /* Hash table of triggers */
+ struct lttng_ctx *ctx; /* Contexts for filters. */
+ struct lttng_channel_ops *ops;
+ struct lttng_transport *transport;
+ struct channel *chan; /* Ring buffer channel for trigger group. */
+ struct lib_ring_buffer *buf; /* Ring buffer for trigger group. */
+ wait_queue_head_t read_wait;
+ struct irq_work wakeup_pending; /* Pending wakeup irq work. */
+
+ struct list_head *trigger_syscall_dispatch;
+ struct list_head *trigger_compat_syscall_dispatch;
+
+ unsigned int syscall_all:1,
+ sys_enter_registered:1;
+};
+
struct lttng_metadata_cache {
char *data; /* Metadata cache */
unsigned int cache_alloc; /* Metadata allocated size (bytes) */
struct list_head *lttng_get_probe_list_head(void);
-struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
+struct lttng_event_enabler *lttng_event_enabler_create(
+ enum lttng_enabler_format_type format_type,
struct lttng_kernel_event *event_param,
struct lttng_channel *chan);
-int lttng_enabler_enable(struct lttng_enabler *enabler);
-int lttng_enabler_disable(struct lttng_enabler *enabler);
+int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler);
+int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler);
+struct lttng_trigger_enabler *lttng_trigger_enabler_create(
+ struct lttng_trigger_group *trigger_group,
+ enum lttng_enabler_format_type format_type,
+ struct lttng_kernel_trigger *trigger_param);
+
+int lttng_trigger_enabler_enable(struct lttng_trigger_enabler *trigger_enabler);
+int lttng_trigger_enabler_disable(struct lttng_trigger_enabler *trigger_enabler);
int lttng_fix_pending_events(void);
+int lttng_fix_pending_triggers(void);
int lttng_session_active(void);
+bool lttng_trigger_active(void);
struct lttng_session *lttng_session_create(void);
int lttng_session_enable(struct lttng_session *session);
int lttng_session_statedump(struct lttng_session *session);
void metadata_cache_destroy(struct kref *kref);
+struct lttng_trigger_group *lttng_trigger_group_create(void);
+void lttng_trigger_group_destroy(struct lttng_trigger_group *trigger_group);
+
struct lttng_channel *lttng_channel_create(struct lttng_session *session,
const char *transport_name,
void *buf_addr,
void *filter,
const struct lttng_event_desc *internal_desc);
+struct lttng_trigger *lttng_trigger_create(
+ const struct lttng_event_desc *trigger_desc,
+ uint64_t id,
+ struct lttng_trigger_group *trigger_group,
+ struct lttng_kernel_trigger *trigger_param,
+ void *filter,
+ enum lttng_kernel_instrumentation itype);
+struct lttng_trigger *_lttng_trigger_create(
+ const struct lttng_event_desc *trigger_desc,
+ uint64_t id,
+ struct lttng_trigger_group *trigger_group,
+ struct lttng_kernel_trigger *trigger_param,
+ void *filter,
+ enum lttng_kernel_instrumentation itype);
+
int lttng_channel_enable(struct lttng_channel *channel);
int lttng_channel_disable(struct lttng_channel *channel);
int lttng_event_enable(struct lttng_event *event);
int lttng_event_disable(struct lttng_event *event);
+int lttng_trigger_enable(struct lttng_trigger *trigger);
+int lttng_trigger_disable(struct lttng_trigger *trigger);
+
void lttng_transport_register(struct lttng_transport *transport);
void lttng_transport_unregister(struct lttng_transport *transport);
int lttng_probe_register(struct lttng_probe_desc *desc);
void lttng_probe_unregister(struct lttng_probe_desc *desc);
-const struct lttng_event_desc *lttng_event_get(const char *name);
-void lttng_event_put(const struct lttng_event_desc *desc);
+const struct lttng_event_desc *lttng_event_desc_get(const char *name);
+void lttng_event_desc_put(const struct lttng_event_desc *desc);
int lttng_probes_init(void);
void lttng_probes_exit(void);
void lttng_clock_ref(void);
void lttng_clock_unref(void);
+int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
+ struct lttng_enabler *enabler);
+
#if defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS)
-int lttng_syscalls_register(struct lttng_channel *chan, void *filter);
-int lttng_syscalls_unregister(struct lttng_channel *chan);
-int lttng_syscall_filter_enable(struct lttng_channel *chan,
+int lttng_syscalls_register_event(struct lttng_channel *chan, void *filter);
+int lttng_syscalls_unregister_event(struct lttng_channel *chan);
+int lttng_syscall_filter_enable_event(struct lttng_channel *chan,
const char *name);
-int lttng_syscall_filter_disable(struct lttng_channel *chan,
+int lttng_syscall_filter_disable_event(struct lttng_channel *chan,
const char *name);
long lttng_channel_syscall_mask(struct lttng_channel *channel,
struct lttng_kernel_syscall_mask __user *usyscall_mask);
+
+int lttng_syscalls_register_trigger(struct lttng_trigger_enabler *trigger_enabler, void *filter);
+/* FIXME: typo — "syscals" should be "syscalls"; rename together with its
+ * definition. Also note there is no #else stub for this function when
+ * CONFIG_HAVE_SYSCALL_TRACEPOINTS is not defined. */
+int lttng_syscals_create_matching_triggers(struct lttng_trigger_enabler *trigger_enabler, void *filter);
+int lttng_syscalls_unregister_trigger(struct lttng_trigger_group *group);
+int lttng_syscall_filter_enable_trigger(struct lttng_trigger *trigger);
+int lttng_syscall_filter_disable_trigger(struct lttng_trigger *trigger);
#else
-static inline int lttng_syscalls_register(struct lttng_channel *chan, void *filter)
+static inline int lttng_syscalls_register_event(
+ struct lttng_channel *chan, void *filter)
{
return -ENOSYS;
}
-static inline int lttng_syscalls_unregister(struct lttng_channel *chan)
+static inline int lttng_syscalls_unregister_event(struct lttng_channel *chan)
{
return 0;
}
-static inline int lttng_syscall_filter_enable(struct lttng_channel *chan,
+static inline int lttng_syscall_filter_enable_event(struct lttng_channel *chan,
const char *name)
{
return -ENOSYS;
}
-static inline int lttng_syscall_filter_disable(struct lttng_channel *chan,
+static inline int lttng_syscall_filter_disable_event(struct lttng_channel *chan,
const char *name)
{
return -ENOSYS;
{
return -ENOSYS;
}
+
+/* Stub: signature must match the CONFIG_HAVE_SYSCALL_TRACEPOINTS declaration. */
+static inline int lttng_syscalls_register_trigger(
+ struct lttng_trigger_enabler *trigger_enabler, void *filter)
+{
+ return -ENOSYS;
+}
+
+static inline int lttng_syscalls_unregister_trigger(struct lttng_trigger_group *group)
+{
+ return 0;
+}
+
+/* Stub: signature must match the CONFIG_HAVE_SYSCALL_TRACEPOINTS declaration. */
+static inline int lttng_syscall_filter_enable_trigger(struct lttng_trigger *trigger)
+{
+ return -ENOSYS;
+}
+
+/* Stub: signature must match the CONFIG_HAVE_SYSCALL_TRACEPOINTS declaration. */
+static inline int lttng_syscall_filter_disable_trigger(struct lttng_trigger *trigger)
+{
+ return -ENOSYS;
+}
+
#endif
void lttng_filter_sync_state(struct lttng_bytecode_runtime *runtime);
-int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
+int lttng_event_enabler_attach_bytecode(struct lttng_event_enabler *event_enabler,
+ struct lttng_kernel_filter_bytecode __user *bytecode);
+int lttng_trigger_enabler_attach_bytecode(struct lttng_trigger_enabler *trigger_enabler,
struct lttng_kernel_filter_bytecode __user *bytecode);
-void lttng_enabler_event_link_bytecode(struct lttng_event *event,
+
+void lttng_enabler_link_bytecode(const struct lttng_event_desc *event_desc,
+ struct lttng_ctx *ctx,
+ struct list_head *bytecode_runtime_head,
struct lttng_enabler *enabler);
int lttng_probes_init(void);
extern int lttng_statedump_start(struct lttng_session *session);
#ifdef CONFIG_KPROBES
-int lttng_kprobes_register(const char *name,
+int lttng_kprobes_register_event(const char *name,
const char *symbol_name,
uint64_t offset,
uint64_t addr,
struct lttng_event *event);
-void lttng_kprobes_unregister(struct lttng_event *event);
-void lttng_kprobes_destroy_private(struct lttng_event *event);
+void lttng_kprobes_unregister_event(struct lttng_event *event);
+void lttng_kprobes_destroy_event_private(struct lttng_event *event);
+int lttng_kprobes_register_trigger(const char *symbol_name,
+ uint64_t offset,
+ uint64_t addr,
+ struct lttng_trigger *trigger);
+void lttng_kprobes_unregister_trigger(struct lttng_trigger *trigger);
+void lttng_kprobes_destroy_trigger_private(struct lttng_trigger *trigger);
#else
static inline
-int lttng_kprobes_register(const char *name,
+int lttng_kprobes_register_event(const char *name,
const char *symbol_name,
uint64_t offset,
uint64_t addr,
}
static inline
-void lttng_kprobes_unregister(struct lttng_event *event)
+void lttng_kprobes_unregister_event(struct lttng_event *event)
{
}
static inline
-void lttng_kprobes_destroy_private(struct lttng_event *event)
+void lttng_kprobes_destroy_event_private(struct lttng_event *event)
+{
+}
+
+static inline
+int lttng_kprobes_register_trigger(const char *symbol_name,
+ uint64_t offset,
+ uint64_t addr,
+ struct lttng_trigger *trigger)
+{
+ return -ENOSYS;
+}
+
+static inline
+void lttng_kprobes_unregister_trigger(struct lttng_trigger *trigger)
+{
+}
+
+static inline
+void lttng_kprobes_destroy_trigger_private(struct lttng_trigger *trigger)
{
}
#endif
int lttng_event_add_callsite(struct lttng_event *event,
struct lttng_kernel_event_callsite *callsite);
+int lttng_trigger_add_callsite(struct lttng_trigger *trigger,
+ struct lttng_kernel_event_callsite *callsite);
+
#ifdef CONFIG_UPROBES
-int lttng_uprobes_register(const char *name,
+int lttng_uprobes_register_event(const char *name,
int fd, struct lttng_event *event);
-int lttng_uprobes_add_callsite(struct lttng_event *event,
+int lttng_uprobes_event_add_callsite(struct lttng_event *event,
struct lttng_kernel_event_callsite *callsite);
-void lttng_uprobes_unregister(struct lttng_event *event);
-void lttng_uprobes_destroy_private(struct lttng_event *event);
+void lttng_uprobes_unregister_event(struct lttng_event *event);
+void lttng_uprobes_destroy_event_private(struct lttng_event *event);
+int lttng_uprobes_register_trigger(const char *name,
+ int fd, struct lttng_trigger *trigger);
+int lttng_uprobes_trigger_add_callsite(struct lttng_trigger *trigger,
+ struct lttng_kernel_event_callsite *callsite);
+void lttng_uprobes_unregister_trigger(struct lttng_trigger *trigger);
+void lttng_uprobes_destroy_trigger_private(struct lttng_trigger *trigger);
#else
static inline
-int lttng_uprobes_register(const char *name,
+int lttng_uprobes_register_event(const char *name,
int fd, struct lttng_event *event)
{
return -ENOSYS;
}
static inline
-int lttng_uprobes_add_callsite(struct lttng_event *event,
+int lttng_uprobes_event_add_callsite(struct lttng_event *event,
+ struct lttng_kernel_event_callsite *callsite)
+{
+ return -ENOSYS;
+}
+
+static inline
+void lttng_uprobes_unregister_event(struct lttng_event *event)
+{
+}
+
+static inline
+void lttng_uprobes_destroy_event_private(struct lttng_event *event)
+{
+}
+
+static inline
+int lttng_uprobes_register_trigger(const char *name,
+ int fd, struct lttng_trigger *trigger)
+{
+ return -ENOSYS;
+}
+
+static inline
+int lttng_uprobes_trigger_add_callsite(struct lttng_trigger *trigger,
struct lttng_kernel_event_callsite *callsite)
{
return -ENOSYS;
}
static inline
-void lttng_uprobes_unregister(struct lttng_event *event)
+void lttng_uprobes_unregister_trigger(struct lttng_trigger *trigger)
{
}
static inline
-void lttng_uprobes_destroy_private(struct lttng_event *event)
+void lttng_uprobes_destroy_trigger_private(struct lttng_trigger *trigger)
{
}
#endif
return ret;
}
-static int specialize_context_lookup_name(struct bytecode_runtime *bytecode,
+static int specialize_context_lookup_name(struct lttng_ctx *ctx,
+ struct bytecode_runtime *bytecode,
struct load_op *insn)
{
uint16_t offset;
offset = ((struct get_symbol *) insn->data)->offset;
name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
- return lttng_get_context_index(lttng_static_ctx, name);
+ return lttng_get_context_index(ctx, name);
}
static int specialize_load_object(const struct lttng_event_field *field,
return 0;
}
-static int specialize_context_lookup(struct bytecode_runtime *runtime,
+static int specialize_context_lookup(struct lttng_ctx *ctx,
+ struct bytecode_runtime *runtime,
struct load_op *insn,
struct vstack_load *load)
{
struct filter_get_index_data gid;
ssize_t data_offset;
- idx = specialize_context_lookup_name(runtime, insn);
+ idx = specialize_context_lookup_name(ctx, runtime, insn);
if (idx < 0) {
return -ENOENT;
}
return 0;
}
-static int specialize_event_payload_lookup(struct lttng_event *event,
+static int specialize_payload_lookup(const struct lttng_event_desc *event_desc,
struct bytecode_runtime *runtime,
struct load_op *insn,
struct vstack_load *load)
{
const char *name;
uint16_t offset;
- const struct lttng_event_desc *desc = event->desc;
unsigned int i, nr_fields;
bool found = false;
uint32_t field_offset = 0;
struct filter_get_index_data gid;
ssize_t data_offset;
- nr_fields = desc->nr_fields;
+ nr_fields = event_desc->nr_fields;
offset = ((struct get_symbol *) insn->data)->offset;
name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
for (i = 0; i < nr_fields; i++) {
- field = &desc->fields[i];
+ field = &event_desc->fields[i];
if (!strcmp(field->name, name)) {
found = true;
break;
return ret;
}
-int lttng_filter_specialize_bytecode(struct lttng_event *event,
+int lttng_filter_specialize_bytecode(const struct lttng_event_desc *event_desc,
struct bytecode_runtime *bytecode)
{
void *pc, *next_pc, *start_pc;
int ret = -EINVAL;
struct vstack _stack;
struct vstack *stack = &_stack;
+ struct lttng_ctx *ctx = bytecode->p.ctx;
vstack_init(stack);
goto end;
case LOAD_ROOT_CONTEXT:
/* Lookup context field. */
- ret = specialize_context_lookup(bytecode, insn,
+ ret = specialize_context_lookup(ctx, bytecode, insn,
&vstack_ax(stack)->load);
if (ret)
goto end;
goto end;
case LOAD_ROOT_PAYLOAD:
/* Lookup event payload field. */
- ret = specialize_event_payload_lookup(event,
+ ret = specialize_payload_lookup(event_desc,
bytecode, insn,
&vstack_ax(stack)->load);
if (ret)
}
static
-int apply_field_reloc(struct lttng_event *event,
+int apply_field_reloc(const struct lttng_event_desc *event_desc,
struct bytecode_runtime *runtime,
uint32_t runtime_len,
uint32_t reloc_offset,
const char *field_name,
enum filter_op filter_op)
{
- const struct lttng_event_desc *desc;
const struct lttng_event_field *fields, *field = NULL;
unsigned int nr_fields, i;
struct load_op *op;
dbg_printk("Apply field reloc: %u %s\n", reloc_offset, field_name);
/* Lookup event by name */
- desc = event->desc;
- if (!desc)
+ if (!event_desc)
return -EINVAL;
- fields = desc->fields;
+ fields = event_desc->fields;
if (!fields)
return -EINVAL;
- nr_fields = desc->nr_fields;
+ nr_fields = event_desc->nr_fields;
for (i = 0; i < nr_fields; i++) {
if (!strcmp(fields[i].name, field_name)) {
field = &fields[i];
}
static
-int apply_context_reloc(struct lttng_event *event,
- struct bytecode_runtime *runtime,
+int apply_context_reloc(struct bytecode_runtime *runtime,
uint32_t runtime_len,
uint32_t reloc_offset,
const char *context_name,
}
static
-int apply_reloc(struct lttng_event *event,
+int apply_reloc(const struct lttng_event_desc *event_desc,
struct bytecode_runtime *runtime,
uint32_t runtime_len,
uint32_t reloc_offset,
op = (struct load_op *) &runtime->code[reloc_offset];
switch (op->op) {
case FILTER_OP_LOAD_FIELD_REF:
- return apply_field_reloc(event, runtime, runtime_len,
+ return apply_field_reloc(event_desc, runtime, runtime_len,
reloc_offset, name, op->op);
case FILTER_OP_GET_CONTEXT_REF:
- return apply_context_reloc(event, runtime, runtime_len,
+ return apply_context_reloc(runtime, runtime_len,
reloc_offset, name, op->op);
case FILTER_OP_GET_SYMBOL:
case FILTER_OP_GET_SYMBOL_FIELD:
static
int bytecode_is_linked(struct lttng_filter_bytecode_node *filter_bytecode,
- struct lttng_event *event)
+ struct list_head *bytecode_runtime_head)
{
struct lttng_bytecode_runtime *bc_runtime;
- list_for_each_entry(bc_runtime,
- &event->bytecode_runtime_head, node) {
+ list_for_each_entry(bc_runtime, bytecode_runtime_head, node) {
if (bc_runtime->bc == filter_bytecode)
return 1;
}
* bytecode runtime.
*/
static
-int _lttng_filter_event_link_bytecode(struct lttng_event *event,
+int _lttng_filter_link_bytecode(const struct lttng_event_desc *event_desc,
+ struct lttng_ctx *ctx,
struct lttng_filter_bytecode_node *filter_bytecode,
struct list_head *insert_loc)
{
if (!filter_bytecode)
return 0;
/* Bytecode already linked */
- if (bytecode_is_linked(filter_bytecode, event))
+ if (bytecode_is_linked(filter_bytecode, insert_loc))
return 0;
dbg_printk("Linking...\n");
goto alloc_error;
}
runtime->p.bc = filter_bytecode;
- runtime->p.event = event;
+ runtime->p.ctx = ctx;
runtime->len = filter_bytecode->bc.reloc_offset;
/* copy original bytecode */
memcpy(runtime->code, filter_bytecode->bc.data, runtime->len);
const char *name =
(const char *) &filter_bytecode->bc.data[offset + sizeof(uint16_t)];
- ret = apply_reloc(event, runtime, runtime->len, reloc_offset, name);
+ ret = apply_reloc(event_desc, runtime, runtime->len, reloc_offset, name);
if (ret) {
goto link_error;
}
goto link_error;
}
/* Specialize bytecode */
- ret = lttng_filter_specialize_bytecode(event, runtime);
+ ret = lttng_filter_specialize_bytecode(event_desc, runtime);
if (ret) {
goto link_error;
}
/*
* Link bytecode for all enablers referenced by an event.
*/
-void lttng_enabler_event_link_bytecode(struct lttng_event *event,
+void lttng_enabler_link_bytecode(const struct lttng_event_desc *event_desc,
+ struct lttng_ctx *ctx,
+ struct list_head *bytecode_runtime_head,
struct lttng_enabler *enabler)
{
struct lttng_filter_bytecode_node *bc;
struct lttng_bytecode_runtime *runtime;
/* Can only be called for events with desc attached */
- WARN_ON_ONCE(!event->desc);
+ WARN_ON_ONCE(!event_desc);
/* Link each bytecode. */
list_for_each_entry(bc, &enabler->filter_bytecode_head, node) {
struct list_head *insert_loc;
list_for_each_entry(runtime,
- &event->bytecode_runtime_head, node) {
+ bytecode_runtime_head, node) {
if (runtime->bc == bc) {
found = 1;
break;
* order.
*/
list_for_each_entry_reverse(runtime,
- &event->bytecode_runtime_head, node) {
+ bytecode_runtime_head, node) {
if (runtime->bc->bc.seqnum < bc->bc.seqnum) {
/* insert here */
insert_loc = &runtime->node;
}
}
/* Add to head to list */
- insert_loc = &event->bytecode_runtime_head;
+ insert_loc = bytecode_runtime_head;
add_within:
dbg_printk("linking bytecode\n");
- ret = _lttng_filter_event_link_bytecode(event, bc,
- insert_loc);
+ ret = _lttng_filter_link_bytecode(event_desc, ctx, bc,
+ insert_loc);
if (ret) {
dbg_printk("[lttng filter] warning: cannot link event bytecode\n");
}
const char *lttng_filter_print_op(enum filter_op op);
int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode);
-int lttng_filter_specialize_bytecode(struct lttng_event *event,
+int lttng_filter_specialize_bytecode(const struct lttng_event_desc *event_desc,
struct bytecode_runtime *bytecode);
uint64_t lttng_filter_false(void *filter_data,
}
ret = lttng_fix_pending_events();
WARN_ON_ONCE(ret);
+ ret = lttng_fix_pending_triggers();
+ WARN_ON_ONCE(ret);
lazy_nesting--;
}
* the probe immediately, since we cannot delay event
* registration because they are needed ASAP.
*/
- if (lttng_session_active())
+ if (lttng_session_active() || lttng_trigger_active())
fixup_lazy_probes();
end:
lttng_unlock_sessions();
* Called with sessions lock held.
*/
static
-const struct lttng_event_desc *find_event(const char *name)
+const struct lttng_event_desc *find_event_desc(const char *name)
{
struct lttng_probe_desc *probe_desc;
int i;
/*
* Called with sessions lock held.
*/
-const struct lttng_event_desc *lttng_event_get(const char *name)
+const struct lttng_event_desc *lttng_event_desc_get(const char *name)
{
- const struct lttng_event_desc *event;
+ const struct lttng_event_desc *event_desc;
int ret;
- event = find_event(name);
- if (!event)
+ event_desc = find_event_desc(name);
+ if (!event_desc)
return NULL;
- ret = try_module_get(event->owner);
+ ret = try_module_get(event_desc->owner);
WARN_ON_ONCE(!ret);
- return event;
+ return event_desc;
}
-EXPORT_SYMBOL_GPL(lttng_event_get);
+EXPORT_SYMBOL_GPL(lttng_event_desc_get);
/*
* Called with sessions lock held.
*/
-void lttng_event_put(const struct lttng_event_desc *event)
+void lttng_event_desc_put(const struct lttng_event_desc *event_desc)
{
- module_put(event->owner);
+ module_put(event_desc->owner);
}
-EXPORT_SYMBOL_GPL(lttng_event_put);
+EXPORT_SYMBOL_GPL(lttng_event_desc_put);
static
void *tp_list_start(struct seq_file *m, loff_t *pos)
static
struct channel *_channel_create(const char *name,
- struct lttng_channel *lttng_chan, void *buf_addr,
+ void *priv, void *buf_addr,
size_t subbuf_size, size_t num_subbuf,
unsigned int switch_timer_interval,
unsigned int read_timer_interval)
{
+ struct lttng_channel *lttng_chan = priv;
struct channel *chan;
chan = channel_create(&client_config, name, lttng_chan, buf_addr,
static
struct channel *_channel_create(const char *name,
- struct lttng_channel *lttng_chan, void *buf_addr,
+ void *priv, void *buf_addr,
size_t subbuf_size, size_t num_subbuf,
unsigned int switch_timer_interval,
unsigned int read_timer_interval)
{
+ struct lttng_channel *lttng_chan = priv;
struct channel *chan;
chan = channel_create(&client_config, name,
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
+ *
+ * lttng-ring-buffer-trigger-client.c
+ *
+ * LTTng lib ring buffer trigger client.
+ *
+ * Copyright (C) 2010-2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <lttng-tracer.h>
+
+#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
+#define RING_BUFFER_MODE_TEMPLATE_STRING "trigger"
+#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_NONE
+#include "lttng-ring-buffer-trigger-client.h"
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
+ *
+ * lttng-ring-buffer-trigger-client.h
+ *
+ * LTTng lib ring buffer trigger client template.
+ *
+ * Copyright (C) 2010-2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_all() */
+#include <lttng-events.h>
+#include <lttng-tracer.h>
+
+static struct lttng_transport lttng_relay_transport;
+
+struct trigger_packet_header {
+ uint32_t magic; /* 0x75D11D57 */
+ uint32_t checksum; /* 0 if unused */
+ uint32_t content_size; /* in bits */
+ uint32_t packet_size; /* in bits */
+ uint8_t compression_scheme; /* 0 if unused */
+ uint8_t encryption_scheme; /* 0 if unused */
+ uint8_t checksum_scheme; /* 0 if unused */
+ uint8_t major; /* CTF spec major version number */
+ uint8_t minor; /* CTF spec minor version number */
+ uint8_t header_end[0];
+};
+
+struct trigger_record_header {
+ uint8_t header_end[0]; /* End of header */
+};
+
+static const struct lib_ring_buffer_config client_config;
+
+static inline
+u64 lib_ring_buffer_clock_read(struct channel *chan)
+{
+ return 0;
+}
+
+static inline
+size_t record_header_size(const struct lib_ring_buffer_config *config,
+ struct channel *chan, size_t offset,
+ size_t *pre_header_padding,
+ struct lib_ring_buffer_ctx *ctx,
+ void *client_ctx)
+{
+ return 0;
+}
+
+#include <wrapper/ringbuffer/api.h>
+
+static u64 client_ring_buffer_clock_read(struct channel *chan)
+{
+ return 0;
+}
+
+static
+size_t client_record_header_size(const struct lib_ring_buffer_config *config,
+ struct channel *chan, size_t offset,
+ size_t *pre_header_padding,
+ struct lib_ring_buffer_ctx *ctx,
+ void *client_ctx)
+{
+ return 0;
+}
+
+/**
+ * client_packet_header_size - called on buffer-switch to a new sub-buffer
+ *
+ * Return header size without padding after the structure. Don't use packed
+ * structure because gcc generates inefficient code on some architectures
+ * (powerpc, mips..)
+ */
+static size_t client_packet_header_size(void)
+{
+ return offsetof(struct trigger_packet_header, header_end);
+}
+
+static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
+ unsigned int subbuf_idx)
+{
+ struct channel *chan = buf->backend.chan;
+ struct trigger_packet_header *header =
+ (struct trigger_packet_header *)
+ lib_ring_buffer_offset_address(&buf->backend,
+ subbuf_idx * chan->backend.subbuf_size);
+
+ header->magic = TSDL_MAGIC_NUMBER;
+ header->checksum = 0; /* 0 if unused */
+ header->content_size = 0xFFFFFFFF; /* in bits, for debugging */
+ header->packet_size = 0xFFFFFFFF; /* in bits, for debugging */
+ header->compression_scheme = 0; /* 0 if unused */
+ header->encryption_scheme = 0; /* 0 if unused */
+ header->checksum_scheme = 0; /* 0 if unused */
+ header->major = CTF_SPEC_MAJOR;
+ header->minor = CTF_SPEC_MINOR;
+}
+
+/*
+ * offset is assumed to never be 0 here : never deliver a completely empty
+ * subbuffer. data_size is between 1 and subbuf_size.
+ */
+static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
+		unsigned int subbuf_idx, unsigned long data_size)
+{
+	struct channel *chan = buf->backend.chan;
+	struct trigger_packet_header *header =
+		(struct trigger_packet_header *)
+		lib_ring_buffer_offset_address(&buf->backend,
+			subbuf_idx * chan->backend.subbuf_size);
+	unsigned long records_lost = 0;
+
+	/* Replace the 0xFFFFFFFF debug placeholders set in client_buffer_begin. */
+	header->content_size = data_size * CHAR_BIT; /* in bits */
+	header->packet_size = PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
+	/*
+	 * We do not care about the records lost count, because the trigger
+	 * channel waits and retry.
+	 */
+	(void) lib_ring_buffer_get_records_lost_full(&client_config, buf);
+	records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
+	records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
+	/* Wrap/big record losses are unexpected for this discard-mode client. */
+	WARN_ON_ONCE(records_lost != 0);
+}
+
+static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
+ int cpu, const char *name)
+{
+ return 0;
+}
+
+static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
+{
+}
+
+static int client_timestamp_begin(const struct lib_ring_buffer_config *config,
+ struct lib_ring_buffer *buf, uint64_t *timestamp_begin)
+{
+ return -ENOSYS;
+}
+
+static int client_timestamp_end(const struct lib_ring_buffer_config *config,
+ struct lib_ring_buffer *bufb,
+ uint64_t *timestamp_end)
+{
+ return -ENOSYS;
+}
+
+static int client_events_discarded(const struct lib_ring_buffer_config *config,
+ struct lib_ring_buffer *bufb,
+ uint64_t *events_discarded)
+{
+ return -ENOSYS;
+}
+
+static int client_current_timestamp(const struct lib_ring_buffer_config *config,
+ struct lib_ring_buffer *bufb,
+ uint64_t *ts)
+{
+ return -ENOSYS;
+}
+
+static int client_content_size(const struct lib_ring_buffer_config *config,
+ struct lib_ring_buffer *bufb,
+ uint64_t *content_size)
+{
+ return -ENOSYS;
+}
+
+static int client_packet_size(const struct lib_ring_buffer_config *config,
+ struct lib_ring_buffer *bufb,
+ uint64_t *packet_size)
+{
+ return -ENOSYS;
+}
+
+static int client_stream_id(const struct lib_ring_buffer_config *config,
+ struct lib_ring_buffer *bufb,
+ uint64_t *stream_id)
+{
+ return -ENOSYS;
+}
+
+static int client_sequence_number(const struct lib_ring_buffer_config *config,
+ struct lib_ring_buffer *bufb,
+ uint64_t *seq)
+{
+ return -ENOSYS;
+}
+
+static
+int client_instance_id(const struct lib_ring_buffer_config *config,
+ struct lib_ring_buffer *bufb,
+ uint64_t *id)
+{
+ return -ENOSYS;
+}
+
+/*
+ * Describe the record at "offset": a trigger record has an empty header
+ * and a fixed 64-bit payload (the trigger id); timestamps are unused by
+ * this client (clock reads return 0).
+ */
+static void client_record_get(const struct lib_ring_buffer_config *config,
+		struct channel *chan, struct lib_ring_buffer *buf,
+		size_t offset, size_t *header_len,
+		size_t *payload_len, u64 *timestamp)
+{
+	struct trigger_record_header header;
+	int ret;
+
+	/* Header is zero-length (header_end at offset 0), so this reads 0 bytes. */
+	ret = lib_ring_buffer_read(&buf->backend, offset, &header,
+			offsetof(struct trigger_record_header, header_end));
+	CHAN_WARN_ON(chan, ret != offsetof(struct trigger_record_header, header_end));
+	*header_len = offsetof(struct trigger_record_header, header_end);
+	/*
+	 * Currently, only 64-bit trigger ID.
+	 */
+	*payload_len = sizeof(uint64_t);
+	*timestamp = 0;
+}
+
+static const struct lib_ring_buffer_config client_config = {
+ .cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
+ .cb.record_header_size = client_record_header_size,
+ .cb.subbuffer_header_size = client_packet_header_size,
+ .cb.buffer_begin = client_buffer_begin,
+ .cb.buffer_end = client_buffer_end,
+ .cb.buffer_create = client_buffer_create,
+ .cb.buffer_finalize = client_buffer_finalize,
+ .cb.record_get = client_record_get,
+
+ .tsc_bits = 0,
+ .alloc = RING_BUFFER_ALLOC_GLOBAL,
+ .sync = RING_BUFFER_SYNC_GLOBAL,
+ .mode = RING_BUFFER_MODE_TEMPLATE,
+ .backend = RING_BUFFER_PAGE,
+ .output = RING_BUFFER_OUTPUT_TEMPLATE,
+ .oops = RING_BUFFER_OOPS_CONSISTENCY,
+ .ipi = RING_BUFFER_NO_IPI_BARRIER,
+ .wakeup = RING_BUFFER_WAKEUP_BY_WRITER,
+};
+
+static
+void release_priv_ops(void *priv_ops)
+{
+ module_put(THIS_MODULE);
+}
+
+static
+void lttng_channel_destroy(struct channel *chan)
+{
+ channel_destroy(chan);
+}
+
+static
+struct channel *_channel_create(const char *name,
+		void *priv, void *buf_addr,
+		size_t subbuf_size, size_t num_subbuf,
+		unsigned int switch_timer_interval,
+		unsigned int read_timer_interval)
+{
+	struct lttng_trigger_group *trigger_group = priv;
+	struct channel *chan;
+
+	chan = channel_create(&client_config, name,
+			trigger_group, buf_addr,
+			subbuf_size, num_subbuf, switch_timer_interval,
+			read_timer_interval);
+	if (chan) {
+		/*
+		 * Ensure this module is not unloaded before we finish
+		 * using lttng_relay_transport.ops.
+		 */
+		if (!try_module_get(THIS_MODULE)) {
+			printk(KERN_WARNING "LTT : Can't lock transport module.\n");
+			goto error;
+		}
+		/* Fix: "&lt;" entity was mis-decoded to '<' in "&lttng_relay_transport". */
+		chan->backend.priv_ops = &lttng_relay_transport.ops;
+		chan->backend.release_priv_ops = release_priv_ops;
+	}
+	return chan;
+
+error:
+	lttng_channel_destroy(chan);
+	return NULL;
+}
+
+static
+struct lib_ring_buffer *lttng_buffer_read_open(struct channel *chan)
+{
+ struct lib_ring_buffer *buf;
+
+ buf = channel_get_ring_buffer(&client_config, chan, 0);
+ if (!lib_ring_buffer_open_read(buf))
+ return buf;
+ return NULL;
+}
+
+static
+int lttng_buffer_has_read_closed_stream(struct channel *chan)
+{
+ struct lib_ring_buffer *buf;
+ int cpu;
+
+ for_each_channel_cpu(cpu, chan) {
+ buf = channel_get_ring_buffer(&client_config, chan, cpu);
+ if (!atomic_long_read(&buf->active_readers))
+ return 1;
+ }
+ return 0;
+}
+
+static
+void lttng_buffer_read_close(struct lib_ring_buffer *buf)
+{
+ lib_ring_buffer_release_read(buf);
+}
+
+static
+int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx, uint32_t event_id)
+{
+ int ret;
+
+ ret = lib_ring_buffer_reserve(&client_config, ctx, NULL);
+ if (ret)
+ return ret;
+ lib_ring_buffer_backend_get_pages(&client_config, ctx,
+ &ctx->backend_pages);
+ return 0;
+}
+
+static
+void lttng_event_commit(struct lib_ring_buffer_ctx *ctx)
+{
+ lib_ring_buffer_commit(&client_config, ctx);
+}
+
+static
+void lttng_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
+ size_t len)
+{
+ lib_ring_buffer_write(&client_config, ctx, src, len);
+}
+
+static
+void lttng_event_write_from_user(struct lib_ring_buffer_ctx *ctx,
+ const void __user *src, size_t len)
+{
+ lib_ring_buffer_copy_from_user_inatomic(&client_config, ctx, src, len);
+}
+
+static
+void lttng_event_memset(struct lib_ring_buffer_ctx *ctx,
+ int c, size_t len)
+{
+ lib_ring_buffer_memset(&client_config, ctx, c, len);
+}
+
+static
+void lttng_event_strcpy(struct lib_ring_buffer_ctx *ctx, const char *src,
+ size_t len)
+{
+ lib_ring_buffer_strcpy(&client_config, ctx, src, len, '#');
+}
+
+static
+size_t lttng_packet_avail_size(struct channel *chan)
+{
+ unsigned long o_begin;
+ struct lib_ring_buffer *buf;
+
+ buf = chan->backend.buf; /* Only for global buffer ! */
+ o_begin = v_read(&client_config, &buf->offset);
+ if (subbuf_offset(o_begin, chan) != 0) {
+ return chan->backend.subbuf_size - subbuf_offset(o_begin, chan);
+ } else {
+ return chan->backend.subbuf_size - subbuf_offset(o_begin, chan)
+ - sizeof(struct trigger_packet_header);
+ }
+}
+
+static
+wait_queue_head_t *lttng_get_writer_buf_wait_queue(struct channel *chan, int cpu)
+{
+ struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
+ chan, cpu);
+ return &buf->write_wait;
+}
+
+static
+wait_queue_head_t *lttng_get_hp_wait_queue(struct channel *chan)
+{
+ return &chan->hp_wait;
+}
+
+static
+int lttng_is_finalized(struct channel *chan)
+{
+ return lib_ring_buffer_channel_is_finalized(chan);
+}
+
+static
+int lttng_is_disabled(struct channel *chan)
+{
+ return lib_ring_buffer_channel_is_disabled(chan);
+}
+
+static struct lttng_transport lttng_relay_transport = {
+ .name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
+ .owner = THIS_MODULE,
+ .ops = {
+ .channel_create = _channel_create,
+ .channel_destroy = lttng_channel_destroy,
+ .buffer_read_open = lttng_buffer_read_open,
+ .buffer_has_read_closed_stream =
+ lttng_buffer_has_read_closed_stream,
+ .buffer_read_close = lttng_buffer_read_close,
+ .event_reserve = lttng_event_reserve,
+ .event_commit = lttng_event_commit,
+ .event_write_from_user = lttng_event_write_from_user,
+ .event_memset = lttng_event_memset,
+ .event_write = lttng_event_write,
+ .event_strcpy = lttng_event_strcpy,
+ .packet_avail_size = lttng_packet_avail_size,
+ .get_writer_buf_wait_queue = lttng_get_writer_buf_wait_queue,
+ .get_hp_wait_queue = lttng_get_hp_wait_queue,
+ .is_finalized = lttng_is_finalized,
+ .is_disabled = lttng_is_disabled,
+ .timestamp_begin = client_timestamp_begin,
+ .timestamp_end = client_timestamp_end,
+ .events_discarded = client_events_discarded,
+ .content_size = client_content_size,
+ .packet_size = client_packet_size,
+ .stream_id = client_stream_id,
+ .current_timestamp = client_current_timestamp,
+ .sequence_number = client_sequence_number,
+ .instance_id = client_instance_id,
+ },
+};
+
+static int __init lttng_ring_buffer_trigger_client_init(void)
+{
+	/*
+	 * This vmalloc sync all also takes care of the lib ring buffer
+	 * vmalloc'd module pages when it is built as a module into LTTng.
+	 */
+	wrapper_vmalloc_sync_all();
+	/* Fix: "&lt;" entity was mis-decoded to '<' in "&lttng_relay_transport". */
+	lttng_transport_register(&lttng_relay_transport);
+	return 0;
+}
+
+module_init(lttng_ring_buffer_trigger_client_init);
+
+static void __exit lttng_ring_buffer_trigger_client_exit(void)
+{
+	/* Fix: "&lt;" entity was mis-decoded to '<' in "&lttng_relay_transport". */
+	lttng_transport_unregister(&lttng_relay_transport);
+}
+
+module_exit(lttng_ring_buffer_trigger_client_exit);
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
+MODULE_DESCRIPTION("LTTng ring buffer " RING_BUFFER_MODE_TEMPLATE_STRING
+ " client");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+ __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+ __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+ LTTNG_MODULES_EXTRAVERSION);
#include <wrapper/rcu.h>
#include <wrapper/syscall.h>
#include <lttng-events.h>
+#include <lttng-utils.h>
#ifndef CONFIG_COMPAT
# ifndef is_compat_task
#define COMPAT_SYSCALL_EXIT_STR __stringify(COMPAT_SYSCALL_EXIT_TOK)
static
-void syscall_entry_probe(void *__data, struct pt_regs *regs, long id);
+void syscall_entry_event_probe(void *__data, struct pt_regs *regs, long id);
static
-void syscall_exit_probe(void *__data, struct pt_regs *regs, long ret);
+void syscall_exit_event_probe(void *__data, struct pt_regs *regs, long ret);
/*
* Forward declarations for old kernels.
/* Hijack probe callback for system call enter */
#undef TP_PROBE_CB
-#define TP_PROBE_CB(_template) &syscall_entry_probe
+#define TP_PROBE_CB(_template) &syscall_entry_event_probe
#define SC_LTTNG_TRACEPOINT_EVENT(_name, _proto, _args, _fields) \
LTTNG_TRACEPOINT_EVENT(syscall_entry_##_name, PARAMS(_proto), PARAMS(_args), \
PARAMS(_fields))
#undef _TRACE_SYSCALLS_POINTERS_H
/* Hijack probe callback for compat system call enter */
-#define TP_PROBE_CB(_template) &syscall_entry_probe
+#define TP_PROBE_CB(_template) &syscall_entry_event_probe
#define LTTNG_SC_COMPAT
#define SC_LTTNG_TRACEPOINT_EVENT(_name, _proto, _args, _fields) \
LTTNG_TRACEPOINT_EVENT(compat_syscall_entry_##_name, PARAMS(_proto), PARAMS(_args), \
#define sc_inout(...) __VA_ARGS__
/* Hijack probe callback for system call exit */
-#define TP_PROBE_CB(_template) &syscall_exit_probe
+#define TP_PROBE_CB(_template) &syscall_exit_event_probe
#define SC_LTTNG_TRACEPOINT_EVENT(_name, _proto, _args, _fields) \
LTTNG_TRACEPOINT_EVENT(syscall_exit_##_name, PARAMS(_proto), PARAMS(_args), \
PARAMS(_fields))
/* Hijack probe callback for compat system call exit */
-#define TP_PROBE_CB(_template) &syscall_exit_probe
+#define TP_PROBE_CB(_template) &syscall_exit_event_probe
#define LTTNG_SC_COMPAT
#define SC_LTTNG_TRACEPOINT_EVENT(_name, _proto, _args, _fields) \
LTTNG_TRACEPOINT_EVENT(compat_syscall_exit_##_name, PARAMS(_proto), PARAMS(_args), \
#undef CREATE_TRACE_POINTS
struct trace_syscall_entry {
- void *func;
+ void *event_func;
+ void *trigger_func;
const struct lttng_event_desc *desc;
const struct lttng_event_field *fields;
unsigned int nrargs;
#undef TRACE_SYSCALL_TABLE
#define TRACE_SYSCALL_TABLE(_template, _name, _nr, _nrargs) \
[ _nr ] = { \
- .func = __event_probe__syscall_entry_##_template, \
+ .event_func = __event_probe__syscall_entry_##_template, \
+ .trigger_func = __trigger_probe__syscall_entry_##_template, \
.nrargs = (_nrargs), \
.fields = __event_fields___syscall_entry_##_template, \
.desc = &__event_desc___syscall_entry_##_name, \
},
-/* Syscall enter tracing table */
+/* Event syscall enter tracing table */
static const struct trace_syscall_entry sc_table[] = {
#include <instrumentation/syscalls/headers/syscalls_integers.h>
#include <instrumentation/syscalls/headers/syscalls_pointers.h>
#undef TRACE_SYSCALL_TABLE
#define TRACE_SYSCALL_TABLE(_template, _name, _nr, _nrargs) \
[ _nr ] = { \
- .func = __event_probe__compat_syscall_entry_##_template, \
+ .event_func = __event_probe__compat_syscall_entry_##_template, \
+ .trigger_func = __trigger_probe__compat_syscall_entry_##_template, \
.nrargs = (_nrargs), \
.fields = __event_fields___compat_syscall_entry_##_template, \
.desc = &__event_desc___compat_syscall_entry_##_name, \
},
-/* Compat syscall enter table */
+/* Event compat syscall enter table */
const struct trace_syscall_entry compat_sc_table[] = {
#include <instrumentation/syscalls/headers/compat_syscalls_integers.h>
#include <instrumentation/syscalls/headers/compat_syscalls_pointers.h>
#undef TRACE_SYSCALL_TABLE
#define TRACE_SYSCALL_TABLE(_template, _name, _nr, _nrargs) \
[ _nr ] = { \
- .func = __event_probe__syscall_exit_##_template, \
+ .event_func = __event_probe__syscall_exit_##_template, \
+ .trigger_func = __trigger_probe__syscall_exit_##_template, \
.nrargs = (_nrargs), \
.fields = __event_fields___syscall_exit_##_template, \
.desc = &__event_desc___syscall_exit_##_name, \
},
-/* Syscall exit table */
+/* Event syscall exit table */
static const struct trace_syscall_entry sc_exit_table[] = {
#include <instrumentation/syscalls/headers/syscalls_integers.h>
#include <instrumentation/syscalls/headers/syscalls_pointers.h>
#undef TRACE_SYSCALL_TABLE
#define TRACE_SYSCALL_TABLE(_template, _name, _nr, _nrargs) \
[ _nr ] = { \
- .func = __event_probe__compat_syscall_exit_##_template, \
+ .event_func = __event_probe__compat_syscall_exit_##_template, \
+ .trigger_func = __trigger_probe__compat_syscall_exit_##_template, \
.nrargs = (_nrargs), \
.fields = __event_fields___compat_syscall_exit_##_template, \
.desc = &__event_desc___compat_syscall_exit_##_name, \
},
-/* Compat syscall exit table */
+/* Event compat syscall exit table */
const struct trace_syscall_entry compat_sc_exit_table[] = {
#include <instrumentation/syscalls/headers/compat_syscalls_integers.h>
#include <instrumentation/syscalls/headers/compat_syscalls_pointers.h>
DECLARE_BITMAP(sc_compat, NR_compat_syscalls);
};
-static void syscall_entry_unknown(struct lttng_event *event,
+static void syscall_entry_event_unknown(struct lttng_event *event,
struct pt_regs *regs, unsigned int id)
{
unsigned long args[LTTNG_SYSCALL_NR_ARGS];
__event_probe__syscall_entry_unknown(event, id, args);
}
-void syscall_entry_probe(void *__data, struct pt_regs *regs, long id)
+static __always_inline
+void syscall_entry_call_func(void *func, unsigned int nrargs, void *data,
+ struct pt_regs *regs)
{
- struct lttng_channel *chan = __data;
- struct lttng_event *event, *unknown_event;
- const struct trace_syscall_entry *table, *entry;
- size_t table_len;
-
- if (unlikely(in_compat_syscall())) {
- struct lttng_syscall_filter *filter;
-
- filter = lttng_rcu_dereference(chan->sc_filter);
- if (filter) {
- if (id < 0 || id >= NR_compat_syscalls
- || !test_bit(id, filter->sc_compat)) {
- /* System call filtered out. */
- return;
- }
- }
- table = compat_sc_table;
- table_len = ARRAY_SIZE(compat_sc_table);
- unknown_event = chan->sc_compat_unknown;
- } else {
- struct lttng_syscall_filter *filter;
-
- filter = lttng_rcu_dereference(chan->sc_filter);
- if (filter) {
- if (id < 0 || id >= NR_syscalls
- || !test_bit(id, filter->sc)) {
- /* System call filtered out. */
- return;
- }
- }
- table = sc_table;
- table_len = ARRAY_SIZE(sc_table);
- unknown_event = chan->sc_unknown;
- }
- if (unlikely(id < 0 || id >= table_len)) {
- syscall_entry_unknown(unknown_event, regs, id);
- return;
- }
- if (unlikely(in_compat_syscall()))
- event = chan->compat_sc_table[id];
- else
- event = chan->sc_table[id];
- if (unlikely(!event)) {
- syscall_entry_unknown(unknown_event, regs, id);
- return;
- }
- entry = &table[id];
- WARN_ON_ONCE(!entry);
-
- switch (entry->nrargs) {
+ switch (nrargs) {
case 0:
{
- void (*fptr)(void *__data) = entry->func;
+ void (*fptr)(void *__data) = func;
- fptr(event);
+ fptr(data);
break;
}
case 1:
{
- void (*fptr)(void *__data, unsigned long arg0) = entry->func;
+ void (*fptr)(void *__data, unsigned long arg0) = func;
unsigned long args[LTTNG_SYSCALL_NR_ARGS];
lttng_syscall_get_arguments(current, regs, args);
- fptr(event, args[0]);
+ fptr(data, args[0]);
break;
}
case 2:
{
void (*fptr)(void *__data,
unsigned long arg0,
- unsigned long arg1) = entry->func;
+ unsigned long arg1) = func;
unsigned long args[LTTNG_SYSCALL_NR_ARGS];
lttng_syscall_get_arguments(current, regs, args);
- fptr(event, args[0], args[1]);
+ fptr(data, args[0], args[1]);
break;
}
case 3:
void (*fptr)(void *__data,
unsigned long arg0,
unsigned long arg1,
- unsigned long arg2) = entry->func;
+ unsigned long arg2) = func;
unsigned long args[LTTNG_SYSCALL_NR_ARGS];
lttng_syscall_get_arguments(current, regs, args);
- fptr(event, args[0], args[1], args[2]);
+ fptr(data, args[0], args[1], args[2]);
break;
}
case 4:
unsigned long arg0,
unsigned long arg1,
unsigned long arg2,
- unsigned long arg3) = entry->func;
+ unsigned long arg3) = func;
unsigned long args[LTTNG_SYSCALL_NR_ARGS];
lttng_syscall_get_arguments(current, regs, args);
- fptr(event, args[0], args[1], args[2], args[3]);
+ fptr(data, args[0], args[1], args[2], args[3]);
break;
}
case 5:
unsigned long arg1,
unsigned long arg2,
unsigned long arg3,
- unsigned long arg4) = entry->func;
+ unsigned long arg4) = func;
unsigned long args[LTTNG_SYSCALL_NR_ARGS];
lttng_syscall_get_arguments(current, regs, args);
- fptr(event, args[0], args[1], args[2], args[3], args[4]);
+ fptr(data, args[0], args[1], args[2], args[3], args[4]);
break;
}
case 6:
unsigned long arg2,
unsigned long arg3,
unsigned long arg4,
- unsigned long arg5) = entry->func;
+ unsigned long arg5) = func;
unsigned long args[LTTNG_SYSCALL_NR_ARGS];
lttng_syscall_get_arguments(current, regs, args);
- fptr(event, args[0], args[1], args[2],
+ fptr(data, args[0], args[1], args[2],
args[3], args[4], args[5]);
break;
}
}
}
-static void syscall_exit_unknown(struct lttng_event *event,
+void syscall_entry_event_probe(void *__data, struct pt_regs *regs, long id)
+{
+ struct lttng_channel *chan = __data;
+ struct lttng_syscall_filter *filter;
+ struct lttng_event *event, *unknown_event;
+ const struct trace_syscall_entry *table, *entry;
+ size_t table_len;
+
+ filter = lttng_rcu_dereference(chan->sc_filter);
+
+ if (unlikely(in_compat_syscall())) {
+ if (filter) {
+ if (id < 0 || id >= NR_compat_syscalls
+ || !test_bit(id, filter->sc_compat)) {
+ /* System call filtered out. */
+ return;
+ }
+ }
+ table = compat_sc_table;
+ table_len = ARRAY_SIZE(compat_sc_table);
+ unknown_event = chan->sc_compat_unknown;
+ } else {
+ if (filter) {
+ if (id < 0 || id >= NR_syscalls
+ || !test_bit(id, filter->sc)) {
+ /* System call filtered out. */
+ return;
+ }
+ }
+ table = sc_table;
+ table_len = ARRAY_SIZE(sc_table);
+ unknown_event = chan->sc_unknown;
+ }
+ if (unlikely(id < 0 || id >= table_len)) {
+ syscall_entry_event_unknown(unknown_event, regs, id);
+ return;
+ }
+ if (unlikely(in_compat_syscall()))
+ event = chan->compat_sc_table[id];
+ else
+ event = chan->sc_table[id];
+ if (unlikely(!event)) {
+ syscall_entry_event_unknown(unknown_event, regs, id);
+ return;
+ }
+ entry = &table[id];
+ WARN_ON_ONCE(!entry);
+
+ syscall_entry_call_func(entry->event_func, entry->nrargs, event, regs);
+}
+
+/*
+ * "sys_enter" tracepoint probe for triggers: look up the (compat or native)
+ * syscall table entry for "id" and fire every trigger registered for that
+ * syscall id in the group's dispatch list.
+ */
+void syscall_entry_trigger_probe(void *__data, struct pt_regs *regs, long id)
+{
+	struct lttng_trigger_group *trigger_group = __data;
+	const struct trace_syscall_entry *entry;
+	struct list_head *dispatch_list;
+	struct lttng_trigger *iter;
+	size_t table_len;
+
+
+	if (unlikely(in_compat_syscall())) {
+		table_len = ARRAY_SIZE(compat_sc_table);
+		/* Negative or out-of-range id: no table entry, nothing to do. */
+		if (unlikely(id < 0 || id >= table_len)) {
+			return;
+		}
+		entry = &compat_sc_table[id];
+		dispatch_list = &trigger_group->trigger_compat_syscall_dispatch[id];
+	} else {
+		table_len = ARRAY_SIZE(sc_table);
+		if (unlikely(id < 0 || id >= table_len)) {
+			return;
+		}
+		entry = &sc_table[id];
+		dispatch_list = &trigger_group->trigger_syscall_dispatch[id];
+	}
+
+	/* TODO handle unknown syscall */
+
+	/* NOTE(review): assumes RCU read-side protection from the tracepoint
+	 * probe context — confirm. */
+	list_for_each_entry_rcu(iter, dispatch_list, u.syscall.node) {
+		BUG_ON(iter->u.syscall.syscall_id != id);
+		syscall_entry_call_func(entry->trigger_func, entry->nrargs, iter, regs);
+	}
+}
+
+static void syscall_exit_event_unknown(struct lttng_event *event,
struct pt_regs *regs, int id, long ret)
{
unsigned long args[LTTNG_SYSCALL_NR_ARGS];
__event_probe__syscall_exit_unknown(event, id, ret, args);
}
-void syscall_exit_probe(void *__data, struct pt_regs *regs, long ret)
+void syscall_exit_event_probe(void *__data, struct pt_regs *regs, long ret)
{
struct lttng_channel *chan = __data;
+ struct lttng_syscall_filter *filter;
struct lttng_event *event, *unknown_event;
const struct trace_syscall_entry *table, *entry;
size_t table_len;
long id;
+ filter = lttng_rcu_dereference(chan->sc_filter);
+
id = syscall_get_nr(current, regs);
if (unlikely(in_compat_syscall())) {
- struct lttng_syscall_filter *filter;
-
- filter = lttng_rcu_dereference(chan->sc_filter);
if (filter) {
if (id < 0 || id >= NR_compat_syscalls
|| !test_bit(id, filter->sc_compat)) {
table_len = ARRAY_SIZE(compat_sc_exit_table);
unknown_event = chan->compat_sc_exit_unknown;
} else {
- struct lttng_syscall_filter *filter;
-
- filter = lttng_rcu_dereference(chan->sc_filter);
if (filter) {
if (id < 0 || id >= NR_syscalls
|| !test_bit(id, filter->sc)) {
unknown_event = chan->sc_exit_unknown;
}
if (unlikely(id < 0 || id >= table_len)) {
- syscall_exit_unknown(unknown_event, regs, id, ret);
+ syscall_exit_event_unknown(unknown_event, regs, id, ret);
return;
}
if (unlikely(in_compat_syscall()))
else
event = chan->sc_exit_table[id];
if (unlikely(!event)) {
- syscall_exit_unknown(unknown_event, regs, id, ret);
+ syscall_exit_event_unknown(unknown_event, regs, id, ret);
return;
}
entry = &table[id];
switch (entry->nrargs) {
case 0:
{
- void (*fptr)(void *__data, long ret) = entry->func;
+ void (*fptr)(void *__data, long ret) = entry->event_func;
fptr(event, ret);
break;
{
void (*fptr)(void *__data,
long ret,
- unsigned long arg0) = entry->func;
+ unsigned long arg0) = entry->event_func;
unsigned long args[LTTNG_SYSCALL_NR_ARGS];
lttng_syscall_get_arguments(current, regs, args);
void (*fptr)(void *__data,
long ret,
unsigned long arg0,
- unsigned long arg1) = entry->func;
+ unsigned long arg1) = entry->event_func;
unsigned long args[LTTNG_SYSCALL_NR_ARGS];
lttng_syscall_get_arguments(current, regs, args);
long ret,
unsigned long arg0,
unsigned long arg1,
- unsigned long arg2) = entry->func;
+ unsigned long arg2) = entry->event_func;
unsigned long args[LTTNG_SYSCALL_NR_ARGS];
lttng_syscall_get_arguments(current, regs, args);
unsigned long arg0,
unsigned long arg1,
unsigned long arg2,
- unsigned long arg3) = entry->func;
+ unsigned long arg3) = entry->event_func;
unsigned long args[LTTNG_SYSCALL_NR_ARGS];
lttng_syscall_get_arguments(current, regs, args);
unsigned long arg1,
unsigned long arg2,
unsigned long arg3,
- unsigned long arg4) = entry->func;
+ unsigned long arg4) = entry->event_func;
unsigned long args[LTTNG_SYSCALL_NR_ARGS];
lttng_syscall_get_arguments(current, regs, args);
unsigned long arg2,
unsigned long arg3,
unsigned long arg4,
- unsigned long arg5) = entry->func;
+ unsigned long arg5) = entry->event_func;
unsigned long args[LTTNG_SYSCALL_NR_ARGS];
lttng_syscall_get_arguments(current, regs, args);
* Should be called with sessions lock held.
*/
static
-int fill_table(const struct trace_syscall_entry *table, size_t table_len,
+int fill_event_table(const struct trace_syscall_entry *table, size_t table_len,
struct lttng_event **chan_table, struct lttng_channel *chan,
void *filter, enum sc_type type)
{
/*
* Should be called with sessions lock held.
*/
-int lttng_syscalls_register(struct lttng_channel *chan, void *filter)
+int lttng_syscalls_register_event(struct lttng_channel *chan, void *filter)
{
struct lttng_kernel_event ev;
int ret;
}
}
- ret = fill_table(sc_table, ARRAY_SIZE(sc_table),
+ ret = fill_event_table(sc_table, ARRAY_SIZE(sc_table),
chan->sc_table, chan, filter, SC_TYPE_ENTRY);
if (ret)
return ret;
- ret = fill_table(sc_exit_table, ARRAY_SIZE(sc_exit_table),
+ ret = fill_event_table(sc_exit_table, ARRAY_SIZE(sc_exit_table),
chan->sc_exit_table, chan, filter, SC_TYPE_EXIT);
if (ret)
return ret;
#ifdef CONFIG_COMPAT
- ret = fill_table(compat_sc_table, ARRAY_SIZE(compat_sc_table),
+ ret = fill_event_table(compat_sc_table, ARRAY_SIZE(compat_sc_table),
chan->compat_sc_table, chan, filter,
SC_TYPE_COMPAT_ENTRY);
if (ret)
return ret;
- ret = fill_table(compat_sc_exit_table, ARRAY_SIZE(compat_sc_exit_table),
+ ret = fill_event_table(compat_sc_exit_table, ARRAY_SIZE(compat_sc_exit_table),
chan->compat_sc_exit_table, chan, filter,
SC_TYPE_COMPAT_EXIT);
if (ret)
#endif
if (!chan->sys_enter_registered) {
ret = lttng_wrapper_tracepoint_probe_register("sys_enter",
- (void *) syscall_entry_probe, chan);
+ (void *) syscall_entry_event_probe, chan);
if (ret)
return ret;
chan->sys_enter_registered = 1;
*/
if (!chan->sys_exit_registered) {
ret = lttng_wrapper_tracepoint_probe_register("sys_exit",
- (void *) syscall_exit_probe, chan);
+ (void *) syscall_exit_event_probe, chan);
if (ret) {
WARN_ON_ONCE(lttng_wrapper_tracepoint_probe_unregister("sys_enter",
- (void *) syscall_entry_probe, chan));
+ (void *) syscall_entry_event_probe, chan));
return ret;
}
chan->sys_exit_registered = 1;
}
/*
- * Only called at session destruction.
+ * Should be called with sessions lock held.
+ */
+int lttng_syscalls_register_trigger(struct lttng_trigger_enabler *trigger_enabler, void *filter)
+{
+	struct lttng_trigger_group *group = trigger_enabler->group;
+	unsigned int i;
+	int ret = 0;
+
+	wrapper_vmalloc_sync_all();
+
+	if (!group->trigger_syscall_dispatch) {
+		group->trigger_syscall_dispatch = kzalloc(sizeof(struct list_head)
+				* ARRAY_SIZE(sc_table), GFP_KERNEL);
+		if (!group->trigger_syscall_dispatch)
+			return -ENOMEM;
+
+		/* Initialize all list_head */
+		for (i = 0; i < ARRAY_SIZE(sc_table); i++)
+			INIT_LIST_HEAD(&group->trigger_syscall_dispatch[i]);
+	}
+
+#ifdef CONFIG_COMPAT
+	if (!group->trigger_compat_syscall_dispatch) {
+		group->trigger_compat_syscall_dispatch = kzalloc(sizeof(struct list_head)
+				* ARRAY_SIZE(compat_sc_table), GFP_KERNEL);
+		/*
+		 * Fix: check the compat pointer just allocated, not the native
+		 * one (copy-paste error let compat allocation failure through).
+		 */
+		if (!group->trigger_compat_syscall_dispatch)
+			return -ENOMEM;
+
+		/* Initialize all list_head */
+		for (i = 0; i < ARRAY_SIZE(compat_sc_table); i++)
+			INIT_LIST_HEAD(&group->trigger_compat_syscall_dispatch[i]);
+	}
+#endif
+
+	if (!group->sys_enter_registered) {
+		ret = lttng_wrapper_tracepoint_probe_register("sys_enter",
+				(void *) syscall_entry_trigger_probe, group);
+		if (ret)
+			return ret;
+		group->sys_enter_registered = 1;
+	}
+
+	return ret;
+}
+
+/*
+ * Create a trigger for every syscall in "table" whose descriptor matches
+ * the given enabler, skipping syscalls for which a trigger with the same
+ * descriptor and id already exists in the group's hash table.
+ */
+static int create_matching_triggers(struct lttng_trigger_enabler *trigger_enabler,
+		void *filter, const struct trace_syscall_entry *table,
+		size_t table_len, bool is_compat)
+{
+	struct lttng_trigger_group *group = trigger_enabler->group;
+	const struct lttng_event_desc *desc;
+	uint64_t id = trigger_enabler->id;
+	unsigned int i;
+	int ret = 0;
+
+	/* iterate over all syscall and create trigger that match */
+	for (i = 0; i < table_len; i++) {
+		struct lttng_trigger *trigger;
+		struct lttng_kernel_trigger trigger_param;
+		struct hlist_head *head;
+		int found = 0;
+
+		desc = table[i].desc;
+		if (!desc) {
+			/* Unknown syscall */
+			continue;
+		}
+
+		if (!lttng_desc_match_enabler(desc,
+				lttng_trigger_enabler_as_enabler(trigger_enabler)))
+			continue;
+
+		/*
+		 * Check if already created.
+		 */
+		head = utils_borrow_hash_table_bucket(group->triggers_ht.table,
+			LTTNG_TRIGGER_HT_SIZE, desc->name);
+		lttng_hlist_for_each_entry(trigger, head, hlist) {
+			if (trigger->desc == desc
+				&& trigger->id == trigger_enabler->id)
+				found = 1;
+		}
+		if (found)
+			continue;
+
+		memset(&trigger_param, 0, sizeof(trigger_param));
+		strncat(trigger_param.name, desc->name,
+			LTTNG_KERNEL_SYM_NAME_LEN - strlen(trigger_param.name) - 1);
+		trigger_param.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
+		trigger_param.instrumentation = LTTNG_KERNEL_SYSCALL;
+
+		trigger = _lttng_trigger_create(desc, id, group,
+			&trigger_param, filter, trigger_param.instrumentation);
+		if (IS_ERR(trigger)) {
+			printk(KERN_INFO "Unable to create trigger %s\n",
+				desc->name);
+			/* Propagate the real error rather than assuming -ENOMEM. */
+			ret = PTR_ERR(trigger);
+			goto end;
+		}
+
+		trigger->u.syscall.syscall_id = i;
+		trigger->u.syscall.is_compat = is_compat;
+	}
+end:
+	return ret;
+}
+
+/*
+ * Create triggers for all native and compat syscalls matching the enabler.
+ * Stops at the first failure; already-created triggers are kept.
+ *
+ * NOTE(review): "syscals" in the symbol name is misspelled; kept as-is
+ * because external callers reference this exact symbol.
+ * NOTE(review): compat_sc_table is walked even without CONFIG_COMPAT —
+ * confirm this is intended (register path guards compat with #ifdef).
+ */
+int lttng_syscals_create_matching_triggers(struct lttng_trigger_enabler *trigger_enabler, void *filter)
+{
+	int ret;
+
+	ret = create_matching_triggers(trigger_enabler, filter, sc_table,
+		ARRAY_SIZE(sc_table), false);
+	if (ret)
+		goto end;
+
+	ret = create_matching_triggers(trigger_enabler, filter, compat_sc_table,
+		ARRAY_SIZE(compat_sc_table), true);
+end:
+	return ret;
+}
+
+/*
+ * Unregister the trigger syscall tracepoint probe and free the syscall
+ * dispatch tables of the trigger group.
*/
-int lttng_syscalls_unregister(struct lttng_channel *chan)
+int lttng_syscalls_unregister_trigger(struct lttng_trigger_group *trigger_group)
+{
+	int ret;
+
+	/* Stop the sys_enter probe before freeing what it dereferences. */
+	if (trigger_group->sys_enter_registered) {
+		ret = lttng_wrapper_tracepoint_probe_unregister("sys_enter",
+			(void *) syscall_entry_trigger_probe, trigger_group);
+		if (ret)
+			return ret;
+		trigger_group->sys_enter_registered = 0;
+	}
+
+	/*
+	 * NOTE(review): assumes probe unregistration above waits for an RCU
+	 * grace period before we free the dispatch tables still reachable by
+	 * in-flight probes — confirm lttng_wrapper_tracepoint_probe_unregister
+	 * provides that guarantee.
+	 */
+	kfree(trigger_group->trigger_syscall_dispatch);
+#ifdef CONFIG_COMPAT
+	kfree(trigger_group->trigger_compat_syscall_dispatch);
+#endif
+	return 0;
+}
+
+int lttng_syscalls_unregister_event(struct lttng_channel *chan)
{
int ret;
return 0;
if (chan->sys_enter_registered) {
ret = lttng_wrapper_tracepoint_probe_unregister("sys_enter",
- (void *) syscall_entry_probe, chan);
+ (void *) syscall_entry_event_probe, chan);
if (ret)
return ret;
chan->sys_enter_registered = 0;
}
if (chan->sys_exit_registered) {
ret = lttng_wrapper_tracepoint_probe_unregister("sys_exit",
- (void *) syscall_exit_probe, chan);
+ (void *) syscall_exit_event_probe, chan);
if (ret)
return ret;
chan->sys_exit_registered = 0;
for (i = 0; i < ARRAY_SIZE(sc_table); i++) {
const struct trace_syscall_entry *entry;
- const char *it_name;
entry = &sc_table[i];
if (!entry->desc)
continue;
- it_name = entry->desc->name;
- it_name += strlen(SYSCALL_ENTRY_STR);
- if (!strcmp(syscall_name, it_name)) {
+
+ if (!strcmp(syscall_name, entry->desc->name)) {
syscall_nr = i;
break;
}
for (i = 0; i < ARRAY_SIZE(compat_sc_table); i++) {
const struct trace_syscall_entry *entry;
- const char *it_name;
entry = &compat_sc_table[i];
if (!entry->desc)
continue;
- it_name = entry->desc->name;
- it_name += strlen(COMPAT_SYSCALL_ENTRY_STR);
- if (!strcmp(syscall_name, it_name)) {
+
+ if (!strcmp(syscall_name, entry->desc->name)) {
syscall_nr = i;
break;
}
return ARRAY_SIZE(sc_table) + ARRAY_SIZE(compat_sc_table);
}
-int lttng_syscall_filter_enable(struct lttng_channel *chan,
+int lttng_syscall_filter_enable_event(struct lttng_channel *chan,
const char *name)
{
int syscall_nr, compat_syscall_nr, ret;
return ret;
}
-int lttng_syscall_filter_disable(struct lttng_channel *chan,
+/*
+ * Hook a syscall trigger into its group's per-syscall dispatch list so
+ * the tracepoint probe starts notifying for that syscall id.
+ * NOTE(review): list_add_rcu() requires the caller to serialize writers
+ * of the dispatch lists -- confirm the lock held at call sites.
+ */
+int lttng_syscall_filter_enable_trigger(struct lttng_trigger *trigger)
+{
+	struct lttng_trigger_group *group = trigger->group;
+	unsigned int syscall_id = trigger->u.syscall.syscall_id;
+	struct list_head *dispatch_list;
+
+	/* Compat and native syscalls have separate id spaces and tables. */
+	if (trigger->u.syscall.is_compat)
+		dispatch_list = &group->trigger_compat_syscall_dispatch[syscall_id];
+	else
+		dispatch_list = &group->trigger_syscall_dispatch[syscall_id];
+
+	list_add_rcu(&trigger->u.syscall.node, dispatch_list);
+
+	return 0;
+}
+
+int lttng_syscall_filter_disable_event(struct lttng_channel *chan,
const char *name)
{
int syscall_nr, compat_syscall_nr, ret;
return ret;
}
+/*
+ * Unhook a syscall trigger from its dispatch list.
+ * NOTE(review): RCU readers may still traverse the node until a grace
+ * period elapses; the trigger must not be freed before then -- verify
+ * the teardown path waits (synchronize_rcu/call_rcu).
+ */
+int lttng_syscall_filter_disable_trigger(struct lttng_trigger *trigger)
+{
+	list_del_rcu(&trigger->u.syscall.node);
+	return 0;
+}
+
static
const struct trace_syscall_entry *syscall_list_get_entry(loff_t *pos)
{
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1) */
+#ifndef _LTTNG_UTILS_H
+#define _LTTNG_UTILS_H
+/*
+ * Copyright (C) 2020 Francis Deslauriers <francis.deslauriers@efficios.com>
+ */
+
+#include <linux/jhash.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+/*
+ * Return a pointer to (without taking ownership of) the bucket holding
+ * event_name: bucket index is the Jenkins hash of the name masked by the
+ * table size. hash_table_size must be a power of two for the mask to be
+ * a valid modulo.
+ */
+static inline
+struct hlist_head *utils_borrow_hash_table_bucket(
+		struct hlist_head *hash_table,
+		unsigned int hash_table_size,
+		const char *event_name)
+{
+	size_t name_len;
+	uint32_t hash;
+
+	name_len = strlen(event_name);
+
+	hash = jhash(event_name, name_len, 0);
+	return &hash_table[hash & (hash_table_size - 1)];
+}
+#endif /* _LTTNG_UTILS_H */
#include <blacklist/kprobes.h>
static
-int lttng_kprobes_handler_pre(struct kprobe *p, struct pt_regs *regs)
+int lttng_kprobes_event_handler_pre(struct kprobe *p, struct pt_regs *regs)
{
struct lttng_event *event =
container_of(p, struct lttng_event, u.kprobe.kp);
return 0;
}
+/*
+ * kprobe pre-handler for triggers: if the trigger is enabled, send a
+ * notification. Always returns 0 so kprobes proceeds normally with the
+ * probed instruction.
+ */
+static
+int lttng_kprobes_trigger_handler_pre(struct kprobe *p, struct pt_regs *regs)
+{
+	struct lttng_trigger *trigger =
+		container_of(p, struct lttng_trigger, u.kprobe.kp);
+
+	if (unlikely(!READ_ONCE(trigger->enabled)))
+		return 0;
+
+	trigger->send_notification(trigger);
+
+	return 0;
+}
+
/*
* Create event description
*/
return ret;
}
-int lttng_kprobes_register(const char *name,
- const char *symbol_name,
+/*
+ * Create the event description backing a kprobe trigger: a heap-allocated
+ * lttng_event_desc with a duplicated name, no payload fields, owned by
+ * this module, stored in trigger->desc. Returns 0 or -ENOMEM.
+ */
+static
+int lttng_create_kprobe_trigger(const char *name, struct lttng_trigger *trigger)
+{
+	struct lttng_event_desc *desc;
+	int ret;
+
+	desc = kzalloc(sizeof(*trigger->desc), GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+	desc->name = kstrdup(name, GFP_KERNEL);
+	if (!desc->name) {
+		ret = -ENOMEM;
+		goto error_str;
+	}
+	desc->nr_fields = 0;	/* Triggers carry no event payload. */
+
+	desc->owner = THIS_MODULE;
+	trigger->desc = desc;
+
+	return 0;
+
+error_str:
+	kfree(desc);
+	return ret;
+}
+
+/*
+ * Common kprobe registration shared by events and triggers: copy the
+ * symbol name (when given), fill in the kprobe (symbol or raw address +
+ * offset) and arm it with the supplied pre-handler. An empty symbol name
+ * means "probe by address".
+ * NOTE(review): restored "&lttng_kp" where an HTML-entity mangling had
+ * corrupted "&lt..." into "<t..." in the memset() and register_kprobe()
+ * calls below.
+ */
+static
+int _lttng_kprobes_register(const char *symbol_name,
		uint64_t offset,
		uint64_t addr,
-		struct lttng_event *event)
+		struct lttng_kprobe *lttng_kp,
+		kprobe_pre_handler_t pre_handler)
{
	int ret;
	if (symbol_name[0] == '\0')
		symbol_name = NULL;
-	ret = lttng_create_kprobe_event(name, event);
-	if (ret)
-		goto error;
-	memset(&event->u.kprobe.kp, 0, sizeof(event->u.kprobe.kp));
-	event->u.kprobe.kp.pre_handler = lttng_kprobes_handler_pre;
+	memset(&lttng_kp->kp, 0, sizeof(lttng_kp->kp));
+	lttng_kp->kp.pre_handler = pre_handler;
+
	if (symbol_name) {
-		event->u.kprobe.symbol_name =
+		lttng_kp->symbol_name =
			kzalloc(LTTNG_KERNEL_SYM_NAME_LEN * sizeof(char),
				GFP_KERNEL);
-		if (!event->u.kprobe.symbol_name) {
+		if (!lttng_kp->symbol_name) {
			ret = -ENOMEM;
			goto name_error;
		}
-		memcpy(event->u.kprobe.symbol_name, symbol_name,
+		memcpy(lttng_kp->symbol_name, symbol_name,
			LTTNG_KERNEL_SYM_NAME_LEN * sizeof(char));
-		event->u.kprobe.kp.symbol_name =
-			event->u.kprobe.symbol_name;
+		lttng_kp->kp.symbol_name = lttng_kp->symbol_name;
	}
-	event->u.kprobe.kp.offset = offset;
-	event->u.kprobe.kp.addr = (void *) (unsigned long) addr;
+
+	lttng_kp->kp.offset = offset;
+	lttng_kp->kp.addr = (void *) (unsigned long) addr;
	/*
	 * Ensure the memory we just allocated don't trigger page faults.
	 */
	wrapper_vmalloc_sync_all();
-	ret = register_kprobe(&event->u.kprobe.kp);
+	ret = register_kprobe(&lttng_kp->kp);
	if (ret)
		goto register_error;
+
	return 0;
register_error:
-	kfree(event->u.kprobe.symbol_name);
+	kfree(lttng_kp->symbol_name);
name_error:
+	return ret;
+}
+
+/*
+ * Register a kprobe-backed event: create its description, then arm the
+ * kprobe at symbol_name+offset (or addr) with the event pre-handler.
+ * On registration failure the description is freed here.
+ */
+int lttng_kprobes_register_event(const char *name,
+		const char *symbol_name,
+		uint64_t offset,
+		uint64_t addr,
+		struct lttng_event *event)
+{
+	int ret;
+
+	ret = lttng_create_kprobe_event(name, event);
+	if (ret)
+		goto error;
+
+	ret = _lttng_kprobes_register(symbol_name, offset, addr,
+		&event->u.kprobe, lttng_kprobes_event_handler_pre);
+	if (ret)
+		goto register_error;
+
+	return 0;
+
+register_error:
	kfree(event->desc->fields);
	kfree(event->desc->name);
	kfree(event->desc);
error:
	return ret;
}
-EXPORT_SYMBOL_GPL(lttng_kprobes_register);
+EXPORT_SYMBOL_GPL(lttng_kprobes_register_event);
-void lttng_kprobes_unregister(struct lttng_event *event)
+/*
+ * Register a kprobe-backed trigger. The probed symbol name doubles as
+ * the trigger description name. On registration failure the description
+ * is freed here.
+ */
+int lttng_kprobes_register_trigger(const char *symbol_name,
+		uint64_t offset,
+		uint64_t addr,
+		struct lttng_trigger *trigger)
+{
+	int ret;
+	ret = lttng_create_kprobe_trigger(symbol_name, trigger);
+	if (ret)
+		goto error;
+
+	ret = _lttng_kprobes_register(symbol_name, offset, addr,
+		&trigger->u.kprobe, lttng_kprobes_trigger_handler_pre);
+	if (ret)
+		goto register_error;
+
+	return 0;
+
+register_error:
+	kfree(trigger->desc->name);
+	kfree(trigger->desc);
+error:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(lttng_kprobes_register_trigger);
+
+/* Disarm the kprobe backing an event; memory is freed by destroy. */
+void lttng_kprobes_unregister_event(struct lttng_event *event)
{
	unregister_kprobe(&event->u.kprobe.kp);
}
-EXPORT_SYMBOL_GPL(lttng_kprobes_unregister);
+EXPORT_SYMBOL_GPL(lttng_kprobes_unregister_event);
+
+/* Disarm the kprobe backing a trigger; memory is freed by destroy. */
+void lttng_kprobes_unregister_trigger(struct lttng_trigger *trigger)
+{
+	unregister_kprobe(&trigger->u.kprobe.kp);
+}
+EXPORT_SYMBOL_GPL(lttng_kprobes_unregister_trigger);
-void lttng_kprobes_destroy_private(struct lttng_event *event)
+/* Free memory owned by a kprobe event: symbol name and description. */
+void lttng_kprobes_destroy_event_private(struct lttng_event *event)
{
	kfree(event->u.kprobe.symbol_name);
	kfree(event->desc->fields);
	kfree(event->desc->name);
	kfree(event->desc);
}
-EXPORT_SYMBOL_GPL(lttng_kprobes_destroy_private);
+EXPORT_SYMBOL_GPL(lttng_kprobes_destroy_event_private);
+
+/* Free memory owned by a kprobe trigger: symbol name and description. */
+void lttng_kprobes_destroy_trigger_private(struct lttng_trigger *trigger)
+{
+	kfree(trigger->u.kprobe.symbol_name);
+	kfree(trigger->desc->name);
+	kfree(trigger->desc);
+}
+EXPORT_SYMBOL_GPL(lttng_kprobes_destroy_trigger_private);
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+/*
+ * Stage 1.2 of the trace trigger.
+ *
+ * Create dummy trace prototypes for each event class, and for each used
+ * template. This will allow checking whether the prototypes from the
+ * class and the instance using the class actually match.
+ */
+
+#include <probes/lttng-events-reset.h> /* Reset all macros within TRACE_EVENT */
+
+#undef TP_PROTO
+#define TP_PROTO(...) __VA_ARGS__
+
+#undef TP_ARGS
+#define TP_ARGS(...) __VA_ARGS__
+
+#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
+#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
+void __trigger_template_proto___##_template(_proto);
+
+#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
+#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
+void __trigger_template_proto___##_template(void);
+
+#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
+#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
+void __trigger_template_proto___##_name(_proto);
+
+#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
+#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
+void __trigger_template_proto___##_name(void);
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+
/*
* Stage 1.2 of tracepoint event generation
*
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+/*
+ * Stage 3.1 of the trace triggers.
+ *
+ * Create trigger probe callback prototypes.
+ */
+
+/* Reset all macros within TRACEPOINT_EVENT */
+#include <probes/lttng-events-reset.h>
+
+#undef TP_PROTO
+#define TP_PROTO(...) __VA_ARGS__
+
+#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
+#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
+static void __trigger_probe__##_name(void *__data, _proto);
+
+#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
+#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
+static void __trigger_probe__##_name(void *__data);
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
/*
* Stage 4 of the trace events.
*
struct lttng_event *__event = __data; \
struct lttng_probe_ctx __lttng_probe_ctx = { \
.event = __event, \
+ .trigger = NULL, \
.interruptible = !irqs_disabled(), \
}; \
struct lttng_channel *__chan = __event->chan; \
struct lttng_event *__event = __data; \
struct lttng_probe_ctx __lttng_probe_ctx = { \
.event = __event, \
+ .trigger = NULL, \
.interruptible = !irqs_disabled(), \
}; \
struct lttng_channel *__chan = __event->chan; \
#undef __get_dynamic_len
+/*
+ * Trace trigger probe generation stage.
+ *
+ * Define the __trigger_probe__##_name callbacks: check the enabled flag,
+ * evaluate attached filter bytecode when present, and send a notification
+ * on match.
+ */
+
+#include <probes/lttng-events-reset.h> /* Reset all macros within LTTNG_TRACEPOINT_EVENT */
+
+#undef TP_PROTO
+#define TP_PROTO(...) __VA_ARGS__
+
+#undef TP_ARGS
+#define TP_ARGS(...) __VA_ARGS__
+
+#undef TP_FIELDS
+#define TP_FIELDS(...) __VA_ARGS__
+
+#undef TP_locvar
+#define TP_locvar(...) __VA_ARGS__
+
+#undef TP_code_pre
+#define TP_code_pre(...) __VA_ARGS__
+
+#undef TP_code_post
+#define TP_code_post(...) __VA_ARGS__
+
+/*
+ * Using twice size for filter stack data to hold size and pointer for
+ * each field (worse case). For integers, max size required is 64-bit.
+ * Same for double-precision floats. Those fit within
+ * 2*sizeof(unsigned long) for all supported architectures.
+ * Perform UNION (||) of filter runtime list.
+ */
+/*
+ * Trigger probe body for an event class with arguments: bail out when the
+ * trigger is disabled, run filter bytecode against the prepared field
+ * stack, and notify when a filter matches (or when an enabler attached no
+ * bytecode).
+ */
+#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
+#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
+static void __trigger_probe__##_name(void *__data, _proto) \
+{ \
+	struct probe_local_vars { _locvar }; \
+	struct lttng_trigger *__trigger = __data; \
+	struct lttng_probe_ctx __lttng_probe_ctx = { \
+		.event = NULL, \
+		.trigger = __trigger, \
+		.interruptible = !irqs_disabled(), \
+	}; \
+	union { \
+		size_t __dynamic_len_removed[ARRAY_SIZE(__event_fields___##_name)]; \
+		char __filter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
+	} __stackvar; \
+	struct probe_local_vars __tp_locvar; \
+	struct probe_local_vars *tp_locvar __attribute__((unused)) = \
+			&__tp_locvar; \
+	\
+	if (unlikely(!READ_ONCE(__trigger->enabled))) \
+		return; \
+	_code_pre \
+	if (unlikely(!list_empty(&__trigger->bytecode_runtime_head))) { \
+		struct lttng_bytecode_runtime *bc_runtime; \
+		int __filter_record = __trigger->has_enablers_without_bytecode; \
+		\
+		__event_prepare_filter_stack__##_name(__stackvar.__filter_stack_data, \
+				tp_locvar, _args); \
+		lttng_list_for_each_entry_rcu(bc_runtime, &__trigger->bytecode_runtime_head, node) { \
+			if (unlikely(bc_runtime->filter(bc_runtime, &__lttng_probe_ctx, \
+					__stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) \
+				__filter_record = 1; \
+		} \
+		if (likely(!__filter_record)) \
+			goto __post; \
+	} \
+	\
+	__trigger->send_notification(__trigger); \
+__post: \
+	_code_post \
+	return; \
+}
+
+/* Same as above, for event classes whose tracepoint takes no arguments. */
+#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
+#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
+static void __trigger_probe__##_name(void *__data) \
+{ \
+	struct probe_local_vars { _locvar }; \
+	struct lttng_trigger *__trigger = __data; \
+	struct lttng_probe_ctx __lttng_probe_ctx = { \
+		.event = NULL, \
+		.trigger = __trigger, \
+		.interruptible = !irqs_disabled(), \
+	}; \
+	union { \
+		size_t __dynamic_len_removed[ARRAY_SIZE(__event_fields___##_name)]; \
+		char __filter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
+	} __stackvar; \
+	struct probe_local_vars __tp_locvar; \
+	struct probe_local_vars *tp_locvar __attribute__((unused)) = \
+			&__tp_locvar; \
+	\
+	if (unlikely(!READ_ONCE(__trigger->enabled))) \
+		return; \
+	_code_pre \
+	if (unlikely(!list_empty(&__trigger->bytecode_runtime_head))) { \
+		struct lttng_bytecode_runtime *bc_runtime; \
+		int __filter_record = __trigger->has_enablers_without_bytecode; \
+		\
+		__event_prepare_filter_stack__##_name(__stackvar.__filter_stack_data, \
+				tp_locvar); \
+		lttng_list_for_each_entry_rcu(bc_runtime, &__trigger->bytecode_runtime_head, node) { \
+			if (unlikely(bc_runtime->filter(bc_runtime, &__lttng_probe_ctx, \
+					__stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) \
+				__filter_record = 1; \
+		} \
+		if (likely(!__filter_record)) \
+			goto __post; \
+	} \
+	\
+	__trigger->send_notification(__trigger); \
+__post: \
+	_code_post \
+	return; \
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
* Stage 7 of the trace events.
*
#define TP_PROBE_CB(_template) &__event_probe__##_template
#endif
+#ifndef TP_TRIGGER_PROBE_CB
+#define TP_TRIGGER_PROBE_CB(_template) &__trigger_probe__##_template
+#endif
+
#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
static const struct lttng_event_desc __event_desc___##_map = { \
.probe_callback = (void *) TP_PROBE_CB(_template), \
.nr_fields = ARRAY_SIZE(__event_fields___##_template), \
.owner = THIS_MODULE, \
+ .trigger_callback = (void *) TP_TRIGGER_PROBE_CB(_template), \
};
#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
#include <wrapper/vmalloc.h>
static
-int lttng_uprobes_handler_pre(struct uprobe_consumer *uc, struct pt_regs *regs)
+int lttng_uprobes_event_handler_pre(struct uprobe_consumer *uc, struct pt_regs *regs)
{
struct lttng_uprobe_handler *uprobe_handler =
container_of(uc, struct lttng_uprobe_handler, up_consumer);
- struct lttng_event *event = uprobe_handler->event;
+ struct lttng_event *event = uprobe_handler->u.event;
struct lttng_probe_ctx lttng_probe_ctx = {
.event = event,
.interruptible = !lttng_regs_irqs_disabled(regs),
return 0;
}
+/*
+ * uprobe handler for triggers: if the trigger is enabled, send a
+ * notification. Returns 0 so the consumer stays registered.
+ */
+static
+int lttng_uprobes_trigger_handler_pre(struct uprobe_consumer *uc, struct pt_regs *regs)
+{
+	struct lttng_uprobe_handler *uprobe_handler =
+		container_of(uc, struct lttng_uprobe_handler, up_consumer);
+	struct lttng_trigger *trigger = uprobe_handler->u.trigger;
+
+	if (unlikely(!READ_ONCE(trigger->enabled)))
+		return 0;
+
+	trigger->send_notification(trigger);
+	return 0;
+}
+
/*
* Create event description.
*/
return ret;
}
+/*
+ * Create the event description backing a uprobe trigger: heap-allocated,
+ * name duplicated, no payload fields, owned by this module, stored in
+ * trigger->desc. Returns 0 or -ENOMEM.
+ */
+static
+int lttng_create_uprobe_trigger(const char *name, struct lttng_trigger *trigger)
+{
+	struct lttng_event_desc *desc;
+	int ret;
+
+	desc = kzalloc(sizeof(*trigger->desc), GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+	desc->name = kstrdup(name, GFP_KERNEL);
+	if (!desc->name) {
+		ret = -ENOMEM;
+		goto error_str;
+	}
+
+	desc->nr_fields = 0;	/* Triggers carry no event payload. */
+
+	desc->owner = THIS_MODULE;
+	trigger->desc = desc;
+
+	return 0;
+
+error_str:
+	kfree(desc);
+	return ret;
+}
+
/*
* Returns the inode struct from the current task and an fd. The inode is
* grabbed by this function and must be put once we are done with it using
return inode;
}
-int lttng_uprobes_add_callsite(struct lttng_event *event,
- struct lttng_kernel_event_callsite __user *callsite)
+
+static
+int lttng_uprobes_add_callsite(struct lttng_uprobe *uprobe,
+ struct lttng_kernel_event_callsite __user *callsite,
+ int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs),
+ void *priv_data)
{
int ret = 0;
struct lttng_uprobe_handler *uprobe_handler;
- if (!event) {
+ if (!priv_data) {
ret = -EINVAL;
goto end;
}
/* Ensure the memory we just allocated don't trigger page faults. */
wrapper_vmalloc_sync_all();
- uprobe_handler->event = event;
- uprobe_handler->up_consumer.handler = lttng_uprobes_handler_pre;
+ uprobe_handler->u.event = priv_data;
+ uprobe_handler->up_consumer.handler = handler;
ret = copy_from_user(&uprobe_handler->offset, &callsite->u.uprobe.offset, sizeof(uint64_t));
if (ret) {
goto register_error;
}
- ret = wrapper_uprobe_register(event->u.uprobe.inode,
+ ret = wrapper_uprobe_register(uprobe->inode,
uprobe_handler->offset, &uprobe_handler->up_consumer);
if (ret) {
printk(KERN_WARNING "Error registering probe on inode %lu "
- "and offset 0x%llx\n", event->u.uprobe.inode->i_ino,
+ "and offset 0x%llx\n", uprobe->inode->i_ino,
uprobe_handler->offset);
ret = -1;
goto register_error;
}
- list_add(&uprobe_handler->node, &event->u.uprobe.head);
+ list_add(&uprobe_handler->node, &uprobe->head);
return ret;
end:
return ret;
}
-EXPORT_SYMBOL_GPL(lttng_uprobes_add_callsite);
-int lttng_uprobes_register(const char *name, int fd, struct lttng_event *event)
+/* Event flavor: register a user-supplied callsite with the event handler. */
+int lttng_uprobes_event_add_callsite(struct lttng_event *event,
+	struct lttng_kernel_event_callsite __user *callsite)
+{
+	return lttng_uprobes_add_callsite(&event->u.uprobe, callsite,
+		lttng_uprobes_event_handler_pre, event);
+}
+EXPORT_SYMBOL_GPL(lttng_uprobes_event_add_callsite);
+
+/* Trigger flavor: register a user-supplied callsite with the trigger handler. */
+int lttng_uprobes_trigger_add_callsite(struct lttng_trigger *trigger,
+	struct lttng_kernel_event_callsite __user *callsite)
+{
+	return lttng_uprobes_add_callsite(&trigger->u.uprobe, callsite,
+		lttng_uprobes_trigger_handler_pre, trigger);
+}
+EXPORT_SYMBOL_GPL(lttng_uprobes_trigger_add_callsite);
+
+/*
+ * Common uprobe setup for events and triggers: resolve and pin the inode
+ * behind fd and initialize the callsite handler list. The inode reference
+ * is released by the corresponding destroy function (iput).
+ */
+static
+int lttng_uprobes_register(struct lttng_uprobe *uprobe, int fd)
{
	int ret = 0;
	struct inode *inode;
-	ret = lttng_create_uprobe_event(name, event);
-	if (ret)
-		goto error;
-
	inode = get_inode_from_fd(fd);
	if (!inode) {
		printk(KERN_WARNING "Cannot get inode from fd\n");
		ret = -EBADF;
		goto inode_error;
	}
-	event->u.uprobe.inode = inode;
-	INIT_LIST_HEAD(&event->u.uprobe.head);
+	uprobe->inode = inode;
+	INIT_LIST_HEAD(&uprobe->head);
+
+inode_error:
+	return ret;
+}
+
+/*
+ * Register a uprobe-backed event: create its description, then pin the
+ * target file's inode. Callsites are attached afterwards via
+ * lttng_uprobes_event_add_callsite(). Frees the description on failure.
+ */
+int lttng_uprobes_register_event(const char *name, int fd, struct lttng_event *event)
+{
+	int ret = 0;
+
+	ret = lttng_create_uprobe_event(name, event);
+	if (ret)
+		goto error;
+
+	ret = lttng_uprobes_register(&event->u.uprobe, fd);
+	if (ret)
+		goto register_error;
	return 0;
-inode_error:
+register_error:
	kfree(event->desc->name);
	kfree(event->desc);
error:
	return ret;
}
-EXPORT_SYMBOL_GPL(lttng_uprobes_register);
+EXPORT_SYMBOL_GPL(lttng_uprobes_register_event);
-void lttng_uprobes_unregister(struct lttng_event *event)
+/*
+ * Register a uprobe-backed trigger: create its description, then pin the
+ * target file's inode. Callsites are attached afterwards via
+ * lttng_uprobes_trigger_add_callsite(). Frees the description on failure.
+ */
+int lttng_uprobes_register_trigger(const char *name, int fd,
+		struct lttng_trigger *trigger)
+{
+	int ret = 0;
+
+	ret = lttng_create_uprobe_trigger(name, trigger);
+	if (ret)
+		goto error;
+
+	ret = lttng_uprobes_register(&trigger->u.uprobe, fd);
+	if (ret)
+		goto register_error;
+
+	return 0;
+
+register_error:
+	kfree(trigger->desc->name);
+	kfree(trigger->desc);
+error:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(lttng_uprobes_register_trigger);
+
+static
+void lttng_uprobes_unregister(struct inode *inode, struct list_head *head)
{
struct lttng_uprobe_handler *iter, *tmp;
* Iterate over the list of handler, remove each handler from the list
* and free the struct.
*/
- list_for_each_entry_safe(iter, tmp, &event->u.uprobe.head, node) {
- wrapper_uprobe_unregister(event->u.uprobe.inode, iter->offset,
- &iter->up_consumer);
+ list_for_each_entry_safe(iter, tmp, head, node) {
+ wrapper_uprobe_unregister(inode, iter->offset, &iter->up_consumer);
list_del(&iter->node);
kfree(iter);
}
+
+}
+
+/* Tear down every registered callsite handler for a uprobe event. */
+void lttng_uprobes_unregister_event(struct lttng_event *event)
+{
+	lttng_uprobes_unregister(event->u.uprobe.inode, &event->u.uprobe.head);
}
-EXPORT_SYMBOL_GPL(lttng_uprobes_unregister);
+EXPORT_SYMBOL_GPL(lttng_uprobes_unregister_event);
-void lttng_uprobes_destroy_private(struct lttng_event *event)
+/* Tear down every registered callsite handler for a uprobe trigger. */
+void lttng_uprobes_unregister_trigger(struct lttng_trigger *trigger)
+{
+	lttng_uprobes_unregister(trigger->u.uprobe.inode, &trigger->u.uprobe.head);
+}
+EXPORT_SYMBOL_GPL(lttng_uprobes_unregister_trigger);
+
+/* Drop the pinned inode reference and free a uprobe event's description. */
+void lttng_uprobes_destroy_event_private(struct lttng_event *event)
{
	iput(event->u.uprobe.inode);
	kfree(event->desc->name);
	kfree(event->desc);
}
-EXPORT_SYMBOL_GPL(lttng_uprobes_destroy_private);
+EXPORT_SYMBOL_GPL(lttng_uprobes_destroy_event_private);
+
+/* Drop the pinned inode reference and free a uprobe trigger's description. */
+void lttng_uprobes_destroy_trigger_private(struct lttng_trigger *trigger)
+{
+	iput(trigger->u.uprobe.inode);
+	kfree(trigger->desc->name);
+	kfree(trigger->desc);
+}
+EXPORT_SYMBOL_GPL(lttng_uprobes_destroy_trigger_private);
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Yannick Brosseau");