+ WARN_ON(atomic_long_read(&buf->active_readers) != 1);
+retry:
+ if (disabled)
+ return POLLERR;
+
+ offset = lib_ring_buffer_get_offset(config, buf);
+ consumed = lib_ring_buffer_get_consumed(config, buf);
+
+ /*
+ * No full sub-buffer is currently available to consume.
+ */
+ if (subbuf_trunc(offset, chan) - subbuf_trunc(consumed, chan) == 0) {
+ /*
+ * If there is a non-empty subbuffer, flush and try again.
+ */
+ if (subbuf_offset(offset, chan) > subbuffer_header_size) {
+ lib_ring_buffer_switch_remote(buf);
+ goto retry;
+ }
+
+ if (finalized)
+ return POLLHUP;
+
+ /*
+ * The memory barriers in
+ * __wait_event()/wake_up_interruptible() take
+ * care of "raw_spin_is_locked" memory ordering.
+ */
+ if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
+ goto retry;
+ return 0;
+ }
+
+ if (subbuf_trunc(offset, chan) - subbuf_trunc(consumed, chan)
+ >= chan->backend.buf_size)
+ return POLLPRI | POLLRDBAND;
+ return POLLIN | POLLRDNORM;
+ }
+
+ return mask;
+}
+
+/**
+ * lttng_trigger_group_notif_open - trigger ring buffer open file operation
+ * @inode: opened inode
+ * @file: opened file
+ *
+ * Open implementation. Ensures that only one open instance of the
+ * buffer exists at any given time.
+ */
+static int lttng_trigger_group_notif_open(struct inode *inode, struct file *file)
+{
+ struct lttng_trigger_group *trigger_group = inode->i_private;
+ struct lib_ring_buffer *buf = trigger_group->buf;
+
+ file->private_data = trigger_group;
+ return lib_ring_buffer_open(inode, file, buf);
+}
+
+/**
+ * lttng_trigger_group_notif_release - trigger ring buffer release file operation
+ * @inode: opened inode
+ * @file: opened file
+ *
+ * Release implementation.
+ */
+static int lttng_trigger_group_notif_release(struct inode *inode, struct file *file)
+{
+ struct lttng_trigger_group *trigger_group = file->private_data;
+ struct lib_ring_buffer *buf = trigger_group->buf;
+ int ret;
+
+ ret = lib_ring_buffer_release(inode, file, buf);
+ if (ret)
+ return ret;
+ fput(trigger_group->file);
+ return 0;
+}
+
+static const struct file_operations lttng_trigger_group_notif_fops = {
+ .owner = THIS_MODULE,
+ .open = lttng_trigger_group_notif_open,
+ .release = lttng_trigger_group_notif_release,
+ .read = lttng_trigger_group_notif_read,
+ .poll = lttng_trigger_group_notif_poll,
+};
+
+/**
+ * lttng_metadata_ring_buffer_poll - LTTng ring buffer poll file operation
+ * @filp: the file
+ * @wait: poll table
+ *
+ * Handles the poll operations for the metadata channels.
+ */
+static
+unsigned int lttng_metadata_ring_buffer_poll(struct file *filp,
+ poll_table *wait)
+{
+ struct lttng_metadata_stream *stream = filp->private_data;
+ struct lib_ring_buffer *buf = stream->priv;
+ int finalized;
+ unsigned int mask = 0;
+
+ if (filp->f_mode & FMODE_READ) {
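+ /*
+ * poll_wait_set_exclusive() requests an exclusive wait entry,
+ * intended to wake a single poller per ring buffer event
+ * rather than every waiter.
+ */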
+ poll_wait_set_exclusive(wait);
+ poll_wait(filp, &stream->read_wait, wait);
+
+ finalized = stream->finalized;
+
+ /*
+ * lib_ring_buffer_is_finalized() contains a smp_rmb()
+ * ordering finalized load before offsets loads.
+ */
+ WARN_ON(atomic_long_read(&buf->active_readers) != 1);
+
+ if (finalized)
+ mask |= POLLHUP;
+
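+ /*
+ * The stream is readable when the metadata cache holds data
+ * that has not yet been written out to this stream.
+ */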
+ mutex_lock(&stream->metadata_cache->lock);
+ if (stream->metadata_cache->metadata_written >
+ stream->metadata_out)
+ mask |= POLLIN;
+ mutex_unlock(&stream->metadata_cache->lock);
+ }
+
+ return mask;
+}
+
+static
+void lttng_metadata_ring_buffer_ioctl_put_next_subbuf(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct lttng_metadata_stream *stream = filp->private_data;
+
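+ /*
+ * The reader is done with the sub-buffer: account all metadata
+ * copied into the ring buffer so far (metadata_in) as consumed
+ * (metadata_out).
+ */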
+ stream->metadata_out = stream->metadata_in;
+}
+
+/*
+ * Reset to 0 the counter of how much metadata has been consumed, so that
+ * the consumer receives the content of the metadata cache unchanged. This
+ * differs from metadata_regenerate, where the offset from epoch is
+ * resampled; here we want the exact same content as the last time the
+ * metadata was generated. This command is only allowed once all the
+ * metadata written to the cache has been output to the metadata stream,
+ * to avoid corrupting the metadata file.
+ *
+ * Return 0 on success, a negative value on error.
+ */
+static
+int lttng_metadata_cache_dump(struct lttng_metadata_stream *stream)
+{
+ int ret;
+ struct lttng_metadata_cache *cache = stream->metadata_cache;
+
+ mutex_lock(&cache->lock);
+ if (stream->metadata_out != cache->metadata_written) {
+ ret = -EBUSY;
+ goto end;
+ }
+ stream->metadata_out = 0;
+ stream->metadata_in = 0;
+ wake_up_interruptible(&stream->read_wait);
+ ret = 0;
+
+end:
+ mutex_unlock(&cache->lock);
+ return ret;
+}
+
+static
+long lttng_metadata_ring_buffer_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret;
+ struct lttng_metadata_stream *stream = filp->private_data;
+ struct lib_ring_buffer *buf = stream->priv;
+
+ switch (cmd) {
+ case RING_BUFFER_GET_NEXT_SUBBUF:
+ {
+ struct lttng_metadata_stream *stream = filp->private_data;
+ struct lib_ring_buffer *buf = stream->priv;
+ struct channel *chan = buf->backend.chan;
+
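+ /*
+ * Write up to one packet of metadata from the cache into the
+ * ring buffer; a positive return value means new data was
+ * written, so flush it into a sub-buffer for the reader.
+ */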
+ ret = lttng_metadata_output_channel(stream, chan);
+ if (ret > 0) {
+ lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
+ ret = 0;
+ } else if (ret < 0)
+ goto err;
+ break;
+ }
+ case RING_BUFFER_GET_SUBBUF:
+ {
+ /*
+ * Random access is not allowed for the metadata channel.
+ */
+ return -ENOSYS;
+ }
+ case RING_BUFFER_FLUSH_EMPTY: /* Fall-through. */
+ case RING_BUFFER_FLUSH:
+ {
+ struct lttng_metadata_stream *stream = filp->private_data;
+ struct lib_ring_buffer *buf = stream->priv;
+ struct channel *chan = buf->backend.chan;
+
+ /*
+ * Before doing the actual ring buffer flush, write up to one
+ * packet of metadata into the ring buffer.
+ */
+ ret = lttng_metadata_output_channel(stream, chan);
+ if (ret < 0)
+ goto err;
+ break;
+ }
+ case RING_BUFFER_GET_METADATA_VERSION:
+ {
+ struct lttng_metadata_stream *stream = filp->private_data;
+
+ return put_u64(stream->version, arg);
+ }
+ case RING_BUFFER_METADATA_CACHE_DUMP:
+ {
+ struct lttng_metadata_stream *stream = filp->private_data;
+
+ return lttng_metadata_cache_dump(stream);
+ }
+ default:
+ break;
+ }
+ /* PUT_SUBBUF is the one from lib ring buffer, unmodified. */
+
+ /* Perform the lib ring buffer ioctl after handling our own commands. */
+ ret = lib_ring_buffer_ioctl(filp, cmd, arg, buf);
+ if (ret < 0)
+ goto err;
+
+ switch (cmd) {
+ case RING_BUFFER_PUT_NEXT_SUBBUF:
+ {
+ lttng_metadata_ring_buffer_ioctl_put_next_subbuf(filp,
+ cmd, arg);
+ break;
+ }
+ default:
+ break;
+ }
+err:
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static
+long lttng_metadata_ring_buffer_compat_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret;
+ struct lttng_metadata_stream *stream = filp->private_data;
+ struct lib_ring_buffer *buf = stream->priv;
+
+ switch (cmd) {
+ case RING_BUFFER_GET_NEXT_SUBBUF:
+ {
+ struct lttng_metadata_stream *stream = filp->private_data;
+ struct lib_ring_buffer *buf = stream->priv;
+ struct channel *chan = buf->backend.chan;
+
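+ /*
+ * Write up to one packet of metadata from the cache into the
+ * ring buffer; a positive return value means new data was
+ * written, so flush it into a sub-buffer for the reader.
+ */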
+ ret = lttng_metadata_output_channel(stream, chan);
+ if (ret > 0) {
+ lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
+ ret = 0;
+ } else if (ret < 0)
+ goto err;
+ break;
+ }
+ case RING_BUFFER_GET_SUBBUF:
+ {
+ /*
+ * Random access is not allowed for metadata channel.
+ */
+ return -ENOSYS;
+ }
+ case RING_BUFFER_FLUSH_EMPTY: /* Fall-through. */
+ case RING_BUFFER_FLUSH:
+ {
+ struct lttng_metadata_stream *stream = filp->private_data;
+ struct lib_ring_buffer *buf = stream->priv;
+ struct channel *chan = buf->backend.chan;
+
+ /*
+ * Before doing the actual ring buffer flush, write up to one
+ * packet of metadata into the ring buffer.
+ */
+ ret = lttng_metadata_output_channel(stream, chan);
+ if (ret < 0)
+ goto err;
+ break;
+ }
+ case RING_BUFFER_GET_METADATA_VERSION:
+ {
+ struct lttng_metadata_stream *stream = filp->private_data;
+
+ return put_u64(stream->version, arg);
+ }
+ case RING_BUFFER_METADATA_CACHE_DUMP:
+ {
+ struct lttng_metadata_stream *stream = filp->private_data;
+
+ return lttng_metadata_cache_dump(stream);
+ }
+ default:
+ break;
+ }
+ /* PUT_SUBBUF is the one from lib ring buffer, unmodified. */
+
+ /* Perform the lib ring buffer ioctl after handling our own commands. */
+ ret = lib_ring_buffer_compat_ioctl(filp, cmd, arg, buf);
+ if (ret < 0)
+ goto err;
+
+ switch (cmd) {
+ case RING_BUFFER_PUT_NEXT_SUBBUF:
+ {
+ lttng_metadata_ring_buffer_ioctl_put_next_subbuf(filp,
+ cmd, arg);
+ break;
+ }
+ default:
+ break;
+ }
+err:
+ return ret;
+}
+#endif
+
+/*
+ * This is not used by anonymous file descriptors. This code is kept
+ * in case we ever want to implement an inode with an open() operation.
+ */
+static
+int lttng_metadata_ring_buffer_open(struct inode *inode, struct file *file)
+{
+ struct lttng_metadata_stream *stream = inode->i_private;
+ struct lib_ring_buffer *buf = stream->priv;
+
+ file->private_data = buf;
+ /*
+ * Since the lifetime of the metadata cache differs from that of the
+ * session, we need to keep our own reference on the transport.
+ */
+ if (!try_module_get(stream->transport->owner)) {
+ printk(KERN_WARNING "LTT : Can't lock transport module.\n");
+ return -EBUSY;
+ }
+ return lib_ring_buffer_open(inode, file, buf);
+}
+
+static
+int lttng_metadata_ring_buffer_release(struct inode *inode, struct file *file)
+{
+ struct lttng_metadata_stream *stream = file->private_data;
+ struct lib_ring_buffer *buf = stream->priv;
+
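+ /*
+ * Drop the metadata cache and transport references taken when
+ * the metadata stream file was created.
+ */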
+ kref_put(&stream->metadata_cache->refcount, metadata_cache_destroy);
+ module_put(stream->transport->owner);
+ return lib_ring_buffer_release(inode, file, buf);
+}
+
+static
+ssize_t lttng_metadata_ring_buffer_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags)
+{
+ struct lttng_metadata_stream *stream = in->private_data;
+ struct lib_ring_buffer *buf = stream->priv;
+
+ return lib_ring_buffer_splice_read(in, ppos, pipe, len,
+ flags, buf);
+}
+
+static
+int lttng_metadata_ring_buffer_mmap(struct file *filp,
+ struct vm_area_struct *vma)
+{
+ struct lttng_metadata_stream *stream = filp->private_data;
+ struct lib_ring_buffer *buf = stream->priv;
+
+ return lib_ring_buffer_mmap(filp, vma, buf);
+}
+
+static
+const struct file_operations lttng_metadata_ring_buffer_file_operations = {
+ .owner = THIS_MODULE,
+ .open = lttng_metadata_ring_buffer_open,
+ .release = lttng_metadata_ring_buffer_release,
+ .poll = lttng_metadata_ring_buffer_poll,
+ .splice_read = lttng_metadata_ring_buffer_splice_read,
+ .mmap = lttng_metadata_ring_buffer_mmap,
+ .unlocked_ioctl = lttng_metadata_ring_buffer_ioctl,
+ .llseek = vfs_lib_ring_buffer_no_llseek,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = lttng_metadata_ring_buffer_compat_ioctl,
+#endif
+};
+
+static
+int lttng_abi_create_stream_fd(struct file *channel_file, void *stream_priv,
+ const struct file_operations *fops, const char *name)
+{
+ int stream_fd, ret;
+ struct file *stream_file;
+
+ stream_fd = lttng_get_unused_fd();
+ if (stream_fd < 0) {
+ ret = stream_fd;
+ goto fd_error;
+ }
+ stream_file = anon_inode_getfile(name, fops, stream_priv, O_RDWR);
+ if (IS_ERR(stream_file)) {
+ ret = PTR_ERR(stream_file);
+ goto file_error;
+ }
+ /*
+ * OPEN_FMODE, applied within anon_inode_getfile/alloc_file, does not
+ * honor FMODE_LSEEK, FMODE_PREAD nor FMODE_PWRITE. We need to read
+ * from this file descriptor, so we set FMODE_PREAD here.
+ */
+ stream_file->f_mode |= FMODE_PREAD;
+ fd_install(stream_fd, stream_file);
+ /*
+ * The stream holds a reference to the channel within the generic ring
+ * buffer library, so no need to hold a refcount on the channel and
+ * session files here.
+ */
+ return stream_fd;
+
+file_error:
+ put_unused_fd(stream_fd);
+fd_error:
+ return ret;
+}
+
+static
+int lttng_abi_open_stream(struct file *channel_file)
+{
+ struct lttng_channel *channel = channel_file->private_data;
+ struct lib_ring_buffer *buf;
+ int ret;
+ void *stream_priv;
+
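+ /* Take the reader reference on the channel's ring buffer. */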
+ buf = channel->ops->buffer_read_open(channel->chan);
+ if (!buf)
+ return -ENOENT;
+
+ stream_priv = buf;
+ ret = lttng_abi_create_stream_fd(channel_file, stream_priv,
+ <tng_stream_ring_buffer_file_operations,
+ "[lttng_stream]");
+ if (ret < 0)
+ goto fd_error;
+
+ return ret;
+
+fd_error:
+ channel->ops->buffer_read_close(buf);
+ return ret;
+}
+
+static
+int lttng_abi_open_metadata_stream(struct file *channel_file)
+{
+ struct lttng_channel *channel = channel_file->private_data;
+ struct lttng_session *session = channel->session;
+ struct lib_ring_buffer *buf;
+ int ret;
+ struct lttng_metadata_stream *metadata_stream;
+ void *stream_priv;
+
+ buf = channel->ops->buffer_read_open(channel->chan);
+ if (!buf)
+ return -ENOENT;
+
+ metadata_stream = kzalloc(sizeof(struct lttng_metadata_stream),
+ GFP_KERNEL);
+ if (!metadata_stream) {
+ ret = -ENOMEM;
+ goto nomem;
+ }
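+ /*
+ * Wire the stream to the session's metadata cache and to the
+ * reader-side ring buffer acquired above.
+ */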
+ metadata_stream->metadata_cache = session->metadata_cache;
+ init_waitqueue_head(&metadata_stream->read_wait);
+ metadata_stream->priv = buf;
+ stream_priv = metadata_stream;
+ metadata_stream->transport = channel->transport;
+
+ /*
+ * Since the lifetime of the metadata cache differs from that of the
+ * session, we need to keep our own reference on the transport.
+ */
+ if (!try_module_get(metadata_stream->transport->owner)) {
+ printk(KERN_WARNING "LTT : Can't lock transport module.\n");
+ ret = -EINVAL;
+ goto notransport;
+ }
+
+ if (!lttng_kref_get(&session->metadata_cache->refcount)) {
+ ret = -EOVERFLOW;
+ goto kref_error;
+ }
+
+ ret = lttng_abi_create_stream_fd(channel_file, stream_priv,
+ <tng_metadata_ring_buffer_file_operations,
+ "[lttng_metadata_stream]");
+ if (ret < 0)
+ goto fd_error;
+
+ list_add(&metadata_stream->list,
+ &session->metadata_cache->metadata_stream);
+ return ret;
+
+fd_error:
+ kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
+kref_error:
+ module_put(metadata_stream->transport->owner);
+notransport:
+ kfree(metadata_stream);
+nomem:
+ channel->ops->buffer_read_close(buf);
+ return ret;
+}
+
+static
+int lttng_abi_open_trigger_group_stream(struct file *notif_file)
+{
+ struct lttng_trigger_group *trigger_group = notif_file->private_data;
+ struct channel *chan = trigger_group->chan;
+ struct lib_ring_buffer *buf;
+ int ret;
+ void *stream_priv;
+
+ buf = trigger_group->ops->buffer_read_open(chan);
+ if (!buf)
+ return -ENOENT;
+
+ /* The trigger notification fd holds a reference on the trigger group */
+ if (!atomic_long_add_unless(&notif_file->f_count, 1, LONG_MAX)) {
+ ret = -EOVERFLOW;
+ goto refcount_error;
+ }
+ trigger_group->buf = buf;
+ stream_priv = trigger_group;
+ ret = lttng_abi_create_stream_fd(notif_file, stream_priv,
+ <tng_trigger_group_notif_fops,
+ "[lttng_trigger_stream]");
+ if (ret < 0)
+ goto fd_error;
+
+ return ret;
+
+fd_error:
+ atomic_long_dec(&notif_file->f_count);
+refcount_error:
+ trigger_group->ops->buffer_read_close(buf);
+ return ret;
+}
+
+static
+int lttng_abi_create_event(struct file *channel_file,
+ struct lttng_kernel_event *event_param)
+{
+ struct lttng_channel *channel = channel_file->private_data;
+ int event_fd, ret;
+ struct file *event_file;
+ void *priv;
+
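+ /* Ensure user-supplied strings are NUL-terminated before use. */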
+ event_param->name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
+ switch (event_param->instrumentation) {
+ case LTTNG_KERNEL_KRETPROBE:
+ event_param->u.kretprobe.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
+ break;
+ case LTTNG_KERNEL_KPROBE:
+ event_param->u.kprobe.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
+ break;
+ case LTTNG_KERNEL_FUNCTION:
+ event_param->u.ftrace.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
+ break;
+ default:
+ break;
+ }
+ event_fd = lttng_get_unused_fd();
+ if (event_fd < 0) {
+ ret = event_fd;
+ goto fd_error;
+ }
+ event_file = anon_inode_getfile("[lttng_event]",
+ <tng_event_fops,
+ NULL, O_RDWR);
+ if (IS_ERR(event_file)) {
+ ret = PTR_ERR(event_file);
+ goto file_error;
+ }
+ /* The event holds a reference on the channel */
+ if (!atomic_long_add_unless(&channel_file->f_count, 1, LONG_MAX)) {
+ ret = -EOVERFLOW;
+ goto refcount_error;
+ }
+ if (event_param->instrumentation == LTTNG_KERNEL_TRACEPOINT
+ || event_param->instrumentation == LTTNG_KERNEL_SYSCALL) {
+ struct lttng_event_enabler *event_enabler;
+
+ if (strutils_is_star_glob_pattern(event_param->name)) {
+ /*
+ * If the event name is a star globbing pattern,
+ * we create the special star globbing enabler.
+ */
+ event_enabler = lttng_event_enabler_create(LTTNG_ENABLER_FORMAT_STAR_GLOB,
+ event_param, channel);
+ } else {
+ event_enabler = lttng_event_enabler_create(LTTNG_ENABLER_FORMAT_NAME,
+ event_param, channel);
+ }
+ priv = event_enabler;
+ } else {
+ struct lttng_event *event;
+
+ /*
+ * We tolerate no failure path after event creation. It
+ * will stay invariant for the rest of the session.
+ */
+ event = lttng_event_create(channel, event_param,
+ NULL, NULL,
+ event_param->instrumentation);
+ WARN_ON_ONCE(!event);
+ if (IS_ERR(event)) {
+ ret = PTR_ERR(event);
+ goto event_error;
+ }
+ priv = event;
+ }
+ event_file->private_data = priv;
+ fd_install(event_fd, event_file);
+ return event_fd;
+
+event_error:
+ atomic_long_dec(&channel_file->f_count);
+refcount_error:
+ fput(event_file);
+file_error:
+ put_unused_fd(event_fd);
+fd_error:
+ return ret;
+}
+
+static
+long lttng_trigger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct lttng_trigger *trigger;
+ struct lttng_trigger_enabler *trigger_enabler;
+ enum lttng_event_type *evtype = file->private_data;
+
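+ /*
+ * Both lttng_trigger and lttng_trigger_enabler start with an
+ * enum lttng_event_type field, so private_data can be
+ * dispatched through this common prefix.
+ */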
+ switch (cmd) {
+ case LTTNG_KERNEL_ENABLE:
+ switch (*evtype) {
+ case LTTNG_TYPE_EVENT:
+ trigger = file->private_data;
+ return lttng_trigger_enable(trigger);
+ case LTTNG_TYPE_ENABLER:
+ trigger_enabler = file->private_data;
+ return lttng_trigger_enabler_enable(trigger_enabler);
+ default:
+ WARN_ON_ONCE(1);
+ return -ENOSYS;
+ }
+ case LTTNG_KERNEL_DISABLE:
+ switch (*evtype) {
+ case LTTNG_TYPE_EVENT:
+ trigger = file->private_data;
+ return lttng_trigger_disable(trigger);
+ case LTTNG_TYPE_ENABLER:
+ trigger_enabler = file->private_data;
+ return lttng_trigger_enabler_disable(trigger_enabler);
+ default:
+ WARN_ON_ONCE(1);
+ return -ENOSYS;
+ }
+ case LTTNG_KERNEL_FILTER:
+ switch (*evtype) {
+ case LTTNG_TYPE_EVENT:
+ return -EINVAL;
+ case LTTNG_TYPE_ENABLER:
+ trigger_enabler = file->private_data;
+ return lttng_trigger_enabler_attach_bytecode(trigger_enabler,
+ (struct lttng_kernel_filter_bytecode __user *) arg);
+ default:
+ WARN_ON_ONCE(1);
+ return -ENOSYS;
+ }
+ case LTTNG_KERNEL_ADD_CALLSITE:
+ switch (*evtype) {
+ case LTTNG_TYPE_EVENT:
+ trigger = file->private_data;
+ return lttng_trigger_add_callsite(trigger,
+ (struct lttng_kernel_event_callsite __user *) arg);
+ case LTTNG_TYPE_ENABLER:
+ return -EINVAL;
+ default:
+ WARN_ON_ONCE(1);
+ return -ENOSYS;
+ }
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+static
+int lttng_trigger_release(struct inode *inode, struct file *file)
+{
+ struct lttng_trigger *trigger;
+ struct lttng_trigger_enabler *trigger_enabler;
+ enum lttng_event_type *evtype = file->private_data;
+
+ if (!evtype)
+ return 0;
+
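+ /*
+ * Drop the reference on the trigger group file taken when this
+ * trigger (or enabler) file was created.
+ */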
+ switch (*evtype) {
+ case LTTNG_TYPE_EVENT:
+ trigger = file->private_data;
+ if (trigger)
+ fput(trigger->group->file);
+ break;
+ case LTTNG_TYPE_ENABLER:
+ trigger_enabler = file->private_data;
+ if (trigger_enabler)
+ fput(trigger_enabler->group->file);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct file_operations lttng_trigger_fops = {
+ .owner = THIS_MODULE,
+ .release = lttng_trigger_release,
+ .unlocked_ioctl = lttng_trigger_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = lttng_trigger_ioctl,
+#endif
+};
+
+static
+int lttng_abi_create_trigger(struct file *trigger_group_file,
+ struct lttng_kernel_trigger *trigger_param)
+{
+ struct lttng_trigger_group *trigger_group = trigger_group_file->private_data;
+ int trigger_fd, ret;
+ struct file *trigger_file;
+ void *priv;
+
+ switch (trigger_param->instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ case LTTNG_KERNEL_UPROBE:
+ break;
+ case LTTNG_KERNEL_KPROBE:
+ trigger_param->u.kprobe.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
+ break;