SoW-2019-0007-2: Dynamic Snapshot: Triggers send partial event payload with notifications sow-2019-0007-2-rev1
author: Jonathan Rajotte <jonathan.rajotte-julien@efficios.com>
Fri, 29 May 2020 18:06:33 +0000 (14:06 -0400)
committer: Jonathan Rajotte <jonathan.rajotte-julien@efficios.com>
Fri, 29 May 2020 18:07:54 +0000 (14:07 -0400)
Revision 1

Change-Id: I7455c9fced2dc98c7048ef0d1608db519087171c

40 files changed:
DO-NOT-MERGE.md [new file with mode: 0644]
include/instrumentation/events/lttng-test.h
include/instrumentation/syscalls/headers/syscalls_unknown.h
include/lttng/abi.h
include/lttng/bytecode.h [new file with mode: 0644]
include/lttng/events.h
include/lttng/filter-bytecode.h [deleted file]
include/lttng/filter.h [deleted file]
include/lttng/lttng-bytecode.h [new file with mode: 0644]
include/lttng/msgpack.h [new file with mode: 0644]
include/lttng/tracepoint-event-impl.h
include/lttng/trigger-notification.h [new file with mode: 0644]
include/lttng/utils.h [new file with mode: 0644]
include/ringbuffer/backend_internal.h
include/ringbuffer/config.h
include/ringbuffer/frontend_types.h
include/ringbuffer/iterator.h
src/Kbuild
src/lib/msgpack/msgpack.c [new file with mode: 0644]
src/lib/ringbuffer/ring_buffer_frontend.c
src/lib/ringbuffer/ring_buffer_iterator.c
src/lttng-abi.c
src/lttng-bytecode-interpreter.c [new file with mode: 0644]
src/lttng-bytecode-specialize.c [new file with mode: 0644]
src/lttng-bytecode-validator.c [new file with mode: 0644]
src/lttng-bytecode.c [new file with mode: 0644]
src/lttng-events.c
src/lttng-filter-interpreter.c [deleted file]
src/lttng-filter-specialize.c [deleted file]
src/lttng-filter-validator.c [deleted file]
src/lttng-filter.c [deleted file]
src/lttng-probes.c
src/lttng-ring-buffer-client.h
src/lttng-ring-buffer-metadata-client.h
src/lttng-ring-buffer-trigger-client.c [new file with mode: 0644]
src/lttng-ring-buffer-trigger-client.h [new file with mode: 0644]
src/lttng-syscalls.c
src/lttng-trigger-notification.c [new file with mode: 0644]
src/probes/lttng-kprobes.c
src/probes/lttng-uprobes.c

diff --git a/DO-NOT-MERGE.md b/DO-NOT-MERGE.md
new file mode 100644 (file)
index 0000000..c727a35
--- /dev/null
@@ -0,0 +1 @@
+capture
index 7d2b1f7c53f75deaa20b9ef7b29939215608bafb..eda5d9fe170478c984533067504ca5a3dce3e27c 100644 (file)
@@ -18,6 +18,7 @@ LTTNG_TRACEPOINT_ENUM(
                ctf_enum_auto("AUTO: EXPECT 28")
                ctf_enum_range("RANGE: 101 TO 303", 101, 303)
                ctf_enum_auto("AUTO: EXPECT 304")
+               ctf_enum_value("VALUE: -1", -1)
        )
 )
 
@@ -30,6 +31,7 @@ LTTNG_TRACEPOINT_EVENT(lttng_test_filter_event,
                ctf_integer(int, intfield, anint)
                ctf_integer_hex(int, intfield2, anint)
                ctf_integer(long, longfield, anint)
+               ctf_integer(int, signedfield, -1)
                ctf_integer_network(int, netintfield, netint)
                ctf_integer_network_hex(int, netintfieldhex, netint)
                ctf_array(long, arrfield1, values, 3)
@@ -49,6 +51,7 @@ LTTNG_TRACEPOINT_EVENT(lttng_test_filter_event,
                ctf_enum(lttng_test_filter_event_enum, int, enum28, 28)
                ctf_enum(lttng_test_filter_event_enum, int, enum202, 202)
                ctf_enum(lttng_test_filter_event_enum, int, enum304, 304)
+               ctf_enum(lttng_test_filter_event_enum, int, enumnegative, -1)
        )
 )
 
index 79939c80ca89a70ac0a80ddac8d8351e193782e6..64e53792e5b2f1c8a29ac860e64a94593940a240 100644 (file)
@@ -9,7 +9,7 @@
 #define UNKNOWN_SYSCALL_NRARGS 6
 
 #undef TP_PROBE_CB
-#define TP_PROBE_CB(_template)          &syscall_entry_probe
+#define TP_PROBE_CB(_template)          &syscall_entry_event_probe
 
 LTTNG_TRACEPOINT_EVENT(syscall_entry_unknown,
        TP_PROTO(int id, unsigned long *args),
@@ -29,7 +29,7 @@ LTTNG_TRACEPOINT_EVENT(compat_syscall_entry_unknown,
 )
 
 #undef TP_PROBE_CB
-#define TP_PROBE_CB(_template)          &syscall_exit_probe
+#define TP_PROBE_CB(_template)          &syscall_exit_event_probe
 
 LTTNG_TRACEPOINT_EVENT(syscall_exit_unknown,
        TP_PROTO(int id, long ret, unsigned long *args),
index b8e2db398a53f2b0b7ce35b8b8acb8a5e21f37a4..fb74102e0117fd4c3136ad03a684773653dca9da 100644 (file)
@@ -110,6 +110,31 @@ struct lttng_kernel_event {
        } u;
 } __attribute__((packed));
 
+#define LTTNG_KERNEL_TRIGGER_PADDING1  16
+#define LTTNG_KERNEL_TRIGGER_PADDING2  LTTNG_KERNEL_SYM_NAME_LEN + 32
+struct lttng_kernel_trigger {
+       uint64_t id;
+       char name[LTTNG_KERNEL_SYM_NAME_LEN];   /* event name */
+       enum lttng_kernel_instrumentation instrumentation;
+       char padding[LTTNG_KERNEL_TRIGGER_PADDING1];
+
+       /* Per instrumentation type configuration */
+       union {
+               struct lttng_kernel_kretprobe kretprobe;
+               struct lttng_kernel_kprobe kprobe;
+               struct lttng_kernel_function_tracer ftrace;
+               struct lttng_kernel_uprobe uprobe;
+               char padding[LTTNG_KERNEL_TRIGGER_PADDING2];
+       } u;
+} __attribute__((packed));
+
+#define LTTNG_KERNEL_TRIGGER_NOTIFICATION_PADDING 32
+struct lttng_kernel_trigger_notification {
+       uint64_t id;
+       uint16_t capture_buf_size;
+       char padding[LTTNG_KERNEL_TRIGGER_NOTIFICATION_PADDING];
+} __attribute__((packed));
+
 struct lttng_kernel_tracer_version {
        uint32_t major;
        uint32_t minor;
@@ -208,6 +233,14 @@ struct lttng_kernel_filter_bytecode {
        char data[0];
 } __attribute__((packed));
 
+#define LTTNG_KERNEL_CAPTURE_BYTECODE_MAX_LEN          65536
+struct lttng_kernel_capture_bytecode {
+       uint32_t len;
+       uint32_t reloc_offset;
+       uint64_t seqnum;
+       char data[0];
+} __attribute__((packed));
+
 enum lttng_kernel_tracker_type {
        LTTNG_KERNEL_TRACKER_UNKNOWN            = -1,
 
@@ -236,6 +269,14 @@ struct lttng_kernel_tracker_args {
 #define LTTNG_KERNEL_SYSCALL_LIST              _IO(0xF6, 0x4A)
 #define LTTNG_KERNEL_TRACER_ABI_VERSION                \
        _IOR(0xF6, 0x4B, struct lttng_kernel_tracer_abi_version)
+#define LTTNG_KERNEL_TRIGGER_GROUP_CREATE      _IO(0xF6, 0x4C)
+
+/* Trigger group file descriptor ioctl */
+#define LTTNG_KERNEL_TRIGGER_GROUP_NOTIFICATION_FD \
+       _IO(0xF6, 0x30)
+#define LTTNG_KERNEL_TRIGGER_CREATE            \
+       _IOW(0xF6, 0x31, struct lttng_kernel_trigger)
+#define LTTNG_KERNEL_CAPTURE _IO(0xF6, 0x32)
 
 /* Session FD ioctl */
 /* lttng/abi-old.h reserve 0x50, 0x51, 0x52, and 0x53. */
diff --git a/include/lttng/bytecode.h b/include/lttng/bytecode.h
new file mode 100644 (file)
index 0000000..6bb20b8
--- /dev/null
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * lttng/filter-bytecode.h
+ *
+ * LTTng filter bytecode
+ *
+ * Copyright 2012-2016 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _FILTER_BYTECODE_H
+#define _FILTER_BYTECODE_H
+
+/*
+ * offsets are absolute from start of bytecode.
+ */
+
+struct field_ref {
+       /* Initially, symbol offset. After link, field offset. */
+       uint16_t offset;
+} __attribute__((packed));
+
+struct get_symbol {
+       /* Symbol offset. */
+       uint16_t offset;
+} __attribute__((packed));
+
+struct get_index_u16 {
+       uint16_t index;
+} __attribute__((packed));
+
+struct get_index_u64 {
+       uint64_t index;
+} __attribute__((packed));
+
+struct literal_numeric {
+       int64_t v;
+} __attribute__((packed));
+
+struct literal_double {
+       double v;
+} __attribute__((packed));
+
+struct literal_string {
+       char string[0];
+} __attribute__((packed));
+
+enum bytecode_op {
+       BYTECODE_OP_UNKNOWN                     = 0,
+
+       BYTECODE_OP_RETURN                      = 1,
+
+       /* binary */
+       BYTECODE_OP_MUL                         = 2,
+       BYTECODE_OP_DIV                         = 3,
+       BYTECODE_OP_MOD                         = 4,
+       BYTECODE_OP_PLUS                        = 5,
+       BYTECODE_OP_MINUS                       = 6,
+       BYTECODE_OP_BIT_RSHIFT                  = 7,
+       BYTECODE_OP_BIT_LSHIFT                  = 8,
+       BYTECODE_OP_BIT_AND                     = 9,
+       BYTECODE_OP_BIT_OR                      = 10,
+       BYTECODE_OP_BIT_XOR                     = 11,
+
+       /* binary comparators */
+       BYTECODE_OP_EQ                          = 12,
+       BYTECODE_OP_NE                          = 13,
+       BYTECODE_OP_GT                          = 14,
+       BYTECODE_OP_LT                          = 15,
+       BYTECODE_OP_GE                          = 16,
+       BYTECODE_OP_LE                          = 17,
+
+       /* string binary comparator: apply to  */
+       BYTECODE_OP_EQ_STRING                   = 18,
+       BYTECODE_OP_NE_STRING                   = 19,
+       BYTECODE_OP_GT_STRING                   = 20,
+       BYTECODE_OP_LT_STRING                   = 21,
+       BYTECODE_OP_GE_STRING                   = 22,
+       BYTECODE_OP_LE_STRING                   = 23,
+
+       /* s64 binary comparator */
+       BYTECODE_OP_EQ_S64                      = 24,
+       BYTECODE_OP_NE_S64                      = 25,
+       BYTECODE_OP_GT_S64                      = 26,
+       BYTECODE_OP_LT_S64                      = 27,
+       BYTECODE_OP_GE_S64                      = 28,
+       BYTECODE_OP_LE_S64                      = 29,
+
+       /* double binary comparator */
+       BYTECODE_OP_EQ_DOUBLE                   = 30,
+       BYTECODE_OP_NE_DOUBLE                   = 31,
+       BYTECODE_OP_GT_DOUBLE                   = 32,
+       BYTECODE_OP_LT_DOUBLE                   = 33,
+       BYTECODE_OP_GE_DOUBLE                   = 34,
+       BYTECODE_OP_LE_DOUBLE                   = 35,
+
+       /* Mixed S64-double binary comparators */
+       BYTECODE_OP_EQ_DOUBLE_S64               = 36,
+       BYTECODE_OP_NE_DOUBLE_S64               = 37,
+       BYTECODE_OP_GT_DOUBLE_S64               = 38,
+       BYTECODE_OP_LT_DOUBLE_S64               = 39,
+       BYTECODE_OP_GE_DOUBLE_S64               = 40,
+       BYTECODE_OP_LE_DOUBLE_S64               = 41,
+
+       BYTECODE_OP_EQ_S64_DOUBLE               = 42,
+       BYTECODE_OP_NE_S64_DOUBLE               = 43,
+       BYTECODE_OP_GT_S64_DOUBLE               = 44,
+       BYTECODE_OP_LT_S64_DOUBLE               = 45,
+       BYTECODE_OP_GE_S64_DOUBLE               = 46,
+       BYTECODE_OP_LE_S64_DOUBLE               = 47,
+
+       /* unary */
+       BYTECODE_OP_UNARY_PLUS                  = 48,
+       BYTECODE_OP_UNARY_MINUS                 = 49,
+       BYTECODE_OP_UNARY_NOT                   = 50,
+       BYTECODE_OP_UNARY_PLUS_S64              = 51,
+       BYTECODE_OP_UNARY_MINUS_S64             = 52,
+       BYTECODE_OP_UNARY_NOT_S64               = 53,
+       BYTECODE_OP_UNARY_PLUS_DOUBLE           = 54,
+       BYTECODE_OP_UNARY_MINUS_DOUBLE          = 55,
+       BYTECODE_OP_UNARY_NOT_DOUBLE            = 56,
+
+       /* logical */
+       BYTECODE_OP_AND                         = 57,
+       BYTECODE_OP_OR                          = 58,
+
+       /* load field ref */
+       BYTECODE_OP_LOAD_FIELD_REF              = 59,
+       BYTECODE_OP_LOAD_FIELD_REF_STRING       = 60,
+       BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE     = 61,
+       BYTECODE_OP_LOAD_FIELD_REF_S64          = 62,
+       BYTECODE_OP_LOAD_FIELD_REF_DOUBLE       = 63,
+
+       /* load immediate from operand */
+       BYTECODE_OP_LOAD_STRING                 = 64,
+       BYTECODE_OP_LOAD_S64                    = 65,
+       BYTECODE_OP_LOAD_DOUBLE                 = 66,
+
+       /* cast */
+       BYTECODE_OP_CAST_TO_S64                 = 67,
+       BYTECODE_OP_CAST_DOUBLE_TO_S64          = 68,
+       BYTECODE_OP_CAST_NOP                    = 69,
+
+       /* get context ref */
+       BYTECODE_OP_GET_CONTEXT_REF             = 70,
+       BYTECODE_OP_GET_CONTEXT_REF_STRING      = 71,
+       BYTECODE_OP_GET_CONTEXT_REF_S64         = 72,
+       BYTECODE_OP_GET_CONTEXT_REF_DOUBLE      = 73,
+
+       /* load userspace field ref */
+       BYTECODE_OP_LOAD_FIELD_REF_USER_STRING  = 74,
+       BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE = 75,
+
+       /*
+        * load immediate star globbing pattern (literal string)
+        * from immediate
+        */
+       BYTECODE_OP_LOAD_STAR_GLOB_STRING       = 76,
+
+       /* globbing pattern binary operator: apply to */
+       BYTECODE_OP_EQ_STAR_GLOB_STRING         = 77,
+       BYTECODE_OP_NE_STAR_GLOB_STRING         = 78,
+
+       /*
+        * Instructions for recursive traversal through composed types.
+        */
+       BYTECODE_OP_GET_CONTEXT_ROOT            = 79,
+       BYTECODE_OP_GET_APP_CONTEXT_ROOT        = 80,
+       BYTECODE_OP_GET_PAYLOAD_ROOT            = 81,
+
+       BYTECODE_OP_GET_SYMBOL                  = 82,
+       BYTECODE_OP_GET_SYMBOL_FIELD            = 83,
+       BYTECODE_OP_GET_INDEX_U16               = 84,
+       BYTECODE_OP_GET_INDEX_U64               = 85,
+
+       BYTECODE_OP_LOAD_FIELD                  = 86,
+       BYTECODE_OP_LOAD_FIELD_S8               = 87,
+       BYTECODE_OP_LOAD_FIELD_S16              = 88,
+       BYTECODE_OP_LOAD_FIELD_S32              = 89,
+       BYTECODE_OP_LOAD_FIELD_S64              = 90,
+       BYTECODE_OP_LOAD_FIELD_U8               = 91,
+       BYTECODE_OP_LOAD_FIELD_U16              = 92,
+       BYTECODE_OP_LOAD_FIELD_U32              = 93,
+       BYTECODE_OP_LOAD_FIELD_U64              = 94,
+       BYTECODE_OP_LOAD_FIELD_STRING           = 95,
+       BYTECODE_OP_LOAD_FIELD_SEQUENCE         = 96,
+       BYTECODE_OP_LOAD_FIELD_DOUBLE           = 97,
+
+       BYTECODE_OP_UNARY_BIT_NOT               = 98,
+
+       BYTECODE_OP_RETURN_S64                  = 99,
+
+       NR_BYTECODE_OPS,
+};
+
+typedef uint8_t bytecode_opcode_t;
+
+struct load_op {
+       bytecode_opcode_t op;
+       char data[0];
+       /* data to load. Size known by enum filter_opcode and null-term char. */
+} __attribute__((packed));
+
+struct binary_op {
+       bytecode_opcode_t op;
+} __attribute__((packed));
+
+struct unary_op {
+       bytecode_opcode_t op;
+} __attribute__((packed));
+
+/* skip_offset is absolute from start of bytecode */
+struct logical_op {
+       bytecode_opcode_t op;
+       uint16_t skip_offset;   /* bytecode insn, if skip second test */
+} __attribute__((packed));
+
+struct cast_op {
+       bytecode_opcode_t op;
+} __attribute__((packed));
+
+struct return_op {
+       bytecode_opcode_t op;
+} __attribute__((packed));
+
+#endif /* _FILTER_BYTECODE_H */
index f010ff7698ba707114497991146b56fe7c6cc9f6..845fb13a7782ff8869479c3bd5b668f6ad389e95 100644 (file)
@@ -10,6 +10,7 @@
 #ifndef _LTTNG_EVENTS_H
 #define _LTTNG_EVENTS_H
 
+#include <linux/irq_work.h>
 #include <linux/version.h>
 #include <linux/list.h>
 #include <linux/kprobes.h>
@@ -169,6 +170,7 @@ struct lttng_perf_counter_field {
 
 struct lttng_probe_ctx {
        struct lttng_event *event;
+       struct lttng_trigger *trigger; // Not sure if we will ever need it.
        uint8_t interruptible;
 };
 
@@ -211,6 +213,7 @@ struct lttng_event_desc {
        const struct lttng_event_field *fields; /* event payload */
        unsigned int nr_fields;
        struct module *owner;
+       void *trigger_callback;
 };
 
 struct lttng_probe_desc {
@@ -229,33 +232,49 @@ enum lttng_event_type {
        LTTNG_TYPE_ENABLER = 1,
 };
 
-struct lttng_filter_bytecode_node {
+enum lttng_bytecode_node_type {
+   LTTNG_BYTECODE_NODE_TYPE_FILTER,
+   LTTNG_BYTECODE_NODE_TYPE_CAPTURE,
+};
+
+struct lttng_bytecode_node {
+       enum lttng_bytecode_node_type type;
        struct list_head node;
        struct lttng_enabler *enabler;
-       /*
-        * struct lttng_kernel_filter_bytecode has var. sized array, must be
-        * last field.
-        */
-       struct lttng_kernel_filter_bytecode bc;
+       struct {
+               uint32_t len;
+               uint32_t reloc_offset;
+               uint64_t seqnum;
+               char data[];
+       } bc;
 };
 
 /*
- * Filter return value masks.
+ * Bytecode interpreter return value masks.
  */
-enum lttng_filter_ret {
-       LTTNG_FILTER_DISCARD = 0,
-       LTTNG_FILTER_RECORD_FLAG = (1ULL << 0),
+enum lttng_bytecode_interpreter_ret {
+       LTTNG_INTERPRETER_DISCARD = 0,
+       LTTNG_INTERPRETER_RECORD_FLAG = (1ULL << 0),
        /* Other bits are kept for future use. */
 };
 
+struct lttng_interpreter_output;
+
 struct lttng_bytecode_runtime {
        /* Associated bytecode */
-       struct lttng_filter_bytecode_node *bc;
-       uint64_t (*filter)(void *filter_data, struct lttng_probe_ctx *lttng_probe_ctx,
-                       const char *filter_stack_data);
+       struct lttng_bytecode_node *bc;
+       union {
+               uint64_t (*filter)(void *filter_data,
+                               struct lttng_probe_ctx *lttng_probe_ctx,
+                               const char *filter_stack_data);
+               uint64_t (*capture)(void *filter_data,
+                               struct lttng_probe_ctx *lttng_probe_ctx,
+                               const char *capture_stack_data,
+                               struct lttng_interpreter_output *output);
+       } interpreter_funcs;
        int link_failed;
        struct list_head node;  /* list of bytecode runtime in event */
-       struct lttng_event *event;
+       struct lttng_ctx *ctx;
 };
 
 /*
@@ -267,12 +286,31 @@ struct lttng_enabler_ref {
 };
 
 struct lttng_uprobe_handler {
-       struct lttng_event *event;
+       union {
+               struct lttng_event *event;
+               struct lttng_trigger *trigger;
+       } u;
        loff_t offset;
        struct uprobe_consumer up_consumer;
        struct list_head node;
 };
 
+struct lttng_kprobe {
+       struct kprobe kp;
+       char *symbol_name;
+};
+
+struct lttng_uprobe {
+       struct inode *inode;
+       struct list_head head;
+};
+
+struct lttng_syscall {
+       struct list_head node;                  /* chain registered syscall trigger */
+       unsigned int syscall_id;
+       bool is_compat;
+};
+
 /*
  * lttng_event structure is referred to by the tracing fast path. It must be
  * kept small.
@@ -287,18 +325,12 @@ struct lttng_event {
        struct lttng_ctx *ctx;
        enum lttng_kernel_instrumentation instrumentation;
        union {
-               struct {
-                       struct kprobe kp;
-                       char *symbol_name;
-               } kprobe;
+               struct lttng_kprobe kprobe;
                struct {
                        struct lttng_krp *lttng_krp;
                        char *symbol_name;
                } kretprobe;
-               struct {
-                       struct inode *inode;
-                       struct list_head head;
-               } uprobe;
+               struct lttng_uprobe uprobe;
        } u;
        struct list_head list;          /* Event list in session */
        unsigned int metadata_dumped:1;
@@ -308,13 +340,45 @@ struct lttng_event {
        struct hlist_node hlist;        /* session ht of events */
        int registered;                 /* has reg'd tracepoint probe */
        /* list of struct lttng_bytecode_runtime, sorted by seqnum */
-       struct list_head bytecode_runtime_head;
+       struct list_head filter_bytecode_runtime_head;
+       int has_enablers_without_bytecode;
+};
+
+// FIXME: Really similar to lttng_event above. Could those be merged ?
+struct lttng_trigger {
+       enum lttng_event_type evtype;   /* First field. */
+       uint64_t id;
+       int enabled;
+       int registered;                 /* has reg'd tracepoint probe */
+       const struct lttng_event_desc *desc;
+       void *filter;
+       struct list_head list;          /* Trigger list in trigger group */
+
+       enum lttng_kernel_instrumentation instrumentation;
+       union {
+               struct lttng_kprobe kprobe;
+               struct lttng_uprobe uprobe;
+               struct lttng_syscall syscall;
+       } u;
+
+       /* Backward references: list of lttng_enabler_ref (ref to enablers) */
+       struct list_head enablers_ref_head;
+       struct hlist_node hlist;        /* session ht of triggers */
+       /* list of struct lttng_bytecode_runtime, sorted by seqnum */
+       struct list_head filter_bytecode_runtime_head;
+       size_t num_captures;
+       struct list_head capture_bytecode_runtime_head;
        int has_enablers_without_bytecode;
+
+       void (*send_notification)(struct lttng_trigger *trigger,
+                       struct lttng_probe_ctx *lttng_probe_ctx,
+                       const char *interpreter_stack_data);
+       struct lttng_trigger_group *group; /* Weak ref */
 };
 
-enum lttng_enabler_type {
-       LTTNG_ENABLER_STAR_GLOB,
-       LTTNG_ENABLER_NAME,
+enum lttng_enabler_format_type {
+       LTTNG_ENABLER_FORMAT_STAR_GLOB,
+       LTTNG_ENABLER_FORMAT_NAME,
 };
 
 /*
@@ -324,21 +388,55 @@ enum lttng_enabler_type {
 struct lttng_enabler {
        enum lttng_event_type evtype;   /* First field. */
 
-       enum lttng_enabler_type type;
+       enum lttng_enabler_format_type format_type;
 
-       struct list_head node;  /* per-session list of enablers */
        /* head list of struct lttng_ust_filter_bytecode_node */
        struct list_head filter_bytecode_head;
 
        struct lttng_kernel_event event_param;
+       unsigned int enabled:1;
+};
+
+struct lttng_event_enabler {
+       struct lttng_enabler base;
+       struct list_head node;  /* per-session list of enablers */
        struct lttng_channel *chan;
+       /*
+        * Unused, but kept around to make it explicit that the tracer can do
+        * it.
+        */
        struct lttng_ctx *ctx;
-       unsigned int enabled:1;
 };
 
+struct lttng_trigger_enabler {
+       struct lttng_enabler base;
+       uint64_t id;
+       struct list_head node;  /* List of trigger enablers */
+       struct lttng_trigger_group *group;
+
+       /* head list of struct lttng_ust_filter_bytecode_node */
+       struct list_head capture_bytecode_head;
+       uint64_t num_captures;
+};
+
+
+static inline
+struct lttng_enabler *lttng_event_enabler_as_enabler(
+               struct lttng_event_enabler *event_enabler)
+{
+       return &event_enabler->base;
+}
+
+static inline
+struct lttng_enabler *lttng_trigger_enabler_as_enabler(
+               struct lttng_trigger_enabler *trigger_enabler)
+{
+       return &trigger_enabler->base;
+}
+
 struct lttng_channel_ops {
        struct channel *(*channel_create)(const char *name,
-                               struct lttng_channel *lttng_chan,
+                               void *priv,
                                void *buf_addr,
                                size_t subbuf_size, size_t num_subbuf,
                                unsigned int switch_timer_interval,
@@ -415,6 +513,13 @@ struct lttng_event_ht {
        struct hlist_head table[LTTNG_EVENT_HT_SIZE];
 };
 
+#define LTTNG_TRIGGER_HT_BITS          12
+#define LTTNG_TRIGGER_HT_SIZE          (1U << LTTNG_TRIGGER_HT_BITS)
+
+struct lttng_trigger_ht {
+       struct hlist_head table[LTTNG_TRIGGER_HT_SIZE];
+};
+
 struct lttng_channel {
        unsigned int id;
        struct channel *chan;           /* Channel buffers */
@@ -518,14 +623,36 @@ struct lttng_session {
        struct lttng_id_tracker vgid_tracker;
        unsigned int metadata_dumped:1,
                tstate:1;               /* Transient enable state */
-       /* List of enablers */
+       /* List of event enablers */
        struct list_head enablers_head;
-       /* Hash table of events */
+/* Hash table of events */
        struct lttng_event_ht events_ht;
        char name[LTTNG_KERNEL_SESSION_NAME_LEN];
        char creation_time[LTTNG_KERNEL_SESSION_CREATION_TIME_ISO8601_LEN];
 };
 
+struct lttng_trigger_group {
+       struct file *file;              /* File associated to trigger group */
+       struct file *notif_file;        /* File used to expose notifications to userspace. */
+       struct list_head node;          /* Trigger group list */
+       struct list_head enablers_head; /* List of enablers */
+       struct list_head triggers_head; /* List of triggers */
+       struct lttng_trigger_ht triggers_ht; /* Hash table of triggers */
+       struct lttng_ctx *ctx;              /* Contexts for filters. */
+       struct lttng_channel_ops *ops;
+       struct lttng_transport *transport;
+       struct channel *chan;           /* Ring buffer channel for trigger group. */
+       struct lib_ring_buffer *buf;    /* Ring buffer for trigger group. */
+       wait_queue_head_t read_wait;
+       struct irq_work wakeup_pending; /* Pending wakeup irq work. */
+
+       struct list_head *trigger_syscall_dispatch;
+       struct list_head *trigger_compat_syscall_dispatch;
+
+       unsigned int syscall_all:1,
+               sys_enter_registered:1;
+};
+
 struct lttng_metadata_cache {
        char *data;                     /* Metadata cache */
        unsigned int cache_alloc;       /* Metadata allocated size (bytes) */
@@ -543,14 +670,24 @@ void lttng_unlock_sessions(void);
 
 struct list_head *lttng_get_probe_list_head(void);
 
-struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
+struct lttng_event_enabler *lttng_event_enabler_create(
+               enum lttng_enabler_format_type format_type,
                struct lttng_kernel_event *event_param,
                struct lttng_channel *chan);
 
-int lttng_enabler_enable(struct lttng_enabler *enabler);
-int lttng_enabler_disable(struct lttng_enabler *enabler);
+int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler);
+int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler);
+struct lttng_trigger_enabler *lttng_trigger_enabler_create(
+               struct lttng_trigger_group *trigger_group,
+               enum lttng_enabler_format_type format_type,
+               struct lttng_kernel_trigger *trigger_param);
+
+int lttng_trigger_enabler_enable(struct lttng_trigger_enabler *trigger_enabler);
+int lttng_trigger_enabler_disable(struct lttng_trigger_enabler *trigger_enabler);
 int lttng_fix_pending_events(void);
+int lttng_fix_pending_triggers(void);
 int lttng_session_active(void);
+bool lttng_trigger_active(void);
 
 struct lttng_session *lttng_session_create(void);
 int lttng_session_enable(struct lttng_session *session);
@@ -560,6 +697,9 @@ int lttng_session_metadata_regenerate(struct lttng_session *session);
 int lttng_session_statedump(struct lttng_session *session);
 void metadata_cache_destroy(struct kref *kref);
 
+struct lttng_trigger_group *lttng_trigger_group_create(void);
+void lttng_trigger_group_destroy(struct lttng_trigger_group *trigger_group);
+
 struct lttng_channel *lttng_channel_create(struct lttng_session *session,
                                       const char *transport_name,
                                       void *buf_addr,
@@ -589,11 +729,29 @@ struct lttng_event *lttng_event_compat_old_create(struct lttng_channel *chan,
                void *filter,
                const struct lttng_event_desc *internal_desc);
 
+struct lttng_trigger *lttng_trigger_create(
+                               const struct lttng_event_desc *trigger_desc,
+                               uint64_t id,
+                               struct lttng_trigger_group *trigger_group,
+                               struct lttng_kernel_trigger *trigger_param,
+                               void *filter,
+                               enum lttng_kernel_instrumentation itype);
+struct lttng_trigger *_lttng_trigger_create(
+                               const struct lttng_event_desc *trigger_desc,
+                               uint64_t id,
+                               struct lttng_trigger_group *trigger_group,
+                               struct lttng_kernel_trigger *trigger_param,
+                               void *filter,
+                               enum lttng_kernel_instrumentation itype);
+
 int lttng_channel_enable(struct lttng_channel *channel);
 int lttng_channel_disable(struct lttng_channel *channel);
 int lttng_event_enable(struct lttng_event *event);
 int lttng_event_disable(struct lttng_event *event);
 
+int lttng_trigger_enable(struct lttng_trigger *trigger);
+int lttng_trigger_disable(struct lttng_trigger *trigger);
+
 void lttng_transport_register(struct lttng_transport *transport);
 void lttng_transport_unregister(struct lttng_transport *transport);
 
@@ -605,8 +763,8 @@ void lttng_abi_compat_old_exit(void);
 
 int lttng_probe_register(struct lttng_probe_desc *desc);
 void lttng_probe_unregister(struct lttng_probe_desc *desc);
-const struct lttng_event_desc *lttng_event_get(const char *name);
-void lttng_event_put(const struct lttng_event_desc *desc);
+const struct lttng_event_desc *lttng_event_desc_get(const char *name);
+void lttng_event_desc_put(const struct lttng_event_desc *desc);
 int lttng_probes_init(void);
 void lttng_probes_exit(void);
 
@@ -631,33 +789,43 @@ int lttng_session_list_tracker_ids(struct lttng_session *session,
 void lttng_clock_ref(void);
 void lttng_clock_unref(void);
 
+int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
+               struct lttng_enabler *enabler);
+
 #if defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS)
-int lttng_syscalls_register(struct lttng_channel *chan, void *filter);
-int lttng_syscalls_unregister(struct lttng_channel *chan);
-int lttng_syscall_filter_enable(struct lttng_channel *chan,
+int lttng_syscalls_register_event(struct lttng_channel *chan, void *filter);
+int lttng_syscalls_unregister_event(struct lttng_channel *chan);
+int lttng_syscall_filter_enable_event(struct lttng_channel *chan,
                const char *name);
-int lttng_syscall_filter_disable(struct lttng_channel *chan,
+int lttng_syscall_filter_disable_event(struct lttng_channel *chan,
                const char *name);
 long lttng_channel_syscall_mask(struct lttng_channel *channel,
                struct lttng_kernel_syscall_mask __user *usyscall_mask);
+
+int lttng_syscalls_register_trigger(struct lttng_trigger_enabler *trigger_enabler, void *filter);
+int lttng_syscals_create_matching_triggers(struct lttng_trigger_enabler *trigger_enabler, void *filter);
+int lttng_syscalls_unregister_trigger(struct lttng_trigger_group *group);
+int lttng_syscall_filter_enable_trigger(struct lttng_trigger *trigger);
+int lttng_syscall_filter_disable_trigger(struct lttng_trigger *trigger);
 #else
-static inline int lttng_syscalls_register(struct lttng_channel *chan, void *filter)
+static inline int lttng_syscalls_register_event(
+               struct lttng_channel *chan, void *filter)
 {
        return -ENOSYS;
 }
 
-static inline int lttng_syscalls_unregister(struct lttng_channel *chan)
+static inline int lttng_syscalls_unregister_event(struct lttng_channel *chan)
 {
        return 0;
 }
 
-static inline int lttng_syscall_filter_enable(struct lttng_channel *chan,
+static inline int lttng_syscall_filter_enable_event(struct lttng_channel *chan,
                const char *name)
 {
        return -ENOSYS;
 }
 
-static inline int lttng_syscall_filter_disable(struct lttng_channel *chan,
+static inline int lttng_syscall_filter_disable_event(struct lttng_channel *chan,
                const char *name)
 {
        return -ENOSYS;
@@ -668,13 +836,44 @@ static inline long lttng_channel_syscall_mask(struct lttng_channel *channel,
 {
        return -ENOSYS;
 }
+
+static inline int lttng_syscalls_register_trigger(
+               struct lttng_trigger_enabler *trigger_enabler, void *filter)
+{
+       return -ENOSYS;
+}
+
+static inline int lttng_syscalls_unregister_trigger(struct lttng_trigger_group *group)
+{
+       return 0;
+}
+
+static inline int lttng_syscall_filter_enable_trigger(
+               struct lttng_trigger *trigger)
+{
+       return -ENOSYS;
+}
+
+static inline int lttng_syscall_filter_disable_trigger(
+               struct lttng_trigger *trigger)
+{
+       return -ENOSYS;
+}
+
 #endif
 
-void lttng_filter_sync_state(struct lttng_bytecode_runtime *runtime);
-int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
+int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler *event_enabler,
                struct lttng_kernel_filter_bytecode __user *bytecode);
-void lttng_enabler_event_link_bytecode(struct lttng_event *event,
-               struct lttng_enabler *enabler);
+int lttng_trigger_enabler_attach_filter_bytecode(struct lttng_trigger_enabler *trigger_enabler,
+               struct lttng_kernel_filter_bytecode __user *bytecode);
+int lttng_trigger_enabler_attach_capture_bytecode(
+               struct lttng_trigger_enabler *trigger_enabler,
+               struct lttng_kernel_capture_bytecode __user *bytecode);
+
+void lttng_enabler_link_bytecode(const struct lttng_event_desc *event_desc,
+               struct lttng_ctx *ctx,
+               struct list_head *instance_bytecode_runtime_head,
+               struct list_head *enabler_bytecode_runtime_head);
 
 int lttng_probes_init(void);
 
@@ -852,16 +1051,22 @@ void lttng_logger_exit(void);
 extern int lttng_statedump_start(struct lttng_session *session);
 
 #ifdef CONFIG_KPROBES
-int lttng_kprobes_register(const char *name,
+int lttng_kprobes_register_event(const char *name,
                const char *symbol_name,
                uint64_t offset,
                uint64_t addr,
                struct lttng_event *event);
-void lttng_kprobes_unregister(struct lttng_event *event);
-void lttng_kprobes_destroy_private(struct lttng_event *event);
+void lttng_kprobes_unregister_event(struct lttng_event *event);
+void lttng_kprobes_destroy_event_private(struct lttng_event *event);
+int lttng_kprobes_register_trigger(const char *symbol_name,
+               uint64_t offset,
+               uint64_t addr,
+               struct lttng_trigger *trigger);
+void lttng_kprobes_unregister_trigger(struct lttng_trigger *trigger);
+void lttng_kprobes_destroy_trigger_private(struct lttng_trigger *trigger);
 #else
 static inline
-int lttng_kprobes_register(const char *name,
+int lttng_kprobes_register_event(const char *name,
                const char *symbol_name,
                uint64_t offset,
                uint64_t addr,
@@ -871,12 +1076,31 @@ int lttng_kprobes_register(const char *name,
 }
 
 static inline
-void lttng_kprobes_unregister(struct lttng_event *event)
+void lttng_kprobes_unregister_event(struct lttng_event *event)
+{
+}
+
+static inline
+void lttng_kprobes_destroy_event_private(struct lttng_event *event)
+{
+}
+
+static inline
+int lttng_kprobes_register_trigger(const char *symbol_name,
+               uint64_t offset,
+               uint64_t addr,
+               struct lttng_trigger *trigger)
+{
+       return -ENOSYS;
+}
+
+static inline
+void lttng_kprobes_unregister_trigger(struct lttng_trigger *trigger)
 {
 }
 
 static inline
-void lttng_kprobes_destroy_private(struct lttng_event *event)
+void lttng_kprobes_destroy_trigger_private(struct lttng_trigger *trigger)
 {
 }
 #endif
@@ -884,35 +1108,68 @@ void lttng_kprobes_destroy_private(struct lttng_event *event)
 int lttng_event_add_callsite(struct lttng_event *event,
        struct lttng_kernel_event_callsite *callsite);
 
+int lttng_trigger_add_callsite(struct lttng_trigger *trigger,
+       struct lttng_kernel_event_callsite *callsite);
+
 #ifdef CONFIG_UPROBES
-int lttng_uprobes_register(const char *name,
+int lttng_uprobes_register_event(const char *name,
        int fd, struct lttng_event *event);
-int lttng_uprobes_add_callsite(struct lttng_event *event,
+int lttng_uprobes_event_add_callsite(struct lttng_event *event,
+       struct lttng_kernel_event_callsite *callsite);
+void lttng_uprobes_unregister_event(struct lttng_event *event);
+void lttng_uprobes_destroy_event_private(struct lttng_event *event);
+int lttng_uprobes_register_trigger(const char *name,
+       int fd, struct lttng_trigger *trigger);
+int lttng_uprobes_trigger_add_callsite(struct lttng_trigger *trigger,
        struct lttng_kernel_event_callsite *callsite);
-void lttng_uprobes_unregister(struct lttng_event *event);
-void lttng_uprobes_destroy_private(struct lttng_event *event);
+void lttng_uprobes_unregister_trigger(struct lttng_trigger *trigger);
+void lttng_uprobes_destroy_trigger_private(struct lttng_trigger *trigger);
 #else
 static inline
-int lttng_uprobes_register(const char *name,
+int lttng_uprobes_register_event(const char *name,
        int fd, struct lttng_event *event)
 {
        return -ENOSYS;
 }
 
 static inline
-int lttng_uprobes_add_callsite(struct lttng_event *event,
+int lttng_uprobes_event_add_callsite(struct lttng_event *event,
+       struct lttng_kernel_event_callsite *callsite)
+{
+       return -ENOSYS;
+}
+
+static inline
+void lttng_uprobes_unregister_event(struct lttng_event *event)
+{
+}
+
+static inline
+void lttng_uprobes_destroy_event_private(struct lttng_event *event)
+{
+}
+
+static inline
+int lttng_uprobes_register_trigger(const char *name,
+       int fd, struct lttng_trigger *trigger)
+{
+       return -ENOSYS;
+}
+
+static inline
+int lttng_uprobes_trigger_add_callsite(struct lttng_trigger *trigger,
        struct lttng_kernel_event_callsite *callsite)
 {
        return -ENOSYS;
 }
 
 static inline
-void lttng_uprobes_unregister(struct lttng_event *event)
+void lttng_uprobes_unregister_trigger(struct lttng_trigger *trigger)
 {
 }
 
 static inline
-void lttng_uprobes_destroy_private(struct lttng_event *event)
+void lttng_uprobes_destroy_trigger_private(struct lttng_trigger *trigger)
 {
 }
 #endif
diff --git a/include/lttng/filter-bytecode.h b/include/lttng/filter-bytecode.h
deleted file mode 100644 (file)
index cc1a841..0000000
+++ /dev/null
@@ -1,225 +0,0 @@
-/* SPDX-License-Identifier: MIT
- *
- * lttng/filter-bytecode.h
- *
- * LTTng filter bytecode
- *
- * Copyright 2012-2016 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _FILTER_BYTECODE_H
-#define _FILTER_BYTECODE_H
-
-/*
- * offsets are absolute from start of bytecode.
- */
-
-struct field_ref {
-       /* Initially, symbol offset. After link, field offset. */
-       uint16_t offset;
-} __attribute__((packed));
-
-struct get_symbol {
-       /* Symbol offset. */
-       uint16_t offset;
-} __attribute__((packed));
-
-struct get_index_u16 {
-       uint16_t index;
-} __attribute__((packed));
-
-struct get_index_u64 {
-       uint64_t index;
-} __attribute__((packed));
-
-struct literal_numeric {
-       int64_t v;
-} __attribute__((packed));
-
-struct literal_double {
-       double v;
-} __attribute__((packed));
-
-struct literal_string {
-       char string[0];
-} __attribute__((packed));
-
-enum filter_op {
-       FILTER_OP_UNKNOWN                       = 0,
-
-       FILTER_OP_RETURN                        = 1,
-
-       /* binary */
-       FILTER_OP_MUL                           = 2,
-       FILTER_OP_DIV                           = 3,
-       FILTER_OP_MOD                           = 4,
-       FILTER_OP_PLUS                          = 5,
-       FILTER_OP_MINUS                         = 6,
-       FILTER_OP_BIT_RSHIFT                    = 7,
-       FILTER_OP_BIT_LSHIFT                    = 8,
-       FILTER_OP_BIT_AND                       = 9,
-       FILTER_OP_BIT_OR                        = 10,
-       FILTER_OP_BIT_XOR                       = 11,
-
-       /* binary comparators */
-       FILTER_OP_EQ                            = 12,
-       FILTER_OP_NE                            = 13,
-       FILTER_OP_GT                            = 14,
-       FILTER_OP_LT                            = 15,
-       FILTER_OP_GE                            = 16,
-       FILTER_OP_LE                            = 17,
-
-       /* string binary comparator: apply to  */
-       FILTER_OP_EQ_STRING                     = 18,
-       FILTER_OP_NE_STRING                     = 19,
-       FILTER_OP_GT_STRING                     = 20,
-       FILTER_OP_LT_STRING                     = 21,
-       FILTER_OP_GE_STRING                     = 22,
-       FILTER_OP_LE_STRING                     = 23,
-
-       /* s64 binary comparator */
-       FILTER_OP_EQ_S64                        = 24,
-       FILTER_OP_NE_S64                        = 25,
-       FILTER_OP_GT_S64                        = 26,
-       FILTER_OP_LT_S64                        = 27,
-       FILTER_OP_GE_S64                        = 28,
-       FILTER_OP_LE_S64                        = 29,
-
-       /* double binary comparator */
-       FILTER_OP_EQ_DOUBLE                     = 30,
-       FILTER_OP_NE_DOUBLE                     = 31,
-       FILTER_OP_GT_DOUBLE                     = 32,
-       FILTER_OP_LT_DOUBLE                     = 33,
-       FILTER_OP_GE_DOUBLE                     = 34,
-       FILTER_OP_LE_DOUBLE                     = 35,
-
-       /* Mixed S64-double binary comparators */
-       FILTER_OP_EQ_DOUBLE_S64                 = 36,
-       FILTER_OP_NE_DOUBLE_S64                 = 37,
-       FILTER_OP_GT_DOUBLE_S64                 = 38,
-       FILTER_OP_LT_DOUBLE_S64                 = 39,
-       FILTER_OP_GE_DOUBLE_S64                 = 40,
-       FILTER_OP_LE_DOUBLE_S64                 = 41,
-
-       FILTER_OP_EQ_S64_DOUBLE                 = 42,
-       FILTER_OP_NE_S64_DOUBLE                 = 43,
-       FILTER_OP_GT_S64_DOUBLE                 = 44,
-       FILTER_OP_LT_S64_DOUBLE                 = 45,
-       FILTER_OP_GE_S64_DOUBLE                 = 46,
-       FILTER_OP_LE_S64_DOUBLE                 = 47,
-
-       /* unary */
-       FILTER_OP_UNARY_PLUS                    = 48,
-       FILTER_OP_UNARY_MINUS                   = 49,
-       FILTER_OP_UNARY_NOT                     = 50,
-       FILTER_OP_UNARY_PLUS_S64                = 51,
-       FILTER_OP_UNARY_MINUS_S64               = 52,
-       FILTER_OP_UNARY_NOT_S64                 = 53,
-       FILTER_OP_UNARY_PLUS_DOUBLE             = 54,
-       FILTER_OP_UNARY_MINUS_DOUBLE            = 55,
-       FILTER_OP_UNARY_NOT_DOUBLE              = 56,
-
-       /* logical */
-       FILTER_OP_AND                           = 57,
-       FILTER_OP_OR                            = 58,
-
-       /* load field ref */
-       FILTER_OP_LOAD_FIELD_REF                = 59,
-       FILTER_OP_LOAD_FIELD_REF_STRING         = 60,
-       FILTER_OP_LOAD_FIELD_REF_SEQUENCE       = 61,
-       FILTER_OP_LOAD_FIELD_REF_S64            = 62,
-       FILTER_OP_LOAD_FIELD_REF_DOUBLE         = 63,
-
-       /* load immediate from operand */
-       FILTER_OP_LOAD_STRING                   = 64,
-       FILTER_OP_LOAD_S64                      = 65,
-       FILTER_OP_LOAD_DOUBLE                   = 66,
-
-       /* cast */
-       FILTER_OP_CAST_TO_S64                   = 67,
-       FILTER_OP_CAST_DOUBLE_TO_S64            = 68,
-       FILTER_OP_CAST_NOP                      = 69,
-
-       /* get context ref */
-       FILTER_OP_GET_CONTEXT_REF               = 70,
-       FILTER_OP_GET_CONTEXT_REF_STRING        = 71,
-       FILTER_OP_GET_CONTEXT_REF_S64           = 72,
-       FILTER_OP_GET_CONTEXT_REF_DOUBLE        = 73,
-
-       /* load userspace field ref */
-       FILTER_OP_LOAD_FIELD_REF_USER_STRING    = 74,
-       FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE  = 75,
-
-       /*
-        * load immediate star globbing pattern (literal string)
-        * from immediate
-        */
-       FILTER_OP_LOAD_STAR_GLOB_STRING         = 76,
-
-       /* globbing pattern binary operator: apply to */
-       FILTER_OP_EQ_STAR_GLOB_STRING           = 77,
-       FILTER_OP_NE_STAR_GLOB_STRING           = 78,
-
-       /*
-        * Instructions for recursive traversal through composed types.
-        */
-       FILTER_OP_GET_CONTEXT_ROOT              = 79,
-       FILTER_OP_GET_APP_CONTEXT_ROOT          = 80,
-       FILTER_OP_GET_PAYLOAD_ROOT              = 81,
-
-       FILTER_OP_GET_SYMBOL                    = 82,
-       FILTER_OP_GET_SYMBOL_FIELD              = 83,
-       FILTER_OP_GET_INDEX_U16                 = 84,
-       FILTER_OP_GET_INDEX_U64                 = 85,
-
-       FILTER_OP_LOAD_FIELD                    = 86,
-       FILTER_OP_LOAD_FIELD_S8                 = 87,
-       FILTER_OP_LOAD_FIELD_S16                = 88,
-       FILTER_OP_LOAD_FIELD_S32                = 89,
-       FILTER_OP_LOAD_FIELD_S64                = 90,
-       FILTER_OP_LOAD_FIELD_U8                 = 91,
-       FILTER_OP_LOAD_FIELD_U16                = 92,
-       FILTER_OP_LOAD_FIELD_U32                = 93,
-       FILTER_OP_LOAD_FIELD_U64                = 94,
-       FILTER_OP_LOAD_FIELD_STRING             = 95,
-       FILTER_OP_LOAD_FIELD_SEQUENCE           = 96,
-       FILTER_OP_LOAD_FIELD_DOUBLE             = 97,
-
-       FILTER_OP_UNARY_BIT_NOT                 = 98,
-
-       FILTER_OP_RETURN_S64                    = 99,
-
-       NR_FILTER_OPS,
-};
-
-typedef uint8_t filter_opcode_t;
-
-struct load_op {
-       filter_opcode_t op;
-       char data[0];
-       /* data to load. Size known by enum filter_opcode and null-term char. */
-} __attribute__((packed));
-
-struct binary_op {
-       filter_opcode_t op;
-} __attribute__((packed));
-
-struct unary_op {
-       filter_opcode_t op;
-} __attribute__((packed));
-
-/* skip_offset is absolute from start of bytecode */
-struct logical_op {
-       filter_opcode_t op;
-       uint16_t skip_offset;   /* bytecode insn, if skip second test */
-} __attribute__((packed));
-
-struct cast_op {
-       filter_opcode_t op;
-} __attribute__((packed));
-
-struct return_op {
-       filter_opcode_t op;
-} __attribute__((packed));
-
-#endif /* _FILTER_BYTECODE_H */
diff --git a/include/lttng/filter.h b/include/lttng/filter.h
deleted file mode 100644 (file)
index eb70fe3..0000000
+++ /dev/null
@@ -1,248 +0,0 @@
-/* SPDX-License-Identifier: MIT
- *
- * lttng/filter.h
- *
- * LTTng modules filter header.
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_FILTER_H
-#define _LTTNG_FILTER_H
-
-#include <linux/kernel.h>
-
-#include <lttng/events.h>
-#include <lttng/filter-bytecode.h>
-
-/* Filter stack length, in number of entries */
-#define FILTER_STACK_LEN       10      /* includes 2 dummy */
-#define FILTER_STACK_EMPTY     1
-
-#define FILTER_MAX_DATA_LEN    65536
-
-#ifdef DEBUG
-#define dbg_printk(fmt, args...)                               \
-       printk(KERN_DEBUG "[debug bytecode in %s:%s@%u] " fmt,          \
-               __FILE__, __func__, __LINE__, ## args)
-#else
-#define dbg_printk(fmt, args...)                               \
-do {                                                           \
-       /* do nothing but check printf format */                \
-       if (0)                                                  \
-               printk(KERN_DEBUG "[debug bytecode in %s:%s@%u] " fmt,  \
-                       __FILE__, __func__, __LINE__, ## args); \
-} while (0)
-#endif
-
-/* Linked bytecode. Child of struct lttng_bytecode_runtime. */
-struct bytecode_runtime {
-       struct lttng_bytecode_runtime p;
-       size_t data_len;
-       size_t data_alloc_len;
-       char *data;
-       uint16_t len;
-       char code[0];
-};
-
-enum entry_type {
-       REG_S64,
-       REG_DOUBLE,
-       REG_STRING,
-       REG_STAR_GLOB_STRING,
-       REG_TYPE_UNKNOWN,
-       REG_PTR,
-};
-
-enum load_type {
-       LOAD_ROOT_CONTEXT,
-       LOAD_ROOT_APP_CONTEXT,
-       LOAD_ROOT_PAYLOAD,
-       LOAD_OBJECT,
-};
-
-enum object_type {
-       OBJECT_TYPE_S8,
-       OBJECT_TYPE_S16,
-       OBJECT_TYPE_S32,
-       OBJECT_TYPE_S64,
-       OBJECT_TYPE_U8,
-       OBJECT_TYPE_U16,
-       OBJECT_TYPE_U32,
-       OBJECT_TYPE_U64,
-
-       OBJECT_TYPE_DOUBLE,
-       OBJECT_TYPE_STRING,
-       OBJECT_TYPE_STRING_SEQUENCE,
-
-       OBJECT_TYPE_SEQUENCE,
-       OBJECT_TYPE_ARRAY,
-       OBJECT_TYPE_STRUCT,
-       OBJECT_TYPE_VARIANT,
-
-       OBJECT_TYPE_DYNAMIC,
-};
-
-struct filter_get_index_data {
-       uint64_t offset;        /* in bytes */
-       size_t ctx_index;
-       size_t array_len;
-       struct {
-               size_t len;
-               enum object_type type;
-               bool rev_bo;    /* reverse byte order */
-       } elem;
-};
-
-/* Validation stack */
-struct vstack_load {
-       enum load_type type;
-       enum object_type object_type;
-       const struct lttng_event_field *field;
-       bool rev_bo;    /* reverse byte order */
-};
-
-struct vstack_entry {
-       enum entry_type type;
-       struct vstack_load load;
-};
-
-struct vstack {
-       int top;        /* top of stack */
-       struct vstack_entry e[FILTER_STACK_LEN];
-};
-
-static inline
-void vstack_init(struct vstack *stack)
-{
-       stack->top = -1;
-}
-
-static inline
-struct vstack_entry *vstack_ax(struct vstack *stack)
-{
-       if (unlikely(stack->top < 0))
-               return NULL;
-       return &stack->e[stack->top];
-}
-
-static inline
-struct vstack_entry *vstack_bx(struct vstack *stack)
-{
-       if (unlikely(stack->top < 1))
-               return NULL;
-       return &stack->e[stack->top - 1];
-}
-
-static inline
-int vstack_push(struct vstack *stack)
-{
-       if (stack->top >= FILTER_STACK_LEN - 1) {
-               printk(KERN_WARNING "Stack full\n");
-               return -EINVAL;
-       }
-       ++stack->top;
-       return 0;
-}
-
-static inline
-int vstack_pop(struct vstack *stack)
-{
-       if (unlikely(stack->top < 0)) {
-               printk(KERN_WARNING "Stack empty\n");
-               return -EINVAL;
-       }
-       stack->top--;
-       return 0;
-}
-
-/* Execution stack */
-enum estack_string_literal_type {
-       ESTACK_STRING_LITERAL_TYPE_NONE,
-       ESTACK_STRING_LITERAL_TYPE_PLAIN,
-       ESTACK_STRING_LITERAL_TYPE_STAR_GLOB,
-};
-
-struct load_ptr {
-       enum load_type type;
-       enum object_type object_type;
-       const void *ptr;
-       bool rev_bo;
-       /* Temporary place-holders for contexts. */
-       union {
-               int64_t s64;
-               uint64_t u64;
-               double d;
-       } u;
-       /*
-        * "field" is only needed when nested under a variant, in which
-        * case we cannot specialize the nested operations.
-        */
-       const struct lttng_event_field *field;
-};
-
-struct estack_entry {
-       union {
-               int64_t v;
-
-               struct {
-                       const char *str;
-                       const char __user *user_str;
-                       size_t seq_len;
-                       enum estack_string_literal_type literal_type;
-                       int user;               /* is string from userspace ? */
-               } s;
-               struct load_ptr ptr;
-       } u;
-};
-
-struct estack {
-       int top;        /* top of stack */
-       struct estack_entry e[FILTER_STACK_LEN];
-};
-
-#define estack_ax_v    ax
-#define estack_bx_v    bx
-
-#define estack_ax(stack, top)                                  \
-       ({                                                      \
-               BUG_ON((top) <= FILTER_STACK_EMPTY);            \
-               &(stack)->e[top];                               \
-       })
-
-#define estack_bx(stack, top)                                  \
-       ({                                                      \
-               BUG_ON((top) <= FILTER_STACK_EMPTY + 1);        \
-               &(stack)->e[(top) - 1];                         \
-       })
-
-#define estack_push(stack, top, ax, bx)                                \
-       do {                                                    \
-               BUG_ON((top) >= FILTER_STACK_LEN - 1);          \
-               (stack)->e[(top) - 1].u.v = (bx);               \
-               (bx) = (ax);                                    \
-               ++(top);                                        \
-       } while (0)
-
-#define estack_pop(stack, top, ax, bx)                         \
-       do {                                                    \
-               BUG_ON((top) <= FILTER_STACK_EMPTY);            \
-               (ax) = (bx);                                    \
-               (bx) = (stack)->e[(top) - 2].u.v;               \
-               (top)--;                                        \
-       } while (0)
-
-const char *lttng_filter_print_op(enum filter_op op);
-
-int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode);
-int lttng_filter_specialize_bytecode(struct lttng_event *event,
-               struct bytecode_runtime *bytecode);
-
-uint64_t lttng_filter_false(void *filter_data,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               const char *filter_stack_data);
-uint64_t lttng_filter_interpret_bytecode(void *filter_data,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               const char *filter_stack_data);
-
-#endif /* _LTTNG_FILTER_H */
diff --git a/include/lttng/lttng-bytecode.h b/include/lttng/lttng-bytecode.h
new file mode 100644 (file)
index 0000000..25157f0
--- /dev/null
@@ -0,0 +1,309 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * lttng/lttng-bytecode.h
+ *
+ * LTTng modules bytecode header.
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_BYTECODE_H
+#define _LTTNG_BYTECODE_H
+
+#include <linux/kernel.h>
+
+#include <lttng/events.h>
+#include <lttng/bytecode.h>
+
+/* Interpreter stack length, in number of entries */
+#define INTERPRETER_STACK_LEN  10      /* includes 2 dummy */
+#define INTERPRETER_STACK_EMPTY        1
+#define INTERPRETER_MAX_DATA_LEN       65536
+
+#ifdef DEBUG
+#define dbg_printk(fmt, args...)                               \
+       printk(KERN_DEBUG "[debug bytecode in %s:%s@%u] " fmt,          \
+               __FILE__, __func__, __LINE__, ## args)
+#else
+#define dbg_printk(fmt, args...)                               \
+do {                                                           \
+       /* do nothing but check printf format */                \
+       if (0)                                                  \
+               printk(KERN_DEBUG "[debug bytecode in %s:%s@%u] " fmt,  \
+                       __FILE__, __func__, __LINE__, ## args); \
+} while (0)
+#endif
+
+/* Linked bytecode. Child of struct lttng_bytecode_runtime. */
+struct bytecode_runtime {
+       struct lttng_bytecode_runtime p;
+       size_t data_len;
+       size_t data_alloc_len;
+       char *data;
+       uint16_t len;
+       char code[0];
+};
+
+enum entry_type {
+       REG_S64,
+       REG_U64,
+       REG_DOUBLE,
+       REG_STRING,
+       REG_STAR_GLOB_STRING,
+       REG_TYPE_UNKNOWN,
+       REG_PTR,
+};
+
+enum load_type {
+       LOAD_ROOT_CONTEXT,
+       LOAD_ROOT_APP_CONTEXT,
+       LOAD_ROOT_PAYLOAD,
+       LOAD_OBJECT,
+};
+
+enum object_type {
+       OBJECT_TYPE_S8,
+       OBJECT_TYPE_S16,
+       OBJECT_TYPE_S32,
+       OBJECT_TYPE_S64,
+       OBJECT_TYPE_U8,
+       OBJECT_TYPE_U16,
+       OBJECT_TYPE_U32,
+       OBJECT_TYPE_U64,
+
+       OBJECT_TYPE_SIGNED_ENUM,
+       OBJECT_TYPE_UNSIGNED_ENUM,
+
+       OBJECT_TYPE_DOUBLE,
+       OBJECT_TYPE_STRING,
+       OBJECT_TYPE_STRING_SEQUENCE,
+
+       OBJECT_TYPE_SEQUENCE,
+       OBJECT_TYPE_ARRAY,
+       OBJECT_TYPE_STRUCT,
+       OBJECT_TYPE_VARIANT,
+
+       OBJECT_TYPE_DYNAMIC,
+};
+
+struct bytecode_get_index_data {
+       uint64_t offset;        /* in bytes */
+       size_t ctx_index;
+       size_t array_len;
+       /*
+        * Field is only populated for LOAD_ROOT_CONTEXT, LOAD_ROOT_APP_CONTEXT
+        * and LOAD_ROOT_PAYLOAD. Left NULL for LOAD_OBJECT, considering that the
+        * interpreter needs to find it from the event fields and types to
+        * support variants.
+        */
+       const struct lttng_event_field *field;
+       struct {
+               size_t len;
+               enum object_type type;
+               bool rev_bo;    /* reverse byte order */
+       } elem;
+};
+
+/* Validation stack */
+struct vstack_load {
+       enum load_type type;
+       enum object_type object_type;
+       const struct lttng_event_field *field;
+       bool rev_bo;    /* reverse byte order */
+};
+
+struct vstack_entry {
+       enum entry_type type;
+       struct vstack_load load;
+};
+
+struct vstack {
+       int top;        /* top of stack */
+       struct vstack_entry e[INTERPRETER_STACK_LEN];
+};
+
+static inline
+void vstack_init(struct vstack *stack)
+{
+       stack->top = -1;
+}
+
+static inline
+struct vstack_entry *vstack_ax(struct vstack *stack)
+{
+       if (unlikely(stack->top < 0))
+               return NULL;
+       return &stack->e[stack->top];
+}
+
+static inline
+struct vstack_entry *vstack_bx(struct vstack *stack)
+{
+       if (unlikely(stack->top < 1))
+               return NULL;
+       return &stack->e[stack->top - 1];
+}
+
+static inline
+int vstack_push(struct vstack *stack)
+{
+       if (stack->top >= INTERPRETER_STACK_LEN - 1) {
+               printk(KERN_WARNING "Stack full\n");
+               return -EINVAL;
+       }
+       ++stack->top;
+       return 0;
+}
+
+static inline
+int vstack_pop(struct vstack *stack)
+{
+       if (unlikely(stack->top < 0)) {
+               printk(KERN_WARNING "Stack empty\n");
+               return -EINVAL;
+       }
+       stack->top--;
+       return 0;
+}
+
+/* Execution stack */
+enum estack_string_literal_type {
+       ESTACK_STRING_LITERAL_TYPE_NONE,
+       ESTACK_STRING_LITERAL_TYPE_PLAIN,
+       ESTACK_STRING_LITERAL_TYPE_STAR_GLOB,
+};
+
+struct load_ptr {
+       enum load_type type;
+       enum object_type object_type;
+       const void *ptr;
+       size_t nr_elem;
+       bool rev_bo;
+       /* Temporary place-holders for contexts. */
+       union {
+               int64_t s64;
+               uint64_t u64;
+               double d;
+       } u;
+       const struct lttng_event_field *field;
+};
+
+struct estack_entry {
+       enum entry_type type;
+       union {
+               int64_t v;
+
+               struct {
+                       const char *str;
+                       const char __user *user_str;
+                       size_t seq_len;
+                       enum estack_string_literal_type literal_type;
+                       int user;               /* is string from userspace ? */
+               } s;
+               struct load_ptr ptr;
+       } u;
+};
+
+struct estack {
+       int top;        /* top of stack */
+       struct estack_entry e[INTERPRETER_STACK_LEN];
+};
+
+#define estack_ax_v    ax
+#define estack_bx_v    bx
+
+#define estack_ax_t    ax_t
+#define estack_bx_t    bx_t
+
+#define estack_ax(stack, top)                                  \
+       ({                                                      \
+               BUG_ON((top) <= INTERPRETER_STACK_EMPTY);       \
+               &(stack)->e[top];                               \
+       })
+
+#define estack_bx(stack, top)                                  \
+       ({                                                      \
+               BUG_ON((top) <= INTERPRETER_STACK_EMPTY + 1);   \
+               &(stack)->e[(top) - 1];                         \
+       })
+
+#define estack_push(stack, top, ax, bx, ax_t, bx_t)            \
+       do {                                                    \
+               BUG_ON((top) >= INTERPRETER_STACK_LEN - 1);     \
+               (stack)->e[(top) - 1].u.v = (bx);               \
+               (stack)->e[(top) - 1].type = (bx_t);            \
+               (bx) = (ax);                                    \
+               (bx_t) = (ax_t);                                \
+               ++(top);                                        \
+       } while (0)
+
+#define estack_pop(stack, top, ax, bx, ax_t, bx_t)             \
+       do {                                                    \
+               BUG_ON((top) <= INTERPRETER_STACK_EMPTY);       \
+               (ax) = (bx);                                    \
+               (ax_t) = (bx_t);                                \
+               (bx) = (stack)->e[(top) - 2].u.v;               \
+               (bx_t) = (stack)->e[(top) - 2].type;            \
+               (top)--;                                        \
+       } while (0)
+
+enum lttng_interpreter_type {
+       LTTNG_INTERPRETER_TYPE_S64,
+       LTTNG_INTERPRETER_TYPE_U64,
+       LTTNG_INTERPRETER_TYPE_SIGNED_ENUM,
+       LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM,
+       LTTNG_INTERPRETER_TYPE_DOUBLE,
+       LTTNG_INTERPRETER_TYPE_STRING,
+       LTTNG_INTERPRETER_TYPE_SEQUENCE,
+};
+
+/*
+ * Represents the output parameter of the lttng interpreter.
+ * Currently capturable field classes are integer, double, string and sequence
+ * of integer.
+ */
+struct lttng_interpreter_output {
+       enum lttng_interpreter_type type;
+       union {
+               int64_t s;
+               uint64_t u;
+
+               struct {
+                       const char *str;
+                       size_t len;
+               } str;
+               struct {
+                       const void *ptr;
+                       size_t nr_elem;
+
+                       /* Inner type. */
+                       const struct lttng_type *nested_type;
+               } sequence;
+       } u;
+};
+
+const char *lttng_bytecode_print_op(enum bytecode_op op);
+
+void lttng_bytecode_filter_sync_state(struct lttng_bytecode_runtime *runtime);
+void lttng_bytecode_capture_sync_state(struct lttng_bytecode_runtime *runtime);
+int lttng_bytecode_validate(struct bytecode_runtime *bytecode);
+int lttng_bytecode_specialize(const struct lttng_event_desc *event_desc,
+               struct bytecode_runtime *bytecode);
+
+uint64_t lttng_bytecode_filter_interpret_false(void *filter_data,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               const char *filter_stack_data);
+uint64_t lttng_bytecode_filter_interpret(void *filter_data,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               const char *filter_stack_data);
+
+uint64_t lttng_bytecode_capture_interpret_false(void *capture_data,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               const char *capture_stack_data,
+               struct lttng_interpreter_output *output);
+uint64_t lttng_bytecode_capture_interpret(void *capture_data,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               const char *capture_stack_data,
+               struct lttng_interpreter_output *output);
+
+#endif /* _LTTNG_BYTECODE_H */
diff --git a/include/lttng/msgpack.h b/include/lttng/msgpack.h
new file mode 100644 (file)
index 0000000..e5c011e
--- /dev/null
@@ -0,0 +1,61 @@
+#ifndef _LTTNG_UST_MSGPACK_H
+#define _LTTNG_UST_MSGPACK_H
+
+/*
+ * msgpack.h
+ *
+ * Copyright (C) 2020 Francis Deslauriers <francis.deslauriers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stddef.h>
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else /* __KERNEL__ */
+#include <stdint.h>
+#endif /* __KERNEL__ */
+
+struct lttng_msgpack_writer {
+       uint8_t *buffer;
+       uint8_t *write_pos;
+       const uint8_t *end_write_pos;
+       uint8_t array_nesting;
+       uint8_t map_nesting;
+};
+
+void lttng_msgpack_writer_init(
+               struct lttng_msgpack_writer *writer,
+               uint8_t *buffer, size_t size);
+
+void lttng_msgpack_writer_fini(struct lttng_msgpack_writer *writer);
+
+int lttng_msgpack_write_nil(struct lttng_msgpack_writer *writer);
+int lttng_msgpack_write_true(struct lttng_msgpack_writer *writer);
+int lttng_msgpack_write_false(struct lttng_msgpack_writer *writer);
+int lttng_msgpack_write_unsigned_integer(
+               struct lttng_msgpack_writer *writer, uint64_t value);
+int lttng_msgpack_write_signed_integer(
+               struct lttng_msgpack_writer *writer, int64_t value);
+int lttng_msgpack_write_double(struct lttng_msgpack_writer *writer, double value);
+int lttng_msgpack_write_str(struct lttng_msgpack_writer *writer,
+               const char *value);
+int lttng_msgpack_begin_map(struct lttng_msgpack_writer *writer, size_t count);
+int lttng_msgpack_end_map(struct lttng_msgpack_writer *writer);
+int lttng_msgpack_begin_array(
+               struct lttng_msgpack_writer *writer, size_t count);
+int lttng_msgpack_end_array(struct lttng_msgpack_writer *writer);
+
+#endif /* _LTTNG_UST_MSGPACK_H */
index b5094619176e6764ae1ff01afda5ce726a4afe97..d599a8827f71dd3e86a2157c196f0f250f78b7bf 100644 (file)
@@ -172,6 +172,41 @@ void __event_template_proto___##_name(void);
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
+/*
+ * Stage 1.2 of the trace trigger.
+ *
+ * Create dummy trace prototypes for each event class, and for each used
+ * template. This will allow checking whether the prototypes from the
+ * class and the instance using the class actually match.
+ */
+
+#include <lttng/events-reset.h>        /* Reset all macros within TRACE_EVENT */
+
+#undef TP_PROTO
+#define TP_PROTO(...)  __VA_ARGS__
+
+#undef TP_ARGS
+#define TP_ARGS(...)   __VA_ARGS__
+
+#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
+#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
+void __trigger_template_proto___##_template(_proto);
+
+#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
+#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
+void __trigger_template_proto___##_template(void);
+
+#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
+#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
+void __trigger_template_proto___##_name(_proto);
+
+#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
+#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
+void __trigger_template_proto___##_name(void);
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+
 /*
  * Stage 1.2 of tracepoint event generation
  *
@@ -469,6 +504,28 @@ static void __event_probe__##_name(void *__data);
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
+/*
+ * Stage 3.1 of the trace triggers.
+ *
+ * Create trigger probe callback prototypes.
+ */
+
+/* Reset all macros within TRACEPOINT_EVENT */
+#include <lttng/events-reset.h>
+
+#undef TP_PROTO
+#define TP_PROTO(...)  __VA_ARGS__
+
+#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
+#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
+static void __trigger_probe__##_name(void *__data, _proto);
+
+#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
+#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
+static void __trigger_probe__##_name(void *__data);
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
 /*
  * Stage 4 of the trace events.
  *
@@ -765,7 +822,7 @@ error:                                                                            \
 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
 static inline                                                                \
-void __event_prepare_filter_stack__##_name(char *__stack_data,               \
+void __event_prepare_interpreter_stack__##_name(char *__stack_data,                  \
                void *__tp_locvar)                                            \
 {                                                                            \
        struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar;  \
@@ -776,7 +833,7 @@ void __event_prepare_filter_stack__##_name(char *__stack_data,                    \
 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
 static inline                                                                \
-void __event_prepare_filter_stack__##_name(char *__stack_data,               \
+void __event_prepare_interpreter_stack__##_name(char *__stack_data,                  \
                void *__tp_locvar, _proto)                                    \
 {                                                                            \
        struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar;  \
@@ -1133,6 +1190,7 @@ static void __event_probe__##_name(void *__data, _proto)                \
        struct lttng_event *__event = __data;                                 \
        struct lttng_probe_ctx __lttng_probe_ctx = {                                  \
                .event = __event,                                             \
+               .trigger = NULL,                                              \
                .interruptible = !irqs_disabled(),                            \
        };                                                                    \
        struct lttng_channel *__chan = __event->chan;                         \
@@ -1184,15 +1242,15 @@ static void __event_probe__##_name(void *__data, _proto)                      \
        __orig_dynamic_len_offset = this_cpu_ptr(&lttng_dynamic_len_stack)->offset; \
        __dynamic_len_idx = __orig_dynamic_len_offset;                        \
        _code_pre                                                             \
-       if (unlikely(!list_empty(&__event->bytecode_runtime_head))) {         \
+       if (unlikely(!list_empty(&__event->filter_bytecode_runtime_head))) {          \
                struct lttng_bytecode_runtime *bc_runtime;                    \
                int __filter_record = __event->has_enablers_without_bytecode; \
                                                                              \
-               __event_prepare_filter_stack__##_name(__stackvar.__filter_stack_data, \
+               __event_prepare_interpreter_stack__##_name(__stackvar.__filter_stack_data, \
                                tp_locvar, _args);                                    \
-               lttng_list_for_each_entry_rcu(bc_runtime, &__event->bytecode_runtime_head, node) { \
-                       if (unlikely(bc_runtime->filter(bc_runtime, &__lttng_probe_ctx,       \
-                                       __stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) { \
+               lttng_list_for_each_entry_rcu(bc_runtime, &__event->filter_bytecode_runtime_head, node) { \
+                       if (unlikely(bc_runtime->interpreter_funcs.filter(bc_runtime, &__lttng_probe_ctx,             \
+                                       __stackvar.__filter_stack_data) & LTTNG_INTERPRETER_RECORD_FLAG)) { \
                                __filter_record = 1;                          \
                                break;                                        \
                        }                                                     \
@@ -1228,6 +1286,7 @@ static void __event_probe__##_name(void *__data)                        \
        struct lttng_event *__event = __data;                                 \
        struct lttng_probe_ctx __lttng_probe_ctx = {                                  \
                .event = __event,                                             \
+               .trigger = NULL,                                              \
                .interruptible = !irqs_disabled(),                            \
        };                                                                    \
        struct lttng_channel *__chan = __event->chan;                         \
@@ -1279,15 +1338,15 @@ static void __event_probe__##_name(void *__data)                              \
        __orig_dynamic_len_offset = this_cpu_ptr(&lttng_dynamic_len_stack)->offset; \
        __dynamic_len_idx = __orig_dynamic_len_offset;                        \
        _code_pre                                                             \
-       if (unlikely(!list_empty(&__event->bytecode_runtime_head))) {         \
+       if (unlikely(!list_empty(&__event->filter_bytecode_runtime_head))) {          \
                struct lttng_bytecode_runtime *bc_runtime;                    \
                int __filter_record = __event->has_enablers_without_bytecode; \
                                                                              \
-               __event_prepare_filter_stack__##_name(__stackvar.__filter_stack_data, \
+               __event_prepare_interpreter_stack__##_name(__stackvar.__filter_stack_data, \
                                tp_locvar);                                   \
-               lttng_list_for_each_entry_rcu(bc_runtime, &__event->bytecode_runtime_head, node) { \
-                       if (unlikely(bc_runtime->filter(bc_runtime, &__lttng_probe_ctx, \
-                                       __stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) { \
+               lttng_list_for_each_entry_rcu(bc_runtime, &__event->filter_bytecode_runtime_head, node) { \
+                       if (unlikely(bc_runtime->interpreter_funcs.filter(bc_runtime, &__lttng_probe_ctx,       \
+                                       __stackvar.__filter_stack_data) & LTTNG_INTERPRETER_RECORD_FLAG)) { \
                                __filter_record = 1;                          \
                                break;                                        \
                        }                                                     \
@@ -1319,6 +1378,133 @@ __post:                                                                       \
 
 #undef __get_dynamic_len
 
+/*
+ * Stage 6.1 of the trace triggers: create the trigger probe functions.
+ */
+
+#include <lttng/events-reset.h>        /* Reset all macros within LTTNG_TRACEPOINT_EVENT */
+
+#undef TP_PROTO
+#define TP_PROTO(...)  __VA_ARGS__
+
+#undef TP_ARGS
+#define TP_ARGS(...)   __VA_ARGS__
+
+#undef TP_FIELDS
+#define TP_FIELDS(...) __VA_ARGS__
+
+#undef TP_locvar
+#define TP_locvar(...) __VA_ARGS__
+
+#undef TP_code_pre
+#define TP_code_pre(...)       __VA_ARGS__
+
+#undef TP_code_post
+#define TP_code_post(...)      __VA_ARGS__
+
+/*
+ * Using twice size for filter stack data to hold size and pointer for
+ * each field (worst case). For integers, max size required is 64-bit.
+ * Same for double-precision floats. Those fit within
+ * 2*sizeof(unsigned long) for all supported architectures.
+ * The overall filter result is the union (logical OR) of all filter runtimes.
+ */
+#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
+#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
+static void __trigger_probe__##_name(void *__data, _proto)                   \
+{                                                                            \
+       struct probe_local_vars { _locvar };                                  \
+       struct lttng_trigger *__trigger = __data;                             \
+       struct lttng_probe_ctx __lttng_probe_ctx = {                          \
+               .event = NULL,                                                \
+               .trigger = __trigger,                                         \
+               .interruptible = !irqs_disabled(),                            \
+       };                                                                    \
+       union {                                                               \
+               size_t __dynamic_len_removed[ARRAY_SIZE(__event_fields___##_name)];   \
+               char __interpreter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
+       } __stackvar;                                                         \
+       struct probe_local_vars __tp_locvar;                                  \
+       struct probe_local_vars *tp_locvar __attribute__((unused)) =          \
+                       &__tp_locvar;                                         \
+                                                                             \
+       if (unlikely(!READ_ONCE(__trigger->enabled)))                         \
+               return;                                                       \
+       _code_pre                                                             \
+       if (unlikely(!list_empty(&__trigger->filter_bytecode_runtime_head))) {        \
+               struct lttng_bytecode_runtime *bc_runtime;                    \
+               int __filter_record = __trigger->has_enablers_without_bytecode; \
+                                                                             \
+               __event_prepare_interpreter_stack__##_name(__stackvar.__interpreter_stack_data, \
+                               tp_locvar, _args);                            \
+               lttng_list_for_each_entry_rcu(bc_runtime, &__trigger->filter_bytecode_runtime_head, node) { \
+                       if (unlikely(bc_runtime->interpreter_funcs.filter(bc_runtime, &__lttng_probe_ctx,   \
+                                       __stackvar.__interpreter_stack_data) & LTTNG_INTERPRETER_RECORD_FLAG))   \
+                               __filter_record = 1;                          \
+               }                                                             \
+               if (likely(!__filter_record))                                 \
+                       goto __post;                                          \
+       }                                                                     \
+                                                                             \
+       if (unlikely(!list_empty(&__trigger->capture_bytecode_runtime_head)))  \
+               __event_prepare_interpreter_stack__##_name(__stackvar.__interpreter_stack_data, \
+                               tp_locvar, _args);                                    \
+                                                                             \
+       __trigger->send_notification(__trigger, &__lttng_probe_ctx, __stackvar.__interpreter_stack_data);      \
+                                                                             \
+__post:                                                                              \
+       _code_post                                                            \
+       return;                                                               \
+}
+
+#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
+#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
+static void __trigger_probe__##_name(void *__data)                           \
+{                                                                            \
+       struct probe_local_vars { _locvar };                                  \
+       struct lttng_trigger *__trigger = __data;                             \
+       struct lttng_probe_ctx __lttng_probe_ctx = {                          \
+               .event = NULL,                                                \
+               .trigger = __trigger,                                         \
+               .interruptible = !irqs_disabled(),                            \
+       };                                                                    \
+       union {                                                               \
+               size_t __dynamic_len_removed[ARRAY_SIZE(__event_fields___##_name)];   \
+               char __interpreter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
+       } __stackvar;                                                         \
+       struct probe_local_vars __tp_locvar;                                  \
+       struct probe_local_vars *tp_locvar __attribute__((unused)) =          \
+                       &__tp_locvar;                                         \
+                                                                             \
+       if (unlikely(!READ_ONCE(__trigger->enabled)))                         \
+               return;                                                       \
+       _code_pre                                                             \
+       if (unlikely(!list_empty(&__trigger->filter_bytecode_runtime_head))) {        \
+               struct lttng_bytecode_runtime *bc_runtime;                    \
+               int __filter_record = __trigger->has_enablers_without_bytecode; \
+                                                                             \
+               __event_prepare_interpreter_stack__##_name(__stackvar.__interpreter_stack_data, \
+                               tp_locvar);                                   \
+               lttng_list_for_each_entry_rcu(bc_runtime, &__trigger->filter_bytecode_runtime_head, node) { \
+                       if (unlikely(bc_runtime->interpreter_funcs.filter(bc_runtime, &__lttng_probe_ctx,       \
+                                       __stackvar.__interpreter_stack_data) & LTTNG_INTERPRETER_RECORD_FLAG)) \
+                               __filter_record = 1;                          \
+               }                                                             \
+               if (likely(!__filter_record))                                 \
+                       goto __post;                                          \
+       }                                                                     \
+                                                                             \
+       if (unlikely(!list_empty(&__trigger->capture_bytecode_runtime_head)))  \
+               __event_prepare_interpreter_stack__##_name(__stackvar.__interpreter_stack_data, \
+                               tp_locvar);                                   \
+                                                                             \
+       __trigger->send_notification(__trigger, &__lttng_probe_ctx, __stackvar.__interpreter_stack_data);      \
+__post:                                                                              \
+       _code_post                                                            \
+       return;                                                               \
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 /*
  * Stage 7 of the trace events.
  *
@@ -1333,6 +1519,10 @@ __post:                                                                        \
 #define TP_PROBE_CB(_template) &__event_probe__##_template
 #endif
 
+#ifndef TP_TRIGGER_PROBE_CB
+#define TP_TRIGGER_PROBE_CB(_template) &__trigger_probe__##_template
+#endif
+
 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map)     \
 static const struct lttng_event_desc __event_desc___##_map = {         \
@@ -1342,6 +1532,7 @@ static const struct lttng_event_desc __event_desc___##_map = {            \
        .probe_callback = (void *) TP_PROBE_CB(_template),              \
        .nr_fields = ARRAY_SIZE(__event_fields___##_template),          \
        .owner = THIS_MODULE,                                           \
+       .trigger_callback = (void *) TP_TRIGGER_PROBE_CB(_template),            \
 };
 
 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
diff --git a/include/lttng/trigger-notification.h b/include/lttng/trigger-notification.h
new file mode 100644 (file)
index 0000000..539ca65
--- /dev/null
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng/trigger-notification.h
+ *
+ * Copyright (C) 2020 Francis Deslauriers <francis.deslauriers@efficios.com>
+ */
+
+#ifndef _LTTNG_TRIGGER_NOTIFICATION_H
+#define _LTTNG_TRIGGER_NOTIFICATION_H
+
+#include <lttng/events.h>
+
+void lttng_trigger_notification_send(struct lttng_trigger *trigger,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               const char *stack_data);
+
+#endif /* _LTTNG_TRIGGER_NOTIFICATION_H */
diff --git a/include/lttng/utils.h b/include/lttng/utils.h
new file mode 100644 (file)
index 0000000..c01b648
--- /dev/null
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only) */
+#ifndef _LTTNG_UTILS_H
+#define _LTTNG_UTILS_H
+
+/*
+ * Copyright (C) 2020 Francis Deslauriers <francis.deslauriers@efficios.com>
+ */
+
+#include <linux/jhash.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+static inline
+struct hlist_head *utils_borrow_hash_table_bucket(
+               struct hlist_head *hash_table,
+               unsigned int hash_table_size,
+               const char *event_name)
+{
+       size_t name_len;
+       uint32_t hash;
+
+       name_len = strlen(event_name);
+
+       hash = jhash(event_name, name_len, 0);
+       return &hash_table[hash & (hash_table_size - 1)];
+}
+#endif /* _LTTNG_UTILS_H */
index aab408fb2697e67da4042bfd113e3cdc92df3224..b7e1415a63ccea86e6d2e58084f2ef79d9521fd6 100644 (file)
@@ -236,14 +236,6 @@ void subbuffer_count_record(const struct lib_ring_buffer_config *config,
        sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
        v_inc(config, &bufb->array[sb_bindex]->records_commit);
 }
-#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
-static inline
-void subbuffer_count_record(const struct lib_ring_buffer_config *config,
-                           struct lib_ring_buffer_backend *bufb,
-                           unsigned long idx)
-{
-}
-#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
 
 /*
  * Reader has exclusive subbuffer access for record consumption. No need to
@@ -262,6 +254,19 @@ void subbuffer_consume_record(const struct lib_ring_buffer_config *config,
        _v_dec(config, &bufb->array[sb_bindex]->records_unread);
        v_inc(config, &bufb->records_read);
 }
+#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
+static inline
+void subbuffer_count_record(const struct lib_ring_buffer_config *config,
+                           struct lib_ring_buffer_backend *bufb,
+                           unsigned long idx)
+{
+}
+static inline
+void subbuffer_consume_record(const struct lib_ring_buffer_config *config,
+                             struct lib_ring_buffer_backend *bufb)
+{
+}
+#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
 
 static inline
 unsigned long subbuffer_get_records_count(
index a17d22024ddc13f0374e6bb7c18ba6ba0b2eaa27..ccb683371cd9ec52176909f59e8a348dbfb01776 100644 (file)
@@ -101,7 +101,9 @@ struct lib_ring_buffer_client_cb {
  *
  * RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a subbuffer is
  * ready to read. Lower latencies before the reader is woken up. Mainly suitable
- * for drivers.
+ * for drivers. Going through an "irq_work" allows triggering this type of wakeup
+ * even from NMI context: the wakeup will be slightly delayed until the next
+ * interrupts are handled.
  *
  * RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
  * has the responsibility to perform wakeups.
@@ -142,9 +144,8 @@ struct lib_ring_buffer_config {
        enum {
                RING_BUFFER_WAKEUP_BY_TIMER,    /* wake up performed by timer */
                RING_BUFFER_WAKEUP_BY_WRITER,   /*
-                                                * writer wakes up reader,
-                                                * not lock-free
-                                                * (takes spinlock).
+                                                * writer wakes up reader through
+                                                * irq_work.
                                                 */
        } wakeup;
        /*
index 07be81aacf95c98de11c3763f1bc9e7e4bcdd1bd..de205337ce9c8ccdc53f7c7fcb93428f44a38588 100644 (file)
@@ -13,6 +13,7 @@
 #define _LIB_RING_BUFFER_FRONTEND_TYPES_H
 
 #include <linux/kref.h>
+#include <linux/irq_work.h>
 #include <ringbuffer/config.h>
 #include <ringbuffer/backend_types.h>
 #include <lttng/prio_heap.h>   /* For per-CPU read-side iterator */
@@ -66,6 +67,7 @@ struct channel {
        struct notifier_block tick_nohz_notifier; /* CPU nohz notifier */
        wait_queue_head_t read_wait;            /* reader wait queue */
        wait_queue_head_t hp_wait;              /* CPU hotplug wait queue */
+       struct irq_work wakeup_pending;         /* Pending wakeup irq work */
        int finalized;                          /* Has channel been finalized */
        struct channel_iter iter;               /* Channel read-side iterator */
        struct kref ref;                        /* Reference count */
@@ -146,6 +148,7 @@ struct lib_ring_buffer {
        union v_atomic records_overrun; /* Number of overwritten records */
        wait_queue_head_t read_wait;    /* reader buffer-level wait queue */
        wait_queue_head_t write_wait;   /* writer buffer-level wait queue (for metadata only) */
+       struct irq_work wakeup_pending;         /* Pending wakeup irq work */
        int finalized;                  /* buffer has been finalized */
        struct timer_list switch_timer; /* timer for periodical switch */
        struct timer_list read_timer;   /* timer for read poll */
index a006ed00e1bb34b66da9f7552a2f8435605a4702..a9879903503dbb44d118bb6de9473c6123676d34 100644 (file)
 extern ssize_t lib_ring_buffer_get_next_record(struct channel *chan,
                                               struct lib_ring_buffer *buf);
 
+/*
+ * Ensure that the current subbuffer is put after client code has read the
+ * payload of the current record. Has an effect when the end of subbuffer is
+ * reached. It is not required if get_next_record is called successively.
+ * However, it should be invoked before returning data to user-space to ensure
+ * that the get/put subbuffer state is quiescent.
+ */
+extern void lib_ring_buffer_put_current_record(struct lib_ring_buffer *buf);
+
 /*
  * channel_get_next_record advances the buffer read position to the next record.
  * It returns either the size of the next record, -EAGAIN if there is currently
index fad44601c6e2397457d9080a22edef6f69c35465..8926808d9457d9df1e576cc02b153f1d3976283b 100644 (file)
@@ -12,13 +12,15 @@ obj-$(CONFIG_LTTNG) += lttng-ring-buffer-metadata-client.o
 obj-$(CONFIG_LTTNG) += lttng-ring-buffer-client-mmap-discard.o
 obj-$(CONFIG_LTTNG) += lttng-ring-buffer-client-mmap-overwrite.o
 obj-$(CONFIG_LTTNG) += lttng-ring-buffer-metadata-mmap-client.o
+obj-$(CONFIG_LTTNG) += lttng-ring-buffer-trigger-client.o
 obj-$(CONFIG_LTTNG) += lttng-clock.o
 
 obj-$(CONFIG_LTTNG) += lttng-tracer.o
 
 obj-$(CONFIG_LTTNG) += lttng-wrapper.o
 
-lttng-tracer-objs := lttng-events.o lttng-abi.o lttng-string-utils.o \
+lttng-tracer-objs := lib/msgpack/msgpack.o \
+                     lttng-events.o lttng-abi.o lttng-string-utils.o \
                      lttng-probes.o lttng-context.o \
                      lttng-context-pid.o lttng-context-procname.o \
                      lttng-context-prio.o lttng-context-nice.o \
@@ -43,11 +45,12 @@ lttng-tracer-objs := lttng-events.o lttng-abi.o lttng-string-utils.o \
                      lttng-context-hostname.o \
                      probes/lttng.o \
                      lttng-tracker-id.o \
-                     lttng-filter.o lttng-filter-interpreter.o \
-                     lttng-filter-specialize.o \
-                     lttng-filter-validator.o \
+                     lttng-bytecode.o lttng-bytecode-interpreter.o \
+                     lttng-bytecode-specialize.o \
+                     lttng-bytecode-validator.o \
                      probes/lttng-probe-user.o \
                      lttng-tp-mempool.o \
+                     lttng-trigger-notification.o
 
 lttng-wrapper-objs := wrapper/page_alloc.o \
                       wrapper/random.o \
diff --git a/src/lib/msgpack/msgpack.c b/src/lib/msgpack/msgpack.c
new file mode 100644 (file)
index 0000000..0803bc6
--- /dev/null
@@ -0,0 +1,517 @@
+/*
+ * msgpack.c
+ *
+ * Copyright (C) 2020 Francis Deslauriers <francis.deslauriers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define _GNU_SOURCE
+#define _LGPL_SOURCE
+#include <stddef.h>
+
+#define MSGPACK_FIXSTR_ID_MASK         0xA0
+#define MSGPACK_FIXMAP_ID_MASK         0x80
+#define MSGPACK_FIXARRAY_ID_MASK       0x90
+
+#define MSGPACK_NIL_ID         0xC0
+#define MSGPACK_FALSE_ID       0xC2
+#define MSGPACK_TRUE_ID                0xC3
+#define MSGPACK_MAP16_ID       0xDE
+#define MSGPACK_ARRAY16_ID     0xDC
+
+#define MSGPACK_UINT8_ID       0xCC
+#define MSGPACK_UINT16_ID      0xCD
+#define MSGPACK_UINT32_ID      0xCE
+#define MSGPACK_UINT64_ID      0xCF
+
+#define MSGPACK_INT8_ID                0xD0
+#define MSGPACK_INT16_ID       0xD1
+#define MSGPACK_INT32_ID       0xD2
+#define MSGPACK_INT64_ID       0xD3
+
+#define MSGPACK_FLOAT64_ID     0xCB
+#define MSGPACK_STR16_ID       0xDA
+
+#define MSGPACK_FIXINT_MAX             ((1 << 7) - 1)
+#define MSGPACK_FIXINT_MIN             -(1 << 5)
+#define MSGPACK_FIXMAP_MAX_COUNT       15
+#define MSGPACK_FIXARRAY_MAX_COUNT     15
+#define MSGPACK_FIXSTR_MAX_LENGTH      31
+
+#ifdef __KERNEL__
+#include <linux/bug.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <lttng/msgpack.h>
+
+#define INT8_MIN               (-128)
+#define INT16_MIN              (-32767-1)
+#define INT32_MIN              (-2147483647-1)
+#define INT8_MAX               (127)
+#define INT16_MAX              (32767)
+#define INT32_MAX              (2147483647)
+#define UINT8_MAX              (255)
+#define UINT16_MAX             (65535)
+#define UINT32_MAX             (4294967295U)
+
+#define byteswap_host_to_be16(_tmp) cpu_to_be16(_tmp)
+#define byteswap_host_to_be32(_tmp) cpu_to_be32(_tmp)
+#define byteswap_host_to_be64(_tmp) cpu_to_be64(_tmp)
+
+#define lttng_msgpack_assert(cond) WARN_ON(!(cond))
+
+#else /* __KERNEL__ */
+
+#include <endian.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "msgpack.h"
+
+#define byteswap_host_to_be16(_tmp) htobe16(_tmp)
+#define byteswap_host_to_be32(_tmp) htobe32(_tmp)
+#define byteswap_host_to_be64(_tmp) htobe64(_tmp)
+
+#define lttng_msgpack_assert(cond) ({ \
+       if (!(cond)) \
+               fprintf(stderr, "Assertion failed. %s:%d\n", __FILE__, __LINE__); \
+       })
+#endif /* __KERNEL__ */
+
+static inline int lttng_msgpack_append_buffer(
+               struct lttng_msgpack_writer *writer,
+               const uint8_t *buf,
+               size_t length)
+{
+       int ret = 0;
+
+       lttng_msgpack_assert(buf);
+
+       /* Ensure we are not trying to write after the end of the buffer. */
+       if (writer->write_pos + length > writer->end_write_pos) {
+               ret = -1;
+               goto end;
+       }
+
+       memcpy(writer->write_pos, buf, length);
+       writer->write_pos += length;
+end:
+       return ret;
+}
+
+static inline int lttng_msgpack_append_u8(
+               struct lttng_msgpack_writer *writer, uint8_t value)
+{
+       return lttng_msgpack_append_buffer(writer, &value, sizeof(value));
+}
+
+static inline int lttng_msgpack_append_u16(
+               struct lttng_msgpack_writer *writer, uint16_t value)
+{
+       value = byteswap_host_to_be16(value);
+
+       return lttng_msgpack_append_buffer(writer, (uint8_t *) &value, sizeof(value));
+}
+
+static inline int lttng_msgpack_append_u32(
+               struct lttng_msgpack_writer *writer, uint32_t value)
+{
+       value = byteswap_host_to_be32(value);
+
+       return lttng_msgpack_append_buffer(writer, (uint8_t *) &value, sizeof(value));
+}
+
+static inline int lttng_msgpack_append_u64(
+               struct lttng_msgpack_writer *writer, uint64_t value)
+{
+       value = byteswap_host_to_be64(value);
+
+       return lttng_msgpack_append_buffer(writer, (uint8_t *) &value, sizeof(value));
+}
+
+static inline int lttng_msgpack_append_f64(
+               struct lttng_msgpack_writer *writer, double value)
+{
+
+       union {
+               double d;
+               uint64_t u;
+       } u;
+
+       u.d = value;
+
+       return lttng_msgpack_append_u64(writer, u.u);
+}
+
+static inline int lttng_msgpack_append_i8(
+               struct lttng_msgpack_writer *writer, int8_t value)
+{
+       return lttng_msgpack_append_u8(writer, (uint8_t) value);
+}
+
+static inline int lttng_msgpack_append_i16(
+               struct lttng_msgpack_writer *writer, int16_t value)
+{
+       return lttng_msgpack_append_u16(writer, (uint16_t) value);
+}
+
+static inline int lttng_msgpack_append_i32(
+               struct lttng_msgpack_writer *writer, int32_t value)
+{
+       return lttng_msgpack_append_u32(writer, (uint32_t) value);
+}
+
+static inline int lttng_msgpack_append_i64(
+               struct lttng_msgpack_writer *writer, int64_t value)
+{
+       return lttng_msgpack_append_u64(writer, (uint64_t) value);
+}
+
+static inline int lttng_msgpack_encode_f64(
+               struct lttng_msgpack_writer *writer, double value)
+{
+       int ret;
+
+       ret = lttng_msgpack_append_u8(writer, MSGPACK_FLOAT64_ID);
+       if (ret)
+               goto end;
+
+       ret = lttng_msgpack_append_f64(writer, value);
+       if (ret)
+               goto end;
+
+end:
+       return ret;
+}
+
+static inline int lttng_msgpack_encode_fixmap(
+               struct lttng_msgpack_writer *writer, uint8_t count)
+{
+       int ret = 0;
+
+       lttng_msgpack_assert(count <= MSGPACK_FIXMAP_MAX_COUNT);
+
+       ret = lttng_msgpack_append_u8(writer, MSGPACK_FIXMAP_ID_MASK | count);
+       if (ret)
+               goto end;
+
+end:
+       return ret;
+}
+
+static inline int lttng_msgpack_encode_map16(
+               struct lttng_msgpack_writer *writer, uint16_t count)
+{
+       int ret;
+
+       lttng_msgpack_assert(count > MSGPACK_FIXMAP_MAX_COUNT);
+
+       ret = lttng_msgpack_append_u8(writer, MSGPACK_MAP16_ID);
+       if (ret)
+               goto end;
+
+       ret = lttng_msgpack_append_u16(writer, count);
+       if (ret)
+               goto end;
+
+end:
+       return ret;
+}
+
+static inline int lttng_msgpack_encode_fixarray(
+               struct lttng_msgpack_writer *writer, uint8_t count)
+{
+       int ret = 0;
+
+       lttng_msgpack_assert(count <= MSGPACK_FIXARRAY_MAX_COUNT);
+
+       ret = lttng_msgpack_append_u8(writer, MSGPACK_FIXARRAY_ID_MASK | count);
+       if (ret)
+               goto end;
+
+end:
+       return ret;
+}
+
+static inline int lttng_msgpack_encode_array16(
+               struct lttng_msgpack_writer *writer, uint16_t count)
+{
+       int ret;
+
+       lttng_msgpack_assert(count > MSGPACK_FIXARRAY_MAX_COUNT);
+
+       ret = lttng_msgpack_append_u8(writer, MSGPACK_ARRAY16_ID);
+       if (ret)
+               goto end;
+
+       ret = lttng_msgpack_append_u16(writer, count);
+       if (ret)
+               goto end;
+
+end:
+       return ret;
+}
+
+static inline int lttng_msgpack_encode_fixstr(
+               struct lttng_msgpack_writer *writer,
+               const char *str,
+               uint8_t len)
+{
+       int ret;
+
+       lttng_msgpack_assert(len <= MSGPACK_FIXSTR_MAX_LENGTH);
+
+       ret = lttng_msgpack_append_u8(writer, MSGPACK_FIXSTR_ID_MASK | len);
+       if (ret)
+               goto end;
+
+       ret = lttng_msgpack_append_buffer(writer, (uint8_t *) str, len);
+       if (ret)
+               goto end;
+
+end:
+       return ret;
+}
+
+static inline int lttng_msgpack_encode_str16(
+               struct lttng_msgpack_writer *writer,
+               const char *str,
+               uint16_t len)
+{
+       int ret;
+
+       lttng_msgpack_assert(len > MSGPACK_FIXSTR_MAX_LENGTH);
+
+       ret = lttng_msgpack_append_u8(writer, MSGPACK_STR16_ID);
+       if (ret)
+               goto end;
+
+       ret = lttng_msgpack_append_u16(writer, len);
+       if (ret)
+               goto end;
+
+       ret = lttng_msgpack_append_buffer(writer, (uint8_t *) str, len);
+       if (ret)
+               goto end;
+
+end:
+       return ret;
+}
+
+int lttng_msgpack_begin_map(struct lttng_msgpack_writer *writer, size_t count)
+{
+       int ret;
+
+       if (count < 0 || count >= (1 << 16)) {
+               ret = -1;
+               goto end;
+       }
+
+       if (count <= MSGPACK_FIXMAP_MAX_COUNT)
+               ret = lttng_msgpack_encode_fixmap(writer, count);
+       else
+               ret = lttng_msgpack_encode_map16(writer, count);
+
+       writer->map_nesting++;
+end:
+       return ret;
+}
+
+int lttng_msgpack_end_map(struct lttng_msgpack_writer *writer)
+{
+       lttng_msgpack_assert(writer->map_nesting > 0);
+       writer->map_nesting--;
+       return 0;
+}
+
+int lttng_msgpack_begin_array(
+               struct lttng_msgpack_writer *writer, size_t count)
+{
+       int ret;
+
+       if (count < 0 || count >= (1 << 16)) {
+               ret = -1;
+               goto end;
+       }
+
+       if (count <= MSGPACK_FIXARRAY_MAX_COUNT)
+               ret = lttng_msgpack_encode_fixarray(writer, count);
+       else
+               ret = lttng_msgpack_encode_array16(writer, count);
+
+       writer->array_nesting++;
+end:
+       return ret;
+}
+
+int lttng_msgpack_end_array(struct lttng_msgpack_writer *writer)
+{
+       lttng_msgpack_assert(writer->array_nesting > 0);
+       writer->array_nesting--;
+       return 0;
+}
+
+int lttng_msgpack_write_str(struct lttng_msgpack_writer *writer,
+               const char *str)
+{
+       int ret;
+       size_t length = strlen(str);
+       if (length < 0 || length >= (1 << 16)) {
+               ret = -1;
+               goto end;
+       }
+
+       if (length <= MSGPACK_FIXSTR_MAX_LENGTH)
+               ret = lttng_msgpack_encode_fixstr(writer, str, length);
+       else
+               ret = lttng_msgpack_encode_str16(writer, str, length);
+
+end:
+       return ret;
+}
+
+int lttng_msgpack_write_nil(struct lttng_msgpack_writer *writer)
+{
+       return lttng_msgpack_append_u8(writer, MSGPACK_NIL_ID);
+}
+
+int lttng_msgpack_write_true(struct lttng_msgpack_writer *writer)
+{
+       return lttng_msgpack_append_u8(writer, MSGPACK_TRUE_ID);
+}
+
+int lttng_msgpack_write_false(struct lttng_msgpack_writer *writer)
+{
+       return lttng_msgpack_append_u8(writer, MSGPACK_FALSE_ID);
+}
+
+int lttng_msgpack_write_unsigned_integer(
+               struct lttng_msgpack_writer *writer, uint64_t value)
+{
+       int ret = 0;
+
+       if (value <= MSGPACK_FIXINT_MAX) {
+               ret = lttng_msgpack_append_u8(writer, (uint8_t) value);
+               if (ret)
+                       goto end;
+       } else if (value <= UINT8_MAX) {
+               ret = lttng_msgpack_append_u8(writer, MSGPACK_UINT8_ID);
+               if (ret)
+                       goto end;
+
+               ret = lttng_msgpack_append_u8(writer, (uint8_t) value);
+               if (ret)
+                       goto end;
+       } else if (value <= UINT16_MAX) {
+               ret = lttng_msgpack_append_u8(writer, MSGPACK_UINT16_ID);
+               if (ret)
+                       goto end;
+
+               ret = lttng_msgpack_append_u16(writer, (uint16_t) value);
+               if (ret)
+                       goto end;
+       } else if (value <= UINT32_MAX) {
+               ret = lttng_msgpack_append_u8(writer, MSGPACK_UINT32_ID);
+               if (ret)
+                       goto end;
+
+               ret = lttng_msgpack_append_u32(writer, (uint32_t) value);
+               if (ret)
+                       goto end;
+       } else {
+               ret = lttng_msgpack_append_u8(writer, MSGPACK_UINT64_ID);
+               if (ret)
+                       goto end;
+
+               ret = lttng_msgpack_append_u64(writer, value);
+               if (ret)
+                       goto end;
+       }
+
+end:
+       return ret;
+}
+
+int lttng_msgpack_write_signed_integer(struct lttng_msgpack_writer *writer, int64_t value)
+{
+       int ret;
+
+       if (value >= MSGPACK_FIXINT_MIN && value <= MSGPACK_FIXINT_MAX){
+               ret = lttng_msgpack_append_i8(writer, (int8_t) value);
+               if (ret)
+                       goto end;
+       } else if (value >= INT8_MIN && value <= INT8_MAX) {
+               ret = lttng_msgpack_append_u8(writer, MSGPACK_INT8_ID);
+               if (ret)
+                       goto end;
+
+               ret = lttng_msgpack_append_i8(writer, (int8_t) value);
+               if (ret)
+                       goto end;
+       } else if (value >= INT16_MIN && value <= INT16_MAX) {
+               ret = lttng_msgpack_append_u8(writer, MSGPACK_INT16_ID);
+               if (ret)
+                       goto end;
+
+               ret = lttng_msgpack_append_i16(writer, (int16_t) value);
+               if (ret)
+                       goto end;
+       } else if (value >= INT32_MIN && value <= INT32_MAX) {
+               ret = lttng_msgpack_append_u8(writer, MSGPACK_INT32_ID);
+               if (ret)
+                       goto end;
+
+               ret = lttng_msgpack_append_i32(writer, (int32_t) value);
+               if (ret)
+                       goto end;
+       } else {
+               ret = lttng_msgpack_append_u8(writer, MSGPACK_INT64_ID);
+               if (ret)
+                       goto end;
+
+               ret = lttng_msgpack_append_i64(writer, value);
+               if (ret)
+                       goto end;
+       }
+
+end:
+       return ret;
+}
+
+int lttng_msgpack_write_double(struct lttng_msgpack_writer *writer, double value)
+{
+       return lttng_msgpack_encode_f64(writer, value);
+}
+
+void lttng_msgpack_writer_init(struct lttng_msgpack_writer *writer,
+               uint8_t *buffer, size_t size)
+{
+       lttng_msgpack_assert(buffer);
+       lttng_msgpack_assert(size >= 0);
+
+       writer->buffer = buffer;
+       writer->write_pos = buffer;
+       writer->end_write_pos = buffer + size;
+
+       writer->array_nesting = 0;
+       writer->map_nesting = 0;
+}
+
+void lttng_msgpack_writer_fini(struct lttng_msgpack_writer *writer)
+{
+       memset(writer, 0, sizeof(*writer));
+}
index fca37fbc3a5b143a08cb55b94f762bf664fb7187..2e857a420b42c3fd32ae922a3eed3fd7a8b932bd 100644 (file)
@@ -133,6 +133,8 @@ void lib_ring_buffer_free(struct lib_ring_buffer *buf)
 {
        struct channel *chan = buf->backend.chan;
 
+       irq_work_sync(&buf->wakeup_pending);
+
        lib_ring_buffer_print_errors(chan, buf, buf->backend.cpu);
        lttng_kvfree(buf->commit_hot);
        lttng_kvfree(buf->commit_cold);
@@ -206,6 +208,19 @@ void channel_reset(struct channel *chan)
 }
 EXPORT_SYMBOL_GPL(channel_reset);
 
+static void lib_ring_buffer_pending_wakeup_buf(struct irq_work *entry)
+{
+       struct lib_ring_buffer *buf = container_of(entry, struct lib_ring_buffer,
+                                                  wakeup_pending);
+       wake_up_interruptible(&buf->read_wait);
+}
+
+static void lib_ring_buffer_pending_wakeup_chan(struct irq_work *entry)
+{
+       struct channel *chan = container_of(entry, struct channel, wakeup_pending);
+       wake_up_interruptible(&chan->read_wait);
+}
+
 /*
  * Must be called under cpu hotplug protection.
  */
@@ -268,6 +283,7 @@ int lib_ring_buffer_create(struct lib_ring_buffer *buf,
 
        init_waitqueue_head(&buf->read_wait);
        init_waitqueue_head(&buf->write_wait);
+       init_irq_work(&buf->wakeup_pending, lib_ring_buffer_pending_wakeup_buf);
        raw_spin_lock_init(&buf->raw_tick_nohz_spinlock);
 
        /*
@@ -854,6 +870,7 @@ struct channel *channel_create(const struct lib_ring_buffer_config *config,
        kref_init(&chan->ref);
        init_waitqueue_head(&chan->read_wait);
        init_waitqueue_head(&chan->hp_wait);
+       init_irq_work(&chan->wakeup_pending, lib_ring_buffer_pending_wakeup_chan);
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
@@ -963,6 +980,8 @@ void *channel_destroy(struct channel *chan)
        const struct lib_ring_buffer_config *config = &chan->backend.config;
        void *priv;
 
+       irq_work_sync(&chan->wakeup_pending);
+
        channel_unregister_notifiers(chan);
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
@@ -2356,13 +2375,14 @@ void lib_ring_buffer_check_deliver_slow(const struct lib_ring_buffer_config *con
                                                 commit_count, idx);
 
                /*
-                * RING_BUFFER_WAKEUP_BY_WRITER wakeup is not lock-free.
+                * RING_BUFFER_WAKEUP_BY_WRITER uses an irq_work to issue
+                * the wakeups.
                 */
                if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER
                    && atomic_long_read(&buf->active_readers)
                    && lib_ring_buffer_poll_deliver(config, buf, chan)) {
-                       wake_up_interruptible(&buf->read_wait);
-                       wake_up_interruptible(&chan->read_wait);
+                       irq_work_queue(&buf->wakeup_pending);
+                       irq_work_queue(&chan->wakeup_pending);
                }
 
        }
index 15d7c75c15862b8ec4f47771dd98dc1eb8291c30..69abb84f20667de7908b657a99b0bb0472700ed5 100644 (file)
@@ -105,6 +105,24 @@ restart:
 }
 EXPORT_SYMBOL_GPL(lib_ring_buffer_get_next_record);
 
+void lib_ring_buffer_put_current_record(struct lib_ring_buffer *buf)
+{
+       struct lib_ring_buffer_iter *iter;
+
+       if (!buf)
+               return;
+       iter = &buf->iter;
+       if (iter->state != ITER_NEXT_RECORD)
+               return;
+       iter->read_offset += iter->payload_len;
+       iter->state = ITER_TEST_RECORD;
+       if (iter->read_offset - iter->consumed >= iter->data_size) {
+               lib_ring_buffer_put_next_subbuf(buf);
+               iter->state = ITER_GET_SUBBUF;
+       }
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_put_current_record);
+
 static int buf_is_higher(void *a, void *b)
 {
        struct lib_ring_buffer *bufa = a;
@@ -696,12 +714,14 @@ skip_get_next:
                        return -EFAULT;
                }
                read_count += copy_len;
-       };
-       return read_count;
+       }
+       goto put_record;
 
 nodata:
        *ppos = 0;
        chan->iter.len_left = 0;
+put_record:
+       lib_ring_buffer_put_current_record(buf);
        return read_count;
 }
 
index c3721b93e428ce7d48bf94f40ff9ebd0a6585ef4..56b468fe148b110f9394b6a05142ad143fb24263 100644 (file)
@@ -44,6 +44,7 @@
 #include <lttng/tracer.h>
 #include <lttng/tp-mempool.h>
 #include <ringbuffer/frontend_types.h>
+#include <ringbuffer/iterator.h>
 
 /*
  * This is LTTng's own personal way to create a system call as an external
@@ -59,6 +60,7 @@ static const struct file_operations lttng_proc_ops;
 #endif
 
 static const struct file_operations lttng_session_fops;
+static const struct file_operations lttng_trigger_group_fops;
 static const struct file_operations lttng_channel_fops;
 static const struct file_operations lttng_metadata_fops;
 static const struct file_operations lttng_event_fops;
@@ -105,6 +107,52 @@ fd_error:
        return ret;
 }
 
+static
+void trigger_send_notification_work_wakeup(struct irq_work *entry)
+{
+       struct lttng_trigger_group *trigger_group = container_of(entry,
+                       struct lttng_trigger_group, wakeup_pending);
+       wake_up_interruptible(&trigger_group->read_wait);
+}
+
+static
+int lttng_abi_create_trigger_group(void)
+{
+       struct lttng_trigger_group *trigger_group;
+       struct file *trigger_group_file;
+       int trigger_group_fd, ret;
+
+       trigger_group = lttng_trigger_group_create();
+       if (!trigger_group)
+               return -ENOMEM;
+
+       trigger_group_fd = lttng_get_unused_fd();
+       if (trigger_group_fd < 0) {
+               ret = trigger_group_fd;
+               goto fd_error;
+       }
+       trigger_group_file = anon_inode_getfile("[lttng_trigger_group]",
+                                         &lttng_trigger_group_fops,
+                                         trigger_group, O_RDWR);
+       if (IS_ERR(trigger_group_file)) {
+               ret = PTR_ERR(trigger_group_file);
+               goto file_error;
+       }
+
+       trigger_group->file = trigger_group_file;
+       init_waitqueue_head(&trigger_group->read_wait);
+       init_irq_work(&trigger_group->wakeup_pending,
+                     trigger_send_notification_work_wakeup);
+       fd_install(trigger_group_fd, trigger_group_file);
+       return trigger_group_fd;
+
+file_error:
+       put_unused_fd(trigger_group_fd);
+fd_error:
+       lttng_trigger_group_destroy(trigger_group);
+       return ret;
+}
+
 static
 int lttng_abi_tracepoint_list(void)
 {
@@ -304,6 +352,8 @@ long lttng_abi_add_context(struct file *file,
  *             Returns after all previously running probes have completed
  *     LTTNG_KERNEL_TRACER_ABI_VERSION
  *             Returns the LTTng kernel tracer ABI version
+ *     LTTNG_KERNEL_TRIGGER_GROUP_CREATE
+ *             Returns a LTTng trigger group file descriptor
  *
  * The returned session will be deleted when its file descriptor is closed.
  */
@@ -314,6 +364,8 @@ long lttng_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        case LTTNG_KERNEL_OLD_SESSION:
        case LTTNG_KERNEL_SESSION:
                return lttng_abi_create_session();
+       case LTTNG_KERNEL_TRIGGER_GROUP_CREATE:
+               return lttng_abi_create_trigger_group();
        case LTTNG_KERNEL_OLD_TRACER_VERSION:
        {
                struct lttng_kernel_tracer_version v;
@@ -762,6 +814,229 @@ static const struct file_operations lttng_session_fops = {
 #endif
 };
 
+/*
+ * When encountering an empty buffer, flush the current sub-buffer if
+ * non-empty and retry (if new data is available to read after the flush).
+ */
+static
+ssize_t lttng_trigger_group_notif_read(struct file *filp, char __user *user_buf,
+               size_t count, loff_t *ppos)
+{
+       struct lttng_trigger_group *trigger_group = filp->private_data;
+       struct channel *chan = trigger_group->chan;
+       struct lib_ring_buffer *buf = trigger_group->buf;
+       ssize_t read_count = 0, len;
+       size_t read_offset;
+
+       might_sleep();
+       if (!lttng_access_ok(VERIFY_WRITE, user_buf, count))
+               return -EFAULT;
+
+       /* Finish copy of previous record */
+       if (*ppos != 0) {
+               if (read_count < count) {
+                       len = chan->iter.len_left;
+                       read_offset = *ppos;
+                       goto skip_get_next;
+               }
+       }
+
+       while (read_count < count) {
+               size_t copy_len, space_left;
+
+               len = lib_ring_buffer_get_next_record(chan, buf);
+len_test:
+               if (len < 0) {
+                       /*
+                        * Check if buffer is finalized (end of file).
+                        */
+                       if (len == -ENODATA) {
+                               /* A 0 read_count will tell about end of file */
+                               goto nodata;
+                       }
+                       if (filp->f_flags & O_NONBLOCK) {
+                               if (!read_count)
+                                       read_count = -EAGAIN;
+                               goto nodata;
+                       } else {
+                               int error;
+
+                               /*
+                                * No data available at the moment, return what
+                                * we got.
+                                */
+                               if (read_count)
+                                       goto nodata;
+
+                               /*
+                                * Wait for returned len to be >= 0 or -ENODATA.
+                                */
+                               error = wait_event_interruptible(
+                                         trigger_group->read_wait,
+                                         ((len = lib_ring_buffer_get_next_record(
+                                                 chan, buf)), len != -EAGAIN));
+                               CHAN_WARN_ON(chan, len == -EBUSY);
+                               if (error) {
+                                       read_count = error;
+                                       goto nodata;
+                               }
+                               CHAN_WARN_ON(chan, len < 0 && len != -ENODATA);
+                               goto len_test;
+                       }
+               }
+               read_offset = buf->iter.read_offset;
+skip_get_next:
+               space_left = count - read_count;
+               if (len <= space_left) {
+                       copy_len = len;
+                       chan->iter.len_left = 0;
+                       *ppos = 0;
+               } else {
+                       copy_len = space_left;
+                       chan->iter.len_left = len - copy_len;
+                       *ppos = read_offset + copy_len;
+               }
+               if (__lib_ring_buffer_copy_to_user(&buf->backend, read_offset,
+                                              &user_buf[read_count],
+                                              copy_len)) {
+                       /*
+                        * Leave the len_left and ppos values at their current
+                        * state, as we currently have a valid event to read.
+                        */
+                       return -EFAULT;
+               }
+               read_count += copy_len;
+       }
+       goto put_record;
+
+nodata:
+       *ppos = 0;
+       chan->iter.len_left = 0;
+
+put_record:
+       lib_ring_buffer_put_current_record(buf);
+       return read_count;
+}
+
+/*
+ * If the ring buffer is non-empty (even just a partial subbuffer), report that
+ * data is available. Perform a ring buffer flush if we encounter a
+ * non-empty ring buffer which does not have any consumable subbuffer available.
+ */
+static
+unsigned int lttng_trigger_group_notif_poll(struct file *filp,
+               poll_table *wait)
+{
+       unsigned int mask = 0;
+       struct lttng_trigger_group *trigger_group = filp->private_data;
+       struct channel *chan = trigger_group->chan;
+       struct lib_ring_buffer *buf = trigger_group->buf;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
+       int finalized, disabled;
+       unsigned long consumed, offset;
+       size_t subbuffer_header_size = config->cb.subbuffer_header_size();
+
+       if (filp->f_mode & FMODE_READ) {
+               poll_wait_set_exclusive(wait);
+               poll_wait(filp, &trigger_group->read_wait, wait);
+
+               finalized = lib_ring_buffer_is_finalized(config, buf);
+               disabled = lib_ring_buffer_channel_is_disabled(chan);
+
+               /*
+                * lib_ring_buffer_is_finalized() contains a smp_rmb() ordering
+                * finalized load before offsets loads.
+                */
+               WARN_ON(atomic_long_read(&buf->active_readers) != 1);
+retry:
+               if (disabled)
+                       return POLLERR;
+
+               offset = lib_ring_buffer_get_offset(config, buf);
+               consumed = lib_ring_buffer_get_consumed(config, buf);
+
+               /*
+                * If there is no buffer available to consume.
+                */
+               if (subbuf_trunc(offset, chan) - subbuf_trunc(consumed, chan) == 0) {
+                       /*
+                        * If there is a non-empty subbuffer, flush and try again.
+                        */
+                       if (subbuf_offset(offset, chan) > subbuffer_header_size) {
+                               lib_ring_buffer_switch_remote(buf);
+                               goto retry;
+                       }
+
+                       if (finalized)
+                               return POLLHUP;
+                       else {
+                               /*
+                                * The memory barriers
+                                * __wait_event()/wake_up_interruptible() take
+                                * care of "raw_spin_is_locked" memory ordering.
+                                */
+                               if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
+                                       goto retry;
+                               else
+                                       return 0;
+                       }
+               } else {
+                       if (subbuf_trunc(offset, chan) - subbuf_trunc(consumed, chan)
+                                       >= chan->backend.buf_size)
+                               return POLLPRI | POLLRDBAND;
+                       else
+                               return POLLIN | POLLRDNORM;
+               }
+       }
+
+       return mask;
+}
+
+/**
+ *     lttng_trigger_group_notif_open - trigger ring buffer open file operation
+ *     @inode: opened inode
+ *     @file: opened file
+ *
+ *     Open implementation. Makes sure only one open instance of a buffer is
+ *     done at a given moment.
+ */
+static int lttng_trigger_group_notif_open(struct inode *inode, struct file *file)
+{
+       struct lttng_trigger_group *trigger_group = inode->i_private;
+       struct lib_ring_buffer *buf = trigger_group->buf;
+
+       file->private_data = trigger_group;
+       return lib_ring_buffer_open(inode, file, buf);
+}
+
+/**
+ *     lttng_trigger_group_notif_release - trigger ring buffer release file operation
+ *     @inode: opened inode
+ *     @file: opened file
+ *
+ *     Release implementation.
+ */
+static int lttng_trigger_group_notif_release(struct inode *inode, struct file *file)
+{
+       struct lttng_trigger_group *trigger_group = file->private_data;
+       struct lib_ring_buffer *buf = trigger_group->buf;
+       int ret;
+
+       ret = lib_ring_buffer_release(inode, file, buf);
+       if (ret)
+               return ret;
+       fput(trigger_group->file);
+       return 0;
+}
+
+static const struct file_operations lttng_trigger_group_notif_fops = {
+       .owner = THIS_MODULE,
+       .open = lttng_trigger_group_notif_open,
+       .release = lttng_trigger_group_notif_release,
+       .read = lttng_trigger_group_notif_read,
+       .poll = lttng_trigger_group_notif_poll,
+};
+
 /**
  *     lttng_metadata_ring_buffer_poll - LTTng ring buffer poll file operation
  *     @filp: the file
@@ -1134,7 +1409,7 @@ const struct file_operations lttng_metadata_ring_buffer_file_operations = {
 
 static
 int lttng_abi_create_stream_fd(struct file *channel_file, void *stream_priv,
-               const struct file_operations *fops)
+               const struct file_operations *fops, const char *name)
 {
        int stream_fd, ret;
        struct file *stream_file;
@@ -1144,8 +1419,7 @@ int lttng_abi_create_stream_fd(struct file *channel_file, void *stream_priv,
                ret = stream_fd;
                goto fd_error;
        }
-       stream_file = anon_inode_getfile("[lttng_stream]", fops,
-                       stream_priv, O_RDWR);
+       stream_file = anon_inode_getfile(name, fops, stream_priv, O_RDWR);
        if (IS_ERR(stream_file)) {
                ret = PTR_ERR(stream_file);
                goto file_error;
@@ -1184,7 +1458,8 @@ int lttng_abi_open_stream(struct file *channel_file)
 
        stream_priv = buf;
        ret = lttng_abi_create_stream_fd(channel_file, stream_priv,
-                       &lttng_stream_ring_buffer_file_operations);
+                       &lttng_stream_ring_buffer_file_operations,
+                       "[lttng_stream]");
        if (ret < 0)
                goto fd_error;
 
@@ -1239,7 +1514,8 @@ int lttng_abi_open_metadata_stream(struct file *channel_file)
        }
 
        ret = lttng_abi_create_stream_fd(channel_file, stream_priv,
-                       &lttng_metadata_ring_buffer_file_operations);
+                       &lttng_metadata_ring_buffer_file_operations,
+                       "[lttng_metadata_stream]");
        if (ret < 0)
                goto fd_error;
 
@@ -1258,6 +1534,41 @@ nomem:
        return ret;
 }
 
+/*
+ * Create the notification stream fd for a trigger group: open a ring
+ * buffer reader on the group's channel, pin the trigger group file, and
+ * install an anonymous-inode fd using lttng_trigger_group_notif_fops.
+ * Returns the new fd number, or a negative errno.
+ */
+static
+int lttng_abi_open_trigger_group_stream(struct file *notif_file)
+{
+       struct lttng_trigger_group *trigger_group = notif_file->private_data;
+       struct channel *chan = trigger_group->chan;
+       struct lib_ring_buffer *buf;
+       int ret;
+       void *stream_priv;
+
+       buf = trigger_group->ops->buffer_read_open(chan);
+       if (!buf)
+               return -ENOENT;
+
+       /* The trigger notification fd holds a reference on the trigger group */
+       if (!atomic_long_add_unless(&notif_file->f_count, 1, LONG_MAX)) {
+               ret = -EOVERFLOW;
+               goto refcount_error;
+       }
+       /*
+        * NOTE(review): trigger_group->buf is assigned before the fd is
+        * created and is not cleared on the error paths below, leaving a
+        * stale pointer to a closed reader — confirm no other code reads
+        * it after failure.
+        */
+       trigger_group->buf = buf;
+       stream_priv = trigger_group;
+       ret = lttng_abi_create_stream_fd(notif_file, stream_priv,
+                       &lttng_trigger_group_notif_fops,
+                       "[lttng_trigger_stream]");
+       if (ret < 0)
+               goto fd_error;
+
+       return ret;
+
+fd_error:
+       atomic_long_dec(&notif_file->f_count);
+refcount_error:
+       trigger_group->ops->buffer_read_close(buf);
+       return ret;
+}
+
 static
 int lttng_abi_create_event(struct file *channel_file,
                           struct lttng_kernel_event *event_param)
@@ -1301,20 +1612,20 @@ int lttng_abi_create_event(struct file *channel_file,
        }
        if (event_param->instrumentation == LTTNG_KERNEL_TRACEPOINT
                        || event_param->instrumentation == LTTNG_KERNEL_SYSCALL) {
-               struct lttng_enabler *enabler;
+               struct lttng_event_enabler *event_enabler;
 
                if (strutils_is_star_glob_pattern(event_param->name)) {
                        /*
                         * If the event name is a star globbing pattern,
                         * we create the special star globbing enabler.
                         */
-                       enabler = lttng_enabler_create(LTTNG_ENABLER_STAR_GLOB,
+                       event_enabler = lttng_event_enabler_create(LTTNG_ENABLER_FORMAT_STAR_GLOB,
                                event_param, channel);
                } else {
-                       enabler = lttng_enabler_create(LTTNG_ENABLER_NAME,
+                       event_enabler = lttng_event_enabler_create(LTTNG_ENABLER_FORMAT_NAME,
                                event_param, channel);
                }
-               priv = enabler;
+               priv = event_enabler;
        } else {
                struct lttng_event *event;
 
@@ -1346,6 +1657,260 @@ fd_error:
        return ret;
 }
 
+/*
+ * lttng_trigger_ioctl - dispatch ioctls on a trigger fd
+ *
+ * The fd's private_data begins with an enum lttng_event_type tag, so the
+ * same file_operations serve both an instantiated trigger
+ * (LTTNG_TYPE_EVENT) and a trigger enabler (LTTNG_TYPE_ENABLER).
+ * Filter and capture bytecode may only be attached to an enabler;
+ * callsites may only be added to a trigger.
+ */
+static
+long lttng_trigger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       struct lttng_trigger *trigger;
+       struct lttng_trigger_enabler *trigger_enabler;
+       enum lttng_event_type *evtype = file->private_data;
+
+       switch (cmd) {
+       case LTTNG_KERNEL_ENABLE:
+               switch (*evtype) {
+               case LTTNG_TYPE_EVENT:
+                       trigger = file->private_data;
+                       return lttng_trigger_enable(trigger);
+               case LTTNG_TYPE_ENABLER:
+                       trigger_enabler = file->private_data;
+                       return lttng_trigger_enabler_enable(trigger_enabler);
+               default:
+                       WARN_ON_ONCE(1);
+                       return -ENOSYS;
+               }
+       case LTTNG_KERNEL_DISABLE:
+               switch (*evtype) {
+               case LTTNG_TYPE_EVENT:
+                       trigger = file->private_data;
+                       return lttng_trigger_disable(trigger);
+               case LTTNG_TYPE_ENABLER:
+                       trigger_enabler = file->private_data;
+                       return lttng_trigger_enabler_disable(trigger_enabler);
+               default:
+                       WARN_ON_ONCE(1);
+                       return -ENOSYS;
+               }
+       case LTTNG_KERNEL_FILTER:
+               switch (*evtype) {
+               case LTTNG_TYPE_EVENT:
+                       /* Bytecode attaches to enablers, not live triggers. */
+                       return -EINVAL;
+               case LTTNG_TYPE_ENABLER:
+                       trigger_enabler = file->private_data;
+                       return lttng_trigger_enabler_attach_filter_bytecode(
+                               trigger_enabler,
+                               (struct lttng_kernel_filter_bytecode __user *) arg);
+               default:
+                       WARN_ON_ONCE(1);
+                       return -ENOSYS;
+               }
+
+       case LTTNG_KERNEL_CAPTURE:
+               switch (*evtype) {
+               case LTTNG_TYPE_EVENT:
+                       /* Bytecode attaches to enablers, not live triggers. */
+                       return -EINVAL;
+               case LTTNG_TYPE_ENABLER:
+                       trigger_enabler = file->private_data;
+                       return lttng_trigger_enabler_attach_capture_bytecode(
+                               trigger_enabler,
+                               (struct lttng_kernel_capture_bytecode __user *) arg);
+               default:
+                       WARN_ON_ONCE(1);
+                       return -ENOSYS;
+               }
+       case LTTNG_KERNEL_ADD_CALLSITE:
+               switch (*evtype) {
+               case LTTNG_TYPE_EVENT:
+                       trigger = file->private_data;
+                       return lttng_trigger_add_callsite(trigger,
+                               (struct lttng_kernel_event_callsite __user *) arg);
+               case LTTNG_TYPE_ENABLER:
+                       return -EINVAL;
+               default:
+                       WARN_ON_ONCE(1);
+                       return -ENOSYS;
+               }
+       default:
+               return -ENOIOCTLCMD;
+       }
+}
+
+/*
+ * Release a trigger fd: drop the trigger group file reference taken at
+ * creation time (the trigger/enabler objects themselves are torn down
+ * with the group).  A NULL private_data means setup never completed.
+ */
+static
+int lttng_trigger_release(struct inode *inode, struct file *file)
+{
+       struct lttng_trigger *trigger;
+       struct lttng_trigger_enabler *trigger_enabler;
+       enum lttng_event_type *evtype = file->private_data;
+
+       if (!evtype)
+               return 0;
+
+       switch (*evtype) {
+       case LTTNG_TYPE_EVENT:
+               trigger = file->private_data;
+               if (trigger)
+                       fput(trigger->group->file);
+               break;
+       case LTTNG_TYPE_ENABLER:
+               trigger_enabler = file->private_data;
+               if (trigger_enabler)
+                       fput(trigger_enabler->group->file);
+               break;
+       default:
+               WARN_ON_ONCE(1);
+               break;
+       }
+
+       return 0;
+}
+
+/* File operations for a trigger (or trigger enabler) fd. */
+static const struct file_operations lttng_trigger_fops = {
+       .owner = THIS_MODULE,
+       .release = lttng_trigger_release,
+       .unlocked_ioctl = lttng_trigger_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = lttng_trigger_ioctl,
+#endif
+};
+
+/*
+ * Create a trigger (or trigger enabler) fd inside a trigger group.
+ * Tracepoint and syscall instrumentation yield an enabler (name may be a
+ * star-glob pattern); other supported instrumentation yields a trigger
+ * directly.  Returns the new fd number, or a negative errno.
+ */
+static
+int lttng_abi_create_trigger(struct file *trigger_group_file,
+               struct lttng_kernel_trigger *trigger_param)
+{
+       struct lttng_trigger_group *trigger_group = trigger_group_file->private_data;
+       int trigger_fd, ret;
+       struct file *trigger_file;
+       void *priv;
+
+       switch (trigger_param->instrumentation) {
+       case LTTNG_KERNEL_TRACEPOINT:
+       case LTTNG_KERNEL_UPROBE:
+               break;
+       case LTTNG_KERNEL_KPROBE:
+               /* Force NUL termination of user-supplied symbol name. */
+               trigger_param->u.kprobe.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
+               break;
+       case LTTNG_KERNEL_SYSCALL:
+               break;
+       case LTTNG_KERNEL_KRETPROBE:
+               /* Placing a trigger on kretprobe is not supported. */
+       case LTTNG_KERNEL_FUNCTION:
+       case LTTNG_KERNEL_NOOP:
+       default:
+               ret = -EINVAL;
+               goto inval_instr;
+       }
+
+       trigger_param->name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
+
+       trigger_fd = lttng_get_unused_fd();
+       if (trigger_fd < 0) {
+               ret = trigger_fd;
+               goto fd_error;
+       }
+
+       trigger_file = anon_inode_getfile("[lttng_trigger]",
+                                       &lttng_trigger_fops,
+                                       NULL, O_RDWR);
+       if (IS_ERR(trigger_file)) {
+               ret = PTR_ERR(trigger_file);
+               goto file_error;
+       }
+
+       /* The trigger holds a reference on the trigger group. */
+       if (!atomic_long_add_unless(&trigger_group_file->f_count, 1, LONG_MAX)) {
+               ret = -EOVERFLOW;
+               goto refcount_error;
+       }
+
+       if (trigger_param->instrumentation == LTTNG_KERNEL_TRACEPOINT
+                       || trigger_param->instrumentation == LTTNG_KERNEL_SYSCALL) {
+               struct lttng_trigger_enabler *enabler;
+
+               if (strutils_is_star_glob_pattern(trigger_param->name)) {
+                       /*
+                        * If the event name is a star globbing pattern,
+                        * we create the special star globbing enabler.
+                        */
+                       enabler = lttng_trigger_enabler_create(trigger_group,
+                               LTTNG_ENABLER_FORMAT_STAR_GLOB, trigger_param);
+               } else {
+                       enabler = lttng_trigger_enabler_create(trigger_group,
+                               LTTNG_ENABLER_FORMAT_NAME, trigger_param);
+               }
+               /*
+                * NOTE(review): enabler is stored unchecked; if
+                * lttng_trigger_enabler_create() can return NULL, the fd
+                * ends up with NULL private_data and the group file
+                * reference taken above is leaked on release — confirm.
+                */
+               priv = enabler;
+       } else {
+               struct lttng_trigger *trigger;
+
+               /*
+                * We tolerate no failure path after trigger creation. It
+                * will stay invariant for the rest of the session.
+                */
+               trigger = lttng_trigger_create(NULL, trigger_param->id,
+                       trigger_group, trigger_param, NULL,
+                       trigger_param->instrumentation);
+               /*
+                * NOTE(review): WARN_ON_ONCE(!trigger) implies NULL is
+                * possible, yet only IS_ERR() is handled below — a NULL
+                * return would be installed as private_data.  Verify the
+                * error convention of lttng_trigger_create().
+                */
+               WARN_ON_ONCE(!trigger);
+               if (IS_ERR(trigger)) {
+                       ret = PTR_ERR(trigger);
+                       goto trigger_error;
+               }
+               priv = trigger;
+       }
+       trigger_file->private_data = priv;
+       fd_install(trigger_fd, trigger_file);
+       return trigger_fd;
+
+trigger_error:
+       atomic_long_dec(&trigger_group_file->f_count);
+refcount_error:
+       fput(trigger_file);
+file_error:
+       put_unused_fd(trigger_fd);
+fd_error:
+inval_instr:
+       return ret;
+}
+
+/*
+ * lttng_trigger_group_ioctl - dispatch ioctls on a trigger group fd
+ *
+ * LTTNG_KERNEL_TRIGGER_GROUP_NOTIFICATION_FD creates the notification
+ * stream fd; LTTNG_KERNEL_TRIGGER_CREATE copies the trigger description
+ * from user space and creates a trigger/enabler fd in this group.
+ * Returns an fd number on success, or a negative errno.
+ */
+static
+long lttng_trigger_group_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       switch (cmd) {
+       case LTTNG_KERNEL_TRIGGER_GROUP_NOTIFICATION_FD:
+       {
+               return lttng_abi_open_trigger_group_stream(file);
+       }
+       case LTTNG_KERNEL_TRIGGER_CREATE:
+       {
+               struct lttng_kernel_trigger utrigger_param;
+
+               if (copy_from_user(&utrigger_param,
+                               (struct lttng_kernel_trigger __user *) arg,
+                               sizeof(utrigger_param)))
+                       return -EFAULT;
+               return lttng_abi_create_trigger(file, &utrigger_param);
+       }
+       default:
+               return -ENOIOCTLCMD;
+       }
+       /* Unreachable "return 0;" after the exhaustive switch removed. */
+}
+
+/*
+ * Release a trigger group fd: tear down the whole group (channel,
+ * triggers, enablers).  NULL private_data means setup never completed.
+ */
+static
+int lttng_trigger_group_release(struct inode *inode, struct file *file)
+{
+       struct lttng_trigger_group *trigger_group = file->private_data;
+
+       if (trigger_group)
+               lttng_trigger_group_destroy(trigger_group);
+       return 0;
+}
+
+/* File operations for a trigger group fd. */
+static const struct file_operations lttng_trigger_group_fops = {
+       .owner = THIS_MODULE,
+       .release = lttng_trigger_group_release,
+       .unlocked_ioctl = lttng_trigger_group_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = lttng_trigger_group_ioctl,
+#endif
+};
+
 /**
  *     lttng_channel_ioctl - lttng syscall through ioctl
  *
@@ -1643,7 +2208,7 @@ static
 long lttng_event_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
        struct lttng_event *event;
-       struct lttng_enabler *enabler;
+       struct lttng_event_enabler *event_enabler;
        enum lttng_event_type *evtype = file->private_data;
 
        switch (cmd) {
@@ -1664,8 +2229,8 @@ long lttng_event_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                        event = file->private_data;
                        return lttng_event_enable(event);
                case LTTNG_TYPE_ENABLER:
-                       enabler = file->private_data;
-                       return lttng_enabler_enable(enabler);
+                       event_enabler = file->private_data;
+                       return lttng_event_enabler_enable(event_enabler);
                default:
                        WARN_ON_ONCE(1);
                        return -ENOSYS;
@@ -1677,8 +2242,8 @@ long lttng_event_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                        event = file->private_data;
                        return lttng_event_disable(event);
                case LTTNG_TYPE_ENABLER:
-                       enabler = file->private_data;
-                       return lttng_enabler_disable(enabler);
+                       event_enabler = file->private_data;
+                       return lttng_event_enabler_disable(event_enabler);
                default:
                        WARN_ON_ONCE(1);
                        return -ENOSYS;
@@ -1689,8 +2254,9 @@ long lttng_event_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                        return -EINVAL;
                case LTTNG_TYPE_ENABLER:
                {
-                       enabler = file->private_data;
-                       return lttng_enabler_attach_bytecode(enabler,
+                       event_enabler = file->private_data;
+                       return lttng_event_enabler_attach_filter_bytecode(
+                               event_enabler,
                                (struct lttng_kernel_filter_bytecode __user *) arg);
                }
                default:
@@ -1718,7 +2284,7 @@ static
 int lttng_event_release(struct inode *inode, struct file *file)
 {
        struct lttng_event *event;
-       struct lttng_enabler *enabler;
+       struct lttng_event_enabler *event_enabler;
        enum lttng_event_type *evtype = file->private_data;
 
        if (!evtype)
@@ -1731,9 +2297,9 @@ int lttng_event_release(struct inode *inode, struct file *file)
                        fput(event->chan->file);
                break;
        case LTTNG_TYPE_ENABLER:
-               enabler = file->private_data;
-               if (enabler)
-                       fput(enabler->chan->file);
+               event_enabler = file->private_data;
+               if (event_enabler)
+                       fput(event_enabler->chan->file);
                break;
        default:
                WARN_ON_ONCE(1);
diff --git a/src/lttng-bytecode-interpreter.c b/src/lttng-bytecode-interpreter.c
new file mode 100644 (file)
index 0000000..8d40611
--- /dev/null
@@ -0,0 +1,1813 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * lttng-bytecode-interpreter.c
+ *
+ * LTTng modules bytecode interpreter.
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <wrapper/uaccess.h>
+#include <wrapper/frame.h>
+#include <wrapper/types.h>
+#include <linux/swab.h>
+
+#include <lttng/lttng-bytecode.h>
+#include <lttng/string-utils.h>
+
+/*
+ * get_char should be called with page fault handler disabled if it is expected
+ * to handle user-space read.
+ */
+static
+char get_char(struct estack_entry *reg, size_t offset)
+{
+       /* Out-of-range offsets read as end of string. */
+       if (unlikely(offset >= reg->u.s.seq_len))
+               return '\0';
+       if (reg->u.s.user) {
+               char c;
+
+               /* Handle invalid access as end of string. */
+               if (unlikely(!lttng_access_ok(VERIFY_READ,
+                               reg->u.s.user_str + offset,
+                               sizeof(c))))
+                       return '\0';
+               /* Handle fault (nonzero return value) as end of string. */
+               if (unlikely(__copy_from_user_inatomic(&c,
+                               reg->u.s.user_str + offset,
+                               sizeof(c))))
+                       return '\0';
+               return c;
+       } else {
+               /* Kernel-space string: direct read. */
+               return reg->u.s.str[offset];
+       }
+}
+
+/*
+ * Classify the current character of a string literal, resolving escapes.
+ * On '\\', the offset is advanced past the backslash and *c is replaced
+ * by the escaped character (the caller's offset stays advanced even for
+ * an unknown escape).
+ *
+ * -1: wildcard found.
+ * -2: unknown escape char.
+ * 0: normal char.
+ */
+static
+int parse_char(struct estack_entry *reg, char *c, size_t *offset)
+{
+       switch (*c) {
+       case '\\':
+               (*offset)++;
+               *c = get_char(reg, *offset);
+               switch (*c) {
+               case '\\':
+               case '*':
+                       return 0;
+               default:
+                       return -2;
+               }
+       case '*':
+               return -1;
+       default:
+               return 0;
+       }
+}
+
+/* Adapter for the strutils char-callback API: data is an estack_entry. */
+static
+char get_char_at_cb(size_t at, void *data)
+{
+       return get_char(data, at);
+}
+
+/*
+ * Star-glob match between the two strings on top of the estack.  The
+ * side whose literal type is STAR_GLOB is the pattern; the other is the
+ * candidate.  Returns 0 on match, non-zero otherwise (strcmp-style),
+ * hence the negation of the strutils boolean result.  User-space reads
+ * are wrapped in set_fs(KERNEL_DS)/pagefault_disable().
+ * cmp_type is currently unused (kept for symmetry with stack_strcmp).
+ */
+static
+int stack_star_glob_match(struct estack *stack, int top, const char *cmp_type)
+{
+       bool has_user = false;
+       mm_segment_t old_fs;
+       int result;
+       struct estack_entry *pattern_reg;
+       struct estack_entry *candidate_reg;
+
+       if (estack_bx(stack, top)->u.s.user
+                       || estack_ax(stack, top)->u.s.user) {
+               has_user = true;
+               old_fs = get_fs();
+               set_fs(KERNEL_DS);
+               pagefault_disable();
+       }
+
+       /* Find out which side is the pattern vs. the candidate. */
+       if (estack_ax(stack, top)->u.s.literal_type == ESTACK_STRING_LITERAL_TYPE_STAR_GLOB) {
+               pattern_reg = estack_ax(stack, top);
+               candidate_reg = estack_bx(stack, top);
+       } else {
+               pattern_reg = estack_bx(stack, top);
+               candidate_reg = estack_ax(stack, top);
+       }
+
+       /* Perform the match operation. */
+       result = !strutils_star_glob_match_char_cb(get_char_at_cb,
+               pattern_reg, get_char_at_cb, candidate_reg);
+       if (has_user) {
+               pagefault_enable();
+               set_fs(old_fs);
+       }
+
+       return result;
+}
+
+/*
+ * strcmp-like comparison of the two strings on top of the estack
+ * (bx vs ax): returns <0, 0 or >0.  Plain string literals get escape
+ * processing and '*' wildcard handling via parse_char(); a wildcard on
+ * either side makes the strings compare equal from that point on.
+ * User-space reads are wrapped in set_fs(KERNEL_DS)/pagefault_disable().
+ * cmp_type is currently unused.
+ */
+static
+int stack_strcmp(struct estack *stack, int top, const char *cmp_type)
+{
+       size_t offset_bx = 0, offset_ax = 0;
+       int diff, has_user = 0;
+       mm_segment_t old_fs;
+
+       if (estack_bx(stack, top)->u.s.user
+                       || estack_ax(stack, top)->u.s.user) {
+               has_user = 1;
+               old_fs = get_fs();
+               set_fs(KERNEL_DS);
+               pagefault_disable();
+       }
+
+       for (;;) {
+               int ret;
+               int escaped_r0 = 0;
+               char char_bx, char_ax;
+
+               char_bx = get_char(estack_bx(stack, top), offset_bx);
+               char_ax = get_char(estack_ax(stack, top), offset_ax);
+
+               if (unlikely(char_bx == '\0')) {
+                       if (char_ax == '\0') {
+                               /* Both strings ended together: equal. */
+                               diff = 0;
+                               break;
+                       } else {
+                               if (estack_ax(stack, top)->u.s.literal_type ==
+                                               ESTACK_STRING_LITERAL_TYPE_PLAIN) {
+                                       ret = parse_char(estack_ax(stack, top),
+                                               &char_ax, &offset_ax);
+                                       /* Trailing '*' matches the empty suffix. */
+                                       if (ret == -1) {
+                                               diff = 0;
+                                               break;
+                                       }
+                               }
+                               diff = -1;
+                               break;
+                       }
+               }
+               if (unlikely(char_ax == '\0')) {
+                       if (estack_bx(stack, top)->u.s.literal_type ==
+                                       ESTACK_STRING_LITERAL_TYPE_PLAIN) {
+                               ret = parse_char(estack_bx(stack, top),
+                                       &char_bx, &offset_bx);
+                               if (ret == -1) {
+                                       diff = 0;
+                                       break;
+                               }
+                       }
+                       diff = 1;
+                       break;
+               }
+               if (estack_bx(stack, top)->u.s.literal_type ==
+                               ESTACK_STRING_LITERAL_TYPE_PLAIN) {
+                       ret = parse_char(estack_bx(stack, top),
+                               &char_bx, &offset_bx);
+                       if (ret == -1) {
+                               diff = 0;
+                               break;
+                       } else if (ret == -2) {
+                               /* Remember bx had an escaped (literal) char. */
+                               escaped_r0 = 1;
+                       }
+                       /* else compare both char */
+               }
+               if (estack_ax(stack, top)->u.s.literal_type ==
+                               ESTACK_STRING_LITERAL_TYPE_PLAIN) {
+                       ret = parse_char(estack_ax(stack, top),
+                               &char_ax, &offset_ax);
+                       if (ret == -1) {
+                               diff = 0;
+                               break;
+                       } else if (ret == -2) {
+                               /* Escaped char on one side only: ordered mismatch. */
+                               if (!escaped_r0) {
+                                       diff = -1;
+                                       break;
+                               }
+                       } else {
+                               if (escaped_r0) {
+                                       diff = 1;
+                                       break;
+                               }
+                       }
+               } else {
+                       if (escaped_r0) {
+                               diff = 1;
+                               break;
+                       }
+               }
+               diff = char_bx - char_ax;
+               if (diff != 0)
+                       break;
+               offset_bx++;
+               offset_ax++;
+       }
+       if (has_user) {
+               pagefault_enable();
+               set_fs(old_fs);
+       }
+       return diff;
+}
+
+/* Filter interpreter stub: unconditionally discards the event. */
+uint64_t lttng_bytecode_filter_interpret_false(void *filter_data,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               const char *filter_stack_data)
+{
+       return LTTNG_INTERPRETER_DISCARD;
+}
+
+/* Capture interpreter stub: unconditionally discards (no output produced). */
+uint64_t lttng_bytecode_capture_interpret_false(void *filter_data,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               const char *capture_stack_data,
+               struct lttng_interpreter_output *output)
+{
+       return LTTNG_INTERPRETER_DISCARD;
+}
+
+#ifdef INTERPRETER_USE_SWITCH
+
+/*
+ * Fallback for compilers that do not support taking address of labels.
+ *
+ * NOTE(review): this variant indexes &bytecode->data[0] while the
+ * dispatch-table variant below uses &bytecode->code[0] — one of the two
+ * field names looks stale; confirm against struct bytecode_runtime
+ * (this path is rarely compiled, so a mismatch would go unnoticed).
+ */
+
+#define START_OP                                                       \
+       start_pc = &bytecode->data[0];                                  \
+       for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;    \
+                       pc = next_pc) {                                 \
+               dbg_printk("Executing op %s (%u)\n",                    \
+                       lttng_bytecode_print_op((unsigned int) *(bytecode_opcode_t *) pc), \
+                       (unsigned int) *(bytecode_opcode_t *) pc);      \
+               switch (*(bytecode_opcode_t *) pc)      {
+
+#define OP(name)       case name
+
+#define PO             break
+
+#define END_OP         }                                               \
+       }
+
+#else
+
+/*
+ * Dispatch-table based interpreter.
+ */
+
+/* Jump to the handler of the first opcode (bails out on empty bytecode). */
+#define START_OP                                                       \
+       start_pc = &bytecode->code[0];                                  \
+       pc = next_pc = start_pc;                                        \
+       if (unlikely(pc - start_pc >= bytecode->len))                   \
+               goto end;                                               \
+       goto *dispatch[*(bytecode_opcode_t *) pc];
+
+#define OP(name)                                                       \
+LABEL_##name
+
+/* End of an opcode handler: jump straight to the next opcode's handler. */
+#define PO                                                             \
+               pc = next_pc;                                           \
+               goto *dispatch[*(bytecode_opcode_t *) pc];
+
+#define END_OP
+
+#endif
+
+/*
+ * True when the estack register type holds an integer (signed or
+ * unsigned).  Fixed: the original tested REG_S64 twice, so REG_U64
+ * registers were never classified as integers; argument parenthesized
+ * per macro hygiene.
+ */
+#define IS_INTEGER_REGISTER(reg_type) \
+               ((reg_type) == REG_S64 || (reg_type) == REG_U64)
+
+/*
+ * Load a context field (by static-context index) into a load_ptr for the
+ * interpreter: integers and enums are materialized into ptr->u and
+ * pointed to by ptr->ptr; string-encoded arrays/sequences and strings
+ * expose the context value's string pointer.  Struct and variant
+ * contexts cannot be loaded.  Returns 0 or -EINVAL.
+ */
+static int context_get_index(struct lttng_probe_ctx *lttng_probe_ctx,
+               struct load_ptr *ptr,
+               uint32_t idx)
+{
+
+       struct lttng_ctx_field *ctx_field;
+       struct lttng_event_field *field;
+       union lttng_ctx_value v;
+
+       ctx_field = &lttng_static_ctx->fields[idx];
+       field = &ctx_field->event_field;
+       ptr->type = LOAD_OBJECT;
+       /* field is only used for types nested within variants. */
+       ptr->field = NULL;
+
+       switch (field->type.atype) {
+       case atype_integer:
+               ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+               if (field->type.u.integer.signedness) {
+                       ptr->object_type = OBJECT_TYPE_S64;
+                       ptr->u.s64 = v.s64;
+                       ptr->ptr = &ptr->u.s64;
+               } else {
+                       ptr->object_type = OBJECT_TYPE_U64;
+                       ptr->u.u64 = v.s64;     /* Cast. */
+                       ptr->ptr = &ptr->u.u64;
+               }
+               break;
+       case atype_enum_nestable:
+       {
+               const struct lttng_integer_type *itype =
+                       &field->type.u.enum_nestable.container_type->u.integer;
+
+               ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+               if (itype->signedness) {
+                       ptr->object_type = OBJECT_TYPE_SIGNED_ENUM;
+                       ptr->u.s64 = v.s64;
+                       ptr->ptr = &ptr->u.s64;
+               } else {
+                       ptr->object_type = OBJECT_TYPE_UNSIGNED_ENUM;
+                       ptr->u.u64 = v.s64;     /* Cast. */
+                       ptr->ptr = &ptr->u.u64;
+               }
+               break;
+       }
+       case atype_array_nestable:
+               if (!lttng_is_bytewise_integer(field->type.u.array_nestable.elem_type)) {
+                       printk(KERN_WARNING "Array nesting only supports integer types.\n");
+                       return -EINVAL;
+               }
+               if (field->type.u.array_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
+                       printk(KERN_WARNING "Only string arrays are supported for contexts.\n");
+                       return -EINVAL;
+               }
+               ptr->object_type = OBJECT_TYPE_STRING;
+               ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+               ptr->ptr = v.str;
+               break;
+       case atype_sequence_nestable:
+               if (!lttng_is_bytewise_integer(field->type.u.sequence_nestable.elem_type)) {
+                       printk(KERN_WARNING "Sequence nesting only supports integer types.\n");
+                       return -EINVAL;
+               }
+               if (field->type.u.sequence_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
+                       printk(KERN_WARNING "Only string sequences are supported for contexts.\n");
+                       return -EINVAL;
+               }
+               ptr->object_type = OBJECT_TYPE_STRING;
+               ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+               ptr->ptr = v.str;
+               break;
+       case atype_string:
+               ptr->object_type = OBJECT_TYPE_STRING;
+               ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+               ptr->ptr = v.str;
+               break;
+       case atype_struct_nestable:
+               printk(KERN_WARNING "Structure type cannot be loaded.\n");
+               return -EINVAL;
+       case atype_variant_nestable:
+               printk(KERN_WARNING "Variant type cannot be loaded.\n");
+               return -EINVAL;
+       default:
+               printk(KERN_WARNING "Unknown type: %d", (int) field->type.atype);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+/*
+ * Apply a GET_INDEX operation to the pointer on top of the estack,
+ * using the precomputed bytecode_get_index_data at runtime->data[index]:
+ * index into an array/sequence object, resolve a (app) context field, or
+ * offset into the event payload.  Leaves a REG_PTR on the stack top.
+ * Returns 0 or a negative errno.
+ */
+static int dynamic_get_index(struct lttng_probe_ctx *lttng_probe_ctx,
+               struct bytecode_runtime *runtime,
+               uint64_t index, struct estack_entry *stack_top)
+{
+       int ret;
+       const struct bytecode_get_index_data *gid;
+
+       gid = (const struct bytecode_get_index_data *) &runtime->data[index];
+       switch (stack_top->u.ptr.type) {
+       case LOAD_OBJECT:
+               switch (stack_top->u.ptr.object_type) {
+               case OBJECT_TYPE_ARRAY:
+               {
+                       const char *ptr;
+
+                       /* Bounds were validated at specialization time. */
+                       WARN_ON_ONCE(gid->offset >= gid->array_len);
+                       /* Skip count (unsigned long) */
+                       ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
+                       ptr = ptr + gid->offset;
+                       stack_top->u.ptr.ptr = ptr;
+                       stack_top->u.ptr.object_type = gid->elem.type;
+                       stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
+                       BUG_ON(stack_top->u.ptr.field->type.atype != atype_array_nestable);
+                       stack_top->u.ptr.field = NULL;
+                       break;
+               }
+               case OBJECT_TYPE_SEQUENCE:
+               {
+                       const char *ptr;
+                       size_t ptr_seq_len;
+
+                       ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
+                       /* Sequence length is only known at runtime: check here. */
+                       ptr_seq_len = *(unsigned long *) stack_top->u.ptr.ptr;
+                       if (gid->offset >= gid->elem.len * ptr_seq_len) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       ptr = ptr + gid->offset;
+                       stack_top->u.ptr.ptr = ptr;
+                       stack_top->u.ptr.object_type = gid->elem.type;
+                       stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
+                       BUG_ON(stack_top->u.ptr.field->type.atype != atype_sequence_nestable);
+                       stack_top->u.ptr.field = NULL;
+                       break;
+               }
+               case OBJECT_TYPE_STRUCT:
+                       printk(KERN_WARNING "Nested structures are not supported yet.\n");
+                       ret = -EINVAL;
+                       goto end;
+               case OBJECT_TYPE_VARIANT:
+               default:
+                       printk(KERN_WARNING "Unexpected get index type %d",
+                               (int) stack_top->u.ptr.object_type);
+                       ret = -EINVAL;
+                       goto end;
+               }
+               break;
+       case LOAD_ROOT_CONTEXT:
+       case LOAD_ROOT_APP_CONTEXT:     /* Fall-through */
+       {
+               ret = context_get_index(lttng_probe_ctx,
+                               &stack_top->u.ptr,
+                               gid->ctx_index);
+               if (ret) {
+                       goto end;
+               }
+               break;
+       }
+       case LOAD_ROOT_PAYLOAD:
+               stack_top->u.ptr.ptr += gid->offset;
+               /* Payload strings are stored as a pointer: dereference once. */
+               if (gid->elem.type == OBJECT_TYPE_STRING)
+                       stack_top->u.ptr.ptr = *(const char * const *) stack_top->u.ptr.ptr;
+               stack_top->u.ptr.object_type = gid->elem.type;
+               stack_top->u.ptr.type = LOAD_OBJECT;
+               stack_top->u.ptr.field = gid->field;
+               stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
+               break;
+       }
+
+       stack_top->type = REG_PTR;
+
+       return 0;
+
+end:
+       return ret;
+}
+
/*
 * Materialize the object referenced by the top-of-stack REG_PTR entry:
 * dereference the pointed-to field and replace the stack entry in place
 * with a typed scalar register (REG_S64, REG_U64 or REG_STRING).
 *
 * Only LOAD_OBJECT pointers can be loaded; root pointers must first be
 * resolved to a concrete field by a get-index/get-symbol operation.
 *
 * Returns 0 on success, -EINVAL when the object cannot be loaded as a
 * scalar (root pointer, NULL string, double, or nested compound type).
 */
static int dynamic_load_field(struct estack_entry *stack_top)
{
	int ret;

	switch (stack_top->u.ptr.type) {
	case LOAD_OBJECT:
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
	default:
		dbg_printk("Filter warning: cannot load root, missing field name.\n");
		ret = -EINVAL;
		goto end;
	}
	switch (stack_top->u.ptr.object_type) {
	case OBJECT_TYPE_S8:
		dbg_printk("op load field s8\n");
		stack_top->u.v = *(int8_t *) stack_top->u.ptr.ptr;
		stack_top->type = REG_S64;
		break;
	case OBJECT_TYPE_S16:
	{
		int16_t tmp;

		dbg_printk("op load field s16\n");
		tmp = *(int16_t *) stack_top->u.ptr.ptr;
		/* Byte-swap when the field byte order differs from the host. */
		if (stack_top->u.ptr.rev_bo)
			__swab16s(&tmp);
		stack_top->u.v = tmp;
		stack_top->type = REG_S64;
		break;
	}
	case OBJECT_TYPE_S32:
	{
		int32_t tmp;

		dbg_printk("op load field s32\n");
		tmp = *(int32_t *) stack_top->u.ptr.ptr;
		if (stack_top->u.ptr.rev_bo)
			__swab32s(&tmp);
		stack_top->u.v = tmp;
		stack_top->type = REG_S64;
		break;
	}
	case OBJECT_TYPE_S64:
	{
		int64_t tmp;

		dbg_printk("op load field s64\n");
		tmp = *(int64_t *) stack_top->u.ptr.ptr;
		if (stack_top->u.ptr.rev_bo)
			__swab64s(&tmp);
		stack_top->u.v = tmp;
		stack_top->type = REG_S64;
		break;
	}
	case OBJECT_TYPE_SIGNED_ENUM:
	{
		int64_t tmp;

		/* Signed enumerations are loaded as 64-bit signed integers. */
		dbg_printk("op load field signed enumeration\n");
		tmp = *(int64_t *) stack_top->u.ptr.ptr;
		if (stack_top->u.ptr.rev_bo)
			__swab64s(&tmp);
		stack_top->u.v = tmp;
		stack_top->type = REG_S64;
		break;
	}
	case OBJECT_TYPE_U8:
		dbg_printk("op load field u8\n");
		stack_top->u.v = *(uint8_t *) stack_top->u.ptr.ptr;
		stack_top->type = REG_U64;
		break;
	case OBJECT_TYPE_U16:
	{
		uint16_t tmp;

		dbg_printk("op load field u16\n");
		tmp = *(uint16_t *) stack_top->u.ptr.ptr;
		if (stack_top->u.ptr.rev_bo)
			__swab16s(&tmp);
		stack_top->u.v = tmp;
		stack_top->type = REG_U64;
		break;
	}
	case OBJECT_TYPE_U32:
	{
		uint32_t tmp;

		dbg_printk("op load field u32\n");
		tmp = *(uint32_t *) stack_top->u.ptr.ptr;
		if (stack_top->u.ptr.rev_bo)
			__swab32s(&tmp);
		stack_top->u.v = tmp;
		stack_top->type = REG_U64;
		break;
	}
	case OBJECT_TYPE_U64:
	{
		uint64_t tmp;

		dbg_printk("op load field u64\n");
		tmp = *(uint64_t *) stack_top->u.ptr.ptr;
		if (stack_top->u.ptr.rev_bo)
			__swab64s(&tmp);
		stack_top->u.v = tmp;
		stack_top->type = REG_U64;
		break;
	}
	case OBJECT_TYPE_UNSIGNED_ENUM:
	{
		uint64_t tmp;

		/* Unsigned enumerations are loaded as 64-bit unsigned integers. */
		dbg_printk("op load field unsigned enumeration\n");
		tmp = *(uint64_t *) stack_top->u.ptr.ptr;
		if (stack_top->u.ptr.rev_bo)
			__swab64s(&tmp);
		stack_top->u.v = tmp;
		stack_top->type = REG_U64;
		break;
	}
	case OBJECT_TYPE_STRING:
	{
		const char *str;

		dbg_printk("op load field string\n");
		str = (const char *) stack_top->u.ptr.ptr;
		stack_top->u.s.str = str;
		if (unlikely(!stack_top->u.s.str)) {
			dbg_printk("Filter warning: loading a NULL string.\n");
			ret = -EINVAL;
			goto end;
		}
		/* Length unknown: bounded only by the NUL terminator. */
		stack_top->u.s.seq_len = LTTNG_SIZE_MAX;
		stack_top->u.s.literal_type =
			ESTACK_STRING_LITERAL_TYPE_NONE;
		stack_top->type = REG_STRING;
		break;
	}
	case OBJECT_TYPE_STRING_SEQUENCE:
	{
		const char *ptr;

		/*
		 * Layout: an unsigned long element count followed by a
		 * pointer to the sequence data.
		 */
		dbg_printk("op load field string sequence\n");
		ptr = stack_top->u.ptr.ptr;
		stack_top->u.s.seq_len = *(unsigned long *) ptr;
		stack_top->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
		if (unlikely(!stack_top->u.s.str)) {
			dbg_printk("Filter warning: loading a NULL sequence.\n");
			ret = -EINVAL;
			goto end;
		}
		stack_top->u.s.literal_type =
			ESTACK_STRING_LITERAL_TYPE_NONE;
		stack_top->type = REG_STRING;
		break;
	}
	case OBJECT_TYPE_DYNAMIC:
		/*
		 * Dynamic types in context are looked up
		 * by context get index.
		 */
		ret = -EINVAL;
		goto end;
	case OBJECT_TYPE_DOUBLE:
		/* Floating point is not supported in the kernel interpreter. */
		ret = -EINVAL;
		goto end;
	case OBJECT_TYPE_SEQUENCE:
	case OBJECT_TYPE_ARRAY:
	case OBJECT_TYPE_STRUCT:
	case OBJECT_TYPE_VARIANT:
		printk(KERN_WARNING "Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
		ret = -EINVAL;
		goto end;
	}
	return 0;

end:
	return ret;
}
+
+static
+int lttng_bytecode_interpret_format_output(struct estack_entry *ax,
+               struct lttng_interpreter_output *output)
+{
+       int ret;
+
+again:
+       switch (ax->type) {
+       case REG_S64:
+               output->type = LTTNG_INTERPRETER_TYPE_S64;
+               output->u.s = ax->u.v;
+               break;
+       case REG_U64:
+               output->type = LTTNG_INTERPRETER_TYPE_U64;
+               output->u.u = (uint64_t) ax->u.v;
+               break;
+       case REG_STRING:
+               output->type = LTTNG_INTERPRETER_TYPE_STRING;
+               output->u.str.str = ax->u.s.str;
+               output->u.str.len = ax->u.s.seq_len;
+               break;
+       case REG_PTR:
+               switch (ax->u.ptr.object_type) {
+               case OBJECT_TYPE_S8:
+               case OBJECT_TYPE_S16:
+               case OBJECT_TYPE_S32:
+               case OBJECT_TYPE_S64:
+               case OBJECT_TYPE_U8:
+               case OBJECT_TYPE_U16:
+               case OBJECT_TYPE_U32:
+               case OBJECT_TYPE_U64:
+               case OBJECT_TYPE_DOUBLE:
+               case OBJECT_TYPE_STRING:
+               case OBJECT_TYPE_STRING_SEQUENCE:
+                       ret = dynamic_load_field(ax);
+                       if (ret)
+                               return ret;
+                       /* Retry after loading ptr into stack top. */
+                       goto again;
+               case OBJECT_TYPE_SEQUENCE:
+                       output->type = LTTNG_INTERPRETER_TYPE_SEQUENCE;
+                       output->u.sequence.ptr = *(const char **) (ax->u.ptr.ptr + sizeof(unsigned long));
+                       output->u.sequence.nr_elem = *(unsigned long *) ax->u.ptr.ptr;
+                       output->u.sequence.nested_type = ax->u.ptr.field->type.u.sequence_nestable.elem_type;
+                       break;
+               case OBJECT_TYPE_ARRAY:
+                       /* Skip count (unsigned long) */
+                       output->type = LTTNG_INTERPRETER_TYPE_SEQUENCE;
+                       output->u.sequence.ptr = *(const char **) (ax->u.ptr.ptr + sizeof(unsigned long));
+                       output->u.sequence.nr_elem = ax->u.ptr.field->type.u.array_nestable.length;
+                       output->u.sequence.nested_type = ax->u.ptr.field->type.u.array_nestable.elem_type;
+                       break;
+               case OBJECT_TYPE_SIGNED_ENUM:
+                       ret = dynamic_load_field(ax);
+                       if (ret)
+                               return ret;
+                       output->type = LTTNG_INTERPRETER_TYPE_SIGNED_ENUM;
+                       output->u.s = ax->u.v;
+                       break;
+               case OBJECT_TYPE_UNSIGNED_ENUM:
+                       ret = dynamic_load_field(ax);
+                       if (ret)
+                               return ret;
+                       output->type = LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM;
+                       output->u.u = ax->u.v;
+                       break;
+               case OBJECT_TYPE_STRUCT:
+               case OBJECT_TYPE_VARIANT:
+               default:
+                       return -EINVAL;
+               }
+
+               break;
+       case REG_STAR_GLOB_STRING:
+       case REG_TYPE_UNKNOWN:
+       default:
+               return -EINVAL;
+       }
+
+       return LTTNG_INTERPRETER_RECORD_FLAG;
+}
+
+/*
+ * Return 0 (discard), or raise the 0x1 flag (log event).
+ * Currently, other flags are kept for future extensions and have no
+ * effect.
+ */
+static
+uint64_t bytecode_interpret(void *interpreter_data,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               const char *interpreter_stack_data,
+               struct lttng_interpreter_output *output)
+{
+       struct bytecode_runtime *bytecode = interpreter_data;
+       void *pc, *next_pc, *start_pc;
+       int ret = -EINVAL;
+       uint64_t retval = 0;
+       struct estack _stack;
+       struct estack *stack = &_stack;
+       register int64_t ax = 0, bx = 0;
+       register enum entry_type ax_t = REG_TYPE_UNKNOWN, bx_t = REG_TYPE_UNKNOWN;
+       register int top = INTERPRETER_STACK_EMPTY;
+#ifndef INTERPRETER_USE_SWITCH
+       static void *dispatch[NR_BYTECODE_OPS] = {
+               [ BYTECODE_OP_UNKNOWN ] = &&LABEL_BYTECODE_OP_UNKNOWN,
+
+               [ BYTECODE_OP_RETURN ] = &&LABEL_BYTECODE_OP_RETURN,
+
+               /* binary */
+               [ BYTECODE_OP_MUL ] = &&LABEL_BYTECODE_OP_MUL,
+               [ BYTECODE_OP_DIV ] = &&LABEL_BYTECODE_OP_DIV,
+               [ BYTECODE_OP_MOD ] = &&LABEL_BYTECODE_OP_MOD,
+               [ BYTECODE_OP_PLUS ] = &&LABEL_BYTECODE_OP_PLUS,
+               [ BYTECODE_OP_MINUS ] = &&LABEL_BYTECODE_OP_MINUS,
+               [ BYTECODE_OP_BIT_RSHIFT ] = &&LABEL_BYTECODE_OP_BIT_RSHIFT,
+               [ BYTECODE_OP_BIT_LSHIFT ] = &&LABEL_BYTECODE_OP_BIT_LSHIFT,
+               [ BYTECODE_OP_BIT_AND ] = &&LABEL_BYTECODE_OP_BIT_AND,
+               [ BYTECODE_OP_BIT_OR ] = &&LABEL_BYTECODE_OP_BIT_OR,
+               [ BYTECODE_OP_BIT_XOR ] = &&LABEL_BYTECODE_OP_BIT_XOR,
+
+               /* binary comparators */
+               [ BYTECODE_OP_EQ ] = &&LABEL_BYTECODE_OP_EQ,
+               [ BYTECODE_OP_NE ] = &&LABEL_BYTECODE_OP_NE,
+               [ BYTECODE_OP_GT ] = &&LABEL_BYTECODE_OP_GT,
+               [ BYTECODE_OP_LT ] = &&LABEL_BYTECODE_OP_LT,
+               [ BYTECODE_OP_GE ] = &&LABEL_BYTECODE_OP_GE,
+               [ BYTECODE_OP_LE ] = &&LABEL_BYTECODE_OP_LE,
+
+               /* string binary comparator */
+               [ BYTECODE_OP_EQ_STRING ] = &&LABEL_BYTECODE_OP_EQ_STRING,
+               [ BYTECODE_OP_NE_STRING ] = &&LABEL_BYTECODE_OP_NE_STRING,
+               [ BYTECODE_OP_GT_STRING ] = &&LABEL_BYTECODE_OP_GT_STRING,
+               [ BYTECODE_OP_LT_STRING ] = &&LABEL_BYTECODE_OP_LT_STRING,
+               [ BYTECODE_OP_GE_STRING ] = &&LABEL_BYTECODE_OP_GE_STRING,
+               [ BYTECODE_OP_LE_STRING ] = &&LABEL_BYTECODE_OP_LE_STRING,
+
+               /* globbing pattern binary comparator */
+               [ BYTECODE_OP_EQ_STAR_GLOB_STRING ] = &&LABEL_BYTECODE_OP_EQ_STAR_GLOB_STRING,
+               [ BYTECODE_OP_NE_STAR_GLOB_STRING ] = &&LABEL_BYTECODE_OP_NE_STAR_GLOB_STRING,
+
+               /* s64 binary comparator */
+               [ BYTECODE_OP_EQ_S64 ] = &&LABEL_BYTECODE_OP_EQ_S64,
+               [ BYTECODE_OP_NE_S64 ] = &&LABEL_BYTECODE_OP_NE_S64,
+               [ BYTECODE_OP_GT_S64 ] = &&LABEL_BYTECODE_OP_GT_S64,
+               [ BYTECODE_OP_LT_S64 ] = &&LABEL_BYTECODE_OP_LT_S64,
+               [ BYTECODE_OP_GE_S64 ] = &&LABEL_BYTECODE_OP_GE_S64,
+               [ BYTECODE_OP_LE_S64 ] = &&LABEL_BYTECODE_OP_LE_S64,
+
+               /* double binary comparator */
+               [ BYTECODE_OP_EQ_DOUBLE ] = &&LABEL_BYTECODE_OP_EQ_DOUBLE,
+               [ BYTECODE_OP_NE_DOUBLE ] = &&LABEL_BYTECODE_OP_NE_DOUBLE,
+               [ BYTECODE_OP_GT_DOUBLE ] = &&LABEL_BYTECODE_OP_GT_DOUBLE,
+               [ BYTECODE_OP_LT_DOUBLE ] = &&LABEL_BYTECODE_OP_LT_DOUBLE,
+               [ BYTECODE_OP_GE_DOUBLE ] = &&LABEL_BYTECODE_OP_GE_DOUBLE,
+               [ BYTECODE_OP_LE_DOUBLE ] = &&LABEL_BYTECODE_OP_LE_DOUBLE,
+
+               /* Mixed S64-double binary comparators */
+               [ BYTECODE_OP_EQ_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_EQ_DOUBLE_S64,
+               [ BYTECODE_OP_NE_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_NE_DOUBLE_S64,
+               [ BYTECODE_OP_GT_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_GT_DOUBLE_S64,
+               [ BYTECODE_OP_LT_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_LT_DOUBLE_S64,
+               [ BYTECODE_OP_GE_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_GE_DOUBLE_S64,
+               [ BYTECODE_OP_LE_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_LE_DOUBLE_S64,
+
+               [ BYTECODE_OP_EQ_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_EQ_S64_DOUBLE,
+               [ BYTECODE_OP_NE_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_NE_S64_DOUBLE,
+               [ BYTECODE_OP_GT_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_GT_S64_DOUBLE,
+               [ BYTECODE_OP_LT_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_LT_S64_DOUBLE,
+               [ BYTECODE_OP_GE_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_GE_S64_DOUBLE,
+               [ BYTECODE_OP_LE_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_LE_S64_DOUBLE,
+
+               /* unary */
+               [ BYTECODE_OP_UNARY_PLUS ] = &&LABEL_BYTECODE_OP_UNARY_PLUS,
+               [ BYTECODE_OP_UNARY_MINUS ] = &&LABEL_BYTECODE_OP_UNARY_MINUS,
+               [ BYTECODE_OP_UNARY_NOT ] = &&LABEL_BYTECODE_OP_UNARY_NOT,
+               [ BYTECODE_OP_UNARY_PLUS_S64 ] = &&LABEL_BYTECODE_OP_UNARY_PLUS_S64,
+               [ BYTECODE_OP_UNARY_MINUS_S64 ] = &&LABEL_BYTECODE_OP_UNARY_MINUS_S64,
+               [ BYTECODE_OP_UNARY_NOT_S64 ] = &&LABEL_BYTECODE_OP_UNARY_NOT_S64,
+               [ BYTECODE_OP_UNARY_PLUS_DOUBLE ] = &&LABEL_BYTECODE_OP_UNARY_PLUS_DOUBLE,
+               [ BYTECODE_OP_UNARY_MINUS_DOUBLE ] = &&LABEL_BYTECODE_OP_UNARY_MINUS_DOUBLE,
+               [ BYTECODE_OP_UNARY_NOT_DOUBLE ] = &&LABEL_BYTECODE_OP_UNARY_NOT_DOUBLE,
+
+               /* logical */
+               [ BYTECODE_OP_AND ] = &&LABEL_BYTECODE_OP_AND,
+               [ BYTECODE_OP_OR ] = &&LABEL_BYTECODE_OP_OR,
+
+               /* load field ref */
+               [ BYTECODE_OP_LOAD_FIELD_REF ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF,
+               [ BYTECODE_OP_LOAD_FIELD_REF_STRING ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_STRING,
+               [ BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE,
+               [ BYTECODE_OP_LOAD_FIELD_REF_S64 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_S64,
+               [ BYTECODE_OP_LOAD_FIELD_REF_DOUBLE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_DOUBLE,
+
+               /* load from immediate operand */
+               [ BYTECODE_OP_LOAD_STRING ] = &&LABEL_BYTECODE_OP_LOAD_STRING,
+               [ BYTECODE_OP_LOAD_STAR_GLOB_STRING ] = &&LABEL_BYTECODE_OP_LOAD_STAR_GLOB_STRING,
+               [ BYTECODE_OP_LOAD_S64 ] = &&LABEL_BYTECODE_OP_LOAD_S64,
+               [ BYTECODE_OP_LOAD_DOUBLE ] = &&LABEL_BYTECODE_OP_LOAD_DOUBLE,
+
+               /* cast */
+               [ BYTECODE_OP_CAST_TO_S64 ] = &&LABEL_BYTECODE_OP_CAST_TO_S64,
+               [ BYTECODE_OP_CAST_DOUBLE_TO_S64 ] = &&LABEL_BYTECODE_OP_CAST_DOUBLE_TO_S64,
+               [ BYTECODE_OP_CAST_NOP ] = &&LABEL_BYTECODE_OP_CAST_NOP,
+
+               /* get context ref */
+               [ BYTECODE_OP_GET_CONTEXT_REF ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF,
+               [ BYTECODE_OP_GET_CONTEXT_REF_STRING ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF_STRING,
+               [ BYTECODE_OP_GET_CONTEXT_REF_S64 ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF_S64,
+               [ BYTECODE_OP_GET_CONTEXT_REF_DOUBLE ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF_DOUBLE,
+
+               /* load userspace field ref */
+               [ BYTECODE_OP_LOAD_FIELD_REF_USER_STRING ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_USER_STRING,
+               [ BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE,
+
+               /* Instructions for recursive traversal through composed types. */
+               [ BYTECODE_OP_GET_CONTEXT_ROOT ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_ROOT,
+               [ BYTECODE_OP_GET_APP_CONTEXT_ROOT ] = &&LABEL_BYTECODE_OP_GET_APP_CONTEXT_ROOT,
+               [ BYTECODE_OP_GET_PAYLOAD_ROOT ] = &&LABEL_BYTECODE_OP_GET_PAYLOAD_ROOT,
+
+               [ BYTECODE_OP_GET_SYMBOL ] = &&LABEL_BYTECODE_OP_GET_SYMBOL,
+               [ BYTECODE_OP_GET_SYMBOL_FIELD ] = &&LABEL_BYTECODE_OP_GET_SYMBOL_FIELD,
+               [ BYTECODE_OP_GET_INDEX_U16 ] = &&LABEL_BYTECODE_OP_GET_INDEX_U16,
+               [ BYTECODE_OP_GET_INDEX_U64 ] = &&LABEL_BYTECODE_OP_GET_INDEX_U64,
+
+               [ BYTECODE_OP_LOAD_FIELD ] = &&LABEL_BYTECODE_OP_LOAD_FIELD,
+               [ BYTECODE_OP_LOAD_FIELD_S8      ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S8,
+               [ BYTECODE_OP_LOAD_FIELD_S16 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S16,
+               [ BYTECODE_OP_LOAD_FIELD_S32 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S32,
+               [ BYTECODE_OP_LOAD_FIELD_S64 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S64,
+               [ BYTECODE_OP_LOAD_FIELD_U8 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U8,
+               [ BYTECODE_OP_LOAD_FIELD_U16 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U16,
+               [ BYTECODE_OP_LOAD_FIELD_U32 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U32,
+               [ BYTECODE_OP_LOAD_FIELD_U64 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U64,
+               [ BYTECODE_OP_LOAD_FIELD_STRING ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_STRING,
+               [ BYTECODE_OP_LOAD_FIELD_SEQUENCE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_SEQUENCE,
+               [ BYTECODE_OP_LOAD_FIELD_DOUBLE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_DOUBLE,
+
+               [ BYTECODE_OP_UNARY_BIT_NOT ] = &&LABEL_BYTECODE_OP_UNARY_BIT_NOT,
+
+               [ BYTECODE_OP_RETURN_S64 ] = &&LABEL_BYTECODE_OP_RETURN_S64,
+       };
+#endif /* #ifndef INTERPRETER_USE_SWITCH */
+
+       START_OP
+
+               OP(BYTECODE_OP_UNKNOWN):
+               OP(BYTECODE_OP_LOAD_FIELD_REF):
+               OP(BYTECODE_OP_GET_CONTEXT_REF):
+#ifdef INTERPRETER_USE_SWITCH
+               default:
+#endif /* INTERPRETER_USE_SWITCH */
+                       printk(KERN_WARNING "unknown bytecode op %u\n",
+                               (unsigned int) *(bytecode_opcode_t *) pc);
+                       ret = -EINVAL;
+                       goto end;
+
+               OP(BYTECODE_OP_RETURN):
+               OP(BYTECODE_OP_RETURN_S64):
+                       /* LTTNG_INTERPRETER_DISCARD or LTTNG_INTERPRETER_RECORD_FLAG */
+                       switch (estack_ax_t) {
+                       case REG_S64:
+                       case REG_U64:
+                               retval = !!estack_ax_v;
+                               break;
+                       case REG_DOUBLE:
+                       case REG_STRING:
+                       case REG_PTR:
+                               if (!output) {
+                                       ret = -EINVAL;
+                                       goto end;
+                               }
+                               retval = 0;
+                               break;
+                       case REG_STAR_GLOB_STRING:
+                       case REG_TYPE_UNKNOWN:
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       ret = 0;
+                       goto end;
+
+               /* binary */
+               OP(BYTECODE_OP_MUL):
+               OP(BYTECODE_OP_DIV):
+               OP(BYTECODE_OP_MOD):
+               OP(BYTECODE_OP_PLUS):
+               OP(BYTECODE_OP_MINUS):
+                       printk(KERN_WARNING "unsupported bytecode op %u\n",
+                               (unsigned int) *(bytecode_opcode_t *) pc);
+                       ret = -EINVAL;
+                       goto end;
+
+               OP(BYTECODE_OP_EQ):
+               OP(BYTECODE_OP_NE):
+               OP(BYTECODE_OP_GT):
+               OP(BYTECODE_OP_LT):
+               OP(BYTECODE_OP_GE):
+               OP(BYTECODE_OP_LE):
+                       printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
+                               (unsigned int) *(bytecode_opcode_t *) pc);
+                       ret = -EINVAL;
+                       goto end;
+
+               OP(BYTECODE_OP_EQ_STRING):
+               {
+                       int res;
+
+                       res = (stack_strcmp(stack, top, "==") == 0);
+                       estack_pop(stack, top, ax, bx, ax_t, bx_t);
+                       estack_ax_v = res;
+                       estack_ax_t = REG_S64;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+               OP(BYTECODE_OP_NE_STRING):
+               {
+                       int res;
+
+                       res = (stack_strcmp(stack, top, "!=") != 0);
+                       estack_pop(stack, top, ax, bx, ax_t, bx_t);
+                       estack_ax_v = res;
+                       estack_ax_t = REG_S64;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+               OP(BYTECODE_OP_GT_STRING):
+               {
+                       int res;
+
+                       res = (stack_strcmp(stack, top, ">") > 0);
+                       estack_pop(stack, top, ax, bx, ax_t, bx_t);
+                       estack_ax_v = res;
+                       estack_ax_t = REG_S64;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+               OP(BYTECODE_OP_LT_STRING):
+               {
+                       int res;
+
+                       res = (stack_strcmp(stack, top, "<") < 0);
+                       estack_pop(stack, top, ax, bx, ax_t, bx_t);
+                       estack_ax_v = res;
+                       estack_ax_t = REG_S64;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+               OP(BYTECODE_OP_GE_STRING):
+               {
+                       int res;
+
+                       res = (stack_strcmp(stack, top, ">=") >= 0);
+                       estack_pop(stack, top, ax, bx, ax_t, bx_t);
+                       estack_ax_v = res;
+                       estack_ax_t = REG_S64;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+               OP(BYTECODE_OP_LE_STRING):
+               {
+                       int res;
+
+                       res = (stack_strcmp(stack, top, "<=") <= 0);
+                       estack_pop(stack, top, ax, bx, ax_t, bx_t);
+                       estack_ax_v = res;
+                       estack_ax_t = REG_S64;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+
+               OP(BYTECODE_OP_EQ_STAR_GLOB_STRING):
+               {
+                       int res;
+
+                       res = (stack_star_glob_match(stack, top, "==") == 0);
+                       estack_pop(stack, top, ax, bx, ax_t, bx_t);
+                       estack_ax_v = res;
+                       estack_ax_t = REG_S64;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+               OP(BYTECODE_OP_NE_STAR_GLOB_STRING):
+               {
+                       int res;
+
+                       res = (stack_star_glob_match(stack, top, "!=") != 0);
+                       estack_pop(stack, top, ax, bx, ax_t, bx_t);
+                       estack_ax_v = res;
+                       estack_ax_t = REG_S64;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+
+               OP(BYTECODE_OP_EQ_S64):
+               {
+                       int res;
+
+                       res = (estack_bx_v == estack_ax_v);
+                       estack_pop(stack, top, ax, bx, ax_t, bx_t);
+                       estack_ax_v = res;
+                       estack_ax_t = REG_S64;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+               OP(BYTECODE_OP_NE_S64):
+               {
+                       int res;
+
+                       res = (estack_bx_v != estack_ax_v);
+                       estack_pop(stack, top, ax, bx, ax_t, bx_t);
+                       estack_ax_v = res;
+                       estack_ax_t = REG_S64;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+               OP(BYTECODE_OP_GT_S64):
+               {
+                       int res;
+
+                       res = (estack_bx_v > estack_ax_v);
+                       estack_pop(stack, top, ax, bx, ax_t, bx_t);
+                       estack_ax_v = res;
+                       estack_ax_t = REG_S64;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+               OP(BYTECODE_OP_LT_S64):
+               {
+                       int res;
+
+                       res = (estack_bx_v < estack_ax_v);
+                       estack_pop(stack, top, ax, bx, ax_t, bx_t);
+                       estack_ax_v = res;
+                       estack_ax_t = REG_S64;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+               OP(BYTECODE_OP_GE_S64):
+               {
+                       int res;
+
+                       res = (estack_bx_v >= estack_ax_v);
+                       estack_pop(stack, top, ax, bx, ax_t, bx_t);
+                       estack_ax_v = res;
+                       estack_ax_t = REG_S64;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+               OP(BYTECODE_OP_LE_S64):
+               {
+                       int res;
+
+                       res = (estack_bx_v <= estack_ax_v);
+                       estack_pop(stack, top, ax, bx, ax_t, bx_t);
+                       estack_ax_v = res;
+                       estack_ax_t = REG_S64;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+
+               OP(BYTECODE_OP_EQ_DOUBLE):
+               OP(BYTECODE_OP_NE_DOUBLE):
+               OP(BYTECODE_OP_GT_DOUBLE):
+               OP(BYTECODE_OP_LT_DOUBLE):
+               OP(BYTECODE_OP_GE_DOUBLE):
+               OP(BYTECODE_OP_LE_DOUBLE):
+               {
+                       BUG_ON(1);
+                       PO;
+               }
+
+               /* Mixed S64-double binary comparators */
+               OP(BYTECODE_OP_EQ_DOUBLE_S64):
+               OP(BYTECODE_OP_NE_DOUBLE_S64):
+               OP(BYTECODE_OP_GT_DOUBLE_S64):
+               OP(BYTECODE_OP_LT_DOUBLE_S64):
+               OP(BYTECODE_OP_GE_DOUBLE_S64):
+               OP(BYTECODE_OP_LE_DOUBLE_S64):
+               OP(BYTECODE_OP_EQ_S64_DOUBLE):
+               OP(BYTECODE_OP_NE_S64_DOUBLE):
+               OP(BYTECODE_OP_GT_S64_DOUBLE):
+               OP(BYTECODE_OP_LT_S64_DOUBLE):
+               OP(BYTECODE_OP_GE_S64_DOUBLE):
+               OP(BYTECODE_OP_LE_S64_DOUBLE):
+               {
+                       BUG_ON(1);
+                       PO;
+               }
+               OP(BYTECODE_OP_BIT_RSHIFT):
+               {
+                       int64_t res;
+
+                       if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+
+                       /* Catch undefined behavior. */
+                       if (unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       res = ((uint64_t) estack_bx_v >> (uint32_t) estack_ax_v);
+                       estack_pop(stack, top, ax, bx, ax_t, bx_t);
+                       estack_ax_v = res;
+                       estack_ax_t = REG_U64;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+               OP(BYTECODE_OP_BIT_LSHIFT):
+               {
+                       int64_t res;
+
+                       if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+
+                       /* Catch undefined behavior. */
+                       if (unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       res = ((uint64_t) estack_bx_v << (uint32_t) estack_ax_v);
+                       estack_pop(stack, top, ax, bx, ax_t, bx_t);
+                       estack_ax_v = res;
+                       estack_ax_t = REG_U64;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+               OP(BYTECODE_OP_BIT_AND):
+               {
+                       int64_t res;
+
+                       if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+
+                       res = ((uint64_t) estack_bx_v & (uint64_t) estack_ax_v);
+                       estack_pop(stack, top, ax, bx, ax_t, bx_t);
+                       estack_ax_v = res;
+                       estack_ax_t = REG_U64;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+               OP(BYTECODE_OP_BIT_OR):
+               {
+                       int64_t res;
+
+                       if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+
+                       res = ((uint64_t) estack_bx_v | (uint64_t) estack_ax_v);
+                       estack_pop(stack, top, ax, bx, ax_t, bx_t);
+                       estack_ax_v = res;
+                       estack_ax_t = REG_U64;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+               OP(BYTECODE_OP_BIT_XOR):
+               {
+                       int64_t res;
+
+                       if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+
+                       res = ((uint64_t) estack_bx_v ^ (uint64_t) estack_ax_v);
+                       estack_pop(stack, top, ax, bx, ax_t, bx_t);
+                       estack_ax_v = res;
+                       estack_ax_t = REG_U64;
+                       next_pc += sizeof(struct binary_op);
+                       PO;
+               }
+
+               /* unary */
+               OP(BYTECODE_OP_UNARY_PLUS):
+               OP(BYTECODE_OP_UNARY_MINUS):
+               OP(BYTECODE_OP_UNARY_NOT):
+                       printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
+                               (unsigned int) *(bytecode_opcode_t *) pc);
+                       ret = -EINVAL;
+                       goto end;
+
+
+               OP(BYTECODE_OP_UNARY_BIT_NOT):
+               {
+                       estack_ax_v = ~(uint64_t) estack_ax_v;
+                       estack_ax_t = REG_S64;
+                       next_pc += sizeof(struct unary_op);
+                       PO;
+               }
+
+               OP(BYTECODE_OP_UNARY_PLUS_S64):
+               {
+                       next_pc += sizeof(struct unary_op);
+                       PO;
+               }
+               OP(BYTECODE_OP_UNARY_MINUS_S64):
+               {
+                       estack_ax_v = -estack_ax_v;
+                       estack_ax_t = REG_S64;
+                       next_pc += sizeof(struct unary_op);
+                       PO;
+               }
+               OP(BYTECODE_OP_UNARY_PLUS_DOUBLE):
+               OP(BYTECODE_OP_UNARY_MINUS_DOUBLE):
+               {
+                       BUG_ON(1);
+                       PO;
+               }
+               OP(BYTECODE_OP_UNARY_NOT_S64):
+               {
+                       estack_ax_v = !estack_ax_v;
+                       estack_ax_t = REG_S64;
+                       next_pc += sizeof(struct unary_op);
+                       PO;
+               }
+               OP(BYTECODE_OP_UNARY_NOT_DOUBLE):
+               {
+                       BUG_ON(1);
+                       PO;
+               }
+
+               /* logical */
+               OP(BYTECODE_OP_AND):
+               {
+                       struct logical_op *insn = (struct logical_op *) pc;
+
+                       /* If AX is 0, skip and evaluate to 0 */
+                       if (unlikely(estack_ax_v == 0)) {
+                               dbg_printk("Jumping to bytecode offset %u\n",
+                                       (unsigned int) insn->skip_offset);
+                               next_pc = start_pc + insn->skip_offset;
+                       } else {
+                               /* Pop 1 when jump not taken */
+                               estack_pop(stack, top, ax, bx, ax_t, bx_t);
+                               next_pc += sizeof(struct logical_op);
+                       }
+                       PO;
+               }
+               OP(BYTECODE_OP_OR):
+               {
+                       struct logical_op *insn = (struct logical_op *) pc;
+
+                       /* If AX is nonzero, skip and evaluate to 1 */
+
+                       if (unlikely(estack_ax_v != 0)) {
+                               estack_ax_v = 1;
+                               dbg_printk("Jumping to bytecode offset %u\n",
+                                       (unsigned int) insn->skip_offset);
+                               next_pc = start_pc + insn->skip_offset;
+                       } else {
+                               /* Pop 1 when jump not taken */
+                               estack_pop(stack, top, ax, bx, ax_t, bx_t);
+                               next_pc += sizeof(struct logical_op);
+                       }
+                       PO;
+               }
+
+
+               /* load field ref */
+               OP(BYTECODE_OP_LOAD_FIELD_REF_STRING):
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+                       struct field_ref *ref = (struct field_ref *) insn->data;
+
+                       dbg_printk("load field ref offset %u type string\n",
+                               ref->offset);
+                       estack_push(stack, top, ax, bx, ax_t, bx_t);
+                       estack_ax(stack, top)->u.s.str =
+                               *(const char * const *) &interpreter_stack_data[ref->offset];
+                       if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+                               dbg_printk("Filter warning: loading a NULL string.\n");
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
+                       estack_ax(stack, top)->u.s.literal_type =
+                               ESTACK_STRING_LITERAL_TYPE_NONE;
+                       estack_ax(stack, top)->u.s.user = 0;
+                       estack_ax(stack, top)->type = REG_STRING;
+                       dbg_printk("ref load string %s\n", estack_ax(stack, top)->u.s.str);
+                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+                       PO;
+               }
+
+               OP(BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE):
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+                       struct field_ref *ref = (struct field_ref *) insn->data;
+
+                       dbg_printk("load field ref offset %u type sequence\n",
+                               ref->offset);
+                       estack_push(stack, top, ax, bx, ax_t, bx_t);
+                       estack_ax(stack, top)->u.s.seq_len =
+                               *(unsigned long *) &interpreter_stack_data[ref->offset];
+                       estack_ax(stack, top)->u.s.str =
+                               *(const char **) (&interpreter_stack_data[ref->offset
+                                                               + sizeof(unsigned long)]);
+                       if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+                               dbg_printk("Filter warning: loading a NULL sequence.\n");
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       estack_ax(stack, top)->u.s.literal_type =
+                               ESTACK_STRING_LITERAL_TYPE_NONE;
+                       estack_ax(stack, top)->u.s.user = 0;
+                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+                       PO;
+               }
+
+               OP(BYTECODE_OP_LOAD_FIELD_REF_S64):
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+                       struct field_ref *ref = (struct field_ref *) insn->data;
+
+                       dbg_printk("load field ref offset %u type s64\n",
+                               ref->offset);
+                       estack_push(stack, top, ax, bx, ax_t, bx_t);
+                       estack_ax_v =
+                               ((struct literal_numeric *) &interpreter_stack_data[ref->offset])->v;
+                       estack_ax_t = REG_S64;
+                       dbg_printk("ref load s64 %lld\n",
+                               (long long) estack_ax_v);
+                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+                       PO;
+               }
+
+               OP(BYTECODE_OP_LOAD_FIELD_REF_DOUBLE):
+               {
+                       BUG_ON(1);
+                       PO;
+               }
+
+               /* load from immediate operand */
+               OP(BYTECODE_OP_LOAD_STRING):
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+
+                       dbg_printk("load string %s\n", insn->data);
+                       estack_push(stack, top, ax, bx, ax_t, bx_t);
+                       estack_ax(stack, top)->u.s.str = insn->data;
+                       estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
+                       estack_ax(stack, top)->u.s.literal_type =
+                               ESTACK_STRING_LITERAL_TYPE_PLAIN;
+                       estack_ax(stack, top)->u.s.user = 0;
+                       next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+                       PO;
+               }
+
+               OP(BYTECODE_OP_LOAD_STAR_GLOB_STRING):
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+
+                       dbg_printk("load globbing pattern %s\n", insn->data);
+                       estack_push(stack, top, ax, bx, ax_t, bx_t);
+                       estack_ax(stack, top)->u.s.str = insn->data;
+                       estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
+                       estack_ax(stack, top)->u.s.literal_type =
+                               ESTACK_STRING_LITERAL_TYPE_STAR_GLOB;
+                       estack_ax(stack, top)->u.s.user = 0;
+                       next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+                       PO;
+               }
+
+               OP(BYTECODE_OP_LOAD_S64):
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+
+                       estack_push(stack, top, ax, bx, ax_t, bx_t);
+                       estack_ax_v = ((struct literal_numeric *) insn->data)->v;
+                       estack_ax_t = REG_S64;
+                       dbg_printk("load s64 %lld\n",
+                               (long long) estack_ax_v);
+                       next_pc += sizeof(struct load_op)
+                                       + sizeof(struct literal_numeric);
+                       PO;
+               }
+
+               OP(BYTECODE_OP_LOAD_DOUBLE):
+               {
+                       BUG_ON(1);
+                       PO;
+               }
+
+               /* cast */
+               OP(BYTECODE_OP_CAST_TO_S64):
+                       printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
+                               (unsigned int) *(bytecode_opcode_t *) pc);
+                       ret = -EINVAL;
+                       goto end;
+
+               OP(BYTECODE_OP_CAST_DOUBLE_TO_S64):
+               {
+                       BUG_ON(1);
+                       PO;
+               }
+
+               OP(BYTECODE_OP_CAST_NOP):
+               {
+                       next_pc += sizeof(struct cast_op);
+                       PO;
+               }
+
+               /* get context ref */
+               OP(BYTECODE_OP_GET_CONTEXT_REF_STRING):
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+                       struct field_ref *ref = (struct field_ref *) insn->data;
+                       struct lttng_ctx_field *ctx_field;
+                       union lttng_ctx_value v;
+
+                       dbg_printk("get context ref offset %u type string\n",
+                               ref->offset);
+                       ctx_field = &lttng_static_ctx->fields[ref->offset];
+                       ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+                       estack_push(stack, top, ax, bx, ax_t, bx_t);
+                       estack_ax(stack, top)->u.s.str = v.str;
+                       if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+                               dbg_printk("Filter warning: loading a NULL string.\n");
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
+                       estack_ax(stack, top)->u.s.literal_type =
+                               ESTACK_STRING_LITERAL_TYPE_NONE;
+                       estack_ax(stack, top)->u.s.user = 0;
+                       estack_ax(stack, top)->type = REG_STRING;
+                       dbg_printk("ref get context string %s\n", estack_ax(stack, top)->u.s.str);
+                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+                       PO;
+               }
+
+               OP(BYTECODE_OP_GET_CONTEXT_REF_S64):
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+                       struct field_ref *ref = (struct field_ref *) insn->data;
+                       struct lttng_ctx_field *ctx_field;
+                       union lttng_ctx_value v;
+
+                       dbg_printk("get context ref offset %u type s64\n",
+                               ref->offset);
+                       ctx_field = &lttng_static_ctx->fields[ref->offset];
+                       ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+                       estack_push(stack, top, ax, bx, ax_t, bx_t);
+                       estack_ax_v = v.s64;
+                       estack_ax_t = REG_S64;
+                       dbg_printk("ref get context s64 %lld\n",
+                               (long long) estack_ax_v);
+                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+                       PO;
+               }
+
+               OP(BYTECODE_OP_GET_CONTEXT_REF_DOUBLE):
+               {
+                       BUG_ON(1);
+                       PO;
+               }
+
+               /* load userspace field ref */
+               OP(BYTECODE_OP_LOAD_FIELD_REF_USER_STRING):
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+                       struct field_ref *ref = (struct field_ref *) insn->data;
+
+                       dbg_printk("load field ref offset %u type user string\n",
+                               ref->offset);
+                       estack_push(stack, top, ax, bx, ax_t, bx_t);
+                       estack_ax(stack, top)->u.s.user_str =
+                               *(const char * const *) &interpreter_stack_data[ref->offset];
+                       if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+                               dbg_printk("Filter warning: loading a NULL string.\n");
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
+                       estack_ax(stack, top)->u.s.literal_type =
+                               ESTACK_STRING_LITERAL_TYPE_NONE;
+                       estack_ax(stack, top)->u.s.user = 1;
+                       estack_ax(stack, top)->type = REG_STRING;
+                       dbg_printk("ref load string %s\n", estack_ax(stack, top)->u.s.str);
+                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+                       PO;
+               }
+
+               OP(BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE):
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+                       struct field_ref *ref = (struct field_ref *) insn->data;
+
+                       dbg_printk("load field ref offset %u type user sequence\n",
+                               ref->offset);
+                       estack_push(stack, top, ax, bx, ax_t, bx_t);
+                       estack_ax(stack, top)->u.s.seq_len =
+                               *(unsigned long *) &interpreter_stack_data[ref->offset];
+                       estack_ax(stack, top)->u.s.user_str =
+                               *(const char **) (&interpreter_stack_data[ref->offset
+                                                               + sizeof(unsigned long)]);
+                       if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+                               dbg_printk("Filter warning: loading a NULL sequence.\n");
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       estack_ax(stack, top)->u.s.literal_type =
+                               ESTACK_STRING_LITERAL_TYPE_NONE;
+                       estack_ax(stack, top)->u.s.user = 1;
+                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+                       PO;
+               }
+
+               OP(BYTECODE_OP_GET_CONTEXT_ROOT):
+               {
+                       dbg_printk("op get context root\n");
+                       estack_push(stack, top, ax, bx, ax_t, bx_t);
+                       estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_CONTEXT;
+                       /* "field" only needed for variants. */
+                       estack_ax(stack, top)->u.ptr.field = NULL;
+                       estack_ax(stack, top)->type = REG_PTR;
+                       next_pc += sizeof(struct load_op);
+                       PO;
+               }
+
+               OP(BYTECODE_OP_GET_APP_CONTEXT_ROOT):
+               {
+                       BUG_ON(1);
+                       PO;
+               }
+
+               OP(BYTECODE_OP_GET_PAYLOAD_ROOT):
+               {
+                       dbg_printk("op get app payload root\n");
+                       estack_push(stack, top, ax, bx, ax_t, bx_t);
+                       estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_PAYLOAD;
+                       estack_ax(stack, top)->u.ptr.ptr = interpreter_stack_data;
+                       /* "field" only needed for variants. */
+                       estack_ax(stack, top)->u.ptr.field = NULL;
+                       estack_ax(stack, top)->type = REG_PTR;
+                       next_pc += sizeof(struct load_op);
+                       PO;
+               }
+
+               OP(BYTECODE_OP_GET_SYMBOL):
+               {
+                       dbg_printk("op get symbol\n");
+                       switch (estack_ax(stack, top)->u.ptr.type) {
+                       case LOAD_OBJECT:
+                               printk(KERN_WARNING "Nested fields not implemented yet.\n");
+                               ret = -EINVAL;
+                               goto end;
+                       case LOAD_ROOT_CONTEXT:
+                       case LOAD_ROOT_APP_CONTEXT:
+                       case LOAD_ROOT_PAYLOAD:
+                               /*
+                                * symbol lookup is performed by
+                                * specialization.
+                                */
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
+                       PO;
+               }
+
+               OP(BYTECODE_OP_GET_SYMBOL_FIELD):
+               {
+                       /*
+                        * Used for first variant encountered in a
+                        * traversal. Variants are not implemented yet.
+                        */
+                       ret = -EINVAL;
+                       goto end;
+               }
+
+               OP(BYTECODE_OP_GET_INDEX_U16):
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+                       struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
+
+                       dbg_printk("op get index u16\n");
+                       ret = dynamic_get_index(lttng_probe_ctx, bytecode, index->index, estack_ax(stack, top));
+                       if (ret)
+                               goto end;
+                       estack_ax_v = estack_ax(stack, top)->u.v;
+                       estack_ax_t = estack_ax(stack, top)->type;
+                       next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
+                       PO;
+               }
+
+               OP(BYTECODE_OP_GET_INDEX_U64):
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+                       struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
+
+                       dbg_printk("op get index u64\n");
+                       ret = dynamic_get_index(lttng_probe_ctx, bytecode, index->index, estack_ax(stack, top));
+                       if (ret)
+                               goto end;
+                       estack_ax_v = estack_ax(stack, top)->u.v;
+                       estack_ax_t = estack_ax(stack, top)->type;
+                       next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
+                       PO;
+               }
+
+               OP(BYTECODE_OP_LOAD_FIELD):
+               {
+                       dbg_printk("op load field\n");
+                       ret = dynamic_load_field(estack_ax(stack, top));
+                       if (ret)
+                               goto end;
+                       estack_ax_v = estack_ax(stack, top)->u.v;
+                       estack_ax_t = estack_ax(stack, top)->type;
+                       next_pc += sizeof(struct load_op);
+                       PO;
+               }
+
+               OP(BYTECODE_OP_LOAD_FIELD_S8):
+               {
+                       dbg_printk("op load field s8\n");
+
+                       estack_ax_v = *(int8_t *) estack_ax(stack, top)->u.ptr.ptr;
+                       estack_ax_t = REG_S64;
+                       next_pc += sizeof(struct load_op);
+                       PO;
+               }
+               OP(BYTECODE_OP_LOAD_FIELD_S16):
+               {
+                       dbg_printk("op load field s16\n");
+
+                       estack_ax_v = *(int16_t *) estack_ax(stack, top)->u.ptr.ptr;
+                       estack_ax_t = REG_S64;
+                       next_pc += sizeof(struct load_op);
+                       PO;
+               }
+               OP(BYTECODE_OP_LOAD_FIELD_S32):
+               {
+                       dbg_printk("op load field s32\n");
+
+                       estack_ax_v = *(int32_t *) estack_ax(stack, top)->u.ptr.ptr;
+                       estack_ax_t = REG_S64;
+                       next_pc += sizeof(struct load_op);
+                       PO;
+               }
+               OP(BYTECODE_OP_LOAD_FIELD_S64):
+               {
+                       dbg_printk("op load field s64\n");
+
+                       estack_ax_v = *(int64_t *) estack_ax(stack, top)->u.ptr.ptr;
+                       estack_ax_t = REG_S64;
+                       next_pc += sizeof(struct load_op);
+                       PO;
+               }
+               OP(BYTECODE_OP_LOAD_FIELD_U8):
+               {
+                       dbg_printk("op load field u8\n");
+
+                       estack_ax_v = *(uint8_t *) estack_ax(stack, top)->u.ptr.ptr;
+                       estack_ax_t = REG_S64;
+                       next_pc += sizeof(struct load_op);
+                       PO;
+               }
+               OP(BYTECODE_OP_LOAD_FIELD_U16):
+               {
+                       dbg_printk("op load field u16\n");
+
+                       estack_ax_v = *(uint16_t *) estack_ax(stack, top)->u.ptr.ptr;
+                       estack_ax_t = REG_S64;
+                       next_pc += sizeof(struct load_op);
+                       PO;
+               }
+               OP(BYTECODE_OP_LOAD_FIELD_U32):
+               {
+                       dbg_printk("op load field u32\n");
+
+                       estack_ax_v = *(uint32_t *) estack_ax(stack, top)->u.ptr.ptr;
+                       estack_ax_t = REG_S64;
+                       next_pc += sizeof(struct load_op);
+                       PO;
+               }
+               OP(BYTECODE_OP_LOAD_FIELD_U64):
+               {
+                       dbg_printk("op load field u64\n");
+
+                       estack_ax_v = *(uint64_t *) estack_ax(stack, top)->u.ptr.ptr;
+                       estack_ax_t = REG_S64;
+                       next_pc += sizeof(struct load_op);
+                       PO;
+               }
+               OP(BYTECODE_OP_LOAD_FIELD_DOUBLE):
+               {
+                       ret = -EINVAL;
+                       goto end;
+               }
+
+               OP(BYTECODE_OP_LOAD_FIELD_STRING):
+               {
+                       const char *str;
+
+                       dbg_printk("op load field string\n");
+                       str = (const char *) estack_ax(stack, top)->u.ptr.ptr;
+                       estack_ax(stack, top)->u.s.str = str;
+                       if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+                               dbg_printk("Filter warning: loading a NULL string.\n");
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
+                       estack_ax(stack, top)->u.s.literal_type =
+                               ESTACK_STRING_LITERAL_TYPE_NONE;
+                       estack_ax(stack, top)->type = REG_STRING;
+                       next_pc += sizeof(struct load_op);
+                       PO;
+               }
+
+               OP(BYTECODE_OP_LOAD_FIELD_SEQUENCE):
+               {
+                       const char *ptr;
+
+                       dbg_printk("op load field string sequence\n");
+                       ptr = estack_ax(stack, top)->u.ptr.ptr;
+                       estack_ax(stack, top)->u.s.seq_len = *(unsigned long *) ptr;
+                       estack_ax(stack, top)->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
+                       if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+                               dbg_printk("Filter warning: loading a NULL sequence.\n");
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       estack_ax(stack, top)->u.s.literal_type =
+                               ESTACK_STRING_LITERAL_TYPE_NONE;
+                       estack_ax(stack, top)->type = REG_STRING;
+                       next_pc += sizeof(struct load_op);
+                       PO;
+               }
+
+       END_OP
+end:
+       /* Return _DISCARD on error. */
+       if (ret)
+               return LTTNG_INTERPRETER_DISCARD;
+
+       if (output) {
+               return lttng_bytecode_interpret_format_output(
+                               estack_ax(stack, top), output);
+       }
+
+       return retval;
+}
+LTTNG_STACK_FRAME_NON_STANDARD(bytecode_interpret);
+
+/*
+ * Evaluate filter bytecode against the given probe context and event
+ * stack data. No interpreter output descriptor is requested, so only
+ * the interpreter verdict (record/discard) is returned.
+ */
+uint64_t lttng_bytecode_filter_interpret(void *filter_data,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               const char *filter_stack_data)
+{
+       struct lttng_interpreter_output *no_output = NULL;
+
+       return bytecode_interpret(filter_data, lttng_probe_ctx,
+                       filter_stack_data, no_output);
+}
+
+/*
+ * Evaluate capture bytecode against the given probe context and event
+ * stack data, formatting the captured value into @output on success.
+ */
+uint64_t lttng_bytecode_capture_interpret(void *capture_data,
+               struct lttng_probe_ctx *lttng_probe_ctx,
+               const char *capture_stack_data,
+               struct lttng_interpreter_output *output)
+{
+       uint64_t verdict;
+
+       verdict = bytecode_interpret(capture_data, lttng_probe_ctx,
+                       capture_stack_data, output);
+       return verdict;
+}
+
+#undef START_OP
+#undef OP
+#undef PO
+#undef END_OP
diff --git a/src/lttng-bytecode-specialize.c b/src/lttng-bytecode-specialize.c
new file mode 100644 (file)
index 0000000..86fe7ce
--- /dev/null
@@ -0,0 +1,1227 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * lttng-bytecode-specialize.c
+ *
+ * LTTng modules bytecode code specializer.
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/slab.h>
+#include <lttng/lttng-bytecode.h>
+#include <lttng/align.h>
+
+/*
+ * Reserve @len bytes, aligned on @align, at the end of the runtime's
+ * data area, growing the backing allocation when needed.
+ * Returns the offset of the start of the reserved (aligned) region
+ * within runtime->data, or a negative error: -EINVAL when the
+ * interpreter data cap would be exceeded, -ENOMEM on allocation failure.
+ */
+static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
+		size_t align, size_t len)
+{
+	ssize_t ret;
+	size_t padding = offset_align(runtime->data_len, align);
+	size_t new_len = runtime->data_len + padding + len;
+	size_t new_alloc_len = new_len;
+	size_t old_alloc_len = runtime->data_alloc_len;
+
+	if (new_len > INTERPRETER_MAX_DATA_LEN)
+		return -EINVAL;
+
+	if (new_alloc_len > old_alloc_len) {
+		char *newptr;
+
+		/* Grow geometrically: at least double, rounded to a power of two. */
+		new_alloc_len =
+			max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
+		newptr = krealloc(runtime->data, new_alloc_len, GFP_KERNEL);
+		if (!newptr)
+			return -ENOMEM;
+		runtime->data = newptr;
+		/* Zero only the newly allocated tail beyond the old allocation. */
+		memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
+		runtime->data_alloc_len = new_alloc_len;
+	}
+	/* Consume padding first so the returned offset is aligned. */
+	runtime->data_len += padding;
+	ret = runtime->data_len;
+	runtime->data_len += len;
+	return ret;
+}
+
+/*
+ * Append @len bytes from @p to the runtime data area, aligned on @align.
+ * Returns the offset of the copied data within runtime->data, or a
+ * negative error code on failure.
+ */
+static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
+		const void *p, size_t align, size_t len)
+{
+	ssize_t offset;
+
+	offset = bytecode_reserve_data(runtime, align, len);
+	if (offset < 0)
+		return offset;	/* Propagate -EINVAL/-ENOMEM instead of masking as -ENOMEM. */
+	memcpy(&runtime->data[offset], p, len);
+	return offset;
+}
+
+/*
+ * Specialize a generic "load field" instruction according to the object
+ * type currently tracked on top of the virtual stack: select the typed
+ * load opcode and set the resulting virtual register type.
+ * Returns 0 on success, -EINVAL for object types that cannot be loaded
+ * directly (root objects, doubles, dynamic and nested types).
+ */
+static int specialize_load_field(struct vstack_entry *stack_top,
+		struct load_op *insn)
+{
+	int ret;
+
+	switch (stack_top->load.type) {
+	case LOAD_OBJECT:
+		break;
+	case LOAD_ROOT_CONTEXT:
+	case LOAD_ROOT_APP_CONTEXT:
+	case LOAD_ROOT_PAYLOAD:
+	default:
+		dbg_printk("Bytecode warning: cannot load root, missing field name.\n");
+		ret = -EINVAL;
+		goto end;
+	}
+	switch (stack_top->load.object_type) {
+	case OBJECT_TYPE_S8:
+		dbg_printk("op load field s8\n");
+		stack_top->type = REG_S64;
+		/* Only specialize for native byte order; reverse-bo keeps generic op. */
+		if (!stack_top->load.rev_bo)
+			insn->op = BYTECODE_OP_LOAD_FIELD_S8;
+		break;
+	case OBJECT_TYPE_S16:
+		dbg_printk("op load field s16\n");
+		stack_top->type = REG_S64;
+		if (!stack_top->load.rev_bo)
+			insn->op = BYTECODE_OP_LOAD_FIELD_S16;
+		break;
+	case OBJECT_TYPE_S32:
+		dbg_printk("op load field s32\n");
+		stack_top->type = REG_S64;
+		if (!stack_top->load.rev_bo)
+			insn->op = BYTECODE_OP_LOAD_FIELD_S32;
+		break;
+	case OBJECT_TYPE_S64:
+		dbg_printk("op load field s64\n");
+		stack_top->type = REG_S64;
+		if (!stack_top->load.rev_bo)
+			insn->op = BYTECODE_OP_LOAD_FIELD_S64;
+		break;
+	case OBJECT_TYPE_SIGNED_ENUM:
+		dbg_printk("op load field signed enumeration\n");
+		/* Enumerations are not specialized here; kept as a pointer register. */
+		stack_top->type = REG_PTR;
+		break;
+	case OBJECT_TYPE_U8:
+		dbg_printk("op load field u8\n");
+		stack_top->type = REG_S64;
+		/* NOTE(review): unlike s8, no rev_bo check here (a single byte has no
+		 * byte order) — confirm the s8 rev_bo check is intentional. */
+		insn->op = BYTECODE_OP_LOAD_FIELD_U8;
+		break;
+	case OBJECT_TYPE_U16:
+		dbg_printk("op load field u16\n");
+		stack_top->type = REG_S64;
+		if (!stack_top->load.rev_bo)
+			insn->op = BYTECODE_OP_LOAD_FIELD_U16;
+		break;
+	case OBJECT_TYPE_U32:
+		dbg_printk("op load field u32\n");
+		stack_top->type = REG_S64;
+		if (!stack_top->load.rev_bo)
+			insn->op = BYTECODE_OP_LOAD_FIELD_U32;
+		break;
+	case OBJECT_TYPE_U64:
+		dbg_printk("op load field u64\n");
+		stack_top->type = REG_S64;
+		if (!stack_top->load.rev_bo)
+			insn->op = BYTECODE_OP_LOAD_FIELD_U64;
+		break;
+	case OBJECT_TYPE_UNSIGNED_ENUM:
+		dbg_printk("op load field unsigned enumeration\n");
+		stack_top->type = REG_PTR;
+		break;
+	case OBJECT_TYPE_DOUBLE:
+		printk(KERN_WARNING "Double type unsupported\n\n");
+		ret = -EINVAL;
+		goto end;
+	case OBJECT_TYPE_STRING:
+		dbg_printk("op load field string\n");
+		stack_top->type = REG_STRING;
+		insn->op = BYTECODE_OP_LOAD_FIELD_STRING;
+		break;
+	case OBJECT_TYPE_STRING_SEQUENCE:
+		dbg_printk("op load field string sequence\n");
+		stack_top->type = REG_STRING;
+		insn->op = BYTECODE_OP_LOAD_FIELD_SEQUENCE;
+		break;
+	case OBJECT_TYPE_DYNAMIC:
+		ret = -EINVAL;
+		goto end;
+	case OBJECT_TYPE_SEQUENCE:
+	case OBJECT_TYPE_ARRAY:
+	case OBJECT_TYPE_STRUCT:
+	case OBJECT_TYPE_VARIANT:
+		printk(KERN_WARNING "Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
+		ret = -EINVAL;
+		goto end;
+	}
+	return 0;
+
+end:
+	return ret;
+}
+
+/*
+ * Map an integer element's signedness and bit width to the matching
+ * object type. Returns 0 on success, -EINVAL for unsupported widths.
+ */
+static int specialize_get_index_object_type(enum object_type *otype,
+		int signedness, uint32_t elem_len)
+{
+	if (signedness) {
+		switch (elem_len) {
+		case 8:
+			*otype = OBJECT_TYPE_S8;
+			return 0;
+		case 16:
+			*otype = OBJECT_TYPE_S16;
+			return 0;
+		case 32:
+			*otype = OBJECT_TYPE_S32;
+			return 0;
+		case 64:
+			*otype = OBJECT_TYPE_S64;
+			return 0;
+		default:
+			return -EINVAL;
+		}
+	}
+	switch (elem_len) {
+	case 8:
+		*otype = OBJECT_TYPE_U8;
+		return 0;
+	case 16:
+		*otype = OBJECT_TYPE_U16;
+		return 0;
+	case 32:
+		*otype = OBJECT_TYPE_U32;
+		return 0;
+	case 64:
+		*otype = OBJECT_TYPE_U64;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * Specialize a get-index instruction applied to the object on top of
+ * the virtual stack: validate the element type, compute the byte offset
+ * of element @index, append a struct bytecode_get_index_data descriptor
+ * to the runtime data area, and store that descriptor's offset into the
+ * instruction's index field (@idx_len selects u16 vs u64 encoding).
+ * Returns 0 on success, -EINVAL on invalid type, out-of-bounds index,
+ * or data-area failure.
+ */
+static int specialize_get_index(struct bytecode_runtime *runtime,
+		struct load_op *insn, uint64_t index,
+		struct vstack_entry *stack_top,
+		int idx_len)
+{
+	int ret;
+	struct bytecode_get_index_data gid;
+	ssize_t data_offset;
+
+	memset(&gid, 0, sizeof(gid));
+	switch (stack_top->load.type) {
+	case LOAD_OBJECT:
+		switch (stack_top->load.object_type) {
+		case OBJECT_TYPE_ARRAY:
+		{
+			const struct lttng_integer_type *integer_type;
+			const struct lttng_event_field *field;
+			uint32_t elem_len, num_elems;
+			int signedness;
+
+			field = stack_top->load.field;
+			/* Only arrays of byte-wise integers can be indexed. */
+			if (!lttng_is_bytewise_integer(field->type.u.array_nestable.elem_type)) {
+				ret = -EINVAL;
+				goto end;
+			}
+			integer_type = &field->type.u.array_nestable.elem_type->u.integer;
+			num_elems = field->type.u.array_nestable.length;
+			elem_len = integer_type->size;
+			signedness = integer_type->signedness;
+			/* Array length is static: bound-check the index at specialize time. */
+			if (index >= num_elems) {
+				ret = -EINVAL;
+				goto end;
+			}
+			ret = specialize_get_index_object_type(&stack_top->load.object_type,
+					signedness, elem_len);
+			if (ret)
+				goto end;
+			gid.offset = index * (elem_len / CHAR_BIT);
+			gid.array_len = num_elems * (elem_len / CHAR_BIT);
+			gid.elem.type = stack_top->load.object_type;
+			gid.elem.len = elem_len;
+			if (integer_type->reverse_byte_order)
+				gid.elem.rev_bo = true;
+			stack_top->load.rev_bo = gid.elem.rev_bo;
+			break;
+		}
+		case OBJECT_TYPE_SEQUENCE:
+		{
+			const struct lttng_integer_type *integer_type;
+			const struct lttng_event_field *field;
+			uint32_t elem_len;
+			int signedness;
+
+			field = stack_top->load.field;
+			if (!lttng_is_bytewise_integer(field->type.u.sequence_nestable.elem_type)) {
+				ret = -EINVAL;
+				goto end;
+			}
+			integer_type = &field->type.u.sequence_nestable.elem_type->u.integer;
+			elem_len = integer_type->size;
+			signedness = integer_type->signedness;
+			/* NOTE(review): no static bound check here — sequence length is
+			 * presumably only known at runtime; confirm the interpreter
+			 * bound-checks sequence accesses. */
+			ret = specialize_get_index_object_type(&stack_top->load.object_type,
+					signedness, elem_len);
+			if (ret)
+				goto end;
+			gid.offset = index * (elem_len / CHAR_BIT);
+			gid.elem.type = stack_top->load.object_type;
+			gid.elem.len = elem_len;
+			if (integer_type->reverse_byte_order)
+				gid.elem.rev_bo = true;
+			stack_top->load.rev_bo = gid.elem.rev_bo;
+			break;
+		}
+		case OBJECT_TYPE_STRUCT:
+			/* Only generated by the specialize phase. */
+		case OBJECT_TYPE_VARIANT:	/* Fall-through */
+		default:
+			printk(KERN_WARNING "Unexpected get index type %d",
+				(int) stack_top->load.object_type);
+			ret = -EINVAL;
+			goto end;
+		}
+		break;
+	case LOAD_ROOT_CONTEXT:
+	case LOAD_ROOT_APP_CONTEXT:
+	case LOAD_ROOT_PAYLOAD:
+		printk(KERN_WARNING "Index lookup for root field not implemented yet.\n");
+		ret = -EINVAL;
+		goto end;
+	}
+	/* Persist the descriptor; the instruction references it by offset. */
+	data_offset = bytecode_push_data(runtime, &gid,
+		__alignof__(gid), sizeof(gid));
+	if (data_offset < 0) {
+		ret = -EINVAL;
+		goto end;
+	}
+	switch (idx_len) {
+	case 2:
+		((struct get_index_u16 *) insn->data)->index = data_offset;
+		break;
+	case 8:
+		((struct get_index_u64 *) insn->data)->index = data_offset;
+		break;
+	default:
+		ret = -EINVAL;
+		goto end;
+	}
+
+	return 0;
+
+end:
+	return ret;
+}
+
+/*
+ * Resolve the context field name referenced by a get_symbol instruction
+ * (name lives in the bytecode's relocation table) and return its index
+ * within @ctx, or a negative value if not found.
+ */
+static int specialize_context_lookup_name(struct lttng_ctx *ctx,
+		struct bytecode_runtime *bytecode,
+		struct load_op *insn)
+{
+	const struct get_symbol *sym = (const struct get_symbol *) insn->data;
+	const char *name = bytecode->p.bc->bc.data
+			+ bytecode->p.bc->bc.reloc_offset + sym->offset;
+
+	return lttng_get_context_index(ctx, name);
+}
+
+/*
+ * Derive the vstack load object type from an event field's declared
+ * type. @is_context selects context-field semantics: context arrays and
+ * sequences are exposed as strings. Returns 0 on success, -EINVAL for
+ * types that cannot be loaded (structs, variants, unknown).
+ */
+static int specialize_load_object(const struct lttng_event_field *field,
+		struct vstack_load *load, bool is_context)
+{
+	load->type = LOAD_OBJECT;
+
+	switch (field->type.atype) {
+	case atype_integer:
+		/* All integers are tracked as 64-bit registers. */
+		if (field->type.u.integer.signedness)
+			load->object_type = OBJECT_TYPE_S64;
+		else
+			load->object_type = OBJECT_TYPE_U64;
+		load->rev_bo = false;
+		break;
+	case atype_enum_nestable:
+	{
+		/* Enum signedness comes from its integer container type. */
+		const struct lttng_integer_type *itype =
+			&field->type.u.enum_nestable.container_type->u.integer;
+
+		if (itype->signedness)
+			load->object_type = OBJECT_TYPE_SIGNED_ENUM;
+		else
+			load->object_type = OBJECT_TYPE_UNSIGNED_ENUM;
+		load->rev_bo = false;
+		break;
+	}
+	case atype_array_nestable:
+		if (!lttng_is_bytewise_integer(field->type.u.array_nestable.elem_type)) {
+			printk(KERN_WARNING "Array nesting only supports integer types.\n");
+			return -EINVAL;
+		}
+		if (is_context) {
+			load->object_type = OBJECT_TYPE_STRING;
+		} else {
+			/* encode_none means a true integer array; otherwise it is
+			 * an encoded character sequence, loaded as a string. */
+			if (field->type.u.array_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
+				load->object_type = OBJECT_TYPE_ARRAY;
+				load->field = field;
+			} else {
+				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
+			}
+		}
+		break;
+	case atype_sequence_nestable:
+		if (!lttng_is_bytewise_integer(field->type.u.sequence_nestable.elem_type)) {
+			printk(KERN_WARNING "Sequence nesting only supports integer types.\n");
+			return -EINVAL;
+		}
+		if (is_context) {
+			load->object_type = OBJECT_TYPE_STRING;
+		} else {
+			if (field->type.u.sequence_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
+				load->object_type = OBJECT_TYPE_SEQUENCE;
+				load->field = field;
+			} else {
+				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
+			}
+		}
+		break;
+	case atype_string:
+		load->object_type = OBJECT_TYPE_STRING;
+		break;
+	case atype_struct_nestable:
+		printk(KERN_WARNING "Structure type cannot be loaded.\n");
+		return -EINVAL;
+	case atype_variant_nestable:
+		printk(KERN_WARNING "Variant type cannot be loaded.\n");
+		return -EINVAL;
+	default:
+		printk(KERN_WARNING "Unknown type: %d", (int) field->type.atype);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * Specialize a get_symbol lookup of a context field into a
+ * BYTECODE_OP_GET_INDEX_U16 instruction: resolve the field name to a
+ * context index and append a get-index descriptor referencing it.
+ * Returns 0 on success, -ENOENT if the name is unknown, -EINVAL on
+ * type or data-area failure.
+ */
+static int specialize_context_lookup(struct lttng_ctx *ctx,
+		struct bytecode_runtime *runtime,
+		struct load_op *insn,
+		struct vstack_load *load)
+{
+	int idx, ret;
+	struct lttng_ctx_field *ctx_field;
+	struct lttng_event_field *field;
+	struct bytecode_get_index_data gid;
+	ssize_t data_offset;
+
+	idx = specialize_context_lookup_name(ctx, runtime, insn);
+	if (idx < 0) {
+		return -ENOENT;
+	}
+	/* NOTE(review): the name is resolved against @ctx but the field is
+	 * fetched from the global lttng_static_ctx — confirm both always
+	 * alias the same context field array. */
+	ctx_field = &lttng_static_ctx->fields[idx];
+	field = &ctx_field->event_field;
+	ret = specialize_load_object(field, load, true);
+	if (ret)
+		return ret;
+	/* Specialize each get_symbol into a get_index. */
+	insn->op = BYTECODE_OP_GET_INDEX_U16;
+	memset(&gid, 0, sizeof(gid));
+	gid.ctx_index = idx;
+	gid.elem.type = load->object_type;
+	gid.elem.rev_bo = load->rev_bo;
+	gid.field = field;
+	data_offset = bytecode_push_data(runtime, &gid,
+		__alignof__(gid), sizeof(gid));
+	if (data_offset < 0) {
+		return -EINVAL;
+	}
+	((struct get_index_u16 *) insn->data)->index = data_offset;
+	return 0;
+}
+
+/*
+ * Specialize a get_symbol lookup of an event payload field into a
+ * BYTECODE_OP_GET_INDEX_U16 instruction: locate the field by name in
+ * the event descriptor, compute its byte offset within the interpreter
+ * stack frame, and append a get-index descriptor referencing it.
+ * Returns 0 on success, -EINVAL if the field is unknown or unsupported.
+ */
+static int specialize_payload_lookup(const struct lttng_event_desc *event_desc,
+		struct bytecode_runtime *runtime,
+		struct load_op *insn,
+		struct vstack_load *load)
+{
+	const char *name;
+	uint16_t offset;
+	unsigned int i, nr_fields;
+	bool found = false;
+	uint32_t field_offset = 0;
+	const struct lttng_event_field *field;
+	int ret;
+	struct bytecode_get_index_data gid;
+	ssize_t data_offset;
+
+	nr_fields = event_desc->nr_fields;
+	/* Field name lives in the bytecode's relocation table. */
+	offset = ((struct get_symbol *) insn->data)->offset;
+	name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
+	for (i = 0; i < nr_fields; i++) {
+		field = &event_desc->fields[i];
+		/* NOTE(review): nofilter fields are skipped without advancing
+		 * field_offset — confirm the stack-frame serializer omits them
+		 * as well, otherwise offsets would drift. */
+		if (field->nofilter) {
+			continue;
+		}
+		if (!strcmp(field->name, name)) {
+			found = true;
+			break;
+		}
+		/* compute field offset on stack: each preceding field occupies
+		 * a fixed slot in the serialized frame. */
+		switch (field->type.atype) {
+		case atype_integer:
+		case atype_enum_nestable:
+			field_offset += sizeof(int64_t);
+			break;
+		case atype_array_nestable:
+		case atype_sequence_nestable:
+			/* Length word followed by data pointer. */
+			field_offset += sizeof(unsigned long);
+			field_offset += sizeof(void *);
+			break;
+		case atype_string:
+			field_offset += sizeof(void *);
+			break;
+		default:
+			ret = -EINVAL;
+			goto end;
+		}
+	}
+	if (!found) {
+		ret = -EINVAL;
+		goto end;
+	}
+
+	ret = specialize_load_object(field, load, false);
+	if (ret)
+		goto end;
+
+	/* Specialize each get_symbol into a get_index. */
+	insn->op = BYTECODE_OP_GET_INDEX_U16;
+	memset(&gid, 0, sizeof(gid));
+	gid.offset = field_offset;
+	gid.elem.type = load->object_type;
+	gid.elem.rev_bo = load->rev_bo;
+	gid.field = field;
+	data_offset = bytecode_push_data(runtime, &gid,
+		__alignof__(gid), sizeof(gid));
+	if (data_offset < 0) {
+		ret = -EINVAL;
+		goto end;
+	}
+	((struct get_index_u16 *) insn->data)->index = data_offset;
+	ret = 0;
+end:
+	return ret;
+}
+
+int lttng_bytecode_specialize(const struct lttng_event_desc *event_desc,
+               struct bytecode_runtime *bytecode)
+{
+       void *pc, *next_pc, *start_pc;
+       int ret = -EINVAL;
+       struct vstack _stack;
+       struct vstack *stack = &_stack;
+       struct lttng_ctx *ctx = bytecode->p.ctx;
+
+       vstack_init(stack);
+
+       start_pc = &bytecode->code[0];
+       for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
+                       pc = next_pc) {
+               switch (*(bytecode_opcode_t *) pc) {
+               case BYTECODE_OP_UNKNOWN:
+               default:
+                       printk(KERN_WARNING "unknown bytecode op %u\n",
+                               (unsigned int) *(bytecode_opcode_t *) pc);
+                       ret = -EINVAL;
+                       goto end;
+
+               case BYTECODE_OP_RETURN:
+               case BYTECODE_OP_RETURN_S64:
+                       ret = 0;
+                       goto end;
+
+               /* binary */
+               case BYTECODE_OP_MUL:
+               case BYTECODE_OP_DIV:
+               case BYTECODE_OP_MOD:
+               case BYTECODE_OP_PLUS:
+               case BYTECODE_OP_MINUS:
+                       printk(KERN_WARNING "unsupported bytecode op %u\n",
+                               (unsigned int) *(bytecode_opcode_t *) pc);
+                       ret = -EINVAL;
+                       goto end;
+
+               case BYTECODE_OP_EQ:
+               {
+                       struct binary_op *insn = (struct binary_op *) pc;
+
+                       switch(vstack_ax(stack)->type) {
+                       default:
+                               printk(KERN_WARNING "unknown register type\n");
+                               ret = -EINVAL;
+                               goto end;
+
+                       case REG_STRING:
+                               if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
+                                       insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING;
+                               else
+                                       insn->op = BYTECODE_OP_EQ_STRING;
+                               break;
+                       case REG_STAR_GLOB_STRING:
+                               insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING;
+                               break;
+                       case REG_S64:
+                               if (vstack_bx(stack)->type == REG_S64)
+                                       insn->op = BYTECODE_OP_EQ_S64;
+                               else
+                                       insn->op = BYTECODE_OP_EQ_DOUBLE_S64;
+                               break;
+                       case REG_DOUBLE:
+                               if (vstack_bx(stack)->type == REG_S64)
+                                       insn->op = BYTECODE_OP_EQ_S64_DOUBLE;
+                               else
+                                       insn->op = BYTECODE_OP_EQ_DOUBLE;
+                               break;
+                       }
+                       /* Pop 2, push 1 */
+                       if (vstack_pop(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_S64;
+                       next_pc += sizeof(struct binary_op);
+                       break;
+               }
+
+               case BYTECODE_OP_NE:
+               {
+                       struct binary_op *insn = (struct binary_op *) pc;
+
+                       switch(vstack_ax(stack)->type) {
+                       default:
+                               printk(KERN_WARNING "unknown register type\n");
+                               ret = -EINVAL;
+                               goto end;
+
+                       case REG_STRING:
+                               if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
+                                       insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING;
+                               else
+                                       insn->op = BYTECODE_OP_NE_STRING;
+                               break;
+                       case REG_STAR_GLOB_STRING:
+                               insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING;
+                               break;
+                       case REG_S64:
+                               if (vstack_bx(stack)->type == REG_S64)
+                                       insn->op = BYTECODE_OP_NE_S64;
+                               else
+                                       insn->op = BYTECODE_OP_NE_DOUBLE_S64;
+                               break;
+                       case REG_DOUBLE:
+                               if (vstack_bx(stack)->type == REG_S64)
+                                       insn->op = BYTECODE_OP_NE_S64_DOUBLE;
+                               else
+                                       insn->op = BYTECODE_OP_NE_DOUBLE;
+                               break;
+                       }
+                       /* Pop 2, push 1 */
+                       if (vstack_pop(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_S64;
+                       next_pc += sizeof(struct binary_op);
+                       break;
+               }
+
+               case BYTECODE_OP_GT:
+               {
+                       struct binary_op *insn = (struct binary_op *) pc;
+
+                       switch(vstack_ax(stack)->type) {
+                       default:
+                               printk(KERN_WARNING "unknown register type\n");
+                               ret = -EINVAL;
+                               goto end;
+
+                       case REG_STAR_GLOB_STRING:
+                               printk(KERN_WARNING "invalid register type for > binary operator\n");
+                               ret = -EINVAL;
+                               goto end;
+                       case REG_STRING:
+                               insn->op = BYTECODE_OP_GT_STRING;
+                               break;
+                       case REG_S64:
+                               if (vstack_bx(stack)->type == REG_S64)
+                                       insn->op = BYTECODE_OP_GT_S64;
+                               else
+                                       insn->op = BYTECODE_OP_GT_DOUBLE_S64;
+                               break;
+                       case REG_DOUBLE:
+                               if (vstack_bx(stack)->type == REG_S64)
+                                       insn->op = BYTECODE_OP_GT_S64_DOUBLE;
+                               else
+                                       insn->op = BYTECODE_OP_GT_DOUBLE;
+                               break;
+                       }
+                       /* Pop 2, push 1 */
+                       if (vstack_pop(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_S64;
+                       next_pc += sizeof(struct binary_op);
+                       break;
+               }
+
+               case BYTECODE_OP_LT:
+               {
+                       struct binary_op *insn = (struct binary_op *) pc;
+
+                       switch(vstack_ax(stack)->type) {
+                       default:
+                               printk(KERN_WARNING "unknown register type\n");
+                               ret = -EINVAL;
+                               goto end;
+
+                       case REG_STAR_GLOB_STRING:
+                               printk(KERN_WARNING "invalid register type for < binary operator\n");
+                               ret = -EINVAL;
+                               goto end;
+                       case REG_STRING:
+                               insn->op = BYTECODE_OP_LT_STRING;
+                               break;
+                       case REG_S64:
+                               if (vstack_bx(stack)->type == REG_S64)
+                                       insn->op = BYTECODE_OP_LT_S64;
+                               else
+                                       insn->op = BYTECODE_OP_LT_DOUBLE_S64;
+                               break;
+                       case REG_DOUBLE:
+                               if (vstack_bx(stack)->type == REG_S64)
+                                       insn->op = BYTECODE_OP_LT_S64_DOUBLE;
+                               else
+                                       insn->op = BYTECODE_OP_LT_DOUBLE;
+                               break;
+                       }
+                       /* Pop 2, push 1 */
+                       if (vstack_pop(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_S64;
+                       next_pc += sizeof(struct binary_op);
+                       break;
+               }
+
+               case BYTECODE_OP_GE:
+               {
+                       struct binary_op *insn = (struct binary_op *) pc;
+
+                       switch(vstack_ax(stack)->type) {
+                       default:
+                               printk(KERN_WARNING "unknown register type\n");
+                               ret = -EINVAL;
+                               goto end;
+
+                       case REG_STAR_GLOB_STRING:
+                               printk(KERN_WARNING "invalid register type for >= binary operator\n");
+                               ret = -EINVAL;
+                               goto end;
+                       case REG_STRING:
+                               insn->op = BYTECODE_OP_GE_STRING;
+                               break;
+                       case REG_S64:
+                               if (vstack_bx(stack)->type == REG_S64)
+                                       insn->op = BYTECODE_OP_GE_S64;
+                               else
+                                       insn->op = BYTECODE_OP_GE_DOUBLE_S64;
+                               break;
+                       case REG_DOUBLE:
+                               if (vstack_bx(stack)->type == REG_S64)
+                                       insn->op = BYTECODE_OP_GE_S64_DOUBLE;
+                               else
+                                       insn->op = BYTECODE_OP_GE_DOUBLE;
+                               break;
+                       }
+                       /* Pop 2, push 1 */
+                       if (vstack_pop(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_S64;
+                       next_pc += sizeof(struct binary_op);
+                       break;
+               }
+		case BYTECODE_OP_LE:
+		{
+			struct binary_op *insn = (struct binary_op *) pc;
+
+			switch(vstack_ax(stack)->type) {
+			default:
+				printk(KERN_WARNING "unknown register type\n");
+				ret = -EINVAL;
+				goto end;
+
+			case REG_STAR_GLOB_STRING:
+				printk(KERN_WARNING "invalid register type for <= binary operator\n");
+				ret = -EINVAL;
+				goto end;
+			case REG_STRING:
+				insn->op = BYTECODE_OP_LE_STRING;
+				break;
+			case REG_S64:
+				if (vstack_bx(stack)->type == REG_S64)
+					insn->op = BYTECODE_OP_LE_S64;
+				else
+					insn->op = BYTECODE_OP_LE_DOUBLE_S64;
+				break;
+			case REG_DOUBLE:
+				if (vstack_bx(stack)->type == REG_S64)
+					insn->op = BYTECODE_OP_LE_S64_DOUBLE;
+				else
+					insn->op = BYTECODE_OP_LE_DOUBLE;
+				break;
+			}
+			/* Pop 2, push 1. Fix: the pop was missing for LE, leaving the
+			 * virtual stack one entry too deep and desynchronizing the
+			 * type tracking for all following instructions (EQ/NE/GT/LT/GE
+			 * all pop here). */
+			if (vstack_pop(stack)) {
+				ret = -EINVAL;
+				goto end;
+			}
+			vstack_ax(stack)->type = REG_S64;
+			next_pc += sizeof(struct binary_op);
+			break;
+		}
+
+               case BYTECODE_OP_EQ_STRING:
+               case BYTECODE_OP_NE_STRING:
+               case BYTECODE_OP_GT_STRING:
+               case BYTECODE_OP_LT_STRING:
+               case BYTECODE_OP_GE_STRING:
+               case BYTECODE_OP_LE_STRING:
+               case BYTECODE_OP_EQ_STAR_GLOB_STRING:
+               case BYTECODE_OP_NE_STAR_GLOB_STRING:
+               case BYTECODE_OP_EQ_S64:
+               case BYTECODE_OP_NE_S64:
+               case BYTECODE_OP_GT_S64:
+               case BYTECODE_OP_LT_S64:
+               case BYTECODE_OP_GE_S64:
+               case BYTECODE_OP_LE_S64:
+               case BYTECODE_OP_EQ_DOUBLE:
+               case BYTECODE_OP_NE_DOUBLE:
+               case BYTECODE_OP_GT_DOUBLE:
+               case BYTECODE_OP_LT_DOUBLE:
+               case BYTECODE_OP_GE_DOUBLE:
+               case BYTECODE_OP_LE_DOUBLE:
+               case BYTECODE_OP_EQ_DOUBLE_S64:
+               case BYTECODE_OP_NE_DOUBLE_S64:
+               case BYTECODE_OP_GT_DOUBLE_S64:
+               case BYTECODE_OP_LT_DOUBLE_S64:
+               case BYTECODE_OP_GE_DOUBLE_S64:
+               case BYTECODE_OP_LE_DOUBLE_S64:
+               case BYTECODE_OP_EQ_S64_DOUBLE:
+               case BYTECODE_OP_NE_S64_DOUBLE:
+               case BYTECODE_OP_GT_S64_DOUBLE:
+               case BYTECODE_OP_LT_S64_DOUBLE:
+               case BYTECODE_OP_GE_S64_DOUBLE:
+               case BYTECODE_OP_LE_S64_DOUBLE:
+               case BYTECODE_OP_BIT_RSHIFT:
+               case BYTECODE_OP_BIT_LSHIFT:
+               case BYTECODE_OP_BIT_AND:
+               case BYTECODE_OP_BIT_OR:
+               case BYTECODE_OP_BIT_XOR:
+               {
+                       /* Pop 2, push 1 */
+                       if (vstack_pop(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_S64;
+                       next_pc += sizeof(struct binary_op);
+                       break;
+               }
+
+               /* unary */
+               case BYTECODE_OP_UNARY_PLUS:
+               {
+                       struct unary_op *insn = (struct unary_op *) pc;
+
+                       switch(vstack_ax(stack)->type) {
+                       default:
+                               printk(KERN_WARNING "unknown register type\n");
+                               ret = -EINVAL;
+                               goto end;
+
+                       case REG_S64:
+                               insn->op = BYTECODE_OP_UNARY_PLUS_S64;
+                               break;
+                       case REG_DOUBLE:
+                               insn->op = BYTECODE_OP_UNARY_PLUS_DOUBLE;
+                               break;
+                       }
+                       /* Pop 1, push 1 */
+                       next_pc += sizeof(struct unary_op);
+                       break;
+               }
+
+               case BYTECODE_OP_UNARY_MINUS:
+               {
+                       struct unary_op *insn = (struct unary_op *) pc;
+
+                       switch(vstack_ax(stack)->type) {
+                       default:
+                               printk(KERN_WARNING "unknown register type\n");
+                               ret = -EINVAL;
+                               goto end;
+
+                       case REG_S64:
+                               insn->op = BYTECODE_OP_UNARY_MINUS_S64;
+                               break;
+                       case REG_DOUBLE:
+                               insn->op = BYTECODE_OP_UNARY_MINUS_DOUBLE;
+                               break;
+                       }
+                       /* Pop 1, push 1 */
+                       next_pc += sizeof(struct unary_op);
+                       break;
+               }
+
+               case BYTECODE_OP_UNARY_NOT:
+               {
+                       struct unary_op *insn = (struct unary_op *) pc;
+
+                       switch(vstack_ax(stack)->type) {
+                       default:
+                               printk(KERN_WARNING "unknown register type\n");
+                               ret = -EINVAL;
+                               goto end;
+
+                       case REG_S64:
+                               insn->op = BYTECODE_OP_UNARY_NOT_S64;
+                               break;
+                       case REG_DOUBLE:
+                               insn->op = BYTECODE_OP_UNARY_NOT_DOUBLE;
+                               break;
+                       }
+                       /* Pop 1, push 1 */
+                       next_pc += sizeof(struct unary_op);
+                       break;
+               }
+
+               case BYTECODE_OP_UNARY_BIT_NOT:
+               {
+                       /* Pop 1, push 1 */
+                       next_pc += sizeof(struct unary_op);
+                       break;
+               }
+
+               case BYTECODE_OP_UNARY_PLUS_S64:
+               case BYTECODE_OP_UNARY_MINUS_S64:
+               case BYTECODE_OP_UNARY_NOT_S64:
+               case BYTECODE_OP_UNARY_PLUS_DOUBLE:
+               case BYTECODE_OP_UNARY_MINUS_DOUBLE:
+               case BYTECODE_OP_UNARY_NOT_DOUBLE:
+               {
+                       /* Pop 1, push 1 */
+                       next_pc += sizeof(struct unary_op);
+                       break;
+               }
+
+               /* logical */
+               case BYTECODE_OP_AND:
+               case BYTECODE_OP_OR:
+               {
+                       /* Continue to next instruction */
+                       /* Pop 1 when jump not taken */
+                       if (vstack_pop(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       next_pc += sizeof(struct logical_op);
+                       break;
+               }
+
+               /* load field ref */
+               case BYTECODE_OP_LOAD_FIELD_REF:
+               {
+                       printk(KERN_WARNING "Unknown field ref type\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               /* get context ref */
+               case BYTECODE_OP_GET_CONTEXT_REF:
+               {
+                       printk(KERN_WARNING "Unknown get context ref type\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               case BYTECODE_OP_LOAD_FIELD_REF_STRING:
+               case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
+               case BYTECODE_OP_GET_CONTEXT_REF_STRING:
+               case BYTECODE_OP_LOAD_FIELD_REF_USER_STRING:
+               case BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE:
+               {
+                       if (vstack_push(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_STRING;
+                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+                       break;
+               }
+               case BYTECODE_OP_LOAD_FIELD_REF_S64:
+               case BYTECODE_OP_GET_CONTEXT_REF_S64:
+               {
+                       if (vstack_push(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_S64;
+                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+                       break;
+               }
+               case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
+               case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
+               {
+                       if (vstack_push(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_DOUBLE;
+                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+                       break;
+               }
+
+               /* load from immediate operand */
+               case BYTECODE_OP_LOAD_STRING:
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+
+                       if (vstack_push(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_STRING;
+                       next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+                       break;
+               }
+
+               case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+
+                       if (vstack_push(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
+                       next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+                       break;
+               }
+
+               case BYTECODE_OP_LOAD_S64:
+               {
+                       if (vstack_push(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_S64;
+                       next_pc += sizeof(struct load_op)
+                                       + sizeof(struct literal_numeric);
+                       break;
+               }
+
+               case BYTECODE_OP_LOAD_DOUBLE:
+               {
+                       if (vstack_push(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_DOUBLE;
+                       next_pc += sizeof(struct load_op)
+                                       + sizeof(struct literal_double);
+                       break;
+               }
+
+               /* cast */
+               case BYTECODE_OP_CAST_TO_S64:
+               {
+                       struct cast_op *insn = (struct cast_op *) pc;
+
+                       switch (vstack_ax(stack)->type) {
+                       default:
+                               printk(KERN_WARNING "unknown register type\n");
+                               ret = -EINVAL;
+                               goto end;
+
+                       case REG_STRING:
+                       case REG_STAR_GLOB_STRING:
+                               printk(KERN_WARNING "Cast op can only be applied to numeric or floating point registers\n");
+                               ret = -EINVAL;
+                               goto end;
+                       case REG_S64:
+                               insn->op = BYTECODE_OP_CAST_NOP;
+                               break;
+                       case REG_DOUBLE:
+                               insn->op = BYTECODE_OP_CAST_DOUBLE_TO_S64;
+                               break;
+                       }
+                       /* Pop 1, push 1 */
+                       vstack_ax(stack)->type = REG_S64;
+                       next_pc += sizeof(struct cast_op);
+                       break;
+               }
+               case BYTECODE_OP_CAST_DOUBLE_TO_S64:
+               {
+                       /* Pop 1, push 1 */
+                       vstack_ax(stack)->type = REG_S64;
+                       next_pc += sizeof(struct cast_op);
+                       break;
+               }
+               case BYTECODE_OP_CAST_NOP:
+               {
+                       next_pc += sizeof(struct cast_op);
+                       break;
+               }
+
+               /*
+                * Instructions for recursive traversal through composed types.
+                */
+               case BYTECODE_OP_GET_CONTEXT_ROOT:
+               {
+                       if (vstack_push(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_PTR;
+                       vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
+                       next_pc += sizeof(struct load_op);
+                       break;
+               }
+               case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
+               {
+                       if (vstack_push(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_PTR;
+                       vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
+                       next_pc += sizeof(struct load_op);
+                       break;
+               }
+               case BYTECODE_OP_GET_PAYLOAD_ROOT:
+               {
+                       if (vstack_push(stack)) {
+                               ret = -EINVAL;
+                               goto end;
+                       }
+                       vstack_ax(stack)->type = REG_PTR;
+                       vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
+                       next_pc += sizeof(struct load_op);
+                       break;
+               }
+
+               case BYTECODE_OP_LOAD_FIELD:
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+
+                       WARN_ON_ONCE(vstack_ax(stack)->type != REG_PTR);
+                       /* Pop 1, push 1 */
+                       ret = specialize_load_field(vstack_ax(stack), insn);
+                       if (ret)
+                               goto end;
+
+                       next_pc += sizeof(struct load_op);
+                       break;
+               }
+
+               case BYTECODE_OP_LOAD_FIELD_S8:
+               case BYTECODE_OP_LOAD_FIELD_S16:
+               case BYTECODE_OP_LOAD_FIELD_S32:
+               case BYTECODE_OP_LOAD_FIELD_S64:
+               case BYTECODE_OP_LOAD_FIELD_U8:
+               case BYTECODE_OP_LOAD_FIELD_U16:
+               case BYTECODE_OP_LOAD_FIELD_U32:
+               case BYTECODE_OP_LOAD_FIELD_U64:
+               {
+                       /* Pop 1, push 1 */
+                       vstack_ax(stack)->type = REG_S64;
+                       next_pc += sizeof(struct load_op);
+                       break;
+               }
+
+               case BYTECODE_OP_LOAD_FIELD_STRING:
+               case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
+               {
+                       /* Pop 1, push 1 */
+                       vstack_ax(stack)->type = REG_STRING;
+                       next_pc += sizeof(struct load_op);
+                       break;
+               }
+
+               case BYTECODE_OP_LOAD_FIELD_DOUBLE:
+               {
+                       /* Pop 1, push 1 */
+                       vstack_ax(stack)->type = REG_DOUBLE;
+                       next_pc += sizeof(struct load_op);
+                       break;
+               }
+
+               case BYTECODE_OP_GET_SYMBOL:
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+
+                       dbg_printk("op get symbol\n");
+                       switch (vstack_ax(stack)->load.type) {
+                       case LOAD_OBJECT:
+                               printk(KERN_WARNING "Nested fields not implemented yet.\n");
+                               ret = -EINVAL;
+                               goto end;
+                       case LOAD_ROOT_CONTEXT:
+                               /* Lookup context field. */
+                               ret = specialize_context_lookup(ctx, bytecode, insn,
+                                       &vstack_ax(stack)->load);
+                               if (ret)
+                                       goto end;
+                               break;
+                       case LOAD_ROOT_APP_CONTEXT:
+                               ret = -EINVAL;
+                               goto end;
+                       case LOAD_ROOT_PAYLOAD:
+                               /* Lookup event payload field. */
+                               ret = specialize_payload_lookup(event_desc,
+                                       bytecode, insn,
+                                       &vstack_ax(stack)->load);
+                               if (ret)
+                                       goto end;
+                               break;
+                       }
+                       next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
+                       break;
+               }
+
+               case BYTECODE_OP_GET_SYMBOL_FIELD:
+               {
+                       /* Always generated by specialize phase. */
+                       ret = -EINVAL;
+                       goto end;
+               }
+
+               case BYTECODE_OP_GET_INDEX_U16:
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+                       struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
+
+                       dbg_printk("op get index u16\n");
+                       /* Pop 1, push 1 */
+                       ret = specialize_get_index(bytecode, insn, index->index,
+                                       vstack_ax(stack), sizeof(*index));
+                       if (ret)
+                               goto end;
+                       next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
+                       break;
+               }
+
+               case BYTECODE_OP_GET_INDEX_U64:
+               {
+                       struct load_op *insn = (struct load_op *) pc;
+                       struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
+
+                       dbg_printk("op get index u64\n");
+                       /* Pop 1, push 1 */
+                       ret = specialize_get_index(bytecode, insn, index->index,
+                                       vstack_ax(stack), sizeof(*index));
+                       if (ret)
+                               goto end;
+                       next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
+                       break;
+               }
+
+               }
+       }
+end:
+       return ret;
+}
diff --git a/src/lttng-bytecode-validator.c b/src/lttng-bytecode-validator.c
new file mode 100644 (file)
index 0000000..bcbbe61
--- /dev/null
@@ -0,0 +1,1821 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * lttng-bytecode-validator.c
+ *
+ * LTTng modules bytecode validator.
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/types.h>
+#include <linux/jhash.h>
+#include <linux/slab.h>
+
+#include <wrapper/list.h>
+#include <lttng/lttng-bytecode.h>
+
+/* Number of index bits for the merge point hash table (128 buckets). */
+#define MERGE_POINT_TABLE_BITS         7
+#define MERGE_POINT_TABLE_SIZE         (1U << MERGE_POINT_TABLE_BITS)
+
+/* merge point table node */
+struct mp_node {
+       /* Chaining into the per-bucket hlist of struct mp_table. */
+       struct hlist_node node;
+
+       /* Context at merge point */
+       struct vstack stack;
+       /* Bytecode offset of this merge point; hash key of the table. */
+       unsigned long target_pc;
+};
+
+/* Hash table of merge points, keyed by jhash of target_pc. */
+struct mp_table {
+       struct hlist_head mp_head[MERGE_POINT_TABLE_SIZE];
+};
+
+static
+int lttng_hash_match(struct mp_node *mp_node, unsigned long key_pc)
+{
+       if (mp_node->target_pc == key_pc)
+               return 1;
+       else
+               return 0;
+}
+
+static
+int merge_points_compare(const struct vstack *stacka,
+                       const struct vstack *stackb)
+{
+       int i, len;
+
+       if (stacka->top != stackb->top)
+               return 1;
+       len = stacka->top + 1;
+       WARN_ON_ONCE(len < 0);
+       for (i = 0; i < len; i++) {
+               if (stacka->e[i].type != stackb->e[i].type)
+                       return 1;
+       }
+       return 0;
+}
+
+static
+int merge_point_add_check(struct mp_table *mp_table, unsigned long target_pc,
+               const struct vstack *stack)
+{
+       struct mp_node *mp_node;
+       unsigned long hash = jhash_1word(target_pc, 0);
+       struct hlist_head *head;
+       struct mp_node *lookup_node;
+       int found = 0;
+
+       dbg_printk("Bytecode: adding merge point at offset %lu, hash %lu\n",
+                       target_pc, hash);
+       mp_node = kzalloc(sizeof(struct mp_node), GFP_KERNEL);
+       if (!mp_node)
+               return -ENOMEM;
+       mp_node->target_pc = target_pc;
+       memcpy(&mp_node->stack, stack, sizeof(mp_node->stack));
+
+       head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
+       lttng_hlist_for_each_entry(lookup_node, head, node) {
+               if (lttng_hash_match(lookup_node, target_pc)) {
+                       found = 1;
+                       break;
+               }
+       }
+       if (found) {
+               /* Key already present */
+               dbg_printk("Bytecode: compare merge points for offset %lu, hash %lu\n",
+                               target_pc, hash);
+               kfree(mp_node);
+               if (merge_points_compare(stack, &lookup_node->stack)) {
+                       printk(KERN_WARNING "Merge points differ for offset %lu\n",
+                               target_pc);
+                       return -EINVAL;
+               }
+       } else {
+               hlist_add_head(&mp_node->node, head);
+       }
+       return 0;
+}
+
+/*
+ * Binary comparators use top of stack and top of stack -1.
+ */
+static
+int bin_op_compare_check(struct vstack *stack, const bytecode_opcode_t opcode,
+               const char *str)
+{
+       if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
+               goto error_empty;
+
+       switch (vstack_ax(stack)->type) {
+       default:
+       case REG_DOUBLE:
+               goto error_type;
+
+       case REG_STRING:
+               switch (vstack_bx(stack)->type) {
+               default:
+               case REG_DOUBLE:
+                       goto error_type;
+               case REG_TYPE_UNKNOWN:
+                       goto unknown;
+               case REG_STRING:
+                       break;
+               case REG_STAR_GLOB_STRING:
+                       if (opcode != BYTECODE_OP_EQ && opcode != BYTECODE_OP_NE) {
+                               goto error_mismatch;
+                       }
+                       break;
+               case REG_S64:
+               case REG_U64:
+                       goto error_mismatch;
+               }
+               break;
+       case REG_STAR_GLOB_STRING:
+               switch (vstack_bx(stack)->type) {
+               default:
+               case REG_DOUBLE:
+                       goto error_type;
+               case REG_TYPE_UNKNOWN:
+                       goto unknown;
+               case REG_STRING:
+                       if (opcode != BYTECODE_OP_EQ && opcode != BYTECODE_OP_NE) {
+                               goto error_mismatch;
+                       }
+                       break;
+               case REG_STAR_GLOB_STRING:
+               case REG_S64:
+               case REG_U64:
+                       goto error_mismatch;
+               }
+               break;
+       case REG_S64:
+       case REG_U64:
+               switch (vstack_bx(stack)->type) {
+               default:
+               case REG_DOUBLE:
+                       goto error_type;
+               case REG_TYPE_UNKNOWN:
+                       goto unknown;
+               case REG_STRING:
+               case REG_STAR_GLOB_STRING:
+                       goto error_mismatch;
+               case REG_S64:
+               case REG_U64:
+                       break;
+               }
+               break;
+       case REG_TYPE_UNKNOWN:
+               switch (vstack_bx(stack)->type) {
+               default:
+               case REG_DOUBLE:
+                       goto error_type;
+               case REG_TYPE_UNKNOWN:
+               case REG_STRING:
+               case REG_STAR_GLOB_STRING:
+               case REG_S64:
+               case REG_U64:
+                       goto unknown;
+               }
+               break;
+       }
+       return 0;
+
+unknown:
+       return 1;
+
+error_empty:
+       printk(KERN_WARNING "empty stack for '%s' binary operator\n", str);
+       return -EINVAL;
+
+error_mismatch:
+       printk(KERN_WARNING "type mismatch for '%s' binary operator\n", str);
+       return -EINVAL;
+
+error_type:
+       printk(KERN_WARNING "unknown type for '%s' binary operator\n", str);
+       return -EINVAL;
+}
+
+/*
+ * Binary bitwise operators use top of stack and top of stack -1.
+ * Return 0 if typing is known to match, 1 if typing is dynamic
+ * (unknown), negative error value on error.
+ */
+static
+int bin_op_bitwise_check(struct vstack *stack, bytecode_opcode_t opcode,
+               const char *str)
+{
+       if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
+               goto error_empty;
+
+       switch (vstack_ax(stack)->type) {
+       default:
+       case REG_DOUBLE:
+               goto error_type;
+
+       case REG_TYPE_UNKNOWN:
+               switch (vstack_bx(stack)->type) {
+               default:
+               case REG_DOUBLE:
+                       goto error_type;
+               case REG_TYPE_UNKNOWN:
+               case REG_STRING:
+               case REG_STAR_GLOB_STRING:
+               case REG_S64:
+               case REG_U64:
+                       goto unknown;
+               }
+               break;
+       case REG_S64:
+       case REG_U64:
+               switch (vstack_bx(stack)->type) {
+               default:
+               case REG_DOUBLE:
+                       goto error_type;
+               case REG_TYPE_UNKNOWN:
+                       goto unknown;
+               case REG_S64:
+               case REG_U64:
+                       break;
+               }
+               break;
+       }
+       return 0;
+
+unknown:
+       return 1;
+
+error_empty:
+       printk(KERN_WARNING "empty stack for '%s' binary operator\n", str);
+       return -EINVAL;
+
+error_type:
+       printk(KERN_WARNING "unknown type for '%s' binary operator\n", str);
+       return -EINVAL;
+}
+
+static
+int validate_get_symbol(struct bytecode_runtime *bytecode,
+               const struct get_symbol *sym)
+{
+       const char *str, *str_limit;
+       size_t len_limit;
+
+       if (sym->offset >= bytecode->p.bc->bc.len - bytecode->p.bc->bc.reloc_offset)
+               return -EINVAL;
+
+       str = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + sym->offset;
+       str_limit = bytecode->p.bc->bc.data + bytecode->p.bc->bc.len;
+       len_limit = str_limit - str;
+       if (strnlen(str, len_limit) == len_limit)
+               return -EINVAL;
+       return 0;
+}
+
+/*
+ * Validate bytecode range overflow within the validation pass.
+ * Called for each instruction encountered.
+ */
+static
+int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
+               char *start_pc, char *pc)
+{
+       int ret = 0;
+
+       switch (*(bytecode_opcode_t *) pc) {
+       case BYTECODE_OP_UNKNOWN:
+       default:
+       {
+               printk(KERN_WARNING "unknown bytecode op %u\n",
+                       (unsigned int) *(bytecode_opcode_t *) pc);
+               ret = -EINVAL;
+               break;
+       }
+
+       case BYTECODE_OP_RETURN:
+       case BYTECODE_OP_RETURN_S64:
+       {
+               if (unlikely(pc + sizeof(struct return_op)
+                               > start_pc + bytecode->len)) {
+                       ret = -ERANGE;
+               }
+               break;
+       }
+
+       /* binary */
+       case BYTECODE_OP_MUL:
+       case BYTECODE_OP_DIV:
+       case BYTECODE_OP_MOD:
+       case BYTECODE_OP_PLUS:
+       case BYTECODE_OP_MINUS:
+       case BYTECODE_OP_EQ_DOUBLE:
+       case BYTECODE_OP_NE_DOUBLE:
+       case BYTECODE_OP_GT_DOUBLE:
+       case BYTECODE_OP_LT_DOUBLE:
+       case BYTECODE_OP_GE_DOUBLE:
+       case BYTECODE_OP_LE_DOUBLE:
+       /* Floating point */
+       case BYTECODE_OP_EQ_DOUBLE_S64:
+       case BYTECODE_OP_NE_DOUBLE_S64:
+       case BYTECODE_OP_GT_DOUBLE_S64:
+       case BYTECODE_OP_LT_DOUBLE_S64:
+       case BYTECODE_OP_GE_DOUBLE_S64:
+       case BYTECODE_OP_LE_DOUBLE_S64:
+       case BYTECODE_OP_EQ_S64_DOUBLE:
+       case BYTECODE_OP_NE_S64_DOUBLE:
+       case BYTECODE_OP_GT_S64_DOUBLE:
+       case BYTECODE_OP_LT_S64_DOUBLE:
+       case BYTECODE_OP_GE_S64_DOUBLE:
+       case BYTECODE_OP_LE_S64_DOUBLE:
+       case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
+       case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
+       case BYTECODE_OP_LOAD_DOUBLE:
+       case BYTECODE_OP_CAST_DOUBLE_TO_S64:
+       case BYTECODE_OP_UNARY_PLUS_DOUBLE:
+       case BYTECODE_OP_UNARY_MINUS_DOUBLE:
+       case BYTECODE_OP_UNARY_NOT_DOUBLE:
+       {
+               printk(KERN_WARNING "unsupported bytecode op %u\n",
+                       (unsigned int) *(bytecode_opcode_t *) pc);
+               ret = -EINVAL;
+               break;
+       }
+
+       case BYTECODE_OP_EQ:
+       case BYTECODE_OP_NE:
+       case BYTECODE_OP_GT:
+       case BYTECODE_OP_LT:
+       case BYTECODE_OP_GE:
+       case BYTECODE_OP_LE:
+       case BYTECODE_OP_EQ_STRING:
+       case BYTECODE_OP_NE_STRING:
+       case BYTECODE_OP_GT_STRING:
+       case BYTECODE_OP_LT_STRING:
+       case BYTECODE_OP_GE_STRING:
+       case BYTECODE_OP_LE_STRING:
+       case BYTECODE_OP_EQ_STAR_GLOB_STRING:
+       case BYTECODE_OP_NE_STAR_GLOB_STRING:
+       case BYTECODE_OP_EQ_S64:
+       case BYTECODE_OP_NE_S64:
+       case BYTECODE_OP_GT_S64:
+       case BYTECODE_OP_LT_S64:
+       case BYTECODE_OP_GE_S64:
+       case BYTECODE_OP_LE_S64:
+       case BYTECODE_OP_BIT_RSHIFT:
+       case BYTECODE_OP_BIT_LSHIFT:
+       case BYTECODE_OP_BIT_AND:
+       case BYTECODE_OP_BIT_OR:
+       case BYTECODE_OP_BIT_XOR:
+       {
+               if (unlikely(pc + sizeof(struct binary_op)
+                               > start_pc + bytecode->len)) {
+                       ret = -ERANGE;
+               }
+               break;
+       }
+
+       /* unary */
+       case BYTECODE_OP_UNARY_PLUS:
+       case BYTECODE_OP_UNARY_MINUS:
+       case BYTECODE_OP_UNARY_NOT:
+       case BYTECODE_OP_UNARY_PLUS_S64:
+       case BYTECODE_OP_UNARY_MINUS_S64:
+       case BYTECODE_OP_UNARY_NOT_S64:
+       case BYTECODE_OP_UNARY_BIT_NOT:
+       {
+               if (unlikely(pc + sizeof(struct unary_op)
+                               > start_pc + bytecode->len)) {
+                       ret = -ERANGE;
+               }
+               break;
+       }
+
+       /* logical */
+       case BYTECODE_OP_AND:
+       case BYTECODE_OP_OR:
+       {
+               if (unlikely(pc + sizeof(struct logical_op)
+                               > start_pc + bytecode->len)) {
+                       ret = -ERANGE;
+               }
+               break;
+       }
+
+       /* load field ref */
+       case BYTECODE_OP_LOAD_FIELD_REF:
+       {
+               printk(KERN_WARNING "Unknown field ref type\n");
+               ret = -EINVAL;
+               break;
+       }
+
+       /* get context ref */
+       case BYTECODE_OP_GET_CONTEXT_REF:
+       {
+               printk(KERN_WARNING "Unknown field ref type\n");
+               ret = -EINVAL;
+               break;
+       }
+       case BYTECODE_OP_LOAD_FIELD_REF_STRING:
+       case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
+       case BYTECODE_OP_LOAD_FIELD_REF_USER_STRING:
+       case BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE:
+       case BYTECODE_OP_LOAD_FIELD_REF_S64:
+       case BYTECODE_OP_GET_CONTEXT_REF_STRING:
+       case BYTECODE_OP_GET_CONTEXT_REF_S64:
+       {
+               if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref)
+                               > start_pc + bytecode->len)) {
+                       ret = -ERANGE;
+               }
+               break;
+       }
+
+       /* load from immediate operand */
+       case BYTECODE_OP_LOAD_STRING:
+       case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
+       {
+               struct load_op *insn = (struct load_op *) pc;
+               uint32_t str_len, maxlen;
+
+               if (unlikely(pc + sizeof(struct load_op)
+                               > start_pc + bytecode->len)) {
+                       ret = -ERANGE;
+                       break;
+               }
+
+               maxlen = start_pc + bytecode->len - pc - sizeof(struct load_op);
+               str_len = strnlen(insn->data, maxlen);
+               if (unlikely(str_len >= maxlen)) {
+                       /* Final '\0' not found within range */
+                       ret = -ERANGE;
+               }
+               break;
+       }
+
+       case BYTECODE_OP_LOAD_S64:
+       {
+               if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric)
+                               > start_pc + bytecode->len)) {
+                       ret = -ERANGE;
+               }
+               break;
+       }
+
+       case BYTECODE_OP_CAST_TO_S64:
+       case BYTECODE_OP_CAST_NOP:
+       {
+               if (unlikely(pc + sizeof(struct cast_op)
+                               > start_pc + bytecode->len)) {
+                       ret = -ERANGE;
+               }
+               break;
+       }
+
+       /*
+        * Instructions for recursive traversal through composed types.
+        */
+       case BYTECODE_OP_GET_CONTEXT_ROOT:
+       case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
+       case BYTECODE_OP_GET_PAYLOAD_ROOT:
+       case BYTECODE_OP_LOAD_FIELD:
+       case BYTECODE_OP_LOAD_FIELD_S8:
+       case BYTECODE_OP_LOAD_FIELD_S16:
+       case BYTECODE_OP_LOAD_FIELD_S32:
+       case BYTECODE_OP_LOAD_FIELD_S64:
+       case BYTECODE_OP_LOAD_FIELD_U8:
+       case BYTECODE_OP_LOAD_FIELD_U16:
+       case BYTECODE_OP_LOAD_FIELD_U32:
+       case BYTECODE_OP_LOAD_FIELD_U64:
+       case BYTECODE_OP_LOAD_FIELD_STRING:
+       case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
+       case BYTECODE_OP_LOAD_FIELD_DOUBLE:
+               if (unlikely(pc + sizeof(struct load_op)
+                               > start_pc + bytecode->len)) {
+                       ret = -ERANGE;
+               }
+               break;
+
+       case BYTECODE_OP_GET_SYMBOL:
+       {
+               struct load_op *insn = (struct load_op *) pc;
+               struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+               if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_symbol)
+                               > start_pc + bytecode->len)) {
+                       ret = -ERANGE;
+                       break;
+               }
+               ret = validate_get_symbol(bytecode, sym);
+               break;
+       }
+
+       case BYTECODE_OP_GET_SYMBOL_FIELD:
+               printk(KERN_WARNING "Unexpected get symbol field\n");
+               ret = -EINVAL;
+               break;
+
+       case BYTECODE_OP_GET_INDEX_U16:
+               if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u16)
+                               > start_pc + bytecode->len)) {
+                       ret = -ERANGE;
+               }
+               break;
+
+       case BYTECODE_OP_GET_INDEX_U64:
+               if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u64)
+                               > start_pc + bytecode->len)) {
+                       ret = -ERANGE;
+               }
+               break;
+       }
+
+       return ret;
+}
+
+/*
+ * Free every merge point node remaining in the hash table.
+ * Returns the number of nodes that were freed.
+ */
+static
+unsigned long delete_all_nodes(struct mp_table *mp_table)
+{
+       unsigned long freed_count = 0;
+       int bucket;
+
+       for (bucket = 0; bucket < MERGE_POINT_TABLE_SIZE; bucket++) {
+               struct hlist_head *bucket_head = &mp_table->mp_head[bucket];
+               struct mp_node *entry;
+               struct hlist_node *next;
+
+               /* Safe iteration: each entry is freed as we walk the chain. */
+               lttng_hlist_for_each_entry_safe(entry, next, bucket_head, node) {
+                       kfree(entry);
+                       freed_count++;
+               }
+       }
+       return freed_count;
+}
+
+/*
+ * Validate that the instruction at @pc is consistent with the virtual
+ * stack state (register types, stack depth) left by the previously
+ * validated instructions, without executing it.
+ *
+ * Return value:
+ * >=0: success
+ * <0: error
+ */
+static
+int validate_instruction_context(struct bytecode_runtime *bytecode,
+               struct vstack *stack,
+               char *start_pc,
+               char *pc)
+{
+       int ret = 0;
+       const bytecode_opcode_t opcode = *(bytecode_opcode_t *) pc;
+
+       switch (opcode) {
+       case BYTECODE_OP_UNKNOWN:
+       default:
+       {
+               printk(KERN_WARNING "unknown bytecode op %u\n",
+                       (unsigned int) *(bytecode_opcode_t *) pc);
+               ret = -EINVAL;
+               goto end;
+       }
+
+       case BYTECODE_OP_RETURN:
+       case BYTECODE_OP_RETURN_S64:
+       {
+               goto end;
+       }
+
+       /* binary */
+       case BYTECODE_OP_MUL:
+       case BYTECODE_OP_DIV:
+       case BYTECODE_OP_MOD:
+       case BYTECODE_OP_PLUS:
+       case BYTECODE_OP_MINUS:
+       /* Floating point */
+       case BYTECODE_OP_EQ_DOUBLE:
+       case BYTECODE_OP_NE_DOUBLE:
+       case BYTECODE_OP_GT_DOUBLE:
+       case BYTECODE_OP_LT_DOUBLE:
+       case BYTECODE_OP_GE_DOUBLE:
+       case BYTECODE_OP_LE_DOUBLE:
+       case BYTECODE_OP_EQ_DOUBLE_S64:
+       case BYTECODE_OP_NE_DOUBLE_S64:
+       case BYTECODE_OP_GT_DOUBLE_S64:
+       case BYTECODE_OP_LT_DOUBLE_S64:
+       case BYTECODE_OP_GE_DOUBLE_S64:
+       case BYTECODE_OP_LE_DOUBLE_S64:
+       case BYTECODE_OP_EQ_S64_DOUBLE:
+       case BYTECODE_OP_NE_S64_DOUBLE:
+       case BYTECODE_OP_GT_S64_DOUBLE:
+       case BYTECODE_OP_LT_S64_DOUBLE:
+       case BYTECODE_OP_GE_S64_DOUBLE:
+       case BYTECODE_OP_LE_S64_DOUBLE:
+       case BYTECODE_OP_UNARY_PLUS_DOUBLE:
+       case BYTECODE_OP_UNARY_MINUS_DOUBLE:
+       case BYTECODE_OP_UNARY_NOT_DOUBLE:
+       case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
+       case BYTECODE_OP_LOAD_DOUBLE:
+       case BYTECODE_OP_CAST_DOUBLE_TO_S64:
+       case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
+       {
+               /* Arithmetic and double-precision ops are rejected here. */
+               printk(KERN_WARNING "unsupported bytecode op %u\n",
+                       (unsigned int) *(bytecode_opcode_t *) pc);
+               ret = -EINVAL;
+               goto end;
+       }
+
+       case BYTECODE_OP_EQ:
+       {
+               ret = bin_op_compare_check(stack, opcode, "==");
+               if (ret < 0)
+                       goto end;
+               break;
+       }
+       case BYTECODE_OP_NE:
+       {
+               ret = bin_op_compare_check(stack, opcode, "!=");
+               if (ret < 0)
+                       goto end;
+               break;
+       }
+       case BYTECODE_OP_GT:
+       {
+               ret = bin_op_compare_check(stack, opcode, ">");
+               if (ret < 0)
+                       goto end;
+               break;
+       }
+       case BYTECODE_OP_LT:
+       {
+               ret = bin_op_compare_check(stack, opcode, "<");
+               if (ret < 0)
+                       goto end;
+               break;
+       }
+       case BYTECODE_OP_GE:
+       {
+               ret = bin_op_compare_check(stack, opcode, ">=");
+               if (ret < 0)
+                       goto end;
+               break;
+       }
+       case BYTECODE_OP_LE:
+       {
+               ret = bin_op_compare_check(stack, opcode, "<=");
+               if (ret < 0)
+                       goto end;
+               break;
+       }
+
+       case BYTECODE_OP_EQ_STRING:
+       case BYTECODE_OP_NE_STRING:
+       case BYTECODE_OP_GT_STRING:
+       case BYTECODE_OP_LT_STRING:
+       case BYTECODE_OP_GE_STRING:
+       case BYTECODE_OP_LE_STRING:
+       {
+               /* Both operands must already be string registers. */
+               if (!vstack_ax(stack) || !vstack_bx(stack)) {
+                       printk(KERN_WARNING "Empty stack\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (vstack_ax(stack)->type != REG_STRING
+                               || vstack_bx(stack)->type != REG_STRING) {
+                       printk(KERN_WARNING "Unexpected register type for string comparator\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               break;
+       }
+
+
+       case BYTECODE_OP_EQ_STAR_GLOB_STRING:
+       case BYTECODE_OP_NE_STAR_GLOB_STRING:
+       {
+               /* At least one operand must be a globbing pattern. */
+               if (!vstack_ax(stack) || !vstack_bx(stack)) {
+                       printk(KERN_WARNING "Empty stack\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (vstack_ax(stack)->type != REG_STAR_GLOB_STRING
+                               && vstack_bx(stack)->type != REG_STAR_GLOB_STRING) {
+                       printk(KERN_WARNING "Unexpected register type for globbing pattern comparator\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               break;
+       }
+
+       case BYTECODE_OP_EQ_S64:
+       case BYTECODE_OP_NE_S64:
+       case BYTECODE_OP_GT_S64:
+       case BYTECODE_OP_LT_S64:
+       case BYTECODE_OP_GE_S64:
+       case BYTECODE_OP_LE_S64:
+       {
+               /* Both operands must be integer (signed or unsigned) registers. */
+               if (!vstack_ax(stack) || !vstack_bx(stack)) {
+                       printk(KERN_WARNING "Empty stack\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               switch (vstack_ax(stack)->type) {
+               case REG_S64:
+               case REG_U64:
+                       break;
+               default:
+                       printk(KERN_WARNING "Unexpected register type for s64 comparator\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               switch (vstack_bx(stack)->type) {
+               case REG_S64:
+               case REG_U64:
+                       break;
+               default:
+                       printk(KERN_WARNING "Unexpected register type for s64 comparator\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               break;
+       }
+
+       case BYTECODE_OP_BIT_RSHIFT:
+               ret = bin_op_bitwise_check(stack, opcode, ">>");
+               if (ret < 0)
+                       goto end;
+               break;
+       case BYTECODE_OP_BIT_LSHIFT:
+               ret = bin_op_bitwise_check(stack, opcode, "<<");
+               if (ret < 0)
+                       goto end;
+               break;
+       case BYTECODE_OP_BIT_AND:
+               ret = bin_op_bitwise_check(stack, opcode, "&");
+               if (ret < 0)
+                       goto end;
+               break;
+       case BYTECODE_OP_BIT_OR:
+               ret = bin_op_bitwise_check(stack, opcode, "|");
+               if (ret < 0)
+                       goto end;
+               break;
+       case BYTECODE_OP_BIT_XOR:
+               ret = bin_op_bitwise_check(stack, opcode, "^");
+               if (ret < 0)
+                       goto end;
+               break;
+
+       /* unary */
+       case BYTECODE_OP_UNARY_PLUS:
+       case BYTECODE_OP_UNARY_MINUS:
+       case BYTECODE_OP_UNARY_NOT:
+       {
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               switch (vstack_ax(stack)->type) {
+               default:
+               case REG_DOUBLE:
+                       printk(KERN_WARNING "unknown register type\n");
+                       ret = -EINVAL;
+                       goto end;
+
+               case REG_STRING:
+               case REG_STAR_GLOB_STRING:
+                       printk(KERN_WARNING "Unary op can only be applied to numeric or floating point registers\n");
+                       ret = -EINVAL;
+                       goto end;
+               case REG_S64:
+               case REG_U64:
+               case REG_TYPE_UNKNOWN:
+                       break;
+               }
+               break;
+       }
+       case BYTECODE_OP_UNARY_BIT_NOT:
+       {
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               switch (vstack_ax(stack)->type) {
+               default:
+                       printk(KERN_WARNING "unknown register type\n");
+                       ret = -EINVAL;
+                       goto end;
+
+               case REG_STRING:
+               case REG_STAR_GLOB_STRING:
+               case REG_DOUBLE:
+                       printk(KERN_WARNING "Unary bitwise op can only be applied to numeric registers\n");
+                       ret = -EINVAL;
+                       goto end;
+               case REG_S64:
+               case REG_U64:
+               case REG_TYPE_UNKNOWN:
+                       break;
+               }
+               break;
+       }
+
+       case BYTECODE_OP_UNARY_PLUS_S64:
+       case BYTECODE_OP_UNARY_MINUS_S64:
+       case BYTECODE_OP_UNARY_NOT_S64:
+       {
+               /* Specialized unary ops require an integer register. */
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (vstack_ax(stack)->type != REG_S64 &&
+                               vstack_ax(stack)->type != REG_U64) {
+                       printk(KERN_WARNING "Invalid register type\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               break;
+       }
+
+       /* logical */
+       case BYTECODE_OP_AND:
+       case BYTECODE_OP_OR:
+       {
+               struct logical_op *insn = (struct logical_op *) pc;
+
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (vstack_ax(stack)->type != REG_S64 &&
+                               vstack_ax(stack)->type != REG_U64) {
+                       printk(KERN_WARNING "Logical comparator expects S64 or U64 register\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+
+               /* Only forward jumps are accepted, which rules out loops. */
+               dbg_printk("Validate jumping to bytecode offset %u\n",
+                       (unsigned int) insn->skip_offset);
+               if (unlikely(start_pc + insn->skip_offset <= pc)) {
+                       printk(KERN_WARNING "Loops are not allowed in bytecode\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               break;
+       }
+
+       /* load field ref */
+       case BYTECODE_OP_LOAD_FIELD_REF:
+       {
+               /* Generic (untyped) field ref must have been specialized away. */
+               printk(KERN_WARNING "Unknown field ref type\n");
+               ret = -EINVAL;
+               goto end;
+       }
+       case BYTECODE_OP_LOAD_FIELD_REF_STRING:
+       case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
+       case BYTECODE_OP_LOAD_FIELD_REF_USER_STRING:
+       case BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE:
+       {
+               struct load_op *insn = (struct load_op *) pc;
+               struct field_ref *ref = (struct field_ref *) insn->data;
+
+               dbg_printk("Validate load field ref offset %u type string\n",
+                       ref->offset);
+               break;
+       }
+       case BYTECODE_OP_LOAD_FIELD_REF_S64:
+       {
+               struct load_op *insn = (struct load_op *) pc;
+               struct field_ref *ref = (struct field_ref *) insn->data;
+
+               dbg_printk("Validate load field ref offset %u type s64\n",
+                       ref->offset);
+               break;
+       }
+
+       /* load from immediate operand */
+       case BYTECODE_OP_LOAD_STRING:
+       case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
+       {
+               break;
+       }
+
+       case BYTECODE_OP_LOAD_S64:
+       {
+               break;
+       }
+
+       case BYTECODE_OP_CAST_TO_S64:
+       {
+               struct cast_op *insn = (struct cast_op *) pc;
+
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               /*
+                * NOTE(review): REG_U64 and REG_TYPE_UNKNOWN fall through to
+                * the default (error) branch below, while the unary and
+                * logical checks above accept them — confirm this is intended.
+                */
+               switch (vstack_ax(stack)->type) {
+               default:
+               case REG_DOUBLE:
+                       printk(KERN_WARNING "unknown register type\n");
+                       ret = -EINVAL;
+                       goto end;
+
+               case REG_STRING:
+               case REG_STAR_GLOB_STRING:
+                       printk(KERN_WARNING "Cast op can only be applied to numeric or floating point registers\n");
+                       ret = -EINVAL;
+                       goto end;
+               case REG_S64:
+                       break;
+               }
+               /*
+                * NOTE(review): REG_DOUBLE already errored out in the switch
+                * above, so this double-specific check looks unreachable for
+                * BYTECODE_OP_CAST_TO_S64 — verify.
+                */
+               if (insn->op == BYTECODE_OP_CAST_DOUBLE_TO_S64) {
+                       if (vstack_ax(stack)->type != REG_DOUBLE) {
+                               printk(KERN_WARNING "Cast expects double\n");
+                               ret = -EINVAL;
+                               goto end;
+                       }
+               }
+               break;
+       }
+       case BYTECODE_OP_CAST_NOP:
+       {
+               break;
+       }
+
+       /* get context ref */
+       case BYTECODE_OP_GET_CONTEXT_REF:
+       {
+               /* Generic (untyped) context ref must have been specialized away. */
+               printk(KERN_WARNING "Unknown get context ref type\n");
+               ret = -EINVAL;
+               goto end;
+       }
+       case BYTECODE_OP_GET_CONTEXT_REF_STRING:
+       {
+               struct load_op *insn = (struct load_op *) pc;
+               struct field_ref *ref = (struct field_ref *) insn->data;
+
+               dbg_printk("Validate get context ref offset %u type string\n",
+                       ref->offset);
+               break;
+       }
+       case BYTECODE_OP_GET_CONTEXT_REF_S64:
+       {
+               struct load_op *insn = (struct load_op *) pc;
+               struct field_ref *ref = (struct field_ref *) insn->data;
+
+               dbg_printk("Validate get context ref offset %u type s64\n",
+                       ref->offset);
+               break;
+       }
+
+       /*
+        * Instructions for recursive traversal through composed types.
+        */
+       case BYTECODE_OP_GET_CONTEXT_ROOT:
+       {
+               dbg_printk("Validate get context root\n");
+               break;
+       }
+       case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
+       {
+               dbg_printk("Validate get app context root\n");
+               break;
+       }
+       case BYTECODE_OP_GET_PAYLOAD_ROOT:
+       {
+               dbg_printk("Validate get payload root\n");
+               break;
+       }
+       case BYTECODE_OP_LOAD_FIELD:
+       {
+               /*
+                * We tolerate that field type is unknown at validation,
+                * because we are performing the load specialization in
+                * a phase after validation.
+                */
+               dbg_printk("Validate load field\n");
+               break;
+       }
+       case BYTECODE_OP_LOAD_FIELD_S8:
+       {
+               dbg_printk("Validate load field s8\n");
+               break;
+       }
+       case BYTECODE_OP_LOAD_FIELD_S16:
+       {
+               dbg_printk("Validate load field s16\n");
+               break;
+       }
+       case BYTECODE_OP_LOAD_FIELD_S32:
+       {
+               dbg_printk("Validate load field s32\n");
+               break;
+       }
+       case BYTECODE_OP_LOAD_FIELD_S64:
+       {
+               dbg_printk("Validate load field s64\n");
+               break;
+       }
+       case BYTECODE_OP_LOAD_FIELD_U8:
+       {
+               dbg_printk("Validate load field u8\n");
+               break;
+       }
+       case BYTECODE_OP_LOAD_FIELD_U16:
+       {
+               dbg_printk("Validate load field u16\n");
+               break;
+       }
+       case BYTECODE_OP_LOAD_FIELD_U32:
+       {
+               dbg_printk("Validate load field u32\n");
+               break;
+       }
+       case BYTECODE_OP_LOAD_FIELD_U64:
+       {
+               dbg_printk("Validate load field u64\n");
+               break;
+       }
+       case BYTECODE_OP_LOAD_FIELD_STRING:
+       {
+               dbg_printk("Validate load field string\n");
+               break;
+       }
+       case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
+       {
+               dbg_printk("Validate load field sequence\n");
+               break;
+       }
+       case BYTECODE_OP_LOAD_FIELD_DOUBLE:
+       {
+               dbg_printk("Validate load field double\n");
+               break;
+       }
+
+       case BYTECODE_OP_GET_SYMBOL:
+       {
+               struct load_op *insn = (struct load_op *) pc;
+               struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+               dbg_printk("Validate get symbol offset %u\n", sym->offset);
+               break;
+       }
+
+       case BYTECODE_OP_GET_SYMBOL_FIELD:
+       {
+               struct load_op *insn = (struct load_op *) pc;
+               struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+               dbg_printk("Validate get symbol field offset %u\n", sym->offset);
+               break;
+       }
+
+       case BYTECODE_OP_GET_INDEX_U16:
+       {
+               struct load_op *insn = (struct load_op *) pc;
+               struct get_index_u16 *get_index = (struct get_index_u16 *) insn->data;
+
+               dbg_printk("Validate get index u16 index %u\n", get_index->index);
+               break;
+       }
+
+       case BYTECODE_OP_GET_INDEX_U64:
+       {
+               struct load_op *insn = (struct load_op *) pc;
+               struct get_index_u64 *get_index = (struct get_index_u64 *) insn->data;
+
+               dbg_printk("Validate get index u64 index %llu\n",
+                       (unsigned long long) get_index->index);
+               break;
+       }
+       }
+end:
+       return ret;
+}
+
+/*
+ * Validate the instruction at @pc against the current virtual stack
+ * state, then against any merge point previously recorded at the same
+ * bytecode offset. A matching merge point is consumed (removed).
+ *
+ * Return value:
+ * 0: success
+ * <0: error
+ */
+static
+int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
+               struct mp_table *mp_table,
+               struct vstack *stack,
+               char *start_pc,
+               char *pc)
+{
+       int ret;
+       unsigned long target_pc = pc - start_pc;
+       unsigned long hash;
+       struct hlist_head *head;
+       struct mp_node *iter, *found_node = NULL;
+
+       /* Validate the context resulting from the previous instruction. */
+       ret = validate_instruction_context(bytecode, stack, start_pc, pc);
+       if (ret < 0)
+               return ret;
+
+       /* Look up a merge point recorded for this bytecode offset. */
+       hash = jhash_1word(target_pc, 0);
+       head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
+       lttng_hlist_for_each_entry(iter, head, node) {
+               if (lttng_hash_match(iter, target_pc)) {
+                       found_node = iter;
+                       break;
+               }
+       }
+       if (!found_node)
+               return 0;
+
+       dbg_printk("Bytecode: validate merge point at offset %lu\n",
+                       target_pc);
+       if (merge_points_compare(stack, &found_node->stack)) {
+               printk(KERN_WARNING "Merge points differ for offset %lu\n",
+                       target_pc);
+               return -EINVAL;
+       }
+       /* Once validated, we can remove the merge point */
+       dbg_printk("Bytecode: remove merge point at offset %lu\n",
+                       target_pc);
+       hlist_del(&found_node->node);
+       return 0;
+}
+
+/*
+ * Return value:
+ * >0: going to next insn.
+ * 0: success, stop iteration.
+ * <0: error
+ */
+static
+int exec_insn(struct bytecode_runtime *bytecode,
+               struct mp_table *mp_table,
+               struct vstack *stack,
+               char **_next_pc,
+               char *pc)
+{
+       int ret = 1;
+       char *next_pc = *_next_pc;
+
+       switch (*(bytecode_opcode_t *) pc) {
+       case BYTECODE_OP_UNKNOWN:
+       default:
+       {
+               printk(KERN_WARNING "unknown bytecode op %u\n",
+                       (unsigned int) *(bytecode_opcode_t *) pc);
+               ret = -EINVAL;
+               goto end;
+       }
+
+       case BYTECODE_OP_RETURN:
+       {
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               switch (vstack_ax(stack)->type) {
+               case REG_S64:
+               case REG_U64:
+               case REG_DOUBLE:
+               case REG_STRING:
+               case REG_PTR:
+               case REG_TYPE_UNKNOWN:
+                       break;
+               default:
+                       printk(KERN_WARNING "Unexpected register type %d at end of bytecode\n",
+                               (int) vstack_ax(stack)->type);
+                       ret = -EINVAL;
+                       goto end;
+               }
+
+               ret = 0;
+               goto end;
+       }
+
+       case BYTECODE_OP_RETURN_S64:
+       {
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               switch (vstack_ax(stack)->type) {
+               case REG_S64:
+               case REG_U64:
+                       break;
+               default:
+               case REG_TYPE_UNKNOWN:
+                       printk(KERN_WARNING "Unexpected register type %d at end of bytecode\n",
+                               (int) vstack_ax(stack)->type);
+                       ret = -EINVAL;
+                       goto end;
+               }
+
+               ret = 0;
+               goto end;
+       }
+
+       /* binary */
+       case BYTECODE_OP_MUL:
+       case BYTECODE_OP_DIV:
+       case BYTECODE_OP_MOD:
+       case BYTECODE_OP_PLUS:
+       case BYTECODE_OP_MINUS:
+       /* Floating point */
+       case BYTECODE_OP_EQ_DOUBLE:
+       case BYTECODE_OP_NE_DOUBLE:
+       case BYTECODE_OP_GT_DOUBLE:
+       case BYTECODE_OP_LT_DOUBLE:
+       case BYTECODE_OP_GE_DOUBLE:
+       case BYTECODE_OP_LE_DOUBLE:
+       case BYTECODE_OP_EQ_DOUBLE_S64:
+       case BYTECODE_OP_NE_DOUBLE_S64:
+       case BYTECODE_OP_GT_DOUBLE_S64:
+       case BYTECODE_OP_LT_DOUBLE_S64:
+       case BYTECODE_OP_GE_DOUBLE_S64:
+       case BYTECODE_OP_LE_DOUBLE_S64:
+       case BYTECODE_OP_EQ_S64_DOUBLE:
+       case BYTECODE_OP_NE_S64_DOUBLE:
+       case BYTECODE_OP_GT_S64_DOUBLE:
+       case BYTECODE_OP_LT_S64_DOUBLE:
+       case BYTECODE_OP_GE_S64_DOUBLE:
+       case BYTECODE_OP_LE_S64_DOUBLE:
+       case BYTECODE_OP_UNARY_PLUS_DOUBLE:
+       case BYTECODE_OP_UNARY_MINUS_DOUBLE:
+       case BYTECODE_OP_UNARY_NOT_DOUBLE:
+       case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
+       case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
+       case BYTECODE_OP_LOAD_DOUBLE:
+       case BYTECODE_OP_CAST_DOUBLE_TO_S64:
+       {
+               printk(KERN_WARNING "unsupported bytecode op %u\n",
+                       (unsigned int) *(bytecode_opcode_t *) pc);
+               ret = -EINVAL;
+               goto end;
+       }
+
+       case BYTECODE_OP_EQ:
+       case BYTECODE_OP_NE:
+       case BYTECODE_OP_GT:
+       case BYTECODE_OP_LT:
+       case BYTECODE_OP_GE:
+       case BYTECODE_OP_LE:
+       case BYTECODE_OP_EQ_STRING:
+       case BYTECODE_OP_NE_STRING:
+       case BYTECODE_OP_GT_STRING:
+       case BYTECODE_OP_LT_STRING:
+       case BYTECODE_OP_GE_STRING:
+       case BYTECODE_OP_LE_STRING:
+       case BYTECODE_OP_EQ_STAR_GLOB_STRING:
+       case BYTECODE_OP_NE_STAR_GLOB_STRING:
+       case BYTECODE_OP_EQ_S64:
+       case BYTECODE_OP_NE_S64:
+       case BYTECODE_OP_GT_S64:
+       case BYTECODE_OP_LT_S64:
+       case BYTECODE_OP_GE_S64:
+       case BYTECODE_OP_LE_S64:
+       {
+               /* Pop 2, push 1 */
+               if (vstack_pop(stack)) {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               switch (vstack_ax(stack)->type) {
+               case REG_S64:
+               case REG_U64:
+               case REG_DOUBLE:
+               case REG_STRING:
+               case REG_STAR_GLOB_STRING:
+               case REG_TYPE_UNKNOWN:
+                       break;
+               default:
+                       printk(KERN_WARNING "Unexpected register type %d for operation\n",
+                               (int) vstack_ax(stack)->type);
+                       ret = -EINVAL;
+                       goto end;
+               }
+
+               vstack_ax(stack)->type = REG_S64;
+               next_pc += sizeof(struct binary_op);
+               break;
+       }
+       case BYTECODE_OP_BIT_RSHIFT:
+       case BYTECODE_OP_BIT_LSHIFT:
+       case BYTECODE_OP_BIT_AND:
+       case BYTECODE_OP_BIT_OR:
+       case BYTECODE_OP_BIT_XOR:
+       {
+               /* Pop 2, push 1 */
+               if (vstack_pop(stack)) {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               switch (vstack_ax(stack)->type) {
+               case REG_S64:
+               case REG_U64:
+               case REG_DOUBLE:
+               case REG_STRING:
+               case REG_STAR_GLOB_STRING:
+               case REG_TYPE_UNKNOWN:
+                       break;
+               default:
+                       printk(KERN_WARNING "Unexpected register type %d for operation\n",
+                               (int) vstack_ax(stack)->type);
+                       ret = -EINVAL;
+                       goto end;
+               }
+
+               vstack_ax(stack)->type = REG_U64;
+               next_pc += sizeof(struct binary_op);
+               break;
+       }
+
+       /* unary */
+       case BYTECODE_OP_UNARY_PLUS:
+       case BYTECODE_OP_UNARY_MINUS:
+       {
+               /* Pop 1, push 1 */
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               switch (vstack_ax(stack)->type) {
+               case REG_S64:
+               case REG_U64:
+               case REG_TYPE_UNKNOWN:
+                       break;
+               default:
+                       printk(KERN_WARNING "Unexpected register type %d for operation\n",
+                               (int) vstack_ax(stack)->type);
+                       ret = -EINVAL;
+                       goto end;
+               }
+
+               vstack_ax(stack)->type = REG_TYPE_UNKNOWN;
+               next_pc += sizeof(struct unary_op);
+               break;
+       }
+
+       case BYTECODE_OP_UNARY_PLUS_S64:
+       case BYTECODE_OP_UNARY_MINUS_S64:
+       case BYTECODE_OP_UNARY_NOT_S64:
+       {
+               /* Pop 1, push 1 */
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               switch (vstack_ax(stack)->type) {
+               case REG_S64:
+               case REG_U64:
+                       break;
+               default:
+                       printk(KERN_WARNING "Unexpected register type %d for operation\n",
+                               (int) vstack_ax(stack)->type);
+                       ret = -EINVAL;
+                       goto end;
+               }
+
+               next_pc += sizeof(struct unary_op);
+               break;
+       }
+
+       case BYTECODE_OP_UNARY_NOT:
+       {
+               /* Pop 1, push 1 */
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               switch (vstack_ax(stack)->type) {
+               case REG_S64:
+               case REG_U64:
+               case REG_TYPE_UNKNOWN:
+                       break;
+               default:
+                       printk(KERN_WARNING "Unexpected register type %d for operation\n",
+                               (int) vstack_ax(stack)->type);
+                       ret = -EINVAL;
+                       goto end;
+               }
+
+               next_pc += sizeof(struct unary_op);
+               break;
+       }
+
+       case BYTECODE_OP_UNARY_BIT_NOT:
+       {
+               /* Pop 1, push 1 */
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               switch (vstack_ax(stack)->type) {
+               case REG_S64:
+               case REG_U64:
+               case REG_TYPE_UNKNOWN:
+                       break;
+               case REG_DOUBLE:
+               default:
+                       printk(KERN_WARNING "Unexpected register type %d for operation\n",
+                               (int) vstack_ax(stack)->type);
+                       ret = -EINVAL;
+                       goto end;
+               }
+
+               vstack_ax(stack)->type = REG_U64;
+               next_pc += sizeof(struct unary_op);
+               break;
+       }
+
+       /* logical */
+       case BYTECODE_OP_AND:
+       case BYTECODE_OP_OR:
+       {
+               struct logical_op *insn = (struct logical_op *) pc;
+               int merge_ret;
+
+               /* Add merge point to table */
+               merge_ret = merge_point_add_check(mp_table,
+                                       insn->skip_offset, stack);
+               if (merge_ret) {
+                       ret = merge_ret;
+                       goto end;
+               }
+
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               /* There is always a cast-to-s64 operation before a or/and op. */
+               switch (vstack_ax(stack)->type) {
+               case REG_S64:
+               case REG_U64:
+                       break;
+               default:
+                       printk(KERN_WARNING "Incorrect register type %d for operation\n",
+                               (int) vstack_ax(stack)->type);
+                       ret = -EINVAL;
+                       goto end;
+               }
+
+               /* Continue to next instruction */
+               /* Pop 1 when jump not taken */
+               if (vstack_pop(stack)) {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               next_pc += sizeof(struct logical_op);
+               break;
+       }
+
+       /* load field ref */
+       case BYTECODE_OP_LOAD_FIELD_REF:
+       {
+               printk(KERN_WARNING "Unknown field ref type\n");
+               ret = -EINVAL;
+               goto end;
+       }
+       /* get context ref */
+       case BYTECODE_OP_GET_CONTEXT_REF:
+       {
+               printk(KERN_WARNING "Unknown get context ref type\n");
+               ret = -EINVAL;
+               goto end;
+       }
+       case BYTECODE_OP_LOAD_FIELD_REF_STRING:
+       case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
+       case BYTECODE_OP_GET_CONTEXT_REF_STRING:
+       case BYTECODE_OP_LOAD_FIELD_REF_USER_STRING:
+       case BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE:
+       {
+               if (vstack_push(stack)) {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               vstack_ax(stack)->type = REG_STRING;
+               next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+               break;
+       }
+       case BYTECODE_OP_LOAD_FIELD_REF_S64:
+       case BYTECODE_OP_GET_CONTEXT_REF_S64:
+       {
+               if (vstack_push(stack)) {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               vstack_ax(stack)->type = REG_S64;
+               next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+               break;
+       }
+
+       /* load from immediate operand */
+       case BYTECODE_OP_LOAD_STRING:
+       {
+               struct load_op *insn = (struct load_op *) pc;
+
+               if (vstack_push(stack)) {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               vstack_ax(stack)->type = REG_STRING;
+               next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+               break;
+       }
+
+       case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
+       {
+               struct load_op *insn = (struct load_op *) pc;
+
+               if (vstack_push(stack)) {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
+               next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+               break;
+       }
+
+       case BYTECODE_OP_LOAD_S64:
+       {
+               if (vstack_push(stack)) {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               vstack_ax(stack)->type = REG_S64;
+               next_pc += sizeof(struct load_op)
+                               + sizeof(struct literal_numeric);
+               break;
+       }
+
+       case BYTECODE_OP_CAST_TO_S64:
+       {
+               /* Pop 1, push 1 */
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               switch (vstack_ax(stack)->type) {
+               case REG_S64:
+               case REG_U64:
+               case REG_DOUBLE:
+               case REG_TYPE_UNKNOWN:
+                       break;
+               default:
+                       printk(KERN_WARNING "Incorrect register type %d for cast\n",
+                               (int) vstack_ax(stack)->type);
+                       ret = -EINVAL;
+                       goto end;
+               }
+               vstack_ax(stack)->type = REG_S64;
+               next_pc += sizeof(struct cast_op);
+               break;
+       }
+       case BYTECODE_OP_CAST_NOP:
+       {
+               next_pc += sizeof(struct cast_op);
+               break;
+       }
+
+       /*
+        * Instructions for recursive traversal through composed types.
+        */
+       case BYTECODE_OP_GET_CONTEXT_ROOT:
+       case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
+       case BYTECODE_OP_GET_PAYLOAD_ROOT:
+       {
+               if (vstack_push(stack)) {
+                       ret = -EINVAL;
+                       goto end;
+               }
+               vstack_ax(stack)->type = REG_PTR;
+               next_pc += sizeof(struct load_op);
+               break;
+       }
+
+       case BYTECODE_OP_LOAD_FIELD:
+       {
+               /* Pop 1, push 1 */
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (vstack_ax(stack)->type != REG_PTR) {
+                       printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               vstack_ax(stack)->type = REG_TYPE_UNKNOWN;
+               next_pc += sizeof(struct load_op);
+               break;
+       }
+
+       case BYTECODE_OP_LOAD_FIELD_S8:
+       case BYTECODE_OP_LOAD_FIELD_S16:
+       case BYTECODE_OP_LOAD_FIELD_S32:
+       case BYTECODE_OP_LOAD_FIELD_S64:
+       {
+               /* Pop 1, push 1 */
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (vstack_ax(stack)->type != REG_PTR) {
+                       printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               vstack_ax(stack)->type = REG_S64;
+               next_pc += sizeof(struct load_op);
+               break;
+       }
+       case BYTECODE_OP_LOAD_FIELD_U8:
+       case BYTECODE_OP_LOAD_FIELD_U16:
+       case BYTECODE_OP_LOAD_FIELD_U32:
+       case BYTECODE_OP_LOAD_FIELD_U64:
+       {
+               /* Pop 1, push 1 */
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (vstack_ax(stack)->type != REG_PTR) {
+                       printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               vstack_ax(stack)->type = REG_U64;
+               next_pc += sizeof(struct load_op);
+               break;
+       }
+       case BYTECODE_OP_LOAD_FIELD_STRING:
+       case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
+       {
+               /* Pop 1, push 1 */
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (vstack_ax(stack)->type != REG_PTR) {
+                       printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               vstack_ax(stack)->type = REG_STRING;
+               next_pc += sizeof(struct load_op);
+               break;
+       }
+
+       case BYTECODE_OP_LOAD_FIELD_DOUBLE:
+       {
+               /* Pop 1, push 1 */
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (vstack_ax(stack)->type != REG_PTR) {
+                       printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               vstack_ax(stack)->type = REG_DOUBLE;
+               next_pc += sizeof(struct load_op);
+               break;
+       }
+
+       case BYTECODE_OP_GET_SYMBOL:
+       case BYTECODE_OP_GET_SYMBOL_FIELD:
+       {
+               /* Pop 1, push 1 */
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (vstack_ax(stack)->type != REG_PTR) {
+                       printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
+               break;
+       }
+
+       case BYTECODE_OP_GET_INDEX_U16:
+       {
+               /* Pop 1, push 1 */
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (vstack_ax(stack)->type != REG_PTR) {
+                       printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
+               break;
+       }
+
+       case BYTECODE_OP_GET_INDEX_U64:
+       {
+               /* Pop 1, push 1 */
+               if (!vstack_ax(stack)) {
+                       printk(KERN_WARNING "Empty stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               if (vstack_ax(stack)->type != REG_PTR) {
+                       printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
+                       ret = -EINVAL;
+                       goto end;
+               }
+               next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
+               break;
+       }
+
+       }
+end:
+       *_next_pc = next_pc;
+       return ret;
+}
+
+/*
+ * Never called concurrently (hash seed is shared).
+ *
+ * Validate a bytecode runtime before it is made executable: walk each
+ * instruction, checking that it lies within the code buffer, that the
+ * virtual register stack stays consistent across all execution contexts,
+ * and that every merge point (branch target recorded in mp_table) is
+ * eventually matched by a real instruction.
+ *
+ * Returns 0 when the bytecode is valid, a negative error code on
+ * validation failure or allocation error.
+ */
+int lttng_bytecode_validate(struct bytecode_runtime *bytecode)
+{
+       struct mp_table *mp_table;
+       char *pc, *next_pc, *start_pc;
+       int ret = -EINVAL;
+       struct vstack stack;
+
+       vstack_init(&stack);
+
+       /* Table of expected stack states at forward-branch targets. */
+       mp_table = kzalloc(sizeof(*mp_table), GFP_KERNEL);
+       if (!mp_table) {
+               printk(KERN_WARNING "Error allocating hash table for bytecode validation\n");
+               return -ENOMEM;
+       }
+       start_pc = &bytecode->code[0];
+       for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
+                       pc = next_pc) {
+               /* Reject instructions that extend past the code buffer. */
+               ret = bytecode_validate_overflow(bytecode, start_pc, pc);
+               if (ret != 0) {
+                       if (ret == -ERANGE)
+                               printk(KERN_WARNING "bytecode overflow\n");
+                       goto end;
+               }
+               dbg_printk("Validating op %s (%u)\n",
+                       lttng_bytecode_print_op((unsigned int) *(bytecode_opcode_t *) pc),
+                       (unsigned int) *(bytecode_opcode_t *) pc);
+
+               /*
+                * For each instruction, validate the current context
+                * (traversal of entire execution flow), and validate
+                * all merge points targeting this instruction.
+                */
+               ret = validate_instruction_all_contexts(bytecode, mp_table,
+                                       &stack, start_pc, pc);
+               if (ret)
+                       goto end;
+               /*
+                * exec_insn() advances next_pc; a return <= 0 ends the
+                * walk (0: validation complete, < 0: error).
+                */
+               ret = exec_insn(bytecode, mp_table, &stack, &next_pc, pc);
+               if (ret <= 0)
+                       goto end;
+       }
+end:
+       /*
+        * Any merge point still in the table was never reached by an
+        * instruction: the bytecode is rejected even if the walk itself
+        * succeeded.
+        */
+       if (delete_all_nodes(mp_table)) {
+               if (!ret) {
+                       printk(KERN_WARNING "Unexpected merge points\n");
+                       ret = -EINVAL;
+               }
+       }
+       kfree(mp_table);
+       return ret;
+}
diff --git a/src/lttng-bytecode.c b/src/lttng-bytecode.c
new file mode 100644 (file)
index 0000000..44f06ac
--- /dev/null
@@ -0,0 +1,605 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * lttng-bytecode.c
+ *
+ * LTTng modules bytecode code.
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include <lttng/lttng-bytecode.h>
+
+/*
+ * Human-readable names for each bytecode opcode, indexed by
+ * enum bytecode_op.  Used by lttng_bytecode_print_op() for debug
+ * output during validation.
+ */
+static const char *opnames[] = {
+       [ BYTECODE_OP_UNKNOWN ] = "UNKNOWN",
+
+       [ BYTECODE_OP_RETURN ] = "RETURN",
+
+       /* binary */
+       [ BYTECODE_OP_MUL ] = "MUL",
+       [ BYTECODE_OP_DIV ] = "DIV",
+       [ BYTECODE_OP_MOD ] = "MOD",
+       [ BYTECODE_OP_PLUS ] = "PLUS",
+       [ BYTECODE_OP_MINUS ] = "MINUS",
+       [ BYTECODE_OP_BIT_RSHIFT ] = "BIT_RSHIFT",
+       [ BYTECODE_OP_BIT_LSHIFT ] = "BIT_LSHIFT",
+       [ BYTECODE_OP_BIT_AND ] = "BIT_AND",
+       [ BYTECODE_OP_BIT_OR ] = "BIT_OR",
+       [ BYTECODE_OP_BIT_XOR ] = "BIT_XOR",
+
+       /* binary comparators */
+       [ BYTECODE_OP_EQ ] = "EQ",
+       [ BYTECODE_OP_NE ] = "NE",
+       [ BYTECODE_OP_GT ] = "GT",
+       [ BYTECODE_OP_LT ] = "LT",
+       [ BYTECODE_OP_GE ] = "GE",
+       [ BYTECODE_OP_LE ] = "LE",
+
+       /* string binary comparators */
+       [ BYTECODE_OP_EQ_STRING ] = "EQ_STRING",
+       [ BYTECODE_OP_NE_STRING ] = "NE_STRING",
+       [ BYTECODE_OP_GT_STRING ] = "GT_STRING",
+       [ BYTECODE_OP_LT_STRING ] = "LT_STRING",
+       [ BYTECODE_OP_GE_STRING ] = "GE_STRING",
+       [ BYTECODE_OP_LE_STRING ] = "LE_STRING",
+
+       /* s64 binary comparators */
+       [ BYTECODE_OP_EQ_S64 ] = "EQ_S64",
+       [ BYTECODE_OP_NE_S64 ] = "NE_S64",
+       [ BYTECODE_OP_GT_S64 ] = "GT_S64",
+       [ BYTECODE_OP_LT_S64 ] = "LT_S64",
+       [ BYTECODE_OP_GE_S64 ] = "GE_S64",
+       [ BYTECODE_OP_LE_S64 ] = "LE_S64",
+
+       /* double binary comparators */
+       [ BYTECODE_OP_EQ_DOUBLE ] = "EQ_DOUBLE",
+       [ BYTECODE_OP_NE_DOUBLE ] = "NE_DOUBLE",
+       [ BYTECODE_OP_GT_DOUBLE ] = "GT_DOUBLE",
+       [ BYTECODE_OP_LT_DOUBLE ] = "LT_DOUBLE",
+       [ BYTECODE_OP_GE_DOUBLE ] = "GE_DOUBLE",
+       [ BYTECODE_OP_LE_DOUBLE ] = "LE_DOUBLE",
+
+       /* Mixed S64-double binary comparators */
+       [ BYTECODE_OP_EQ_DOUBLE_S64 ] = "EQ_DOUBLE_S64",
+       [ BYTECODE_OP_NE_DOUBLE_S64 ] = "NE_DOUBLE_S64",
+       [ BYTECODE_OP_GT_DOUBLE_S64 ] = "GT_DOUBLE_S64",
+       [ BYTECODE_OP_LT_DOUBLE_S64 ] = "LT_DOUBLE_S64",
+       [ BYTECODE_OP_GE_DOUBLE_S64 ] = "GE_DOUBLE_S64",
+       [ BYTECODE_OP_LE_DOUBLE_S64 ] = "LE_DOUBLE_S64",
+
+       [ BYTECODE_OP_EQ_S64_DOUBLE ] = "EQ_S64_DOUBLE",
+       [ BYTECODE_OP_NE_S64_DOUBLE ] = "NE_S64_DOUBLE",
+       [ BYTECODE_OP_GT_S64_DOUBLE ] = "GT_S64_DOUBLE",
+       [ BYTECODE_OP_LT_S64_DOUBLE ] = "LT_S64_DOUBLE",
+       [ BYTECODE_OP_GE_S64_DOUBLE ] = "GE_S64_DOUBLE",
+       [ BYTECODE_OP_LE_S64_DOUBLE ] = "LE_S64_DOUBLE",
+
+       /* unary */
+       [ BYTECODE_OP_UNARY_PLUS ] = "UNARY_PLUS",
+       [ BYTECODE_OP_UNARY_MINUS ] = "UNARY_MINUS",
+       [ BYTECODE_OP_UNARY_NOT ] = "UNARY_NOT",
+       [ BYTECODE_OP_UNARY_PLUS_S64 ] = "UNARY_PLUS_S64",
+       [ BYTECODE_OP_UNARY_MINUS_S64 ] = "UNARY_MINUS_S64",
+       [ BYTECODE_OP_UNARY_NOT_S64 ] = "UNARY_NOT_S64",
+       [ BYTECODE_OP_UNARY_PLUS_DOUBLE ] = "UNARY_PLUS_DOUBLE",
+       [ BYTECODE_OP_UNARY_MINUS_DOUBLE ] = "UNARY_MINUS_DOUBLE",
+       [ BYTECODE_OP_UNARY_NOT_DOUBLE ] = "UNARY_NOT_DOUBLE",
+
+       /* logical */
+       [ BYTECODE_OP_AND ] = "AND",
+       [ BYTECODE_OP_OR ] = "OR",
+
+       /* load field ref */
+       [ BYTECODE_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF",
+       [ BYTECODE_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING",
+       [ BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE",
+       [ BYTECODE_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64",
+       [ BYTECODE_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE",
+
+       /* load from immediate operand */
+       [ BYTECODE_OP_LOAD_STRING ] = "LOAD_STRING",
+       [ BYTECODE_OP_LOAD_S64 ] = "LOAD_S64",
+       [ BYTECODE_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE",
+
+       /* cast */
+       [ BYTECODE_OP_CAST_TO_S64 ] = "CAST_TO_S64",
+       [ BYTECODE_OP_CAST_DOUBLE_TO_S64 ] = "CAST_DOUBLE_TO_S64",
+       [ BYTECODE_OP_CAST_NOP ] = "CAST_NOP",
+
+       /* get context ref */
+       [ BYTECODE_OP_GET_CONTEXT_REF ] = "GET_CONTEXT_REF",
+       [ BYTECODE_OP_GET_CONTEXT_REF_STRING ] = "GET_CONTEXT_REF_STRING",
+       [ BYTECODE_OP_GET_CONTEXT_REF_S64 ] = "GET_CONTEXT_REF_S64",
+       [ BYTECODE_OP_GET_CONTEXT_REF_DOUBLE ] = "GET_CONTEXT_REF_DOUBLE",
+
+       /* load userspace field ref */
+       [ BYTECODE_OP_LOAD_FIELD_REF_USER_STRING ] = "LOAD_FIELD_REF_USER_STRING",
+       [ BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = "LOAD_FIELD_REF_USER_SEQUENCE",
+
+       /*
+        * load immediate star globbing pattern (literal string)
+        * from immediate.
+        */
+       [ BYTECODE_OP_LOAD_STAR_GLOB_STRING ] = "LOAD_STAR_GLOB_STRING",
+
+       /* globbing pattern binary operator: apply to */
+       [ BYTECODE_OP_EQ_STAR_GLOB_STRING ] = "EQ_STAR_GLOB_STRING",
+       [ BYTECODE_OP_NE_STAR_GLOB_STRING ] = "NE_STAR_GLOB_STRING",
+
+       /*
+        * Instructions for recursive traversal through composed types.
+        */
+       [ BYTECODE_OP_GET_CONTEXT_ROOT ] = "GET_CONTEXT_ROOT",
+       [ BYTECODE_OP_GET_APP_CONTEXT_ROOT ] = "GET_APP_CONTEXT_ROOT",
+       [ BYTECODE_OP_GET_PAYLOAD_ROOT ] = "GET_PAYLOAD_ROOT",
+
+       [ BYTECODE_OP_GET_SYMBOL ] = "GET_SYMBOL",
+       [ BYTECODE_OP_GET_SYMBOL_FIELD ] = "GET_SYMBOL_FIELD",
+       [ BYTECODE_OP_GET_INDEX_U16 ] = "GET_INDEX_U16",
+       [ BYTECODE_OP_GET_INDEX_U64 ] = "GET_INDEX_U64",
+
+       [ BYTECODE_OP_LOAD_FIELD ] = "LOAD_FIELD",
+       [ BYTECODE_OP_LOAD_FIELD_S8 ] = "LOAD_FIELD_S8",
+       [ BYTECODE_OP_LOAD_FIELD_S16 ] = "LOAD_FIELD_S16",
+       [ BYTECODE_OP_LOAD_FIELD_S32 ] = "LOAD_FIELD_S32",
+       [ BYTECODE_OP_LOAD_FIELD_S64 ] = "LOAD_FIELD_S64",
+       [ BYTECODE_OP_LOAD_FIELD_U8 ] = "LOAD_FIELD_U8",
+       [ BYTECODE_OP_LOAD_FIELD_U16 ] = "LOAD_FIELD_U16",
+       [ BYTECODE_OP_LOAD_FIELD_U32 ] = "LOAD_FIELD_U32",
+       [ BYTECODE_OP_LOAD_FIELD_U64 ] = "LOAD_FIELD_U64",
+       [ BYTECODE_OP_LOAD_FIELD_STRING ] = "LOAD_FIELD_STRING",
+       [ BYTECODE_OP_LOAD_FIELD_SEQUENCE ] = "LOAD_FIELD_SEQUENCE",
+       [ BYTECODE_OP_LOAD_FIELD_DOUBLE ] = "LOAD_FIELD_DOUBLE",
+
+       [ BYTECODE_OP_UNARY_BIT_NOT ] = "UNARY_BIT_NOT",
+
+       [ BYTECODE_OP_RETURN_S64 ] = "RETURN_S64",
+};
+
+/*
+ * Return the human-readable name of a bytecode opcode, or "UNKNOWN"
+ * for any value outside the known opcode range.
+ */
+const char *lttng_bytecode_print_op(enum bytecode_op op)
+{
+       /*
+        * Guard both ends of the range: enum bytecode_op may be backed
+        * by a signed type, so a corrupted negative value must not be
+        * used to index opnames[].
+        */
+       if ((int) op < 0 || op >= NR_BYTECODE_OPS)
+               return "UNKNOWN";
+       else
+               return opnames[op];
+}
+
+/*
+ * Patch the LOAD_FIELD_REF instruction at reloc_offset so it refers to
+ * the event payload field named field_name: specialize the opcode
+ * according to the field's type and store the field's offset (within
+ * the interpreter's view of the payload) in the instruction's
+ * field_ref operand.
+ *
+ * Returns 0 on success, -EINVAL when the field is unknown, of an
+ * unsupported type, or when its offset does not fit the 16-bit
+ * operand.
+ */
+static
+int apply_field_reloc(const struct lttng_event_desc *event_desc,
+               struct bytecode_runtime *runtime,
+               uint32_t runtime_len,
+               uint32_t reloc_offset,
+               const char *field_name,
+               enum bytecode_op bytecode_op)
+{
+       const struct lttng_event_field *fields, *field = NULL;
+       unsigned int nr_fields, i;
+       struct load_op *op;
+       uint32_t field_offset = 0;
+
+       dbg_printk("Apply field reloc: %u %s\n", reloc_offset, field_name);
+
+       /* Lookup event by name */
+       if (!event_desc)
+               return -EINVAL;
+       fields = event_desc->fields;
+       if (!fields)
+               return -EINVAL;
+       nr_fields = event_desc->nr_fields;
+       /*
+        * Walk the event fields in declaration order, accumulating the
+        * offset contributed by each field preceding the one we look
+        * for.  Fields marked nofilter are skipped entirely (they do
+        * not appear in the filterable payload).
+        */
+       for (i = 0; i < nr_fields; i++) {
+               if (fields[i].nofilter)
+                       continue;
+               if (!strcmp(fields[i].name, field_name)) {
+                       field = &fields[i];
+                       break;
+               }
+               /* compute field offset */
+               switch (fields[i].type.atype) {
+               case atype_integer:
+               case atype_enum_nestable:
+                       field_offset += sizeof(int64_t);
+                       break;
+               case atype_array_nestable:
+                       if (!lttng_is_bytewise_integer(fields[i].type.u.array_nestable.elem_type))
+                               return -EINVAL;
+                       /* length word followed by data pointer */
+                       field_offset += sizeof(unsigned long);
+                       field_offset += sizeof(void *);
+                       break;
+               case atype_sequence_nestable:
+                       if (!lttng_is_bytewise_integer(fields[i].type.u.sequence_nestable.elem_type))
+                               return -EINVAL;
+                       /* length word followed by data pointer */
+                       field_offset += sizeof(unsigned long);
+                       field_offset += sizeof(void *);
+                       break;
+               case atype_string:
+                       field_offset += sizeof(void *);
+                       break;
+               case atype_struct_nestable:     /* Unsupported. */
+               case atype_variant_nestable:    /* Unsupported. */
+               default:
+                       return -EINVAL;
+               }
+       }
+       if (!field)
+               return -EINVAL;
+
+       /* Check if field offset is too large for 16-bit offset */
+       if (field_offset > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
+               return -EINVAL;
+
+       /* set type */
+       op = (struct load_op *) &runtime->code[reloc_offset];
+
+       switch (bytecode_op) {
+       case BYTECODE_OP_LOAD_FIELD_REF:
+       {
+               struct field_ref *field_ref;
+
+               field_ref = (struct field_ref *) op->data;
+               /* Specialize the generic field-ref opcode to the field type. */
+               switch (field->type.atype) {
+               case atype_integer:
+               case atype_enum_nestable:
+                       op->op = BYTECODE_OP_LOAD_FIELD_REF_S64;
+                       break;
+               case atype_array_nestable:
+               case atype_sequence_nestable:
+                       /* USER variants read the data through userspace accessors. */
+                       if (field->user)
+                               op->op = BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE;
+                       else
+                               op->op = BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE;
+                       break;
+               case atype_string:
+                       if (field->user)
+                               op->op = BYTECODE_OP_LOAD_FIELD_REF_USER_STRING;
+                       else
+                               op->op = BYTECODE_OP_LOAD_FIELD_REF_STRING;
+                       break;
+               case atype_struct_nestable:     /* Unsupported. */
+               case atype_variant_nestable:    /* Unsupported. */
+               default:
+                       return -EINVAL;
+               }
+               /* set offset */
+               field_ref->offset = (uint16_t) field_offset;
+               break;
+       }
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+/*
+ * Patch the GET_CONTEXT_REF instruction at reloc_offset so it refers
+ * to the context field named context_name: specialize the opcode
+ * according to the context field's type and store the context index in
+ * the instruction's field_ref operand.
+ *
+ * Returns 0 on success, -ENOENT when the context is unknown, -EINVAL
+ * on unsupported type or out-of-range index.
+ */
+static
+int apply_context_reloc(struct bytecode_runtime *runtime,
+               uint32_t runtime_len,
+               uint32_t reloc_offset,
+               const char *context_name,
+               enum bytecode_op bytecode_op)
+{
+       struct load_op *op;
+       struct lttng_ctx_field *ctx_field;
+       int idx;
+
+       dbg_printk("Apply context reloc: %u %s\n", reloc_offset, context_name);
+
+       /* Get context index */
+       idx = lttng_get_context_index(lttng_static_ctx, context_name);
+       if (idx < 0)
+               return -ENOENT;
+
+       /* Check if idx is too large for 16-bit offset */
+       if (idx > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
+               return -EINVAL;
+
+       /* Get context return type */
+       ctx_field = &lttng_static_ctx->fields[idx];
+       op = (struct load_op *) &runtime->code[reloc_offset];
+
+       switch (bytecode_op) {
+       case BYTECODE_OP_GET_CONTEXT_REF:
+       {
+               struct field_ref *field_ref;
+
+               field_ref = (struct field_ref *) op->data;
+               /* Specialize the generic context-ref opcode to the field type. */
+               switch (ctx_field->event_field.type.atype) {
+               case atype_integer:
+               case atype_enum_nestable:
+                       op->op = BYTECODE_OP_GET_CONTEXT_REF_S64;
+                       break;
+                       /* Sequence and array supported as string */
+               case atype_string:
+                       BUG_ON(ctx_field->event_field.user);
+                       op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
+                       break;
+               case atype_array_nestable:
+                       if (!lttng_is_bytewise_integer(ctx_field->event_field.type.u.array_nestable.elem_type))
+                               return -EINVAL;
+                       /* Context fields are never userspace-backed. */
+                       BUG_ON(ctx_field->event_field.user);
+                       op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
+                       break;
+               case atype_sequence_nestable:
+                       if (!lttng_is_bytewise_integer(ctx_field->event_field.type.u.sequence_nestable.elem_type))
+                               return -EINVAL;
+                       BUG_ON(ctx_field->event_field.user);
+                       op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
+                       break;
+               case atype_struct_nestable:     /* Unsupported. */
+               case atype_variant_nestable:    /* Unsupported. */
+               default:
+                       return -EINVAL;
+               }
+               /* set offset to context index within channel contexts */
+               field_ref->offset = (uint16_t) idx;
+               break;
+       }
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+/*
+ * Apply a single relocation to the runtime bytecode: the reloc names a
+ * payload field or context and points at the instruction to patch, and
+ * we dispatch on the opcode found there.
+ *
+ * Returns 0 on success, a negative error code on invalid reloc
+ * placement or unknown opcode.
+ */
+static
+int apply_reloc(const struct lttng_event_desc *event_desc,
+               struct bytecode_runtime *runtime,
+               uint32_t runtime_len,
+               uint32_t reloc_offset,
+               const char *name)
+{
+       struct load_op *op;
+
+       dbg_printk("Apply reloc: %u %s\n", reloc_offset, name);
+
+       /* Ensure that the reloc is within the code */
+       if (runtime_len - reloc_offset < sizeof(uint16_t))
+               return -EINVAL;
+
+       op = (struct load_op *) &runtime->code[reloc_offset];
+       switch (op->op) {
+       case BYTECODE_OP_LOAD_FIELD_REF:
+               return apply_field_reloc(event_desc, runtime, runtime_len,
+                       reloc_offset, name, op->op);
+       case BYTECODE_OP_GET_CONTEXT_REF:
+               return apply_context_reloc(runtime, runtime_len,
+                       reloc_offset, name, op->op);
+       case BYTECODE_OP_GET_SYMBOL:
+       case BYTECODE_OP_GET_SYMBOL_FIELD:
+               /*
+                * Will be handled by load specialize phase or
+                * dynamically by interpreter.
+                */
+               return 0;
+       default:
+               printk(KERN_WARNING "Unknown reloc op type %u\n", op->op);
+               return -EINVAL;
+       }
+       /* All switch cases return; no fall-through is possible. */
+}
+
+/*
+ * Return 1 if the given bytecode node already has a runtime on the
+ * provided runtime list, 0 otherwise.
+ */
+static
+int bytecode_is_linked(struct lttng_bytecode_node *bytecode,
+               struct list_head *bytecode_runtime_head)
+{
+       struct lttng_bytecode_runtime *runtime;
+
+       list_for_each_entry(runtime, bytecode_runtime_head, node) {
+               if (runtime->bc == bytecode)
+                       return 1;
+       }
+       return 0;
+}
+
+/*
+ * Take a bytecode with reloc table and link it to an event to create a
+ * bytecode runtime.
+ *
+ * The runtime holds a copy of the bytecode (without the reloc table),
+ * with all relocations applied, validated and specialized.  On success
+ * it is inserted at insert_loc with a real interpreter function; on
+ * link failure a runtime is still inserted, flagged link_failed and
+ * wired to an always-false interpreter stub, so callers can invoke it
+ * safely.
+ *
+ * Returns 0 on success (or when bytecode is NULL / already linked),
+ * a negative error code otherwise.
+ */
+static
+int link_bytecode(const struct lttng_event_desc *event_desc,
+               struct lttng_ctx *ctx,
+               struct lttng_bytecode_node *bytecode,
+               struct list_head *insert_loc)
+{
+       int ret, offset, next_offset;
+       struct bytecode_runtime *runtime = NULL;
+       size_t runtime_alloc_len;
+
+       if (!bytecode)
+               return 0;
+       /* Bytecode already linked */
+       if (bytecode_is_linked(bytecode, insert_loc))
+               return 0;
+
+       dbg_printk("Linking...\n");
+
+       /* We don't need the reloc table in the runtime */
+       runtime_alloc_len = sizeof(*runtime) + bytecode->bc.reloc_offset;
+       runtime = kzalloc(runtime_alloc_len, GFP_KERNEL);
+       if (!runtime) {
+               ret = -ENOMEM;
+               goto alloc_error;
+       }
+       runtime->p.bc = bytecode;
+       runtime->p.ctx = ctx;
+       /* Code occupies [0, reloc_offset); the reloc table follows it. */
+       runtime->len = bytecode->bc.reloc_offset;
+       /* copy original bytecode */
+       memcpy(runtime->code, bytecode->bc.data, runtime->len);
+       /*
+        * apply relocs. Those are a uint16_t (offset in bytecode)
+        * followed by a string (field name).
+        */
+       for (offset = bytecode->bc.reloc_offset;
+                       offset < bytecode->bc.len;
+                       offset = next_offset) {
+               uint16_t reloc_offset =
+                       *(uint16_t *) &bytecode->bc.data[offset];
+               const char *name =
+                       (const char *) &bytecode->bc.data[offset + sizeof(uint16_t)];
+
+               ret = apply_reloc(event_desc, runtime, runtime->len, reloc_offset, name);
+               if (ret) {
+                       goto link_error;
+               }
+               /* Reloc entries are variable-length: u16 + NUL-terminated name. */
+               next_offset = offset + sizeof(uint16_t) + strlen(name) + 1;
+       }
+       /* Validate bytecode */
+       ret = lttng_bytecode_validate(runtime);
+       if (ret) {
+               goto link_error;
+       }
+       /* Specialize bytecode */
+       ret = lttng_bytecode_specialize(event_desc, runtime);
+       if (ret) {
+               goto link_error;
+       }
+
+       /*
+        * NOTE(review): the capture interpreter is set to the
+        * always-false stub even on successful link — presumably filled
+        * in by a later revision; confirm this is intentional.
+        */
+       switch (bytecode->type) {
+       case LTTNG_BYTECODE_NODE_TYPE_FILTER:
+               runtime->p.interpreter_funcs.filter = lttng_bytecode_filter_interpret;
+               break;
+       case LTTNG_BYTECODE_NODE_TYPE_CAPTURE:
+               runtime->p.interpreter_funcs.capture = lttng_bytecode_capture_interpret_false;
+               break;
+       default:
+               WARN_ON(1);
+       }
+
+       runtime->p.link_failed = 0;
+       list_add_rcu(&runtime->p.node, insert_loc);
+       dbg_printk("Linking successful.\n");
+       return 0;
+
+link_error:
+
+       /*
+        * Keep a runtime on the list so lookups succeed, but point it at
+        * the always-false interpreter stubs and mark it failed.
+        * NOTE(review): an unknown bytecode->type here leaves the
+        * interpreter function pointer NULL (kzalloc) — verify callers
+        * never invoke it in that case.
+        */
+       switch (bytecode->type) {
+       case LTTNG_BYTECODE_NODE_TYPE_FILTER:
+               runtime->p.interpreter_funcs.filter = lttng_bytecode_filter_interpret_false;
+               break;
+       case LTTNG_BYTECODE_NODE_TYPE_CAPTURE:
+               runtime->p.interpreter_funcs.capture = lttng_bytecode_capture_interpret_false;
+               break;
+       default:
+               WARN_ON(1);
+       }
+       runtime->p.link_failed = 1;
+       list_add_rcu(&runtime->p.node, insert_loc);
+alloc_error:
+       dbg_printk("Linking failed.\n");
+       return ret;
+}
+
+/*
+ * Synchronize a filter runtime's interpreter with its enabler state:
+ * install the real interpreter only when the enabler is enabled and
+ * linking succeeded; otherwise fall back to the "false" stub.
+ */
+void lttng_bytecode_filter_sync_state(struct lttng_bytecode_runtime *runtime)
+{
+       struct lttng_bytecode_node *bc = runtime->bc;
+
+       if (!bc->enabler->enabled || runtime->link_failed)
+               runtime->interpreter_funcs.filter = lttng_bytecode_filter_interpret_false;
+       else
+               runtime->interpreter_funcs.filter = lttng_bytecode_filter_interpret;
+}
+
+/*
+ * Synchronize a capture runtime's interpreter with its enabler state:
+ * install the real capture interpreter only when the enabler is enabled
+ * and linking succeeded; otherwise fall back to the "false" stub.
+ */
+void lttng_bytecode_capture_sync_state(struct lttng_bytecode_runtime *runtime)
+{
+       struct lttng_bytecode_node *bc = runtime->bc;
+
+       if (!bc->enabler->enabled || runtime->link_failed)
+               runtime->interpreter_funcs.capture = lttng_bytecode_capture_interpret_false;
+       else
+               runtime->interpreter_funcs.capture = lttng_bytecode_capture_interpret;
+}
+
+/*
+ * Given the lists of bytecode programs of an instance (trigger or event) and
+ * of a matching enabler, try to link all the enabler's bytecode programs with
+ * the instance.
+ *
+ * This function is called after we have confirmed that the enabler and the
+ * instance have matching names (or that the glob pattern matches).
+ */
+void lttng_enabler_link_bytecode(const struct lttng_event_desc *event_desc,
+               struct lttng_ctx *ctx,
+               struct list_head *instance_bytecode_head,
+               struct list_head *enabler_bytecode_head)
+{
+       struct lttng_bytecode_node *enabler_bc;
+       struct lttng_bytecode_runtime *runtime;
+
+       WARN_ON_ONCE(!event_desc);
+
+       /* Go over all the bytecode programs of the enabler.  */
+       list_for_each_entry(enabler_bc, enabler_bytecode_head, node) {
+               int found = 0, ret;
+               struct list_head *insert_loc;
+
+               /*
+                * Check if the current enabler bytecode program is already
+                * linked with the instance.
+                */
+               list_for_each_entry(runtime, instance_bytecode_head, node) {
+                       if (runtime->bc == enabler_bc) {
+                               found = 1;
+                               break;
+                       }
+               }
+
+               /*
+                * Skip bytecode already linked, go to the next enabler
+                * bytecode program.
+                */
+               if (found)
+                       continue;
+
+               /*
+                * Insert at specified priority (seqnum) in increasing
+                * order. If there already is a bytecode of the same priority,
+                * insert the new bytecode right after it.
+                */
+               list_for_each_entry_reverse(runtime,
+                               instance_bytecode_head, node) {
+                       if (runtime->bc->bc.seqnum <= enabler_bc->bc.seqnum) {
+                               /* insert here */
+                               insert_loc = &runtime->node;
+                               goto add_within;
+                       }
+               }
+               /* No lower-or-equal seqnum found: add at head of list. */
+               insert_loc = instance_bytecode_head;
+       add_within:
+               dbg_printk("linking bytecode\n");
+               /* link_bytecode() inserts the runtime even on link failure. */
+               ret = link_bytecode(event_desc, ctx, enabler_bc, insert_loc);
+               if (ret) {
+                       dbg_printk("[lttng filter] warning: cannot link event bytecode\n");
+               }
+       }
+}
+
+/*
+ * We own the filter_bytecode if we return success.
+ */
+int lttng_filter_enabler_attach_bytecode(struct lttng_enabler *enabler,
+               struct lttng_bytecode_node *filter_bytecode)
+{
+       /*
+        * Takes ownership of @filter_bytecode: it is appended to the
+        * enabler's list and later freed by
+        * lttng_free_enabler_filter_bytecode(). Always returns 0.
+        */
+       list_add(&filter_bytecode->node, &enabler->filter_bytecode_head);
+       return 0;
+}
+
+/*
+ * Free every filter bytecode node attached to @enabler. The nodes are
+ * not unlinked (no list_del); presumably the enabler itself is being
+ * destroyed and its list head is not reused — TODO confirm at callers.
+ */
+void lttng_free_enabler_filter_bytecode(struct lttng_enabler *enabler)
+{
+       struct lttng_bytecode_node *filter_bytecode, *tmp;
+
+       list_for_each_entry_safe(filter_bytecode, tmp,
+                       &enabler->filter_bytecode_head, node) {
+               kfree(filter_bytecode);
+       }
+}
+
+/*
+ * Free all filter bytecode runtimes of @event, including each runtime's
+ * auxiliary data buffer. Does not free the underlying bytecode nodes,
+ * which are owned by the enabler.
+ */
+void lttng_free_event_filter_runtime(struct lttng_event *event)
+{
+       struct bytecode_runtime *runtime, *tmp;
+
+       list_for_each_entry_safe(runtime, tmp,
+                       &event->filter_bytecode_runtime_head, p.node) {
+               kfree(runtime->data);
+               kfree(runtime);
+       }
+}
index 3b9b907beb85196b36301cbb6df6456ab195b23b..6f120ac1e307b4d832cf3243774d7fd4b143470d 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/file.h>
 #include <linux/anon_inodes.h>
 #include <wrapper/file.h>
-#include <linux/jhash.h>
 #include <linux/uaccess.h>
 #include <linux/vmalloc.h>
 #include <linux/dmi.h>
 #include <wrapper/types.h>
 #include <lttng/kernel-version.h>
 #include <lttng/events.h>
+#include <lttng/lttng-bytecode.h>
 #include <lttng/tracer.h>
+#include <lttng/trigger-notification.h>
 #include <lttng/abi-old.h>
 #include <lttng/endian.h>
 #include <lttng/string-utils.h>
+#include <lttng/utils.h>
 #include <ringbuffer/backend.h>
 #include <ringbuffer/frontend.h>
 #include <wrapper/time.h>
 #define METADATA_CACHE_DEFAULT_SIZE 4096
 
 static LIST_HEAD(sessions);
+static LIST_HEAD(trigger_groups);
 static LIST_HEAD(lttng_transport_list);
 /*
  * Protect the sessions and metadata caches.
  */
 static DEFINE_MUTEX(sessions_mutex);
 static struct kmem_cache *event_cache;
+static struct kmem_cache *trigger_cache;
 
-static void lttng_session_lazy_sync_enablers(struct lttng_session *session);
-static void lttng_session_sync_enablers(struct lttng_session *session);
-static void lttng_enabler_destroy(struct lttng_enabler *enabler);
+static void lttng_session_lazy_sync_event_enablers(struct lttng_session *session);
+static void lttng_session_sync_event_enablers(struct lttng_session *session);
+static void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler);
+static void lttng_trigger_enabler_destroy(struct lttng_trigger_enabler *trigger_enabler);
+static void lttng_trigger_group_sync_enablers(struct lttng_trigger_group *trigger_group);
 
 static void _lttng_event_destroy(struct lttng_event *event);
+static void _lttng_trigger_destroy(struct lttng_trigger *trigger);
 static void _lttng_channel_destroy(struct lttng_channel *chan);
 static int _lttng_event_unregister(struct lttng_event *event);
+static int _lttng_trigger_unregister(struct lttng_trigger *trigger);
 static
 int _lttng_event_metadata_statedump(struct lttng_session *session,
                                  struct lttng_channel *chan,
@@ -108,6 +116,17 @@ void lttng_unlock_sessions(void)
        mutex_unlock(&sessions_mutex);
 }
 
+/*
+ * Look up a registered transport by exact name on the global
+ * lttng_transport_list; returns NULL if not found. Called with
+ * sessions_mutex held by lttng_trigger_group_create(); locking
+ * requirements of other callers are not visible here.
+ */
+static struct lttng_transport *lttng_transport_find(const char *name)
+{
+       struct lttng_transport *transport;
+
+       list_for_each_entry(transport, &lttng_transport_list, node) {
+               if (!strcmp(transport->name, name))
+                       return transport;
+       }
+       return NULL;
+}
+
 /*
  * Called with sessions lock held.
  */
@@ -178,6 +197,63 @@ err:
        return NULL;
 }
 
+/*
+ * Create a trigger group backed by a "relay-trigger" ring-buffer channel.
+ * Takes a reference on the transport module for the lifetime of the
+ * group (dropped in lttng_trigger_group_destroy()). Returns NULL on any
+ * failure (transport missing, module ref, allocation, channel creation).
+ *
+ * NOTE(review): channel geometry (subbuf_size/num_subbuf) is hard-coded
+ * pending configurability (see TODOs). Also, trigger_group->wakeup_pending
+ * is irq_work_sync()ed at destroy time but only zero-initialized here via
+ * lttng_kvzalloc — confirm the irq_work is initialized before first use.
+ */
+struct lttng_trigger_group *lttng_trigger_group_create(void)
+{
+       struct lttng_transport *transport = NULL;
+       struct lttng_trigger_group *trigger_group;
+       const char *transport_name = "relay-trigger";
+       size_t subbuf_size = 4096;      //TODO
+       size_t num_subbuf = 16;         //TODO
+       unsigned int switch_timer_interval = 0;
+       unsigned int read_timer_interval = 0;
+       int i;
+
+       mutex_lock(&sessions_mutex);
+
+       transport = lttng_transport_find(transport_name);
+       if (!transport) {
+               printk(KERN_WARNING "LTTng transport %s not found\n",
+                      transport_name);
+               goto notransport;
+       }
+       if (!try_module_get(transport->owner)) {
+               printk(KERN_WARNING "LTT : Can't lock transport module.\n");
+               goto notransport;
+       }
+
+       trigger_group = lttng_kvzalloc(sizeof(struct lttng_trigger_group),
+                                      GFP_KERNEL);
+       if (!trigger_group)
+               goto nomem;
+
+       trigger_group->ops = &transport->ops;
+       trigger_group->chan = transport->ops.channel_create(transport_name,
+                       trigger_group, NULL, subbuf_size, num_subbuf,
+                       switch_timer_interval, read_timer_interval);
+       if (!trigger_group->chan)
+               goto create_error;
+
+       trigger_group->transport = transport;
+       INIT_LIST_HEAD(&trigger_group->enablers_head);
+       INIT_LIST_HEAD(&trigger_group->triggers_head);
+       /* Empty-initialize every bucket of the per-group trigger hash table. */
+       for (i = 0; i < LTTNG_TRIGGER_HT_SIZE; i++)
+               INIT_HLIST_HEAD(&trigger_group->triggers_ht.table[i]);
+
+       list_add(&trigger_group->node, &trigger_groups);
+       mutex_unlock(&sessions_mutex);
+
+       return trigger_group;
+
+create_error:
+       lttng_kvfree(trigger_group);
+nomem:
+       /* Falls through from create_error: drop the transport module ref. */
+       if (transport)
+               module_put(transport->owner);
+notransport:
+       mutex_unlock(&sessions_mutex);
+       return NULL;
+}
+
 void metadata_cache_destroy(struct kref *kref)
 {
        struct lttng_metadata_cache *cache =
@@ -191,13 +267,13 @@ void lttng_session_destroy(struct lttng_session *session)
        struct lttng_channel *chan, *tmpchan;
        struct lttng_event *event, *tmpevent;
        struct lttng_metadata_stream *metadata_stream;
-       struct lttng_enabler *enabler, *tmpenabler;
+       struct lttng_event_enabler *event_enabler, *tmp_event_enabler;
        int ret;
 
        mutex_lock(&sessions_mutex);
        WRITE_ONCE(session->active, 0);
        list_for_each_entry(chan, &session->chan, list) {
-               ret = lttng_syscalls_unregister(chan);
+               ret = lttng_syscalls_unregister_event(chan);
                WARN_ON(ret);
        }
        list_for_each_entry(event, &session->events, list) {
@@ -205,9 +281,9 @@ void lttng_session_destroy(struct lttng_session *session)
                WARN_ON(ret);
        }
        synchronize_trace();    /* Wait for in-flight events to complete */
-       list_for_each_entry_safe(enabler, tmpenabler,
+       list_for_each_entry_safe(event_enabler, tmp_event_enabler,
                        &session->enablers_head, node)
-               lttng_enabler_destroy(enabler);
+               lttng_event_enabler_destroy(event_enabler);
        list_for_each_entry_safe(event, tmpevent, &session->events, list)
                _lttng_event_destroy(event);
        list_for_each_entry_safe(chan, tmpchan, &session->chan, list) {
@@ -228,6 +304,45 @@ void lttng_session_destroy(struct lttng_session *session)
        lttng_kvfree(session);
 }
 
+/*
+ * Tear down a trigger group: unregister all instrumentation first, wait
+ * for in-flight probes and pending wakeup irq_work, then free the
+ * triggers, enablers and the ring-buffer channel, and finally drop the
+ * transport module reference taken at creation. NULL-safe.
+ */
+void lttng_trigger_group_destroy(struct lttng_trigger_group *trigger_group)
+{
+       struct lttng_trigger_enabler *trigger_enabler, *tmp_trigger_enabler;
+       struct lttng_trigger *trigger, *tmptrigger;
+       int ret;
+
+       if (!trigger_group)
+               return;
+
+       mutex_lock(&sessions_mutex);
+
+       ret = lttng_syscalls_unregister_trigger(trigger_group);
+       WARN_ON(ret);
+
+       /* First pass: unregister only, triggers stay on the list. */
+       list_for_each_entry_safe(trigger, tmptrigger,
+                       &trigger_group->triggers_head, list) {
+               ret = _lttng_trigger_unregister(trigger);
+               WARN_ON(ret);
+       }
+
+       synchronize_trace();    /* Wait for in-flight triggers to complete */
+
+       irq_work_sync(&trigger_group->wakeup_pending);
+
+       /* Second pass: free the triggers (removes them from the list). */
+       list_for_each_entry_safe(trigger, tmptrigger,
+                       &trigger_group->triggers_head, list)
+               _lttng_trigger_destroy(trigger);
+
+       trigger_group->ops->channel_destroy(trigger_group->chan);
+       module_put(trigger_group->transport->owner);
+       list_del(&trigger_group->node);
+       mutex_unlock(&sessions_mutex);
+       lttng_kvfree(trigger_group);
+}
+
 int lttng_session_statedump(struct lttng_session *session)
 {
        int ret;
@@ -253,7 +368,7 @@ int lttng_session_enable(struct lttng_session *session)
        session->tstate = 1;
 
        /* We need to sync enablers with session before activation. */
-       lttng_session_sync_enablers(session);
+       lttng_session_sync_event_enablers(session);
 
        /*
         * Snapshot the number of events per channel to know the type of header
@@ -303,7 +418,7 @@ int lttng_session_disable(struct lttng_session *session)
 
        /* Set transient enabler state to "disabled" */
        session->tstate = 0;
-       lttng_session_sync_enablers(session);
+       lttng_session_sync_event_enablers(session);
 
        /* Set each stream's quiescent state. */
        list_for_each_entry(chan, &session->chan, list) {
@@ -370,7 +485,7 @@ int lttng_channel_enable(struct lttng_channel *channel)
        }
        /* Set transient enabler state to "enabled" */
        channel->tstate = 1;
-       lttng_session_sync_enablers(channel->session);
+       lttng_session_sync_event_enablers(channel->session);
        /* Set atomically the state to "enabled" */
        WRITE_ONCE(channel->enabled, 1);
 end:
@@ -395,7 +510,7 @@ int lttng_channel_disable(struct lttng_channel *channel)
        WRITE_ONCE(channel->enabled, 0);
        /* Set transient enabler state to "enabled" */
        channel->tstate = 0;
-       lttng_session_sync_enablers(channel->session);
+       lttng_session_sync_event_enablers(channel->session);
 end:
        mutex_unlock(&sessions_mutex);
        return ret;
@@ -473,15 +588,64 @@ end:
        return ret;
 }
 
-static struct lttng_transport *lttng_transport_find(const char *name)
+/*
+ * Mark a kprobe/uprobe trigger as enabled. Tracepoint and syscall
+ * triggers are driven by enabler sync instead, so they return -EINVAL
+ * here. Returns -EEXIST if the trigger is already enabled.
+ */
+int lttng_trigger_enable(struct lttng_trigger *trigger)
 {
-       struct lttng_transport *transport;
+       int ret = 0;
 
-       list_for_each_entry(transport, &lttng_transport_list, node) {
-               if (!strcmp(transport->name, name))
-                       return transport;
+       mutex_lock(&sessions_mutex);
+       if (trigger->enabled) {
+               ret = -EEXIST;
+               goto end;
        }
-       return NULL;
+       switch (trigger->instrumentation) {
+       case LTTNG_KERNEL_TRACEPOINT:
+       case LTTNG_KERNEL_SYSCALL:
+               ret = -EINVAL;
+               break;
+       case LTTNG_KERNEL_KPROBE:
+       case LTTNG_KERNEL_UPROBE:
+               WRITE_ONCE(trigger->enabled, 1);
+               break;
+       case LTTNG_KERNEL_FUNCTION:
+       case LTTNG_KERNEL_NOOP:
+       case LTTNG_KERNEL_KRETPROBE:
+       default:
+               WARN_ON_ONCE(1);
+               ret = -EINVAL;
+       }
+end:
+       mutex_unlock(&sessions_mutex);
+       return ret;
+}
+
+/*
+ * Mark a kprobe/uprobe trigger as disabled (mirror of
+ * lttng_trigger_enable()). Returns -EEXIST if already disabled,
+ * -EINVAL for instrumentation types driven by enabler sync.
+ */
+int lttng_trigger_disable(struct lttng_trigger *trigger)
+{
+       int ret = 0;
+
+       mutex_lock(&sessions_mutex);
+       if (!trigger->enabled) {
+               ret = -EEXIST;
+               goto end;
+       }
+       switch (trigger->instrumentation) {
+       case LTTNG_KERNEL_TRACEPOINT:
+       case LTTNG_KERNEL_SYSCALL:
+               ret = -EINVAL;
+               break;
+       case LTTNG_KERNEL_KPROBE:
+       case LTTNG_KERNEL_UPROBE:
+               WRITE_ONCE(trigger->enabled, 0);
+               break;
+       case LTTNG_KERNEL_FUNCTION:
+       case LTTNG_KERNEL_NOOP:
+       case LTTNG_KERNEL_KRETPROBE:
+       default:
+               WARN_ON_ONCE(1);
+               ret = -EINVAL;
+       }
+end:
+       mutex_unlock(&sessions_mutex);
+       return ret;
 }
 
 struct lttng_channel *lttng_channel_create(struct lttng_session *session,
@@ -590,8 +754,6 @@ struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
        struct lttng_event *event;
        const char *event_name;
        struct hlist_head *head;
-       size_t name_len;
-       uint32_t hash;
        int ret;
 
        if (chan->free_event_id == -1U) {
@@ -616,9 +778,9 @@ struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
                ret = -EINVAL;
                goto type_error;
        }
-       name_len = strlen(event_name);
-       hash = jhash(event_name, name_len, 0);
-       head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
+
+       head = utils_borrow_hash_table_bucket(session->events_ht.table,
+               LTTNG_EVENT_HT_SIZE, event_name);
        lttng_hlist_for_each_entry(event, head, hlist) {
                WARN_ON_ONCE(!event->desc);
                if (!strncmp(event->desc->name, event_name,
@@ -639,7 +801,7 @@ struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
        event->id = chan->free_event_id++;
        event->instrumentation = itype;
        event->evtype = LTTNG_TYPE_EVENT;
-       INIT_LIST_HEAD(&event->bytecode_runtime_head);
+       INIT_LIST_HEAD(&event->filter_bytecode_runtime_head);
        INIT_LIST_HEAD(&event->enablers_ref_head);
 
        switch (itype) {
@@ -647,7 +809,7 @@ struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
                /* Event will be enabled by enabler sync. */
                event->enabled = 0;
                event->registered = 0;
-               event->desc = lttng_event_get(event_name);
+               event->desc = lttng_event_desc_get(event_name);
                if (!event->desc) {
                        ret = -ENOENT;
                        goto register_error;
@@ -667,7 +829,7 @@ struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
                 * registration.
                 */
                smp_wmb();
-               ret = lttng_kprobes_register(event_name,
+               ret = lttng_kprobes_register_event(event_name,
                                event_param->u.kprobe.symbol_name,
                                event_param->u.kprobe.offset,
                                event_param->u.kprobe.addr,
@@ -761,7 +923,7 @@ struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
                 */
                smp_wmb();
 
-               ret = lttng_uprobes_register(event_param->name,
+               ret = lttng_uprobes_register_event(event_param->name,
                                event_param->u.uprobe.fd,
                                event);
                if (ret)
@@ -795,6 +957,157 @@ full:
        return ERR_PTR(ret);
 }
 
+/*
+ * Create a trigger within @trigger_group. Caller must hold
+ * sessions_mutex (see lttng_trigger_create()). Returns the new trigger,
+ * or ERR_PTR(): -EEXIST if a trigger with the same name, group and id
+ * already exists; -ENOMEM on allocation failure; -ENOENT when no
+ * tracepoint description matches; -EINVAL for unsupported
+ * instrumentation types or registration failure.
+ */
+struct lttng_trigger *_lttng_trigger_create(
+               const struct lttng_event_desc *event_desc,
+               uint64_t id, struct lttng_trigger_group *trigger_group,
+               struct lttng_kernel_trigger *trigger_param, void *filter,
+               enum lttng_kernel_instrumentation itype)
+{
+       struct lttng_trigger *trigger;
+       const char *event_name;
+       struct hlist_head *head;
+       int ret;
+
+       /* Pick the name used as hash key for the duplicate check below. */
+       switch (itype) {
+       case LTTNG_KERNEL_TRACEPOINT:
+               event_name = event_desc->name;
+               break;
+       case LTTNG_KERNEL_KPROBE:
+       case LTTNG_KERNEL_UPROBE:
+       case LTTNG_KERNEL_SYSCALL:
+               event_name = trigger_param->name;
+               break;
+       case LTTNG_KERNEL_KRETPROBE:
+       case LTTNG_KERNEL_FUNCTION:
+       case LTTNG_KERNEL_NOOP:
+       default:
+               WARN_ON_ONCE(1);
+               ret = -EINVAL;
+               goto type_error;
+       }
+
+       /* A trigger is unique per (name, group, id) triple. */
+       head = utils_borrow_hash_table_bucket(trigger_group->triggers_ht.table,
+               LTTNG_TRIGGER_HT_SIZE, event_name);
+       lttng_hlist_for_each_entry(trigger, head, hlist) {
+               WARN_ON_ONCE(!trigger->desc);
+               if (!strncmp(trigger->desc->name, event_name,
+                                       LTTNG_KERNEL_SYM_NAME_LEN - 1)
+                               && trigger_group == trigger->group
+                               && id == trigger->id) {
+                       ret = -EEXIST;
+                       goto exist;
+               }
+       }
+
+       trigger = kmem_cache_zalloc(trigger_cache, GFP_KERNEL);
+       if (!trigger) {
+               ret = -ENOMEM;
+               goto cache_error;
+       }
+       trigger->group = trigger_group;
+       trigger->id = id;
+       trigger->num_captures = 0;
+       trigger->filter = filter;
+       trigger->instrumentation = itype;
+       /* NOTE(review): shares the event type tag — confirm intended for triggers. */
+       trigger->evtype = LTTNG_TYPE_EVENT;
+       trigger->send_notification = lttng_trigger_notification_send;
+       INIT_LIST_HEAD(&trigger->filter_bytecode_runtime_head);
+       INIT_LIST_HEAD(&trigger->capture_bytecode_runtime_head);
+       INIT_LIST_HEAD(&trigger->enablers_ref_head);
+
+       switch (itype) {
+       case LTTNG_KERNEL_TRACEPOINT:
+               /* Event will be enabled by enabler sync. */
+               trigger->enabled = 0;
+               trigger->registered = 0;
+               trigger->desc = lttng_event_desc_get(event_name);
+               if (!trigger->desc) {
+                       ret = -ENOENT;
+                       goto register_error;
+               }
+               /* Populate lttng_trigger structure before event registration. */
+               smp_wmb();
+               break;
+       case LTTNG_KERNEL_KPROBE:
+               /*
+                * Needs to be explicitly enabled after creation, since
+                * we may want to apply filters.
+                */
+               trigger->enabled = 0;
+               trigger->registered = 1;
+               /*
+                * Populate lttng_trigger structure before event
+                * registration.
+                */
+               smp_wmb();
+               ret = lttng_kprobes_register_trigger(
+                               trigger_param->u.kprobe.symbol_name,
+                               trigger_param->u.kprobe.offset,
+                               trigger_param->u.kprobe.addr,
+                               trigger);
+               if (ret) {
+                       ret = -EINVAL;
+                       goto register_error;
+               }
+               /* trigger->desc is presumably set by the register call above — verify. */
+               ret = try_module_get(trigger->desc->owner);
+               WARN_ON_ONCE(!ret);
+               break;
+       case LTTNG_KERNEL_NOOP:
+       case LTTNG_KERNEL_SYSCALL:
+               /*
+                * Needs to be explicitly enabled after creation, since
+                * we may want to apply filters.
+                */
+               trigger->enabled = 0;
+               trigger->registered = 0;
+               trigger->desc = event_desc;
+               if (!trigger->desc) {
+                       ret = -EINVAL;
+                       goto register_error;
+               }
+               break;
+       case LTTNG_KERNEL_UPROBE:
+               /*
+                * Needs to be explicitly enabled after creation, since
+                * we may want to apply filters.
+                */
+               trigger->enabled = 0;
+               trigger->registered = 1;
+
+               /*
+                * Populate lttng_trigger structure before trigger
+                * registration.
+                */
+               smp_wmb();
+
+               ret = lttng_uprobes_register_trigger(trigger_param->name,
+                               trigger_param->u.uprobe.fd,
+                               trigger);
+               if (ret)
+                       goto register_error;
+               ret = try_module_get(trigger->desc->owner);
+               WARN_ON_ONCE(!ret);
+               break;
+       case LTTNG_KERNEL_KRETPROBE:
+       case LTTNG_KERNEL_FUNCTION:
+       default:
+               WARN_ON_ONCE(1);
+               ret = -EINVAL;
+               goto register_error;
+       }
+
+       /* Publish on both the group list and the per-group hash table. */
+       list_add(&trigger->list, &trigger_group->triggers_head);
+       hlist_add_head(&trigger->hlist, head);
+       return trigger;
+
+register_error:
+       kmem_cache_free(trigger_cache, trigger);
+cache_error:
+exist:
+type_error:
+       return ERR_PTR(ret);
+}
+
 struct lttng_event *lttng_event_create(struct lttng_channel *chan,
                                struct lttng_kernel_event *event_param,
                                void *filter,
@@ -810,6 +1123,21 @@ struct lttng_event *lttng_event_create(struct lttng_channel *chan,
        return event;
 }
 
+/*
+ * Public wrapper around _lttng_trigger_create() that takes the
+ * sessions_mutex for the duration of the creation. Same return
+ * convention: trigger pointer on success, ERR_PTR() on failure.
+ */
+struct lttng_trigger *lttng_trigger_create(
+               const struct lttng_event_desc *event_desc,
+               uint64_t id, struct lttng_trigger_group *trigger_group,
+               struct lttng_kernel_trigger *trigger_param, void *filter,
+               enum lttng_kernel_instrumentation itype)
+{
+       struct lttng_trigger *trigger;
+
+       mutex_lock(&sessions_mutex);
+       trigger = _lttng_trigger_create(event_desc, id, trigger_group,
+               trigger_param, filter, itype);
+       mutex_unlock(&sessions_mutex);
+       return trigger;
+}
+
 /* Only used for tracepoints for now. */
 static
 void register_event(struct lttng_event *event)
@@ -828,7 +1156,7 @@ void register_event(struct lttng_event *event)
                                                  event);
                break;
        case LTTNG_KERNEL_SYSCALL:
-               ret = lttng_syscall_filter_enable(event->chan,
+               ret = lttng_syscall_filter_enable_event(event->chan,
                        desc->name);
                break;
        case LTTNG_KERNEL_KPROBE:
@@ -864,7 +1192,7 @@ int _lttng_event_unregister(struct lttng_event *event)
                                                  event);
                break;
        case LTTNG_KERNEL_KPROBE:
-               lttng_kprobes_unregister(event);
+               lttng_kprobes_unregister_event(event);
                ret = 0;
                break;
        case LTTNG_KERNEL_KRETPROBE:
@@ -872,14 +1200,14 @@ int _lttng_event_unregister(struct lttng_event *event)
                ret = 0;
                break;
        case LTTNG_KERNEL_SYSCALL:
-               ret = lttng_syscall_filter_disable(event->chan,
+               ret = lttng_syscall_filter_disable_event(event->chan,
                        desc->name);
                break;
        case LTTNG_KERNEL_NOOP:
                ret = 0;
                break;
        case LTTNG_KERNEL_UPROBE:
-               lttng_uprobes_unregister(event);
+               lttng_uprobes_unregister_event(event);
                ret = 0;
                break;
        case LTTNG_KERNEL_FUNCTION:     /* Fall-through */
@@ -891,6 +1219,78 @@ int _lttng_event_unregister(struct lttng_event *event)
        return ret;
 }
 
+/*
+ * Only used for tracepoints for now. Attach the trigger's callback to
+ * its instrumentation point; idempotent (no-op when already registered).
+ * kprobe/uprobe triggers register at creation time, hence the ret = 0
+ * pass-through. On success the trigger is flagged as registered.
+ */
+static
+void register_trigger(struct lttng_trigger *trigger)
+{
+       const struct lttng_event_desc *desc;
+       int ret = -EINVAL;
+
+       if (trigger->registered)
+               return;
+
+       desc = trigger->desc;
+       switch (trigger->instrumentation) {
+       case LTTNG_KERNEL_TRACEPOINT:
+               ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
+                                                 desc->trigger_callback,
+                                                 trigger);
+               break;
+       case LTTNG_KERNEL_SYSCALL:
+               ret = lttng_syscall_filter_enable_trigger(trigger);
+               break;
+       case LTTNG_KERNEL_KPROBE:
+       case LTTNG_KERNEL_UPROBE:
+               ret = 0;
+               break;
+       case LTTNG_KERNEL_KRETPROBE:
+       case LTTNG_KERNEL_FUNCTION:
+       case LTTNG_KERNEL_NOOP:
+       default:
+               WARN_ON_ONCE(1);
+       }
+       if (!ret)
+               trigger->registered = 1;
+}
+
+/*
+ * Detach a trigger from its instrumentation point. Idempotent: returns
+ * 0 immediately when not registered. On success clears the registered
+ * flag; returns the underlying unregister error otherwise.
+ */
+static
+int _lttng_trigger_unregister(struct lttng_trigger *trigger)
+{
+       const struct lttng_event_desc *desc;
+       int ret = -EINVAL;
+
+       if (!trigger->registered)
+               return 0;
+
+       desc = trigger->desc;
+       switch (trigger->instrumentation) {
+       case LTTNG_KERNEL_TRACEPOINT:
+               ret = lttng_wrapper_tracepoint_probe_unregister(trigger->desc->kname,
+                                                 trigger->desc->trigger_callback,
+                                                 trigger);
+               break;
+       case LTTNG_KERNEL_KPROBE:
+               lttng_kprobes_unregister_trigger(trigger);
+               ret = 0;
+               break;
+       case LTTNG_KERNEL_UPROBE:
+               lttng_uprobes_unregister_trigger(trigger);
+               ret = 0;
+               break;
+       case LTTNG_KERNEL_SYSCALL:
+               ret = lttng_syscall_filter_disable_trigger(trigger);
+               break;
+       case LTTNG_KERNEL_KRETPROBE:
+       case LTTNG_KERNEL_FUNCTION:
+       case LTTNG_KERNEL_NOOP:
+       default:
+               WARN_ON_ONCE(1);
+       }
+       if (!ret)
+               trigger->registered = 0;
+       return ret;
+}
+
 /*
  * Only used internally at session destruction.
  */
@@ -899,11 +1299,11 @@ void _lttng_event_destroy(struct lttng_event *event)
 {
        switch (event->instrumentation) {
        case LTTNG_KERNEL_TRACEPOINT:
-               lttng_event_put(event->desc);
+               lttng_event_desc_put(event->desc);
                break;
        case LTTNG_KERNEL_KPROBE:
                module_put(event->desc->owner);
-               lttng_kprobes_destroy_private(event);
+               lttng_kprobes_destroy_event_private(event);
                break;
        case LTTNG_KERNEL_KRETPROBE:
                module_put(event->desc->owner);
@@ -914,7 +1314,7 @@ void _lttng_event_destroy(struct lttng_event *event)
                break;
        case LTTNG_KERNEL_UPROBE:
                module_put(event->desc->owner);
-               lttng_uprobes_destroy_private(event);
+               lttng_uprobes_destroy_event_private(event);
                break;
        case LTTNG_KERNEL_FUNCTION:     /* Fall-through */
        default:
@@ -925,6 +1325,36 @@ void _lttng_event_destroy(struct lttng_event *event)
        kmem_cache_free(event_cache, event);
 }
 
+/*
+ * Only used internally at session destruction.
+ */
+static
+void _lttng_trigger_destroy(struct lttng_trigger *trigger)
+{
+       /*
+        * Release per-instrumentation resources (event descriptor ref or
+        * probe module ref + private data), then unlink and free the
+        * trigger. NOTE(review): trigger->hlist is never removed from the
+        * group hash table here — safe only because this runs at group
+        * destruction when the whole table is freed; confirm no other
+        * caller exists.
+        */
+       switch (trigger->instrumentation) {
+       case LTTNG_KERNEL_TRACEPOINT:
+               lttng_event_desc_put(trigger->desc);
+               break;
+       case LTTNG_KERNEL_KPROBE:
+               module_put(trigger->desc->owner);
+               lttng_kprobes_destroy_trigger_private(trigger);
+               break;
+       case LTTNG_KERNEL_NOOP:
+       case LTTNG_KERNEL_SYSCALL:
+               break;
+       case LTTNG_KERNEL_UPROBE:
+               module_put(trigger->desc->owner);
+               lttng_uprobes_destroy_trigger_private(trigger);
+               break;
+       case LTTNG_KERNEL_KRETPROBE:
+       case LTTNG_KERNEL_FUNCTION:
+       default:
+               WARN_ON_ONCE(1);
+       }
+       list_del(&trigger->list);
+       kmem_cache_free(trigger_cache, trigger);
+}
+
 struct lttng_id_tracker *get_tracker(struct lttng_session *session,
                enum tracker_type tracker_type)
 {
@@ -1200,7 +1630,6 @@ int lttng_match_enabler_name(const char *desc_name,
        return 1;
 }
 
-static
 int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
                struct lttng_enabler *enabler)
 {
@@ -1230,10 +1659,10 @@ int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
                WARN_ON_ONCE(1);
                return -EINVAL;
        }
-       switch (enabler->type) {
-       case LTTNG_ENABLER_STAR_GLOB:
+       switch (enabler->format_type) {
+       case LTTNG_ENABLER_FORMAT_STAR_GLOB:
                return lttng_match_enabler_star_glob(desc_name, enabler_name);
-       case LTTNG_ENABLER_NAME:
+       case LTTNG_ENABLER_FORMAT_NAME:
                return lttng_match_enabler_name(desc_name, enabler_name);
        default:
                return -EINVAL;
@@ -1241,36 +1670,111 @@ int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
 }
 
 static
-int lttng_event_match_enabler(struct lttng_event *event,
-               struct lttng_enabler *enabler)
+int lttng_event_enabler_match_event(struct lttng_event_enabler *event_enabler,
+               struct lttng_event *event)
 {
-       if (enabler->event_param.instrumentation != event->instrumentation)
+       struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(
+               event_enabler);
+
+       if (base_enabler->event_param.instrumentation != event->instrumentation)
                return 0;
-       if (lttng_desc_match_enabler(event->desc, enabler)
-                       && event->chan == enabler->chan)
+       if (lttng_desc_match_enabler(event->desc, base_enabler)
+                       && event->chan == event_enabler->chan)
                return 1;
        else
                return 0;
 }
 
 static
-struct lttng_enabler_ref *lttng_event_enabler_ref(struct lttng_event *event,
+int lttng_trigger_enabler_match_trigger(struct lttng_trigger_enabler *trigger_enabler,
+               struct lttng_trigger *trigger)
+{
+       struct lttng_enabler *base_enabler = lttng_trigger_enabler_as_enabler(
+               trigger_enabler);
+
+       if (base_enabler->event_param.instrumentation != trigger->instrumentation)
+               return 0;
+       if (lttng_desc_match_enabler(trigger->desc, base_enabler)
+                       && trigger->group == trigger_enabler->group
+                       && trigger->id == trigger_enabler->id)
+               return 1;
+       else
+               return 0;
+}
+
+static
+struct lttng_enabler_ref *lttng_enabler_ref(
+               struct list_head *enablers_ref_list,
                struct lttng_enabler *enabler)
 {
        struct lttng_enabler_ref *enabler_ref;
 
-       list_for_each_entry(enabler_ref,
-                       &event->enablers_ref_head, node) {
-               if (enabler_ref->ref == enabler)
-                       return enabler_ref;
+       list_for_each_entry(enabler_ref, enablers_ref_list, node) {
+               if (enabler_ref->ref == enabler)
+                       return enabler_ref;
+       }
+       return NULL;
+}
+
+static
+void lttng_create_tracepoint_event_if_missing(struct lttng_event_enabler *event_enabler)
+{
+       struct lttng_session *session = event_enabler->chan->session;
+       struct lttng_probe_desc *probe_desc;
+       const struct lttng_event_desc *desc;
+       int i;
+       struct list_head *probe_list;
+
+       probe_list = lttng_get_probe_list_head();
+       /*
+        * For each probe event, if we find that a probe event matches
+        * our enabler, create an associated lttng_event if not
+        * already present.
+        */
+       list_for_each_entry(probe_desc, probe_list, head) {
+               for (i = 0; i < probe_desc->nr_events; i++) {
+                       int found = 0;
+                       struct hlist_head *head;
+                       struct lttng_event *event;
+
+                       desc = probe_desc->event_desc[i];
+                       if (!lttng_desc_match_enabler(desc,
+                                       lttng_event_enabler_as_enabler(event_enabler)))
+                               continue;
+
+                       /*
+                        * Check if already created.
+                        */
+                       head = utils_borrow_hash_table_bucket(
+                               session->events_ht.table, LTTNG_EVENT_HT_SIZE,
+                               desc->name);
+                       lttng_hlist_for_each_entry(event, head, hlist) {
+                               if (event->desc == desc
+                                               && event->chan == event_enabler->chan)
+                                       found = 1;
+                       }
+                       if (found)
+                               continue;
+
+                       /*
+                        * We need to create an event for this
+                        * event probe.
+                        */
+                       event = _lttng_event_create(event_enabler->chan,
+                                       NULL, NULL, desc,
+                                       LTTNG_KERNEL_TRACEPOINT);
+                       if (!event) {
+                               printk(KERN_INFO "Unable to create event %s\n",
+                                       probe_desc->event_desc[i]->name);
+                       }
+               }
        }
-       return NULL;
 }
 
 static
-void lttng_create_tracepoint_if_missing(struct lttng_enabler *enabler)
+void lttng_create_tracepoint_trigger_if_missing(struct lttng_trigger_enabler *trigger_enabler)
 {
-       struct lttng_session *session = enabler->chan->session;
+       struct lttng_trigger_group *trigger_group = trigger_enabler->group;
        struct lttng_probe_desc *probe_desc;
        const struct lttng_event_desc *desc;
        int i;
@@ -1279,46 +1783,42 @@ void lttng_create_tracepoint_if_missing(struct lttng_enabler *enabler)
        probe_list = lttng_get_probe_list_head();
        /*
         * For each probe event, if we find that a probe event matches
-        * our enabler, create an associated lttng_event if not
+        * our enabler, create an associated lttng_trigger if not
         * already present.
         */
        list_for_each_entry(probe_desc, probe_list, head) {
                for (i = 0; i < probe_desc->nr_events; i++) {
                        int found = 0;
                        struct hlist_head *head;
-                       const char *event_name;
-                       size_t name_len;
-                       uint32_t hash;
-                       struct lttng_event *event;
+                       struct lttng_trigger *trigger;
 
                        desc = probe_desc->event_desc[i];
-                       if (!lttng_desc_match_enabler(desc, enabler))
+                       if (!lttng_desc_match_enabler(desc,
+                                       lttng_trigger_enabler_as_enabler(trigger_enabler)))
                                continue;
-                       event_name = desc->name;
-                       name_len = strlen(event_name);
 
                        /*
                         * Check if already created.
                         */
-                       hash = jhash(event_name, name_len, 0);
-                       head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
-                       lttng_hlist_for_each_entry(event, head, hlist) {
-                               if (event->desc == desc
-                                               && event->chan == enabler->chan)
+                       head = utils_borrow_hash_table_bucket(
+                               trigger_group->triggers_ht.table,
+                               LTTNG_TRIGGER_HT_SIZE, desc->name);
+                       lttng_hlist_for_each_entry(trigger, head, hlist) {
+                               if (trigger->desc == desc
+                                               && trigger->id == trigger_enabler->id)
                                        found = 1;
                        }
                        if (found)
                                continue;
 
                        /*
-                        * We need to create an event for this
-                        * event probe.
+                        * We need to create a trigger for this event probe.
                         */
-                       event = _lttng_event_create(enabler->chan,
-                                       NULL, NULL, desc,
-                                       LTTNG_KERNEL_TRACEPOINT);
-                       if (!event) {
-                               printk(KERN_INFO "Unable to create event %s\n",
+                       trigger = _lttng_trigger_create(desc,
+                               trigger_enabler->id, trigger_group, NULL, NULL,
+                               LTTNG_KERNEL_TRACEPOINT);
+                       if (IS_ERR(trigger)) {
+                               printk(KERN_INFO "Unable to create trigger %s\n",
                                        probe_desc->event_desc[i]->name);
                        }
                }
@@ -1326,11 +1826,22 @@ void lttng_create_tracepoint_if_missing(struct lttng_enabler *enabler)
 }
 
 static
-void lttng_create_syscall_if_missing(struct lttng_enabler *enabler)
+void lttng_create_syscall_event_if_missing(struct lttng_event_enabler *event_enabler)
+{
+       int ret;
+
+       ret = lttng_syscalls_register_event(event_enabler->chan, NULL);
+       WARN_ON_ONCE(ret);
+}
+
+static
+void lttng_create_syscall_trigger_if_missing(struct lttng_trigger_enabler *trigger_enabler)
 {
        int ret;
 
-       ret = lttng_syscalls_register(enabler->chan, NULL);
+       ret = lttng_syscalls_register_trigger(trigger_enabler, NULL);
+       WARN_ON_ONCE(ret);
+       ret = lttng_syscals_create_matching_triggers(trigger_enabler, NULL);
        WARN_ON_ONCE(ret);
 }
 
@@ -1340,14 +1851,14 @@ void lttng_create_syscall_if_missing(struct lttng_enabler *enabler)
  * Should be called with sessions mutex held.
  */
 static
-void lttng_create_event_if_missing(struct lttng_enabler *enabler)
+void lttng_create_event_if_missing(struct lttng_event_enabler *event_enabler)
 {
-       switch (enabler->event_param.instrumentation) {
+       switch (event_enabler->base.event_param.instrumentation) {
        case LTTNG_KERNEL_TRACEPOINT:
-               lttng_create_tracepoint_if_missing(enabler);
+               lttng_create_tracepoint_event_if_missing(event_enabler);
                break;
        case LTTNG_KERNEL_SYSCALL:
-               lttng_create_syscall_if_missing(enabler);
+               lttng_create_syscall_event_if_missing(event_enabler);
                break;
        default:
                WARN_ON_ONCE(1);
@@ -1356,35 +1867,36 @@ void lttng_create_event_if_missing(struct lttng_enabler *enabler)
 }
 
 /*
- * Create events associated with an enabler (if not already present),
+ * Create events associated with an event_enabler (if not already present),
  * and add backward reference from the event to the enabler.
  * Should be called with sessions mutex held.
  */
 static
-int lttng_enabler_ref_events(struct lttng_enabler *enabler)
+int lttng_event_enabler_ref_events(struct lttng_event_enabler *event_enabler)
 {
-       struct lttng_session *session = enabler->chan->session;
+       struct lttng_session *session = event_enabler->chan->session;
        struct lttng_event *event;
 
        /* First ensure that probe events are created for this enabler. */
-       lttng_create_event_if_missing(enabler);
+       lttng_create_event_if_missing(event_enabler);
 
-       /* For each event matching enabler in session event list. */
+       /* For each event matching event_enabler in session event list. */
        list_for_each_entry(event, &session->events, list) {
                struct lttng_enabler_ref *enabler_ref;
 
-               if (!lttng_event_match_enabler(event, enabler))
+               if (!lttng_event_enabler_match_event(event_enabler, event))
                        continue;
-               enabler_ref = lttng_event_enabler_ref(event, enabler);
+               enabler_ref = lttng_enabler_ref(&event->enablers_ref_head,
+                       lttng_event_enabler_as_enabler(event_enabler));
                if (!enabler_ref) {
                        /*
                         * If no backward ref, create it.
-                        * Add backward ref from event to enabler.
+                        * Add backward ref from event to event_enabler.
                         */
                        enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
                        if (!enabler_ref)
                                return -ENOMEM;
-                       enabler_ref->ref = enabler;
+                       enabler_ref->ref = lttng_event_enabler_as_enabler(event_enabler);
                        list_add(&enabler_ref->node,
                                &event->enablers_ref_head);
                }
@@ -1392,13 +1904,90 @@ int lttng_enabler_ref_events(struct lttng_enabler *enabler)
                /*
                 * Link filter bytecodes if not linked yet.
                 */
-               lttng_enabler_event_link_bytecode(event, enabler);
+               lttng_enabler_link_bytecode(event->desc,
+                       lttng_static_ctx,
+                       &event->filter_bytecode_runtime_head,
+                       &lttng_event_enabler_as_enabler(event_enabler)->filter_bytecode_head);
 
                /* TODO: merge event context. */
        }
        return 0;
 }
 
+/*
+ * Create a struct lttng_trigger, if not already present, for each tracepoint
+ * probe matching this enabler.
+ * Should be called with sessions mutex held.
+ */
+static
+void lttng_create_trigger_if_missing(struct lttng_trigger_enabler *trigger_enabler)
+{
+       switch (trigger_enabler->base.event_param.instrumentation) {
+       case LTTNG_KERNEL_TRACEPOINT:
+               lttng_create_tracepoint_trigger_if_missing(trigger_enabler);
+               break;
+       case LTTNG_KERNEL_SYSCALL:
+               lttng_create_syscall_trigger_if_missing(trigger_enabler);
+               break;
+       default:
+               WARN_ON_ONCE(1);
+               break;
+       }
+}
+
+/*
+ * Create triggers associated with a trigger enabler (if not already present).
+ */
+static
+int lttng_trigger_enabler_ref_triggers(struct lttng_trigger_enabler *trigger_enabler)
+{
+       struct lttng_trigger_group *trigger_group = trigger_enabler->group;
+       struct lttng_trigger *trigger;
+
+       /* First ensure that probe triggers are created for this enabler. */
+       lttng_create_trigger_if_missing(trigger_enabler);
+
+       /* Link the created trigger with its associated enabler. */
+       list_for_each_entry(trigger, &trigger_group->triggers_head, list) {
+               struct lttng_enabler_ref *enabler_ref;
+
+               if (!lttng_trigger_enabler_match_trigger(trigger_enabler, trigger))
+                       continue;
+
+               enabler_ref = lttng_enabler_ref(&trigger->enablers_ref_head,
+                       lttng_trigger_enabler_as_enabler(trigger_enabler));
+               if (!enabler_ref) {
+                       /*
+                        * If no backward ref, create it.
+                        * Add backward ref from trigger to enabler.
+                        */
+                       enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
+                       if (!enabler_ref)
+                               return -ENOMEM;
+
+                       enabler_ref->ref = lttng_trigger_enabler_as_enabler(
+                               trigger_enabler);
+                       list_add(&enabler_ref->node,
+                               &trigger->enablers_ref_head);
+               }
+
+               /*
+                * Link filter bytecodes if not linked yet.
+                */
+               lttng_enabler_link_bytecode(trigger->desc,
+                       lttng_static_ctx, &trigger->filter_bytecode_runtime_head,
+                       &lttng_trigger_enabler_as_enabler(trigger_enabler)->filter_bytecode_head);
+
+               /* Link capture bytecodes if not linked yet. */
+               lttng_enabler_link_bytecode(trigger->desc,
+                       lttng_static_ctx, &trigger->capture_bytecode_runtime_head,
+                       &trigger_enabler->capture_bytecode_head);
+
+               trigger->num_captures = trigger_enabler->num_captures;
+       }
+       return 0;
+}
+
 /*
  * Called at module load: connect the probe on all enablers matching
  * this event.
@@ -1409,56 +1998,91 @@ int lttng_fix_pending_events(void)
        struct lttng_session *session;
 
        list_for_each_entry(session, &sessions, list)
-               lttng_session_lazy_sync_enablers(session);
+               lttng_session_lazy_sync_event_enablers(session);
+       return 0;
+}
+
+static bool lttng_trigger_group_has_active_triggers(
+               struct lttng_trigger_group *trigger_group)
+{
+       struct lttng_trigger_enabler *trigger_enabler;
+
+       list_for_each_entry(trigger_enabler, &trigger_group->enablers_head,
+                       node) {
+               if (trigger_enabler->base.enabled)
+                       return true;
+       }
+       return false;
+}
+
+bool lttng_trigger_active(void)
+{
+       struct lttng_trigger_group *trigger_group;
+
+       list_for_each_entry(trigger_group, &trigger_groups, node) {
+               if (lttng_trigger_group_has_active_triggers(trigger_group))
+                       return true;
+       }
+       return false;
+}
+
+int lttng_fix_pending_triggers(void)
+{
+       struct lttng_trigger_group *trigger_group;
+
+       list_for_each_entry(trigger_group, &trigger_groups, node)
+               lttng_trigger_group_sync_enablers(trigger_group);
        return 0;
 }
 
-struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
+struct lttng_event_enabler *lttng_event_enabler_create(
+               enum lttng_enabler_format_type format_type,
                struct lttng_kernel_event *event_param,
                struct lttng_channel *chan)
 {
-       struct lttng_enabler *enabler;
+       struct lttng_event_enabler *event_enabler;
 
-       enabler = kzalloc(sizeof(*enabler), GFP_KERNEL);
-       if (!enabler)
+       event_enabler = kzalloc(sizeof(*event_enabler), GFP_KERNEL);
+       if (!event_enabler)
                return NULL;
-       enabler->type = type;
-       INIT_LIST_HEAD(&enabler->filter_bytecode_head);
-       memcpy(&enabler->event_param, event_param,
-               sizeof(enabler->event_param));
-       enabler->chan = chan;
+       event_enabler->base.format_type = format_type;
+       INIT_LIST_HEAD(&event_enabler->base.filter_bytecode_head);
+       memcpy(&event_enabler->base.event_param, event_param,
+               sizeof(event_enabler->base.event_param));
+       event_enabler->chan = chan;
        /* ctx left NULL */
-       enabler->enabled = 0;
-       enabler->evtype = LTTNG_TYPE_ENABLER;
+       event_enabler->base.enabled = 0;
+       event_enabler->base.evtype = LTTNG_TYPE_ENABLER;
        mutex_lock(&sessions_mutex);
-       list_add(&enabler->node, &enabler->chan->session->enablers_head);
-       lttng_session_lazy_sync_enablers(enabler->chan->session);
+       list_add(&event_enabler->node, &event_enabler->chan->session->enablers_head);
+       lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
        mutex_unlock(&sessions_mutex);
-       return enabler;
+       return event_enabler;
 }
 
-int lttng_enabler_enable(struct lttng_enabler *enabler)
+int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler)
 {
        mutex_lock(&sessions_mutex);
-       enabler->enabled = 1;
-       lttng_session_lazy_sync_enablers(enabler->chan->session);
+       lttng_event_enabler_as_enabler(event_enabler)->enabled = 1;
+       lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
        mutex_unlock(&sessions_mutex);
        return 0;
 }
 
-int lttng_enabler_disable(struct lttng_enabler *enabler)
+int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler)
 {
        mutex_lock(&sessions_mutex);
-       enabler->enabled = 0;
-       lttng_session_lazy_sync_enablers(enabler->chan->session);
+       lttng_event_enabler_as_enabler(event_enabler)->enabled = 0;
+       lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
        mutex_unlock(&sessions_mutex);
        return 0;
 }
 
-int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
+static
+int lttng_enabler_attach_filter_bytecode(struct lttng_enabler *enabler,
                struct lttng_kernel_filter_bytecode __user *bytecode)
 {
-       struct lttng_filter_bytecode_node *bytecode_node;
+       struct lttng_bytecode_node *bytecode_node;
        uint32_t bytecode_len;
        int ret;
 
@@ -1473,11 +2097,13 @@ int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
                sizeof(*bytecode) + bytecode_len);
        if (ret)
                goto error_free;
+
+       bytecode_node->type = LTTNG_BYTECODE_NODE_TYPE_FILTER;
        bytecode_node->enabler = enabler;
        /* Enforce length based on allocated size */
        bytecode_node->bc.len = bytecode_len;
        list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
-       lttng_session_lazy_sync_enablers(enabler->chan->session);
+
        return 0;
 
 error_free:
@@ -1485,19 +2111,35 @@ error_free:
        return ret;
 }
 
+int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler *event_enabler,
+               struct lttng_kernel_filter_bytecode __user *bytecode)
+{
+       int ret;
+       ret = lttng_enabler_attach_filter_bytecode(
+               lttng_event_enabler_as_enabler(event_enabler), bytecode);
+       if (ret)
+               goto error;
+
+       lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
+       return 0;
+
+error:
+       return ret;
+}
+
 int lttng_event_add_callsite(struct lttng_event *event,
                struct lttng_kernel_event_callsite __user *callsite)
 {
 
        switch (event->instrumentation) {
        case LTTNG_KERNEL_UPROBE:
-               return lttng_uprobes_add_callsite(event, callsite);
+               return lttng_uprobes_event_add_callsite(event, callsite);
        default:
                return -EINVAL;
        }
 }
 
-int lttng_enabler_attach_context(struct lttng_enabler *enabler,
+int lttng_event_enabler_attach_context(struct lttng_event_enabler *event_enabler,
                struct lttng_kernel_context *context_param)
 {
        return -ENOSYS;
@@ -1506,34 +2148,184 @@ int lttng_enabler_attach_context(struct lttng_enabler *enabler,
 static
 void lttng_enabler_destroy(struct lttng_enabler *enabler)
 {
-       struct lttng_filter_bytecode_node *filter_node, *tmp_filter_node;
+       struct lttng_bytecode_node *filter_node, *tmp_filter_node;
 
        /* Destroy filter bytecode */
        list_for_each_entry_safe(filter_node, tmp_filter_node,
                        &enabler->filter_bytecode_head, node) {
                kfree(filter_node);
        }
+}
+
+static
+void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler)
+{
+       lttng_enabler_destroy(lttng_event_enabler_as_enabler(event_enabler));
 
        /* Destroy contexts */
-       lttng_destroy_context(enabler->ctx);
+       lttng_destroy_context(event_enabler->ctx);
+
+       list_del(&event_enabler->node);
+       kfree(event_enabler);
+}
+
+struct lttng_trigger_enabler *lttng_trigger_enabler_create(
+               struct lttng_trigger_group *trigger_group,
+               enum lttng_enabler_format_type format_type,
+               struct lttng_kernel_trigger *trigger_param)
+{
+       struct lttng_trigger_enabler *trigger_enabler;
+
+       trigger_enabler = kzalloc(sizeof(*trigger_enabler), GFP_KERNEL);
+       if (!trigger_enabler)
+               return NULL;
+
+       trigger_enabler->base.format_type = format_type;
+       INIT_LIST_HEAD(&trigger_enabler->base.filter_bytecode_head);
+       INIT_LIST_HEAD(&trigger_enabler->capture_bytecode_head);
+
+       trigger_enabler->id = trigger_param->id;
+       trigger_enabler->num_captures = 0;
+
+       memcpy(&trigger_enabler->base.event_param.name, trigger_param->name,
+               sizeof(trigger_enabler->base.event_param.name));
+       trigger_enabler->base.event_param.instrumentation = trigger_param->instrumentation;
+       trigger_enabler->base.evtype = LTTNG_TYPE_ENABLER;
+
+       trigger_enabler->base.enabled = 0;
+       trigger_enabler->group = trigger_group;
+
+       mutex_lock(&sessions_mutex);
+       list_add(&trigger_enabler->node, &trigger_enabler->group->enablers_head);
+       lttng_trigger_group_sync_enablers(trigger_enabler->group);
+
+       mutex_unlock(&sessions_mutex);
+
+       return trigger_enabler;
+}
+
+int lttng_trigger_enabler_enable(struct lttng_trigger_enabler *trigger_enabler)
+{
+       mutex_lock(&sessions_mutex);
+       lttng_trigger_enabler_as_enabler(trigger_enabler)->enabled = 1;
+       lttng_trigger_group_sync_enablers(trigger_enabler->group);
+       mutex_unlock(&sessions_mutex);
+       return 0;
+}
+
+int lttng_trigger_enabler_disable(struct lttng_trigger_enabler *trigger_enabler)
+{
+       mutex_lock(&sessions_mutex);
+       lttng_trigger_enabler_as_enabler(trigger_enabler)->enabled = 0;
+       lttng_trigger_group_sync_enablers(trigger_enabler->group);
+       mutex_unlock(&sessions_mutex);
+       return 0;
+}
+
+int lttng_trigger_enabler_attach_filter_bytecode(
+               struct lttng_trigger_enabler *trigger_enabler,
+               struct lttng_kernel_filter_bytecode __user *bytecode)
+{
+       int ret;
+
+       ret = lttng_enabler_attach_filter_bytecode(
+               lttng_trigger_enabler_as_enabler(trigger_enabler), bytecode);
+       if (ret)
+               goto error;
+
+       lttng_trigger_group_sync_enablers(trigger_enabler->group);
+       return 0;
+
+error:
+       return ret;
+}
+
+int lttng_trigger_enabler_attach_capture_bytecode(
+               struct lttng_trigger_enabler *trigger_enabler,
+               struct lttng_kernel_capture_bytecode __user *bytecode)
+{
+       struct lttng_bytecode_node *bytecode_node;
+       struct lttng_enabler *enabler =
+                       lttng_trigger_enabler_as_enabler(trigger_enabler);
+       uint32_t bytecode_len;
+       int ret;
+
+       ret = get_user(bytecode_len, &bytecode->len);
+       if (ret)
+               return ret;
+
+       bytecode_node = kzalloc(sizeof(*bytecode_node) + bytecode_len,
+                       GFP_KERNEL);
+       if (!bytecode_node)
+               return -ENOMEM;
+
+       ret = copy_from_user(&bytecode_node->bc, bytecode,
+               sizeof(*bytecode) + bytecode_len);
+       if (ret)
+               goto error_free;
+
+       bytecode_node->type = LTTNG_BYTECODE_NODE_TYPE_CAPTURE;
+       bytecode_node->enabler = enabler;
+
+       /* Enforce length based on allocated size */
+       bytecode_node->bc.len = bytecode_len;
+       list_add_tail(&bytecode_node->node, &trigger_enabler->capture_bytecode_head);
+
+       trigger_enabler->num_captures++;
+
+       lttng_trigger_group_sync_enablers(trigger_enabler->group);
+       goto end;
+
+error_free:
+       kfree(bytecode_node);
+end:
+       return ret;
+}
+
+int lttng_trigger_add_callsite(struct lttng_trigger *trigger,
+               struct lttng_kernel_event_callsite __user *callsite)
+{
+
+       switch (trigger->instrumentation) {
+       case LTTNG_KERNEL_UPROBE:
+               return lttng_uprobes_trigger_add_callsite(trigger, callsite);
+       default:
+               return -EINVAL;
+       }
+}
+
+int lttng_trigger_enabler_attach_context(struct lttng_trigger_enabler *trigger_enabler,
+               struct lttng_kernel_context *context_param)
+{
+       return -ENOSYS;
+}
+
+static
+void lttng_trigger_enabler_destroy(struct lttng_trigger_enabler *trigger_enabler)
+{
+       if (!trigger_enabler) {
+               return;
+       }
 
-       list_del(&enabler->node);
-       kfree(enabler);
+       list_del(&trigger_enabler->node);
+
+       lttng_enabler_destroy(lttng_trigger_enabler_as_enabler(trigger_enabler));
+       kfree(trigger_enabler);
 }
 
 /*
- * lttng_session_sync_enablers should be called just before starting a
+ * lttng_session_sync_event_enablers should be called just before starting a
  * session.
  * Should be called with sessions mutex held.
  */
 static
-void lttng_session_sync_enablers(struct lttng_session *session)
+void lttng_session_sync_event_enablers(struct lttng_session *session)
 {
-       struct lttng_enabler *enabler;
+       struct lttng_event_enabler *event_enabler;
        struct lttng_event *event;
 
-       list_for_each_entry(enabler, &session->enablers_head, node)
-               lttng_enabler_ref_events(enabler);
+       list_for_each_entry(event_enabler, &session->enablers_head, node)
+               lttng_event_enabler_ref_events(event_enabler);
        /*
         * For each event, if at least one of its enablers is enabled,
         * and its channel and session transient states are enabled, we
@@ -1592,8 +2384,8 @@ void lttng_session_sync_enablers(struct lttng_session *session)
 
                /* Enable filters */
                list_for_each_entry(runtime,
-                               &event->bytecode_runtime_head, node)
-                       lttng_filter_sync_state(runtime);
+                               &event->filter_bytecode_runtime_head, node)
+                       lttng_bytecode_filter_sync_state(runtime);
        }
 }
 
@@ -1605,12 +2397,84 @@ void lttng_session_sync_enablers(struct lttng_session *session)
  * Should be called with sessions mutex held.
  */
 static
-void lttng_session_lazy_sync_enablers(struct lttng_session *session)
+void lttng_session_lazy_sync_event_enablers(struct lttng_session *session)
 {
        /* We can skip if session is not active */
        if (!session->active)
                return;
-       lttng_session_sync_enablers(session);
+       lttng_session_sync_event_enablers(session);
+}
+
+static
+void lttng_trigger_group_sync_enablers(struct lttng_trigger_group *trigger_group)
+{
+       struct lttng_trigger_enabler *trigger_enabler;
+       struct lttng_trigger *trigger;
+
+       list_for_each_entry(trigger_enabler, &trigger_group->enablers_head, node)
+               lttng_trigger_enabler_ref_triggers(trigger_enabler);
+
+       /*
+        * For each trigger, if at least one of its enablers is enabled,
+        * we enable the trigger, else we disable it.
+        */
+       list_for_each_entry(trigger, &trigger_group->triggers_head, list) {
+               struct lttng_enabler_ref *enabler_ref;
+               struct lttng_bytecode_runtime *runtime;
+               int enabled = 0, has_enablers_without_bytecode = 0;
+
+               switch (trigger->instrumentation) {
+               case LTTNG_KERNEL_TRACEPOINT:
+               case LTTNG_KERNEL_SYSCALL:
+                       /* Enable triggers */
+                       list_for_each_entry(enabler_ref,
+                                       &trigger->enablers_ref_head, node) {
+                               if (enabler_ref->ref->enabled) {
+                                       enabled = 1;
+                                       break;
+                               }
+                       }
+                       break;
+               default:
+                       /* Not handled with sync. */
+                       continue;
+               }
+
+               WRITE_ONCE(trigger->enabled, enabled);
+               /*
+                * Sync tracepoint registration with trigger enabled
+                * state.
+                */
+               if (enabled) {
+                       if (!trigger->registered)
+                               register_trigger(trigger);
+               } else {
+                       if (trigger->registered)
+                               _lttng_trigger_unregister(trigger);
+               }
+
+               /* Check if has enablers without bytecode enabled */
+               list_for_each_entry(enabler_ref,
+                               &trigger->enablers_ref_head, node) {
+                       if (enabler_ref->ref->enabled
+                                       && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
+                               has_enablers_without_bytecode = 1;
+                               break;
+                       }
+               }
+               trigger->has_enablers_without_bytecode =
+                       has_enablers_without_bytecode;
+
+               /* Enable filters */
+               list_for_each_entry(runtime,
+                               &trigger->filter_bytecode_runtime_head, node)
+                       lttng_bytecode_filter_sync_state(runtime);
+
+               /* Enable captures */
+               list_for_each_entry(runtime,
+                               &trigger->capture_bytecode_runtime_head, node)
+                       lttng_bytecode_capture_sync_state(runtime);
+       }
 }
 
 /*
@@ -3022,7 +3886,12 @@ static int __init lttng_events_init(void)
        event_cache = KMEM_CACHE(lttng_event, 0);
        if (!event_cache) {
                ret = -ENOMEM;
-               goto error_kmem;
+               goto error_kmem_event;
+       }
+       trigger_cache = KMEM_CACHE(lttng_trigger, 0);
+       if (!trigger_cache) {
+               ret = -ENOMEM;
+               goto error_kmem_trigger;
        }
        ret = lttng_abi_init();
        if (ret)
@@ -3056,8 +3925,10 @@ error_hotplug:
 error_logger:
        lttng_abi_exit();
 error_abi:
+       kmem_cache_destroy(trigger_cache);
+error_kmem_trigger:
        kmem_cache_destroy(event_cache);
-error_kmem:
+error_kmem_event:
        lttng_tracepoint_exit();
 error_tp:
        lttng_context_exit();
@@ -3092,6 +3963,7 @@ static void __exit lttng_events_exit(void)
        list_for_each_entry_safe(session, tmpsession, &sessions, list)
                lttng_session_destroy(session);
        kmem_cache_destroy(event_cache);
+       kmem_cache_destroy(trigger_cache);
        lttng_tracepoint_exit();
        lttng_context_exit();
        printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
diff --git a/src/lttng-filter-interpreter.c b/src/lttng-filter-interpreter.c
deleted file mode 100644 (file)
index 6c1c2f2..0000000
+++ /dev/null
@@ -1,1579 +0,0 @@
-/* SPDX-License-Identifier: MIT
- *
- * lttng-filter-interpreter.c
- *
- * LTTng modules filter interpreter.
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <wrapper/uaccess.h>
-#include <wrapper/frame.h>
-#include <wrapper/types.h>
-#include <linux/swab.h>
-
-#include <lttng/filter.h>
-#include <lttng/string-utils.h>
-
-LTTNG_STACK_FRAME_NON_STANDARD(lttng_filter_interpret_bytecode);
-
-/*
- * get_char should be called with page fault handler disabled if it is expected
- * to handle user-space read.
- */
-static
-char get_char(struct estack_entry *reg, size_t offset)
-{
-       if (unlikely(offset >= reg->u.s.seq_len))
-               return '\0';
-       if (reg->u.s.user) {
-               char c;
-
-               /* Handle invalid access as end of string. */
-               if (unlikely(!lttng_access_ok(VERIFY_READ,
-                               reg->u.s.user_str + offset,
-                               sizeof(c))))
-                       return '\0';
-               /* Handle fault (nonzero return value) as end of string. */
-               if (unlikely(__copy_from_user_inatomic(&c,
-                               reg->u.s.user_str + offset,
-                               sizeof(c))))
-                       return '\0';
-               return c;
-       } else {
-               return reg->u.s.str[offset];
-       }
-}
-
-/*
- * -1: wildcard found.
- * -2: unknown escape char.
- * 0: normal char.
- */
-static
-int parse_char(struct estack_entry *reg, char *c, size_t *offset)
-{
-       switch (*c) {
-       case '\\':
-               (*offset)++;
-               *c = get_char(reg, *offset);
-               switch (*c) {
-               case '\\':
-               case '*':
-                       return 0;
-               default:
-                       return -2;
-               }
-       case '*':
-               return -1;
-       default:
-               return 0;
-       }
-}
-
-static
-char get_char_at_cb(size_t at, void *data)
-{
-       return get_char(data, at);
-}
-
-static
-int stack_star_glob_match(struct estack *stack, int top, const char *cmp_type)
-{
-       bool has_user = false;
-       mm_segment_t old_fs;
-       int result;
-       struct estack_entry *pattern_reg;
-       struct estack_entry *candidate_reg;
-
-       if (estack_bx(stack, top)->u.s.user
-                       || estack_ax(stack, top)->u.s.user) {
-               has_user = true;
-               old_fs = get_fs();
-               set_fs(KERNEL_DS);
-               pagefault_disable();
-       }
-
-       /* Find out which side is the pattern vs. the candidate. */
-       if (estack_ax(stack, top)->u.s.literal_type == ESTACK_STRING_LITERAL_TYPE_STAR_GLOB) {
-               pattern_reg = estack_ax(stack, top);
-               candidate_reg = estack_bx(stack, top);
-       } else {
-               pattern_reg = estack_bx(stack, top);
-               candidate_reg = estack_ax(stack, top);
-       }
-
-       /* Perform the match operation. */
-       result = !strutils_star_glob_match_char_cb(get_char_at_cb,
-               pattern_reg, get_char_at_cb, candidate_reg);
-       if (has_user) {
-               pagefault_enable();
-               set_fs(old_fs);
-       }
-
-       return result;
-}
-
-static
-int stack_strcmp(struct estack *stack, int top, const char *cmp_type)
-{
-       size_t offset_bx = 0, offset_ax = 0;
-       int diff, has_user = 0;
-       mm_segment_t old_fs;
-
-       if (estack_bx(stack, top)->u.s.user
-                       || estack_ax(stack, top)->u.s.user) {
-               has_user = 1;
-               old_fs = get_fs();
-               set_fs(KERNEL_DS);
-               pagefault_disable();
-       }
-
-       for (;;) {
-               int ret;
-               int escaped_r0 = 0;
-               char char_bx, char_ax;
-
-               char_bx = get_char(estack_bx(stack, top), offset_bx);
-               char_ax = get_char(estack_ax(stack, top), offset_ax);
-
-               if (unlikely(char_bx == '\0')) {
-                       if (char_ax == '\0') {
-                               diff = 0;
-                               break;
-                       } else {
-                               if (estack_ax(stack, top)->u.s.literal_type ==
-                                               ESTACK_STRING_LITERAL_TYPE_PLAIN) {
-                                       ret = parse_char(estack_ax(stack, top),
-                                               &char_ax, &offset_ax);
-                                       if (ret == -1) {
-                                               diff = 0;
-                                               break;
-                                       }
-                               }
-                               diff = -1;
-                               break;
-                       }
-               }
-               if (unlikely(char_ax == '\0')) {
-                       if (estack_bx(stack, top)->u.s.literal_type ==
-                                       ESTACK_STRING_LITERAL_TYPE_PLAIN) {
-                               ret = parse_char(estack_bx(stack, top),
-                                       &char_bx, &offset_bx);
-                               if (ret == -1) {
-                                       diff = 0;
-                                       break;
-                               }
-                       }
-                       diff = 1;
-                       break;
-               }
-               if (estack_bx(stack, top)->u.s.literal_type ==
-                               ESTACK_STRING_LITERAL_TYPE_PLAIN) {
-                       ret = parse_char(estack_bx(stack, top),
-                               &char_bx, &offset_bx);
-                       if (ret == -1) {
-                               diff = 0;
-                               break;
-                       } else if (ret == -2) {
-                               escaped_r0 = 1;
-                       }
-                       /* else compare both char */
-               }
-               if (estack_ax(stack, top)->u.s.literal_type ==
-                               ESTACK_STRING_LITERAL_TYPE_PLAIN) {
-                       ret = parse_char(estack_ax(stack, top),
-                               &char_ax, &offset_ax);
-                       if (ret == -1) {
-                               diff = 0;
-                               break;
-                       } else if (ret == -2) {
-                               if (!escaped_r0) {
-                                       diff = -1;
-                                       break;
-                               }
-                       } else {
-                               if (escaped_r0) {
-                                       diff = 1;
-                                       break;
-                               }
-                       }
-               } else {
-                       if (escaped_r0) {
-                               diff = 1;
-                               break;
-                       }
-               }
-               diff = char_bx - char_ax;
-               if (diff != 0)
-                       break;
-               offset_bx++;
-               offset_ax++;
-       }
-       if (has_user) {
-               pagefault_enable();
-               set_fs(old_fs);
-       }
-       return diff;
-}
-
-uint64_t lttng_filter_false(void *filter_data,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               const char *filter_stack_data)
-{
-       return LTTNG_FILTER_DISCARD;
-}
-
-#ifdef INTERPRETER_USE_SWITCH
-
-/*
- * Fallback for compilers that do not support taking address of labels.
- */
-
-#define START_OP                                                       \
-       start_pc = &bytecode->data[0];                                  \
-       for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;    \
-                       pc = next_pc) {                                 \
-               dbg_printk("Executing op %s (%u)\n",                    \
-                       lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc), \
-                       (unsigned int) *(filter_opcode_t *) pc);        \
-               switch (*(filter_opcode_t *) pc)        {
-
-#define OP(name)       case name
-
-#define PO             break
-
-#define END_OP         }                                               \
-       }
-
-#else
-
-/*
- * Dispatch-table based interpreter.
- */
-
-#define START_OP                                                       \
-       start_pc = &bytecode->code[0];                                  \
-       pc = next_pc = start_pc;                                        \
-       if (unlikely(pc - start_pc >= bytecode->len))                   \
-               goto end;                                               \
-       goto *dispatch[*(filter_opcode_t *) pc];
-
-#define OP(name)                                                       \
-LABEL_##name
-
-#define PO                                                             \
-               pc = next_pc;                                           \
-               goto *dispatch[*(filter_opcode_t *) pc];
-
-#define END_OP
-
-#endif
-
-static int context_get_index(struct lttng_probe_ctx *lttng_probe_ctx,
-               struct load_ptr *ptr,
-               uint32_t idx)
-{
-
-       struct lttng_ctx_field *ctx_field;
-       struct lttng_event_field *field;
-       union lttng_ctx_value v;
-
-       ctx_field = &lttng_static_ctx->fields[idx];
-       field = &ctx_field->event_field;
-       ptr->type = LOAD_OBJECT;
-       /* field is only used for types nested within variants. */
-       ptr->field = NULL;
-
-       switch (field->type.atype) {
-       case atype_integer:
-               ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
-               if (field->type.u.integer.signedness) {
-                       ptr->object_type = OBJECT_TYPE_S64;
-                       ptr->u.s64 = v.s64;
-                       ptr->ptr = &ptr->u.s64;
-               } else {
-                       ptr->object_type = OBJECT_TYPE_U64;
-                       ptr->u.u64 = v.s64;     /* Cast. */
-                       ptr->ptr = &ptr->u.u64;
-               }
-               break;
-       case atype_enum_nestable:
-       {
-               const struct lttng_integer_type *itype =
-                       &field->type.u.enum_nestable.container_type->u.integer;
-
-               ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
-               if (itype->signedness) {
-                       ptr->object_type = OBJECT_TYPE_S64;
-                       ptr->u.s64 = v.s64;
-                       ptr->ptr = &ptr->u.s64;
-               } else {
-                       ptr->object_type = OBJECT_TYPE_U64;
-                       ptr->u.u64 = v.s64;     /* Cast. */
-                       ptr->ptr = &ptr->u.u64;
-               }
-               break;
-       }
-       case atype_array_nestable:
-               if (!lttng_is_bytewise_integer(field->type.u.array_nestable.elem_type)) {
-                       printk(KERN_WARNING "Array nesting only supports integer types.\n");
-                       return -EINVAL;
-               }
-               if (field->type.u.array_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
-                       printk(KERN_WARNING "Only string arrays are supported for contexts.\n");
-                       return -EINVAL;
-               }
-               ptr->object_type = OBJECT_TYPE_STRING;
-               ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
-               ptr->ptr = v.str;
-               break;
-       case atype_sequence_nestable:
-               if (!lttng_is_bytewise_integer(field->type.u.sequence_nestable.elem_type)) {
-                       printk(KERN_WARNING "Sequence nesting only supports integer types.\n");
-                       return -EINVAL;
-               }
-               if (field->type.u.sequence_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
-                       printk(KERN_WARNING "Only string sequences are supported for contexts.\n");
-                       return -EINVAL;
-               }
-               ptr->object_type = OBJECT_TYPE_STRING;
-               ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
-               ptr->ptr = v.str;
-               break;
-       case atype_string:
-               ptr->object_type = OBJECT_TYPE_STRING;
-               ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
-               ptr->ptr = v.str;
-               break;
-       case atype_struct_nestable:
-               printk(KERN_WARNING "Structure type cannot be loaded.\n");
-               return -EINVAL;
-       case atype_variant_nestable:
-               printk(KERN_WARNING "Variant type cannot be loaded.\n");
-               return -EINVAL;
-       default:
-               printk(KERN_WARNING "Unknown type: %d", (int) field->type.atype);
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static int dynamic_get_index(struct lttng_probe_ctx *lttng_probe_ctx,
-               struct bytecode_runtime *runtime,
-               uint64_t index, struct estack_entry *stack_top)
-{
-       int ret;
-       const struct filter_get_index_data *gid;
-
-       /*
-        * Types nested within variants need to perform dynamic lookup
-        * based on the field descriptions. LTTng-UST does not implement
-        * variants for now.
-        */
-       if (stack_top->u.ptr.field)
-               return -EINVAL;
-       gid = (const struct filter_get_index_data *) &runtime->data[index];
-       switch (stack_top->u.ptr.type) {
-       case LOAD_OBJECT:
-               switch (stack_top->u.ptr.object_type) {
-               case OBJECT_TYPE_ARRAY:
-               {
-                       const char *ptr;
-
-                       WARN_ON_ONCE(gid->offset >= gid->array_len);
-                       /* Skip count (unsigned long) */
-                       ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
-                       ptr = ptr + gid->offset;
-                       stack_top->u.ptr.ptr = ptr;
-                       stack_top->u.ptr.object_type = gid->elem.type;
-                       stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
-                       /* field is only used for types nested within variants. */
-                       stack_top->u.ptr.field = NULL;
-                       break;
-               }
-               case OBJECT_TYPE_SEQUENCE:
-               {
-                       const char *ptr;
-                       size_t ptr_seq_len;
-
-                       ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
-                       ptr_seq_len = *(unsigned long *) stack_top->u.ptr.ptr;
-                       if (gid->offset >= gid->elem.len * ptr_seq_len) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       ptr = ptr + gid->offset;
-                       stack_top->u.ptr.ptr = ptr;
-                       stack_top->u.ptr.object_type = gid->elem.type;
-                       stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
-                       /* field is only used for types nested within variants. */
-                       stack_top->u.ptr.field = NULL;
-                       break;
-               }
-               case OBJECT_TYPE_STRUCT:
-                       printk(KERN_WARNING "Nested structures are not supported yet.\n");
-                       ret = -EINVAL;
-                       goto end;
-               case OBJECT_TYPE_VARIANT:
-               default:
-                       printk(KERN_WARNING "Unexpected get index type %d",
-                               (int) stack_top->u.ptr.object_type);
-                       ret = -EINVAL;
-                       goto end;
-               }
-               break;
-       case LOAD_ROOT_CONTEXT:
-       case LOAD_ROOT_APP_CONTEXT:     /* Fall-through */
-       {
-               ret = context_get_index(lttng_probe_ctx,
-                               &stack_top->u.ptr,
-                               gid->ctx_index);
-               if (ret) {
-                       goto end;
-               }
-               break;
-       }
-       case LOAD_ROOT_PAYLOAD:
-               stack_top->u.ptr.ptr += gid->offset;
-               if (gid->elem.type == OBJECT_TYPE_STRING)
-                       stack_top->u.ptr.ptr = *(const char * const *) stack_top->u.ptr.ptr;
-               stack_top->u.ptr.object_type = gid->elem.type;
-               stack_top->u.ptr.type = LOAD_OBJECT;
-               /* field is only used for types nested within variants. */
-               stack_top->u.ptr.field = NULL;
-               break;
-       }
-       return 0;
-
-end:
-       return ret;
-}
-
-static int dynamic_load_field(struct estack_entry *stack_top)
-{
-       int ret;
-
-       switch (stack_top->u.ptr.type) {
-       case LOAD_OBJECT:
-               break;
-       case LOAD_ROOT_CONTEXT:
-       case LOAD_ROOT_APP_CONTEXT:
-       case LOAD_ROOT_PAYLOAD:
-       default:
-               dbg_printk("Filter warning: cannot load root, missing field name.\n");
-               ret = -EINVAL;
-               goto end;
-       }
-       switch (stack_top->u.ptr.object_type) {
-       case OBJECT_TYPE_S8:
-               dbg_printk("op load field s8\n");
-               stack_top->u.v = *(int8_t *) stack_top->u.ptr.ptr;
-               break;
-       case OBJECT_TYPE_S16:
-       {
-               int16_t tmp;
-
-               dbg_printk("op load field s16\n");
-               tmp = *(int16_t *) stack_top->u.ptr.ptr;
-               if (stack_top->u.ptr.rev_bo)
-                       __swab16s(&tmp);
-               stack_top->u.v = tmp;
-               break;
-       }
-       case OBJECT_TYPE_S32:
-       {
-               int32_t tmp;
-
-               dbg_printk("op load field s32\n");
-               tmp = *(int32_t *) stack_top->u.ptr.ptr;
-               if (stack_top->u.ptr.rev_bo)
-                       __swab32s(&tmp);
-               stack_top->u.v = tmp;
-               break;
-       }
-       case OBJECT_TYPE_S64:
-       {
-               int64_t tmp;
-
-               dbg_printk("op load field s64\n");
-               tmp = *(int64_t *) stack_top->u.ptr.ptr;
-               if (stack_top->u.ptr.rev_bo)
-                       __swab64s(&tmp);
-               stack_top->u.v = tmp;
-               break;
-       }
-       case OBJECT_TYPE_U8:
-               dbg_printk("op load field u8\n");
-               stack_top->u.v = *(uint8_t *) stack_top->u.ptr.ptr;
-               break;
-       case OBJECT_TYPE_U16:
-       {
-               uint16_t tmp;
-
-               dbg_printk("op load field u16\n");
-               tmp = *(uint16_t *) stack_top->u.ptr.ptr;
-               if (stack_top->u.ptr.rev_bo)
-                       __swab16s(&tmp);
-               stack_top->u.v = tmp;
-               break;
-       }
-       case OBJECT_TYPE_U32:
-       {
-               uint32_t tmp;
-
-               dbg_printk("op load field u32\n");
-               tmp = *(uint32_t *) stack_top->u.ptr.ptr;
-               if (stack_top->u.ptr.rev_bo)
-                       __swab32s(&tmp);
-               stack_top->u.v = tmp;
-               break;
-       }
-       case OBJECT_TYPE_U64:
-       {
-               uint64_t tmp;
-
-               dbg_printk("op load field u64\n");
-               tmp = *(uint64_t *) stack_top->u.ptr.ptr;
-               if (stack_top->u.ptr.rev_bo)
-                       __swab64s(&tmp);
-               stack_top->u.v = tmp;
-               break;
-       }
-       case OBJECT_TYPE_STRING:
-       {
-               const char *str;
-
-               dbg_printk("op load field string\n");
-               str = (const char *) stack_top->u.ptr.ptr;
-               stack_top->u.s.str = str;
-               if (unlikely(!stack_top->u.s.str)) {
-                       dbg_printk("Filter warning: loading a NULL string.\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               stack_top->u.s.seq_len = LTTNG_SIZE_MAX;
-               stack_top->u.s.literal_type =
-                       ESTACK_STRING_LITERAL_TYPE_NONE;
-               break;
-       }
-       case OBJECT_TYPE_STRING_SEQUENCE:
-       {
-               const char *ptr;
-
-               dbg_printk("op load field string sequence\n");
-               ptr = stack_top->u.ptr.ptr;
-               stack_top->u.s.seq_len = *(unsigned long *) ptr;
-               stack_top->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
-               if (unlikely(!stack_top->u.s.str)) {
-                       dbg_printk("Filter warning: loading a NULL sequence.\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               stack_top->u.s.literal_type =
-                       ESTACK_STRING_LITERAL_TYPE_NONE;
-               break;
-       }
-       case OBJECT_TYPE_DYNAMIC:
-               /*
-                * Dynamic types in context are looked up
-                * by context get index.
-                */
-               ret = -EINVAL;
-               goto end;
-       case OBJECT_TYPE_DOUBLE:
-               ret = -EINVAL;
-               goto end;
-       case OBJECT_TYPE_SEQUENCE:
-       case OBJECT_TYPE_ARRAY:
-       case OBJECT_TYPE_STRUCT:
-       case OBJECT_TYPE_VARIANT:
-               printk(KERN_WARNING "Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
-               ret = -EINVAL;
-               goto end;
-       }
-       return 0;
-
-end:
-       return ret;
-}
-
-/*
- * Return 0 (discard), or raise the 0x1 flag (log event).
- * Currently, other flags are kept for future extensions and have no
- * effect.
- */
-uint64_t lttng_filter_interpret_bytecode(void *filter_data,
-               struct lttng_probe_ctx *lttng_probe_ctx,
-               const char *filter_stack_data)
-{
-       struct bytecode_runtime *bytecode = filter_data;
-       void *pc, *next_pc, *start_pc;
-       int ret = -EINVAL;
-       uint64_t retval = 0;
-       struct estack _stack;
-       struct estack *stack = &_stack;
-       register int64_t ax = 0, bx = 0;
-       register int top = FILTER_STACK_EMPTY;
-#ifndef INTERPRETER_USE_SWITCH
-       static void *dispatch[NR_FILTER_OPS] = {
-               [ FILTER_OP_UNKNOWN ] = &&LABEL_FILTER_OP_UNKNOWN,
-
-               [ FILTER_OP_RETURN ] = &&LABEL_FILTER_OP_RETURN,
-
-               /* binary */
-               [ FILTER_OP_MUL ] = &&LABEL_FILTER_OP_MUL,
-               [ FILTER_OP_DIV ] = &&LABEL_FILTER_OP_DIV,
-               [ FILTER_OP_MOD ] = &&LABEL_FILTER_OP_MOD,
-               [ FILTER_OP_PLUS ] = &&LABEL_FILTER_OP_PLUS,
-               [ FILTER_OP_MINUS ] = &&LABEL_FILTER_OP_MINUS,
-               [ FILTER_OP_BIT_RSHIFT ] = &&LABEL_FILTER_OP_BIT_RSHIFT,
-               [ FILTER_OP_BIT_LSHIFT ] = &&LABEL_FILTER_OP_BIT_LSHIFT,
-               [ FILTER_OP_BIT_AND ] = &&LABEL_FILTER_OP_BIT_AND,
-               [ FILTER_OP_BIT_OR ] = &&LABEL_FILTER_OP_BIT_OR,
-               [ FILTER_OP_BIT_XOR ] = &&LABEL_FILTER_OP_BIT_XOR,
-
-               /* binary comparators */
-               [ FILTER_OP_EQ ] = &&LABEL_FILTER_OP_EQ,
-               [ FILTER_OP_NE ] = &&LABEL_FILTER_OP_NE,
-               [ FILTER_OP_GT ] = &&LABEL_FILTER_OP_GT,
-               [ FILTER_OP_LT ] = &&LABEL_FILTER_OP_LT,
-               [ FILTER_OP_GE ] = &&LABEL_FILTER_OP_GE,
-               [ FILTER_OP_LE ] = &&LABEL_FILTER_OP_LE,
-
-               /* string binary comparator */
-               [ FILTER_OP_EQ_STRING ] = &&LABEL_FILTER_OP_EQ_STRING,
-               [ FILTER_OP_NE_STRING ] = &&LABEL_FILTER_OP_NE_STRING,
-               [ FILTER_OP_GT_STRING ] = &&LABEL_FILTER_OP_GT_STRING,
-               [ FILTER_OP_LT_STRING ] = &&LABEL_FILTER_OP_LT_STRING,
-               [ FILTER_OP_GE_STRING ] = &&LABEL_FILTER_OP_GE_STRING,
-               [ FILTER_OP_LE_STRING ] = &&LABEL_FILTER_OP_LE_STRING,
-
-               /* globbing pattern binary comparator */
-               [ FILTER_OP_EQ_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_EQ_STAR_GLOB_STRING,
-               [ FILTER_OP_NE_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_NE_STAR_GLOB_STRING,
-
-               /* s64 binary comparator */
-               [ FILTER_OP_EQ_S64 ] = &&LABEL_FILTER_OP_EQ_S64,
-               [ FILTER_OP_NE_S64 ] = &&LABEL_FILTER_OP_NE_S64,
-               [ FILTER_OP_GT_S64 ] = &&LABEL_FILTER_OP_GT_S64,
-               [ FILTER_OP_LT_S64 ] = &&LABEL_FILTER_OP_LT_S64,
-               [ FILTER_OP_GE_S64 ] = &&LABEL_FILTER_OP_GE_S64,
-               [ FILTER_OP_LE_S64 ] = &&LABEL_FILTER_OP_LE_S64,
-
-               /* double binary comparator */
-               [ FILTER_OP_EQ_DOUBLE ] = &&LABEL_FILTER_OP_EQ_DOUBLE,
-               [ FILTER_OP_NE_DOUBLE ] = &&LABEL_FILTER_OP_NE_DOUBLE,
-               [ FILTER_OP_GT_DOUBLE ] = &&LABEL_FILTER_OP_GT_DOUBLE,
-               [ FILTER_OP_LT_DOUBLE ] = &&LABEL_FILTER_OP_LT_DOUBLE,
-               [ FILTER_OP_GE_DOUBLE ] = &&LABEL_FILTER_OP_GE_DOUBLE,
-               [ FILTER_OP_LE_DOUBLE ] = &&LABEL_FILTER_OP_LE_DOUBLE,
-
-               /* Mixed S64-double binary comparators */
-               [ FILTER_OP_EQ_DOUBLE_S64 ] = &&LABEL_FILTER_OP_EQ_DOUBLE_S64,
-               [ FILTER_OP_NE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_NE_DOUBLE_S64,
-               [ FILTER_OP_GT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GT_DOUBLE_S64,
-               [ FILTER_OP_LT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LT_DOUBLE_S64,
-               [ FILTER_OP_GE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GE_DOUBLE_S64,
-               [ FILTER_OP_LE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LE_DOUBLE_S64,
-
-               [ FILTER_OP_EQ_S64_DOUBLE ] = &&LABEL_FILTER_OP_EQ_S64_DOUBLE,
-               [ FILTER_OP_NE_S64_DOUBLE ] = &&LABEL_FILTER_OP_NE_S64_DOUBLE,
-               [ FILTER_OP_GT_S64_DOUBLE ] = &&LABEL_FILTER_OP_GT_S64_DOUBLE,
-               [ FILTER_OP_LT_S64_DOUBLE ] = &&LABEL_FILTER_OP_LT_S64_DOUBLE,
-               [ FILTER_OP_GE_S64_DOUBLE ] = &&LABEL_FILTER_OP_GE_S64_DOUBLE,
-               [ FILTER_OP_LE_S64_DOUBLE ] = &&LABEL_FILTER_OP_LE_S64_DOUBLE,
-
-               /* unary */
-               [ FILTER_OP_UNARY_PLUS ] = &&LABEL_FILTER_OP_UNARY_PLUS,
-               [ FILTER_OP_UNARY_MINUS ] = &&LABEL_FILTER_OP_UNARY_MINUS,
-               [ FILTER_OP_UNARY_NOT ] = &&LABEL_FILTER_OP_UNARY_NOT,
-               [ FILTER_OP_UNARY_PLUS_S64 ] = &&LABEL_FILTER_OP_UNARY_PLUS_S64,
-               [ FILTER_OP_UNARY_MINUS_S64 ] = &&LABEL_FILTER_OP_UNARY_MINUS_S64,
-               [ FILTER_OP_UNARY_NOT_S64 ] = &&LABEL_FILTER_OP_UNARY_NOT_S64,
-               [ FILTER_OP_UNARY_PLUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_PLUS_DOUBLE,
-               [ FILTER_OP_UNARY_MINUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_MINUS_DOUBLE,
-               [ FILTER_OP_UNARY_NOT_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_NOT_DOUBLE,
-
-               /* logical */
-               [ FILTER_OP_AND ] = &&LABEL_FILTER_OP_AND,
-               [ FILTER_OP_OR ] = &&LABEL_FILTER_OP_OR,
-
-               /* load field ref */
-               [ FILTER_OP_LOAD_FIELD_REF ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF,
-               [ FILTER_OP_LOAD_FIELD_REF_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_STRING,
-               [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_SEQUENCE,
-               [ FILTER_OP_LOAD_FIELD_REF_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_S64,
-               [ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_DOUBLE,
-
-               /* load from immediate operand */
-               [ FILTER_OP_LOAD_STRING ] = &&LABEL_FILTER_OP_LOAD_STRING,
-               [ FILTER_OP_LOAD_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_LOAD_STAR_GLOB_STRING,
-               [ FILTER_OP_LOAD_S64 ] = &&LABEL_FILTER_OP_LOAD_S64,
-               [ FILTER_OP_LOAD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_DOUBLE,
-
-               /* cast */
-               [ FILTER_OP_CAST_TO_S64 ] = &&LABEL_FILTER_OP_CAST_TO_S64,
-               [ FILTER_OP_CAST_DOUBLE_TO_S64 ] = &&LABEL_FILTER_OP_CAST_DOUBLE_TO_S64,
-               [ FILTER_OP_CAST_NOP ] = &&LABEL_FILTER_OP_CAST_NOP,
-
-               /* get context ref */
-               [ FILTER_OP_GET_CONTEXT_REF ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF,
-               [ FILTER_OP_GET_CONTEXT_REF_STRING ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_STRING,
-               [ FILTER_OP_GET_CONTEXT_REF_S64 ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_S64,
-               [ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_DOUBLE,
-
-               /* load userspace field ref */
-               [ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_STRING,
-               [ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE,
-
-               /* Instructions for recursive traversal through composed types. */
-               [ FILTER_OP_GET_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_CONTEXT_ROOT,
-               [ FILTER_OP_GET_APP_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_APP_CONTEXT_ROOT,
-               [ FILTER_OP_GET_PAYLOAD_ROOT ] = &&LABEL_FILTER_OP_GET_PAYLOAD_ROOT,
-
-               [ FILTER_OP_GET_SYMBOL ] = &&LABEL_FILTER_OP_GET_SYMBOL,
-               [ FILTER_OP_GET_SYMBOL_FIELD ] = &&LABEL_FILTER_OP_GET_SYMBOL_FIELD,
-               [ FILTER_OP_GET_INDEX_U16 ] = &&LABEL_FILTER_OP_GET_INDEX_U16,
-               [ FILTER_OP_GET_INDEX_U64 ] = &&LABEL_FILTER_OP_GET_INDEX_U64,
-
-               [ FILTER_OP_LOAD_FIELD ] = &&LABEL_FILTER_OP_LOAD_FIELD,
-               [ FILTER_OP_LOAD_FIELD_S8        ] = &&LABEL_FILTER_OP_LOAD_FIELD_S8,
-               [ FILTER_OP_LOAD_FIELD_S16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S16,
-               [ FILTER_OP_LOAD_FIELD_S32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S32,
-               [ FILTER_OP_LOAD_FIELD_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S64,
-               [ FILTER_OP_LOAD_FIELD_U8 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U8,
-               [ FILTER_OP_LOAD_FIELD_U16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U16,
-               [ FILTER_OP_LOAD_FIELD_U32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U32,
-               [ FILTER_OP_LOAD_FIELD_U64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U64,
-               [ FILTER_OP_LOAD_FIELD_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_STRING,
-               [ FILTER_OP_LOAD_FIELD_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_SEQUENCE,
-               [ FILTER_OP_LOAD_FIELD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_DOUBLE,
-
-               [ FILTER_OP_UNARY_BIT_NOT ] = &&LABEL_FILTER_OP_UNARY_BIT_NOT,
-
-               [ FILTER_OP_RETURN_S64 ] = &&LABEL_FILTER_OP_RETURN_S64,
-       };
-#endif /* #ifndef INTERPRETER_USE_SWITCH */
-
-       START_OP
-
-               OP(FILTER_OP_UNKNOWN):
-               OP(FILTER_OP_LOAD_FIELD_REF):
-               OP(FILTER_OP_GET_CONTEXT_REF):
-#ifdef INTERPRETER_USE_SWITCH
-               default:
-#endif /* INTERPRETER_USE_SWITCH */
-                       printk(KERN_WARNING "unknown bytecode op %u\n",
-                               (unsigned int) *(filter_opcode_t *) pc);
-                       ret = -EINVAL;
-                       goto end;
-
-               OP(FILTER_OP_RETURN):
-               OP(FILTER_OP_RETURN_S64):
-                       /* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */
-                       retval = !!estack_ax_v;
-                       ret = 0;
-                       goto end;
-
-               /* binary */
-               OP(FILTER_OP_MUL):
-               OP(FILTER_OP_DIV):
-               OP(FILTER_OP_MOD):
-               OP(FILTER_OP_PLUS):
-               OP(FILTER_OP_MINUS):
-                       printk(KERN_WARNING "unsupported bytecode op %u\n",
-                               (unsigned int) *(filter_opcode_t *) pc);
-                       ret = -EINVAL;
-                       goto end;
-
-               OP(FILTER_OP_EQ):
-               OP(FILTER_OP_NE):
-               OP(FILTER_OP_GT):
-               OP(FILTER_OP_LT):
-               OP(FILTER_OP_GE):
-               OP(FILTER_OP_LE):
-                       printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
-                               (unsigned int) *(filter_opcode_t *) pc);
-                       ret = -EINVAL;
-                       goto end;
-
-               OP(FILTER_OP_EQ_STRING):
-               {
-                       int res;
-
-                       res = (stack_strcmp(stack, top, "==") == 0);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-               OP(FILTER_OP_NE_STRING):
-               {
-                       int res;
-
-                       res = (stack_strcmp(stack, top, "!=") != 0);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-               OP(FILTER_OP_GT_STRING):
-               {
-                       int res;
-
-                       res = (stack_strcmp(stack, top, ">") > 0);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-               OP(FILTER_OP_LT_STRING):
-               {
-                       int res;
-
-                       res = (stack_strcmp(stack, top, "<") < 0);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-               OP(FILTER_OP_GE_STRING):
-               {
-                       int res;
-
-                       res = (stack_strcmp(stack, top, ">=") >= 0);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-               OP(FILTER_OP_LE_STRING):
-               {
-                       int res;
-
-                       res = (stack_strcmp(stack, top, "<=") <= 0);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-
-               OP(FILTER_OP_EQ_STAR_GLOB_STRING):
-               {
-                       int res;
-
-                       res = (stack_star_glob_match(stack, top, "==") == 0);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-               OP(FILTER_OP_NE_STAR_GLOB_STRING):
-               {
-                       int res;
-
-                       res = (stack_star_glob_match(stack, top, "!=") != 0);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-
-               OP(FILTER_OP_EQ_S64):
-               {
-                       int res;
-
-                       res = (estack_bx_v == estack_ax_v);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-               OP(FILTER_OP_NE_S64):
-               {
-                       int res;
-
-                       res = (estack_bx_v != estack_ax_v);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-               OP(FILTER_OP_GT_S64):
-               {
-                       int res;
-
-                       res = (estack_bx_v > estack_ax_v);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-               OP(FILTER_OP_LT_S64):
-               {
-                       int res;
-
-                       res = (estack_bx_v < estack_ax_v);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-               OP(FILTER_OP_GE_S64):
-               {
-                       int res;
-
-                       res = (estack_bx_v >= estack_ax_v);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-               OP(FILTER_OP_LE_S64):
-               {
-                       int res;
-
-                       res = (estack_bx_v <= estack_ax_v);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-
-               OP(FILTER_OP_EQ_DOUBLE):
-               OP(FILTER_OP_NE_DOUBLE):
-               OP(FILTER_OP_GT_DOUBLE):
-               OP(FILTER_OP_LT_DOUBLE):
-               OP(FILTER_OP_GE_DOUBLE):
-               OP(FILTER_OP_LE_DOUBLE):
-               {
-                       BUG_ON(1);
-                       PO;
-               }
-
-               /* Mixed S64-double binary comparators */
-               OP(FILTER_OP_EQ_DOUBLE_S64):
-               OP(FILTER_OP_NE_DOUBLE_S64):
-               OP(FILTER_OP_GT_DOUBLE_S64):
-               OP(FILTER_OP_LT_DOUBLE_S64):
-               OP(FILTER_OP_GE_DOUBLE_S64):
-               OP(FILTER_OP_LE_DOUBLE_S64):
-               OP(FILTER_OP_EQ_S64_DOUBLE):
-               OP(FILTER_OP_NE_S64_DOUBLE):
-               OP(FILTER_OP_GT_S64_DOUBLE):
-               OP(FILTER_OP_LT_S64_DOUBLE):
-               OP(FILTER_OP_GE_S64_DOUBLE):
-               OP(FILTER_OP_LE_S64_DOUBLE):
-               {
-                       BUG_ON(1);
-                       PO;
-               }
-               OP(FILTER_OP_BIT_RSHIFT):
-               {
-                       int64_t res;
-
-                       /* Catch undefined behavior. */
-                       if (unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       res = ((uint64_t) estack_bx_v >> (uint32_t) estack_ax_v);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-               OP(FILTER_OP_BIT_LSHIFT):
-               {
-                       int64_t res;
-
-                       /* Catch undefined behavior. */
-                       if (unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       res = ((uint64_t) estack_bx_v << (uint32_t) estack_ax_v);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-               OP(FILTER_OP_BIT_AND):
-               {
-                       int64_t res;
-
-                       res = ((uint64_t) estack_bx_v & (uint64_t) estack_ax_v);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-               OP(FILTER_OP_BIT_OR):
-               {
-                       int64_t res;
-
-                       res = ((uint64_t) estack_bx_v | (uint64_t) estack_ax_v);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-               OP(FILTER_OP_BIT_XOR):
-               {
-                       int64_t res;
-
-                       res = ((uint64_t) estack_bx_v ^ (uint64_t) estack_ax_v);
-                       estack_pop(stack, top, ax, bx);
-                       estack_ax_v = res;
-                       next_pc += sizeof(struct binary_op);
-                       PO;
-               }
-
-               /* unary */
-               OP(FILTER_OP_UNARY_PLUS):
-               OP(FILTER_OP_UNARY_MINUS):
-               OP(FILTER_OP_UNARY_NOT):
-                       printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
-                               (unsigned int) *(filter_opcode_t *) pc);
-                       ret = -EINVAL;
-                       goto end;
-
-
-               OP(FILTER_OP_UNARY_BIT_NOT):
-               {
-                       estack_ax_v = ~(uint64_t) estack_ax_v;
-                       next_pc += sizeof(struct unary_op);
-                       PO;
-               }
-
-               OP(FILTER_OP_UNARY_PLUS_S64):
-               {
-                       next_pc += sizeof(struct unary_op);
-                       PO;
-               }
-               OP(FILTER_OP_UNARY_MINUS_S64):
-               {
-                       estack_ax_v = -estack_ax_v;
-                       next_pc += sizeof(struct unary_op);
-                       PO;
-               }
-               OP(FILTER_OP_UNARY_PLUS_DOUBLE):
-               OP(FILTER_OP_UNARY_MINUS_DOUBLE):
-               {
-                       BUG_ON(1);
-                       PO;
-               }
-               OP(FILTER_OP_UNARY_NOT_S64):
-               {
-                       estack_ax_v = !estack_ax_v;
-                       next_pc += sizeof(struct unary_op);
-                       PO;
-               }
-               OP(FILTER_OP_UNARY_NOT_DOUBLE):
-               {
-                       BUG_ON(1);
-                       PO;
-               }
-
-               /* logical */
-               OP(FILTER_OP_AND):
-               {
-                       struct logical_op *insn = (struct logical_op *) pc;
-
-                       /* If AX is 0, skip and evaluate to 0 */
-                       if (unlikely(estack_ax_v == 0)) {
-                               dbg_printk("Jumping to bytecode offset %u\n",
-                                       (unsigned int) insn->skip_offset);
-                               next_pc = start_pc + insn->skip_offset;
-                       } else {
-                               /* Pop 1 when jump not taken */
-                               estack_pop(stack, top, ax, bx);
-                               next_pc += sizeof(struct logical_op);
-                       }
-                       PO;
-               }
-               OP(FILTER_OP_OR):
-               {
-                       struct logical_op *insn = (struct logical_op *) pc;
-
-                       /* If AX is nonzero, skip and evaluate to 1 */
-
-                       if (unlikely(estack_ax_v != 0)) {
-                               estack_ax_v = 1;
-                               dbg_printk("Jumping to bytecode offset %u\n",
-                                       (unsigned int) insn->skip_offset);
-                               next_pc = start_pc + insn->skip_offset;
-                       } else {
-                               /* Pop 1 when jump not taken */
-                               estack_pop(stack, top, ax, bx);
-                               next_pc += sizeof(struct logical_op);
-                       }
-                       PO;
-               }
-
-
-               /* load field ref */
-               OP(FILTER_OP_LOAD_FIELD_REF_STRING):
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-                       struct field_ref *ref = (struct field_ref *) insn->data;
-
-                       dbg_printk("load field ref offset %u type string\n",
-                               ref->offset);
-                       estack_push(stack, top, ax, bx);
-                       estack_ax(stack, top)->u.s.str =
-                               *(const char * const *) &filter_stack_data[ref->offset];
-                       if (unlikely(!estack_ax(stack, top)->u.s.str)) {
-                               dbg_printk("Filter warning: loading a NULL string.\n");
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
-                       estack_ax(stack, top)->u.s.literal_type =
-                               ESTACK_STRING_LITERAL_TYPE_NONE;
-                       estack_ax(stack, top)->u.s.user = 0;
-                       dbg_printk("ref load string %s\n", estack_ax(stack, top)->u.s.str);
-                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
-                       PO;
-               }
-
-               OP(FILTER_OP_LOAD_FIELD_REF_SEQUENCE):
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-                       struct field_ref *ref = (struct field_ref *) insn->data;
-
-                       dbg_printk("load field ref offset %u type sequence\n",
-                               ref->offset);
-                       estack_push(stack, top, ax, bx);
-                       estack_ax(stack, top)->u.s.seq_len =
-                               *(unsigned long *) &filter_stack_data[ref->offset];
-                       estack_ax(stack, top)->u.s.str =
-                               *(const char **) (&filter_stack_data[ref->offset
-                                                               + sizeof(unsigned long)]);
-                       if (unlikely(!estack_ax(stack, top)->u.s.str)) {
-                               dbg_printk("Filter warning: loading a NULL sequence.\n");
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       estack_ax(stack, top)->u.s.literal_type =
-                               ESTACK_STRING_LITERAL_TYPE_NONE;
-                       estack_ax(stack, top)->u.s.user = 0;
-                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
-                       PO;
-               }
-
-               OP(FILTER_OP_LOAD_FIELD_REF_S64):
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-                       struct field_ref *ref = (struct field_ref *) insn->data;
-
-                       dbg_printk("load field ref offset %u type s64\n",
-                               ref->offset);
-                       estack_push(stack, top, ax, bx);
-                       estack_ax_v =
-                               ((struct literal_numeric *) &filter_stack_data[ref->offset])->v;
-                       dbg_printk("ref load s64 %lld\n",
-                               (long long) estack_ax_v);
-                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
-                       PO;
-               }
-
-               OP(FILTER_OP_LOAD_FIELD_REF_DOUBLE):
-               {
-                       BUG_ON(1);
-                       PO;
-               }
-
-               /* load from immediate operand */
-               OP(FILTER_OP_LOAD_STRING):
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-
-                       dbg_printk("load string %s\n", insn->data);
-                       estack_push(stack, top, ax, bx);
-                       estack_ax(stack, top)->u.s.str = insn->data;
-                       estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
-                       estack_ax(stack, top)->u.s.literal_type =
-                               ESTACK_STRING_LITERAL_TYPE_PLAIN;
-                       estack_ax(stack, top)->u.s.user = 0;
-                       next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
-                       PO;
-               }
-
-               OP(FILTER_OP_LOAD_STAR_GLOB_STRING):
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-
-                       dbg_printk("load globbing pattern %s\n", insn->data);
-                       estack_push(stack, top, ax, bx);
-                       estack_ax(stack, top)->u.s.str = insn->data;
-                       estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
-                       estack_ax(stack, top)->u.s.literal_type =
-                               ESTACK_STRING_LITERAL_TYPE_STAR_GLOB;
-                       estack_ax(stack, top)->u.s.user = 0;
-                       next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
-                       PO;
-               }
-
-               OP(FILTER_OP_LOAD_S64):
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-
-                       estack_push(stack, top, ax, bx);
-                       estack_ax_v = ((struct literal_numeric *) insn->data)->v;
-                       dbg_printk("load s64 %lld\n",
-                               (long long) estack_ax_v);
-                       next_pc += sizeof(struct load_op)
-                                       + sizeof(struct literal_numeric);
-                       PO;
-               }
-
-               OP(FILTER_OP_LOAD_DOUBLE):
-               {
-                       BUG_ON(1);
-                       PO;
-               }
-
-               /* cast */
-               OP(FILTER_OP_CAST_TO_S64):
-                       printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
-                               (unsigned int) *(filter_opcode_t *) pc);
-                       ret = -EINVAL;
-                       goto end;
-
-               OP(FILTER_OP_CAST_DOUBLE_TO_S64):
-               {
-                       BUG_ON(1);
-                       PO;
-               }
-
-               OP(FILTER_OP_CAST_NOP):
-               {
-                       next_pc += sizeof(struct cast_op);
-                       PO;
-               }
-
-               /* get context ref */
-               OP(FILTER_OP_GET_CONTEXT_REF_STRING):
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-                       struct field_ref *ref = (struct field_ref *) insn->data;
-                       struct lttng_ctx_field *ctx_field;
-                       union lttng_ctx_value v;
-
-                       dbg_printk("get context ref offset %u type string\n",
-                               ref->offset);
-                       ctx_field = &lttng_static_ctx->fields[ref->offset];
-                       ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
-                       estack_push(stack, top, ax, bx);
-                       estack_ax(stack, top)->u.s.str = v.str;
-                       if (unlikely(!estack_ax(stack, top)->u.s.str)) {
-                               dbg_printk("Filter warning: loading a NULL string.\n");
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
-                       estack_ax(stack, top)->u.s.literal_type =
-                               ESTACK_STRING_LITERAL_TYPE_NONE;
-                       estack_ax(stack, top)->u.s.user = 0;
-                       dbg_printk("ref get context string %s\n", estack_ax(stack, top)->u.s.str);
-                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
-                       PO;
-               }
-
-               OP(FILTER_OP_GET_CONTEXT_REF_S64):
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-                       struct field_ref *ref = (struct field_ref *) insn->data;
-                       struct lttng_ctx_field *ctx_field;
-                       union lttng_ctx_value v;
-
-                       dbg_printk("get context ref offset %u type s64\n",
-                               ref->offset);
-                       ctx_field = &lttng_static_ctx->fields[ref->offset];
-                       ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
-                       estack_push(stack, top, ax, bx);
-                       estack_ax_v = v.s64;
-                       dbg_printk("ref get context s64 %lld\n",
-                               (long long) estack_ax_v);
-                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
-                       PO;
-               }
-
-               OP(FILTER_OP_GET_CONTEXT_REF_DOUBLE):
-               {
-                       BUG_ON(1);
-                       PO;
-               }
-
-               /* load userspace field ref */
-               OP(FILTER_OP_LOAD_FIELD_REF_USER_STRING):
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-                       struct field_ref *ref = (struct field_ref *) insn->data;
-
-                       dbg_printk("load field ref offset %u type user string\n",
-                               ref->offset);
-                       estack_push(stack, top, ax, bx);
-                       estack_ax(stack, top)->u.s.user_str =
-                               *(const char * const *) &filter_stack_data[ref->offset];
-                       if (unlikely(!estack_ax(stack, top)->u.s.str)) {
-                               dbg_printk("Filter warning: loading a NULL string.\n");
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
-                       estack_ax(stack, top)->u.s.literal_type =
-                               ESTACK_STRING_LITERAL_TYPE_NONE;
-                       estack_ax(stack, top)->u.s.user = 1;
-                       dbg_printk("ref load string %s\n", estack_ax(stack, top)->u.s.str);
-                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
-                       PO;
-               }
-
-               OP(FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE):
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-                       struct field_ref *ref = (struct field_ref *) insn->data;
-
-                       dbg_printk("load field ref offset %u type user sequence\n",
-                               ref->offset);
-                       estack_push(stack, top, ax, bx);
-                       estack_ax(stack, top)->u.s.seq_len =
-                               *(unsigned long *) &filter_stack_data[ref->offset];
-                       estack_ax(stack, top)->u.s.user_str =
-                               *(const char **) (&filter_stack_data[ref->offset
-                                                               + sizeof(unsigned long)]);
-                       if (unlikely(!estack_ax(stack, top)->u.s.str)) {
-                               dbg_printk("Filter warning: loading a NULL sequence.\n");
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       estack_ax(stack, top)->u.s.literal_type =
-                               ESTACK_STRING_LITERAL_TYPE_NONE;
-                       estack_ax(stack, top)->u.s.user = 1;
-                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
-                       PO;
-               }
-
-               OP(FILTER_OP_GET_CONTEXT_ROOT):
-               {
-                       dbg_printk("op get context root\n");
-                       estack_push(stack, top, ax, bx);
-                       estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_CONTEXT;
-                       /* "field" only needed for variants. */
-                       estack_ax(stack, top)->u.ptr.field = NULL;
-                       next_pc += sizeof(struct load_op);
-                       PO;
-               }
-
-               OP(FILTER_OP_GET_APP_CONTEXT_ROOT):
-               {
-                       BUG_ON(1);
-                       PO;
-               }
-
-               OP(FILTER_OP_GET_PAYLOAD_ROOT):
-               {
-                       dbg_printk("op get app payload root\n");
-                       estack_push(stack, top, ax, bx);
-                       estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_PAYLOAD;
-                       estack_ax(stack, top)->u.ptr.ptr = filter_stack_data;
-                       /* "field" only needed for variants. */
-                       estack_ax(stack, top)->u.ptr.field = NULL;
-                       next_pc += sizeof(struct load_op);
-                       PO;
-               }
-
-               OP(FILTER_OP_GET_SYMBOL):
-               {
-                       dbg_printk("op get symbol\n");
-                       switch (estack_ax(stack, top)->u.ptr.type) {
-                       case LOAD_OBJECT:
-                               printk(KERN_WARNING "Nested fields not implemented yet.\n");
-                               ret = -EINVAL;
-                               goto end;
-                       case LOAD_ROOT_CONTEXT:
-                       case LOAD_ROOT_APP_CONTEXT:
-                       case LOAD_ROOT_PAYLOAD:
-                               /*
-                                * symbol lookup is performed by
-                                * specialization.
-                                */
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
-                       PO;
-               }
-
-               OP(FILTER_OP_GET_SYMBOL_FIELD):
-               {
-                       /*
-                        * Used for first variant encountered in a
-                        * traversal. Variants are not implemented yet.
-                        */
-                       ret = -EINVAL;
-                       goto end;
-               }
-
-               OP(FILTER_OP_GET_INDEX_U16):
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-                       struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
-
-                       dbg_printk("op get index u16\n");
-                       ret = dynamic_get_index(lttng_probe_ctx, bytecode, index->index, estack_ax(stack, top));
-                       if (ret)
-                               goto end;
-                       estack_ax_v = estack_ax(stack, top)->u.v;
-                       next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
-                       PO;
-               }
-
-               OP(FILTER_OP_GET_INDEX_U64):
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-                       struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
-
-                       dbg_printk("op get index u64\n");
-                       ret = dynamic_get_index(lttng_probe_ctx, bytecode, index->index, estack_ax(stack, top));
-                       if (ret)
-                               goto end;
-                       estack_ax_v = estack_ax(stack, top)->u.v;
-                       next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
-                       PO;
-               }
-
-               OP(FILTER_OP_LOAD_FIELD):
-               {
-                       dbg_printk("op load field\n");
-                       ret = dynamic_load_field(estack_ax(stack, top));
-                       if (ret)
-                               goto end;
-                       estack_ax_v = estack_ax(stack, top)->u.v;
-                       next_pc += sizeof(struct load_op);
-                       PO;
-               }
-
-               OP(FILTER_OP_LOAD_FIELD_S8):
-               {
-                       dbg_printk("op load field s8\n");
-
-                       estack_ax_v = *(int8_t *) estack_ax(stack, top)->u.ptr.ptr;
-                       next_pc += sizeof(struct load_op);
-                       PO;
-               }
-               OP(FILTER_OP_LOAD_FIELD_S16):
-               {
-                       dbg_printk("op load field s16\n");
-
-                       estack_ax_v = *(int16_t *) estack_ax(stack, top)->u.ptr.ptr;
-                       next_pc += sizeof(struct load_op);
-                       PO;
-               }
-               OP(FILTER_OP_LOAD_FIELD_S32):
-               {
-                       dbg_printk("op load field s32\n");
-
-                       estack_ax_v = *(int32_t *) estack_ax(stack, top)->u.ptr.ptr;
-                       next_pc += sizeof(struct load_op);
-                       PO;
-               }
-               OP(FILTER_OP_LOAD_FIELD_S64):
-               {
-                       dbg_printk("op load field s64\n");
-
-                       estack_ax_v = *(int64_t *) estack_ax(stack, top)->u.ptr.ptr;
-                       next_pc += sizeof(struct load_op);
-                       PO;
-               }
-               OP(FILTER_OP_LOAD_FIELD_U8):
-               {
-                       dbg_printk("op load field u8\n");
-
-                       estack_ax_v = *(uint8_t *) estack_ax(stack, top)->u.ptr.ptr;
-                       next_pc += sizeof(struct load_op);
-                       PO;
-               }
-               OP(FILTER_OP_LOAD_FIELD_U16):
-               {
-                       dbg_printk("op load field u16\n");
-
-                       estack_ax_v = *(uint16_t *) estack_ax(stack, top)->u.ptr.ptr;
-                       next_pc += sizeof(struct load_op);
-                       PO;
-               }
-               OP(FILTER_OP_LOAD_FIELD_U32):
-               {
-                       dbg_printk("op load field u32\n");
-
-                       estack_ax_v = *(uint32_t *) estack_ax(stack, top)->u.ptr.ptr;
-                       next_pc += sizeof(struct load_op);
-                       PO;
-               }
-               OP(FILTER_OP_LOAD_FIELD_U64):
-               {
-                       dbg_printk("op load field u64\n");
-
-                       estack_ax_v = *(uint64_t *) estack_ax(stack, top)->u.ptr.ptr;
-                       next_pc += sizeof(struct load_op);
-                       PO;
-               }
-               OP(FILTER_OP_LOAD_FIELD_DOUBLE):
-               {
-                       ret = -EINVAL;
-                       goto end;
-               }
-
-               OP(FILTER_OP_LOAD_FIELD_STRING):
-               {
-                       const char *str;
-
-                       dbg_printk("op load field string\n");
-                       str = (const char *) estack_ax(stack, top)->u.ptr.ptr;
-                       estack_ax(stack, top)->u.s.str = str;
-                       if (unlikely(!estack_ax(stack, top)->u.s.str)) {
-                               dbg_printk("Filter warning: loading a NULL string.\n");
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
-                       estack_ax(stack, top)->u.s.literal_type =
-                               ESTACK_STRING_LITERAL_TYPE_NONE;
-                       next_pc += sizeof(struct load_op);
-                       PO;
-               }
-
-               OP(FILTER_OP_LOAD_FIELD_SEQUENCE):
-               {
-                       const char *ptr;
-
-                       dbg_printk("op load field string sequence\n");
-                       ptr = estack_ax(stack, top)->u.ptr.ptr;
-                       estack_ax(stack, top)->u.s.seq_len = *(unsigned long *) ptr;
-                       estack_ax(stack, top)->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
-                       if (unlikely(!estack_ax(stack, top)->u.s.str)) {
-                               dbg_printk("Filter warning: loading a NULL sequence.\n");
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       estack_ax(stack, top)->u.s.literal_type =
-                               ESTACK_STRING_LITERAL_TYPE_NONE;
-                       next_pc += sizeof(struct load_op);
-                       PO;
-               }
-
-       END_OP
-end:
-       /* Return _DISCARD on error. */
-       if (ret)
-               return LTTNG_FILTER_DISCARD;
-       return retval;
-}
-
-#undef START_OP
-#undef OP
-#undef PO
-#undef END_OP
diff --git a/src/lttng-filter-specialize.c b/src/lttng-filter-specialize.c
deleted file mode 100644 (file)
index ccc4583..0000000
+++ /dev/null
@@ -1,1215 +0,0 @@
-/* SPDX-License-Identifier: MIT
- *
- * lttng-filter-specialize.c
- *
- * LTTng modules filter code specializer.
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/slab.h>
-#include <lttng/filter.h>
-#include <lttng/align.h>
-
-static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
-               size_t align, size_t len)
-{
-       ssize_t ret;
-       size_t padding = offset_align(runtime->data_len, align);
-       size_t new_len = runtime->data_len + padding + len;
-       size_t new_alloc_len = new_len;
-       size_t old_alloc_len = runtime->data_alloc_len;
-
-       if (new_len > FILTER_MAX_DATA_LEN)
-               return -EINVAL;
-
-       if (new_alloc_len > old_alloc_len) {
-               char *newptr;
-
-               new_alloc_len =
-                       max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
-               newptr = krealloc(runtime->data, new_alloc_len, GFP_KERNEL);
-               if (!newptr)
-                       return -ENOMEM;
-               runtime->data = newptr;
-               /* We zero directly the memory from start of allocation. */
-               memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
-               runtime->data_alloc_len = new_alloc_len;
-       }
-       runtime->data_len += padding;
-       ret = runtime->data_len;
-       runtime->data_len += len;
-       return ret;
-}
-
-static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
-               const void *p, size_t align, size_t len)
-{
-       ssize_t offset;
-
-       offset = bytecode_reserve_data(runtime, align, len);
-       if (offset < 0)
-               return -ENOMEM;
-       memcpy(&runtime->data[offset], p, len);
-       return offset;
-}
-
-static int specialize_load_field(struct vstack_entry *stack_top,
-               struct load_op *insn)
-{
-       int ret;
-
-       switch (stack_top->load.type) {
-       case LOAD_OBJECT:
-               break;
-       case LOAD_ROOT_CONTEXT:
-       case LOAD_ROOT_APP_CONTEXT:
-       case LOAD_ROOT_PAYLOAD:
-       default:
-               dbg_printk("Filter warning: cannot load root, missing field name.\n");
-               ret = -EINVAL;
-               goto end;
-       }
-       switch (stack_top->load.object_type) {
-       case OBJECT_TYPE_S8:
-               dbg_printk("op load field s8\n");
-               stack_top->type = REG_S64;
-               if (!stack_top->load.rev_bo)
-                       insn->op = FILTER_OP_LOAD_FIELD_S8;
-               break;
-       case OBJECT_TYPE_S16:
-               dbg_printk("op load field s16\n");
-               stack_top->type = REG_S64;
-               if (!stack_top->load.rev_bo)
-                       insn->op = FILTER_OP_LOAD_FIELD_S16;
-               break;
-       case OBJECT_TYPE_S32:
-               dbg_printk("op load field s32\n");
-               stack_top->type = REG_S64;
-               if (!stack_top->load.rev_bo)
-                       insn->op = FILTER_OP_LOAD_FIELD_S32;
-               break;
-       case OBJECT_TYPE_S64:
-               dbg_printk("op load field s64\n");
-               stack_top->type = REG_S64;
-               if (!stack_top->load.rev_bo)
-                       insn->op = FILTER_OP_LOAD_FIELD_S64;
-               break;
-       case OBJECT_TYPE_U8:
-               dbg_printk("op load field u8\n");
-               stack_top->type = REG_S64;
-               insn->op = FILTER_OP_LOAD_FIELD_U8;
-               break;
-       case OBJECT_TYPE_U16:
-               dbg_printk("op load field u16\n");
-               stack_top->type = REG_S64;
-               if (!stack_top->load.rev_bo)
-                       insn->op = FILTER_OP_LOAD_FIELD_U16;
-               break;
-       case OBJECT_TYPE_U32:
-               dbg_printk("op load field u32\n");
-               stack_top->type = REG_S64;
-               if (!stack_top->load.rev_bo)
-                       insn->op = FILTER_OP_LOAD_FIELD_U32;
-               break;
-       case OBJECT_TYPE_U64:
-               dbg_printk("op load field u64\n");
-               stack_top->type = REG_S64;
-               if (!stack_top->load.rev_bo)
-                       insn->op = FILTER_OP_LOAD_FIELD_U64;
-               break;
-       case OBJECT_TYPE_DOUBLE:
-               printk(KERN_WARNING "Double type unsupported\n\n");
-               ret = -EINVAL;
-               goto end;
-       case OBJECT_TYPE_STRING:
-               dbg_printk("op load field string\n");
-               stack_top->type = REG_STRING;
-               insn->op = FILTER_OP_LOAD_FIELD_STRING;
-               break;
-       case OBJECT_TYPE_STRING_SEQUENCE:
-               dbg_printk("op load field string sequence\n");
-               stack_top->type = REG_STRING;
-               insn->op = FILTER_OP_LOAD_FIELD_SEQUENCE;
-               break;
-       case OBJECT_TYPE_DYNAMIC:
-               ret = -EINVAL;
-               goto end;
-       case OBJECT_TYPE_SEQUENCE:
-       case OBJECT_TYPE_ARRAY:
-       case OBJECT_TYPE_STRUCT:
-       case OBJECT_TYPE_VARIANT:
-               printk(KERN_WARNING "Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
-               ret = -EINVAL;
-               goto end;
-       }
-       return 0;
-
-end:
-       return ret;
-}
-
-static int specialize_get_index_object_type(enum object_type *otype,
-               int signedness, uint32_t elem_len)
-{
-       switch (elem_len) {
-       case 8:
-               if (signedness)
-                       *otype = OBJECT_TYPE_S8;
-               else
-                       *otype = OBJECT_TYPE_U8;
-               break;
-       case 16:
-               if (signedness)
-                       *otype = OBJECT_TYPE_S16;
-               else
-                       *otype = OBJECT_TYPE_U16;
-               break;
-       case 32:
-               if (signedness)
-                       *otype = OBJECT_TYPE_S32;
-               else
-                       *otype = OBJECT_TYPE_U32;
-               break;
-       case 64:
-               if (signedness)
-                       *otype = OBJECT_TYPE_S64;
-               else
-                       *otype = OBJECT_TYPE_U64;
-               break;
-       default:
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static int specialize_get_index(struct bytecode_runtime *runtime,
-               struct load_op *insn, uint64_t index,
-               struct vstack_entry *stack_top,
-               int idx_len)
-{
-       int ret;
-       struct filter_get_index_data gid;
-       ssize_t data_offset;
-
-       memset(&gid, 0, sizeof(gid));
-       switch (stack_top->load.type) {
-       case LOAD_OBJECT:
-               switch (stack_top->load.object_type) {
-               case OBJECT_TYPE_ARRAY:
-               {
-                       const struct lttng_integer_type *integer_type;
-                       const struct lttng_event_field *field;
-                       uint32_t elem_len, num_elems;
-                       int signedness;
-
-                       field = stack_top->load.field;
-                       if (!lttng_is_bytewise_integer(field->type.u.array_nestable.elem_type)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       integer_type = &field->type.u.array_nestable.elem_type->u.integer;
-                       num_elems = field->type.u.array_nestable.length;
-                       elem_len = integer_type->size;
-                       signedness = integer_type->signedness;
-                       if (index >= num_elems) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       ret = specialize_get_index_object_type(&stack_top->load.object_type,
-                                       signedness, elem_len);
-                       if (ret)
-                               goto end;
-                       gid.offset = index * (elem_len / CHAR_BIT);
-                       gid.array_len = num_elems * (elem_len / CHAR_BIT);
-                       gid.elem.type = stack_top->load.object_type;
-                       gid.elem.len = elem_len;
-                       if (integer_type->reverse_byte_order)
-                               gid.elem.rev_bo = true;
-                       stack_top->load.rev_bo = gid.elem.rev_bo;
-                       break;
-               }
-               case OBJECT_TYPE_SEQUENCE:
-               {
-                       const struct lttng_integer_type *integer_type;
-                       const struct lttng_event_field *field;
-                       uint32_t elem_len;
-                       int signedness;
-
-                       field = stack_top->load.field;
-                       if (!lttng_is_bytewise_integer(field->type.u.sequence_nestable.elem_type)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       integer_type = &field->type.u.sequence_nestable.elem_type->u.integer;
-                       elem_len = integer_type->size;
-                       signedness = integer_type->signedness;
-                       ret = specialize_get_index_object_type(&stack_top->load.object_type,
-                                       signedness, elem_len);
-                       if (ret)
-                               goto end;
-                       gid.offset = index * (elem_len / CHAR_BIT);
-                       gid.elem.type = stack_top->load.object_type;
-                       gid.elem.len = elem_len;
-                       if (integer_type->reverse_byte_order)
-                               gid.elem.rev_bo = true;
-                       stack_top->load.rev_bo = gid.elem.rev_bo;
-                       break;
-               }
-               case OBJECT_TYPE_STRUCT:
-                       /* Only generated by the specialize phase. */
-               case OBJECT_TYPE_VARIANT:       /* Fall-through */
-               default:
-                       printk(KERN_WARNING "Unexpected get index type %d",
-                               (int) stack_top->load.object_type);
-                       ret = -EINVAL;
-                       goto end;
-               }
-               break;
-       case LOAD_ROOT_CONTEXT:
-       case LOAD_ROOT_APP_CONTEXT:
-       case LOAD_ROOT_PAYLOAD:
-               printk(KERN_WARNING "Index lookup for root field not implemented yet.\n");
-               ret = -EINVAL;
-               goto end;
-       }
-       data_offset = bytecode_push_data(runtime, &gid,
-               __alignof__(gid), sizeof(gid));
-       if (data_offset < 0) {
-               ret = -EINVAL;
-               goto end;
-       }
-       switch (idx_len) {
-       case 2:
-               ((struct get_index_u16 *) insn->data)->index = data_offset;
-               break;
-       case 8:
-               ((struct get_index_u64 *) insn->data)->index = data_offset;
-               break;
-       default:
-               ret = -EINVAL;
-               goto end;
-       }
-
-       return 0;
-
-end:
-       return ret;
-}
-
-static int specialize_context_lookup_name(struct bytecode_runtime *bytecode,
-               struct load_op *insn)
-{
-       uint16_t offset;
-       const char *name;
-
-       offset = ((struct get_symbol *) insn->data)->offset;
-       name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
-       return lttng_get_context_index(lttng_static_ctx, name);
-}
-
-static int specialize_load_object(const struct lttng_event_field *field,
-               struct vstack_load *load, bool is_context)
-{
-       load->type = LOAD_OBJECT;
-       /*
-        * LTTng-UST layout all integer fields as s64 on the stack for the filter.
-        */
-       switch (field->type.atype) {
-       case atype_integer:
-               if (field->type.u.integer.signedness)
-                       load->object_type = OBJECT_TYPE_S64;
-               else
-                       load->object_type = OBJECT_TYPE_U64;
-               load->rev_bo = false;
-               break;
-       case atype_enum_nestable:
-       {
-               const struct lttng_integer_type *itype =
-                       &field->type.u.enum_nestable.container_type->u.integer;
-
-               if (itype->signedness)
-                       load->object_type = OBJECT_TYPE_S64;
-               else
-                       load->object_type = OBJECT_TYPE_U64;
-               load->rev_bo = false;
-               break;
-       }
-       case atype_array_nestable:
-               if (!lttng_is_bytewise_integer(field->type.u.array_nestable.elem_type)) {
-                       printk(KERN_WARNING "Array nesting only supports integer types.\n");
-                       return -EINVAL;
-               }
-               if (is_context) {
-                       load->object_type = OBJECT_TYPE_STRING;
-               } else {
-                       if (field->type.u.array_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
-                               load->object_type = OBJECT_TYPE_ARRAY;
-                               load->field = field;
-                       } else {
-                               load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
-                       }
-               }
-               break;
-       case atype_sequence_nestable:
-               if (!lttng_is_bytewise_integer(field->type.u.sequence_nestable.elem_type)) {
-                       printk(KERN_WARNING "Sequence nesting only supports integer types.\n");
-                       return -EINVAL;
-               }
-               if (is_context) {
-                       load->object_type = OBJECT_TYPE_STRING;
-               } else {
-                       if (field->type.u.sequence_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
-                               load->object_type = OBJECT_TYPE_SEQUENCE;
-                               load->field = field;
-                       } else {
-                               load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
-                       }
-               }
-               break;
-       case atype_string:
-               load->object_type = OBJECT_TYPE_STRING;
-               break;
-       case atype_struct_nestable:
-               printk(KERN_WARNING "Structure type cannot be loaded.\n");
-               return -EINVAL;
-       case atype_variant_nestable:
-               printk(KERN_WARNING "Variant type cannot be loaded.\n");
-               return -EINVAL;
-       default:
-               printk(KERN_WARNING "Unknown type: %d", (int) field->type.atype);
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static int specialize_context_lookup(struct bytecode_runtime *runtime,
-               struct load_op *insn,
-               struct vstack_load *load)
-{
-       int idx, ret;
-       struct lttng_ctx_field *ctx_field;
-       struct lttng_event_field *field;
-       struct filter_get_index_data gid;
-       ssize_t data_offset;
-
-       idx = specialize_context_lookup_name(runtime, insn);
-       if (idx < 0) {
-               return -ENOENT;
-       }
-       ctx_field = &lttng_static_ctx->fields[idx];
-       field = &ctx_field->event_field;
-       ret = specialize_load_object(field, load, true);
-       if (ret)
-               return ret;
-       /* Specialize each get_symbol into a get_index. */
-       insn->op = FILTER_OP_GET_INDEX_U16;
-       memset(&gid, 0, sizeof(gid));
-       gid.ctx_index = idx;
-       gid.elem.type = load->object_type;
-       data_offset = bytecode_push_data(runtime, &gid,
-               __alignof__(gid), sizeof(gid));
-       if (data_offset < 0) {
-               return -EINVAL;
-       }
-       ((struct get_index_u16 *) insn->data)->index = data_offset;
-       return 0;
-}
-
-static int specialize_event_payload_lookup(struct lttng_event *event,
-               struct bytecode_runtime *runtime,
-               struct load_op *insn,
-               struct vstack_load *load)
-{
-       const char *name;
-       uint16_t offset;
-       const struct lttng_event_desc *desc = event->desc;
-       unsigned int i, nr_fields;
-       bool found = false;
-       uint32_t field_offset = 0;
-       const struct lttng_event_field *field;
-       int ret;
-       struct filter_get_index_data gid;
-       ssize_t data_offset;
-
-       nr_fields = desc->nr_fields;
-       offset = ((struct get_symbol *) insn->data)->offset;
-       name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
-       for (i = 0; i < nr_fields; i++) {
-               field = &desc->fields[i];
-               if (field->nofilter) {
-                       continue;
-               }
-               if (!strcmp(field->name, name)) {
-                       found = true;
-                       break;
-               }
-               /* compute field offset on stack */
-               switch (field->type.atype) {
-               case atype_integer:
-               case atype_enum_nestable:
-                       field_offset += sizeof(int64_t);
-                       break;
-               case atype_array_nestable:
-               case atype_sequence_nestable:
-                       field_offset += sizeof(unsigned long);
-                       field_offset += sizeof(void *);
-                       break;
-               case atype_string:
-                       field_offset += sizeof(void *);
-                       break;
-               default:
-                       ret = -EINVAL;
-                       goto end;
-               }
-       }
-       if (!found) {
-               ret = -EINVAL;
-               goto end;
-       }
-
-       ret = specialize_load_object(field, load, false);
-       if (ret)
-               goto end;
-
-       /* Specialize each get_symbol into a get_index. */
-       insn->op = FILTER_OP_GET_INDEX_U16;
-       memset(&gid, 0, sizeof(gid));
-       gid.offset = field_offset;
-       gid.elem.type = load->object_type;
-       data_offset = bytecode_push_data(runtime, &gid,
-               __alignof__(gid), sizeof(gid));
-       if (data_offset < 0) {
-               ret = -EINVAL;
-               goto end;
-       }
-       ((struct get_index_u16 *) insn->data)->index = data_offset;
-       ret = 0;
-end:
-       return ret;
-}
-
-int lttng_filter_specialize_bytecode(struct lttng_event *event,
-               struct bytecode_runtime *bytecode)
-{
-       void *pc, *next_pc, *start_pc;
-       int ret = -EINVAL;
-       struct vstack _stack;
-       struct vstack *stack = &_stack;
-
-       vstack_init(stack);
-
-       start_pc = &bytecode->code[0];
-       for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
-                       pc = next_pc) {
-               switch (*(filter_opcode_t *) pc) {
-               case FILTER_OP_UNKNOWN:
-               default:
-                       printk(KERN_WARNING "unknown bytecode op %u\n",
-                               (unsigned int) *(filter_opcode_t *) pc);
-                       ret = -EINVAL;
-                       goto end;
-
-               case FILTER_OP_RETURN:
-               case FILTER_OP_RETURN_S64:
-                       ret = 0;
-                       goto end;
-
-               /* binary */
-               case FILTER_OP_MUL:
-               case FILTER_OP_DIV:
-               case FILTER_OP_MOD:
-               case FILTER_OP_PLUS:
-               case FILTER_OP_MINUS:
-                       printk(KERN_WARNING "unsupported bytecode op %u\n",
-                               (unsigned int) *(filter_opcode_t *) pc);
-                       ret = -EINVAL;
-                       goto end;
-
-               case FILTER_OP_EQ:
-               {
-                       struct binary_op *insn = (struct binary_op *) pc;
-
-                       switch(vstack_ax(stack)->type) {
-                       default:
-                               printk(KERN_WARNING "unknown register type\n");
-                               ret = -EINVAL;
-                               goto end;
-
-                       case REG_STRING:
-                               if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
-                                       insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
-                               else
-                                       insn->op = FILTER_OP_EQ_STRING;
-                               break;
-                       case REG_STAR_GLOB_STRING:
-                               insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
-                               break;
-                       case REG_S64:
-                               if (vstack_bx(stack)->type == REG_S64)
-                                       insn->op = FILTER_OP_EQ_S64;
-                               else
-                                       insn->op = FILTER_OP_EQ_DOUBLE_S64;
-                               break;
-                       case REG_DOUBLE:
-                               if (vstack_bx(stack)->type == REG_S64)
-                                       insn->op = FILTER_OP_EQ_S64_DOUBLE;
-                               else
-                                       insn->op = FILTER_OP_EQ_DOUBLE;
-                               break;
-                       }
-                       /* Pop 2, push 1 */
-                       if (vstack_pop(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_S64;
-                       next_pc += sizeof(struct binary_op);
-                       break;
-               }
-
-               case FILTER_OP_NE:
-               {
-                       struct binary_op *insn = (struct binary_op *) pc;
-
-                       switch(vstack_ax(stack)->type) {
-                       default:
-                               printk(KERN_WARNING "unknown register type\n");
-                               ret = -EINVAL;
-                               goto end;
-
-                       case REG_STRING:
-                               if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
-                                       insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
-                               else
-                                       insn->op = FILTER_OP_NE_STRING;
-                               break;
-                       case REG_STAR_GLOB_STRING:
-                               insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
-                               break;
-                       case REG_S64:
-                               if (vstack_bx(stack)->type == REG_S64)
-                                       insn->op = FILTER_OP_NE_S64;
-                               else
-                                       insn->op = FILTER_OP_NE_DOUBLE_S64;
-                               break;
-                       case REG_DOUBLE:
-                               if (vstack_bx(stack)->type == REG_S64)
-                                       insn->op = FILTER_OP_NE_S64_DOUBLE;
-                               else
-                                       insn->op = FILTER_OP_NE_DOUBLE;
-                               break;
-                       }
-                       /* Pop 2, push 1 */
-                       if (vstack_pop(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_S64;
-                       next_pc += sizeof(struct binary_op);
-                       break;
-               }
-
-               case FILTER_OP_GT:
-               {
-                       struct binary_op *insn = (struct binary_op *) pc;
-
-                       switch(vstack_ax(stack)->type) {
-                       default:
-                               printk(KERN_WARNING "unknown register type\n");
-                               ret = -EINVAL;
-                               goto end;
-
-                       case REG_STAR_GLOB_STRING:
-                               printk(KERN_WARNING "invalid register type for > binary operator\n");
-                               ret = -EINVAL;
-                               goto end;
-                       case REG_STRING:
-                               insn->op = FILTER_OP_GT_STRING;
-                               break;
-                       case REG_S64:
-                               if (vstack_bx(stack)->type == REG_S64)
-                                       insn->op = FILTER_OP_GT_S64;
-                               else
-                                       insn->op = FILTER_OP_GT_DOUBLE_S64;
-                               break;
-                       case REG_DOUBLE:
-                               if (vstack_bx(stack)->type == REG_S64)
-                                       insn->op = FILTER_OP_GT_S64_DOUBLE;
-                               else
-                                       insn->op = FILTER_OP_GT_DOUBLE;
-                               break;
-                       }
-                       /* Pop 2, push 1 */
-                       if (vstack_pop(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_S64;
-                       next_pc += sizeof(struct binary_op);
-                       break;
-               }
-
-               case FILTER_OP_LT:
-               {
-                       struct binary_op *insn = (struct binary_op *) pc;
-
-                       switch(vstack_ax(stack)->type) {
-                       default:
-                               printk(KERN_WARNING "unknown register type\n");
-                               ret = -EINVAL;
-                               goto end;
-
-                       case REG_STAR_GLOB_STRING:
-                               printk(KERN_WARNING "invalid register type for < binary operator\n");
-                               ret = -EINVAL;
-                               goto end;
-                       case REG_STRING:
-                               insn->op = FILTER_OP_LT_STRING;
-                               break;
-                       case REG_S64:
-                               if (vstack_bx(stack)->type == REG_S64)
-                                       insn->op = FILTER_OP_LT_S64;
-                               else
-                                       insn->op = FILTER_OP_LT_DOUBLE_S64;
-                               break;
-                       case REG_DOUBLE:
-                               if (vstack_bx(stack)->type == REG_S64)
-                                       insn->op = FILTER_OP_LT_S64_DOUBLE;
-                               else
-                                       insn->op = FILTER_OP_LT_DOUBLE;
-                               break;
-                       }
-                       /* Pop 2, push 1 */
-                       if (vstack_pop(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_S64;
-                       next_pc += sizeof(struct binary_op);
-                       break;
-               }
-
-               case FILTER_OP_GE:
-               {
-                       struct binary_op *insn = (struct binary_op *) pc;
-
-                       switch(vstack_ax(stack)->type) {
-                       default:
-                               printk(KERN_WARNING "unknown register type\n");
-                               ret = -EINVAL;
-                               goto end;
-
-                       case REG_STAR_GLOB_STRING:
-                               printk(KERN_WARNING "invalid register type for >= binary operator\n");
-                               ret = -EINVAL;
-                               goto end;
-                       case REG_STRING:
-                               insn->op = FILTER_OP_GE_STRING;
-                               break;
-                       case REG_S64:
-                               if (vstack_bx(stack)->type == REG_S64)
-                                       insn->op = FILTER_OP_GE_S64;
-                               else
-                                       insn->op = FILTER_OP_GE_DOUBLE_S64;
-                               break;
-                       case REG_DOUBLE:
-                               if (vstack_bx(stack)->type == REG_S64)
-                                       insn->op = FILTER_OP_GE_S64_DOUBLE;
-                               else
-                                       insn->op = FILTER_OP_GE_DOUBLE;
-                               break;
-                       }
-                       /* Pop 2, push 1 */
-                       if (vstack_pop(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_S64;
-                       next_pc += sizeof(struct binary_op);
-                       break;
-               }
-               case FILTER_OP_LE:
-               {
-                       struct binary_op *insn = (struct binary_op *) pc;
-
-                       switch(vstack_ax(stack)->type) {
-                       default:
-                               printk(KERN_WARNING "unknown register type\n");
-                               ret = -EINVAL;
-                               goto end;
-
-                       case REG_STAR_GLOB_STRING:
-                               printk(KERN_WARNING "invalid register type for <= binary operator\n");
-                               ret = -EINVAL;
-                               goto end;
-                       case REG_STRING:
-                               insn->op = FILTER_OP_LE_STRING;
-                               break;
-                       case REG_S64:
-                               if (vstack_bx(stack)->type == REG_S64)
-                                       insn->op = FILTER_OP_LE_S64;
-                               else
-                                       insn->op = FILTER_OP_LE_DOUBLE_S64;
-                               break;
-                       case REG_DOUBLE:
-                               if (vstack_bx(stack)->type == REG_S64)
-                                       insn->op = FILTER_OP_LE_S64_DOUBLE;
-                               else
-                                       insn->op = FILTER_OP_LE_DOUBLE;
-                               break;
-                       }
-                       vstack_ax(stack)->type = REG_S64;
-                       next_pc += sizeof(struct binary_op);
-                       break;
-               }
-
-               case FILTER_OP_EQ_STRING:
-               case FILTER_OP_NE_STRING:
-               case FILTER_OP_GT_STRING:
-               case FILTER_OP_LT_STRING:
-               case FILTER_OP_GE_STRING:
-               case FILTER_OP_LE_STRING:
-               case FILTER_OP_EQ_STAR_GLOB_STRING:
-               case FILTER_OP_NE_STAR_GLOB_STRING:
-               case FILTER_OP_EQ_S64:
-               case FILTER_OP_NE_S64:
-               case FILTER_OP_GT_S64:
-               case FILTER_OP_LT_S64:
-               case FILTER_OP_GE_S64:
-               case FILTER_OP_LE_S64:
-               case FILTER_OP_EQ_DOUBLE:
-               case FILTER_OP_NE_DOUBLE:
-               case FILTER_OP_GT_DOUBLE:
-               case FILTER_OP_LT_DOUBLE:
-               case FILTER_OP_GE_DOUBLE:
-               case FILTER_OP_LE_DOUBLE:
-               case FILTER_OP_EQ_DOUBLE_S64:
-               case FILTER_OP_NE_DOUBLE_S64:
-               case FILTER_OP_GT_DOUBLE_S64:
-               case FILTER_OP_LT_DOUBLE_S64:
-               case FILTER_OP_GE_DOUBLE_S64:
-               case FILTER_OP_LE_DOUBLE_S64:
-               case FILTER_OP_EQ_S64_DOUBLE:
-               case FILTER_OP_NE_S64_DOUBLE:
-               case FILTER_OP_GT_S64_DOUBLE:
-               case FILTER_OP_LT_S64_DOUBLE:
-               case FILTER_OP_GE_S64_DOUBLE:
-               case FILTER_OP_LE_S64_DOUBLE:
-               case FILTER_OP_BIT_RSHIFT:
-               case FILTER_OP_BIT_LSHIFT:
-               case FILTER_OP_BIT_AND:
-               case FILTER_OP_BIT_OR:
-               case FILTER_OP_BIT_XOR:
-               {
-                       /* Pop 2, push 1 */
-                       if (vstack_pop(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_S64;
-                       next_pc += sizeof(struct binary_op);
-                       break;
-               }
-
-               /* unary */
-               case FILTER_OP_UNARY_PLUS:
-               {
-                       struct unary_op *insn = (struct unary_op *) pc;
-
-                       switch(vstack_ax(stack)->type) {
-                       default:
-                               printk(KERN_WARNING "unknown register type\n");
-                               ret = -EINVAL;
-                               goto end;
-
-                       case REG_S64:
-                               insn->op = FILTER_OP_UNARY_PLUS_S64;
-                               break;
-                       case REG_DOUBLE:
-                               insn->op = FILTER_OP_UNARY_PLUS_DOUBLE;
-                               break;
-                       }
-                       /* Pop 1, push 1 */
-                       next_pc += sizeof(struct unary_op);
-                       break;
-               }
-
-               case FILTER_OP_UNARY_MINUS:
-               {
-                       struct unary_op *insn = (struct unary_op *) pc;
-
-                       switch(vstack_ax(stack)->type) {
-                       default:
-                               printk(KERN_WARNING "unknown register type\n");
-                               ret = -EINVAL;
-                               goto end;
-
-                       case REG_S64:
-                               insn->op = FILTER_OP_UNARY_MINUS_S64;
-                               break;
-                       case REG_DOUBLE:
-                               insn->op = FILTER_OP_UNARY_MINUS_DOUBLE;
-                               break;
-                       }
-                       /* Pop 1, push 1 */
-                       next_pc += sizeof(struct unary_op);
-                       break;
-               }
-
-               case FILTER_OP_UNARY_NOT:
-               {
-                       struct unary_op *insn = (struct unary_op *) pc;
-
-                       switch(vstack_ax(stack)->type) {
-                       default:
-                               printk(KERN_WARNING "unknown register type\n");
-                               ret = -EINVAL;
-                               goto end;
-
-                       case REG_S64:
-                               insn->op = FILTER_OP_UNARY_NOT_S64;
-                               break;
-                       case REG_DOUBLE:
-                               insn->op = FILTER_OP_UNARY_NOT_DOUBLE;
-                               break;
-                       }
-                       /* Pop 1, push 1 */
-                       next_pc += sizeof(struct unary_op);
-                       break;
-               }
-
-               case FILTER_OP_UNARY_BIT_NOT:
-               {
-                       /* Pop 1, push 1 */
-                       next_pc += sizeof(struct unary_op);
-                       break;
-               }
-
-               case FILTER_OP_UNARY_PLUS_S64:
-               case FILTER_OP_UNARY_MINUS_S64:
-               case FILTER_OP_UNARY_NOT_S64:
-               case FILTER_OP_UNARY_PLUS_DOUBLE:
-               case FILTER_OP_UNARY_MINUS_DOUBLE:
-               case FILTER_OP_UNARY_NOT_DOUBLE:
-               {
-                       /* Pop 1, push 1 */
-                       next_pc += sizeof(struct unary_op);
-                       break;
-               }
-
-               /* logical */
-               case FILTER_OP_AND:
-               case FILTER_OP_OR:
-               {
-                       /* Continue to next instruction */
-                       /* Pop 1 when jump not taken */
-                       if (vstack_pop(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       next_pc += sizeof(struct logical_op);
-                       break;
-               }
-
-               /* load field ref */
-               case FILTER_OP_LOAD_FIELD_REF:
-               {
-                       printk(KERN_WARNING "Unknown field ref type\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               /* get context ref */
-               case FILTER_OP_GET_CONTEXT_REF:
-               {
-                       printk(KERN_WARNING "Unknown get context ref type\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               case FILTER_OP_LOAD_FIELD_REF_STRING:
-               case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
-               case FILTER_OP_GET_CONTEXT_REF_STRING:
-               case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
-               case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
-               {
-                       if (vstack_push(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_STRING;
-                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
-                       break;
-               }
-               case FILTER_OP_LOAD_FIELD_REF_S64:
-               case FILTER_OP_GET_CONTEXT_REF_S64:
-               {
-                       if (vstack_push(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_S64;
-                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
-                       break;
-               }
-               case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
-               case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
-               {
-                       if (vstack_push(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_DOUBLE;
-                       next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
-                       break;
-               }
-
-               /* load from immediate operand */
-               case FILTER_OP_LOAD_STRING:
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-
-                       if (vstack_push(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_STRING;
-                       next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
-                       break;
-               }
-
-               case FILTER_OP_LOAD_STAR_GLOB_STRING:
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-
-                       if (vstack_push(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
-                       next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
-                       break;
-               }
-
-               case FILTER_OP_LOAD_S64:
-               {
-                       if (vstack_push(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_S64;
-                       next_pc += sizeof(struct load_op)
-                                       + sizeof(struct literal_numeric);
-                       break;
-               }
-
-               case FILTER_OP_LOAD_DOUBLE:
-               {
-                       if (vstack_push(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_DOUBLE;
-                       next_pc += sizeof(struct load_op)
-                                       + sizeof(struct literal_double);
-                       break;
-               }
-
-               /* cast */
-               case FILTER_OP_CAST_TO_S64:
-               {
-                       struct cast_op *insn = (struct cast_op *) pc;
-
-                       switch (vstack_ax(stack)->type) {
-                       default:
-                               printk(KERN_WARNING "unknown register type\n");
-                               ret = -EINVAL;
-                               goto end;
-
-                       case REG_STRING:
-                       case REG_STAR_GLOB_STRING:
-                               printk(KERN_WARNING "Cast op can only be applied to numeric or floating point registers\n");
-                               ret = -EINVAL;
-                               goto end;
-                       case REG_S64:
-                               insn->op = FILTER_OP_CAST_NOP;
-                               break;
-                       case REG_DOUBLE:
-                               insn->op = FILTER_OP_CAST_DOUBLE_TO_S64;
-                               break;
-                       }
-                       /* Pop 1, push 1 */
-                       vstack_ax(stack)->type = REG_S64;
-                       next_pc += sizeof(struct cast_op);
-                       break;
-               }
-               case FILTER_OP_CAST_DOUBLE_TO_S64:
-               {
-                       /* Pop 1, push 1 */
-                       vstack_ax(stack)->type = REG_S64;
-                       next_pc += sizeof(struct cast_op);
-                       break;
-               }
-               case FILTER_OP_CAST_NOP:
-               {
-                       next_pc += sizeof(struct cast_op);
-                       break;
-               }
-
-               /*
-                * Instructions for recursive traversal through composed types.
-                */
-               case FILTER_OP_GET_CONTEXT_ROOT:
-               {
-                       if (vstack_push(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_PTR;
-                       vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
-                       next_pc += sizeof(struct load_op);
-                       break;
-               }
-               case FILTER_OP_GET_APP_CONTEXT_ROOT:
-               {
-                       if (vstack_push(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_PTR;
-                       vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
-                       next_pc += sizeof(struct load_op);
-                       break;
-               }
-               case FILTER_OP_GET_PAYLOAD_ROOT:
-               {
-                       if (vstack_push(stack)) {
-                               ret = -EINVAL;
-                               goto end;
-                       }
-                       vstack_ax(stack)->type = REG_PTR;
-                       vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
-                       next_pc += sizeof(struct load_op);
-                       break;
-               }
-
-               case FILTER_OP_LOAD_FIELD:
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-
-                       WARN_ON_ONCE(vstack_ax(stack)->type != REG_PTR);
-                       /* Pop 1, push 1 */
-                       ret = specialize_load_field(vstack_ax(stack), insn);
-                       if (ret)
-                               goto end;
-
-                       next_pc += sizeof(struct load_op);
-                       break;
-               }
-
-               case FILTER_OP_LOAD_FIELD_S8:
-               case FILTER_OP_LOAD_FIELD_S16:
-               case FILTER_OP_LOAD_FIELD_S32:
-               case FILTER_OP_LOAD_FIELD_S64:
-               case FILTER_OP_LOAD_FIELD_U8:
-               case FILTER_OP_LOAD_FIELD_U16:
-               case FILTER_OP_LOAD_FIELD_U32:
-               case FILTER_OP_LOAD_FIELD_U64:
-               {
-                       /* Pop 1, push 1 */
-                       vstack_ax(stack)->type = REG_S64;
-                       next_pc += sizeof(struct load_op);
-                       break;
-               }
-
-               case FILTER_OP_LOAD_FIELD_STRING:
-               case FILTER_OP_LOAD_FIELD_SEQUENCE:
-               {
-                       /* Pop 1, push 1 */
-                       vstack_ax(stack)->type = REG_STRING;
-                       next_pc += sizeof(struct load_op);
-                       break;
-               }
-
-               case FILTER_OP_LOAD_FIELD_DOUBLE:
-               {
-                       /* Pop 1, push 1 */
-                       vstack_ax(stack)->type = REG_DOUBLE;
-                       next_pc += sizeof(struct load_op);
-                       break;
-               }
-
-               case FILTER_OP_GET_SYMBOL:
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-
-                       dbg_printk("op get symbol\n");
-                       switch (vstack_ax(stack)->load.type) {
-                       case LOAD_OBJECT:
-                               printk(KERN_WARNING "Nested fields not implemented yet.\n");
-                               ret = -EINVAL;
-                               goto end;
-                       case LOAD_ROOT_CONTEXT:
-                               /* Lookup context field. */
-                               ret = specialize_context_lookup(bytecode, insn,
-                                       &vstack_ax(stack)->load);
-                               if (ret)
-                                       goto end;
-                               break;
-                       case LOAD_ROOT_APP_CONTEXT:
-                               ret = -EINVAL;
-                               goto end;
-                       case LOAD_ROOT_PAYLOAD:
-                               /* Lookup event payload field. */
-                               ret = specialize_event_payload_lookup(event,
-                                       bytecode, insn,
-                                       &vstack_ax(stack)->load);
-                               if (ret)
-                                       goto end;
-                               break;
-                       }
-                       next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
-                       break;
-               }
-
-               case FILTER_OP_GET_SYMBOL_FIELD:
-               {
-                       /* Always generated by specialize phase. */
-                       ret = -EINVAL;
-                       goto end;
-               }
-
-               case FILTER_OP_GET_INDEX_U16:
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-                       struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
-
-                       dbg_printk("op get index u16\n");
-                       /* Pop 1, push 1 */
-                       ret = specialize_get_index(bytecode, insn, index->index,
-                                       vstack_ax(stack), sizeof(*index));
-                       if (ret)
-                               goto end;
-                       next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
-                       break;
-               }
-
-               case FILTER_OP_GET_INDEX_U64:
-               {
-                       struct load_op *insn = (struct load_op *) pc;
-                       struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
-
-                       dbg_printk("op get index u64\n");
-                       /* Pop 1, push 1 */
-                       ret = specialize_get_index(bytecode, insn, index->index,
-                                       vstack_ax(stack), sizeof(*index));
-                       if (ret)
-                               goto end;
-                       next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
-                       break;
-               }
-
-               }
-       }
-end:
-       return ret;
-}
diff --git a/src/lttng-filter-validator.c b/src/lttng-filter-validator.c
deleted file mode 100644 (file)
index 38d6ed0..0000000
+++ /dev/null
@@ -1,1743 +0,0 @@
-/* SPDX-License-Identifier: MIT
- *
- * lttng-filter-validator.c
- *
- * LTTng modules filter bytecode validator.
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/types.h>
-#include <linux/jhash.h>
-#include <linux/slab.h>
-
-#include <wrapper/list.h>
-#include <lttng/filter.h>
-
-#define MERGE_POINT_TABLE_BITS         7
-#define MERGE_POINT_TABLE_SIZE         (1U << MERGE_POINT_TABLE_BITS)
-
-/* merge point table node */
-struct mp_node {
-       struct hlist_node node;
-
-       /* Context at merge point */
-       struct vstack stack;
-       unsigned long target_pc;
-};
-
-struct mp_table {
-       struct hlist_head mp_head[MERGE_POINT_TABLE_SIZE];
-};
-
-static
-int lttng_hash_match(struct mp_node *mp_node, unsigned long key_pc)
-{
-       if (mp_node->target_pc == key_pc)
-               return 1;
-       else
-               return 0;
-}
-
-static
-int merge_points_compare(const struct vstack *stacka,
-                       const struct vstack *stackb)
-{
-       int i, len;
-
-       if (stacka->top != stackb->top)
-               return 1;
-       len = stacka->top + 1;
-       WARN_ON_ONCE(len < 0);
-       for (i = 0; i < len; i++) {
-               if (stacka->e[i].type != stackb->e[i].type)
-                       return 1;
-       }
-       return 0;
-}
-
-static
-int merge_point_add_check(struct mp_table *mp_table, unsigned long target_pc,
-               const struct vstack *stack)
-{
-       struct mp_node *mp_node;
-       unsigned long hash = jhash_1word(target_pc, 0);
-       struct hlist_head *head;
-       struct mp_node *lookup_node;
-       int found = 0;
-
-       dbg_printk("Filter: adding merge point at offset %lu, hash %lu\n",
-                       target_pc, hash);
-       mp_node = kzalloc(sizeof(struct mp_node), GFP_KERNEL);
-       if (!mp_node)
-               return -ENOMEM;
-       mp_node->target_pc = target_pc;
-       memcpy(&mp_node->stack, stack, sizeof(mp_node->stack));
-
-       head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
-       lttng_hlist_for_each_entry(lookup_node, head, node) {
-               if (lttng_hash_match(lookup_node, target_pc)) {
-                       found = 1;
-                       break;
-               }
-       }
-       if (found) {
-               /* Key already present */
-               dbg_printk("Filter: compare merge points for offset %lu, hash %lu\n",
-                               target_pc, hash);
-               kfree(mp_node);
-               if (merge_points_compare(stack, &lookup_node->stack)) {
-                       printk(KERN_WARNING "Merge points differ for offset %lu\n",
-                               target_pc);
-                       return -EINVAL;
-               }
-       } else {
-               hlist_add_head(&mp_node->node, head);
-       }
-       return 0;
-}
-
-/*
- * Binary comparators use top of stack and top of stack -1.
- */
-static
-int bin_op_compare_check(struct vstack *stack, const filter_opcode_t opcode,
-               const char *str)
-{
-       if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
-               goto error_empty;
-
-       switch (vstack_ax(stack)->type) {
-       default:
-       case REG_DOUBLE:
-               goto error_type;
-
-       case REG_STRING:
-               switch (vstack_bx(stack)->type) {
-               default:
-               case REG_DOUBLE:
-                       goto error_type;
-               case REG_TYPE_UNKNOWN:
-                       goto unknown;
-               case REG_STRING:
-                       break;
-               case REG_STAR_GLOB_STRING:
-                       if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) {
-                               goto error_mismatch;
-                       }
-                       break;
-               case REG_S64:
-                       goto error_mismatch;
-               }
-               break;
-       case REG_STAR_GLOB_STRING:
-               switch (vstack_bx(stack)->type) {
-               default:
-               case REG_DOUBLE:
-                       goto error_type;
-               case REG_TYPE_UNKNOWN:
-                       goto unknown;
-               case REG_STRING:
-                       if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) {
-                               goto error_mismatch;
-                       }
-                       break;
-               case REG_STAR_GLOB_STRING:
-               case REG_S64:
-                       goto error_mismatch;
-               }
-               break;
-       case REG_S64:
-               switch (vstack_bx(stack)->type) {
-               default:
-               case REG_DOUBLE:
-                       goto error_type;
-               case REG_TYPE_UNKNOWN:
-                       goto unknown;
-               case REG_STRING:
-               case REG_STAR_GLOB_STRING:
-                       goto error_mismatch;
-               case REG_S64:
-                       break;
-               }
-               break;
-       case REG_TYPE_UNKNOWN:
-               switch (vstack_bx(stack)->type) {
-               default:
-               case REG_DOUBLE:
-                       goto error_type;
-               case REG_TYPE_UNKNOWN:
-               case REG_STRING:
-               case REG_STAR_GLOB_STRING:
-               case REG_S64:
-                       goto unknown;
-               }
-               break;
-       }
-       return 0;
-
-unknown:
-       return 1;
-
-error_empty:
-       printk(KERN_WARNING "empty stack for '%s' binary operator\n", str);
-       return -EINVAL;
-
-error_mismatch:
-       printk(KERN_WARNING "type mismatch for '%s' binary operator\n", str);
-       return -EINVAL;
-
-error_type:
-       printk(KERN_WARNING "unknown type for '%s' binary operator\n", str);
-       return -EINVAL;
-}
-
-/*
- * Binary bitwise operators use top of stack and top of stack -1.
- * Return 0 if typing is known to match, 1 if typing is dynamic
- * (unknown), negative error value on error.
- */
-static
-int bin_op_bitwise_check(struct vstack *stack, filter_opcode_t opcode,
-               const char *str)
-{
-       if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
-               goto error_empty;
-
-       switch (vstack_ax(stack)->type) {
-       default:
-       case REG_DOUBLE:
-               goto error_type;
-
-       case REG_TYPE_UNKNOWN:
-               switch (vstack_bx(stack)->type) {
-               default:
-               case REG_DOUBLE:
-                       goto error_type;
-               case REG_TYPE_UNKNOWN:
-               case REG_STRING:
-               case REG_STAR_GLOB_STRING:
-               case REG_S64:
-                       goto unknown;
-               }
-               break;
-       case REG_S64:
-               switch (vstack_bx(stack)->type) {
-               default:
-               case REG_DOUBLE:
-                       goto error_type;
-               case REG_TYPE_UNKNOWN:
-                       goto unknown;
-               case REG_S64:
-                       break;
-               }
-               break;
-       }
-       return 0;
-
-unknown:
-       return 1;
-
-error_empty:
-       printk(KERN_WARNING "empty stack for '%s' binary operator\n", str);
-       return -EINVAL;
-
-error_type:
-       printk(KERN_WARNING "unknown type for '%s' binary operator\n", str);
-       return -EINVAL;
-}
-
-static
-int validate_get_symbol(struct bytecode_runtime *bytecode,
-               const struct get_symbol *sym)
-{
-       const char *str, *str_limit;
-       size_t len_limit;
-
-       if (sym->offset >= bytecode->p.bc->bc.len - bytecode->p.bc->bc.reloc_offset)
-               return -EINVAL;
-
-       str = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + sym->offset;
-       str_limit = bytecode->p.bc->bc.data + bytecode->p.bc->bc.len;
-       len_limit = str_limit - str;
-       if (strnlen(str, len_limit) == len_limit)
-               return -EINVAL;
-       return 0;
-}
-
-/*
- * Validate bytecode range overflow within the validation pass.
- * Called for each instruction encountered.
- */
-static
-int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
-               char *start_pc, char *pc)
-{
-       int ret = 0;
-
-       switch (*(filter_opcode_t *) pc) {
-       case FILTER_OP_UNKNOWN:
-       default:
-       {
-               printk(KERN_WARNING "unknown bytecode op %u\n",
-                       (unsigned int) *(filter_opcode_t *) pc);
-               ret = -EINVAL;
-               break;
-       }
-
-       case FILTER_OP_RETURN:
-       case FILTER_OP_RETURN_S64:
-       {
-               if (unlikely(pc + sizeof(struct return_op)
-                               > start_pc + bytecode->len)) {
-                       ret = -ERANGE;
-               }
-               break;
-       }
-
-       /* binary */
-       case FILTER_OP_MUL:
-       case FILTER_OP_DIV:
-       case FILTER_OP_MOD:
-       case FILTER_OP_PLUS:
-       case FILTER_OP_MINUS:
-       case FILTER_OP_EQ_DOUBLE:
-       case FILTER_OP_NE_DOUBLE:
-       case FILTER_OP_GT_DOUBLE:
-       case FILTER_OP_LT_DOUBLE:
-       case FILTER_OP_GE_DOUBLE:
-       case FILTER_OP_LE_DOUBLE:
-       /* Floating point */
-       case FILTER_OP_EQ_DOUBLE_S64:
-       case FILTER_OP_NE_DOUBLE_S64:
-       case FILTER_OP_GT_DOUBLE_S64:
-       case FILTER_OP_LT_DOUBLE_S64:
-       case FILTER_OP_GE_DOUBLE_S64:
-       case FILTER_OP_LE_DOUBLE_S64:
-       case FILTER_OP_EQ_S64_DOUBLE:
-       case FILTER_OP_NE_S64_DOUBLE:
-       case FILTER_OP_GT_S64_DOUBLE:
-       case FILTER_OP_LT_S64_DOUBLE:
-       case FILTER_OP_GE_S64_DOUBLE:
-       case FILTER_OP_LE_S64_DOUBLE:
-       case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
-       case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
-       case FILTER_OP_LOAD_DOUBLE:
-       case FILTER_OP_CAST_DOUBLE_TO_S64:
-       case FILTER_OP_UNARY_PLUS_DOUBLE:
-       case FILTER_OP_UNARY_MINUS_DOUBLE:
-       case FILTER_OP_UNARY_NOT_DOUBLE:
-       {
-               printk(KERN_WARNING "unsupported bytecode op %u\n",
-                       (unsigned int) *(filter_opcode_t *) pc);
-               ret = -EINVAL;
-               break;
-       }
-
-       case FILTER_OP_EQ:
-       case FILTER_OP_NE:
-       case FILTER_OP_GT:
-       case FILTER_OP_LT:
-       case FILTER_OP_GE:
-       case FILTER_OP_LE:
-       case FILTER_OP_EQ_STRING:
-       case FILTER_OP_NE_STRING:
-       case FILTER_OP_GT_STRING:
-       case FILTER_OP_LT_STRING:
-       case FILTER_OP_GE_STRING:
-       case FILTER_OP_LE_STRING:
-       case FILTER_OP_EQ_STAR_GLOB_STRING:
-       case FILTER_OP_NE_STAR_GLOB_STRING:
-       case FILTER_OP_EQ_S64:
-       case FILTER_OP_NE_S64:
-       case FILTER_OP_GT_S64:
-       case FILTER_OP_LT_S64:
-       case FILTER_OP_GE_S64:
-       case FILTER_OP_LE_S64:
-       case FILTER_OP_BIT_RSHIFT:
-       case FILTER_OP_BIT_LSHIFT:
-       case FILTER_OP_BIT_AND:
-       case FILTER_OP_BIT_OR:
-       case FILTER_OP_BIT_XOR:
-       {
-               if (unlikely(pc + sizeof(struct binary_op)
-                               > start_pc + bytecode->len)) {
-                       ret = -ERANGE;
-               }
-               break;
-       }
-
-       /* unary */
-       case FILTER_OP_UNARY_PLUS:
-       case FILTER_OP_UNARY_MINUS:
-       case FILTER_OP_UNARY_NOT:
-       case FILTER_OP_UNARY_PLUS_S64:
-       case FILTER_OP_UNARY_MINUS_S64:
-       case FILTER_OP_UNARY_NOT_S64:
-       case FILTER_OP_UNARY_BIT_NOT:
-       {
-               if (unlikely(pc + sizeof(struct unary_op)
-                               > start_pc + bytecode->len)) {
-                       ret = -ERANGE;
-               }
-               break;
-       }
-
-       /* logical */
-       case FILTER_OP_AND:
-       case FILTER_OP_OR:
-       {
-               if (unlikely(pc + sizeof(struct logical_op)
-                               > start_pc + bytecode->len)) {
-                       ret = -ERANGE;
-               }
-               break;
-       }
-
-       /* load field ref */
-       case FILTER_OP_LOAD_FIELD_REF:
-       {
-               printk(KERN_WARNING "Unknown field ref type\n");
-               ret = -EINVAL;
-               break;
-       }
-
-       /* get context ref */
-       case FILTER_OP_GET_CONTEXT_REF:
-       {
-               printk(KERN_WARNING "Unknown field ref type\n");
-               ret = -EINVAL;
-               break;
-       }
-       case FILTER_OP_LOAD_FIELD_REF_STRING:
-       case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
-       case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
-       case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
-       case FILTER_OP_LOAD_FIELD_REF_S64:
-       case FILTER_OP_GET_CONTEXT_REF_STRING:
-       case FILTER_OP_GET_CONTEXT_REF_S64:
-       {
-               if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref)
-                               > start_pc + bytecode->len)) {
-                       ret = -ERANGE;
-               }
-               break;
-       }
-
-       /* load from immediate operand */
-       case FILTER_OP_LOAD_STRING:
-       case FILTER_OP_LOAD_STAR_GLOB_STRING:
-       {
-               struct load_op *insn = (struct load_op *) pc;
-               uint32_t str_len, maxlen;
-
-               if (unlikely(pc + sizeof(struct load_op)
-                               > start_pc + bytecode->len)) {
-                       ret = -ERANGE;
-                       break;
-               }
-
-               maxlen = start_pc + bytecode->len - pc - sizeof(struct load_op);
-               str_len = strnlen(insn->data, maxlen);
-               if (unlikely(str_len >= maxlen)) {
-                       /* Final '\0' not found within range */
-                       ret = -ERANGE;
-               }
-               break;
-       }
-
-       case FILTER_OP_LOAD_S64:
-       {
-               if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric)
-                               > start_pc + bytecode->len)) {
-                       ret = -ERANGE;
-               }
-               break;
-       }
-
-       case FILTER_OP_CAST_TO_S64:
-       case FILTER_OP_CAST_NOP:
-       {
-               if (unlikely(pc + sizeof(struct cast_op)
-                               > start_pc + bytecode->len)) {
-                       ret = -ERANGE;
-               }
-               break;
-       }
-
-       /*
-        * Instructions for recursive traversal through composed types.
-        */
-       case FILTER_OP_GET_CONTEXT_ROOT:
-       case FILTER_OP_GET_APP_CONTEXT_ROOT:
-       case FILTER_OP_GET_PAYLOAD_ROOT:
-       case FILTER_OP_LOAD_FIELD:
-       case FILTER_OP_LOAD_FIELD_S8:
-       case FILTER_OP_LOAD_FIELD_S16:
-       case FILTER_OP_LOAD_FIELD_S32:
-       case FILTER_OP_LOAD_FIELD_S64:
-       case FILTER_OP_LOAD_FIELD_U8:
-       case FILTER_OP_LOAD_FIELD_U16:
-       case FILTER_OP_LOAD_FIELD_U32:
-       case FILTER_OP_LOAD_FIELD_U64:
-       case FILTER_OP_LOAD_FIELD_STRING:
-       case FILTER_OP_LOAD_FIELD_SEQUENCE:
-       case FILTER_OP_LOAD_FIELD_DOUBLE:
-               if (unlikely(pc + sizeof(struct load_op)
-                               > start_pc + bytecode->len)) {
-                       ret = -ERANGE;
-               }
-               break;
-
-       case FILTER_OP_GET_SYMBOL:
-       {
-               struct load_op *insn = (struct load_op *) pc;
-               struct get_symbol *sym = (struct get_symbol *) insn->data;
-
-               if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_symbol)
-                               > start_pc + bytecode->len)) {
-                       ret = -ERANGE;
-                       break;
-               }
-               ret = validate_get_symbol(bytecode, sym);
-               break;
-       }
-
-       case FILTER_OP_GET_SYMBOL_FIELD:
-               printk(KERN_WARNING "Unexpected get symbol field\n");
-               ret = -EINVAL;
-               break;
-
-       case FILTER_OP_GET_INDEX_U16:
-               if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u16)
-                               > start_pc + bytecode->len)) {
-                       ret = -ERANGE;
-               }
-               break;
-
-       case FILTER_OP_GET_INDEX_U64:
-               if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u64)
-                               > start_pc + bytecode->len)) {
-                       ret = -ERANGE;
-               }
-               break;
-       }
-
-       return ret;
-}
-
-static
-unsigned long delete_all_nodes(struct mp_table *mp_table)
-{
-       struct mp_node *mp_node;
-       struct hlist_node *tmp;
-       unsigned long nr_nodes = 0;
-       int i;
-
-       for (i = 0; i < MERGE_POINT_TABLE_SIZE; i++) {
-               struct hlist_head *head;
-
-               head = &mp_table->mp_head[i];
-               lttng_hlist_for_each_entry_safe(mp_node, tmp, head, node) {
-                       kfree(mp_node);
-                       nr_nodes++;
-               }
-       }
-       return nr_nodes;
-}
-
/*
 * Validate that the instruction at @pc is permitted given the virtual
 * stack state (emulated register types in ax/bx) left by the previous
 * instruction. This pass only checks types and jump targets; the stack
 * effects themselves are applied by the instruction-execution pass.
 *
 * Return value:
 * >=0: success
 * <0: error
 */
static
int validate_instruction_context(struct bytecode_runtime *bytecode,
		struct vstack *stack,
		char *start_pc,
		char *pc)
{
	int ret = 0;
	const filter_opcode_t opcode = *(filter_opcode_t *) pc;

	switch (opcode) {
	case FILTER_OP_UNKNOWN:
	default:
	{
		printk(KERN_WARNING "unknown bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	case FILTER_OP_RETURN:
	case FILTER_OP_RETURN_S64:
	{
		goto end;
	}

	/*
	 * Arithmetic binary ops and every double (floating point) opcode
	 * are rejected: the kernel interpreter does not support them.
	 */
	/* binary */
	case FILTER_OP_MUL:
	case FILTER_OP_DIV:
	case FILTER_OP_MOD:
	case FILTER_OP_PLUS:
	case FILTER_OP_MINUS:
	/* Floating point */
	case FILTER_OP_EQ_DOUBLE:
	case FILTER_OP_NE_DOUBLE:
	case FILTER_OP_GT_DOUBLE:
	case FILTER_OP_LT_DOUBLE:
	case FILTER_OP_GE_DOUBLE:
	case FILTER_OP_LE_DOUBLE:
	case FILTER_OP_EQ_DOUBLE_S64:
	case FILTER_OP_NE_DOUBLE_S64:
	case FILTER_OP_GT_DOUBLE_S64:
	case FILTER_OP_LT_DOUBLE_S64:
	case FILTER_OP_GE_DOUBLE_S64:
	case FILTER_OP_LE_DOUBLE_S64:
	case FILTER_OP_EQ_S64_DOUBLE:
	case FILTER_OP_NE_S64_DOUBLE:
	case FILTER_OP_GT_S64_DOUBLE:
	case FILTER_OP_LT_S64_DOUBLE:
	case FILTER_OP_GE_S64_DOUBLE:
	case FILTER_OP_LE_S64_DOUBLE:
	case FILTER_OP_UNARY_PLUS_DOUBLE:
	case FILTER_OP_UNARY_MINUS_DOUBLE:
	case FILTER_OP_UNARY_NOT_DOUBLE:
	case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
	case FILTER_OP_LOAD_DOUBLE:
	case FILTER_OP_CAST_DOUBLE_TO_S64:
	case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
	{
		printk(KERN_WARNING "unsupported bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	/*
	 * Generic (not yet type-specialized) comparators: delegate the
	 * ax/bx type compatibility check to bin_op_compare_check().
	 */
	case FILTER_OP_EQ:
	{
		ret = bin_op_compare_check(stack, opcode, "==");
		if (ret < 0)
			goto end;
		break;
	}
	case FILTER_OP_NE:
	{
		ret = bin_op_compare_check(stack, opcode, "!=");
		if (ret < 0)
			goto end;
		break;
	}
	case FILTER_OP_GT:
	{
		ret = bin_op_compare_check(stack, opcode, ">");
		if (ret < 0)
			goto end;
		break;
	}
	case FILTER_OP_LT:
	{
		ret = bin_op_compare_check(stack, opcode, "<");
		if (ret < 0)
			goto end;
		break;
	}
	case FILTER_OP_GE:
	{
		ret = bin_op_compare_check(stack, opcode, ">=");
		if (ret < 0)
			goto end;
		break;
	}
	case FILTER_OP_LE:
	{
		ret = bin_op_compare_check(stack, opcode, "<=");
		if (ret < 0)
			goto end;
		break;
	}

	/* String comparators require string registers on both sides. */
	case FILTER_OP_EQ_STRING:
	case FILTER_OP_NE_STRING:
	case FILTER_OP_GT_STRING:
	case FILTER_OP_LT_STRING:
	case FILTER_OP_GE_STRING:
	case FILTER_OP_LE_STRING:
	{
		if (!vstack_ax(stack) || !vstack_bx(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_STRING
				|| vstack_bx(stack)->type != REG_STRING) {
			printk(KERN_WARNING "Unexpected register type for string comparator\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/*
	 * Glob comparators: at least ONE of the two operands must be a
	 * star-glob pattern (hence && here, not ||); the other side is
	 * the plain string being matched.
	 */
	case FILTER_OP_EQ_STAR_GLOB_STRING:
	case FILTER_OP_NE_STAR_GLOB_STRING:
	{
		if (!vstack_ax(stack) || !vstack_bx(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_STAR_GLOB_STRING
				&& vstack_bx(stack)->type != REG_STAR_GLOB_STRING) {
			printk(KERN_WARNING "Unexpected register type for globbing pattern comparator\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* Specialized integer comparators require s64 on both sides. */
	case FILTER_OP_EQ_S64:
	case FILTER_OP_NE_S64:
	case FILTER_OP_GT_S64:
	case FILTER_OP_LT_S64:
	case FILTER_OP_GE_S64:
	case FILTER_OP_LE_S64:
	{
		if (!vstack_ax(stack) || !vstack_bx(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64
				|| vstack_bx(stack)->type != REG_S64) {
			printk(KERN_WARNING "Unexpected register type for s64 comparator\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* Bitwise binary ops: numeric operand check via helper. */
	case FILTER_OP_BIT_RSHIFT:
		ret = bin_op_bitwise_check(stack, opcode, ">>");
		if (ret < 0)
			goto end;
		break;
	case FILTER_OP_BIT_LSHIFT:
		ret = bin_op_bitwise_check(stack, opcode, "<<");
		if (ret < 0)
			goto end;
		break;
	case FILTER_OP_BIT_AND:
		ret = bin_op_bitwise_check(stack, opcode, "&");
		if (ret < 0)
			goto end;
		break;
	case FILTER_OP_BIT_OR:
		ret = bin_op_bitwise_check(stack, opcode, "|");
		if (ret < 0)
			goto end;
		break;
	case FILTER_OP_BIT_XOR:
		ret = bin_op_bitwise_check(stack, opcode, "^");
		if (ret < 0)
			goto end;
		break;

	/* unary */
	case FILTER_OP_UNARY_PLUS:
	case FILTER_OP_UNARY_MINUS:
	case FILTER_OP_UNARY_NOT:
	{
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		switch (vstack_ax(stack)->type) {
		default:
		case REG_DOUBLE:
			printk(KERN_WARNING "unknown register type\n");
			ret = -EINVAL;
			goto end;

		case REG_STRING:
		case REG_STAR_GLOB_STRING:
			printk(KERN_WARNING "Unary op can only be applied to numeric or floating point registers\n");
			ret = -EINVAL;
			goto end;
		case REG_S64:
		case REG_TYPE_UNKNOWN:
			break;
		}
		break;
	}
	case FILTER_OP_UNARY_BIT_NOT:
	{
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		switch (vstack_ax(stack)->type) {
		default:
			printk(KERN_WARNING "unknown register type\n");
			ret = -EINVAL;
			goto end;

		case REG_STRING:
		case REG_STAR_GLOB_STRING:
		case REG_DOUBLE:
			printk(KERN_WARNING "Unary bitwise op can only be applied to numeric registers\n");
			ret = -EINVAL;
			goto end;
		case REG_S64:
			break;
		case REG_TYPE_UNKNOWN:
			break;
		}
		break;
	}

	/* Specialized unary ops demand a known s64 operand. */
	case FILTER_OP_UNARY_PLUS_S64:
	case FILTER_OP_UNARY_MINUS_S64:
	case FILTER_OP_UNARY_NOT_S64:
	{
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64) {
			printk(KERN_WARNING "Invalid register type\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* logical */
	case FILTER_OP_AND:
	case FILTER_OP_OR:
	{
		struct logical_op *insn = (struct logical_op *) pc;

		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64) {
			printk(KERN_WARNING "Logical comparator expects S64 register\n");
			ret = -EINVAL;
			goto end;
		}

		dbg_printk("Validate jumping to bytecode offset %u\n",
			(unsigned int) insn->skip_offset);
		/*
		 * Short-circuit jumps must only go forward: a backward
		 * (or self) target would allow loops in the bytecode,
		 * which would defeat termination guarantees.
		 */
		if (unlikely(start_pc + insn->skip_offset <= pc)) {
			printk(KERN_WARNING "Loops are not allowed in bytecode\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* load field ref */
	case FILTER_OP_LOAD_FIELD_REF:
	{
		/* Untyped field ref must have been specialized away. */
		printk(KERN_WARNING "Unknown field ref type\n");
		ret = -EINVAL;
		goto end;
	}
	case FILTER_OP_LOAD_FIELD_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
	case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
	case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate load field ref offset %u type string\n",
			ref->offset);
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_S64:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate load field ref offset %u type s64\n",
			ref->offset);
		break;
	}

	/* load from immediate operand */
	case FILTER_OP_LOAD_STRING:
	case FILTER_OP_LOAD_STAR_GLOB_STRING:
	{
		break;
	}

	case FILTER_OP_LOAD_S64:
	{
		break;
	}

	case FILTER_OP_CAST_TO_S64:
	{
		struct cast_op *insn = (struct cast_op *) pc;

		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		switch (vstack_ax(stack)->type) {
		default:
		case REG_DOUBLE:
			printk(KERN_WARNING "unknown register type\n");
			ret = -EINVAL;
			goto end;

		case REG_STRING:
		case REG_STAR_GLOB_STRING:
			printk(KERN_WARNING "Cast op can only be applied to numeric or floating point registers\n");
			ret = -EINVAL;
			goto end;
		case REG_S64:
			break;
		}
		/*
		 * NOTE(review): REG_DOUBLE is rejected by the switch just
		 * above, so this double-specific check appears unreachable
		 * in the kernel build — presumably kept for symmetry with
		 * the user-space validator; confirm before removing.
		 */
		if (insn->op == FILTER_OP_CAST_DOUBLE_TO_S64) {
			if (vstack_ax(stack)->type != REG_DOUBLE) {
				printk(KERN_WARNING "Cast expects double\n");
				ret = -EINVAL;
				goto end;
			}
		}
		break;
	}
	case FILTER_OP_CAST_NOP:
	{
		break;
	}

	/* get context ref */
	case FILTER_OP_GET_CONTEXT_REF:
	{
		/* Untyped context ref must have been specialized away. */
		printk(KERN_WARNING "Unknown get context ref type\n");
		ret = -EINVAL;
		goto end;
	}
	case FILTER_OP_GET_CONTEXT_REF_STRING:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate get context ref offset %u type string\n",
			ref->offset);
		break;
	}
	case FILTER_OP_GET_CONTEXT_REF_S64:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate get context ref offset %u type s64\n",
			ref->offset);
		break;
	}

	/*
	 * Instructions for recursive traversal through composed types.
	 */
	case FILTER_OP_GET_CONTEXT_ROOT:
	{
		dbg_printk("Validate get context root\n");
		break;
	}
	case FILTER_OP_GET_APP_CONTEXT_ROOT:
	{
		dbg_printk("Validate get app context root\n");
		break;
	}
	case FILTER_OP_GET_PAYLOAD_ROOT:
	{
		dbg_printk("Validate get payload root\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD:
	{
		/*
		 * We tolerate that field type is unknown at validation,
		 * because we are performing the load specialization in
		 * a phase after validation.
		 */
		dbg_printk("Validate load field\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_S8:
	{
		dbg_printk("Validate load field s8\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_S16:
	{
		dbg_printk("Validate load field s16\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_S32:
	{
		dbg_printk("Validate load field s32\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_S64:
	{
		dbg_printk("Validate load field s64\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_U8:
	{
		dbg_printk("Validate load field u8\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_U16:
	{
		dbg_printk("Validate load field u16\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_U32:
	{
		dbg_printk("Validate load field u32\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_U64:
	{
		dbg_printk("Validate load field u64\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_STRING:
	{
		dbg_printk("Validate load field string\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_SEQUENCE:
	{
		dbg_printk("Validate load field sequence\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_DOUBLE:
	{
		dbg_printk("Validate load field double\n");
		break;
	}

	case FILTER_OP_GET_SYMBOL:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct get_symbol *sym = (struct get_symbol *) insn->data;

		dbg_printk("Validate get symbol offset %u\n", sym->offset);
		break;
	}

	case FILTER_OP_GET_SYMBOL_FIELD:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct get_symbol *sym = (struct get_symbol *) insn->data;

		dbg_printk("Validate get symbol field offset %u\n", sym->offset);
		break;
	}

	case FILTER_OP_GET_INDEX_U16:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct get_index_u16 *get_index = (struct get_index_u16 *) insn->data;

		dbg_printk("Validate get index u16 index %u\n", get_index->index);
		break;
	}

	case FILTER_OP_GET_INDEX_U64:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct get_index_u64 *get_index = (struct get_index_u64 *) insn->data;

		dbg_printk("Validate get index u64 index %llu\n",
			(unsigned long long) get_index->index);
		break;
	}
	}
end:
	return ret;
}
-
-/*
- * Return value:
- * 0: success
- * <0: error
- */
-static
-int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
-               struct mp_table *mp_table,
-               struct vstack *stack,
-               char *start_pc,
-               char *pc)
-{
-       int ret, found = 0;
-       unsigned long target_pc = pc - start_pc;
-       unsigned long hash;
-       struct hlist_head *head;
-       struct mp_node *mp_node;
-
-       /* Validate the context resulting from the previous instruction */
-       ret = validate_instruction_context(bytecode, stack, start_pc, pc);
-       if (ret < 0)
-               return ret;
-
-       /* Validate merge points */
-       hash = jhash_1word(target_pc, 0);
-       head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
-       lttng_hlist_for_each_entry(mp_node, head, node) {
-               if (lttng_hash_match(mp_node, target_pc)) {
-                       found = 1;
-                       break;
-               }
-       }
-       if (found) {
-               dbg_printk("Filter: validate merge point at offset %lu\n",
-                               target_pc);
-               if (merge_points_compare(stack, &mp_node->stack)) {
-                       printk(KERN_WARNING "Merge points differ for offset %lu\n",
-                               target_pc);
-                       return -EINVAL;
-               }
-               /* Once validated, we can remove the merge point */
-               dbg_printk("Filter: remove merge point at offset %lu\n",
-                               target_pc);
-               hlist_del(&mp_node->node);
-       }
-       return 0;
-}
-
-/*
- * Return value:
- * >0: going to next insn.
- * 0: success, stop iteration.
- * <0: error
- */
-static
-int exec_insn(struct bytecode_runtime *bytecode,
-               struct mp_table *mp_table,
-               struct vstack *stack,
-               char **_next_pc,
-               char *pc)
-{
-       int ret = 1;
-       char *next_pc = *_next_pc;
-
-       switch (*(filter_opcode_t *) pc) {
-       case FILTER_OP_UNKNOWN:
-       default:
-       {
-               printk(KERN_WARNING "unknown bytecode op %u\n",
-                       (unsigned int) *(filter_opcode_t *) pc);
-               ret = -EINVAL;
-               goto end;
-       }
-
-       case FILTER_OP_RETURN:
-       {
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               switch (vstack_ax(stack)->type) {
-               case REG_S64:
-               case REG_TYPE_UNKNOWN:
-                       break;
-               default:
-                       printk(KERN_WARNING "Unexpected register type %d at end of bytecode\n",
-                               (int) vstack_ax(stack)->type);
-                       ret = -EINVAL;
-                       goto end;
-               }
-
-               ret = 0;
-               goto end;
-       }
-
-       case FILTER_OP_RETURN_S64:
-       {
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               switch (vstack_ax(stack)->type) {
-               case REG_S64:
-                       break;
-               default:
-               case REG_TYPE_UNKNOWN:
-                       printk(KERN_WARNING "Unexpected register type %d at end of bytecode\n",
-                               (int) vstack_ax(stack)->type);
-                       ret = -EINVAL;
-                       goto end;
-               }
-
-               ret = 0;
-               goto end;
-       }
-
-       /* binary */
-       case FILTER_OP_MUL:
-       case FILTER_OP_DIV:
-       case FILTER_OP_MOD:
-       case FILTER_OP_PLUS:
-       case FILTER_OP_MINUS:
-       /* Floating point */
-       case FILTER_OP_EQ_DOUBLE:
-       case FILTER_OP_NE_DOUBLE:
-       case FILTER_OP_GT_DOUBLE:
-       case FILTER_OP_LT_DOUBLE:
-       case FILTER_OP_GE_DOUBLE:
-       case FILTER_OP_LE_DOUBLE:
-       case FILTER_OP_EQ_DOUBLE_S64:
-       case FILTER_OP_NE_DOUBLE_S64:
-       case FILTER_OP_GT_DOUBLE_S64:
-       case FILTER_OP_LT_DOUBLE_S64:
-       case FILTER_OP_GE_DOUBLE_S64:
-       case FILTER_OP_LE_DOUBLE_S64:
-       case FILTER_OP_EQ_S64_DOUBLE:
-       case FILTER_OP_NE_S64_DOUBLE:
-       case FILTER_OP_GT_S64_DOUBLE:
-       case FILTER_OP_LT_S64_DOUBLE:
-       case FILTER_OP_GE_S64_DOUBLE:
-       case FILTER_OP_LE_S64_DOUBLE:
-       case FILTER_OP_UNARY_PLUS_DOUBLE:
-       case FILTER_OP_UNARY_MINUS_DOUBLE:
-       case FILTER_OP_UNARY_NOT_DOUBLE:
-       case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
-       case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
-       case FILTER_OP_LOAD_DOUBLE:
-       case FILTER_OP_CAST_DOUBLE_TO_S64:
-       {
-               printk(KERN_WARNING "unsupported bytecode op %u\n",
-                       (unsigned int) *(filter_opcode_t *) pc);
-               ret = -EINVAL;
-               goto end;
-       }
-
-       case FILTER_OP_EQ:
-       case FILTER_OP_NE:
-       case FILTER_OP_GT:
-       case FILTER_OP_LT:
-       case FILTER_OP_GE:
-       case FILTER_OP_LE:
-       case FILTER_OP_EQ_STRING:
-       case FILTER_OP_NE_STRING:
-       case FILTER_OP_GT_STRING:
-       case FILTER_OP_LT_STRING:
-       case FILTER_OP_GE_STRING:
-       case FILTER_OP_LE_STRING:
-       case FILTER_OP_EQ_STAR_GLOB_STRING:
-       case FILTER_OP_NE_STAR_GLOB_STRING:
-       case FILTER_OP_EQ_S64:
-       case FILTER_OP_NE_S64:
-       case FILTER_OP_GT_S64:
-       case FILTER_OP_LT_S64:
-       case FILTER_OP_GE_S64:
-       case FILTER_OP_LE_S64:
-       case FILTER_OP_BIT_RSHIFT:
-       case FILTER_OP_BIT_LSHIFT:
-       case FILTER_OP_BIT_AND:
-       case FILTER_OP_BIT_OR:
-       case FILTER_OP_BIT_XOR:
-       {
-               /* Pop 2, push 1 */
-               if (vstack_pop(stack)) {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               switch (vstack_ax(stack)->type) {
-               case REG_S64:
-               case REG_DOUBLE:
-               case REG_STRING:
-               case REG_STAR_GLOB_STRING:
-               case REG_TYPE_UNKNOWN:
-                       break;
-               default:
-                       printk(KERN_WARNING "Unexpected register type %d for operation\n",
-                               (int) vstack_ax(stack)->type);
-                       ret = -EINVAL;
-                       goto end;
-               }
-
-               vstack_ax(stack)->type = REG_S64;
-               next_pc += sizeof(struct binary_op);
-               break;
-       }
-
-       /* unary */
-       case FILTER_OP_UNARY_PLUS:
-       case FILTER_OP_UNARY_MINUS:
-       {
-               /* Pop 1, push 1 */
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               switch (vstack_ax(stack)->type) {
-               case REG_S64:
-               case REG_TYPE_UNKNOWN:
-                       break;
-               default:
-                       printk(KERN_WARNING "Unexpected register type %d for operation\n",
-                               (int) vstack_ax(stack)->type);
-                       ret = -EINVAL;
-                       goto end;
-               }
-
-               vstack_ax(stack)->type = REG_TYPE_UNKNOWN;
-               next_pc += sizeof(struct unary_op);
-               break;
-       }
-
-       case FILTER_OP_UNARY_PLUS_S64:
-       case FILTER_OP_UNARY_MINUS_S64:
-       case FILTER_OP_UNARY_NOT_S64:
-       {
-               /* Pop 1, push 1 */
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               switch (vstack_ax(stack)->type) {
-               case REG_S64:
-                       break;
-               default:
-                       printk(KERN_WARNING "Unexpected register type %d for operation\n",
-                               (int) vstack_ax(stack)->type);
-                       ret = -EINVAL;
-                       goto end;
-               }
-
-               vstack_ax(stack)->type = REG_S64;
-               next_pc += sizeof(struct unary_op);
-               break;
-       }
-
-       case FILTER_OP_UNARY_NOT:
-       {
-               /* Pop 1, push 1 */
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               switch (vstack_ax(stack)->type) {
-               case REG_S64:
-               case REG_TYPE_UNKNOWN:
-                       break;
-               default:
-                       printk(KERN_WARNING "Unexpected register type %d for operation\n",
-                               (int) vstack_ax(stack)->type);
-                       ret = -EINVAL;
-                       goto end;
-               }
-
-               vstack_ax(stack)->type = REG_S64;
-               next_pc += sizeof(struct unary_op);
-               break;
-       }
-
-       case FILTER_OP_UNARY_BIT_NOT:
-       {
-               /* Pop 1, push 1 */
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               switch (vstack_ax(stack)->type) {
-               case REG_S64:
-               case REG_TYPE_UNKNOWN:
-                       break;
-               case REG_DOUBLE:
-               default:
-                       printk(KERN_WARNING "Unexpected register type %d for operation\n",
-                               (int) vstack_ax(stack)->type);
-                       ret = -EINVAL;
-                       goto end;
-               }
-
-               vstack_ax(stack)->type = REG_S64;
-               next_pc += sizeof(struct unary_op);
-               break;
-       }
-
-       /* logical */
-       case FILTER_OP_AND:
-       case FILTER_OP_OR:
-       {
-               struct logical_op *insn = (struct logical_op *) pc;
-               int merge_ret;
-
-               /* Add merge point to table */
-               merge_ret = merge_point_add_check(mp_table,
-                                       insn->skip_offset, stack);
-               if (merge_ret) {
-                       ret = merge_ret;
-                       goto end;
-               }
-
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               /* There is always a cast-to-s64 operation before a or/and op. */
-               switch (vstack_ax(stack)->type) {
-               case REG_S64:
-                       break;
-               default:
-                       printk(KERN_WARNING "Incorrect register type %d for operation\n",
-                               (int) vstack_ax(stack)->type);
-                       ret = -EINVAL;
-                       goto end;
-               }
-
-               /* Continue to next instruction */
-               /* Pop 1 when jump not taken */
-               if (vstack_pop(stack)) {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               next_pc += sizeof(struct logical_op);
-               break;
-       }
-
-       /* load field ref */
-       case FILTER_OP_LOAD_FIELD_REF:
-       {
-               printk(KERN_WARNING "Unknown field ref type\n");
-               ret = -EINVAL;
-               goto end;
-       }
-       /* get context ref */
-       case FILTER_OP_GET_CONTEXT_REF:
-       {
-               printk(KERN_WARNING "Unknown get context ref type\n");
-               ret = -EINVAL;
-               goto end;
-       }
-       case FILTER_OP_LOAD_FIELD_REF_STRING:
-       case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
-       case FILTER_OP_GET_CONTEXT_REF_STRING:
-       case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
-       case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
-       {
-               if (vstack_push(stack)) {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               vstack_ax(stack)->type = REG_STRING;
-               next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
-               break;
-       }
-       case FILTER_OP_LOAD_FIELD_REF_S64:
-       case FILTER_OP_GET_CONTEXT_REF_S64:
-       {
-               if (vstack_push(stack)) {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               vstack_ax(stack)->type = REG_S64;
-               next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
-               break;
-       }
-
-       /* load from immediate operand */
-       case FILTER_OP_LOAD_STRING:
-       {
-               struct load_op *insn = (struct load_op *) pc;
-
-               if (vstack_push(stack)) {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               vstack_ax(stack)->type = REG_STRING;
-               next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
-               break;
-       }
-
-       case FILTER_OP_LOAD_STAR_GLOB_STRING:
-       {
-               struct load_op *insn = (struct load_op *) pc;
-
-               if (vstack_push(stack)) {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
-               next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
-               break;
-       }
-
-       case FILTER_OP_LOAD_S64:
-       {
-               if (vstack_push(stack)) {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               vstack_ax(stack)->type = REG_S64;
-               next_pc += sizeof(struct load_op)
-                               + sizeof(struct literal_numeric);
-               break;
-       }
-
-       case FILTER_OP_CAST_TO_S64:
-       {
-               /* Pop 1, push 1 */
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               switch (vstack_ax(stack)->type) {
-               case REG_S64:
-               case REG_DOUBLE:
-               case REG_TYPE_UNKNOWN:
-                       break;
-               default:
-                       printk(KERN_WARNING "Incorrect register type %d for cast\n",
-                               (int) vstack_ax(stack)->type);
-                       ret = -EINVAL;
-                       goto end;
-               }
-               vstack_ax(stack)->type = REG_S64;
-               next_pc += sizeof(struct cast_op);
-               break;
-       }
-       case FILTER_OP_CAST_NOP:
-       {
-               next_pc += sizeof(struct cast_op);
-               break;
-       }
-
-       /*
-        * Instructions for recursive traversal through composed types.
-        */
-       case FILTER_OP_GET_CONTEXT_ROOT:
-       case FILTER_OP_GET_APP_CONTEXT_ROOT:
-       case FILTER_OP_GET_PAYLOAD_ROOT:
-       {
-               if (vstack_push(stack)) {
-                       ret = -EINVAL;
-                       goto end;
-               }
-               vstack_ax(stack)->type = REG_PTR;
-               next_pc += sizeof(struct load_op);
-               break;
-       }
-
-       case FILTER_OP_LOAD_FIELD:
-       {
-               /* Pop 1, push 1 */
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (vstack_ax(stack)->type != REG_PTR) {
-                       printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               vstack_ax(stack)->type = REG_TYPE_UNKNOWN;
-               next_pc += sizeof(struct load_op);
-               break;
-       }
-
-       case FILTER_OP_LOAD_FIELD_S8:
-       case FILTER_OP_LOAD_FIELD_S16:
-       case FILTER_OP_LOAD_FIELD_S32:
-       case FILTER_OP_LOAD_FIELD_S64:
-       case FILTER_OP_LOAD_FIELD_U8:
-       case FILTER_OP_LOAD_FIELD_U16:
-       case FILTER_OP_LOAD_FIELD_U32:
-       case FILTER_OP_LOAD_FIELD_U64:
-       {
-               /* Pop 1, push 1 */
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (vstack_ax(stack)->type != REG_PTR) {
-                       printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               vstack_ax(stack)->type = REG_S64;
-               next_pc += sizeof(struct load_op);
-               break;
-       }
-
-       case FILTER_OP_LOAD_FIELD_STRING:
-       case FILTER_OP_LOAD_FIELD_SEQUENCE:
-       {
-               /* Pop 1, push 1 */
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (vstack_ax(stack)->type != REG_PTR) {
-                       printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               vstack_ax(stack)->type = REG_STRING;
-               next_pc += sizeof(struct load_op);
-               break;
-       }
-
-       case FILTER_OP_LOAD_FIELD_DOUBLE:
-       {
-               /* Pop 1, push 1 */
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (vstack_ax(stack)->type != REG_PTR) {
-                       printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               vstack_ax(stack)->type = REG_DOUBLE;
-               next_pc += sizeof(struct load_op);
-               break;
-       }
-
-       case FILTER_OP_GET_SYMBOL:
-       case FILTER_OP_GET_SYMBOL_FIELD:
-       {
-               /* Pop 1, push 1 */
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (vstack_ax(stack)->type != REG_PTR) {
-                       printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
-               break;
-       }
-
-       case FILTER_OP_GET_INDEX_U16:
-       {
-               /* Pop 1, push 1 */
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (vstack_ax(stack)->type != REG_PTR) {
-                       printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
-               break;
-       }
-
-       case FILTER_OP_GET_INDEX_U64:
-       {
-               /* Pop 1, push 1 */
-               if (!vstack_ax(stack)) {
-                       printk(KERN_WARNING "Empty stack\n\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               if (vstack_ax(stack)->type != REG_PTR) {
-                       printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
-                       ret = -EINVAL;
-                       goto end;
-               }
-               next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
-               break;
-       }
-
-       }
-end:
-       *_next_pc = next_pc;
-       return ret;
-}
-
-/*
- * Never called concurrently (hash seed is shared).
- */
-int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode)
-{
-       struct mp_table *mp_table;
-       char *pc, *next_pc, *start_pc;
-       int ret = -EINVAL;
-       struct vstack stack;
-
-       vstack_init(&stack);
-
-       mp_table = kzalloc(sizeof(*mp_table), GFP_KERNEL);
-       if (!mp_table) {
-               printk(KERN_WARNING "Error allocating hash table for bytecode validation\n");
-               return -ENOMEM;
-       }
-       start_pc = &bytecode->code[0];
-       for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
-                       pc = next_pc) {
-               ret = bytecode_validate_overflow(bytecode, start_pc, pc);
-               if (ret != 0) {
-                       if (ret == -ERANGE)
-                               printk(KERN_WARNING "filter bytecode overflow\n");
-                       goto end;
-               }
-               dbg_printk("Validating op %s (%u)\n",
-                       lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc),
-                       (unsigned int) *(filter_opcode_t *) pc);
-
-               /*
-                * For each instruction, validate the current context
-                * (traversal of entire execution flow), and validate
-                * all merge points targeting this instruction.
-                */
-               ret = validate_instruction_all_contexts(bytecode, mp_table,
-                                       &stack, start_pc, pc);
-               if (ret)
-                       goto end;
-               ret = exec_insn(bytecode, mp_table, &stack, &next_pc, pc);
-               if (ret <= 0)
-                       goto end;
-       }
-end:
-       if (delete_all_nodes(mp_table)) {
-               if (!ret) {
-                       printk(KERN_WARNING "Unexpected merge points\n");
-                       ret = -EINVAL;
-               }
-       }
-       kfree(mp_table);
-       return ret;
-}
diff --git a/src/lttng-filter.c b/src/lttng-filter.c
deleted file mode 100644 (file)
index 12c2264..0000000
+++ /dev/null
@@ -1,565 +0,0 @@
-/* SPDX-License-Identifier: MIT
- *
- * lttng-filter.c
- *
- * LTTng modules filter code.
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/list.h>
-#include <linux/slab.h>
-
-#include <lttng/filter.h>
-
-static const char *opnames[] = {
-       [ FILTER_OP_UNKNOWN ] = "UNKNOWN",
-
-       [ FILTER_OP_RETURN ] = "RETURN",
-
-       /* binary */
-       [ FILTER_OP_MUL ] = "MUL",
-       [ FILTER_OP_DIV ] = "DIV",
-       [ FILTER_OP_MOD ] = "MOD",
-       [ FILTER_OP_PLUS ] = "PLUS",
-       [ FILTER_OP_MINUS ] = "MINUS",
-       [ FILTER_OP_BIT_RSHIFT ] = "BIT_RSHIFT",
-       [ FILTER_OP_BIT_LSHIFT ] = "BIT_LSHIFT",
-       [ FILTER_OP_BIT_AND ] = "BIT_AND",
-       [ FILTER_OP_BIT_OR ] = "BIT_OR",
-       [ FILTER_OP_BIT_XOR ] = "BIT_XOR",
-
-       /* binary comparators */
-       [ FILTER_OP_EQ ] = "EQ",
-       [ FILTER_OP_NE ] = "NE",
-       [ FILTER_OP_GT ] = "GT",
-       [ FILTER_OP_LT ] = "LT",
-       [ FILTER_OP_GE ] = "GE",
-       [ FILTER_OP_LE ] = "LE",
-
-       /* string binary comparators */
-       [ FILTER_OP_EQ_STRING ] = "EQ_STRING",
-       [ FILTER_OP_NE_STRING ] = "NE_STRING",
-       [ FILTER_OP_GT_STRING ] = "GT_STRING",
-       [ FILTER_OP_LT_STRING ] = "LT_STRING",
-       [ FILTER_OP_GE_STRING ] = "GE_STRING",
-       [ FILTER_OP_LE_STRING ] = "LE_STRING",
-
-       /* s64 binary comparators */
-       [ FILTER_OP_EQ_S64 ] = "EQ_S64",
-       [ FILTER_OP_NE_S64 ] = "NE_S64",
-       [ FILTER_OP_GT_S64 ] = "GT_S64",
-       [ FILTER_OP_LT_S64 ] = "LT_S64",
-       [ FILTER_OP_GE_S64 ] = "GE_S64",
-       [ FILTER_OP_LE_S64 ] = "LE_S64",
-
-       /* double binary comparators */
-       [ FILTER_OP_EQ_DOUBLE ] = "EQ_DOUBLE",
-       [ FILTER_OP_NE_DOUBLE ] = "NE_DOUBLE",
-       [ FILTER_OP_GT_DOUBLE ] = "GT_DOUBLE",
-       [ FILTER_OP_LT_DOUBLE ] = "LT_DOUBLE",
-       [ FILTER_OP_GE_DOUBLE ] = "GE_DOUBLE",
-       [ FILTER_OP_LE_DOUBLE ] = "LE_DOUBLE",
-
-       /* Mixed S64-double binary comparators */
-       [ FILTER_OP_EQ_DOUBLE_S64 ] = "EQ_DOUBLE_S64",
-       [ FILTER_OP_NE_DOUBLE_S64 ] = "NE_DOUBLE_S64",
-       [ FILTER_OP_GT_DOUBLE_S64 ] = "GT_DOUBLE_S64",
-       [ FILTER_OP_LT_DOUBLE_S64 ] = "LT_DOUBLE_S64",
-       [ FILTER_OP_GE_DOUBLE_S64 ] = "GE_DOUBLE_S64",
-       [ FILTER_OP_LE_DOUBLE_S64 ] = "LE_DOUBLE_S64",
-
-       [ FILTER_OP_EQ_S64_DOUBLE ] = "EQ_S64_DOUBLE",
-       [ FILTER_OP_NE_S64_DOUBLE ] = "NE_S64_DOUBLE",
-       [ FILTER_OP_GT_S64_DOUBLE ] = "GT_S64_DOUBLE",
-       [ FILTER_OP_LT_S64_DOUBLE ] = "LT_S64_DOUBLE",
-       [ FILTER_OP_GE_S64_DOUBLE ] = "GE_S64_DOUBLE",
-       [ FILTER_OP_LE_S64_DOUBLE ] = "LE_S64_DOUBLE",
-
-       /* unary */
-       [ FILTER_OP_UNARY_PLUS ] = "UNARY_PLUS",
-       [ FILTER_OP_UNARY_MINUS ] = "UNARY_MINUS",
-       [ FILTER_OP_UNARY_NOT ] = "UNARY_NOT",
-       [ FILTER_OP_UNARY_PLUS_S64 ] = "UNARY_PLUS_S64",
-       [ FILTER_OP_UNARY_MINUS_S64 ] = "UNARY_MINUS_S64",
-       [ FILTER_OP_UNARY_NOT_S64 ] = "UNARY_NOT_S64",
-       [ FILTER_OP_UNARY_PLUS_DOUBLE ] = "UNARY_PLUS_DOUBLE",
-       [ FILTER_OP_UNARY_MINUS_DOUBLE ] = "UNARY_MINUS_DOUBLE",
-       [ FILTER_OP_UNARY_NOT_DOUBLE ] = "UNARY_NOT_DOUBLE",
-
-       /* logical */
-       [ FILTER_OP_AND ] = "AND",
-       [ FILTER_OP_OR ] = "OR",
-
-       /* load field ref */
-       [ FILTER_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF",
-       [ FILTER_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING",
-       [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE",
-       [ FILTER_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64",
-       [ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE",
-
-       /* load from immediate operand */
-       [ FILTER_OP_LOAD_STRING ] = "LOAD_STRING",
-       [ FILTER_OP_LOAD_S64 ] = "LOAD_S64",
-       [ FILTER_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE",
-
-       /* cast */
-       [ FILTER_OP_CAST_TO_S64 ] = "CAST_TO_S64",
-       [ FILTER_OP_CAST_DOUBLE_TO_S64 ] = "CAST_DOUBLE_TO_S64",
-       [ FILTER_OP_CAST_NOP ] = "CAST_NOP",
-
-       /* get context ref */
-       [ FILTER_OP_GET_CONTEXT_REF ] = "GET_CONTEXT_REF",
-       [ FILTER_OP_GET_CONTEXT_REF_STRING ] = "GET_CONTEXT_REF_STRING",
-       [ FILTER_OP_GET_CONTEXT_REF_S64 ] = "GET_CONTEXT_REF_S64",
-       [ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = "GET_CONTEXT_REF_DOUBLE",
-
-       /* load userspace field ref */
-       [ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = "LOAD_FIELD_REF_USER_STRING",
-       [ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = "LOAD_FIELD_REF_USER_SEQUENCE",
-
-       /*
-        * load immediate star globbing pattern (literal string)
-        * from immediate.
-        */
-       [ FILTER_OP_LOAD_STAR_GLOB_STRING ] = "LOAD_STAR_GLOB_STRING",
-
-       /* globbing pattern binary operator: apply to */
-       [ FILTER_OP_EQ_STAR_GLOB_STRING ] = "EQ_STAR_GLOB_STRING",
-       [ FILTER_OP_NE_STAR_GLOB_STRING ] = "NE_STAR_GLOB_STRING",
-
-       /*
-        * Instructions for recursive traversal through composed types.
-        */
-       [ FILTER_OP_GET_CONTEXT_ROOT ] = "GET_CONTEXT_ROOT",
-       [ FILTER_OP_GET_APP_CONTEXT_ROOT ] = "GET_APP_CONTEXT_ROOT",
-       [ FILTER_OP_GET_PAYLOAD_ROOT ] = "GET_PAYLOAD_ROOT",
-
-       [ FILTER_OP_GET_SYMBOL ] = "GET_SYMBOL",
-       [ FILTER_OP_GET_SYMBOL_FIELD ] = "GET_SYMBOL_FIELD",
-       [ FILTER_OP_GET_INDEX_U16 ] = "GET_INDEX_U16",
-       [ FILTER_OP_GET_INDEX_U64 ] = "GET_INDEX_U64",
-
-       [ FILTER_OP_LOAD_FIELD ] = "LOAD_FIELD",
-       [ FILTER_OP_LOAD_FIELD_S8 ] = "LOAD_FIELD_S8",
-       [ FILTER_OP_LOAD_FIELD_S16 ] = "LOAD_FIELD_S16",
-       [ FILTER_OP_LOAD_FIELD_S32 ] = "LOAD_FIELD_S32",
-       [ FILTER_OP_LOAD_FIELD_S64 ] = "LOAD_FIELD_S64",
-       [ FILTER_OP_LOAD_FIELD_U8 ] = "LOAD_FIELD_U8",
-       [ FILTER_OP_LOAD_FIELD_U16 ] = "LOAD_FIELD_U16",
-       [ FILTER_OP_LOAD_FIELD_U32 ] = "LOAD_FIELD_U32",
-       [ FILTER_OP_LOAD_FIELD_U64 ] = "LOAD_FIELD_U64",
-       [ FILTER_OP_LOAD_FIELD_STRING ] = "LOAD_FIELD_STRING",
-       [ FILTER_OP_LOAD_FIELD_SEQUENCE ] = "LOAD_FIELD_SEQUENCE",
-       [ FILTER_OP_LOAD_FIELD_DOUBLE ] = "LOAD_FIELD_DOUBLE",
-
-       [ FILTER_OP_UNARY_BIT_NOT ] = "UNARY_BIT_NOT",
-
-       [ FILTER_OP_RETURN_S64 ] = "RETURN_S64",
-};
-
-const char *lttng_filter_print_op(enum filter_op op)
-{
-       if (op >= NR_FILTER_OPS)
-               return "UNKNOWN";
-       else
-               return opnames[op];
-}
-
-static
-int apply_field_reloc(struct lttng_event *event,
-               struct bytecode_runtime *runtime,
-               uint32_t runtime_len,
-               uint32_t reloc_offset,
-               const char *field_name,
-               enum filter_op filter_op)
-{
-       const struct lttng_event_desc *desc;
-       const struct lttng_event_field *fields, *field = NULL;
-       unsigned int nr_fields, i;
-       struct load_op *op;
-       uint32_t field_offset = 0;
-
-       dbg_printk("Apply field reloc: %u %s\n", reloc_offset, field_name);
-
-       /* Lookup event by name */
-       desc = event->desc;
-       if (!desc)
-               return -EINVAL;
-       fields = desc->fields;
-       if (!fields)
-               return -EINVAL;
-       nr_fields = desc->nr_fields;
-       for (i = 0; i < nr_fields; i++) {
-               if (fields[i].nofilter)
-                       continue;
-               if (!strcmp(fields[i].name, field_name)) {
-                       field = &fields[i];
-                       break;
-               }
-               /* compute field offset */
-               switch (fields[i].type.atype) {
-               case atype_integer:
-               case atype_enum_nestable:
-                       field_offset += sizeof(int64_t);
-                       break;
-               case atype_array_nestable:
-                       if (!lttng_is_bytewise_integer(fields[i].type.u.array_nestable.elem_type))
-                               return -EINVAL;
-                       field_offset += sizeof(unsigned long);
-                       field_offset += sizeof(void *);
-                       break;
-               case atype_sequence_nestable:
-                       if (!lttng_is_bytewise_integer(fields[i].type.u.sequence_nestable.elem_type))
-                               return -EINVAL;
-                       field_offset += sizeof(unsigned long);
-                       field_offset += sizeof(void *);
-                       break;
-               case atype_string:
-                       field_offset += sizeof(void *);
-                       break;
-               case atype_struct_nestable:     /* Unsupported. */
-               case atype_variant_nestable:    /* Unsupported. */
-               default:
-                       return -EINVAL;
-               }
-       }
-       if (!field)
-               return -EINVAL;
-
-       /* Check if field offset is too large for 16-bit offset */
-       if (field_offset > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
-               return -EINVAL;
-
-       /* set type */
-       op = (struct load_op *) &runtime->code[reloc_offset];
-
-       switch (filter_op) {
-       case FILTER_OP_LOAD_FIELD_REF:
-       {
-               struct field_ref *field_ref;
-
-               field_ref = (struct field_ref *) op->data;
-               switch (field->type.atype) {
-               case atype_integer:
-               case atype_enum_nestable:
-                       op->op = FILTER_OP_LOAD_FIELD_REF_S64;
-                       break;
-               case atype_array_nestable:
-               case atype_sequence_nestable:
-                       if (field->user)
-                               op->op = FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE;
-                       else
-                               op->op = FILTER_OP_LOAD_FIELD_REF_SEQUENCE;
-                       break;
-               case atype_string:
-                       if (field->user)
-                               op->op = FILTER_OP_LOAD_FIELD_REF_USER_STRING;
-                       else
-                               op->op = FILTER_OP_LOAD_FIELD_REF_STRING;
-                       break;
-               case atype_struct_nestable:     /* Unsupported. */
-               case atype_variant_nestable:    /* Unsupported. */
-               default:
-                       return -EINVAL;
-               }
-               /* set offset */
-               field_ref->offset = (uint16_t) field_offset;
-               break;
-       }
-       default:
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static
-int apply_context_reloc(struct lttng_event *event,
-               struct bytecode_runtime *runtime,
-               uint32_t runtime_len,
-               uint32_t reloc_offset,
-               const char *context_name,
-               enum filter_op filter_op)
-{
-       struct load_op *op;
-       struct lttng_ctx_field *ctx_field;
-       int idx;
-
-       dbg_printk("Apply context reloc: %u %s\n", reloc_offset, context_name);
-
-       /* Get context index */
-       idx = lttng_get_context_index(lttng_static_ctx, context_name);
-       if (idx < 0)
-               return -ENOENT;
-
-       /* Check if idx is too large for 16-bit offset */
-       if (idx > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
-               return -EINVAL;
-
-       /* Get context return type */
-       ctx_field = &lttng_static_ctx->fields[idx];
-       op = (struct load_op *) &runtime->code[reloc_offset];
-
-       switch (filter_op) {
-       case FILTER_OP_GET_CONTEXT_REF:
-       {
-               struct field_ref *field_ref;
-
-               field_ref = (struct field_ref *) op->data;
-               switch (ctx_field->event_field.type.atype) {
-               case atype_integer:
-               case atype_enum_nestable:
-                       op->op = FILTER_OP_GET_CONTEXT_REF_S64;
-                       break;
-                       /* Sequence and array supported as string */
-               case atype_string:
-                       BUG_ON(ctx_field->event_field.user);
-                       op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
-                       break;
-               case atype_array_nestable:
-                       if (!lttng_is_bytewise_integer(ctx_field->event_field.type.u.array_nestable.elem_type))
-                               return -EINVAL;
-                       BUG_ON(ctx_field->event_field.user);
-                       op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
-                       break;
-               case atype_sequence_nestable:
-                       if (!lttng_is_bytewise_integer(ctx_field->event_field.type.u.sequence_nestable.elem_type))
-                               return -EINVAL;
-                       BUG_ON(ctx_field->event_field.user);
-                       op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
-                       break;
-               case atype_struct_nestable:     /* Unsupported. */
-               case atype_variant_nestable:    /* Unsupported. */
-               default:
-                       return -EINVAL;
-               }
-               /* set offset to context index within channel contexts */
-               field_ref->offset = (uint16_t) idx;
-               break;
-       }
-       default:
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static
-int apply_reloc(struct lttng_event *event,
-               struct bytecode_runtime *runtime,
-               uint32_t runtime_len,
-               uint32_t reloc_offset,
-               const char *name)
-{
-       struct load_op *op;
-
-       dbg_printk("Apply reloc: %u %s\n", reloc_offset, name);
-
-       /* Ensure that the reloc is within the code */
-       if (runtime_len - reloc_offset < sizeof(uint16_t))
-               return -EINVAL;
-
-       op = (struct load_op *) &runtime->code[reloc_offset];
-       switch (op->op) {
-       case FILTER_OP_LOAD_FIELD_REF:
-               return apply_field_reloc(event, runtime, runtime_len,
-                       reloc_offset, name, op->op);
-       case FILTER_OP_GET_CONTEXT_REF:
-               return apply_context_reloc(event, runtime, runtime_len,
-                       reloc_offset, name, op->op);
-       case FILTER_OP_GET_SYMBOL:
-       case FILTER_OP_GET_SYMBOL_FIELD:
-               /*
-                * Will be handled by load specialize phase or
-                * dynamically by interpreter.
-                */
-               return 0;
-       default:
-               printk(KERN_WARNING "Unknown reloc op type %u\n", op->op);
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static
-int bytecode_is_linked(struct lttng_filter_bytecode_node *filter_bytecode,
-               struct lttng_event *event)
-{
-       struct lttng_bytecode_runtime *bc_runtime;
-
-       list_for_each_entry(bc_runtime,
-                       &event->bytecode_runtime_head, node) {
-               if (bc_runtime->bc == filter_bytecode)
-                       return 1;
-       }
-       return 0;
-}
-
-/*
- * Take a bytecode with reloc table and link it to an event to create a
- * bytecode runtime.
- */
-static
-int _lttng_filter_event_link_bytecode(struct lttng_event *event,
-               struct lttng_filter_bytecode_node *filter_bytecode,
-               struct list_head *insert_loc)
-{
-       int ret, offset, next_offset;
-       struct bytecode_runtime *runtime = NULL;
-       size_t runtime_alloc_len;
-
-       if (!filter_bytecode)
-               return 0;
-       /* Bytecode already linked */
-       if (bytecode_is_linked(filter_bytecode, event))
-               return 0;
-
-       dbg_printk("Linking...\n");
-
-       /* We don't need the reloc table in the runtime */
-       runtime_alloc_len = sizeof(*runtime) + filter_bytecode->bc.reloc_offset;
-       runtime = kzalloc(runtime_alloc_len, GFP_KERNEL);
-       if (!runtime) {
-               ret = -ENOMEM;
-               goto alloc_error;
-       }
-       runtime->p.bc = filter_bytecode;
-       runtime->p.event = event;
-       runtime->len = filter_bytecode->bc.reloc_offset;
-       /* copy original bytecode */
-       memcpy(runtime->code, filter_bytecode->bc.data, runtime->len);
-       /*
-        * apply relocs. Those are a uint16_t (offset in bytecode)
-        * followed by a string (field name).
-        */
-       for (offset = filter_bytecode->bc.reloc_offset;
-                       offset < filter_bytecode->bc.len;
-                       offset = next_offset) {
-               uint16_t reloc_offset =
-                       *(uint16_t *) &filter_bytecode->bc.data[offset];
-               const char *name =
-                       (const char *) &filter_bytecode->bc.data[offset + sizeof(uint16_t)];
-
-               ret = apply_reloc(event, runtime, runtime->len, reloc_offset, name);
-               if (ret) {
-                       goto link_error;
-               }
-               next_offset = offset + sizeof(uint16_t) + strlen(name) + 1;
-       }
-       /* Validate bytecode */
-       ret = lttng_filter_validate_bytecode(runtime);
-       if (ret) {
-               goto link_error;
-       }
-       /* Specialize bytecode */
-       ret = lttng_filter_specialize_bytecode(event, runtime);
-       if (ret) {
-               goto link_error;
-       }
-       runtime->p.filter = lttng_filter_interpret_bytecode;
-       runtime->p.link_failed = 0;
-       list_add_rcu(&runtime->p.node, insert_loc);
-       dbg_printk("Linking successful.\n");
-       return 0;
-
-link_error:
-       runtime->p.filter = lttng_filter_false;
-       runtime->p.link_failed = 1;
-       list_add_rcu(&runtime->p.node, insert_loc);
-alloc_error:
-       dbg_printk("Linking failed.\n");
-       return ret;
-}
-
-void lttng_filter_sync_state(struct lttng_bytecode_runtime *runtime)
-{
-       struct lttng_filter_bytecode_node *bc = runtime->bc;
-
-       if (!bc->enabler->enabled || runtime->link_failed)
-               runtime->filter = lttng_filter_false;
-       else
-               runtime->filter = lttng_filter_interpret_bytecode;
-}
-
-/*
- * Link bytecode for all enablers referenced by an event.
- */
-void lttng_enabler_event_link_bytecode(struct lttng_event *event,
-               struct lttng_enabler *enabler)
-{
-       struct lttng_filter_bytecode_node *bc;
-       struct lttng_bytecode_runtime *runtime;
-
-       /* Can only be called for events with desc attached */
-       WARN_ON_ONCE(!event->desc);
-
-       /* Link each bytecode. */
-       list_for_each_entry(bc, &enabler->filter_bytecode_head, node) {
-               int found = 0, ret;
-               struct list_head *insert_loc;
-
-               list_for_each_entry(runtime,
-                               &event->bytecode_runtime_head, node) {
-                       if (runtime->bc == bc) {
-                               found = 1;
-                               break;
-                       }
-               }
-               /* Skip bytecode already linked */
-               if (found)
-                       continue;
-
-               /*
-                * Insert at specified priority (seqnum) in increasing
-                * order. If there already is a bytecode of the same priority,
-                * insert the new bytecode right after it.
-                */
-               list_for_each_entry_reverse(runtime,
-                               &event->bytecode_runtime_head, node) {
-                       if (runtime->bc->bc.seqnum <= bc->bc.seqnum) {
-                               /* insert here */
-                               insert_loc = &runtime->node;
-                               goto add_within;
-                       }
-               }
-               /* Add to head to list */
-               insert_loc = &event->bytecode_runtime_head;
-       add_within:
-               dbg_printk("linking bytecode\n");
-               ret = _lttng_filter_event_link_bytecode(event, bc,
-                               insert_loc);
-               if (ret) {
-                       dbg_printk("[lttng filter] warning: cannot link event bytecode\n");
-               }
-       }
-}
-
-/*
- * We own the filter_bytecode if we return success.
- */
-int lttng_filter_enabler_attach_bytecode(struct lttng_enabler *enabler,
-               struct lttng_filter_bytecode_node *filter_bytecode)
-{
-       list_add(&filter_bytecode->node, &enabler->filter_bytecode_head);
-       return 0;
-}
-
-void lttng_free_enabler_filter_bytecode(struct lttng_enabler *enabler)
-{
-       struct lttng_filter_bytecode_node *filter_bytecode, *tmp;
-
-       list_for_each_entry_safe(filter_bytecode, tmp,
-                       &enabler->filter_bytecode_head, node) {
-               kfree(filter_bytecode);
-       }
-}
-
-void lttng_free_event_filter_runtime(struct lttng_event *event)
-{
-       struct bytecode_runtime *runtime, *tmp;
-
-       list_for_each_entry_safe(runtime, tmp,
-                       &event->bytecode_runtime_head, p.node) {
-               kfree(runtime->data);
-               kfree(runtime);
-       }
-}
index 4a2bb630e8503412c3524ddc468aef749c7f2280..eda28d601000b822c33feb66fabb853222d86670 100644 (file)
@@ -124,6 +124,8 @@ void fixup_lazy_probes(void)
        }
        ret = lttng_fix_pending_events();
        WARN_ON_ONCE(ret);
+       ret = lttng_fix_pending_triggers();
+       WARN_ON_ONCE(ret);
        lazy_nesting--;
 }
 
@@ -173,7 +175,7 @@ int lttng_probe_register(struct lttng_probe_desc *desc)
         * the probe immediately, since we cannot delay event
         * registration because they are needed ASAP.
         */
-       if (lttng_session_active())
+       if (lttng_session_active() || lttng_trigger_active())
                fixup_lazy_probes();
 end:
        lttng_unlock_sessions();
@@ -198,7 +200,7 @@ EXPORT_SYMBOL_GPL(lttng_probe_unregister);
  * Called with sessions lock held.
  */
 static
-const struct lttng_event_desc *find_event(const char *name)
+const struct lttng_event_desc *find_event_desc(const char *name)
 {
        struct lttng_probe_desc *probe_desc;
        int i;
@@ -215,28 +217,28 @@ const struct lttng_event_desc *find_event(const char *name)
 /*
  * Called with sessions lock held.
  */
-const struct lttng_event_desc *lttng_event_get(const char *name)
+const struct lttng_event_desc *lttng_event_desc_get(const char *name)
 {
-       const struct lttng_event_desc *event;
+       const struct lttng_event_desc *event_desc;
        int ret;
 
-       event = find_event(name);
-       if (!event)
+       event_desc = find_event_desc(name);
+       if (!event_desc)
                return NULL;
-       ret = try_module_get(event->owner);
+       ret = try_module_get(event_desc->owner);
        WARN_ON_ONCE(!ret);
-       return event;
+       return event_desc;
 }
-EXPORT_SYMBOL_GPL(lttng_event_get);
+EXPORT_SYMBOL_GPL(lttng_event_desc_get);
 
 /*
  * Called with sessions lock held.
  */
-void lttng_event_put(const struct lttng_event_desc *event)
+void lttng_event_desc_put(const struct lttng_event_desc *event_desc)
 {
-       module_put(event->owner);
+       module_put(event_desc->owner);
 }
-EXPORT_SYMBOL_GPL(lttng_event_put);
+EXPORT_SYMBOL_GPL(lttng_event_desc_put);
 
 static
 void *tp_list_start(struct seq_file *m, loff_t *pos)
index aad7955f8accfcdea845b14858b431adab3cf69c..bed8df08749d3b66d7f9fdb1a9e4c41c6fa87e1c 100644 (file)
@@ -550,11 +550,12 @@ void lttng_channel_destroy(struct channel *chan)
 
 static
 struct channel *_channel_create(const char *name,
-                               struct lttng_channel *lttng_chan, void *buf_addr,
+                               void *priv, void *buf_addr,
                                size_t subbuf_size, size_t num_subbuf,
                                unsigned int switch_timer_interval,
                                unsigned int read_timer_interval)
 {
+       struct lttng_channel *lttng_chan = priv;
        struct channel *chan;
 
        chan = channel_create(&client_config, name, lttng_chan, buf_addr,
index 0f68b38538779c2abe404cb59eca9394fead5358..74df6e4b21a4b2c86bfab5d6186b20466a5d6ad1 100644 (file)
@@ -237,11 +237,12 @@ void lttng_channel_destroy(struct channel *chan)
 
 static
 struct channel *_channel_create(const char *name,
-                               struct lttng_channel *lttng_chan, void *buf_addr,
+                               void *priv, void *buf_addr,
                                size_t subbuf_size, size_t num_subbuf,
                                unsigned int switch_timer_interval,
                                unsigned int read_timer_interval)
 {
+       struct lttng_channel *lttng_chan = priv;
        struct channel *chan;
 
        chan = channel_create(&client_config, name,
diff --git a/src/lttng-ring-buffer-trigger-client.c b/src/lttng-ring-buffer-trigger-client.c
new file mode 100644 (file)
index 0000000..6442db6
--- /dev/null
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
+ *
+ * lttng-ring-buffer-trigger-client.c
+ *
+ * LTTng lib ring buffer trigger client.
+ *
+ * Copyright (C) 2010-2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+
+#define RING_BUFFER_MODE_TEMPLATE              RING_BUFFER_DISCARD
+#define RING_BUFFER_MODE_TEMPLATE_STRING       "trigger"
+#define RING_BUFFER_OUTPUT_TEMPLATE            RING_BUFFER_NONE
+#include "lttng-ring-buffer-trigger-client.h"
diff --git a/src/lttng-ring-buffer-trigger-client.h b/src/lttng-ring-buffer-trigger-client.h
new file mode 100644 (file)
index 0000000..1ac5812
--- /dev/null
@@ -0,0 +1,453 @@
+/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
+ *
+ * lttng-ring-buffer-trigger-client.h
+ *
+ * LTTng lib ring buffer trigger client template.
+ *
+ * Copyright (C) 2010-2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <wrapper/vmalloc.h>   /* for wrapper_vmalloc_sync_mappings() */
+#include <lttng/abi.h>
+#include <lttng/events.h>
+#include <lttng/tracer.h>
+
+static struct lttng_transport lttng_relay_transport;
+
+struct trigger_packet_header {
+       uint8_t  header_end[0];
+};
+
+struct trigger_record_header {
+       uint32_t payload_len;           /* in bytes */
+       uint8_t header_end[0];          /* End of header */
+};
+
+static const struct lib_ring_buffer_config client_config;
+
+static inline
+u64 lib_ring_buffer_clock_read(struct channel *chan)
+{
+       return 0;
+}
+
+static inline
+size_t record_header_size(const struct lib_ring_buffer_config *config,
+                                struct channel *chan, size_t offset,
+                                size_t *pre_header_padding,
+                                struct lib_ring_buffer_ctx *ctx,
+                                void *client_ctx)
+{
+       size_t orig_offset = offset;
+       size_t padding;
+
+       padding = lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
+       offset += padding;
+
+       offset += sizeof(uint32_t);
+
+       *pre_header_padding = padding;
+
+       return offset - orig_offset;
+}
+
+#include <ringbuffer/api.h>
+
+static u64 client_ring_buffer_clock_read(struct channel *chan)
+{
+       return 0;
+}
+
+static
+size_t client_record_header_size(const struct lib_ring_buffer_config *config,
+                                struct channel *chan, size_t offset,
+                                size_t *pre_header_padding,
+                                struct lib_ring_buffer_ctx *ctx,
+                                void *client_ctx)
+{
+       return record_header_size(config, chan, offset,
+                                 pre_header_padding, ctx, client_ctx);
+}
+
+/**
+ * client_packet_header_size - called on buffer-switch to a new sub-buffer
+ *
+ * Return header size without padding after the structure. Don't use packed
+ * structure because gcc generates inefficient code on some architectures
+ * (powerpc, mips..)
+ */
+static size_t client_packet_header_size(void)
+{
+       return offsetof(struct trigger_packet_header, header_end);
+}
+
+static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
+                               unsigned int subbuf_idx)
+{
+}
+
+/*
+ * offset is assumed to never be 0 here : never deliver a completely empty
+ * subbuffer. data_size is between 1 and subbuf_size.
+ */
+static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
+                             unsigned int subbuf_idx, unsigned long data_size)
+{
+}
+
+static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
+                               int cpu, const char *name)
+{
+       return 0;
+}
+
+static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
+{
+}
+
+static int client_timestamp_begin(const struct lib_ring_buffer_config *config,
+               struct lib_ring_buffer *buf, uint64_t *timestamp_begin)
+{
+       return -ENOSYS;
+}
+
+static int client_timestamp_end(const struct lib_ring_buffer_config *config,
+                       struct lib_ring_buffer *bufb,
+                       uint64_t *timestamp_end)
+{
+       return -ENOSYS;
+}
+
+static int client_events_discarded(const struct lib_ring_buffer_config *config,
+                       struct lib_ring_buffer *bufb,
+                       uint64_t *events_discarded)
+{
+       return -ENOSYS;
+}
+
+static int client_current_timestamp(const struct lib_ring_buffer_config *config,
+               struct lib_ring_buffer *bufb,
+               uint64_t *ts)
+{
+       return -ENOSYS;
+}
+
+static int client_content_size(const struct lib_ring_buffer_config *config,
+                       struct lib_ring_buffer *bufb,
+                       uint64_t *content_size)
+{
+       return -ENOSYS;
+}
+
+static int client_packet_size(const struct lib_ring_buffer_config *config,
+                       struct lib_ring_buffer *bufb,
+                       uint64_t *packet_size)
+{
+       return -ENOSYS;
+}
+
+static int client_stream_id(const struct lib_ring_buffer_config *config,
+                       struct lib_ring_buffer *bufb,
+                       uint64_t *stream_id)
+{
+       return -ENOSYS;
+}
+
+static int client_sequence_number(const struct lib_ring_buffer_config *config,
+                       struct lib_ring_buffer *bufb,
+                       uint64_t *seq)
+{
+       return -ENOSYS;
+}
+
+static
+int client_instance_id(const struct lib_ring_buffer_config *config,
+               struct lib_ring_buffer *bufb,
+               uint64_t *id)
+{
+       return -ENOSYS;
+}
+
+static void client_record_get(const struct lib_ring_buffer_config *config,
+       struct channel *chan, struct lib_ring_buffer *buf,
+       size_t offset, size_t *header_len,
+       size_t *payload_len, u64 *timestamp)
+{
+       struct trigger_record_header header;
+       int ret;
+
+       ret = lib_ring_buffer_read(&buf->backend, offset, &header,
+                       offsetof(struct trigger_record_header, header_end));
+       CHAN_WARN_ON(chan, ret != offsetof(struct trigger_record_header, header_end));
+       *header_len = offsetof(struct trigger_record_header, header_end);
+       *payload_len = header.payload_len;
+       *timestamp = 0;
+}
+
+static const struct lib_ring_buffer_config client_config = {
+       .cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
+       .cb.record_header_size = client_record_header_size,
+       .cb.subbuffer_header_size = client_packet_header_size,
+       .cb.buffer_begin = client_buffer_begin,
+       .cb.buffer_end = client_buffer_end,
+       .cb.buffer_create = client_buffer_create,
+       .cb.buffer_finalize = client_buffer_finalize,
+       .cb.record_get = client_record_get,
+
+       .tsc_bits = 0,
+       .alloc = RING_BUFFER_ALLOC_GLOBAL,
+       .sync = RING_BUFFER_SYNC_GLOBAL,
+       .mode = RING_BUFFER_MODE_TEMPLATE,
+       .backend = RING_BUFFER_PAGE,
+       .output = RING_BUFFER_OUTPUT_TEMPLATE,
+       .oops = RING_BUFFER_OOPS_CONSISTENCY,
+       .ipi = RING_BUFFER_NO_IPI_BARRIER,
+       .wakeup = RING_BUFFER_WAKEUP_BY_WRITER,
+};
+
+static
+void release_priv_ops(void *priv_ops)
+{
+       module_put(THIS_MODULE);
+}
+
+static
+void lttng_channel_destroy(struct channel *chan)
+{
+       channel_destroy(chan);
+}
+
+static
+struct channel *_channel_create(const char *name,
+                               void *priv, void *buf_addr,
+                               size_t subbuf_size, size_t num_subbuf,
+                               unsigned int switch_timer_interval,
+                               unsigned int read_timer_interval)
+{
+       struct lttng_trigger_group *trigger_group = priv;
+       struct channel *chan;
+
+       chan = channel_create(&client_config, name,
+                             trigger_group, buf_addr,
+                             subbuf_size, num_subbuf, switch_timer_interval,
+                             read_timer_interval);
+       if (chan) {
+               /*
+                * Ensure this module is not unloaded before we finish
+                * using lttng_relay_transport.ops.
+                */
+               if (!try_module_get(THIS_MODULE)) {
+                       printk(KERN_WARNING "LTT : Can't lock transport module.\n");
+                       goto error;
+               }
+               chan->backend.priv_ops = &lttng_relay_transport.ops;
+               chan->backend.release_priv_ops = release_priv_ops;
+       }
+       return chan;
+
+error:
+       lttng_channel_destroy(chan);
+       return NULL;
+}
+
+static
+struct lib_ring_buffer *lttng_buffer_read_open(struct channel *chan)
+{
+       struct lib_ring_buffer *buf;
+
+       buf = channel_get_ring_buffer(&client_config, chan, 0);
+       if (!lib_ring_buffer_open_read(buf))
+               return buf;
+       return NULL;
+}
+
+static
+int lttng_buffer_has_read_closed_stream(struct channel *chan)
+{
+       struct lib_ring_buffer *buf;
+       int cpu;
+
+       for_each_channel_cpu(cpu, chan) {
+               buf = channel_get_ring_buffer(&client_config, chan, cpu);
+               if (!atomic_long_read(&buf->active_readers))
+                       return 1;
+       }
+       return 0;
+}
+
+static
+void lttng_buffer_read_close(struct lib_ring_buffer *buf)
+{
+       lib_ring_buffer_release_read(buf);
+}
+
+static
+void lttng_write_trigger_header(const struct lib_ring_buffer_config *config,
+                           struct lib_ring_buffer_ctx *ctx)
+{
+       uint32_t data_size;
+
+       WARN_ON_ONCE(ctx->data_size > U32_MAX);
+
+       data_size = (uint32_t) ctx->data_size;
+
+       lib_ring_buffer_write(config, ctx, &data_size, sizeof(data_size));
+
+       lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
+}
+
+static
+int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx, uint32_t event_id)
+{
+       int ret;
+
+       ret = lib_ring_buffer_reserve(&client_config, ctx, NULL);
+       if (ret)
+               return ret;
+       lib_ring_buffer_backend_get_pages(&client_config, ctx,
+                       &ctx->backend_pages);
+
+       lttng_write_trigger_header(&client_config, ctx);
+       return 0;
+}
+
+static
+void lttng_event_commit(struct lib_ring_buffer_ctx *ctx)
+{
+       lib_ring_buffer_commit(&client_config, ctx);
+}
+
+static
+void lttng_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
+                    size_t len)
+{
+       lib_ring_buffer_write(&client_config, ctx, src, len);
+}
+
+static
+void lttng_event_write_from_user(struct lib_ring_buffer_ctx *ctx,
+                              const void __user *src, size_t len)
+{
+       lib_ring_buffer_copy_from_user_inatomic(&client_config, ctx, src, len);
+}
+
+static
+void lttng_event_memset(struct lib_ring_buffer_ctx *ctx,
+               int c, size_t len)
+{
+       lib_ring_buffer_memset(&client_config, ctx, c, len);
+}
+
+static
+void lttng_event_strcpy(struct lib_ring_buffer_ctx *ctx, const char *src,
+               size_t len)
+{
+       lib_ring_buffer_strcpy(&client_config, ctx, src, len, '#');
+}
+
+static
+size_t lttng_packet_avail_size(struct channel *chan)
+{
+       unsigned long o_begin;
+       struct lib_ring_buffer *buf;
+
+       buf = chan->backend.buf;        /* Only for global buffer ! */
+       o_begin = v_read(&client_config, &buf->offset);
+       if (subbuf_offset(o_begin, chan) != 0) {
+               return chan->backend.subbuf_size - subbuf_offset(o_begin, chan);
+       } else {
+               return chan->backend.subbuf_size - subbuf_offset(o_begin, chan)
+                       - sizeof(struct trigger_packet_header);
+       }
+}
+
+static
+wait_queue_head_t *lttng_get_writer_buf_wait_queue(struct channel *chan, int cpu)
+{
+       struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
+                                       chan, cpu);
+       return &buf->write_wait;
+}
+
+static
+wait_queue_head_t *lttng_get_hp_wait_queue(struct channel *chan)
+{
+       return &chan->hp_wait;
+}
+
+static
+int lttng_is_finalized(struct channel *chan)
+{
+       return lib_ring_buffer_channel_is_finalized(chan);
+}
+
+static
+int lttng_is_disabled(struct channel *chan)
+{
+       return lib_ring_buffer_channel_is_disabled(chan);
+}
+
+static struct lttng_transport lttng_relay_transport = {
+       .name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
+       .owner = THIS_MODULE,
+       .ops = {
+               .channel_create = _channel_create,
+               .channel_destroy = lttng_channel_destroy,
+               .buffer_read_open = lttng_buffer_read_open,
+               .buffer_has_read_closed_stream =
+                       lttng_buffer_has_read_closed_stream,
+               .buffer_read_close = lttng_buffer_read_close,
+               .event_reserve = lttng_event_reserve,
+               .event_commit = lttng_event_commit,
+               .event_write_from_user = lttng_event_write_from_user,
+               .event_memset = lttng_event_memset,
+               .event_write = lttng_event_write,
+               .event_strcpy = lttng_event_strcpy,
+               .packet_avail_size = lttng_packet_avail_size,
+               .get_writer_buf_wait_queue = lttng_get_writer_buf_wait_queue,
+               .get_hp_wait_queue = lttng_get_hp_wait_queue,
+               .is_finalized = lttng_is_finalized,
+               .is_disabled = lttng_is_disabled,
+               .timestamp_begin = client_timestamp_begin,
+               .timestamp_end = client_timestamp_end,
+               .events_discarded = client_events_discarded,
+               .content_size = client_content_size,
+               .packet_size = client_packet_size,
+               .stream_id = client_stream_id,
+               .current_timestamp = client_current_timestamp,
+               .sequence_number = client_sequence_number,
+               .instance_id = client_instance_id,
+       },
+};
+
+static int __init lttng_ring_buffer_trigger_client_init(void)
+{
+       /*
+        * This vmalloc sync all also takes care of the lib ring buffer
+        * vmalloc'd module pages when it is built as a module into LTTng.
+        */
+       wrapper_vmalloc_sync_mappings();
+       lttng_transport_register(&lttng_relay_transport);
+       return 0;
+}
+
+module_init(lttng_ring_buffer_trigger_client_init);
+
+static void __exit lttng_ring_buffer_trigger_client_exit(void)
+{
+       lttng_transport_unregister(&lttng_relay_transport);
+}
+
+module_exit(lttng_ring_buffer_trigger_client_exit);
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
+MODULE_DESCRIPTION("LTTng ring buffer " RING_BUFFER_MODE_TEMPLATE_STRING
+                  " client");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
index a5b5f403fe359e6531650111fc05cb82a6c60aba..def1b07524299585a3e12047e85c045bafbe9a80 100644 (file)
@@ -28,6 +28,7 @@
 #include <wrapper/rcu.h>
 #include <wrapper/syscall.h>
 #include <lttng/events.h>
+#include <lttng/utils.h>
 
 #ifndef CONFIG_COMPAT
 # ifndef is_compat_task
@@ -58,9 +59,9 @@ enum sc_type {
 #define COMPAT_SYSCALL_EXIT_STR                __stringify(COMPAT_SYSCALL_EXIT_TOK)
 
 static
-void syscall_entry_probe(void *__data, struct pt_regs *regs, long id);
+void syscall_entry_event_probe(void *__data, struct pt_regs *regs, long id);
 static
-void syscall_exit_probe(void *__data, struct pt_regs *regs, long ret);
+void syscall_exit_event_probe(void *__data, struct pt_regs *regs, long ret);
 
 /*
  * Forward declarations for old kernels.
@@ -121,7 +122,7 @@ typedef __kernel_old_time_t time_t;
 
 /* Hijack probe callback for system call enter */
 #undef TP_PROBE_CB
-#define TP_PROBE_CB(_template)         &syscall_entry_probe
+#define TP_PROBE_CB(_template)         &syscall_entry_event_probe
 #define SC_LTTNG_TRACEPOINT_EVENT(_name, _proto, _args, _fields) \
        LTTNG_TRACEPOINT_EVENT(syscall_entry_##_name, PARAMS(_proto), PARAMS(_args), \
                PARAMS(_fields))
@@ -157,7 +158,7 @@ typedef __kernel_old_time_t time_t;
 #undef _TRACE_SYSCALLS_POINTERS_H
 
 /* Hijack probe callback for compat system call enter */
-#define TP_PROBE_CB(_template)         &syscall_entry_probe
+#define TP_PROBE_CB(_template)         &syscall_entry_event_probe
 #define LTTNG_SC_COMPAT
 #define SC_LTTNG_TRACEPOINT_EVENT(_name, _proto, _args, _fields) \
        LTTNG_TRACEPOINT_EVENT(compat_syscall_entry_##_name, PARAMS(_proto), PARAMS(_args), \
@@ -206,7 +207,7 @@ typedef __kernel_old_time_t time_t;
 #define sc_inout(...)          __VA_ARGS__
 
 /* Hijack probe callback for system call exit */
-#define TP_PROBE_CB(_template)         &syscall_exit_probe
+#define TP_PROBE_CB(_template)         &syscall_exit_event_probe
 #define SC_LTTNG_TRACEPOINT_EVENT(_name, _proto, _args, _fields) \
        LTTNG_TRACEPOINT_EVENT(syscall_exit_##_name, PARAMS(_proto), PARAMS(_args), \
                PARAMS(_fields))
@@ -241,7 +242,7 @@ typedef __kernel_old_time_t time_t;
 
 
 /* Hijack probe callback for compat system call exit */
-#define TP_PROBE_CB(_template)         &syscall_exit_probe
+#define TP_PROBE_CB(_template)         &syscall_exit_event_probe
 #define LTTNG_SC_COMPAT
 #define SC_LTTNG_TRACEPOINT_EVENT(_name, _proto, _args, _fields) \
        LTTNG_TRACEPOINT_EVENT(compat_syscall_exit_##_name, PARAMS(_proto), PARAMS(_args), \
@@ -283,7 +284,8 @@ typedef __kernel_old_time_t time_t;
 #undef CREATE_TRACE_POINTS
 
 struct trace_syscall_entry {
-       void *func;
+       void *event_func;
+       void *trigger_func;
        const struct lttng_event_desc *desc;
        const struct lttng_event_field *fields;
        unsigned int nrargs;
@@ -299,13 +301,14 @@ struct trace_syscall_entry {
 #undef TRACE_SYSCALL_TABLE
 #define TRACE_SYSCALL_TABLE(_template, _name, _nr, _nrargs)    \
        [ _nr ] = {                                             \
-               .func = __event_probe__syscall_entry_##_template, \
+               .event_func = __event_probe__syscall_entry_##_template, \
+               .trigger_func = __trigger_probe__syscall_entry_##_template, \
                .nrargs = (_nrargs),                            \
                .fields = __event_fields___syscall_entry_##_template, \
                .desc = &__event_desc___syscall_entry_##_name,  \
        },
 
-/* Syscall enter tracing table */
+/* Event syscall enter tracing table */
 static const struct trace_syscall_entry sc_table[] = {
 #include <instrumentation/syscalls/headers/syscalls_integers.h>
 #include <instrumentation/syscalls/headers/syscalls_pointers.h>
@@ -314,13 +317,14 @@ static const struct trace_syscall_entry sc_table[] = {
 #undef TRACE_SYSCALL_TABLE
 #define TRACE_SYSCALL_TABLE(_template, _name, _nr, _nrargs)    \
        [ _nr ] = {                                             \
-               .func = __event_probe__compat_syscall_entry_##_template, \
+               .event_func = __event_probe__compat_syscall_entry_##_template, \
+               .trigger_func = __trigger_probe__compat_syscall_entry_##_template, \
                .nrargs = (_nrargs),                            \
                .fields = __event_fields___compat_syscall_entry_##_template, \
                .desc = &__event_desc___compat_syscall_entry_##_name, \
        },
 
-/* Compat syscall enter table */
+/* Event compat syscall enter table */
 const struct trace_syscall_entry compat_sc_table[] = {
 #include <instrumentation/syscalls/headers/compat_syscalls_integers.h>
 #include <instrumentation/syscalls/headers/compat_syscalls_pointers.h>
@@ -336,13 +340,14 @@ const struct trace_syscall_entry compat_sc_table[] = {
 #undef TRACE_SYSCALL_TABLE
 #define TRACE_SYSCALL_TABLE(_template, _name, _nr, _nrargs)    \
        [ _nr ] = {                                             \
-               .func = __event_probe__syscall_exit_##_template, \
+               .event_func = __event_probe__syscall_exit_##_template, \
+               .trigger_func = __trigger_probe__syscall_exit_##_template, \
                .nrargs = (_nrargs),                            \
                .fields = __event_fields___syscall_exit_##_template, \
                .desc = &__event_desc___syscall_exit_##_name, \
        },
 
-/* Syscall exit table */
+/* Event syscall exit table */
 static const struct trace_syscall_entry sc_exit_table[] = {
 #include <instrumentation/syscalls/headers/syscalls_integers.h>
 #include <instrumentation/syscalls/headers/syscalls_pointers.h>
@@ -351,13 +356,14 @@ static const struct trace_syscall_entry sc_exit_table[] = {
 #undef TRACE_SYSCALL_TABLE
 #define TRACE_SYSCALL_TABLE(_template, _name, _nr, _nrargs)    \
        [ _nr ] = {                                             \
-               .func = __event_probe__compat_syscall_exit_##_template, \
+               .event_func = __event_probe__compat_syscall_exit_##_template, \
+               .trigger_func = __trigger_probe__compat_syscall_exit_##_template, \
                .nrargs = (_nrargs),                            \
                .fields = __event_fields___compat_syscall_exit_##_template, \
                .desc = &__event_desc___compat_syscall_exit_##_name, \
        },
 
-/* Compat syscall exit table */
+/* Event compat syscall exit table */
 const struct trace_syscall_entry compat_sc_exit_table[] = {
 #include <instrumentation/syscalls/headers/compat_syscalls_integers.h>
 #include <instrumentation/syscalls/headers/compat_syscalls_pointers.h>
@@ -372,7 +378,7 @@ struct lttng_syscall_filter {
        DECLARE_BITMAP(sc_compat, NR_compat_syscalls);
 };
 
-static void syscall_entry_unknown(struct lttng_event *event,
+static void syscall_entry_event_unknown(struct lttng_event *event,
        struct pt_regs *regs, unsigned int id)
 {
        unsigned long args[LTTNG_SYSCALL_NR_ARGS];
@@ -384,83 +390,36 @@ static void syscall_entry_unknown(struct lttng_event *event,
                __event_probe__syscall_entry_unknown(event, id, args);
 }
 
-void syscall_entry_probe(void *__data, struct pt_regs *regs, long id)
+static __always_inline
+void syscall_entry_call_func(void *func, unsigned int nrargs, void *data,
+               struct pt_regs *regs)
 {
-       struct lttng_channel *chan = __data;
-       struct lttng_event *event, *unknown_event;
-       const struct trace_syscall_entry *table, *entry;
-       size_t table_len;
-
-       if (unlikely(in_compat_syscall())) {
-               struct lttng_syscall_filter *filter;
-
-               filter = lttng_rcu_dereference(chan->sc_filter);
-               if (filter) {
-                       if (id < 0 || id >= NR_compat_syscalls
-                               || !test_bit(id, filter->sc_compat)) {
-                               /* System call filtered out. */
-                               return;
-                       }
-               }
-               table = compat_sc_table;
-               table_len = ARRAY_SIZE(compat_sc_table);
-               unknown_event = chan->sc_compat_unknown;
-       } else {
-               struct lttng_syscall_filter *filter;
-
-               filter = lttng_rcu_dereference(chan->sc_filter);
-               if (filter) {
-                       if (id < 0 || id >= NR_syscalls
-                               || !test_bit(id, filter->sc)) {
-                               /* System call filtered out. */
-                               return;
-                       }
-               }
-               table = sc_table;
-               table_len = ARRAY_SIZE(sc_table);
-               unknown_event = chan->sc_unknown;
-       }
-       if (unlikely(id < 0 || id >= table_len)) {
-               syscall_entry_unknown(unknown_event, regs, id);
-               return;
-       }
-       if (unlikely(in_compat_syscall()))
-               event = chan->compat_sc_table[id];
-       else
-               event = chan->sc_table[id];
-       if (unlikely(!event)) {
-               syscall_entry_unknown(unknown_event, regs, id);
-               return;
-       }
-       entry = &table[id];
-       WARN_ON_ONCE(!entry);
-
-       switch (entry->nrargs) {
+       switch (nrargs) {
        case 0:
        {
-               void (*fptr)(void *__data) = entry->func;
+               void (*fptr)(void *__data) = func;
 
-               fptr(event);
+               fptr(data);
                break;
        }
        case 1:
        {
-               void (*fptr)(void *__data, unsigned long arg0) = entry->func;
+               void (*fptr)(void *__data, unsigned long arg0) = func;
                unsigned long args[LTTNG_SYSCALL_NR_ARGS];
 
                lttng_syscall_get_arguments(current, regs, args);
-               fptr(event, args[0]);
+               fptr(data, args[0]);
                break;
        }
        case 2:
        {
                void (*fptr)(void *__data,
                        unsigned long arg0,
-                       unsigned long arg1) = entry->func;
+                       unsigned long arg1) = func;
                unsigned long args[LTTNG_SYSCALL_NR_ARGS];
 
                lttng_syscall_get_arguments(current, regs, args);
-               fptr(event, args[0], args[1]);
+               fptr(data, args[0], args[1]);
                break;
        }
        case 3:
@@ -468,11 +427,11 @@ void syscall_entry_probe(void *__data, struct pt_regs *regs, long id)
                void (*fptr)(void *__data,
                        unsigned long arg0,
                        unsigned long arg1,
-                       unsigned long arg2) = entry->func;
+                       unsigned long arg2) = func;
                unsigned long args[LTTNG_SYSCALL_NR_ARGS];
 
                lttng_syscall_get_arguments(current, regs, args);
-               fptr(event, args[0], args[1], args[2]);
+               fptr(data, args[0], args[1], args[2]);
                break;
        }
        case 4:
@@ -481,11 +440,11 @@ void syscall_entry_probe(void *__data, struct pt_regs *regs, long id)
                        unsigned long arg0,
                        unsigned long arg1,
                        unsigned long arg2,
-                       unsigned long arg3) = entry->func;
+                       unsigned long arg3) = func;
                unsigned long args[LTTNG_SYSCALL_NR_ARGS];
 
                lttng_syscall_get_arguments(current, regs, args);
-               fptr(event, args[0], args[1], args[2], args[3]);
+               fptr(data, args[0], args[1], args[2], args[3]);
                break;
        }
        case 5:
@@ -495,11 +454,11 @@ void syscall_entry_probe(void *__data, struct pt_regs *regs, long id)
                        unsigned long arg1,
                        unsigned long arg2,
                        unsigned long arg3,
-                       unsigned long arg4) = entry->func;
+                       unsigned long arg4) = func;
                unsigned long args[LTTNG_SYSCALL_NR_ARGS];
 
                lttng_syscall_get_arguments(current, regs, args);
-               fptr(event, args[0], args[1], args[2], args[3], args[4]);
+               fptr(data, args[0], args[1], args[2], args[3], args[4]);
                break;
        }
        case 6:
@@ -510,11 +469,11 @@ void syscall_entry_probe(void *__data, struct pt_regs *regs, long id)
                        unsigned long arg2,
                        unsigned long arg3,
                        unsigned long arg4,
-                       unsigned long arg5) = entry->func;
+                       unsigned long arg5) = func;
                unsigned long args[LTTNG_SYSCALL_NR_ARGS];
 
                lttng_syscall_get_arguments(current, regs, args);
-               fptr(event, args[0], args[1], args[2],
+               fptr(data, args[0], args[1], args[2],
                        args[3], args[4], args[5]);
                break;
        }
@@ -523,7 +482,91 @@ void syscall_entry_probe(void *__data, struct pt_regs *regs, long id)
        }
 }
 
-static void syscall_exit_unknown(struct lttng_event *event,
+void syscall_entry_event_probe(void *__data, struct pt_regs *regs, long id)
+{
+	struct lttng_channel *chan = __data;
+	struct lttng_syscall_filter *filter;
+	struct lttng_event *event, *unknown_event;
+	const struct trace_syscall_entry *table, *entry;
+	size_t table_len;
+	/* Cannot change for the duration of the syscall: evaluate once. */
+	const bool compat = in_compat_syscall();
+
+	filter = lttng_rcu_dereference(chan->sc_filter);
+
+	if (unlikely(compat)) {
+		if (filter) {
+			if (id < 0 || id >= NR_compat_syscalls
+				|| !test_bit(id, filter->sc_compat)) {
+				/* System call filtered out. */
+				return;
+			}
+		}
+		table = compat_sc_table;
+		table_len = ARRAY_SIZE(compat_sc_table);
+		unknown_event = chan->sc_compat_unknown;
+	} else {
+		if (filter) {
+			if (id < 0 || id >= NR_syscalls
+				|| !test_bit(id, filter->sc)) {
+				/* System call filtered out. */
+				return;
+			}
+		}
+		table = sc_table;
+		table_len = ARRAY_SIZE(sc_table);
+		unknown_event = chan->sc_unknown;
+	}
+	/* Ids outside of the probe table are traced as "unknown" syscalls. */
+	if (unlikely(id < 0 || id >= table_len)) {
+		syscall_entry_event_unknown(unknown_event, regs, id);
+		return;
+	}
+	event = compat ? chan->compat_sc_table[id] : chan->sc_table[id];
+	if (unlikely(!event)) {
+		/* Syscall id known, but no event enabled for it. */
+		syscall_entry_event_unknown(unknown_event, regs, id);
+		return;
+	}
+	/* &table[id] can never be NULL once id is validated above. */
+	entry = &table[id];
+
+	syscall_entry_call_func(entry->event_func, entry->nrargs, event, regs);
+}
+
+void syscall_entry_trigger_probe(void *__data, struct pt_regs *regs, long id)
+{
+	struct lttng_trigger_group *trigger_group = __data;
+	const struct trace_syscall_entry *entry;
+	struct list_head *dispatch_list;
+	struct lttng_trigger *iter;
+
+	/* Pick the native or compat table and its per-id dispatch list. */
+	if (unlikely(in_compat_syscall())) {
+		if (unlikely(id < 0 || id >= ARRAY_SIZE(compat_sc_table)))
+			return;
+		entry = &compat_sc_table[id];
+		dispatch_list = &trigger_group->trigger_compat_syscall_dispatch[id];
+	} else {
+		if (unlikely(id < 0 || id >= ARRAY_SIZE(sc_table)))
+			return;
+		entry = &sc_table[id];
+		dispatch_list = &trigger_group->trigger_syscall_dispatch[id];
+	}
+
+	/* TODO handle unknown syscall */
+
+	/* Fire every trigger registered for this syscall id. */
+	list_for_each_entry_rcu(iter, dispatch_list, u.syscall.node) {
+		BUG_ON(iter->u.syscall.syscall_id != id);
+		syscall_entry_call_func(entry->trigger_func, entry->nrargs, iter, regs);
+	}
+}
+
+static void syscall_exit_event_unknown(struct lttng_event *event,
        struct pt_regs *regs, int id, long ret)
 {
        unsigned long args[LTTNG_SYSCALL_NR_ARGS];
@@ -536,19 +579,19 @@ static void syscall_exit_unknown(struct lttng_event *event,
                __event_probe__syscall_exit_unknown(event, id, ret, args);
 }
 
-void syscall_exit_probe(void *__data, struct pt_regs *regs, long ret)
+void syscall_exit_event_probe(void *__data, struct pt_regs *regs, long ret)
 {
        struct lttng_channel *chan = __data;
+       struct lttng_syscall_filter *filter;
        struct lttng_event *event, *unknown_event;
        const struct trace_syscall_entry *table, *entry;
        size_t table_len;
        long id;
 
+       filter = lttng_rcu_dereference(chan->sc_filter);
+
        id = syscall_get_nr(current, regs);
        if (unlikely(in_compat_syscall())) {
-               struct lttng_syscall_filter *filter;
-
-               filter = lttng_rcu_dereference(chan->sc_filter);
                if (filter) {
                        if (id < 0 || id >= NR_compat_syscalls
                                || !test_bit(id, filter->sc_compat)) {
@@ -560,9 +603,6 @@ void syscall_exit_probe(void *__data, struct pt_regs *regs, long ret)
                table_len = ARRAY_SIZE(compat_sc_exit_table);
                unknown_event = chan->compat_sc_exit_unknown;
        } else {
-               struct lttng_syscall_filter *filter;
-
-               filter = lttng_rcu_dereference(chan->sc_filter);
                if (filter) {
                        if (id < 0 || id >= NR_syscalls
                                || !test_bit(id, filter->sc)) {
@@ -575,7 +615,7 @@ void syscall_exit_probe(void *__data, struct pt_regs *regs, long ret)
                unknown_event = chan->sc_exit_unknown;
        }
        if (unlikely(id < 0 || id >= table_len)) {
-               syscall_exit_unknown(unknown_event, regs, id, ret);
+               syscall_exit_event_unknown(unknown_event, regs, id, ret);
                return;
        }
        if (unlikely(in_compat_syscall()))
@@ -583,7 +623,7 @@ void syscall_exit_probe(void *__data, struct pt_regs *regs, long ret)
        else
                event = chan->sc_exit_table[id];
        if (unlikely(!event)) {
-               syscall_exit_unknown(unknown_event, regs, id, ret);
+               syscall_exit_event_unknown(unknown_event, regs, id, ret);
                return;
        }
        entry = &table[id];
@@ -592,7 +632,7 @@ void syscall_exit_probe(void *__data, struct pt_regs *regs, long ret)
        switch (entry->nrargs) {
        case 0:
        {
-               void (*fptr)(void *__data, long ret) = entry->func;
+               void (*fptr)(void *__data, long ret) = entry->event_func;
 
                fptr(event, ret);
                break;
@@ -601,7 +641,7 @@ void syscall_exit_probe(void *__data, struct pt_regs *regs, long ret)
        {
                void (*fptr)(void *__data,
                        long ret,
-                       unsigned long arg0) = entry->func;
+                       unsigned long arg0) = entry->event_func;
                unsigned long args[LTTNG_SYSCALL_NR_ARGS];
 
                lttng_syscall_get_arguments(current, regs, args);
@@ -613,7 +653,7 @@ void syscall_exit_probe(void *__data, struct pt_regs *regs, long ret)
                void (*fptr)(void *__data,
                        long ret,
                        unsigned long arg0,
-                       unsigned long arg1) = entry->func;
+                       unsigned long arg1) = entry->event_func;
                unsigned long args[LTTNG_SYSCALL_NR_ARGS];
 
                lttng_syscall_get_arguments(current, regs, args);
@@ -626,7 +666,7 @@ void syscall_exit_probe(void *__data, struct pt_regs *regs, long ret)
                        long ret,
                        unsigned long arg0,
                        unsigned long arg1,
-                       unsigned long arg2) = entry->func;
+                       unsigned long arg2) = entry->event_func;
                unsigned long args[LTTNG_SYSCALL_NR_ARGS];
 
                lttng_syscall_get_arguments(current, regs, args);
@@ -640,7 +680,7 @@ void syscall_exit_probe(void *__data, struct pt_regs *regs, long ret)
                        unsigned long arg0,
                        unsigned long arg1,
                        unsigned long arg2,
-                       unsigned long arg3) = entry->func;
+                       unsigned long arg3) = entry->event_func;
                unsigned long args[LTTNG_SYSCALL_NR_ARGS];
 
                lttng_syscall_get_arguments(current, regs, args);
@@ -655,7 +695,7 @@ void syscall_exit_probe(void *__data, struct pt_regs *regs, long ret)
                        unsigned long arg1,
                        unsigned long arg2,
                        unsigned long arg3,
-                       unsigned long arg4) = entry->func;
+                       unsigned long arg4) = entry->event_func;
                unsigned long args[LTTNG_SYSCALL_NR_ARGS];
 
                lttng_syscall_get_arguments(current, regs, args);
@@ -671,7 +711,7 @@ void syscall_exit_probe(void *__data, struct pt_regs *regs, long ret)
                        unsigned long arg2,
                        unsigned long arg3,
                        unsigned long arg4,
-                       unsigned long arg5) = entry->func;
+                       unsigned long arg5) = entry->event_func;
                unsigned long args[LTTNG_SYSCALL_NR_ARGS];
 
                lttng_syscall_get_arguments(current, regs, args);
@@ -689,7 +729,7 @@ void syscall_exit_probe(void *__data, struct pt_regs *regs, long ret)
  * Should be called with sessions lock held.
  */
 static
-int fill_table(const struct trace_syscall_entry *table, size_t table_len,
+int fill_event_table(const struct trace_syscall_entry *table, size_t table_len,
        struct lttng_event **chan_table, struct lttng_channel *chan,
        void *filter, enum sc_type type)
 {
@@ -756,7 +796,7 @@ int fill_table(const struct trace_syscall_entry *table, size_t table_len,
 /*
  * Should be called with sessions lock held.
  */
-int lttng_syscalls_register(struct lttng_channel *chan, void *filter)
+int lttng_syscalls_register_event(struct lttng_channel *chan, void *filter)
 {
        struct lttng_kernel_event ev;
        int ret;
@@ -863,22 +903,22 @@ int lttng_syscalls_register(struct lttng_channel *chan, void *filter)
                }
        }
 
-       ret = fill_table(sc_table, ARRAY_SIZE(sc_table),
+       ret = fill_event_table(sc_table, ARRAY_SIZE(sc_table),
                        chan->sc_table, chan, filter, SC_TYPE_ENTRY);
        if (ret)
                return ret;
-       ret = fill_table(sc_exit_table, ARRAY_SIZE(sc_exit_table),
+       ret = fill_event_table(sc_exit_table, ARRAY_SIZE(sc_exit_table),
                        chan->sc_exit_table, chan, filter, SC_TYPE_EXIT);
        if (ret)
                return ret;
 
 #ifdef CONFIG_COMPAT
-       ret = fill_table(compat_sc_table, ARRAY_SIZE(compat_sc_table),
+       ret = fill_event_table(compat_sc_table, ARRAY_SIZE(compat_sc_table),
                        chan->compat_sc_table, chan, filter,
                        SC_TYPE_COMPAT_ENTRY);
        if (ret)
                return ret;
-       ret = fill_table(compat_sc_exit_table, ARRAY_SIZE(compat_sc_exit_table),
+       ret = fill_event_table(compat_sc_exit_table, ARRAY_SIZE(compat_sc_exit_table),
                        chan->compat_sc_exit_table, chan, filter,
                        SC_TYPE_COMPAT_EXIT);
        if (ret)
@@ -886,7 +926,7 @@ int lttng_syscalls_register(struct lttng_channel *chan, void *filter)
 #endif
        if (!chan->sys_enter_registered) {
                ret = lttng_wrapper_tracepoint_probe_register("sys_enter",
-                               (void *) syscall_entry_probe, chan);
+                               (void *) syscall_entry_event_probe, chan);
                if (ret)
                        return ret;
                chan->sys_enter_registered = 1;
@@ -897,10 +937,10 @@ int lttng_syscalls_register(struct lttng_channel *chan, void *filter)
         */
        if (!chan->sys_exit_registered) {
                ret = lttng_wrapper_tracepoint_probe_register("sys_exit",
-                               (void *) syscall_exit_probe, chan);
+                               (void *) syscall_exit_event_probe, chan);
                if (ret) {
                        WARN_ON_ONCE(lttng_wrapper_tracepoint_probe_unregister("sys_enter",
-                               (void *) syscall_entry_probe, chan));
+                               (void *) syscall_entry_event_probe, chan));
                        return ret;
                }
                chan->sys_exit_registered = 1;
@@ -909,9 +949,152 @@ int lttng_syscalls_register(struct lttng_channel *chan, void *filter)
 }
 
 /*
- * Only called at session destruction.
+ * Should be called with sessions lock held.
+ */
+int lttng_syscalls_register_trigger(struct lttng_trigger_enabler *trigger_enabler, void *filter)
+{
+       struct lttng_trigger_group *group = trigger_enabler->group;
+       unsigned int i;
+       int ret = 0;
+
+       wrapper_vmalloc_sync_mappings();
+
+       if (!group->trigger_syscall_dispatch) {
+               group->trigger_syscall_dispatch = kzalloc(sizeof(struct list_head)
+                                       * ARRAY_SIZE(sc_table), GFP_KERNEL);
+               if (!group->trigger_syscall_dispatch)
+                       return -ENOMEM;
+
+               /* Initialize all list_head */
+               for (i = 0; i < ARRAY_SIZE(sc_table); i++)
+                       INIT_LIST_HEAD(&group->trigger_syscall_dispatch[i]);
+       }
+
+#ifdef CONFIG_COMPAT
+       if (!group->trigger_compat_syscall_dispatch) {
+               group->trigger_compat_syscall_dispatch = kzalloc(sizeof(struct list_head)
+                                       * ARRAY_SIZE(compat_sc_table), GFP_KERNEL);
+               if (!group->trigger_syscall_dispatch)
+                       return -ENOMEM;
+
+               /* Initialize all list_head */
+               for (i = 0; i < ARRAY_SIZE(compat_sc_table); i++)
+                       INIT_LIST_HEAD(&group->trigger_compat_syscall_dispatch[i]);
+       }
+#endif
+
+       if (!group->sys_enter_registered) {
+               ret = lttng_wrapper_tracepoint_probe_register("sys_enter",
+                               (void *) syscall_entry_trigger_probe, group);
+               if (ret)
+                       return ret;
+               group->sys_enter_registered = 1;
+       }
+
+       return ret;
+}
+
+/*
+ * Iterate over the syscall table and create a trigger for every syscall
+ * whose event description matches the enabler. Already-created
+ * (desc, id) pairs are skipped.
+ */
+static int create_matching_triggers(struct lttng_trigger_enabler *trigger_enabler,
+		void *filter, const struct trace_syscall_entry *table,
+		size_t table_len, bool is_compat)
+{
+	struct lttng_trigger_group *group = trigger_enabler->group;
+	const struct lttng_event_desc *desc;
+	uint64_t id = trigger_enabler->id;
+	unsigned int i;
+	int ret = 0;
+
+	/* iterate over all syscall and create trigger that match */
+	for (i = 0; i < table_len; i++) {
+		struct lttng_trigger *trigger;
+		struct lttng_kernel_trigger trigger_param;
+		struct hlist_head *head;
+		int found = 0;
+
+		desc = table[i].desc;
+		if (!desc) {
+			/* Unknown syscall */
+			continue;
+		}
+
+		if (!lttng_desc_match_enabler(desc,
+				lttng_trigger_enabler_as_enabler(trigger_enabler)))
+			continue;
+
+		/*
+		 * Check if already created.
+		 */
+		head = utils_borrow_hash_table_bucket(group->triggers_ht.table,
+			LTTNG_TRIGGER_HT_SIZE, desc->name);
+		lttng_hlist_for_each_entry(trigger, head, hlist) {
+			if (trigger->desc == desc
+				&& trigger->id == trigger_enabler->id)
+				found = 1;
+		}
+		if (found)
+			continue;
+
+		memset(&trigger_param, 0, sizeof(trigger_param));
+		strncat(trigger_param.name, desc->name,
+			LTTNG_KERNEL_SYM_NAME_LEN - strlen(trigger_param.name) - 1);
+		trigger_param.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
+		trigger_param.instrumentation = LTTNG_KERNEL_SYSCALL;
+
+		trigger = _lttng_trigger_create(desc, id, group,
+			&trigger_param, filter, trigger_param.instrumentation);
+		if (IS_ERR(trigger)) {
+			printk(KERN_INFO "Unable to create trigger %s\n",
+				desc->name);
+			/* Propagate the real cause instead of forcing -ENOMEM. */
+			ret = PTR_ERR(trigger);
+			goto end;
+		}
+
+		trigger->u.syscall.syscall_id = i;
+		trigger->u.syscall.is_compat = is_compat;
+	}
+end:
+	return ret;
+}
+
+int lttng_syscals_create_matching_triggers(struct lttng_trigger_enabler *trigger_enabler, void *filter)
+{
+       int ret;
+
+       ret = create_matching_triggers(trigger_enabler, filter, sc_table,
+               ARRAY_SIZE(sc_table), false);
+       if (ret)
+               goto end;
+
+       ret = create_matching_triggers(trigger_enabler, filter, compat_sc_table,
+               ARRAY_SIZE(compat_sc_table), true);
+end:
+       return ret;
+}
+
+/*
+ * Unregister the sys_enter trigger probe and free the per-group syscall
+ * dispatch tables. TODO: document the locking requirements of this function.
+ */
-int lttng_syscalls_unregister(struct lttng_channel *chan)
+int lttng_syscalls_unregister_trigger(struct lttng_trigger_group *trigger_group)
+{
+       int ret;
+
+       if (trigger_group->sys_enter_registered) {
+               ret = lttng_wrapper_tracepoint_probe_unregister("sys_enter",
+                               (void *) syscall_entry_trigger_probe, trigger_group);
+               if (ret)
+                       return ret;
+               trigger_group->sys_enter_registered = 0;
+       }
+
+       kfree(trigger_group->trigger_syscall_dispatch);
+#ifdef CONFIG_COMPAT
+       kfree(trigger_group->trigger_compat_syscall_dispatch);
+#endif
+       return 0;
+}
+
+int lttng_syscalls_unregister_event(struct lttng_channel *chan)
 {
        int ret;
 
@@ -919,14 +1102,14 @@ int lttng_syscalls_unregister(struct lttng_channel *chan)
                return 0;
        if (chan->sys_enter_registered) {
                ret = lttng_wrapper_tracepoint_probe_unregister("sys_enter",
-                               (void *) syscall_entry_probe, chan);
+                               (void *) syscall_entry_event_probe, chan);
                if (ret)
                        return ret;
                chan->sys_enter_registered = 0;
        }
        if (chan->sys_exit_registered) {
                ret = lttng_wrapper_tracepoint_probe_unregister("sys_exit",
-                               (void *) syscall_exit_probe, chan);
+                               (void *) syscall_exit_event_probe, chan);
                if (ret)
                        return ret;
                chan->sys_exit_registered = 0;
@@ -950,14 +1133,12 @@ int get_syscall_nr(const char *syscall_name)
 
        for (i = 0; i < ARRAY_SIZE(sc_table); i++) {
                const struct trace_syscall_entry *entry;
-               const char *it_name;
 
                entry = &sc_table[i];
                if (!entry->desc)
                        continue;
-               it_name = entry->desc->name;
-               it_name += strlen(SYSCALL_ENTRY_STR);
-               if (!strcmp(syscall_name, it_name)) {
+
+               if (!strcmp(syscall_name, entry->desc->name)) {
                        syscall_nr = i;
                        break;
                }
@@ -973,14 +1154,12 @@ int get_compat_syscall_nr(const char *syscall_name)
 
        for (i = 0; i < ARRAY_SIZE(compat_sc_table); i++) {
                const struct trace_syscall_entry *entry;
-               const char *it_name;
 
                entry = &compat_sc_table[i];
                if (!entry->desc)
                        continue;
-               it_name = entry->desc->name;
-               it_name += strlen(COMPAT_SYSCALL_ENTRY_STR);
-               if (!strcmp(syscall_name, it_name)) {
+
+               if (!strcmp(syscall_name, entry->desc->name)) {
                        syscall_nr = i;
                        break;
                }
@@ -994,7 +1173,7 @@ uint32_t get_sc_tables_len(void)
        return ARRAY_SIZE(sc_table) + ARRAY_SIZE(compat_sc_table);
 }
 
-int lttng_syscall_filter_enable(struct lttng_channel *chan,
+int lttng_syscall_filter_enable_event(struct lttng_channel *chan,
                const char *name)
 {
        int syscall_nr, compat_syscall_nr, ret;
@@ -1058,7 +1237,23 @@ error:
        return ret;
 }
 
-int lttng_syscall_filter_disable(struct lttng_channel *chan,
+/*
+ * Link a syscall trigger into its group's per-syscall-id dispatch list so
+ * syscall_entry_trigger_probe() starts firing it.
+ *
+ * NOTE(review): list_add_rcu() requires the caller to hold the lock that
+ * serializes writers of these dispatch lists — confirm against callers.
+ */
+int lttng_syscall_filter_enable_trigger(struct lttng_trigger *trigger)
+{
+	struct lttng_trigger_group *group = trigger->group;
+	unsigned int syscall_id = trigger->u.syscall.syscall_id;
+	struct list_head *dispatch_list;
+
+	/* Compat and native syscalls use distinct dispatch tables. */
+	if (trigger->u.syscall.is_compat)
+		dispatch_list = &group->trigger_compat_syscall_dispatch[syscall_id];
+	else
+		dispatch_list = &group->trigger_syscall_dispatch[syscall_id];
+
+	list_add_rcu(&trigger->u.syscall.node, dispatch_list);
+
+	return 0;
+}
+
+int lttng_syscall_filter_disable_event(struct lttng_channel *chan,
                const char *name)
 {
        int syscall_nr, compat_syscall_nr, ret;
@@ -1126,6 +1321,12 @@ error:
        return ret;
 }
 
+/*
+ * Unlink a syscall trigger from its group's dispatch list. RCU readers
+ * still iterating may observe the trigger until a grace period elapses.
+ */
+int lttng_syscall_filter_disable_trigger(struct lttng_trigger *trigger)
+{
+	list_del_rcu(&trigger->u.syscall.node);
+	return 0;
+}
+
 static
 const struct trace_syscall_entry *syscall_list_get_entry(loff_t *pos)
 {
diff --git a/src/lttng-trigger-notification.c b/src/lttng-trigger-notification.c
new file mode 100644 (file)
index 0000000..c2f716f
--- /dev/null
@@ -0,0 +1,443 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-trigger-notification.c
+ *
+ * Copyright (C) 2020 Francis Deslauriers <francis.deslauriers@efficios.com>
+ */
+
+#include <linux/bug.h>
+
+#include <lttng/lttng-bytecode.h>
+#include <lttng/events.h>
+#include <lttng/msgpack.h>
+#include <lttng/trigger-notification.h>
+
+/*
+ * FIXME: this is probably too low, but the buffer must stay small enough to
+ * keep the on-stack frame below the 1024-byte limit enforced by the kernel.
+ */
+#define CAPTURE_BUFFER_SIZE 512
+
+/*
+ * Per-notification state assembled on the stack while a trigger fires:
+ * identifies the trigger and accumulates the msgpack-encoded captures.
+ */
+struct lttng_trigger_notification {
+	int notification_fd;		/* presumably the consumer-facing fd; unused in this file — verify */
+	uint64_t trigger_id;		/* presumably mirrors trigger->id; unused in this file — verify */
+	uint8_t capture_buf[CAPTURE_BUFFER_SIZE];	/* backing storage for the msgpack writer */
+	struct lttng_msgpack_writer writer;	/* serializes captures into capture_buf */
+	bool has_captures;		/* true once the capture array header was written */
+};
+
+static
+int capture_enum(struct lttng_msgpack_writer *writer,
+		struct lttng_interpreter_output *output)
+{
+	int ret;
+
+	/*
+	 * Enums are captured as a map containing 2 key-value pairs. Such as:
+	 * - type: enum
+	 *   value: 177
+	 */
+	ret = lttng_msgpack_begin_map(writer, 2);
+	if (ret) {
+		WARN_ON_ONCE(1);
+		goto end;
+	}
+
+	ret = lttng_msgpack_write_str(writer, "type");
+	if (ret) {
+		WARN_ON_ONCE(1);
+		goto end;
+	}
+
+	ret = lttng_msgpack_write_str(writer, "enum");
+	if (ret) {
+		WARN_ON_ONCE(1);
+		goto end;
+	}
+
+	ret = lttng_msgpack_write_str(writer, "value");
+	if (ret) {
+		WARN_ON_ONCE(1);
+		goto end;
+	}
+
+	switch (output->type) {
+	case LTTNG_INTERPRETER_TYPE_SIGNED_ENUM:
+		ret = lttng_msgpack_write_signed_integer(writer, output->u.s);
+		if (ret) {
+			WARN_ON_ONCE(1);
+			goto end;
+		}
+		break;
+	case LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM:
+		/*
+		 * Fix: unsigned enum containers must use the unsigned writer;
+		 * the signed one misencodes values above INT64_MAX.
+		 */
+		ret = lttng_msgpack_write_unsigned_integer(writer, output->u.u);
+		if (ret) {
+			WARN_ON_ONCE(1);
+			goto end;
+		}
+		break;
+	default:
+		WARN_ON(1);
+	}
+
+	ret = lttng_msgpack_end_map(writer);
+	if (ret)
+		WARN_ON_ONCE(1);
+
+end:
+	return ret;
+}
+
+/*
+ * Load one signed element of `type->size` bits from `ptr`, swapping byte
+ * order when the trace byte order differs from the host's, and sign-extend
+ * it to 64 bits.
+ */
+static
+int64_t capture_sequence_element_signed(uint8_t *ptr,
+		const struct lttng_integer_type *type)
+{
+	int64_t value = 0;
+	unsigned int size = type->size;
+	bool byte_order_reversed = type->reverse_byte_order;
+
+	switch (size) {
+	case 8:
+		/*
+		 * Fix: read through int8_t so negative 8-bit values are
+		 * sign-extended; `*ptr` (uint8_t) zero-extended them.
+		 */
+		value = *(int8_t *) ptr;
+		break;
+	case 16:
+	{
+		int16_t tmp;
+		tmp = *(int16_t *) ptr;
+		if (byte_order_reversed)
+			__swab16s(&tmp);
+
+		value = tmp;
+		break;
+	}
+	case 32:
+	{
+		int32_t tmp;
+		tmp = *(int32_t *) ptr;
+		if (byte_order_reversed)
+			__swab32s(&tmp);
+
+		value = tmp;
+		break;
+	}
+	case 64:
+	{
+		int64_t tmp;
+		tmp = *(int64_t *) ptr;
+		if (byte_order_reversed)
+			__swab64s(&tmp);
+
+		value = tmp;
+		break;
+	}
+	default:
+		WARN_ON(1);
+	}
+
+	return value;
+}
+
+/*
+ * Load one unsigned element of `type->size` bits from `ptr`, swapping byte
+ * order when required, and zero-extend it to 64 bits. Unsupported sizes
+ * warn and yield 0.
+ */
+static
+uint64_t capture_sequence_element_unsigned(uint8_t *ptr,
+		const struct lttng_integer_type *type)
+{
+	bool swap = type->reverse_byte_order;
+
+	switch (type->size) {
+	case 8:
+		return *ptr;
+	case 16:
+	{
+		uint16_t v = *(uint16_t *) ptr;
+
+		if (swap)
+			__swab16s(&v);
+		return v;
+	}
+	case 32:
+	{
+		uint32_t v = *(uint32_t *) ptr;
+
+		if (swap)
+			__swab32s(&v);
+		return v;
+	}
+	case 64:
+	{
+		uint64_t v = *(uint64_t *) ptr;
+
+		if (swap)
+			__swab64s(&v);
+		return v;
+	}
+	default:
+		WARN_ON(1);
+		return 0;
+	}
+}
+
+/*
+ * Serialize a captured sequence/array of integers (or enums, treated as
+ * their integer container) as a msgpack array. Non-integer element types
+ * are rejected.
+ */
+int capture_sequence(struct lttng_msgpack_writer *writer,
+		struct lttng_interpreter_output *output)
+{
+	const struct lttng_integer_type *integer_type = NULL;
+	const struct lttng_type *nested_type;
+	uint8_t *ptr;
+	bool signedness;
+	int ret, i;
+
+	ret = lttng_msgpack_begin_array(writer, output->u.sequence.nr_elem);
+	if (ret) {
+		WARN_ON_ONCE(1);
+		goto end;
+	}
+
+	ptr = (uint8_t *) output->u.sequence.ptr;
+	nested_type = output->u.sequence.nested_type;
+	switch (nested_type->atype) {
+	case atype_integer:
+		integer_type = &nested_type->u.integer;
+		break;
+	case atype_enum_nestable:
+		/* Treat enumeration as an integer. */
+		integer_type = &nested_type->u.enum_nestable.container_type->u.integer;
+		break;
+	default:
+		/*
+		 * Capture of array of non-integer is not supported. Fix: bail
+		 * out here — falling through dereferenced the NULL
+		 * integer_type below.
+		 */
+		WARN_ON(1);
+		ret = -1;
+		goto end;
+	}
+	signedness = integer_type->signedness;
+	for (i = 0; i < output->u.sequence.nr_elem; i++) {
+		if (signedness) {
+			ret = lttng_msgpack_write_signed_integer(writer,
+				capture_sequence_element_signed(ptr, integer_type));
+			if (ret) {
+				WARN_ON_ONCE(1);
+				goto end;
+			}
+		} else {
+			ret = lttng_msgpack_write_unsigned_integer(writer,
+				capture_sequence_element_unsigned(ptr, integer_type));
+			if (ret) {
+				WARN_ON_ONCE(1);
+				goto end;
+			}
+		}
+
+		/*
+		 * We assume that alignment is smaller or equal to the size.
+		 * This currently holds true but if it changes in the future,
+		 * we will want to change the pointer arithmetics below to
+		 * take into account that the next element might be further
+		 * away.
+		 */
+		WARN_ON(integer_type->alignment > integer_type->size);
+
+		/* Size is in number of bits. */
+		ptr += (integer_type->size / CHAR_BIT);
+	}
+
+	ret = lttng_msgpack_end_array(writer);
+	if (ret)
+		WARN_ON_ONCE(1);
+end:
+	return ret;
+}
+
+/*
+ * Append one interpreter output value to the notification's capture
+ * buffer, dispatching on its runtime type.
+ */
+static
+int notification_append_capture(
+		struct lttng_trigger_notification *notif,
+		struct lttng_interpreter_output *output)
+{
+	struct lttng_msgpack_writer *writer = &notif->writer;
+	int ret;
+
+	switch (output->type) {
+	case LTTNG_INTERPRETER_TYPE_S64:
+		ret = lttng_msgpack_write_signed_integer(writer, output->u.s);
+		break;
+	case LTTNG_INTERPRETER_TYPE_U64:
+		ret = lttng_msgpack_write_unsigned_integer(writer, output->u.u);
+		break;
+	case LTTNG_INTERPRETER_TYPE_STRING:
+		ret = lttng_msgpack_write_str(writer, output->u.str.str);
+		break;
+	case LTTNG_INTERPRETER_TYPE_SEQUENCE:
+		ret = capture_sequence(writer, output);
+		break;
+	case LTTNG_INTERPRETER_TYPE_SIGNED_ENUM:
+	case LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM:
+		ret = capture_enum(writer, output);
+		break;
+	default:
+		WARN_ON(1);
+		return -1;
+	}
+
+	if (ret)
+		WARN_ON_ONCE(1);
+	return ret;
+}
+
+/* Record a nil entry for a capture whose bytecode evaluation failed. */
+static
+int notification_append_empty_capture(
+		struct lttng_trigger_notification *notif)
+{
+	int ret;
+
+	ret = lttng_msgpack_write_nil(&notif->writer);
+	WARN_ON_ONCE(ret);
+	return ret;
+}
+
+/*
+ * Prepare a notification for sending: when the trigger has captures,
+ * initialize the msgpack writer over the on-stack capture buffer and open
+ * the top-level array that will hold one entry per capture bytecode.
+ * Returns 0 on success, non-zero if the array header cannot be written.
+ */
+static
+int notification_init(struct lttng_trigger_notification *notif,
+		struct lttng_trigger *trigger)
+{
+	struct lttng_msgpack_writer *writer = &notif->writer;
+	int ret = 0;
+
+	notif->has_captures = false;
+
+	if (trigger->num_captures > 0) {
+		lttng_msgpack_writer_init(writer, notif->capture_buf,
+				CAPTURE_BUFFER_SIZE);
+
+		ret = lttng_msgpack_begin_array(writer, trigger->num_captures);
+		if (ret) {
+			WARN_ON_ONCE(1);
+			goto end;
+		}
+
+		/* Only set once the array header is in the buffer. */
+		notif->has_captures = true;
+	}
+
+end:
+	return ret;
+}
+
+/*
+ * Reserve space in the trigger group's ring buffer and emit the
+ * notification header followed by the raw msgpack capture payload, then
+ * wake up readers.
+ */
+static
+void notification_send(struct lttng_trigger_notification *notif,
+		struct lttng_trigger *trigger)
+{
+	struct lttng_trigger_group *trigger_group = trigger->group;
+	struct lib_ring_buffer_ctx ctx;
+	struct lttng_kernel_trigger_notification kernel_notif;
+	size_t capture_buffer_content_len = 0;
+	size_t reserve_size = sizeof(kernel_notif);
+	int ret;
+
+	kernel_notif.id = trigger->id;
+
+	if (notif->has_captures)
+		capture_buffer_content_len =
+			notif->writer.write_pos - notif->writer.buffer;
+
+	WARN_ON_ONCE(capture_buffer_content_len > CAPTURE_BUFFER_SIZE);
+
+	reserve_size += capture_buffer_content_len;
+	kernel_notif.capture_buf_size = capture_buffer_content_len;
+
+	lib_ring_buffer_ctx_init(&ctx, trigger_group->chan, NULL, reserve_size,
+			lttng_alignof(kernel_notif), -1);
+	ret = trigger_group->ops->event_reserve(&ctx, 0);
+	if (ret < 0) {
+		//TODO: error handling with counter maps
+		//silently drop for now.
+		WARN_ON_ONCE(1);
+		return;
+	}
+	lib_ring_buffer_align_ctx(&ctx, lttng_alignof(kernel_notif));
+
+	/* Write the notif structure. */
+	trigger_group->ops->event_write(&ctx, &kernel_notif,
+			sizeof(kernel_notif));
+
+	/*
+	 * Write the capture buffer. No need to realign, as the below is a
+	 * raw byte buffer.
+	 */
+	trigger_group->ops->event_write(&ctx, notif->capture_buf,
+			capture_buffer_content_len);
+
+	trigger_group->ops->event_commit(&ctx);
+	irq_work_queue(&trigger_group->wakeup_pending);
+}
+
+/*
+ * Entry point called from instrumented sites when a trigger fires:
+ * evaluate all capture bytecodes against the interpreter stack data, then
+ * emit the notification (with the capture payload) toward the sessiond.
+ */
+void lttng_trigger_notification_send(struct lttng_trigger *trigger,
+		struct lttng_probe_ctx *lttng_probe_ctx,
+		const char *stack_data)
+{
+	struct lttng_trigger_notification notif = {0};
+	int ret;
+
+	if (unlikely(!READ_ONCE(trigger->enabled)))
+		return;
+
+	ret = notification_init(&notif, trigger);
+	if (ret) {
+		WARN_ON_ONCE(1);
+		goto end;
+	}
+
+	if (unlikely(!list_empty(&trigger->capture_bytecode_runtime_head))) {
+		struct lttng_bytecode_runtime *capture_bc_runtime;
+
+		/*
+		 * Iterate over all the capture bytecodes. If the interpreter
+		 * functions returns successfully, append the value of the
+		 * `output` parameter to the capture buffer. If the interpreter
+		 * fails, append an empty capture to the buffer.
+		 */
+		list_for_each_entry(capture_bc_runtime,
+				&trigger->capture_bytecode_runtime_head, node) {
+			struct lttng_interpreter_output output;
+
+			if (capture_bc_runtime->interpreter_funcs.capture(capture_bc_runtime,
+					lttng_probe_ctx, stack_data, &output) & LTTNG_INTERPRETER_RECORD_FLAG)
+				ret = notification_append_capture(&notif, &output);
+			else
+				ret = notification_append_empty_capture(&notif);
+
+			if (ret)
+				/* Fix: printk messages must end with a newline. */
+				printk(KERN_WARNING "Error appending capture to notification\n");
+		}
+	}
+
+	/*
+	 * Send the notification (including the capture buffer) to the
+	 * sessiond.
+	 */
+	notification_send(&notif, trigger);
+end:
+	return;
+}
index a2474d0d6e9da08c2a8518bf3f86eeaf1dd0be9b..8f66e91a81d41981fe84ce0d1496c7a3ed38b6d5 100644 (file)
@@ -18,7 +18,7 @@
 #include <blacklist/kprobes.h>
 
 static
-int lttng_kprobes_handler_pre(struct kprobe *p, struct pt_regs *regs)
+int lttng_kprobes_event_handler_pre(struct kprobe *p, struct pt_regs *regs)
 {
        struct lttng_event *event =
                container_of(p, struct lttng_event, u.kprobe.kp);
@@ -49,6 +49,20 @@ int lttng_kprobes_handler_pre(struct kprobe *p, struct pt_regs *regs)
        return 0;
 }
 
+static
+int lttng_kprobes_trigger_handler_pre(struct kprobe *p, struct pt_regs *regs)
+{
+       struct lttng_trigger *trigger =
+               container_of(p, struct lttng_trigger, u.kprobe.kp);
+
+       if (unlikely(!READ_ONCE(trigger->enabled)))
+               return 0;
+
+       trigger->send_notification(trigger, NULL, NULL);
+
+       return 0;
+}
+
 /*
  * Create event description
  */
@@ -94,11 +108,41 @@ error_str:
        return ret;
 }
 
-int lttng_kprobes_register(const char *name,
-                          const char *symbol_name,
+/*
+ * Create trigger description
+ */
+static
+int lttng_create_kprobe_trigger(const char *name, struct lttng_trigger *trigger)
+{
+       struct lttng_event_desc *desc;
+       int ret;
+
+       desc = kzalloc(sizeof(*trigger->desc), GFP_KERNEL);
+       if (!desc)
+               return -ENOMEM;
+       desc->name = kstrdup(name, GFP_KERNEL);
+       if (!desc->name) {
+               ret = -ENOMEM;
+               goto error_str;
+       }
+       desc->nr_fields = 0;
+
+       desc->owner = THIS_MODULE;
+       trigger->desc = desc;
+
+       return 0;
+
+error_str:
+       kfree(desc);
+       return ret;
+}
+
+static
+int _lttng_kprobes_register(const char *symbol_name,
                           uint64_t offset,
                           uint64_t addr,
-                          struct lttng_event *event)
+                          struct lttng_kprobe *lttng_kp,
+                          kprobe_pre_handler_t pre_handler)
 {
        int ret;
 
@@ -106,26 +150,24 @@ int lttng_kprobes_register(const char *name,
        if (symbol_name[0] == '\0')
                symbol_name = NULL;
 
-       ret = lttng_create_kprobe_event(name, event);
-       if (ret)
-               goto error;
-       memset(&event->u.kprobe.kp, 0, sizeof(event->u.kprobe.kp));
-       event->u.kprobe.kp.pre_handler = lttng_kprobes_handler_pre;
+       memset(&lttng_kp->kp, 0, sizeof(lttng_kp->kp));
+       lttng_kp->kp.pre_handler = pre_handler;
+
        if (symbol_name) {
-               event->u.kprobe.symbol_name =
+               lttng_kp->symbol_name =
                        kzalloc(LTTNG_KERNEL_SYM_NAME_LEN * sizeof(char),
                                GFP_KERNEL);
-               if (!event->u.kprobe.symbol_name) {
+               if (!lttng_kp->symbol_name) {
                        ret = -ENOMEM;
                        goto name_error;
                }
-               memcpy(event->u.kprobe.symbol_name, symbol_name,
+               memcpy(lttng_kp->symbol_name, symbol_name,
                       LTTNG_KERNEL_SYM_NAME_LEN * sizeof(char));
-               event->u.kprobe.kp.symbol_name =
-                       event->u.kprobe.symbol_name;
+               lttng_kp->kp.symbol_name = lttng_kp->symbol_name;
        }
-       event->u.kprobe.kp.offset = offset;
-       event->u.kprobe.kp.addr = (void *) (unsigned long) addr;
+
+       lttng_kp->kp.offset = offset;
+       lttng_kp->kp.addr = (void *) (unsigned long) addr;
 
        /*
         * Ensure the memory we just allocated don't trigger page faults.
@@ -134,36 +176,99 @@ int lttng_kprobes_register(const char *name,
         */
        wrapper_vmalloc_sync_mappings();
 
-       ret = register_kprobe(&event->u.kprobe.kp);
+       ret = register_kprobe(&lttng_kp->kp);
        if (ret)
                goto register_error;
+
        return 0;
 
 register_error:
-       kfree(event->u.kprobe.symbol_name);
+       kfree(lttng_kp->symbol_name);
 name_error:
+       return ret;
+}
+
+int lttng_kprobes_register_event(const char *name,
+                          const char *symbol_name,
+                          uint64_t offset,
+                          uint64_t addr,
+                          struct lttng_event *event)
+{
+       int ret;
+
+       ret = lttng_create_kprobe_event(name, event);
+       if (ret)
+               goto error;
+
+       ret = _lttng_kprobes_register(symbol_name, offset, addr,
+               &event->u.kprobe, lttng_kprobes_event_handler_pre);
+       if (ret)
+               goto register_error;
+
+       return 0;
+
+register_error:
        kfree(event->desc->fields);
        kfree(event->desc->name);
        kfree(event->desc);
 error:
        return ret;
 }
-EXPORT_SYMBOL_GPL(lttng_kprobes_register);
+EXPORT_SYMBOL_GPL(lttng_kprobes_register_event);
 
-void lttng_kprobes_unregister(struct lttng_event *event)
+int lttng_kprobes_register_trigger(const char *symbol_name,
+                          uint64_t offset,
+                          uint64_t addr,
+                          struct lttng_trigger *trigger)
+{
+       int ret;
+       ret = lttng_create_kprobe_trigger(symbol_name, trigger);
+       if (ret)
+               goto error;
+
+       ret = _lttng_kprobes_register(symbol_name, offset, addr,
+               &trigger->u.kprobe, lttng_kprobes_trigger_handler_pre);
+       if (ret)
+               goto register_error;
+
+       return 0;
+
+register_error:
+       kfree(trigger->desc->name);
+       kfree(trigger->desc);
+error:
+       return ret;
+}
+EXPORT_SYMBOL_GPL(lttng_kprobes_register_trigger);
+
+void lttng_kprobes_unregister_event(struct lttng_event *event)
 {
        unregister_kprobe(&event->u.kprobe.kp);
 }
-EXPORT_SYMBOL_GPL(lttng_kprobes_unregister);
+EXPORT_SYMBOL_GPL(lttng_kprobes_unregister_event);
+
+void lttng_kprobes_unregister_trigger(struct lttng_trigger *trigger)
+{
+       unregister_kprobe(&trigger->u.kprobe.kp);
+}
+EXPORT_SYMBOL_GPL(lttng_kprobes_unregister_trigger);
 
-void lttng_kprobes_destroy_private(struct lttng_event *event)
+void lttng_kprobes_destroy_event_private(struct lttng_event *event)
 {
        kfree(event->u.kprobe.symbol_name);
        kfree(event->desc->fields);
        kfree(event->desc->name);
        kfree(event->desc);
 }
-EXPORT_SYMBOL_GPL(lttng_kprobes_destroy_private);
+EXPORT_SYMBOL_GPL(lttng_kprobes_destroy_event_private);
+
+void lttng_kprobes_destroy_trigger_private(struct lttng_trigger *trigger)
+{
+       kfree(trigger->u.kprobe.symbol_name);
+       kfree(trigger->desc->name);
+       kfree(trigger->desc);
+}
+EXPORT_SYMBOL_GPL(lttng_kprobes_destroy_trigger_private);
 
 MODULE_LICENSE("GPL and additional rights");
 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
index c0f6e7c31c2a81c749735de4094637a75294b757..959d4ccf4acfde9c5c55777f3b8621e7b81d73f4 100644 (file)
 #include <wrapper/vmalloc.h>
 
 static
-int lttng_uprobes_handler_pre(struct uprobe_consumer *uc, struct pt_regs *regs)
+int lttng_uprobes_event_handler_pre(struct uprobe_consumer *uc, struct pt_regs *regs)
 {
        struct lttng_uprobe_handler *uprobe_handler =
                container_of(uc, struct lttng_uprobe_handler, up_consumer);
-       struct lttng_event *event = uprobe_handler->event;
+       struct lttng_event *event = uprobe_handler->u.event;
        struct lttng_probe_ctx lttng_probe_ctx = {
                .event = event,
                .interruptible = !lttng_regs_irqs_disabled(regs),
@@ -63,6 +63,20 @@ int lttng_uprobes_handler_pre(struct uprobe_consumer *uc, struct pt_regs *regs)
        return 0;
 }
 
+static
+int lttng_uprobes_trigger_handler_pre(struct uprobe_consumer *uc, struct pt_regs *regs)
+{
+       struct lttng_uprobe_handler *uprobe_handler =
+               container_of(uc, struct lttng_uprobe_handler, up_consumer);
+       struct lttng_trigger *trigger = uprobe_handler->u.trigger;
+
+       if (unlikely(!READ_ONCE(trigger->enabled)))
+               return 0;
+
+       trigger->send_notification(trigger, NULL, NULL);
+       return 0;
+}
+
 /*
  * Create event description.
  */
@@ -111,6 +125,36 @@ error_str:
        return ret;
 }
 
+/*
+ * Create trigger description.
+ */
+static
+int lttng_create_uprobe_trigger(const char *name, struct lttng_trigger *trigger)
+{
+       struct lttng_event_desc *desc;
+       int ret;
+
+       desc = kzalloc(sizeof(*trigger->desc), GFP_KERNEL);
+       if (!desc)
+               return -ENOMEM;
+       desc->name = kstrdup(name, GFP_KERNEL);
+       if (!desc->name) {
+               ret = -ENOMEM;
+               goto error_str;
+       }
+
+       desc->nr_fields = 0;
+
+       desc->owner = THIS_MODULE;
+       trigger->desc = desc;
+
+       return 0;
+
+error_str:
+       kfree(desc);
+       return ret;
+}
+
 /*
  * Returns the inode struct from the current task and an fd. The inode is
  * grabbed by this function and must be put once we are done with it using
@@ -142,13 +186,17 @@ error:
        return inode;
 }
 
-int lttng_uprobes_add_callsite(struct lttng_event *event,
-       struct lttng_kernel_event_callsite __user *callsite)
+
+static
+int lttng_uprobes_add_callsite(struct lttng_uprobe *uprobe,
+       struct lttng_kernel_event_callsite __user *callsite,
+       int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs),
+       void *priv_data)
 {
        int ret = 0;
        struct lttng_uprobe_handler *uprobe_handler;
 
-       if (!event) {
+       if (!priv_data) {
                ret = -EINVAL;
                goto end;
        }
@@ -163,25 +211,25 @@ int lttng_uprobes_add_callsite(struct lttng_event *event,
        /* Ensure the memory we just allocated don't trigger page faults. */
        wrapper_vmalloc_sync_mappings();
 
-       uprobe_handler->event = event;
-       uprobe_handler->up_consumer.handler = lttng_uprobes_handler_pre;
+       uprobe_handler->u.event = priv_data;
+       uprobe_handler->up_consumer.handler = handler;
 
        ret = copy_from_user(&uprobe_handler->offset, &callsite->u.uprobe.offset, sizeof(uint64_t));
        if (ret) {
                goto register_error;
        }
 
-       ret = wrapper_uprobe_register(event->u.uprobe.inode,
+       ret = wrapper_uprobe_register(uprobe->inode,
                      uprobe_handler->offset, &uprobe_handler->up_consumer);
        if (ret) {
                printk(KERN_WARNING "Error registering probe on inode %lu "
-                      "and offset 0x%llx\n", event->u.uprobe.inode->i_ino,
+                      "and offset 0x%llx\n", uprobe->inode->i_ino,
                       uprobe_handler->offset);
                ret = -1;
                goto register_error;
        }
 
-       list_add(&uprobe_handler->node, &event->u.uprobe.head);
+       list_add(&uprobe_handler->node, &uprobe->head);
 
        return ret;
 
@@ -190,37 +238,89 @@ register_error:
 end:
        return ret;
 }
-EXPORT_SYMBOL_GPL(lttng_uprobes_add_callsite);
 
-int lttng_uprobes_register(const char *name, int fd, struct lttng_event *event)
+int lttng_uprobes_event_add_callsite(struct lttng_event *event,
+       struct lttng_kernel_event_callsite __user *callsite)
+{
+       return lttng_uprobes_add_callsite(&event->u.uprobe, callsite,
+               lttng_uprobes_event_handler_pre, event);
+}
+EXPORT_SYMBOL_GPL(lttng_uprobes_event_add_callsite);
+
+int lttng_uprobes_trigger_add_callsite(struct lttng_trigger *trigger,
+       struct lttng_kernel_event_callsite __user *callsite)
+{
+       return lttng_uprobes_add_callsite(&trigger->u.uprobe, callsite,
+               lttng_uprobes_trigger_handler_pre, trigger);
+}
+EXPORT_SYMBOL_GPL(lttng_uprobes_trigger_add_callsite);
+
+static
+int lttng_uprobes_register(struct lttng_uprobe *uprobe, int fd)
 {
        int ret = 0;
        struct inode *inode;
 
-       ret = lttng_create_uprobe_event(name, event);
-       if (ret)
-               goto error;
-
        inode = get_inode_from_fd(fd);
        if (!inode) {
                printk(KERN_WARNING "Cannot get inode from fd\n");
                ret = -EBADF;
                goto inode_error;
        }
-       event->u.uprobe.inode = inode;
-       INIT_LIST_HEAD(&event->u.uprobe.head);
+       uprobe->inode = inode;
+       INIT_LIST_HEAD(&uprobe->head);
+
+inode_error:
+       return ret;
+}
+
+int lttng_uprobes_register_event(const char *name, int fd, struct lttng_event *event)
+{
+       int ret = 0;
+
+       ret = lttng_create_uprobe_event(name, event);
+       if (ret)
+               goto error;
+
+       ret = lttng_uprobes_register(&event->u.uprobe, fd);
+       if (ret)
+               goto register_error;
 
        return 0;
 
-inode_error:
+register_error:
        kfree(event->desc->name);
        kfree(event->desc);
 error:
        return ret;
 }
-EXPORT_SYMBOL_GPL(lttng_uprobes_register);
+EXPORT_SYMBOL_GPL(lttng_uprobes_register_event);
 
-void lttng_uprobes_unregister(struct lttng_event *event)
+int lttng_uprobes_register_trigger(const char *name, int fd,
+               struct lttng_trigger *trigger)
+{
+       int ret = 0;
+
+       ret = lttng_create_uprobe_trigger(name, trigger);
+       if (ret)
+               goto error;
+
+       ret = lttng_uprobes_register(&trigger->u.uprobe, fd);
+       if (ret)
+               goto register_error;
+
+       return 0;
+
+register_error:
+       kfree(trigger->desc->name);
+       kfree(trigger->desc);
+error:
+       return ret;
+}
+EXPORT_SYMBOL_GPL(lttng_uprobes_register_trigger);
+
+static
+void lttng_uprobes_unregister(struct inode *inode, struct list_head *head)
 {
        struct lttng_uprobe_handler *iter, *tmp;
 
@@ -228,22 +328,41 @@ void lttng_uprobes_unregister(struct lttng_event *event)
         * Iterate over the list of handler, remove each handler from the list
         * and free the struct.
         */
-       list_for_each_entry_safe(iter, tmp, &event->u.uprobe.head, node) {
-               wrapper_uprobe_unregister(event->u.uprobe.inode, iter->offset,
-                       &iter->up_consumer);
+       list_for_each_entry_safe(iter, tmp, head, node) {
+               wrapper_uprobe_unregister(inode, iter->offset, &iter->up_consumer);
                list_del(&iter->node);
                kfree(iter);
        }
+
+}
+
+void lttng_uprobes_unregister_event(struct lttng_event *event)
+{
+       lttng_uprobes_unregister(event->u.uprobe.inode, &event->u.uprobe.head);
 }
-EXPORT_SYMBOL_GPL(lttng_uprobes_unregister);
+EXPORT_SYMBOL_GPL(lttng_uprobes_unregister_event);
 
-void lttng_uprobes_destroy_private(struct lttng_event *event)
+void lttng_uprobes_unregister_trigger(struct lttng_trigger *trigger)
+{
+       lttng_uprobes_unregister(trigger->u.uprobe.inode, &trigger->u.uprobe.head);
+}
+EXPORT_SYMBOL_GPL(lttng_uprobes_unregister_trigger);
+
+void lttng_uprobes_destroy_event_private(struct lttng_event *event)
 {
        iput(event->u.uprobe.inode);
        kfree(event->desc->name);
        kfree(event->desc);
 }
-EXPORT_SYMBOL_GPL(lttng_uprobes_destroy_private);
+EXPORT_SYMBOL_GPL(lttng_uprobes_destroy_event_private);
+
+void lttng_uprobes_destroy_trigger_private(struct lttng_trigger *trigger)
+{
+       iput(trigger->u.uprobe.inode);
+       kfree(trigger->desc->name);
+       kfree(trigger->desc);
+}
+EXPORT_SYMBOL_GPL(lttng_uprobes_destroy_trigger_private);
 
 MODULE_LICENSE("GPL and additional rights");
 MODULE_AUTHOR("Yannick Brosseau");
This page took 0.234924 seconds and 5 git commands to generate.