--- /dev/null
+capture
+trigger error counter
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * counter/config.h
+ *
+ * LTTng Counters Configuration
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_COUNTER_CONFIG_H
+#define _LTTNG_COUNTER_CONFIG_H
+
+#include <linux/types.h>
+#include <linux/percpu.h>
+
+/*
+ * Allocation flags: a counter may be backed by per-CPU buffers, a single
+ * global buffer, or both (values are bits and may be OR'd together).
+ */
+enum lib_counter_config_alloc {
+	COUNTER_ALLOC_PER_CPU = (1 << 0),
+	COUNTER_ALLOC_GLOBAL = (1 << 1),
+};
+
+/* Synchronization scheme used when updating a counter slot. */
+enum lib_counter_config_sync {
+	COUNTER_SYNC_PER_CPU,	/* CPU-local update (cmpxchg_local). */
+	COUNTER_SYNC_GLOBAL,	/* Fully synchronized update (cmpxchg). */
+};
+
+/* Static configuration of a counter instance. */
+struct lib_counter_config {
+	u32 alloc;	/* enum lib_counter_config_alloc flags */
+	enum lib_counter_config_sync sync;
+	enum {
+		COUNTER_ARITHMETIC_MODULAR,
+		COUNTER_ARITHMETIC_SATURATE,	/* TODO */
+	} arithmetic;
+	/* Size of one counter slot, in bytes. */
+	enum {
+		COUNTER_SIZE_8_BIT = 1,
+		COUNTER_SIZE_16_BIT = 2,
+		COUNTER_SIZE_32_BIT = 4,
+		COUNTER_SIZE_64_BIT = 8,
+	} counter_size;
+};
+
+#endif /* _LTTNG_COUNTER_CONFIG_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * counter/counter-api.h
+ *
+ * LTTng Counters API, requiring counter/config.h
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_COUNTER_API_H
+#define _LTTNG_COUNTER_API_H
+
+#include <linux/types.h>
+#include <linux/percpu.h>
+#include <linux/bitops.h>
+#include <counter/counter.h>
+#include <counter/counter-internal.h>
+
+/*
+ * __lttng_counter_add: add @v to the counter slot selected by
+ * @dimension_indexes, within the layout chosen by @alloc, using the
+ * synchronization scheme @sync.
+ *
+ * Using unsigned arithmetic for the additions because unsigned overflow
+ * is defined behavior in C (signed overflow is undefined).
+ *
+ * With COUNTER_SYNC_PER_CPU, when the updated per-CPU value crosses the
+ * global_sum_step threshold in either direction, half a step is
+ * subtracted from the per-CPU slot and reported through @remainder so
+ * the caller can fold it into the global counter.
+ *
+ * Returns 0 on success, -EOVERFLOW when indexes are out of bounds,
+ * -EINVAL on unknown @alloc or counter size.
+ */
+static inline int __lttng_counter_add(const struct lib_counter_config *config,
+				      enum lib_counter_config_alloc alloc,
+				      enum lib_counter_config_sync sync,
+				      struct lib_counter *counter,
+				      const size_t *dimension_indexes, int64_t v,
+				      int64_t *remainder)
+{
+	size_t index;
+	bool overflow = false, underflow = false;
+	struct lib_counter_layout *layout;
+	int64_t move_sum = 0;
+
+	if (unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
+		return -EOVERFLOW;
+	index = lttng_counter_get_index(config, counter, dimension_indexes);
+
+	/* Select the current CPU's layout, or the global layout. */
+	switch (alloc) {
+	case COUNTER_ALLOC_PER_CPU:
+		layout = per_cpu_ptr(counter->percpu_counters, smp_processor_id());
+		break;
+	case COUNTER_ALLOC_GLOBAL:
+		layout = &counter->global_counters;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (config->counter_size) {
+	case COUNTER_SIZE_8_BIT:
+	{
+		int8_t *int_p = (int8_t *) layout->counters + index;
+		int8_t old, n, res;
+		int8_t global_sum_step = counter->global_sum_step.s8;
+
+		res = *int_p;
+		switch (sync) {
+		case COUNTER_SYNC_PER_CPU:
+		{
+			do {
+				move_sum = 0;
+				old = res;
+				n = (int8_t) ((uint8_t) old + (uint8_t) v);
+				/*
+				 * When the new value crosses the sum-step
+				 * threshold, carve half a step out of the
+				 * per-CPU slot; the caller moves it to the
+				 * global counter via @remainder.
+				 */
+				if (unlikely(n > (int8_t) global_sum_step))
+					move_sum = (int8_t) global_sum_step / 2;
+				else if (unlikely(n < -(int8_t) global_sum_step))
+					move_sum = -((int8_t) global_sum_step / 2);
+				n -= move_sum;
+				/* Retry if another update won the race. */
+				res = cmpxchg_local(int_p, old, n);
+			} while (old != res);
+			break;
+		}
+		case COUNTER_SYNC_GLOBAL:
+		{
+			do {
+				old = res;
+				n = (int8_t) ((uint8_t) old + (uint8_t) v);
+				res = cmpxchg(int_p, old, n);
+			} while (old != res);
+			break;
+		}
+		}
+		/*
+		 * |v| >= type range guarantees a wrap even when the n/old
+		 * comparison cannot observe it.
+		 */
+		if (v > 0 && (v >= U8_MAX || n < old))
+			overflow = true;
+		else if (v < 0 && (v <= -U8_MAX || n > old))
+			underflow = true;
+		break;
+	}
+	case COUNTER_SIZE_16_BIT:
+	{
+		/* Same algorithm as the 8-bit case, with 16-bit types. */
+		int16_t *int_p = (int16_t *) layout->counters + index;
+		int16_t old, n, res;
+		int16_t global_sum_step = counter->global_sum_step.s16;
+
+		res = *int_p;
+		switch (sync) {
+		case COUNTER_SYNC_PER_CPU:
+		{
+			do {
+				move_sum = 0;
+				old = res;
+				n = (int16_t) ((uint16_t) old + (uint16_t) v);
+				if (unlikely(n > (int16_t) global_sum_step))
+					move_sum = (int16_t) global_sum_step / 2;
+				else if (unlikely(n < -(int16_t) global_sum_step))
+					move_sum = -((int16_t) global_sum_step / 2);
+				n -= move_sum;
+				res = cmpxchg_local(int_p, old, n);
+			} while (old != res);
+			break;
+		}
+		case COUNTER_SYNC_GLOBAL:
+		{
+			do {
+				old = res;
+				n = (int16_t) ((uint16_t) old + (uint16_t) v);
+				res = cmpxchg(int_p, old, n);
+			} while (old != res);
+			break;
+		}
+		}
+		if (v > 0 && (v >= U16_MAX || n < old))
+			overflow = true;
+		else if (v < 0 && (v <= -U16_MAX || n > old))
+			underflow = true;
+		break;
+	}
+	case COUNTER_SIZE_32_BIT:
+	{
+		/* Same algorithm as the 8-bit case, with 32-bit types. */
+		int32_t *int_p = (int32_t *) layout->counters + index;
+		int32_t old, n, res;
+		int32_t global_sum_step = counter->global_sum_step.s32;
+
+		res = *int_p;
+		switch (sync) {
+		case COUNTER_SYNC_PER_CPU:
+		{
+			do {
+				move_sum = 0;
+				old = res;
+				n = (int32_t) ((uint32_t) old + (uint32_t) v);
+				if (unlikely(n > (int32_t) global_sum_step))
+					move_sum = (int32_t) global_sum_step / 2;
+				else if (unlikely(n < -(int32_t) global_sum_step))
+					move_sum = -((int32_t) global_sum_step / 2);
+				n -= move_sum;
+				res = cmpxchg_local(int_p, old, n);
+			} while (old != res);
+			break;
+		}
+		case COUNTER_SYNC_GLOBAL:
+		{
+			do {
+				old = res;
+				n = (int32_t) ((uint32_t) old + (uint32_t) v);
+				res = cmpxchg(int_p, old, n);
+			} while (old != res);
+			break;
+		}
+		}
+		if (v > 0 && (v >= U32_MAX || n < old))
+			overflow = true;
+		else if (v < 0 && (v <= -U32_MAX || n > old))
+			underflow = true;
+		break;
+	}
+#if BITS_PER_LONG == 64
+	/* 64-bit counters are only available on 64-bit architectures. */
+	case COUNTER_SIZE_64_BIT:
+	{
+		/* Same algorithm as the 8-bit case, with 64-bit types. */
+		int64_t *int_p = (int64_t *) layout->counters + index;
+		int64_t old, n, res;
+		int64_t global_sum_step = counter->global_sum_step.s64;
+
+		res = *int_p;
+		switch (sync) {
+		case COUNTER_SYNC_PER_CPU:
+		{
+			do {
+				move_sum = 0;
+				old = res;
+				n = (int64_t) ((uint64_t) old + (uint64_t) v);
+				if (unlikely(n > (int64_t) global_sum_step))
+					move_sum = (int64_t) global_sum_step / 2;
+				else if (unlikely(n < -(int64_t) global_sum_step))
+					move_sum = -((int64_t) global_sum_step / 2);
+				n -= move_sum;
+				res = cmpxchg_local(int_p, old, n);
+			} while (old != res);
+			break;
+		}
+		case COUNTER_SYNC_GLOBAL:
+		{
+			do {
+				old = res;
+				n = (int64_t) ((uint64_t) old + (uint64_t) v);
+				res = cmpxchg(int_p, old, n);
+			} while (old != res);
+			break;
+		}
+		}
+		/* |v| cannot exceed the 64-bit range: n/old comparison suffices. */
+		if (v > 0 && n < old)
+			overflow = true;
+		else if (v < 0 && n > old)
+			underflow = true;
+		break;
+	}
+#endif
+	default:
+		return -EINVAL;
+	}
+	/*
+	 * Sticky per-slot overflow/underflow flags; test first so the
+	 * common already-set case skips the atomic write.
+	 */
+	if (unlikely(overflow && !test_bit(index, layout->overflow_bitmap)))
+		set_bit(index, layout->overflow_bitmap);
+	else if (unlikely(underflow && !test_bit(index, layout->underflow_bitmap)))
+		set_bit(index, layout->underflow_bitmap);
+	if (remainder)
+		*remainder = move_sum;
+	return 0;
+}
+
+/*
+ * Add @v to the current CPU's counter using the configured sync scheme,
+ * then fold any carved-out remainder (move_sum) into the global counter
+ * with full (global) synchronization.
+ */
+static inline int __lttng_counter_add_percpu(const struct lib_counter_config *config,
+					     struct lib_counter *counter,
+					     const size_t *dimension_indexes, int64_t v)
+{
+	int64_t move_sum;
+	int ret;
+
+	ret = __lttng_counter_add(config, COUNTER_ALLOC_PER_CPU, config->sync,
+				  counter, dimension_indexes, v, &move_sum);
+	if (unlikely(ret))
+		return ret;
+	/* Spill the amount removed from the per-CPU slot into the global sum. */
+	if (unlikely(move_sum))
+		return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, COUNTER_SYNC_GLOBAL,
+					   counter, dimension_indexes, move_sum, NULL);
+	return 0;
+}
+
+/* Add @v directly to the global counter using the configured sync scheme. */
+static inline int __lttng_counter_add_global(const struct lib_counter_config *config,
+					     struct lib_counter *counter,
+					     const size_t *dimension_indexes, int64_t v)
+{
+	return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, config->sync, counter,
+				   dimension_indexes, v, NULL);
+}
+
+/*
+ * lttng_counter_add: public entry point, dispatching on the allocation
+ * configuration. When both per-CPU and global buffers exist, updates go
+ * through the per-CPU path, which spills into the global counter.
+ */
+static inline int lttng_counter_add(const struct lib_counter_config *config,
+				    struct lib_counter *counter,
+				    const size_t *dimension_indexes, int64_t v)
+{
+	switch (config->alloc) {
+	case COUNTER_ALLOC_PER_CPU:	/* Fallthrough */
+	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+		return __lttng_counter_add_percpu(config, counter, dimension_indexes, v);
+	case COUNTER_ALLOC_GLOBAL:
+		return __lttng_counter_add_global(config, counter, dimension_indexes, v);
+	default:
+		return -EINVAL;
+	}
+}
+
+/* Increment the selected counter slot by one. */
+static inline int lttng_counter_inc(const struct lib_counter_config *config,
+				    struct lib_counter *counter,
+				    const size_t *dimension_indexes)
+{
+	return lttng_counter_add(config, counter, dimension_indexes, 1);
+}
+
+/* Decrement the selected counter slot by one. */
+static inline int lttng_counter_dec(const struct lib_counter_config *config,
+				    struct lib_counter *counter,
+				    const size_t *dimension_indexes)
+{
+	return lttng_counter_add(config, counter, dimension_indexes, -1);
+}
+
+#endif /* _LTTNG_COUNTER_API_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * counter/counter-internal.h
+ *
+ * LTTng Counters Internal Header
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_COUNTER_INTERNAL_H
+#define _LTTNG_COUNTER_INTERNAL_H
+
+#include <linux/types.h>
+#include <linux/percpu.h>
+#include <counter/counter-types.h>
+#include <counter/config.h>
+
+/*
+ * Check that each dimension index is below that dimension's max_nr_elem.
+ * Returns 0 when all indexes are valid, -EOVERFLOW otherwise.
+ * @config is currently unused here (presumably kept for API symmetry).
+ */
+static inline int lttng_counter_validate_indexes(const struct lib_counter_config *config,
+						 struct lib_counter *counter,
+						 const size_t *dimension_indexes)
+{
+	size_t nr_dimensions = counter->nr_dimensions, i;
+
+	for (i = 0; i < nr_dimensions; i++) {
+		if (unlikely(dimension_indexes[i] >= counter->dimensions[i].max_nr_elem))
+			return -EOVERFLOW;
+	}
+	return 0;
+}
+
+/*
+ * Flatten the n-dimensional @dimension_indexes into a linear slot index
+ * by applying each dimension's stride. Indexes are assumed validated
+ * (see lttng_counter_validate_indexes()).
+ */
+static inline size_t lttng_counter_get_index(const struct lib_counter_config *config,
+					     struct lib_counter *counter,
+					     const size_t *dimension_indexes)
+{
+	size_t nr_dimensions = counter->nr_dimensions, i;
+	size_t index = 0;
+
+	for (i = 0; i < nr_dimensions; i++) {
+		struct lib_counter_dimension *dimension = &counter->dimensions[i];
+		const size_t *dimension_index = &dimension_indexes[i];
+
+		index += *dimension_index * dimension->stride;
+	}
+	return index;
+}
+
+#endif /* _LTTNG_COUNTER_INTERNAL_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * counter/counter-types.h
+ *
+ * LTTng Counters Types
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_COUNTER_TYPES_H
+#define _LTTNG_COUNTER_TYPES_H
+
+#include <linux/types.h>
+#include <linux/percpu.h>
+#include <counter/config.h>
+
+struct lib_counter_dimension {
+	/*
+	 * Max. number of indexable elements.
+	 */
+	size_t max_nr_elem;
+	/*
+	 * The stride for a dimension is the multiplication factor which
+	 * should be applied to its index to take into account other
+	 * dimensions nested inside.
+	 */
+	size_t stride;
+};
+
+/* Backing storage for one counter instance (global, or one CPU). */
+struct lib_counter_layout {
+	void *counters;		/* Array of slots; slot size set by config. */
+	unsigned long *underflow_bitmap;	/* Sticky per-slot underflow flags. */
+	unsigned long *overflow_bitmap;		/* Sticky per-slot overflow flags. */
+};
+
+enum lib_counter_arithmetic {
+	LIB_COUNTER_ARITHMETIC_MODULAR,
+	LIB_COUNTER_ARITHMETIC_SATURATE,
+};
+
+/* Runtime state of a multi-dimensional counter. */
+struct lib_counter {
+	size_t nr_dimensions;
+	int64_t allocated_elem;
+	struct lib_counter_dimension *dimensions;
+	enum lib_counter_arithmetic arithmetic;
+	/* Saturation limits (for LIB_COUNTER_ARITHMETIC_SATURATE). */
+	union {
+		struct {
+			int32_t max, min;
+		} limits_32_bit;
+		struct {
+			int64_t max, min;
+		} limits_64_bit;
+	} saturation;
+	/* Per-CPU to global aggregation threshold, in the slot's type. */
+	union {
+		int8_t s8;
+		int16_t s16;
+		int32_t s32;
+		int64_t s64;
+	} global_sum_step;	/* 0 if unused */
+	struct lib_counter_config config;
+
+	struct lib_counter_layout global_counters;
+	struct lib_counter_layout __percpu *percpu_counters;
+};
+
+#endif /* _LTTNG_COUNTER_TYPES_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * counter/counter.h
+ *
+ * LTTng Counters API
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_COUNTER_H
+#define _LTTNG_COUNTER_H
+
+#include <linux/types.h>
+#include <linux/percpu.h>
+#include <counter/config.h>
+#include <counter/counter-types.h>
+
+/*
+ * Allocate a counter with @nr_dimensions dimensions; @max_nr_elem gives
+ * the max. number of elements for each dimension. @global_sum_step is
+ * the per-CPU to global aggregation threshold (0 if unused).
+ */
+struct lib_counter *lttng_counter_create(const struct lib_counter_config *config,
+					 size_t nr_dimensions,
+					 const size_t *max_nr_elem,
+					 int64_t global_sum_step);
+void lttng_counter_destroy(struct lib_counter *counter);
+
+/* Read one slot: a specific CPU's value if @cpu >= 0, global if @cpu == -1. */
+int lttng_counter_read(const struct lib_counter_config *config,
+		       struct lib_counter *counter,
+		       const size_t *dimension_indexes,
+		       int cpu, int64_t *value,
+		       bool *overflow, bool *underflow);
+/* Sum all per-CPU values plus the global aggregation counter. */
+int lttng_counter_aggregate(const struct lib_counter_config *config,
+			    struct lib_counter *counter,
+			    const size_t *dimension_indexes,
+			    int64_t *value,
+			    bool *overflow, bool *underflow);
+/* Reset one slot (and its overflow/underflow state). */
+int lttng_counter_clear(const struct lib_counter_config *config,
+			struct lib_counter *counter,
+			const size_t *dimension_indexes);
+
+#endif /* _LTTNG_COUNTER_H */
ctf_enum_auto("AUTO: EXPECT 28")
ctf_enum_range("RANGE: 101 TO 303", 101, 303)
ctf_enum_auto("AUTO: EXPECT 304")
+ ctf_enum_value("VALUE: -1", -1)
)
)
ctf_integer(int, intfield, anint)
ctf_integer_hex(int, intfield2, anint)
ctf_integer(long, longfield, anint)
+ ctf_integer(int, signedfield, -1)
ctf_integer_network(int, netintfield, netint)
ctf_integer_network_hex(int, netintfieldhex, netint)
ctf_array(long, arrfield1, values, 3)
ctf_enum(lttng_test_filter_event_enum, int, enum28, 28)
ctf_enum(lttng_test_filter_event_enum, int, enum202, 202)
ctf_enum(lttng_test_filter_event_enum, int, enum304, 304)
+ ctf_enum(lttng_test_filter_event_enum, int, enumnegative, -1)
)
)
#define UNKNOWN_SYSCALL_NRARGS 6
#undef TP_PROBE_CB
-#define TP_PROBE_CB(_template) &syscall_entry_probe
+#define TP_PROBE_CB(_template) &syscall_entry_event_probe
LTTNG_TRACEPOINT_EVENT(syscall_entry_unknown,
TP_PROTO(int id, unsigned long *args),
)
#undef TP_PROBE_CB
-#define TP_PROBE_CB(_template) &syscall_exit_probe
+#define TP_PROBE_CB(_template) &syscall_exit_event_probe
LTTNG_TRACEPOINT_EVENT(syscall_exit_unknown,
TP_PROTO(int id, long ret, unsigned long *args),
#define _LTTNG_ABI_H
#include <linux/fs.h>
+#include <linux/types.h>
/*
* Major/minor version of ABI exposed to lttng tools. Major number
uint64_t num_subbuf;
unsigned int switch_timer_interval; /* usecs */
unsigned int read_timer_interval; /* usecs */
- enum lttng_kernel_output output; /* splice, mmap */
+ uint32_t output; /* enum lttng_kernel_output (splice, mmap) */
int overwrite; /* 1: overwrite, 0: discard */
char padding[LTTNG_KERNEL_CHANNEL_PADDING];
} __attribute__((packed));
#define LTTNG_KERNEL_EVENT_PADDING2 LTTNG_KERNEL_SYM_NAME_LEN + 32
struct lttng_kernel_event {
char name[LTTNG_KERNEL_SYM_NAME_LEN]; /* event name */
- enum lttng_kernel_instrumentation instrumentation;
+ uint32_t instrumentation; /* enum lttng_kernel_instrumentation */
char padding[LTTNG_KERNEL_EVENT_PADDING1];
/* Per instrumentation type configuration */
} u;
} __attribute__((packed));
+#define LTTNG_KERNEL_TRIGGER_PADDING1 16
+#define LTTNG_KERNEL_TRIGGER_PADDING2 LTTNG_KERNEL_SYM_NAME_LEN + 32
+struct lttng_kernel_trigger {
+ uint64_t id;
+ uint64_t error_counter_index;
+ char name[LTTNG_KERNEL_SYM_NAME_LEN]; /* event name */
+ uint32_t instrumentation; /* enum lttng_kernel_instrumentation */
+ char padding[LTTNG_KERNEL_TRIGGER_PADDING1];
+
+ /* Per instrumentation type configuration */
+ union {
+ struct lttng_kernel_kretprobe kretprobe;
+ struct lttng_kernel_kprobe kprobe;
+ struct lttng_kernel_function_tracer ftrace;
+ struct lttng_kernel_uprobe uprobe;
+ char padding[LTTNG_KERNEL_TRIGGER_PADDING2];
+ } u;
+} __attribute__((packed));
+
+enum lttng_kernel_counter_arithmetic {
+ LTTNG_KERNEL_COUNTER_ARITHMETIC_MODULAR = 1,
+};
+
+enum lttng_kernel_counter_bitness {
+ LTTNG_KERNEL_COUNTER_BITNESS_32BITS = 1,
+ LTTNG_KERNEL_COUNTER_BITNESS_64BITS = 2,
+};
+
+struct lttng_kernel_counter_dimension {
+ uint64_t size;
+ uint64_t underflow_index;
+ uint64_t overflow_index;
+ uint8_t has_underflow;
+ uint8_t has_overflow;
+} __attribute__((packed));
+
+#define LTTNG_KERNEL_COUNTER_DIMENSION_MAX 8
+struct lttng_kernel_counter_conf {
+ uint32_t arithmetic; /* enum lttng_kernel_counter_arithmetic */
+ uint32_t bitness; /* enum lttng_kernel_counter_bitness */
+ uint32_t number_dimensions;
+ int64_t global_sum_step;
+ struct lttng_kernel_counter_dimension dimensions[LTTNG_KERNEL_COUNTER_DIMENSION_MAX];
+} __attribute__((packed));
+
+struct lttng_kernel_counter_value {
+ uint32_t number_dimensions;
+ uint64_t dimension_indexes[LTTNG_KERNEL_COUNTER_DIMENSION_MAX];
+ int64_t value;
+} __attribute__((packed));
+
+#define LTTNG_KERNEL_TRIGGER_NOTIFICATION_PADDING 32
+struct lttng_kernel_trigger_notification {
+ uint64_t id;
+ uint16_t capture_buf_size;
+ char padding[LTTNG_KERNEL_TRIGGER_NOTIFICATION_PADDING];
+} __attribute__((packed));
+
struct lttng_kernel_tracer_version {
uint32_t major;
uint32_t minor;
};
struct lttng_kernel_calibrate {
- enum lttng_kernel_calibrate_type type; /* type (input) */
+ uint32_t type; /* enum lttng_kernel_calibrate_type (input) */
} __attribute__((packed));
struct lttng_kernel_syscall_mask {
#define LTTNG_KERNEL_CONTEXT_PADDING1 16
#define LTTNG_KERNEL_CONTEXT_PADDING2 LTTNG_KERNEL_SYM_NAME_LEN + 32
struct lttng_kernel_context {
- enum lttng_kernel_context_type ctx;
+	uint32_t ctx;	/* enum lttng_kernel_context_type */
char padding[LTTNG_KERNEL_CONTEXT_PADDING1];
union {
char data[0];
} __attribute__((packed));
+#define LTTNG_KERNEL_CAPTURE_BYTECODE_MAX_LEN 65536
+struct lttng_kernel_capture_bytecode {
+ uint32_t len;
+ uint32_t reloc_offset;
+ uint64_t seqnum;
+ char data[0];
+} __attribute__((packed));
+
enum lttng_kernel_tracker_type {
LTTNG_KERNEL_TRACKER_UNKNOWN = -1,
};
struct lttng_kernel_tracker_args {
- enum lttng_kernel_tracker_type type;
+ uint32_t type; /* enum lttng_kernel_tracker_type */
int32_t id;
};
#define LTTNG_KERNEL_SYSCALL_LIST _IO(0xF6, 0x4A)
#define LTTNG_KERNEL_TRACER_ABI_VERSION \
_IOR(0xF6, 0x4B, struct lttng_kernel_tracer_abi_version)
+#define LTTNG_KERNEL_TRIGGER_GROUP_CREATE _IO(0xF6, 0x4C)
+
+/* Trigger group file descriptor ioctl */
+#define LTTNG_KERNEL_TRIGGER_GROUP_NOTIFICATION_FD \
+ _IO(0xF6, 0x30)
+
+#define LTTNG_KERNEL_TRIGGER_CREATE \
+ _IOW(0xF6, 0x31, struct lttng_kernel_trigger)
+
+#define LTTNG_KERNEL_CAPTURE _IO(0xF6, 0x32)
+
+#define LTTNG_KERNEL_COUNTER \
+ _IOW(0xF6, 0x33, struct lttng_kernel_counter_conf)
+
+#define LTTNG_KERNEL_COUNTER_VALUE \
+ _IOWR(0xF6, 0x34, struct lttng_kernel_counter_value)
/* Session FD ioctl */
/* lttng/abi-old.h reserve 0x50, 0x51, 0x52, and 0x53. */
--- /dev/null
+/* SPDX-License-Identifier: MIT
+ *
+ * lttng/filter-bytecode.h
+ *
+ * LTTng filter bytecode
+ *
+ * Copyright 2012-2016 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _FILTER_BYTECODE_H
+#define _FILTER_BYTECODE_H
+
+/*
+ * offsets are absolute from start of bytecode.
+ */
+
+/* Operand: reference to an event/context field. */
+struct field_ref {
+	/* Initially, symbol offset. After link, field offset. */
+	uint16_t offset;
+} __attribute__((packed));
+
+/* Operand: symbol to resolve at link time. */
+struct get_symbol {
+	/* Symbol offset. */
+	uint16_t offset;
+} __attribute__((packed));
+
+/* Operand: 16-bit array/sequence index. */
+struct get_index_u16 {
+	uint16_t index;
+} __attribute__((packed));
+
+/* Operand: 64-bit array/sequence index. */
+struct get_index_u64 {
+	uint64_t index;
+} __attribute__((packed));
+
+/* Operand: signed 64-bit immediate. */
+struct literal_numeric {
+	int64_t v;
+} __attribute__((packed));
+
+/* Operand: double-precision immediate. */
+struct literal_double {
+	double v;
+} __attribute__((packed));
+
+/* Operand: inline NUL-terminated string immediate (variable length). */
+struct literal_string {
+	char string[0];
+} __attribute__((packed));
+
+/*
+ * Bytecode instruction opcodes.
+ * NOTE(review): these numeric values appear in serialized bytecode, so
+ * renumbering or reordering existing entries would break compatibility —
+ * append new opcodes before NR_BYTECODE_OPS only.
+ */
+enum bytecode_op {
+	BYTECODE_OP_UNKNOWN = 0,
+
+	BYTECODE_OP_RETURN = 1,
+
+	/* binary */
+	BYTECODE_OP_MUL = 2,
+	BYTECODE_OP_DIV = 3,
+	BYTECODE_OP_MOD = 4,
+	BYTECODE_OP_PLUS = 5,
+	BYTECODE_OP_MINUS = 6,
+	BYTECODE_OP_BIT_RSHIFT = 7,
+	BYTECODE_OP_BIT_LSHIFT = 8,
+	BYTECODE_OP_BIT_AND = 9,
+	BYTECODE_OP_BIT_OR = 10,
+	BYTECODE_OP_BIT_XOR = 11,
+
+	/* binary comparators */
+	BYTECODE_OP_EQ = 12,
+	BYTECODE_OP_NE = 13,
+	BYTECODE_OP_GT = 14,
+	BYTECODE_OP_LT = 15,
+	BYTECODE_OP_GE = 16,
+	BYTECODE_OP_LE = 17,
+
+	/* string binary comparator: apply to */
+	BYTECODE_OP_EQ_STRING = 18,
+	BYTECODE_OP_NE_STRING = 19,
+	BYTECODE_OP_GT_STRING = 20,
+	BYTECODE_OP_LT_STRING = 21,
+	BYTECODE_OP_GE_STRING = 22,
+	BYTECODE_OP_LE_STRING = 23,
+
+	/* s64 binary comparator */
+	BYTECODE_OP_EQ_S64 = 24,
+	BYTECODE_OP_NE_S64 = 25,
+	BYTECODE_OP_GT_S64 = 26,
+	BYTECODE_OP_LT_S64 = 27,
+	BYTECODE_OP_GE_S64 = 28,
+	BYTECODE_OP_LE_S64 = 29,
+
+	/* double binary comparator */
+	BYTECODE_OP_EQ_DOUBLE = 30,
+	BYTECODE_OP_NE_DOUBLE = 31,
+	BYTECODE_OP_GT_DOUBLE = 32,
+	BYTECODE_OP_LT_DOUBLE = 33,
+	BYTECODE_OP_GE_DOUBLE = 34,
+	BYTECODE_OP_LE_DOUBLE = 35,
+
+	/* Mixed S64-double binary comparators */
+	BYTECODE_OP_EQ_DOUBLE_S64 = 36,
+	BYTECODE_OP_NE_DOUBLE_S64 = 37,
+	BYTECODE_OP_GT_DOUBLE_S64 = 38,
+	BYTECODE_OP_LT_DOUBLE_S64 = 39,
+	BYTECODE_OP_GE_DOUBLE_S64 = 40,
+	BYTECODE_OP_LE_DOUBLE_S64 = 41,
+
+	BYTECODE_OP_EQ_S64_DOUBLE = 42,
+	BYTECODE_OP_NE_S64_DOUBLE = 43,
+	BYTECODE_OP_GT_S64_DOUBLE = 44,
+	BYTECODE_OP_LT_S64_DOUBLE = 45,
+	BYTECODE_OP_GE_S64_DOUBLE = 46,
+	BYTECODE_OP_LE_S64_DOUBLE = 47,
+
+	/* unary */
+	BYTECODE_OP_UNARY_PLUS = 48,
+	BYTECODE_OP_UNARY_MINUS = 49,
+	BYTECODE_OP_UNARY_NOT = 50,
+	BYTECODE_OP_UNARY_PLUS_S64 = 51,
+	BYTECODE_OP_UNARY_MINUS_S64 = 52,
+	BYTECODE_OP_UNARY_NOT_S64 = 53,
+	BYTECODE_OP_UNARY_PLUS_DOUBLE = 54,
+	BYTECODE_OP_UNARY_MINUS_DOUBLE = 55,
+	BYTECODE_OP_UNARY_NOT_DOUBLE = 56,
+
+	/* logical */
+	BYTECODE_OP_AND = 57,
+	BYTECODE_OP_OR = 58,
+
+	/* load field ref */
+	BYTECODE_OP_LOAD_FIELD_REF = 59,
+	BYTECODE_OP_LOAD_FIELD_REF_STRING = 60,
+	BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE = 61,
+	BYTECODE_OP_LOAD_FIELD_REF_S64 = 62,
+	BYTECODE_OP_LOAD_FIELD_REF_DOUBLE = 63,
+
+	/* load immediate from operand */
+	BYTECODE_OP_LOAD_STRING = 64,
+	BYTECODE_OP_LOAD_S64 = 65,
+	BYTECODE_OP_LOAD_DOUBLE = 66,
+
+	/* cast */
+	BYTECODE_OP_CAST_TO_S64 = 67,
+	BYTECODE_OP_CAST_DOUBLE_TO_S64 = 68,
+	BYTECODE_OP_CAST_NOP = 69,
+
+	/* get context ref */
+	BYTECODE_OP_GET_CONTEXT_REF = 70,
+	BYTECODE_OP_GET_CONTEXT_REF_STRING = 71,
+	BYTECODE_OP_GET_CONTEXT_REF_S64 = 72,
+	BYTECODE_OP_GET_CONTEXT_REF_DOUBLE = 73,
+
+	/* load userspace field ref */
+	BYTECODE_OP_LOAD_FIELD_REF_USER_STRING = 74,
+	BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE = 75,
+
+	/*
+	 * load immediate star globbing pattern (literal string)
+	 * from immediate
+	 */
+	BYTECODE_OP_LOAD_STAR_GLOB_STRING = 76,
+
+	/* globbing pattern binary operator: apply to */
+	BYTECODE_OP_EQ_STAR_GLOB_STRING = 77,
+	BYTECODE_OP_NE_STAR_GLOB_STRING = 78,
+
+	/*
+	 * Instructions for recursive traversal through composed types.
+	 */
+	BYTECODE_OP_GET_CONTEXT_ROOT = 79,
+	BYTECODE_OP_GET_APP_CONTEXT_ROOT = 80,
+	BYTECODE_OP_GET_PAYLOAD_ROOT = 81,
+
+	BYTECODE_OP_GET_SYMBOL = 82,
+	BYTECODE_OP_GET_SYMBOL_FIELD = 83,
+	BYTECODE_OP_GET_INDEX_U16 = 84,
+	BYTECODE_OP_GET_INDEX_U64 = 85,
+
+	BYTECODE_OP_LOAD_FIELD = 86,
+	BYTECODE_OP_LOAD_FIELD_S8 = 87,
+	BYTECODE_OP_LOAD_FIELD_S16 = 88,
+	BYTECODE_OP_LOAD_FIELD_S32 = 89,
+	BYTECODE_OP_LOAD_FIELD_S64 = 90,
+	BYTECODE_OP_LOAD_FIELD_U8 = 91,
+	BYTECODE_OP_LOAD_FIELD_U16 = 92,
+	BYTECODE_OP_LOAD_FIELD_U32 = 93,
+	BYTECODE_OP_LOAD_FIELD_U64 = 94,
+	BYTECODE_OP_LOAD_FIELD_STRING = 95,
+	BYTECODE_OP_LOAD_FIELD_SEQUENCE = 96,
+	BYTECODE_OP_LOAD_FIELD_DOUBLE = 97,
+
+	BYTECODE_OP_UNARY_BIT_NOT = 98,
+
+	BYTECODE_OP_RETURN_S64 = 99,
+
+	NR_BYTECODE_OPS,
+};
+
+/* Each opcode is encoded on a single byte. */
+typedef uint8_t bytecode_opcode_t;
+
+struct load_op {
+	bytecode_opcode_t op;
+	char data[0];
+	/* data to load. Size known by enum bytecode_op and null-term char. */
+} __attribute__((packed));
+
+struct binary_op {
+	bytecode_opcode_t op;
+} __attribute__((packed));
+
+struct unary_op {
+	bytecode_opcode_t op;
+} __attribute__((packed));
+
+/* skip_offset is absolute from start of bytecode */
+struct logical_op {
+	bytecode_opcode_t op;
+	uint16_t skip_offset;	/* bytecode insn, if skip second test */
+} __attribute__((packed));
+
+struct cast_op {
+	bytecode_opcode_t op;
+} __attribute__((packed));
+
+struct return_op {
+	bytecode_opcode_t op;
+} __attribute__((packed));
+#endif /* _FILTER_BYTECODE_H */
#ifndef _LTTNG_EVENTS_H
#define _LTTNG_EVENTS_H
+#include <linux/irq_work.h>
#include <linux/version.h>
#include <linux/list.h>
#include <linux/kprobes.h>
struct lttng_probe_ctx {
struct lttng_event *event;
+	struct lttng_trigger *trigger;	// Unused for now; kept in case probes need trigger context. TODO: confirm before removing.
uint8_t interruptible;
};
const struct lttng_event_field *fields; /* event payload */
unsigned int nr_fields;
struct module *owner;
+ void *trigger_callback;
};
struct lttng_probe_desc {
LTTNG_TYPE_ENABLER = 1,
};
-struct lttng_filter_bytecode_node {
+enum lttng_bytecode_node_type {
+ LTTNG_BYTECODE_NODE_TYPE_FILTER,
+ LTTNG_BYTECODE_NODE_TYPE_CAPTURE,
+};
+
+struct lttng_bytecode_node {
+ enum lttng_bytecode_node_type type;
struct list_head node;
struct lttng_enabler *enabler;
- /*
- * struct lttng_kernel_filter_bytecode has var. sized array, must be
- * last field.
- */
- struct lttng_kernel_filter_bytecode bc;
+ struct {
+ uint32_t len;
+ uint32_t reloc_offset;
+ uint64_t seqnum;
+ char data[];
+ } bc;
};
/*
- * Filter return value masks.
+ * Bytecode interpreter return value masks.
*/
-enum lttng_filter_ret {
- LTTNG_FILTER_DISCARD = 0,
- LTTNG_FILTER_RECORD_FLAG = (1ULL << 0),
+enum lttng_bytecode_interpreter_ret {
+ LTTNG_INTERPRETER_DISCARD = 0,
+ LTTNG_INTERPRETER_RECORD_FLAG = (1ULL << 0),
/* Other bits are kept for future use. */
};
+struct lttng_interpreter_output;
+
struct lttng_bytecode_runtime {
/* Associated bytecode */
- struct lttng_filter_bytecode_node *bc;
- uint64_t (*filter)(void *filter_data, struct lttng_probe_ctx *lttng_probe_ctx,
- const char *filter_stack_data);
+ struct lttng_bytecode_node *bc;
+ union {
+ uint64_t (*filter)(void *filter_data,
+ struct lttng_probe_ctx *lttng_probe_ctx,
+ const char *filter_stack_data);
+ uint64_t (*capture)(void *filter_data,
+ struct lttng_probe_ctx *lttng_probe_ctx,
+ const char *capture_stack_data,
+ struct lttng_interpreter_output *output);
+ } interpreter_funcs;
int link_failed;
struct list_head node; /* list of bytecode runtime in event */
- struct lttng_event *event;
+ struct lttng_ctx *ctx;
};
/*
};
struct lttng_uprobe_handler {
- struct lttng_event *event;
+ union {
+ struct lttng_event *event;
+ struct lttng_trigger *trigger;
+ } u;
loff_t offset;
struct uprobe_consumer up_consumer;
struct list_head node;
};
+struct lttng_kprobe {
+ struct kprobe kp;
+ char *symbol_name;
+};
+
+struct lttng_uprobe {
+ struct inode *inode;
+ struct list_head head;
+};
+
enum lttng_syscall_entryexit {
LTTNG_SYSCALL_ENTRY,
LTTNG_SYSCALL_EXIT,
LTTNG_SYSCALL_ABI_COMPAT,
};
+struct lttng_syscall {
+ struct list_head node; /* chain registered syscall trigger */
+ unsigned int syscall_id;
+ bool is_compat;
+};
+
/*
* lttng_event structure is referred to by the tracing fast path. It must be
* kept small.
struct lttng_ctx *ctx;
enum lttng_kernel_instrumentation instrumentation;
union {
- struct {
- struct kprobe kp;
- char *symbol_name;
- } kprobe;
+ struct lttng_kprobe kprobe;
struct {
struct lttng_krp *lttng_krp;
char *symbol_name;
} kretprobe;
- struct {
- struct inode *inode;
- struct list_head head;
- } uprobe;
+ struct lttng_uprobe uprobe;
struct {
char *syscall_name;
enum lttng_syscall_entryexit entryexit;
struct hlist_node hlist; /* session ht of events */
int registered; /* has reg'd tracepoint probe */
/* list of struct lttng_bytecode_runtime, sorted by seqnum */
- struct list_head bytecode_runtime_head;
+ struct list_head filter_bytecode_runtime_head;
int has_enablers_without_bytecode;
};
-enum lttng_enabler_type {
- LTTNG_ENABLER_STAR_GLOB,
- LTTNG_ENABLER_NAME,
+// FIXME: Really similar to lttng_event above. Could those be merged?
+struct lttng_trigger {
+ enum lttng_event_type evtype; /* First field. */
+ uint64_t id;
+ uint64_t error_counter_index;
+ int enabled;
+ int registered; /* has reg'd tracepoint probe */
+ const struct lttng_event_desc *desc;
+ void *filter;
+ struct list_head list; /* Trigger list in trigger group */
+
+ enum lttng_kernel_instrumentation instrumentation;
+ union {
+ struct lttng_kprobe kprobe;
+ struct lttng_uprobe uprobe;
+ struct lttng_syscall syscall;
+ } u;
+
+ /* Backward references: list of lttng_enabler_ref (ref to enablers) */
+ struct list_head enablers_ref_head;
+ struct hlist_node hlist; /* session ht of triggers */
+ /* list of struct lttng_bytecode_runtime, sorted by seqnum */
+ struct list_head filter_bytecode_runtime_head;
+ size_t num_captures;
+ struct list_head capture_bytecode_runtime_head;
+ int has_enablers_without_bytecode;
+
+ void (*send_notification)(struct lttng_trigger *trigger,
+ struct lttng_probe_ctx *lttng_probe_ctx,
+ const char *interpreter_stack_data);
+ struct lttng_trigger_group *group; /* Weak ref */
+};
+
+enum lttng_enabler_format_type {
+ LTTNG_ENABLER_FORMAT_STAR_GLOB,
+ LTTNG_ENABLER_FORMAT_NAME,
};
/*
struct lttng_enabler {
enum lttng_event_type evtype; /* First field. */
- enum lttng_enabler_type type;
+ enum lttng_enabler_format_type format_type;
- struct list_head node; /* per-session list of enablers */
/* head list of struct lttng_ust_filter_bytecode_node */
struct list_head filter_bytecode_head;
struct lttng_kernel_event event_param;
+ unsigned int enabled:1;
+};
+
+struct lttng_event_enabler {
+ struct lttng_enabler base;
+ struct list_head node; /* per-session list of enablers */
struct lttng_channel *chan;
+ /*
+ * Unused, but kept around to make it explicit that the tracer can do
+ * it.
+ */
struct lttng_ctx *ctx;
- unsigned int enabled:1;
};
+struct lttng_trigger_enabler {
+ struct lttng_enabler base;
+ uint64_t id;
+ uint64_t error_counter_index;
+ struct list_head node; /* List of trigger enablers */
+ struct lttng_trigger_group *group;
+
+ /* head list of struct lttng_ust_filter_bytecode_node */
+ struct list_head capture_bytecode_head;
+ uint64_t num_captures;
+};
+
+
+static inline
+struct lttng_enabler *lttng_event_enabler_as_enabler(
+ struct lttng_event_enabler *event_enabler)
+{
+ return &event_enabler->base;
+}
+
+static inline
+struct lttng_enabler *lttng_trigger_enabler_as_enabler(
+ struct lttng_trigger_enabler *trigger_enabler)
+{
+ return &trigger_enabler->base;
+}
+
struct lttng_channel_ops {
struct channel *(*channel_create)(const char *name,
- struct lttng_channel *lttng_chan,
+ void *priv,
void *buf_addr,
size_t subbuf_size, size_t num_subbuf,
unsigned int switch_timer_interval,
uint64_t *id);
};
+struct lttng_counter_ops {
+ struct lib_counter *(*counter_create)(size_t nr_dimensions,
+ const size_t *max_nr_elem, /* for each dimension */
+ int64_t global_sum_step);
+ void (*counter_destroy)(struct lib_counter *counter);
+ int (*counter_add)(struct lib_counter *counter, const size_t *dimension_indexes,
+ int64_t v);
+ /*
+ * counter_read reads a specific cpu's counter if @cpu >= 0, or
+ * the global aggregation counter if @cpu == -1.
+ */
+ int (*counter_read)(struct lib_counter *counter, const size_t *dimension_indexes, int cpu,
+ int64_t *value, bool *overflow, bool *underflow);
+ /*
+ * counter_aggregate returns the total sum of all per-cpu counters and
+ * the global aggregation counter.
+ */
+ int (*counter_aggregate)(struct lib_counter *counter, const size_t *dimension_indexes,
+ int64_t *value, bool *overflow, bool *underflow);
+ int (*counter_clear)(struct lib_counter *counter, const size_t *dimension_indexes);
+};
+
struct lttng_transport {
char *name;
struct module *owner;
struct lttng_channel_ops ops;
};
+struct lttng_counter_transport {
+ char *name;
+ struct module *owner;
+ struct list_head node;
+ struct lttng_counter_ops ops;
+};
+
struct lttng_syscall_filter;
#define LTTNG_EVENT_HT_BITS 12
struct hlist_head table[LTTNG_EVENT_HT_SIZE];
};
+#define LTTNG_TRIGGER_HT_BITS 12
+#define LTTNG_TRIGGER_HT_SIZE (1U << LTTNG_TRIGGER_HT_BITS)
+
+struct lttng_trigger_ht {
+ struct hlist_head table[LTTNG_TRIGGER_HT_SIZE];
+};
+
struct lttng_channel {
unsigned int id;
struct channel *chan; /* Channel buffers */
struct lttng_id_tracker vgid_tracker;
unsigned int metadata_dumped:1,
tstate:1; /* Transient enable state */
- /* List of enablers */
+ /* List of event enablers */
struct list_head enablers_head;
/* Hash table of events */
struct lttng_event_ht events_ht;
char creation_time[LTTNG_KERNEL_SESSION_CREATION_TIME_ISO8601_LEN];
};
+struct lttng_counter {
+ struct file *file; /* File associated to counter. */
+ struct file *owner;
+ struct lttng_counter_transport *transport;
+ struct lib_counter *counter;
+ struct lttng_counter_ops *ops;
+};
+
+struct lttng_trigger_group {
+ struct file *file; /* File associated to trigger group */
+ struct file *notif_file; /* File used to expose notifications to userspace. */
+ struct list_head node; /* Trigger group list */
+ struct list_head enablers_head; /* List of enablers */
+ struct list_head triggers_head; /* List of triggers */
+ struct lttng_trigger_ht triggers_ht; /* Hash table of triggers */
+ struct lttng_ctx *ctx; /* Contexts for filters. */
+ struct lttng_channel_ops *ops;
+ struct lttng_transport *transport;
+ struct channel *chan; /* Ring buffer channel for trigger group. */
+ struct lib_ring_buffer *buf; /* Ring buffer for trigger group. */
+ wait_queue_head_t read_wait;
+ struct irq_work wakeup_pending; /* Pending wakeup irq work. */
+
+ struct list_head *trigger_syscall_dispatch;
+ struct list_head *trigger_compat_syscall_dispatch;
+
+ struct lttng_counter *error_counter;
+ size_t error_counter_len;
+
+ unsigned int syscall_all:1,
+ sys_enter_registered:1;
+};
+
struct lttng_metadata_cache {
char *data; /* Metadata cache */
unsigned int cache_alloc; /* Metadata allocated size (bytes) */
struct list_head *lttng_get_probe_list_head(void);
-struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
+struct lttng_event_enabler *lttng_event_enabler_create(
+ enum lttng_enabler_format_type format_type,
struct lttng_kernel_event *event_param,
struct lttng_channel *chan);
-int lttng_enabler_enable(struct lttng_enabler *enabler);
-int lttng_enabler_disable(struct lttng_enabler *enabler);
+int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler);
+int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler);
+struct lttng_trigger_enabler *lttng_trigger_enabler_create(
+ struct lttng_trigger_group *trigger_group,
+ enum lttng_enabler_format_type format_type,
+ struct lttng_kernel_trigger *trigger_param);
+
+int lttng_trigger_enabler_enable(struct lttng_trigger_enabler *trigger_enabler);
+int lttng_trigger_enabler_disable(struct lttng_trigger_enabler *trigger_enabler);
int lttng_fix_pending_events(void);
+int lttng_fix_pending_triggers(void);
int lttng_session_active(void);
+bool lttng_trigger_active(void);
struct lttng_session *lttng_session_create(void);
int lttng_session_enable(struct lttng_session *session);
int lttng_session_statedump(struct lttng_session *session);
void metadata_cache_destroy(struct kref *kref);
+
+struct lttng_counter *lttng_kernel_counter_create(
+ const char *counter_transport_name, size_t number_dimensions,
+ const size_t *dimensions_sizes);
+int lttng_kernel_counter_value(struct lttng_counter *counter,
+ const size_t *dimension_indexes, int64_t *val);
+
+struct lttng_trigger_group *lttng_trigger_group_create(void);
+int lttng_trigger_group_create_error_counter(struct file *trigger_group_file,
+ const struct lttng_kernel_counter_conf *error_counter_conf);
+void lttng_trigger_group_destroy(struct lttng_trigger_group *trigger_group);
+
struct lttng_channel *lttng_channel_create(struct lttng_session *session,
const char *transport_name,
void *buf_addr,
void *filter,
const struct lttng_event_desc *internal_desc);
+struct lttng_trigger *lttng_trigger_create(
+ const struct lttng_event_desc *trigger_desc,
+ uint64_t id,
+ uint64_t error_counter_idx,
+ struct lttng_trigger_group *trigger_group,
+ struct lttng_kernel_trigger *trigger_param,
+ void *filter,
+ enum lttng_kernel_instrumentation itype);
+struct lttng_trigger *_lttng_trigger_create(
+ const struct lttng_event_desc *trigger_desc,
+ uint64_t id,
+ uint64_t error_counter_idx,
+ struct lttng_trigger_group *trigger_group,
+ struct lttng_kernel_trigger *trigger_param,
+ void *filter,
+ enum lttng_kernel_instrumentation itype);
+
int lttng_channel_enable(struct lttng_channel *channel);
int lttng_channel_disable(struct lttng_channel *channel);
int lttng_event_enable(struct lttng_event *event);
int lttng_event_disable(struct lttng_event *event);
+int lttng_trigger_enable(struct lttng_trigger *trigger);
+int lttng_trigger_disable(struct lttng_trigger *trigger);
+
void lttng_transport_register(struct lttng_transport *transport);
void lttng_transport_unregister(struct lttng_transport *transport);
+void lttng_counter_transport_register(struct lttng_counter_transport *transport);
+void lttng_counter_transport_unregister(struct lttng_counter_transport *transport);
+
void synchronize_trace(void);
int lttng_abi_init(void);
int lttng_abi_compat_old_init(void);
int lttng_probe_register(struct lttng_probe_desc *desc);
void lttng_probe_unregister(struct lttng_probe_desc *desc);
-const struct lttng_event_desc *lttng_event_get(const char *name);
-void lttng_event_put(const struct lttng_event_desc *desc);
+const struct lttng_event_desc *lttng_event_desc_get(const char *name);
+void lttng_event_desc_put(const struct lttng_event_desc *desc);
int lttng_probes_init(void);
void lttng_probes_exit(void);
void lttng_clock_ref(void);
void lttng_clock_unref(void);
+int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
+ struct lttng_enabler *enabler);
+
#if defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS)
-int lttng_syscalls_register(struct lttng_channel *chan, void *filter);
-int lttng_syscalls_unregister(struct lttng_channel *chan);
-int lttng_syscalls_destroy(struct lttng_channel *chan);
-int lttng_syscall_filter_enable(struct lttng_channel *chan,
+int lttng_syscalls_register_event(struct lttng_channel *chan, void *filter);
+int lttng_syscalls_unregister_event(struct lttng_channel *chan);
+int lttng_syscalls_destroy_event(struct lttng_channel *chan);
+int lttng_syscall_filter_enable_event(struct lttng_channel *chan,
struct lttng_event *event);
-int lttng_syscall_filter_disable(struct lttng_channel *chan,
+int lttng_syscall_filter_disable_event(struct lttng_channel *chan,
struct lttng_event *event);
long lttng_channel_syscall_mask(struct lttng_channel *channel,
struct lttng_kernel_syscall_mask __user *usyscall_mask);
+
+int lttng_syscalls_register_trigger(struct lttng_trigger_enabler *trigger_enabler, void *filter);
+/* Fixed typo: "syscals" -> "syscalls" so the symbol matches its definition. */
+int lttng_syscalls_create_matching_triggers(struct lttng_trigger_enabler *trigger_enabler, void *filter);
+int lttng_syscalls_unregister_trigger(struct lttng_trigger_group *group);
+int lttng_syscall_filter_enable_trigger(struct lttng_trigger *trigger);
+int lttng_syscall_filter_disable_trigger(struct lttng_trigger *trigger);
#else
-static inline int lttng_syscalls_register(struct lttng_channel *chan, void *filter)
+static inline int lttng_syscalls_register_event(
+ struct lttng_channel *chan, void *filter)
{
return -ENOSYS;
}
-static inline int lttng_syscalls_unregister(struct lttng_channel *chan)
+static inline int lttng_syscalls_unregister_event(struct lttng_channel *chan)
{
return 0;
}
return 0;
}
-static inline int lttng_syscall_filter_enable(struct lttng_channel *chan,
+static inline int lttng_syscall_filter_enable_event(struct lttng_channel *chan,
struct lttng_event *event);
{
return -ENOSYS;
}
-static inline int lttng_syscall_filter_disable(struct lttng_channel *chan,
+static inline int lttng_syscall_filter_disable_event(struct lttng_channel *chan,
struct lttng_event *event);
{
return -ENOSYS;
{
return -ENOSYS;
}
+
+/* Stub signature must match the CONFIG_HAVE_SYSCALL_TRACEPOINTS declaration. */
+static inline int lttng_syscalls_register_trigger(
+		struct lttng_trigger_enabler *trigger_enabler, void *filter)
+{
+	return -ENOSYS;
+}
+
+static inline int lttng_syscalls_unregister_trigger(struct lttng_trigger_group *group)
+{
+ return 0;
+}
+
+/* Stub signature must match the CONFIG_HAVE_SYSCALL_TRACEPOINTS declaration. */
+static inline int lttng_syscall_filter_enable_trigger(struct lttng_trigger *trigger)
+{
+	return -ENOSYS;
+}
+
+/* Stub signature must match the CONFIG_HAVE_SYSCALL_TRACEPOINTS declaration. */
+static inline int lttng_syscall_filter_disable_trigger(struct lttng_trigger *trigger)
+{
+	return -ENOSYS;
+}
+
#endif
-void lttng_filter_sync_state(struct lttng_bytecode_runtime *runtime);
-int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
+int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler *event_enabler,
struct lttng_kernel_filter_bytecode __user *bytecode);
-void lttng_enabler_event_link_bytecode(struct lttng_event *event,
- struct lttng_enabler *enabler);
+int lttng_trigger_enabler_attach_filter_bytecode(struct lttng_trigger_enabler *trigger_enabler,
+ struct lttng_kernel_filter_bytecode __user *bytecode);
+int lttng_trigger_enabler_attach_capture_bytecode(
+ struct lttng_trigger_enabler *trigger_enabler,
+ struct lttng_kernel_capture_bytecode __user *bytecode);
+
+void lttng_enabler_link_bytecode(const struct lttng_event_desc *event_desc,
+ struct lttng_ctx *ctx,
+ struct list_head *instance_bytecode_runtime_head,
+ struct list_head *enabler_bytecode_runtime_head);
int lttng_probes_init(void);
extern int lttng_statedump_start(struct lttng_session *session);
#ifdef CONFIG_KPROBES
-int lttng_kprobes_register(const char *name,
+int lttng_kprobes_register_event(const char *name,
const char *symbol_name,
uint64_t offset,
uint64_t addr,
struct lttng_event *event);
-void lttng_kprobes_unregister(struct lttng_event *event);
-void lttng_kprobes_destroy_private(struct lttng_event *event);
+void lttng_kprobes_unregister_event(struct lttng_event *event);
+void lttng_kprobes_destroy_event_private(struct lttng_event *event);
+int lttng_kprobes_register_trigger(const char *symbol_name,
+ uint64_t offset,
+ uint64_t addr,
+ struct lttng_trigger *trigger);
+void lttng_kprobes_unregister_trigger(struct lttng_trigger *trigger);
+void lttng_kprobes_destroy_trigger_private(struct lttng_trigger *trigger);
#else
static inline
-int lttng_kprobes_register(const char *name,
+int lttng_kprobes_register_event(const char *name,
const char *symbol_name,
uint64_t offset,
uint64_t addr,
}
static inline
-void lttng_kprobes_unregister(struct lttng_event *event)
+void lttng_kprobes_unregister_event(struct lttng_event *event)
+{
+}
+
+static inline
+void lttng_kprobes_destroy_event_private(struct lttng_event *event)
{
}
static inline
-void lttng_kprobes_destroy_private(struct lttng_event *event)
+int lttng_kprobes_register_trigger(const char *symbol_name,
+ uint64_t offset,
+ uint64_t addr,
+ struct lttng_trigger *trigger)
+{
+ return -ENOSYS;
+}
+
+static inline
+void lttng_kprobes_unregister_trigger(struct lttng_trigger *trigger)
+{
+}
+
+static inline
+void lttng_kprobes_destroy_trigger_private(struct lttng_trigger *trigger)
{
}
#endif
int lttng_event_add_callsite(struct lttng_event *event,
struct lttng_kernel_event_callsite *callsite);
+int lttng_trigger_add_callsite(struct lttng_trigger *trigger,
+ struct lttng_kernel_event_callsite *callsite);
+
#ifdef CONFIG_UPROBES
-int lttng_uprobes_register(const char *name,
+int lttng_uprobes_register_event(const char *name,
int fd, struct lttng_event *event);
-int lttng_uprobes_add_callsite(struct lttng_event *event,
+int lttng_uprobes_event_add_callsite(struct lttng_event *event,
+ struct lttng_kernel_event_callsite *callsite);
+void lttng_uprobes_unregister_event(struct lttng_event *event);
+void lttng_uprobes_destroy_event_private(struct lttng_event *event);
+int lttng_uprobes_register_trigger(const char *name,
+ int fd, struct lttng_trigger *trigger);
+int lttng_uprobes_trigger_add_callsite(struct lttng_trigger *trigger,
struct lttng_kernel_event_callsite *callsite);
-void lttng_uprobes_unregister(struct lttng_event *event);
-void lttng_uprobes_destroy_private(struct lttng_event *event);
+void lttng_uprobes_unregister_trigger(struct lttng_trigger *trigger);
+void lttng_uprobes_destroy_trigger_private(struct lttng_trigger *trigger);
#else
static inline
-int lttng_uprobes_register(const char *name,
+int lttng_uprobes_register_event(const char *name,
int fd, struct lttng_event *event)
{
return -ENOSYS;
}
static inline
-int lttng_uprobes_add_callsite(struct lttng_event *event,
+int lttng_uprobes_event_add_callsite(struct lttng_event *event,
+ struct lttng_kernel_event_callsite *callsite)
+{
+ return -ENOSYS;
+}
+
+static inline
+void lttng_uprobes_unregister_event(struct lttng_event *event)
+{
+}
+
+static inline
+void lttng_uprobes_destroy_event_private(struct lttng_event *event)
+{
+}
+
+static inline
+int lttng_uprobes_register_trigger(const char *name,
+ int fd, struct lttng_trigger *trigger)
+{
+ return -ENOSYS;
+}
+
+static inline
+int lttng_uprobes_trigger_add_callsite(struct lttng_trigger *trigger,
struct lttng_kernel_event_callsite *callsite)
{
return -ENOSYS;
}
static inline
-void lttng_uprobes_unregister(struct lttng_event *event)
+void lttng_uprobes_unregister_trigger(struct lttng_trigger *trigger)
{
}
static inline
-void lttng_uprobes_destroy_private(struct lttng_event *event)
+void lttng_uprobes_destroy_trigger_private(struct lttng_trigger *trigger)
{
}
#endif
+++ /dev/null
-/* SPDX-License-Identifier: MIT
- *
- * lttng/filter-bytecode.h
- *
- * LTTng filter bytecode
- *
- * Copyright 2012-2016 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _FILTER_BYTECODE_H
-#define _FILTER_BYTECODE_H
-
-/*
- * offsets are absolute from start of bytecode.
- */
-
-struct field_ref {
- /* Initially, symbol offset. After link, field offset. */
- uint16_t offset;
-} __attribute__((packed));
-
-struct get_symbol {
- /* Symbol offset. */
- uint16_t offset;
-} __attribute__((packed));
-
-struct get_index_u16 {
- uint16_t index;
-} __attribute__((packed));
-
-struct get_index_u64 {
- uint64_t index;
-} __attribute__((packed));
-
-struct literal_numeric {
- int64_t v;
-} __attribute__((packed));
-
-struct literal_double {
- double v;
-} __attribute__((packed));
-
-struct literal_string {
- char string[0];
-} __attribute__((packed));
-
-enum filter_op {
- FILTER_OP_UNKNOWN = 0,
-
- FILTER_OP_RETURN = 1,
-
- /* binary */
- FILTER_OP_MUL = 2,
- FILTER_OP_DIV = 3,
- FILTER_OP_MOD = 4,
- FILTER_OP_PLUS = 5,
- FILTER_OP_MINUS = 6,
- FILTER_OP_BIT_RSHIFT = 7,
- FILTER_OP_BIT_LSHIFT = 8,
- FILTER_OP_BIT_AND = 9,
- FILTER_OP_BIT_OR = 10,
- FILTER_OP_BIT_XOR = 11,
-
- /* binary comparators */
- FILTER_OP_EQ = 12,
- FILTER_OP_NE = 13,
- FILTER_OP_GT = 14,
- FILTER_OP_LT = 15,
- FILTER_OP_GE = 16,
- FILTER_OP_LE = 17,
-
- /* string binary comparator: apply to */
- FILTER_OP_EQ_STRING = 18,
- FILTER_OP_NE_STRING = 19,
- FILTER_OP_GT_STRING = 20,
- FILTER_OP_LT_STRING = 21,
- FILTER_OP_GE_STRING = 22,
- FILTER_OP_LE_STRING = 23,
-
- /* s64 binary comparator */
- FILTER_OP_EQ_S64 = 24,
- FILTER_OP_NE_S64 = 25,
- FILTER_OP_GT_S64 = 26,
- FILTER_OP_LT_S64 = 27,
- FILTER_OP_GE_S64 = 28,
- FILTER_OP_LE_S64 = 29,
-
- /* double binary comparator */
- FILTER_OP_EQ_DOUBLE = 30,
- FILTER_OP_NE_DOUBLE = 31,
- FILTER_OP_GT_DOUBLE = 32,
- FILTER_OP_LT_DOUBLE = 33,
- FILTER_OP_GE_DOUBLE = 34,
- FILTER_OP_LE_DOUBLE = 35,
-
- /* Mixed S64-double binary comparators */
- FILTER_OP_EQ_DOUBLE_S64 = 36,
- FILTER_OP_NE_DOUBLE_S64 = 37,
- FILTER_OP_GT_DOUBLE_S64 = 38,
- FILTER_OP_LT_DOUBLE_S64 = 39,
- FILTER_OP_GE_DOUBLE_S64 = 40,
- FILTER_OP_LE_DOUBLE_S64 = 41,
-
- FILTER_OP_EQ_S64_DOUBLE = 42,
- FILTER_OP_NE_S64_DOUBLE = 43,
- FILTER_OP_GT_S64_DOUBLE = 44,
- FILTER_OP_LT_S64_DOUBLE = 45,
- FILTER_OP_GE_S64_DOUBLE = 46,
- FILTER_OP_LE_S64_DOUBLE = 47,
-
- /* unary */
- FILTER_OP_UNARY_PLUS = 48,
- FILTER_OP_UNARY_MINUS = 49,
- FILTER_OP_UNARY_NOT = 50,
- FILTER_OP_UNARY_PLUS_S64 = 51,
- FILTER_OP_UNARY_MINUS_S64 = 52,
- FILTER_OP_UNARY_NOT_S64 = 53,
- FILTER_OP_UNARY_PLUS_DOUBLE = 54,
- FILTER_OP_UNARY_MINUS_DOUBLE = 55,
- FILTER_OP_UNARY_NOT_DOUBLE = 56,
-
- /* logical */
- FILTER_OP_AND = 57,
- FILTER_OP_OR = 58,
-
- /* load field ref */
- FILTER_OP_LOAD_FIELD_REF = 59,
- FILTER_OP_LOAD_FIELD_REF_STRING = 60,
- FILTER_OP_LOAD_FIELD_REF_SEQUENCE = 61,
- FILTER_OP_LOAD_FIELD_REF_S64 = 62,
- FILTER_OP_LOAD_FIELD_REF_DOUBLE = 63,
-
- /* load immediate from operand */
- FILTER_OP_LOAD_STRING = 64,
- FILTER_OP_LOAD_S64 = 65,
- FILTER_OP_LOAD_DOUBLE = 66,
-
- /* cast */
- FILTER_OP_CAST_TO_S64 = 67,
- FILTER_OP_CAST_DOUBLE_TO_S64 = 68,
- FILTER_OP_CAST_NOP = 69,
-
- /* get context ref */
- FILTER_OP_GET_CONTEXT_REF = 70,
- FILTER_OP_GET_CONTEXT_REF_STRING = 71,
- FILTER_OP_GET_CONTEXT_REF_S64 = 72,
- FILTER_OP_GET_CONTEXT_REF_DOUBLE = 73,
-
- /* load userspace field ref */
- FILTER_OP_LOAD_FIELD_REF_USER_STRING = 74,
- FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE = 75,
-
- /*
- * load immediate star globbing pattern (literal string)
- * from immediate
- */
- FILTER_OP_LOAD_STAR_GLOB_STRING = 76,
-
- /* globbing pattern binary operator: apply to */
- FILTER_OP_EQ_STAR_GLOB_STRING = 77,
- FILTER_OP_NE_STAR_GLOB_STRING = 78,
-
- /*
- * Instructions for recursive traversal through composed types.
- */
- FILTER_OP_GET_CONTEXT_ROOT = 79,
- FILTER_OP_GET_APP_CONTEXT_ROOT = 80,
- FILTER_OP_GET_PAYLOAD_ROOT = 81,
-
- FILTER_OP_GET_SYMBOL = 82,
- FILTER_OP_GET_SYMBOL_FIELD = 83,
- FILTER_OP_GET_INDEX_U16 = 84,
- FILTER_OP_GET_INDEX_U64 = 85,
-
- FILTER_OP_LOAD_FIELD = 86,
- FILTER_OP_LOAD_FIELD_S8 = 87,
- FILTER_OP_LOAD_FIELD_S16 = 88,
- FILTER_OP_LOAD_FIELD_S32 = 89,
- FILTER_OP_LOAD_FIELD_S64 = 90,
- FILTER_OP_LOAD_FIELD_U8 = 91,
- FILTER_OP_LOAD_FIELD_U16 = 92,
- FILTER_OP_LOAD_FIELD_U32 = 93,
- FILTER_OP_LOAD_FIELD_U64 = 94,
- FILTER_OP_LOAD_FIELD_STRING = 95,
- FILTER_OP_LOAD_FIELD_SEQUENCE = 96,
- FILTER_OP_LOAD_FIELD_DOUBLE = 97,
-
- FILTER_OP_UNARY_BIT_NOT = 98,
-
- FILTER_OP_RETURN_S64 = 99,
-
- NR_FILTER_OPS,
-};
-
-typedef uint8_t filter_opcode_t;
-
-struct load_op {
- filter_opcode_t op;
- char data[0];
- /* data to load. Size known by enum filter_opcode and null-term char. */
-} __attribute__((packed));
-
-struct binary_op {
- filter_opcode_t op;
-} __attribute__((packed));
-
-struct unary_op {
- filter_opcode_t op;
-} __attribute__((packed));
-
-/* skip_offset is absolute from start of bytecode */
-struct logical_op {
- filter_opcode_t op;
- uint16_t skip_offset; /* bytecode insn, if skip second test */
-} __attribute__((packed));
-
-struct cast_op {
- filter_opcode_t op;
-} __attribute__((packed));
-
-struct return_op {
- filter_opcode_t op;
-} __attribute__((packed));
-
-#endif /* _FILTER_BYTECODE_H */
+++ /dev/null
-/* SPDX-License-Identifier: MIT
- *
- * lttng/filter.h
- *
- * LTTng modules filter header.
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_FILTER_H
-#define _LTTNG_FILTER_H
-
-#include <linux/kernel.h>
-
-#include <lttng/events.h>
-#include <lttng/filter-bytecode.h>
-
-/* Filter stack length, in number of entries */
-#define FILTER_STACK_LEN 10 /* includes 2 dummy */
-#define FILTER_STACK_EMPTY 1
-
-#define FILTER_MAX_DATA_LEN 65536
-
-#ifdef DEBUG
-#define dbg_printk(fmt, args...) \
- printk(KERN_DEBUG "LTTng: [debug bytecode in %s:%s@%u] " fmt, \
- __FILE__, __func__, __LINE__, ## args)
-#else
-#define dbg_printk(fmt, args...) \
-do { \
- /* do nothing but check printf format */ \
- if (0) \
- printk(KERN_DEBUG "LTTng: [debug bytecode in %s:%s@%u] " fmt, \
- __FILE__, __func__, __LINE__, ## args); \
-} while (0)
-#endif
-
-/* Linked bytecode. Child of struct lttng_bytecode_runtime. */
-struct bytecode_runtime {
- struct lttng_bytecode_runtime p;
- size_t data_len;
- size_t data_alloc_len;
- char *data;
- uint16_t len;
- char code[0];
-};
-
-enum entry_type {
- REG_S64,
- REG_DOUBLE,
- REG_STRING,
- REG_STAR_GLOB_STRING,
- REG_TYPE_UNKNOWN,
- REG_PTR,
-};
-
-enum load_type {
- LOAD_ROOT_CONTEXT,
- LOAD_ROOT_APP_CONTEXT,
- LOAD_ROOT_PAYLOAD,
- LOAD_OBJECT,
-};
-
-enum object_type {
- OBJECT_TYPE_S8,
- OBJECT_TYPE_S16,
- OBJECT_TYPE_S32,
- OBJECT_TYPE_S64,
- OBJECT_TYPE_U8,
- OBJECT_TYPE_U16,
- OBJECT_TYPE_U32,
- OBJECT_TYPE_U64,
-
- OBJECT_TYPE_DOUBLE,
- OBJECT_TYPE_STRING,
- OBJECT_TYPE_STRING_SEQUENCE,
-
- OBJECT_TYPE_SEQUENCE,
- OBJECT_TYPE_ARRAY,
- OBJECT_TYPE_STRUCT,
- OBJECT_TYPE_VARIANT,
-
- OBJECT_TYPE_DYNAMIC,
-};
-
-struct filter_get_index_data {
- uint64_t offset; /* in bytes */
- size_t ctx_index;
- size_t array_len;
- struct {
- size_t len;
- enum object_type type;
- bool rev_bo; /* reverse byte order */
- } elem;
-};
-
-/* Validation stack */
-struct vstack_load {
- enum load_type type;
- enum object_type object_type;
- const struct lttng_event_field *field;
- bool rev_bo; /* reverse byte order */
-};
-
-struct vstack_entry {
- enum entry_type type;
- struct vstack_load load;
-};
-
-struct vstack {
- int top; /* top of stack */
- struct vstack_entry e[FILTER_STACK_LEN];
-};
-
-static inline
-void vstack_init(struct vstack *stack)
-{
- stack->top = -1;
-}
-
-static inline
-struct vstack_entry *vstack_ax(struct vstack *stack)
-{
- if (unlikely(stack->top < 0))
- return NULL;
- return &stack->e[stack->top];
-}
-
-static inline
-struct vstack_entry *vstack_bx(struct vstack *stack)
-{
- if (unlikely(stack->top < 1))
- return NULL;
- return &stack->e[stack->top - 1];
-}
-
-static inline
-int vstack_push(struct vstack *stack)
-{
- if (stack->top >= FILTER_STACK_LEN - 1) {
- printk(KERN_WARNING "LTTng: filter: Stack full\n");
- return -EINVAL;
- }
- ++stack->top;
- return 0;
-}
-
-static inline
-int vstack_pop(struct vstack *stack)
-{
- if (unlikely(stack->top < 0)) {
- printk(KERN_WARNING "LTTng: filter: Stack empty\n");
- return -EINVAL;
- }
- stack->top--;
- return 0;
-}
-
-/* Execution stack */
-enum estack_string_literal_type {
- ESTACK_STRING_LITERAL_TYPE_NONE,
- ESTACK_STRING_LITERAL_TYPE_PLAIN,
- ESTACK_STRING_LITERAL_TYPE_STAR_GLOB,
-};
-
-struct load_ptr {
- enum load_type type;
- enum object_type object_type;
- const void *ptr;
- bool rev_bo;
- /* Temporary place-holders for contexts. */
- union {
- int64_t s64;
- uint64_t u64;
- double d;
- } u;
- /*
- * "field" is only needed when nested under a variant, in which
- * case we cannot specialize the nested operations.
- */
- const struct lttng_event_field *field;
-};
-
-struct estack_entry {
- union {
- int64_t v;
-
- struct {
- const char *str;
- const char __user *user_str;
- size_t seq_len;
- enum estack_string_literal_type literal_type;
- int user; /* is string from userspace ? */
- } s;
- struct load_ptr ptr;
- } u;
-};
-
-struct estack {
- int top; /* top of stack */
- struct estack_entry e[FILTER_STACK_LEN];
-};
-
-#define estack_ax_v ax
-#define estack_bx_v bx
-
-#define estack_ax(stack, top) \
- ({ \
- BUG_ON((top) <= FILTER_STACK_EMPTY); \
- &(stack)->e[top]; \
- })
-
-#define estack_bx(stack, top) \
- ({ \
- BUG_ON((top) <= FILTER_STACK_EMPTY + 1); \
- &(stack)->e[(top) - 1]; \
- })
-
-#define estack_push(stack, top, ax, bx) \
- do { \
- BUG_ON((top) >= FILTER_STACK_LEN - 1); \
- (stack)->e[(top) - 1].u.v = (bx); \
- (bx) = (ax); \
- ++(top); \
- } while (0)
-
-#define estack_pop(stack, top, ax, bx) \
- do { \
- BUG_ON((top) <= FILTER_STACK_EMPTY); \
- (ax) = (bx); \
- (bx) = (stack)->e[(top) - 2].u.v; \
- (top)--; \
- } while (0)
-
-const char *lttng_filter_print_op(enum filter_op op);
-
-int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode);
-int lttng_filter_specialize_bytecode(struct lttng_event *event,
- struct bytecode_runtime *bytecode);
-
-uint64_t lttng_filter_false(void *filter_data,
- struct lttng_probe_ctx *lttng_probe_ctx,
- const char *filter_stack_data);
-uint64_t lttng_filter_interpret_bytecode(void *filter_data,
- struct lttng_probe_ctx *lttng_probe_ctx,
- const char *filter_stack_data);
-
-#endif /* _LTTNG_FILTER_H */
--- /dev/null
+/* SPDX-License-Identifier: MIT
+ *
+ * lttng/lttng-bytecode.h
+ *
+ * LTTng modules bytecode header.
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_BYTECODE_H
+#define _LTTNG_BYTECODE_H
+
+#include <linux/kernel.h>
+
+#include <lttng/events.h>
+#include <lttng/bytecode.h>
+
+/* Interpreter stack length, in number of entries */
+#define INTERPRETER_STACK_LEN 10 /* includes 2 dummy */
+#define INTERPRETER_STACK_EMPTY 1
+#define INTERPRETER_MAX_DATA_LEN 65536
+
+#ifdef DEBUG
+#define dbg_printk(fmt, args...) \
+ printk(KERN_DEBUG "LTTng: [debug bytecode in %s:%s@%u] " fmt, \
+ __FILE__, __func__, __LINE__, ## args)
+#else
+#define dbg_printk(fmt, args...) \
+do { \
+ /* do nothing but check printf format */ \
+ if (0) \
+ printk(KERN_DEBUG "LTTng: [debug bytecode in %s:%s@%u] " fmt, \
+ __FILE__, __func__, __LINE__, ## args); \
+} while (0)
+#endif
+
+/* Linked bytecode. Child of struct lttng_bytecode_runtime. */
+struct bytecode_runtime {
+ struct lttng_bytecode_runtime p;
+ size_t data_len;
+ size_t data_alloc_len;
+ char *data;
+ uint16_t len;
+ char code[0];
+};
+
+enum entry_type {
+ REG_S64,
+ REG_U64,
+ REG_DOUBLE,
+ REG_STRING,
+ REG_STAR_GLOB_STRING,
+ REG_TYPE_UNKNOWN,
+ REG_PTR,
+};
+
+enum load_type {
+ LOAD_ROOT_CONTEXT,
+ LOAD_ROOT_APP_CONTEXT,
+ LOAD_ROOT_PAYLOAD,
+ LOAD_OBJECT,
+};
+
+enum object_type {
+ OBJECT_TYPE_S8,
+ OBJECT_TYPE_S16,
+ OBJECT_TYPE_S32,
+ OBJECT_TYPE_S64,
+ OBJECT_TYPE_U8,
+ OBJECT_TYPE_U16,
+ OBJECT_TYPE_U32,
+ OBJECT_TYPE_U64,
+
+ OBJECT_TYPE_SIGNED_ENUM,
+ OBJECT_TYPE_UNSIGNED_ENUM,
+
+ OBJECT_TYPE_DOUBLE,
+ OBJECT_TYPE_STRING,
+ OBJECT_TYPE_STRING_SEQUENCE,
+
+ OBJECT_TYPE_SEQUENCE,
+ OBJECT_TYPE_ARRAY,
+ OBJECT_TYPE_STRUCT,
+ OBJECT_TYPE_VARIANT,
+
+ OBJECT_TYPE_DYNAMIC,
+};
+
+struct bytecode_get_index_data {
+ uint64_t offset; /* in bytes */
+ size_t ctx_index;
+ size_t array_len;
+ /*
+ * Field is only populated for LOAD_ROOT_CONTEXT, LOAD_ROOT_APP_CONTEXT
+ * and LOAD_ROOT_PAYLOAD. Left NULL for LOAD_OBJECT, considering that the
+ * interpreter needs to find it from the event fields and types to
+ * support variants.
+ */
+ const struct lttng_event_field *field;
+ struct {
+ size_t len;
+ enum object_type type;
+ bool rev_bo; /* reverse byte order */
+ } elem;
+};
+
+/* Validation stack */
+struct vstack_load {
+ enum load_type type;
+ enum object_type object_type;
+ const struct lttng_event_field *field;
+ bool rev_bo; /* reverse byte order */
+};
+
+struct vstack_entry {
+ enum entry_type type;
+ struct vstack_load load;
+};
+
+struct vstack {
+ int top; /* top of stack */
+ struct vstack_entry e[INTERPRETER_STACK_LEN];
+};
+
+static inline
+void vstack_init(struct vstack *stack)
+{
+ stack->top = -1;
+}
+
+static inline
+struct vstack_entry *vstack_ax(struct vstack *stack)
+{
+ if (unlikely(stack->top < 0))
+ return NULL;
+ return &stack->e[stack->top];
+}
+
+static inline
+struct vstack_entry *vstack_bx(struct vstack *stack)
+{
+ if (unlikely(stack->top < 1))
+ return NULL;
+ return &stack->e[stack->top - 1];
+}
+
+static inline
+int vstack_push(struct vstack *stack)
+{
+ if (stack->top >= INTERPRETER_STACK_LEN - 1) {
+ printk(KERN_WARNING "LTTng: filter: Stack full\n");
+ return -EINVAL;
+ }
+ ++stack->top;
+ return 0;
+}
+
+static inline
+int vstack_pop(struct vstack *stack)
+{
+ if (unlikely(stack->top < 0)) {
+ printk(KERN_WARNING "LTTng: filter: Stack empty\n");
+ return -EINVAL;
+ }
+ stack->top--;
+ return 0;
+}
+
+/* Execution stack */
+enum estack_string_literal_type {
+ ESTACK_STRING_LITERAL_TYPE_NONE,
+ ESTACK_STRING_LITERAL_TYPE_PLAIN,
+ ESTACK_STRING_LITERAL_TYPE_STAR_GLOB,
+};
+
+struct load_ptr {
+ enum load_type type;
+ enum object_type object_type;
+ const void *ptr;
+ size_t nr_elem;
+ bool rev_bo;
+ /* Temporary place-holders for contexts. */
+ union {
+ int64_t s64;
+ uint64_t u64;
+ double d;
+ } u;
+ const struct lttng_event_field *field;
+};
+
+struct estack_entry {
+ enum entry_type type;
+ union {
+ int64_t v;
+
+ struct {
+ const char *str;
+ const char __user *user_str;
+ size_t seq_len;
+ enum estack_string_literal_type literal_type;
+ int user; /* is string from userspace ? */
+ } s;
+ struct load_ptr ptr;
+ } u;
+};
+
+struct estack {
+ int top; /* top of stack */
+ struct estack_entry e[INTERPRETER_STACK_LEN];
+};
+
+#define estack_ax_v ax
+#define estack_bx_v bx
+
+#define estack_ax_t ax_t
+#define estack_bx_t bx_t
+
+#define estack_ax(stack, top) \
+ ({ \
+ BUG_ON((top) <= INTERPRETER_STACK_EMPTY); \
+ &(stack)->e[top]; \
+ })
+
+#define estack_bx(stack, top) \
+ ({ \
+ BUG_ON((top) <= INTERPRETER_STACK_EMPTY + 1); \
+ &(stack)->e[(top) - 1]; \
+ })
+
+#define estack_push(stack, top, ax, bx, ax_t, bx_t) \
+ do { \
+ BUG_ON((top) >= INTERPRETER_STACK_LEN - 1); \
+ (stack)->e[(top) - 1].u.v = (bx); \
+ (stack)->e[(top) - 1].type = (bx_t); \
+ (bx) = (ax); \
+ (bx_t) = (ax_t); \
+ ++(top); \
+ } while (0)
+
+#define estack_pop(stack, top, ax, bx, ax_t, bx_t) \
+ do { \
+ BUG_ON((top) <= INTERPRETER_STACK_EMPTY); \
+ (ax) = (bx); \
+ (ax_t) = (bx_t); \
+ (bx) = (stack)->e[(top) - 2].u.v; \
+ (bx_t) = (stack)->e[(top) - 2].type; \
+ (top)--; \
+ } while (0)
+
+enum lttng_interpreter_type {
+ LTTNG_INTERPRETER_TYPE_S64,
+ LTTNG_INTERPRETER_TYPE_U64,
+ LTTNG_INTERPRETER_TYPE_SIGNED_ENUM,
+ LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM,
+ LTTNG_INTERPRETER_TYPE_DOUBLE,
+ LTTNG_INTERPRETER_TYPE_STRING,
+ LTTNG_INTERPRETER_TYPE_SEQUENCE,
+};
+
+/*
+ * Represents the output parameter of the lttng interpreter.
+ * Currently capturable field classes are integer, double, string and sequence
+ * of integer.
+ */
+struct lttng_interpreter_output {
+ enum lttng_interpreter_type type;
+ union {
+ int64_t s;
+ uint64_t u;
+
+ struct {
+ const char *str;
+ size_t len;
+ } str;
+ struct {
+ const void *ptr;
+ size_t nr_elem;
+
+ /* Inner type. */
+ const struct lttng_type *nested_type;
+ } sequence;
+ } u;
+};
+
+const char *lttng_bytecode_print_op(enum bytecode_op op);
+
+void lttng_bytecode_filter_sync_state(struct lttng_bytecode_runtime *runtime);
+void lttng_bytecode_capture_sync_state(struct lttng_bytecode_runtime *runtime);
+int lttng_bytecode_validate(struct bytecode_runtime *bytecode);
+int lttng_bytecode_specialize(const struct lttng_event_desc *event_desc,
+ struct bytecode_runtime *bytecode);
+
+uint64_t lttng_bytecode_filter_interpret_false(void *filter_data,
+ struct lttng_probe_ctx *lttng_probe_ctx,
+ const char *filter_stack_data);
+uint64_t lttng_bytecode_filter_interpret(void *filter_data,
+ struct lttng_probe_ctx *lttng_probe_ctx,
+ const char *filter_stack_data);
+
+uint64_t lttng_bytecode_capture_interpret_false(void *capture_data,
+ struct lttng_probe_ctx *lttng_probe_ctx,
+ const char *capture_stack_data,
+ struct lttng_interpreter_output *output);
+uint64_t lttng_bytecode_capture_interpret(void *capture_data,
+ struct lttng_probe_ctx *lttng_probe_ctx,
+ const char *capture_stack_data,
+ struct lttng_interpreter_output *output);
+
+#endif /* _LTTNG_BYTECODE_H */
--- /dev/null
+#ifndef _LTTNG_UST_MSGPACK_H
+#define _LTTNG_UST_MSGPACK_H
+
+/*
+ * msgpack.h
+ *
+ * Copyright (C) 2020 Francis Deslauriers <francis.deslauriers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stddef.h>
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else /* __KERNEL__ */
+#include <stdint.h>
+#endif /* __KERNEL__ */
+
+struct lttng_msgpack_writer {
+ uint8_t *buffer;
+ uint8_t *write_pos;
+ const uint8_t *end_write_pos;
+ uint8_t array_nesting;
+ uint8_t map_nesting;
+};
+
+void lttng_msgpack_writer_init(
+ struct lttng_msgpack_writer *writer,
+ uint8_t *buffer, size_t size);
+
+void lttng_msgpack_writer_fini(struct lttng_msgpack_writer *writer);
+
+int lttng_msgpack_write_nil(struct lttng_msgpack_writer *writer);
+int lttng_msgpack_write_true(struct lttng_msgpack_writer *writer);
+int lttng_msgpack_write_false(struct lttng_msgpack_writer *writer);
+int lttng_msgpack_write_unsigned_integer(
+ struct lttng_msgpack_writer *writer, uint64_t value);
+int lttng_msgpack_write_signed_integer(
+ struct lttng_msgpack_writer *writer, int64_t value);
+int lttng_msgpack_write_double(struct lttng_msgpack_writer *writer, double value);
+int lttng_msgpack_write_str(struct lttng_msgpack_writer *writer,
+ const char *value);
+int lttng_msgpack_begin_map(struct lttng_msgpack_writer *writer, size_t count);
+int lttng_msgpack_end_map(struct lttng_msgpack_writer *writer);
+int lttng_msgpack_begin_array(
+ struct lttng_msgpack_writer *writer, size_t count);
+int lttng_msgpack_end_array(struct lttng_msgpack_writer *writer);
+
+#endif /* _LTTNG_UST_MSGPACK_H */
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+/*
+ * Stage 1.2 of the trace trigger.
+ *
+ * Create dummy trace prototypes for each event class, and for each used
+ * template. This will allow checking whether the prototypes from the
+ * class and the instance using the class actually match.
+ */
+
+#include <lttng/events-reset.h> /* Reset all macros within TRACE_EVENT */
+
+#undef TP_PROTO
+#define TP_PROTO(...) __VA_ARGS__
+
+#undef TP_ARGS
+#define TP_ARGS(...) __VA_ARGS__
+
+#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
+#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
+void __trigger_template_proto___##_template(_proto);
+
+#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
+#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
+void __trigger_template_proto___##_template(void);
+
+#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
+#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
+void __trigger_template_proto___##_name(_proto);
+
+#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
+#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
+void __trigger_template_proto___##_name(void);
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+
/*
* Stage 1.2 of tracepoint event generation
*
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+/*
+ * Stage 3.1 of the trace triggers.
+ *
+ * Create trigger probe callback prototypes.
+ */
+
+/* Reset all macros within TRACEPOINT_EVENT */
+#include <lttng/events-reset.h>
+
+#undef TP_PROTO
+#define TP_PROTO(...) __VA_ARGS__
+
+#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
+#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
+static void __trigger_probe__##_name(void *__data, _proto);
+
+#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
+#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
+static void __trigger_probe__##_name(void *__data);
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
/*
* Stage 4 of the trace events.
*
#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
static inline \
-void __event_prepare_filter_stack__##_name(char *__stack_data, \
+void __event_prepare_interpreter_stack__##_name(char *__stack_data, \
void *__tp_locvar) \
{ \
struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
static inline \
-void __event_prepare_filter_stack__##_name(char *__stack_data, \
+void __event_prepare_interpreter_stack__##_name(char *__stack_data, \
void *__tp_locvar, _proto) \
{ \
struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
struct lttng_event *__event = __data; \
struct lttng_probe_ctx __lttng_probe_ctx = { \
.event = __event, \
+ .trigger = NULL, \
.interruptible = !irqs_disabled(), \
}; \
struct lttng_channel *__chan = __event->chan; \
__orig_dynamic_len_offset = this_cpu_ptr(<tng_dynamic_len_stack)->offset; \
__dynamic_len_idx = __orig_dynamic_len_offset; \
_code_pre \
- if (unlikely(!list_empty(&__event->bytecode_runtime_head))) { \
+ if (unlikely(!list_empty(&__event->filter_bytecode_runtime_head))) { \
struct lttng_bytecode_runtime *bc_runtime; \
int __filter_record = __event->has_enablers_without_bytecode; \
\
- __event_prepare_filter_stack__##_name(__stackvar.__filter_stack_data, \
+ __event_prepare_interpreter_stack__##_name(__stackvar.__filter_stack_data, \
tp_locvar, _args); \
- lttng_list_for_each_entry_rcu(bc_runtime, &__event->bytecode_runtime_head, node) { \
- if (unlikely(bc_runtime->filter(bc_runtime, &__lttng_probe_ctx, \
- __stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) { \
+ lttng_list_for_each_entry_rcu(bc_runtime, &__event->filter_bytecode_runtime_head, node) { \
+ if (unlikely(bc_runtime->interpreter_funcs.filter(bc_runtime, &__lttng_probe_ctx, \
+ __stackvar.__filter_stack_data) & LTTNG_INTERPRETER_RECORD_FLAG)) { \
__filter_record = 1; \
break; \
} \
struct lttng_event *__event = __data; \
struct lttng_probe_ctx __lttng_probe_ctx = { \
.event = __event, \
+ .trigger = NULL, \
.interruptible = !irqs_disabled(), \
}; \
struct lttng_channel *__chan = __event->chan; \
__orig_dynamic_len_offset = this_cpu_ptr(<tng_dynamic_len_stack)->offset; \
__dynamic_len_idx = __orig_dynamic_len_offset; \
_code_pre \
- if (unlikely(!list_empty(&__event->bytecode_runtime_head))) { \
+ if (unlikely(!list_empty(&__event->filter_bytecode_runtime_head))) { \
struct lttng_bytecode_runtime *bc_runtime; \
int __filter_record = __event->has_enablers_without_bytecode; \
\
- __event_prepare_filter_stack__##_name(__stackvar.__filter_stack_data, \
+ __event_prepare_interpreter_stack__##_name(__stackvar.__filter_stack_data, \
tp_locvar); \
- lttng_list_for_each_entry_rcu(bc_runtime, &__event->bytecode_runtime_head, node) { \
- if (unlikely(bc_runtime->filter(bc_runtime, &__lttng_probe_ctx, \
- __stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) { \
+ lttng_list_for_each_entry_rcu(bc_runtime, &__event->filter_bytecode_runtime_head, node) { \
+ if (unlikely(bc_runtime->interpreter_funcs.filter(bc_runtime, &__lttng_probe_ctx, \
+ __stackvar.__filter_stack_data) & LTTNG_INTERPRETER_RECORD_FLAG)) { \
__filter_record = 1; \
break; \
} \
#undef __get_dynamic_len
+/*
+ * Stage 6.1 of the trace triggers.
+ *
+ * Create the trigger probe function definitions.
+ */
+
+#include <lttng/events-reset.h> /* Reset all macros within LTTNG_TRACEPOINT_EVENT */
+
+#undef TP_PROTO
+#define TP_PROTO(...) __VA_ARGS__
+
+#undef TP_ARGS
+#define TP_ARGS(...) __VA_ARGS__
+
+#undef TP_FIELDS
+#define TP_FIELDS(...) __VA_ARGS__
+
+#undef TP_locvar
+#define TP_locvar(...) __VA_ARGS__
+
+#undef TP_code_pre
+#define TP_code_pre(...) __VA_ARGS__
+
+#undef TP_code_post
+#define TP_code_post(...) __VA_ARGS__
+
+/*
+ * Using twice the size for filter stack data to hold both size and
+ * pointer for each field (worst case). For integers, max size required
+ * is 64-bit. Same for double-precision floats. Those fit within
+ * 2*sizeof(unsigned long) for all supported architectures.
+ * The filter verdict is the union (logical OR) over the filter runtime
+ * list.
+ */
+#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
+#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
+static void __trigger_probe__##_name(void *__data, _proto) \
+{ \
+ struct probe_local_vars { _locvar }; \
+ struct lttng_trigger *__trigger = __data; \
+ struct lttng_probe_ctx __lttng_probe_ctx = { \
+ .event = NULL, \
+ .trigger = __trigger, \
+ .interruptible = !irqs_disabled(), \
+ }; \
+ union { \
+ size_t __dynamic_len_removed[ARRAY_SIZE(__event_fields___##_name)]; \
+ char __interpreter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
+ } __stackvar; \
+ struct probe_local_vars __tp_locvar; \
+ struct probe_local_vars *tp_locvar __attribute__((unused)) = \
+ &__tp_locvar; \
+ \
+ if (unlikely(!READ_ONCE(__trigger->enabled))) \
+ return; \
+ _code_pre \
+ if (unlikely(!list_empty(&__trigger->filter_bytecode_runtime_head))) { \
+ struct lttng_bytecode_runtime *bc_runtime; \
+ int __filter_record = __trigger->has_enablers_without_bytecode; \
+ \
+ __event_prepare_interpreter_stack__##_name(__stackvar.__interpreter_stack_data, \
+ tp_locvar, _args); \
+ lttng_list_for_each_entry_rcu(bc_runtime, &__trigger->filter_bytecode_runtime_head, node) { \
+ if (unlikely(bc_runtime->interpreter_funcs.filter(bc_runtime, &__lttng_probe_ctx, \
+ __stackvar.__interpreter_stack_data) & LTTNG_INTERPRETER_RECORD_FLAG)) \
+ __filter_record = 1; \
+ } \
+ if (likely(!__filter_record)) \
+ goto __post; \
+ } \
+ \
+ if (unlikely(!list_empty(&__trigger->capture_bytecode_runtime_head))) \
+ __event_prepare_interpreter_stack__##_name(__stackvar.__interpreter_stack_data, \
+ tp_locvar, _args); \
+ \
+ __trigger->send_notification(__trigger, &__lttng_probe_ctx, __stackvar.__interpreter_stack_data); \
+ \
+__post: \
+ _code_post \
+ return; \
+}
+
+#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
+#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
+static void __trigger_probe__##_name(void *__data) \
+{ \
+ struct probe_local_vars { _locvar }; \
+ struct lttng_trigger *__trigger = __data; \
+ struct lttng_probe_ctx __lttng_probe_ctx = { \
+ .event = NULL, \
+ .trigger = __trigger, \
+ .interruptible = !irqs_disabled(), \
+ }; \
+ union { \
+ size_t __dynamic_len_removed[ARRAY_SIZE(__event_fields___##_name)]; \
+ char __interpreter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
+ } __stackvar; \
+ struct probe_local_vars __tp_locvar; \
+ struct probe_local_vars *tp_locvar __attribute__((unused)) = \
+ &__tp_locvar; \
+ \
+ if (unlikely(!READ_ONCE(__trigger->enabled))) \
+ return; \
+ _code_pre \
+ if (unlikely(!list_empty(&__trigger->filter_bytecode_runtime_head))) { \
+ struct lttng_bytecode_runtime *bc_runtime; \
+ int __filter_record = __trigger->has_enablers_without_bytecode; \
+ \
+ __event_prepare_interpreter_stack__##_name(__stackvar.__interpreter_stack_data, \
+ tp_locvar); \
+ lttng_list_for_each_entry_rcu(bc_runtime, &__trigger->filter_bytecode_runtime_head, node) { \
+ if (unlikely(bc_runtime->interpreter_funcs.filter(bc_runtime, &__lttng_probe_ctx, \
+ __stackvar.__interpreter_stack_data) & LTTNG_INTERPRETER_RECORD_FLAG)) \
+ __filter_record = 1; \
+ } \
+ if (likely(!__filter_record)) \
+ goto __post; \
+ } \
+ \
+ if (unlikely(!list_empty(&__trigger->capture_bytecode_runtime_head))) \
+ __event_prepare_interpreter_stack__##_name(__stackvar.__interpreter_stack_data, \
+ tp_locvar); \
+ \
+ __trigger->send_notification(__trigger, &__lttng_probe_ctx, __stackvar.__interpreter_stack_data); \
+__post: \
+ _code_post \
+ return; \
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
* Stage 7 of the trace events.
*
#define TP_PROBE_CB(_template) &__event_probe__##_template
#endif
+#ifndef TP_TRIGGER_PROBE_CB
+#define TP_TRIGGER_PROBE_CB(_template) &__trigger_probe__##_template
+#endif
+
#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
static const struct lttng_event_desc __event_desc___##_map = { \
.probe_callback = (void *) TP_PROBE_CB(_template), \
.nr_fields = ARRAY_SIZE(__event_fields___##_template), \
.owner = THIS_MODULE, \
+ .trigger_callback = (void *) TP_TRIGGER_PROBE_CB(_template), \
};
#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng/trigger-notification.h
+ *
+ * Copyright (C) 2020 Francis Deslauriers <francis.deslauriers@efficios.com>
+ */
+
+#ifndef _LTTNG_TRIGGER_NOTIFICATION_H
+#define _LTTNG_TRIGGER_NOTIFICATION_H
+
+#include <lttng/events.h>
+
+void lttng_trigger_notification_send(struct lttng_trigger *trigger,
+ struct lttng_probe_ctx *lttng_probe_ctx,
+ const char *stack_data);
+
+#endif /* _LTTNG_TRIGGER_NOTIFICATION_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only) */
+#ifndef _LTTNG_UTILS_H
+#define _LTTNG_UTILS_H
+
+/*
+ * Copyright (C) 2020 Francis Deslauriers <francis.deslauriers@efficios.com>
+ */
+
+#include <linux/jhash.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+/*
+ * Borrow (do not take ownership of) the hash table bucket matching the
+ * jhash of @event_name. Masking with (hash_table_size - 1) works as a
+ * modulo only when hash_table_size is a power of two — assumed here;
+ * TODO(review): confirm all callers guarantee this.
+ */
+static inline
+struct hlist_head *utils_borrow_hash_table_bucket(
+		struct hlist_head *hash_table,
+		unsigned int hash_table_size,
+		const char *event_name)
+{
+	size_t name_len;
+	uint32_t hash;
+
+	name_len = strlen(event_name);
+
+	/* Hash the full name bytes with seed 0. */
+	hash = jhash(event_name, name_len, 0);
+	return &hash_table[hash & (hash_table_size - 1)];
+}
+#endif /* _LTTNG_UTILS_H */
sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
v_inc(config, &bufb->array[sb_bindex]->records_commit);
}
-#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
-static inline
-void subbuffer_count_record(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_backend *bufb,
- unsigned long idx)
-{
-}
-#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
/*
* Reader has exclusive subbuffer access for record consumption. No need to
_v_dec(config, &bufb->array[sb_bindex]->records_unread);
v_inc(config, &bufb->records_read);
}
+#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
+/* Event counting disabled: record accounting helpers become no-ops. */
+static inline
+void subbuffer_count_record(const struct lib_ring_buffer_config *config,
+			    struct lib_ring_buffer_backend *bufb,
+			    unsigned long idx)
+{
+}
+/* No-op counterpart of the record-consumption accounting. */
+static inline
+void subbuffer_consume_record(const struct lib_ring_buffer_config *config,
+			      struct lib_ring_buffer_backend *bufb)
+{
+}
+#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
static inline
unsigned long subbuffer_get_records_count(
*
* RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a subbuffer is
* ready to read. Lower latencies before the reader is woken up. Mainly suitable
- * for drivers.
+ * for drivers. Going through an "irq_work" allows triggering this type of wakeup
+ * even from NMI context: the wakeup will be slightly delayed until the next
+ * interrupts are handled.
*
* RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
* has the responsibility to perform wakeups.
enum {
RING_BUFFER_WAKEUP_BY_TIMER, /* wake up performed by timer */
RING_BUFFER_WAKEUP_BY_WRITER, /*
- * writer wakes up reader,
- * not lock-free
- * (takes spinlock).
+ * writer wakes up reader through
+ * irq_work.
*/
} wakeup;
/*
#define _LIB_RING_BUFFER_FRONTEND_TYPES_H
#include <linux/kref.h>
+#include <linux/irq_work.h>
#include <ringbuffer/config.h>
#include <ringbuffer/backend_types.h>
#include <lttng/prio_heap.h> /* For per-CPU read-side iterator */
struct notifier_block tick_nohz_notifier; /* CPU nohz notifier */
wait_queue_head_t read_wait; /* reader wait queue */
wait_queue_head_t hp_wait; /* CPU hotplug wait queue */
+ struct irq_work wakeup_pending; /* Pending wakeup irq work */
int finalized; /* Has channel been finalized */
struct channel_iter iter; /* Channel read-side iterator */
struct kref ref; /* Reference count */
union v_atomic records_overrun; /* Number of overwritten records */
wait_queue_head_t read_wait; /* reader buffer-level wait queue */
wait_queue_head_t write_wait; /* writer buffer-level wait queue (for metadata only) */
+ struct irq_work wakeup_pending; /* Pending wakeup irq work */
int finalized; /* buffer has been finalized */
struct timer_list switch_timer; /* timer for periodical switch */
struct timer_list read_timer; /* timer for read poll */
extern ssize_t lib_ring_buffer_get_next_record(struct channel *chan,
struct lib_ring_buffer *buf);
+/*
+ * Ensure that the current subbuffer is put after client code has read the
+ * payload of the current record. Has an effect when the end of subbuffer is
+ * reached. It is not required if get_next_record is called successively.
+ * However, it should be invoked before returning data to user-space to ensure
+ * that the get/put subbuffer state is quiescent.
+ */
+extern void lib_ring_buffer_put_current_record(struct lib_ring_buffer *buf);
+
/*
* channel_get_next_record advances the buffer read position to the next record.
* It returns either the size of the next record, -EAGAIN if there is currently
obj-$(CONFIG_LTTNG) += lttng-ring-buffer-client-mmap-discard.o
obj-$(CONFIG_LTTNG) += lttng-ring-buffer-client-mmap-overwrite.o
obj-$(CONFIG_LTTNG) += lttng-ring-buffer-metadata-mmap-client.o
+obj-$(CONFIG_LTTNG) += lttng-ring-buffer-trigger-client.o
+
+obj-$(CONFIG_LTTNG) += lttng-counter-client-percpu-32-modular.o
+# Only build the 64-bit counter client on 64-bit kernels.
+# Note: must be $(CONFIG_64BIT); a bare $CONFIG_64BIT expands "$C" (empty)
+# followed by the literal text "ONFIG_64BIT", making the conditional wrong.
+ifneq ($(CONFIG_64BIT),)
+  obj-$(CONFIG_LTTNG) += lttng-counter-client-percpu-64-modular.o
+endif # CONFIG_64BIT
+
obj-$(CONFIG_LTTNG) += lttng-clock.o
obj-$(CONFIG_LTTNG) += lttng-tracer.o
obj-$(CONFIG_LTTNG) += lttng-wrapper.o
-lttng-tracer-objs := lttng-events.o lttng-abi.o lttng-string-utils.o \
+lttng-tracer-objs := lib/msgpack/msgpack.o \
+ lttng-events.o lttng-abi.o lttng-string-utils.o \
lttng-probes.o lttng-context.o \
lttng-context-pid.o lttng-context-procname.o \
lttng-context-prio.o lttng-context-nice.o \
lttng-context-hostname.o \
probes/lttng.o \
lttng-tracker-id.o \
- lttng-filter.o lttng-filter-interpreter.o \
- lttng-filter-specialize.o \
- lttng-filter-validator.o \
+ lttng-bytecode.o lttng-bytecode-interpreter.o \
+ lttng-bytecode-specialize.o \
+ lttng-bytecode-validator.o \
probes/lttng-probe-user.o \
lttng-tp-mempool.o \
+ lttng-trigger-notification.o
lttng-wrapper-objs := wrapper/page_alloc.o \
wrapper/random.o \
prio_heap/lttng_prio_heap.o \
../wrapper/splice.o
+obj-$(CONFIG_LTTNG) += lttng-counter.o
+
+lttng-counter-objs := \
+ counter/counter.o
+
# vim:syntax=make
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * counter.c
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <lttng/tracer.h>
+#include <linux/cpumask.h>
+#include <counter/counter.h>
+#include <counter/counter-internal.h>
+#include <wrapper/vmalloc.h>
+
+/* Return the configured number of elements for one counter dimension. */
+static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension *dimension)
+{
+	return dimension->max_nr_elem;
+}
+
+/*
+ * Compute the per-dimension strides used to flatten multi-dimensional
+ * indexes, iterating from the innermost dimension outwards.
+ * Returns 0 on success, -EINVAL on a zero-sized dimension or if the
+ * total element count would overflow size_t.
+ *
+ * Fix: the overflow guard previously ran *after* "stride *= nr_elem",
+ * so the multiplication could already have wrapped before being
+ * checked. Check before multiplying instead.
+ */
+static int lttng_counter_init_stride(const struct lib_counter_config *config,
+				     struct lib_counter *counter)
+{
+	size_t nr_dimensions = counter->nr_dimensions;
+	size_t stride = 1;
+	ssize_t i;
+
+	for (i = nr_dimensions - 1; i >= 0; i--) {
+		struct lib_counter_dimension *dimension = &counter->dimensions[i];
+		size_t nr_elem;
+
+		nr_elem = lttng_counter_get_dimension_nr_elements(dimension);
+		dimension->stride = stride;
+		/* nr_elem should be minimum 1 for each dimension. */
+		if (!nr_elem)
+			return -EINVAL;
+		/* Reject stride * nr_elem overflow before it happens. */
+		if (stride > SIZE_MAX / nr_elem)
+			return -EINVAL;
+		stride *= nr_elem;
+	}
+	return 0;
+}
+
+/*
+ * Allocate the counters array and overflow/underflow bitmaps for one
+ * layout: the global layout when @cpu == -1, otherwise the per-CPU
+ * layout of @cpu. Returns 0, or -EINVAL / -ENOMEM on failure; buffers
+ * already allocated on a failing path are released later by
+ * lttng_counter_layout_fini() from the caller's error handling.
+ *
+ * NOTE(review): counter_size * nr_elem overflow is not checked here —
+ * confirm it is bounded by the stride validation at create time.
+ */
+static int lttng_counter_layout_init(struct lib_counter *counter, int cpu)
+{
+	struct lib_counter_layout *layout;
+	size_t counter_size;
+	size_t nr_elem = counter->allocated_elem;
+
+	if (cpu == -1)
+		layout = &counter->global_counters;
+	else
+		layout = per_cpu_ptr(counter->percpu_counters, cpu);
+	switch (counter->config.counter_size) {
+	case COUNTER_SIZE_8_BIT:
+	case COUNTER_SIZE_16_BIT:
+	case COUNTER_SIZE_32_BIT:
+	case COUNTER_SIZE_64_BIT:
+		/* The enum values encode the size in bytes (1, 2, 4, 8). */
+		counter_size = (size_t) counter->config.counter_size;
+		break;
+	default:
+		return -EINVAL;
+	}
+	/* max(cpu, 0): the global allocation (-1) lands on the node of CPU 0. */
+	layout->counters = lttng_kvzalloc_node(ALIGN(counter_size * nr_elem,
+						     1 << INTERNODE_CACHE_SHIFT),
+					       GFP_KERNEL | __GFP_NOWARN,
+					       cpu_to_node(max(cpu, 0)));
+	if (!layout->counters)
+		return -ENOMEM;
+	/* One flag bit per element, rounded up to whole bytes. */
+	layout->overflow_bitmap = lttng_kvzalloc_node(ALIGN(ALIGN(nr_elem, 8) / 8,
+						      1 << INTERNODE_CACHE_SHIFT),
+					       GFP_KERNEL | __GFP_NOWARN,
+					       cpu_to_node(max(cpu, 0)));
+	if (!layout->overflow_bitmap)
+		return -ENOMEM;
+	layout->underflow_bitmap = lttng_kvzalloc_node(ALIGN(ALIGN(nr_elem, 8) / 8,
+						      1 << INTERNODE_CACHE_SHIFT),
+					       GFP_KERNEL | __GFP_NOWARN,
+					       cpu_to_node(max(cpu, 0)));
+	if (!layout->underflow_bitmap)
+		return -ENOMEM;
+	return 0;
+}
+
+/*
+ * Free the buffers of one layout (global when @cpu == -1, else the
+ * per-CPU layout of @cpu). Safe on partially initialized layouts —
+ * presumably lttng_kvfree(NULL) is a no-op like kvfree();
+ * TODO(review): confirm.
+ */
+static void lttng_counter_layout_fini(struct lib_counter *counter, int cpu)
+{
+	struct lib_counter_layout *layout;
+
+	if (cpu == -1)
+		layout = &counter->global_counters;
+	else
+		layout = per_cpu_ptr(counter->percpu_counters, cpu);
+
+	lttng_kvfree(layout->counters);
+	lttng_kvfree(layout->overflow_bitmap);
+	lttng_kvfree(layout->underflow_bitmap);
+}
+
+/*
+ * Record the global sum step, narrowed to the configured counter size.
+ * Returns -EINVAL for negative steps, steps exceeding the signed
+ * maximum of the counter size, or an unknown counter size.
+ */
+static
+int lttng_counter_set_global_sum_step(struct lib_counter *counter,
+				      int64_t global_sum_step)
+{
+	if (global_sum_step < 0)
+		return -EINVAL;
+
+	switch (counter->config.counter_size) {
+	case COUNTER_SIZE_8_BIT:
+		if (global_sum_step > S8_MAX)
+			return -EINVAL;
+		counter->global_sum_step.s8 = (int8_t) global_sum_step;
+		break;
+	case COUNTER_SIZE_16_BIT:
+		if (global_sum_step > S16_MAX)
+			return -EINVAL;
+		counter->global_sum_step.s16 = (int16_t) global_sum_step;
+		break;
+	case COUNTER_SIZE_32_BIT:
+		if (global_sum_step > S32_MAX)
+			return -EINVAL;
+		counter->global_sum_step.s32 = (int32_t) global_sum_step;
+		break;
+	case COUNTER_SIZE_64_BIT:
+		/* Any non-negative int64_t fits. */
+		counter->global_sum_step.s64 = global_sum_step;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Validate counter creation arguments: 0 when valid, -1 otherwise.
+ * 64-bit counters require a 64-bit kernel; max_nr_elem must be
+ * non-NULL; a non-zero global sum step is only meaningful when both
+ * per-CPU and global counters are allocated.
+ *
+ * NOTE(review): nr_dimensions is accepted but not validated here.
+ */
+static
+int validate_args(const struct lib_counter_config *config,
+		  size_t nr_dimensions,
+		  const size_t *max_nr_elem,
+		  int64_t global_sum_step)
+{
+	if (BITS_PER_LONG != 64 && config->counter_size == COUNTER_SIZE_64_BIT) {
+		WARN_ON_ONCE(1);
+		return -1;
+	}
+	if (!max_nr_elem)
+		return -1;
+	/*
+	 * global sum step is only useful with allocating both per-cpu
+	 * and global counters.
+	 */
+	if (global_sum_step && (!(config->alloc & COUNTER_ALLOC_GLOBAL) ||
+				!(config->alloc & COUNTER_ALLOC_PER_CPU)))
+		return -1;
+	return 0;
+}
+
+/*
+ * Create a multi-dimensional counter. @max_nr_elem holds the number of
+ * elements for each of the @nr_dimensions dimensions. Returns NULL on
+ * any error; on success ownership of the counter is transferred to the
+ * caller, who releases it with lttng_counter_destroy().
+ */
+struct lib_counter *lttng_counter_create(const struct lib_counter_config *config,
+					 size_t nr_dimensions,
+					 const size_t *max_nr_elem,
+					 int64_t global_sum_step)
+{
+	struct lib_counter *counter;
+	size_t dimension, nr_elem = 1;
+	int cpu, ret;
+
+	if (validate_args(config, nr_dimensions, max_nr_elem, global_sum_step))
+		return NULL;
+	counter = kzalloc(sizeof(struct lib_counter), GFP_KERNEL);
+	if (!counter)
+		return NULL;
+	counter->config = *config;
+	if (lttng_counter_set_global_sum_step(counter, global_sum_step))
+		goto error_sum_step;
+	counter->nr_dimensions = nr_dimensions;
+	counter->dimensions = kzalloc(nr_dimensions * sizeof(*counter->dimensions), GFP_KERNEL);
+	if (!counter->dimensions)
+		goto error_dimensions;
+	for (dimension = 0; dimension < nr_dimensions; dimension++)
+		counter->dimensions[dimension].max_nr_elem = max_nr_elem[dimension];
+	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
+		counter->percpu_counters = alloc_percpu(struct lib_counter_layout);
+		if (!counter->percpu_counters)
+			goto error_alloc_percpu;
+	}
+
+	if (lttng_counter_init_stride(config, counter))
+		goto error_init_stride;
+	//TODO saturation values.
+	/* Total element count; overflow was rejected by init_stride. */
+	for (dimension = 0; dimension < counter->nr_dimensions; dimension++)
+		nr_elem *= lttng_counter_get_dimension_nr_elements(&counter->dimensions[dimension]);
+	counter->allocated_elem = nr_elem;
+	if (config->alloc & COUNTER_ALLOC_GLOBAL) {
+		ret = lttng_counter_layout_init(counter, -1);	/* global */
+		if (ret)
+			goto layout_init_error;
+	}
+	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
+		//TODO: integrate with CPU hotplug and online cpus
+		/* NOTE(review): assumes a dense possible-CPU mask; confirm
+		 * for_each_possible_cpu would not be required here. */
+		for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
+			ret = lttng_counter_layout_init(counter, cpu);
+			if (ret)
+				goto layout_init_error;
+		}
+	}
+	return counter;
+
+	/* Error labels unwind in reverse allocation order and fall through. */
+layout_init_error:
+	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
+		for (cpu = 0; cpu < num_possible_cpus(); cpu++)
+			lttng_counter_layout_fini(counter, cpu);
+	}
+	if (config->alloc & COUNTER_ALLOC_GLOBAL)
+		lttng_counter_layout_fini(counter, -1);
+error_init_stride:
+	free_percpu(counter->percpu_counters);
+error_alloc_percpu:
+	kfree(counter->dimensions);
+error_dimensions:
+error_sum_step:
+	kfree(counter);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(lttng_counter_create);
+
+/*
+ * Release a counter created by lttng_counter_create(): per-CPU layouts
+ * and percpu storage (when configured), the global layout (when
+ * configured), the dimension array, and the counter itself.
+ */
+void lttng_counter_destroy(struct lib_counter *counter)
+{
+	struct lib_counter_config *config = &counter->config;
+	int cpu;
+
+	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
+		for (cpu = 0; cpu < num_possible_cpus(); cpu++)
+			lttng_counter_layout_fini(counter, cpu);
+		free_percpu(counter->percpu_counters);
+	}
+	if (config->alloc & COUNTER_ALLOC_GLOBAL)
+		lttng_counter_layout_fini(counter, -1);
+	kfree(counter->dimensions);
+	kfree(counter);
+}
+EXPORT_SYMBOL_GPL(lttng_counter_destroy);
+
+/*
+ * Read the counter value at @dimension_indexes into *@value, along with
+ * its overflow/underflow flags. @cpu selects a per-CPU layout; @cpu < 0
+ * selects the global layout (only valid when globally allocated).
+ * Returns 0, -EOVERFLOW on out-of-range indexes, -EINVAL on an invalid
+ * cpu/alloc combination.
+ */
+int lttng_counter_read(const struct lib_counter_config *config,
+		       struct lib_counter *counter,
+		       const size_t *dimension_indexes,
+		       int cpu, int64_t *value, bool *overflow,
+		       bool *underflow)
+{
+	struct lib_counter_layout *layout;
+	size_t index;
+
+	if (unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
+		return -EOVERFLOW;
+	index = lttng_counter_get_index(config, counter, dimension_indexes);
+
+	/* Select the layout to read from based on allocation mode and @cpu. */
+	switch (config->alloc) {
+	case COUNTER_ALLOC_PER_CPU:
+		if (cpu < 0 || cpu >= num_possible_cpus())
+			return -EINVAL;
+		layout = per_cpu_ptr(counter->percpu_counters, cpu);
+		break;
+	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+		if (cpu >= 0) {
+			if (cpu >= num_possible_cpus())
+				return -EINVAL;
+			layout = per_cpu_ptr(counter->percpu_counters, cpu);
+		} else {
+			layout = &counter->global_counters;
+		}
+		break;
+	case COUNTER_ALLOC_GLOBAL:
+		if (cpu >= 0)
+			return -EINVAL;
+		layout = &counter->global_counters;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (config->counter_size) {
+	case COUNTER_SIZE_8_BIT:
+	{
+		int8_t *int_p = (int8_t *) layout->counters + index;
+		*value = (int64_t) READ_ONCE(*int_p);
+		break;
+	}
+	case COUNTER_SIZE_16_BIT:
+	{
+		int16_t *int_p = (int16_t *) layout->counters + index;
+		*value = (int64_t) READ_ONCE(*int_p);
+		break;
+	}
+	case COUNTER_SIZE_32_BIT:
+	{
+		int32_t *int_p = (int32_t *) layout->counters + index;
+		*value = (int64_t) READ_ONCE(*int_p);
+		break;
+	}
+#if BITS_PER_LONG == 64
+	case COUNTER_SIZE_64_BIT:
+	{
+		int64_t *int_p = (int64_t *) layout->counters + index;
+		*value = READ_ONCE(*int_p);
+		break;
+	}
+#endif
+	default:
+		/*
+		 * Presumably unreachable: counter_size is validated at
+		 * create time (and 64-bit rejected on 32-bit kernels).
+		 * NOTE(review): *value is left unset on this path while
+		 * 0 is returned — confirm callers never hit it.
+		 */
+		WARN_ON_ONCE(1);
+	}
+	*overflow = test_bit(index, layout->overflow_bitmap);
+	*underflow = test_bit(index, layout->underflow_bitmap);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_counter_read);
+
+/*
+ * Aggregate the value at @dimension_indexes across the global layout
+ * (when configured) and all per-CPU layouts (when configured), summing
+ * with 64-bit wrap detection. *@overflow / *@underflow are set if any
+ * individual counter flagged, or if the 64-bit sum itself wraps.
+ */
+int lttng_counter_aggregate(const struct lib_counter_config *config,
+			    struct lib_counter *counter,
+			    const size_t *dimension_indexes,
+			    int64_t *value, bool *overflow,
+			    bool *underflow)
+{
+	int cpu, ret;
+	int64_t v, sum = 0;
+	bool of, uf;
+
+	*overflow = false;
+	*underflow = false;
+
+	/* No default needed: invalid alloc modes are rejected below. */
+	switch (config->alloc) {
+	case COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
+	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+		/* Read global counter. */
+		ret = lttng_counter_read(config, counter, dimension_indexes,
+					 -1, &v, &of, &uf);
+		if (ret < 0)
+			return ret;
+		sum += v;
+		*overflow |= of;
+		*underflow |= uf;
+		break;
+	case COUNTER_ALLOC_PER_CPU:
+		break;
+	}
+
+	switch (config->alloc) {
+	case COUNTER_ALLOC_GLOBAL:
+		break;
+	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
+	case COUNTER_ALLOC_PER_CPU:
+		//TODO: integrate with CPU hotplug and online cpus
+		for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
+			int64_t old = sum;
+
+			ret = lttng_counter_read(config, counter, dimension_indexes,
+						 cpu, &v, &of, &uf);
+			if (ret < 0)
+				return ret;
+			*overflow |= of;
+			*underflow |= uf;
+			/* Overflow is defined on unsigned types. */
+			sum = (int64_t) ((uint64_t) old + (uint64_t) v);
+			/* Wrap in the signed sum flags over/underflow. */
+			if (v > 0 && sum < old)
+				*overflow = true;
+			else if (v < 0 && sum > old)
+				*underflow = true;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+	*value = sum;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_counter_aggregate);
+
+/*
+ * Reset the counter at @dimension_indexes to 0 and clear its
+ * overflow/underflow bits, for one layout: per-CPU @cpu, or the global
+ * layout when @cpu < 0. Returns 0, -EOVERFLOW on bad indexes, -EINVAL
+ * on an invalid cpu/alloc combination.
+ */
+static
+int lttng_counter_clear_cpu(const struct lib_counter_config *config,
+			    struct lib_counter *counter,
+			    const size_t *dimension_indexes,
+			    int cpu)
+{
+	struct lib_counter_layout *layout;
+	size_t index;
+
+	if (unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
+		return -EOVERFLOW;
+	index = lttng_counter_get_index(config, counter, dimension_indexes);
+
+	/* Same layout selection logic as lttng_counter_read(). */
+	switch (config->alloc) {
+	case COUNTER_ALLOC_PER_CPU:
+		if (cpu < 0 || cpu >= num_possible_cpus())
+			return -EINVAL;
+		layout = per_cpu_ptr(counter->percpu_counters, cpu);
+		break;
+	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+		if (cpu >= 0) {
+			if (cpu >= num_possible_cpus())
+				return -EINVAL;
+			layout = per_cpu_ptr(counter->percpu_counters, cpu);
+		} else {
+			layout = &counter->global_counters;
+		}
+		break;
+	case COUNTER_ALLOC_GLOBAL:
+		if (cpu >= 0)
+			return -EINVAL;
+		layout = &counter->global_counters;
+		break;
+	default:
+		return -EINVAL;
+	}
+	switch (config->counter_size) {
+	case COUNTER_SIZE_8_BIT:
+	{
+		int8_t *int_p = (int8_t *) layout->counters + index;
+		WRITE_ONCE(*int_p, 0);
+		break;
+	}
+	case COUNTER_SIZE_16_BIT:
+	{
+		int16_t *int_p = (int16_t *) layout->counters + index;
+		WRITE_ONCE(*int_p, 0);
+		break;
+	}
+	case COUNTER_SIZE_32_BIT:
+	{
+		int32_t *int_p = (int32_t *) layout->counters + index;
+		WRITE_ONCE(*int_p, 0);
+		break;
+	}
+#if BITS_PER_LONG == 64
+	case COUNTER_SIZE_64_BIT:
+	{
+		int64_t *int_p = (int64_t *) layout->counters + index;
+		WRITE_ONCE(*int_p, 0);
+		break;
+	}
+#endif
+	default:
+		/* Presumably unreachable: size validated at create time. */
+		WARN_ON_ONCE(1);
+	}
+	clear_bit(index, layout->overflow_bitmap);
+	clear_bit(index, layout->underflow_bitmap);
+	return 0;
+}
+
+/*
+ * Clear the counter at @dimension_indexes on every configured layout:
+ * the global layout first (when allocated), then each per-CPU layout.
+ * Returns 0 or the first error from lttng_counter_clear_cpu().
+ */
+int lttng_counter_clear(const struct lib_counter_config *config,
+			struct lib_counter *counter,
+			const size_t *dimension_indexes)
+{
+	int cpu, ret;
+
+	/* No default needed: invalid alloc modes are rejected below. */
+	switch (config->alloc) {
+	case COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
+	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+		/* Clear global counter. */
+		ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, -1);
+		if (ret < 0)
+			return ret;
+		break;
+	case COUNTER_ALLOC_PER_CPU:
+		break;
+	}
+
+	switch (config->alloc) {
+	case COUNTER_ALLOC_GLOBAL:
+		break;
+	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
+	case COUNTER_ALLOC_PER_CPU:
+		//TODO: integrate with CPU hotplug and online cpus
+		for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
+			ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, cpu);
+			if (ret < 0)
+				return ret;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_counter_clear);
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
+MODULE_DESCRIPTION("LTTng counter library");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+ __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+ __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+ LTTNG_MODULES_EXTRAVERSION);
--- /dev/null
+/*
+ * msgpack.c
+ *
+ * Copyright (C) 2020 Francis Deslauriers <francis.deslauriers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define _GNU_SOURCE
+#define _LGPL_SOURCE
+#include <stddef.h>
+
+#define MSGPACK_FIXSTR_ID_MASK 0xA0
+#define MSGPACK_FIXMAP_ID_MASK 0x80
+#define MSGPACK_FIXARRAY_ID_MASK 0x90
+
+#define MSGPACK_NIL_ID 0xC0
+#define MSGPACK_FALSE_ID 0xC2
+#define MSGPACK_TRUE_ID 0xC3
+#define MSGPACK_MAP16_ID 0xDE
+#define MSGPACK_ARRAY16_ID 0xDC
+
+#define MSGPACK_UINT8_ID 0xCC
+#define MSGPACK_UINT16_ID 0xCD
+#define MSGPACK_UINT32_ID 0xCE
+#define MSGPACK_UINT64_ID 0xCF
+
+#define MSGPACK_INT8_ID 0xD0
+#define MSGPACK_INT16_ID 0xD1
+#define MSGPACK_INT32_ID 0xD2
+#define MSGPACK_INT64_ID 0xD3
+
+#define MSGPACK_FLOAT64_ID 0xCB
+#define MSGPACK_STR16_ID 0xDA
+
+#define MSGPACK_FIXINT_MAX ((1 << 7) - 1)
+#define MSGPACK_FIXINT_MIN -(1 << 5)
+#define MSGPACK_FIXMAP_MAX_COUNT 15
+#define MSGPACK_FIXARRAY_MAX_COUNT 15
+#define MSGPACK_FIXSTR_MAX_LENGTH 31
+
+#ifdef __KERNEL__
+#include <linux/bug.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <lttng/msgpack.h>
+
+#define INT8_MIN (-128)
+#define INT16_MIN (-32767-1)
+#define INT32_MIN (-2147483647-1)
+#define INT8_MAX (127)
+#define INT16_MAX (32767)
+#define INT32_MAX (2147483647)
+#define UINT8_MAX (255)
+#define UINT16_MAX (65535)
+#define UINT32_MAX (4294967295U)
+
+#define byteswap_host_to_be16(_tmp) cpu_to_be16(_tmp)
+#define byteswap_host_to_be32(_tmp) cpu_to_be32(_tmp)
+#define byteswap_host_to_be64(_tmp) cpu_to_be64(_tmp)
+
+#define lttng_msgpack_assert(cond) WARN_ON(!(cond))
+
+#else /* __KERNEL__ */
+
+#include <endian.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "msgpack.h"
+
+#define byteswap_host_to_be16(_tmp) htobe16(_tmp)
+#define byteswap_host_to_be32(_tmp) htobe32(_tmp)
+#define byteswap_host_to_be64(_tmp) htobe64(_tmp)
+
+#define lttng_msgpack_assert(cond) ({ \
+ if (!(cond)) \
+ fprintf(stderr, "Assertion failed. %s:%d\n", __FILE__, __LINE__); \
+ })
+#endif /* __KERNEL__ */
+
+/*
+ * Append raw bytes to the writer's output buffer.
+ * Returns 0 on success, -1 if the copy would overrun the buffer.
+ */
+static inline int lttng_msgpack_append_buffer(
+ struct lttng_msgpack_writer *writer,
+ const uint8_t *buf,
+ size_t length)
+{
+ lttng_msgpack_assert(buf);
+
+ /* Refuse the copy if it would run past the end of the buffer. */
+ if (writer->write_pos + length > writer->end_write_pos)
+ return -1;
+
+ memcpy(writer->write_pos, buf, length);
+ writer->write_pos += length;
+ return 0;
+}
+
+/* Append one byte; a single byte has no byte-order concerns. */
+static inline int lttng_msgpack_append_u8(
+ struct lttng_msgpack_writer *writer, uint8_t value)
+{
+ uint8_t byte = value;
+
+ return lttng_msgpack_append_buffer(writer, &byte, sizeof(byte));
+}
+
+/* Append a 16-bit value; MessagePack is big-endian on the wire. */
+static inline int lttng_msgpack_append_u16(
+ struct lttng_msgpack_writer *writer, uint16_t value)
+{
+ uint16_t be_value = byteswap_host_to_be16(value);
+
+ return lttng_msgpack_append_buffer(writer, (uint8_t *) &be_value, sizeof(be_value));
+}
+
+/* Append a 32-bit value in big-endian wire order. */
+static inline int lttng_msgpack_append_u32(
+ struct lttng_msgpack_writer *writer, uint32_t value)
+{
+ uint32_t be_value = byteswap_host_to_be32(value);
+
+ return lttng_msgpack_append_buffer(writer, (uint8_t *) &be_value, sizeof(be_value));
+}
+
+/* Append a 64-bit value in big-endian wire order. */
+static inline int lttng_msgpack_append_u64(
+ struct lttng_msgpack_writer *writer, uint64_t value)
+{
+ uint64_t be_value = byteswap_host_to_be64(value);
+
+ return lttng_msgpack_append_buffer(writer, (uint8_t *) &be_value, sizeof(be_value));
+}
+
+/* Append a double as its raw IEEE-754 bit pattern, big-endian. */
+static inline int lttng_msgpack_append_f64(
+ struct lttng_msgpack_writer *writer, double value)
+{
+ /* Type-pun through a union: well-defined in C, unlike pointer casts. */
+ union {
+ double d;
+ uint64_t u;
+ } raw = { .d = value };
+
+ return lttng_msgpack_append_u64(writer, raw.u);
+}
+
+/* Append a signed 8-bit value; two's complement bits pass through unchanged. */
+static inline int lttng_msgpack_append_i8(
+ struct lttng_msgpack_writer *writer, int8_t value)
+{
+ return lttng_msgpack_append_u8(writer, (uint8_t) value);
+}
+
+/* Append a signed 16-bit value via its unsigned bit pattern. */
+static inline int lttng_msgpack_append_i16(
+ struct lttng_msgpack_writer *writer, int16_t value)
+{
+ return lttng_msgpack_append_u16(writer, (uint16_t) value);
+}
+
+/* Append a signed 32-bit value via its unsigned bit pattern. */
+static inline int lttng_msgpack_append_i32(
+ struct lttng_msgpack_writer *writer, int32_t value)
+{
+ return lttng_msgpack_append_u32(writer, (uint32_t) value);
+}
+
+/* Append a signed 64-bit value via its unsigned bit pattern. */
+static inline int lttng_msgpack_append_i64(
+ struct lttng_msgpack_writer *writer, int64_t value)
+{
+ return lttng_msgpack_append_u64(writer, (uint64_t) value);
+}
+
+/* Emit a msgpack float64: marker byte followed by the 8-byte payload. */
+static inline int lttng_msgpack_encode_f64(
+ struct lttng_msgpack_writer *writer, double value)
+{
+ int ret = lttng_msgpack_append_u8(writer, MSGPACK_FLOAT64_ID);
+
+ if (!ret)
+ ret = lttng_msgpack_append_f64(writer, value);
+ return ret;
+}
+
+/* Emit a fixmap header: the element count lives in the id byte's low nibble. */
+static inline int lttng_msgpack_encode_fixmap(
+ struct lttng_msgpack_writer *writer, uint8_t count)
+{
+ lttng_msgpack_assert(count <= MSGPACK_FIXMAP_MAX_COUNT);
+
+ return lttng_msgpack_append_u8(writer, MSGPACK_FIXMAP_ID_MASK | count);
+}
+
+/* Emit a map16 header: id byte plus a 16-bit big-endian element count. */
+static inline int lttng_msgpack_encode_map16(
+ struct lttng_msgpack_writer *writer, uint16_t count)
+{
+ int ret;
+
+ /* map16 is only used when the count does not fit a fixmap. */
+ lttng_msgpack_assert(count > MSGPACK_FIXMAP_MAX_COUNT);
+
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_MAP16_ID);
+ if (!ret)
+ ret = lttng_msgpack_append_u16(writer, count);
+ return ret;
+}
+
+/* Emit a fixarray header: element count in the id byte's low nibble. */
+static inline int lttng_msgpack_encode_fixarray(
+ struct lttng_msgpack_writer *writer, uint8_t count)
+{
+ lttng_msgpack_assert(count <= MSGPACK_FIXARRAY_MAX_COUNT);
+
+ return lttng_msgpack_append_u8(writer, MSGPACK_FIXARRAY_ID_MASK | count);
+}
+
+/* Emit an array16 header: id byte plus a 16-bit big-endian element count. */
+static inline int lttng_msgpack_encode_array16(
+ struct lttng_msgpack_writer *writer, uint16_t count)
+{
+ int ret;
+
+ /* array16 is only used when the count does not fit a fixarray. */
+ lttng_msgpack_assert(count > MSGPACK_FIXARRAY_MAX_COUNT);
+
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_ARRAY16_ID);
+ if (!ret)
+ ret = lttng_msgpack_append_u16(writer, count);
+ return ret;
+}
+
+/* Emit a fixstr: length in the id byte's low 5 bits, then the raw bytes. */
+static inline int lttng_msgpack_encode_fixstr(
+ struct lttng_msgpack_writer *writer,
+ const char *str,
+ uint8_t len)
+{
+ int ret;
+
+ lttng_msgpack_assert(len <= MSGPACK_FIXSTR_MAX_LENGTH);
+
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_FIXSTR_ID_MASK | len);
+ if (!ret)
+ ret = lttng_msgpack_append_buffer(writer, (uint8_t *) str, len);
+ return ret;
+}
+
+/* Emit a str16: id byte, 16-bit big-endian length, then the raw bytes. */
+static inline int lttng_msgpack_encode_str16(
+ struct lttng_msgpack_writer *writer,
+ const char *str,
+ uint16_t len)
+{
+ int ret;
+
+ /* str16 is only used when the length does not fit a fixstr. */
+ lttng_msgpack_assert(len > MSGPACK_FIXSTR_MAX_LENGTH);
+
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_STR16_ID);
+ if (!ret)
+ ret = lttng_msgpack_append_u16(writer, len);
+ if (!ret)
+ ret = lttng_msgpack_append_buffer(writer, (uint8_t *) str, len);
+ return ret;
+}
+
+/*
+ * Open a map of `count` key/value pairs. Returns 0 on success, -1 on
+ * unsupported count or buffer overflow.
+ */
+int lttng_msgpack_begin_map(struct lttng_msgpack_writer *writer, size_t count)
+{
+ int ret;
+
+ /*
+ * Only fixmap and map16 encodings are implemented; reject counts
+ * that do not fit in 16 bits. (The original also tested
+ * "count < 0", which is always false for an unsigned size_t and
+ * triggers -Wtype-limits; it has been removed.)
+ */
+ if (count >= (1 << 16)) {
+ ret = -1;
+ goto end;
+ }
+
+ if (count <= MSGPACK_FIXMAP_MAX_COUNT)
+ ret = lttng_msgpack_encode_fixmap(writer, count);
+ else
+ ret = lttng_msgpack_encode_map16(writer, count);
+
+ /* Nesting is tracked even when the append fails, as before. */
+ writer->map_nesting++;
+end:
+ return ret;
+}
+
+/* Close the innermost map; closing a map that was never opened is a caller bug. */
+int lttng_msgpack_end_map(struct lttng_msgpack_writer *writer)
+{
+ lttng_msgpack_assert(writer->map_nesting > 0);
+ writer->map_nesting--;
+ return 0;
+}
+
+/*
+ * Open an array of `count` elements. Returns 0 on success, -1 on
+ * unsupported count or buffer overflow.
+ */
+int lttng_msgpack_begin_array(
+ struct lttng_msgpack_writer *writer, size_t count)
+{
+ int ret;
+
+ /*
+ * Only fixarray and array16 encodings are implemented; reject
+ * counts that do not fit in 16 bits. (The original also tested
+ * "count < 0", always false for an unsigned size_t; removed.)
+ */
+ if (count >= (1 << 16)) {
+ ret = -1;
+ goto end;
+ }
+
+ if (count <= MSGPACK_FIXARRAY_MAX_COUNT)
+ ret = lttng_msgpack_encode_fixarray(writer, count);
+ else
+ ret = lttng_msgpack_encode_array16(writer, count);
+
+ /* Nesting is tracked even when the append fails, as before. */
+ writer->array_nesting++;
+end:
+ return ret;
+}
+
+/* Close the innermost array; closing an array that was never opened is a caller bug. */
+int lttng_msgpack_end_array(struct lttng_msgpack_writer *writer)
+{
+ lttng_msgpack_assert(writer->array_nesting > 0);
+ writer->array_nesting--;
+ return 0;
+}
+
+/*
+ * Write a NUL-terminated string, choosing the smallest encoding
+ * (fixstr or str16). Returns 0 on success, -1 on unsupported length
+ * or buffer overflow.
+ */
+int lttng_msgpack_write_str(struct lttng_msgpack_writer *writer,
+ const char *str)
+{
+ int ret;
+ size_t length = strlen(str);
+
+ /*
+ * Only fixstr and str16 encodings are implemented; reject lengths
+ * that do not fit in 16 bits. (The original also tested
+ * "length < 0", always false for an unsigned size_t; removed.)
+ */
+ if (length >= (1 << 16)) {
+ ret = -1;
+ goto end;
+ }
+
+ if (length <= MSGPACK_FIXSTR_MAX_LENGTH)
+ ret = lttng_msgpack_encode_fixstr(writer, str, length);
+ else
+ ret = lttng_msgpack_encode_str16(writer, str, length);
+
+end:
+ return ret;
+}
+
+/* Write the one-byte msgpack nil value. */
+int lttng_msgpack_write_nil(struct lttng_msgpack_writer *writer)
+{
+ return lttng_msgpack_append_u8(writer, MSGPACK_NIL_ID);
+}
+
+/* Write the one-byte msgpack boolean true. */
+int lttng_msgpack_write_true(struct lttng_msgpack_writer *writer)
+{
+ return lttng_msgpack_append_u8(writer, MSGPACK_TRUE_ID);
+}
+
+/* Write the one-byte msgpack boolean false. */
+int lttng_msgpack_write_false(struct lttng_msgpack_writer *writer)
+{
+ return lttng_msgpack_append_u8(writer, MSGPACK_FALSE_ID);
+}
+
+/*
+ * Write an unsigned integer in the smallest msgpack encoding it fits:
+ * positive fixint, then uint8/16/32/64 with their marker bytes.
+ */
+int lttng_msgpack_write_unsigned_integer(
+ struct lttng_msgpack_writer *writer, uint64_t value)
+{
+ int ret = 0;
+
+ if (value <= MSGPACK_FIXINT_MAX) {
+ /* Positive fixint: the value itself is the encoded byte. */
+ ret = lttng_msgpack_append_u8(writer, (uint8_t) value);
+ } else if (value <= UINT8_MAX) {
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_UINT8_ID);
+ if (!ret)
+ ret = lttng_msgpack_append_u8(writer, (uint8_t) value);
+ } else if (value <= UINT16_MAX) {
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_UINT16_ID);
+ if (!ret)
+ ret = lttng_msgpack_append_u16(writer, (uint16_t) value);
+ } else if (value <= UINT32_MAX) {
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_UINT32_ID);
+ if (!ret)
+ ret = lttng_msgpack_append_u32(writer, (uint32_t) value);
+ } else {
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_UINT64_ID);
+ if (!ret)
+ ret = lttng_msgpack_append_u64(writer, value);
+ }
+ return ret;
+}
+
+/*
+ * Write a signed integer in the smallest msgpack encoding it fits:
+ * fixint first, then int8/16/32/64 with their marker bytes.
+ */
+int lttng_msgpack_write_signed_integer(struct lttng_msgpack_writer *writer, int64_t value)
+{
+ int ret;
+
+ if (value >= MSGPACK_FIXINT_MIN && value <= MSGPACK_FIXINT_MAX) {
+ /* Positive or negative fixint: single encoded byte. */
+ ret = lttng_msgpack_append_i8(writer, (int8_t) value);
+ } else if (value >= INT8_MIN && value <= INT8_MAX) {
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_INT8_ID);
+ if (!ret)
+ ret = lttng_msgpack_append_i8(writer, (int8_t) value);
+ } else if (value >= INT16_MIN && value <= INT16_MAX) {
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_INT16_ID);
+ if (!ret)
+ ret = lttng_msgpack_append_i16(writer, (int16_t) value);
+ } else if (value >= INT32_MIN && value <= INT32_MAX) {
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_INT32_ID);
+ if (!ret)
+ ret = lttng_msgpack_append_i32(writer, (int32_t) value);
+ } else {
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_INT64_ID);
+ if (!ret)
+ ret = lttng_msgpack_append_i64(writer, value);
+ }
+ return ret;
+}
+
+/* Write a double; always uses the 9-byte float64 encoding. */
+int lttng_msgpack_write_double(struct lttng_msgpack_writer *writer, double value)
+{
+ return lttng_msgpack_encode_f64(writer, value);
+}
+
+/*
+ * Initialize a writer over a caller-owned buffer of `size` bytes.
+ * The writer does not allocate; the caller keeps ownership of `buffer`.
+ */
+void lttng_msgpack_writer_init(struct lttng_msgpack_writer *writer,
+ uint8_t *buffer, size_t size)
+{
+ /*
+ * The original also asserted "size >= 0", which is always true for
+ * an unsigned size_t (-Wtype-limits); that assertion was removed.
+ */
+ lttng_msgpack_assert(buffer);
+
+ writer->buffer = buffer;
+ writer->write_pos = buffer;
+ writer->end_write_pos = buffer + size;
+
+ writer->array_nesting = 0;
+ writer->map_nesting = 0;
+}
+
+/* Reset the writer; does not free the underlying buffer (caller-owned). */
+void lttng_msgpack_writer_fini(struct lttng_msgpack_writer *writer)
+{
+ memset(writer, 0, sizeof(*writer));
+}
{
struct channel *chan = buf->backend.chan;
+ irq_work_sync(&buf->wakeup_pending);
+
lib_ring_buffer_print_errors(chan, buf, buf->backend.cpu);
lttng_kvfree(buf->commit_hot);
lttng_kvfree(buf->commit_cold);
}
EXPORT_SYMBOL_GPL(channel_reset);
+/* Deferred per-buffer reader wakeup, run from irq_work context so the
+ * writer fast path never takes the waitqueue lock directly. */
+static void lib_ring_buffer_pending_wakeup_buf(struct irq_work *entry)
+{
+ struct lib_ring_buffer *buf = container_of(entry, struct lib_ring_buffer,
+ wakeup_pending);
+ wake_up_interruptible(&buf->read_wait);
+}
+
+/* Deferred per-channel reader wakeup, run from irq_work context. */
+static void lib_ring_buffer_pending_wakeup_chan(struct irq_work *entry)
+{
+ struct channel *chan = container_of(entry, struct channel, wakeup_pending);
+ wake_up_interruptible(&chan->read_wait);
+}
+
/*
* Must be called under cpu hotplug protection.
*/
init_waitqueue_head(&buf->read_wait);
init_waitqueue_head(&buf->write_wait);
+ init_irq_work(&buf->wakeup_pending, lib_ring_buffer_pending_wakeup_buf);
raw_spin_lock_init(&buf->raw_tick_nohz_spinlock);
/*
kref_init(&chan->ref);
init_waitqueue_head(&chan->read_wait);
init_waitqueue_head(&chan->hp_wait);
+ init_irq_work(&chan->wakeup_pending, lib_ring_buffer_pending_wakeup_chan);
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
const struct lib_ring_buffer_config *config = &chan->backend.config;
void *priv;
+ irq_work_sync(&chan->wakeup_pending);
+
channel_unregister_notifiers(chan);
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
commit_count, idx);
/*
- * RING_BUFFER_WAKEUP_BY_WRITER wakeup is not lock-free.
+ * RING_BUFFER_WAKEUP_BY_WRITER uses an irq_work to issue
+ * the wakeups.
*/
if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER
&& atomic_long_read(&buf->active_readers)
&& lib_ring_buffer_poll_deliver(config, buf, chan)) {
- wake_up_interruptible(&buf->read_wait);
- wake_up_interruptible(&chan->read_wait);
+ irq_work_queue(&buf->wakeup_pending);
+ irq_work_queue(&chan->wakeup_pending);
}
}
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_get_next_record);
+/*
+ * Release the record currently held by the buffer iterator, advancing
+ * past its payload. No-op on a NULL buffer or when no record is held.
+ */
+void lib_ring_buffer_put_current_record(struct lib_ring_buffer *buf)
+{
+ struct lib_ring_buffer_iter *iter;
+
+ if (!buf)
+ return;
+ iter = &buf->iter;
+ /* Only release a record that was actually delivered to the iterator. */
+ if (iter->state != ITER_NEXT_RECORD)
+ return;
+ /* Skip the payload and go back to probing for the next record. */
+ iter->read_offset += iter->payload_len;
+ iter->state = ITER_TEST_RECORD;
+ if (iter->read_offset - iter->consumed >= iter->data_size) {
+ /* Whole sub-buffer consumed: release it and fetch a new one. */
+ lib_ring_buffer_put_next_subbuf(buf);
+ iter->state = ITER_GET_SUBBUF;
+ }
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_put_current_record);
+
static int buf_is_higher(void *a, void *b)
{
struct lib_ring_buffer *bufa = a;
return -EFAULT;
}
read_count += copy_len;
- };
- return read_count;
+ }
+ goto put_record;
nodata:
*ppos = 0;
chan->iter.len_left = 0;
+put_record:
+ lib_ring_buffer_put_current_record(buf);
return read_count;
}
#include <lttng/tracer.h>
#include <lttng/tp-mempool.h>
#include <ringbuffer/frontend_types.h>
+#include <ringbuffer/iterator.h>
/*
* This is LTTng's own personal way to create a system call as an external
static const struct file_operations lttng_proc_ops;
#endif
+static DEFINE_MUTEX(trigger_group_mutex);
+
static const struct file_operations lttng_session_fops;
+static const struct file_operations lttng_trigger_group_fops;
static const struct file_operations lttng_channel_fops;
static const struct file_operations lttng_metadata_fops;
static const struct file_operations lttng_event_fops;
return ret;
}
+/* Deferred wakeup of trigger-group notification readers (irq_work context). */
+static
+void trigger_send_notification_work_wakeup(struct irq_work *entry)
+{
+ struct lttng_trigger_group *trigger_group = container_of(entry,
+ struct lttng_trigger_group, wakeup_pending);
+ wake_up_interruptible(&trigger_group->read_wait);
+}
+
+/*
+ * Create a trigger group and return a file descriptor for it, or a
+ * negative errno. The group is destroyed when the fd's file is released.
+ */
+static
+int lttng_abi_create_trigger_group(void)
+{
+ struct lttng_trigger_group *trigger_group;
+ struct file *trigger_group_file;
+ int trigger_group_fd, ret;
+
+ trigger_group = lttng_trigger_group_create();
+ if (!trigger_group)
+ return -ENOMEM;
+
+ /* Reserve an fd number before creating the backing file. */
+ trigger_group_fd = lttng_get_unused_fd();
+ if (trigger_group_fd < 0) {
+ ret = trigger_group_fd;
+ goto fd_error;
+ }
+ trigger_group_file = anon_inode_getfile("[lttng_trigger_group]",
+ &lttng_trigger_group_fops,
+ trigger_group, O_RDWR);
+ if (IS_ERR(trigger_group_file)) {
+ ret = PTR_ERR(trigger_group_file);
+ goto file_error;
+ }
+
+ trigger_group->file = trigger_group_file;
+ init_waitqueue_head(&trigger_group->read_wait);
+ /* Reader wakeups are deferred through irq_work. */
+ init_irq_work(&trigger_group->wakeup_pending,
+ trigger_send_notification_work_wakeup);
+ /* Publish the fd only once the group is fully initialized. */
+ fd_install(trigger_group_fd, trigger_group_file);
+ return trigger_group_fd;
+
+file_error:
+ put_unused_fd(trigger_group_fd);
+fd_error:
+ lttng_trigger_group_destroy(trigger_group);
+ return ret;
+}
+
static
int lttng_abi_tracepoint_list(void)
{
* Returns after all previously running probes have completed
* LTTNG_KERNEL_TRACER_ABI_VERSION
* Returns the LTTng kernel tracer ABI version
+ * LTTNG_KERNEL_TRIGGER_GROUP_CREATE
+ * Returns a LTTng trigger group file descriptor
*
* The returned session will be deleted when its file descriptor is closed.
*/
case LTTNG_KERNEL_OLD_SESSION:
case LTTNG_KERNEL_SESSION:
return lttng_abi_create_session();
+ case LTTNG_KERNEL_TRIGGER_GROUP_CREATE:
+ return lttng_abi_create_trigger_group();
case LTTNG_KERNEL_OLD_TRACER_VERSION:
{
struct lttng_kernel_tracer_version v;
return 0;
}
+/* Release hook for the counter fd: drops the reference held on the owner. */
+static
+int lttng_counter_release(struct inode *inode, struct file *file)
+{
+ struct lttng_counter *counter = file->private_data;
+
+ if (counter) {
+ /*
+ * Do not destroy the counter itself. Wait for the owner
+ * (trigger group) to be destroyed.
+ */
+ fput(counter->owner);
+ }
+
+ return 0;
+}
+
+/*
+ * ioctl handler for the counter fd. LTTNG_KERNEL_COUNTER_VALUE reads one
+ * counter cell, addressed by user-supplied dimension indexes, and copies
+ * the 64-bit value back to user space.
+ */
+static
+long lttng_counter_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct lttng_counter *counter = file->private_data;
+ size_t indexes[LTTNG_KERNEL_COUNTER_DIMENSION_MAX] = {0};
+ int i;
+
+ switch (cmd) {
+ case LTTNG_KERNEL_COUNTER_VALUE:
+ {
+ struct lttng_kernel_counter_value local_counter_value;
+ struct lttng_kernel_counter_value __user *ucounter_value =
+ (struct lttng_kernel_counter_value __user *) arg;
+ int64_t value;
+ int ret;
+
+ if (copy_from_user(&local_counter_value, ucounter_value,
+ sizeof(local_counter_value)))
+ return -EFAULT;
+
+ /*
+ * Fix: validate the user-controlled dimension count before
+ * using it as a loop bound. Without this check, a malicious
+ * value overflows the on-stack indexes[] array.
+ */
+ if (local_counter_value.number_dimensions > LTTNG_KERNEL_COUNTER_DIMENSION_MAX)
+ return -EINVAL;
+
+ /* Cast all indexes into size_t. */
+ for (i = 0; i < local_counter_value.number_dimensions; i++) {
+ indexes[i] = (size_t) local_counter_value.dimension_indexes[i];
+ }
+
+ ret = lttng_kernel_counter_value(counter, indexes, &value);
+ if (ret)
+ return -EFAULT;
+
+ /* Only the value field is written back to user space. */
+ if (copy_to_user(&ucounter_value->value, &value, sizeof(int64_t)))
+ return -EFAULT;
+
+ return 0;
+ }
+ default:
+ WARN_ON_ONCE(1);
+ return -ENOSYS;
+ }
+}
+
+/* File operations for the error-counter fd: ioctl only, no read/write. */
+static const struct file_operations lttng_counter_fops = {
+ .owner = THIS_MODULE,
+ .release = lttng_counter_release,
+ .unlocked_ioctl = lttng_counter_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = lttng_counter_ioctl,
+#endif
+};
+
+
static
enum tracker_type get_tracker_type(struct lttng_kernel_tracker_args *tracker)
{
#endif
};
+/*
+ * When encountering empty buffer, flush current sub-buffer if non-empty
+ * and retry (if new data available to read after flush).
+ */
+/*
+ * read(2) handler for the trigger notification stream. Copies whole or
+ * partial notification records to user space; *ppos carries the resume
+ * offset of a record that was only partially copied on a previous call.
+ */
+static
+ssize_t lttng_trigger_group_notif_read(struct file *filp, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct lttng_trigger_group *trigger_group = filp->private_data;
+ struct channel *chan = trigger_group->chan;
+ struct lib_ring_buffer *buf = trigger_group->buf;
+ ssize_t read_count = 0, len;
+ size_t read_offset;
+
+ might_sleep();
+ if (!lttng_access_ok(VERIFY_WRITE, user_buf, count))
+ return -EFAULT;
+
+ /* Finish copy of previous record */
+ if (*ppos != 0) {
+ if (read_count < count) {
+ /* Resume from the saved offset, len_left bytes remain. */
+ len = chan->iter.len_left;
+ read_offset = *ppos;
+ goto skip_get_next;
+ }
+ }
+
+ while (read_count < count) {
+ size_t copy_len, space_left;
+
+ len = lib_ring_buffer_get_next_record(chan, buf);
+len_test:
+ if (len < 0) {
+ /*
+ * Check if buffer is finalized (end of file).
+ */
+ if (len == -ENODATA) {
+ /* A 0 read_count will tell about end of file */
+ goto nodata;
+ }
+ if (filp->f_flags & O_NONBLOCK) {
+ if (!read_count)
+ read_count = -EAGAIN;
+ goto nodata;
+ } else {
+ int error;
+
+ /*
+ * No data available at the moment, return what
+ * we got.
+ */
+ if (read_count)
+ goto nodata;
+
+ /*
+ * Wait for returned len to be >= 0 or -ENODATA.
+ */
+ error = wait_event_interruptible(
+ trigger_group->read_wait,
+ ((len = lib_ring_buffer_get_next_record(
+ chan, buf)), len != -EAGAIN));
+ CHAN_WARN_ON(chan, len == -EBUSY);
+ if (error) {
+ /* Interrupted by a signal. */
+ read_count = error;
+ goto nodata;
+ }
+ CHAN_WARN_ON(chan, len < 0 && len != -ENODATA);
+ goto len_test;
+ }
+ }
+ read_offset = buf->iter.read_offset;
+skip_get_next:
+ space_left = count - read_count;
+ if (len <= space_left) {
+ /* Whole record fits: no resume state to keep. */
+ copy_len = len;
+ chan->iter.len_left = 0;
+ *ppos = 0;
+ } else {
+ /* Partial copy: remember where to resume next call. */
+ copy_len = space_left;
+ chan->iter.len_left = len - copy_len;
+ *ppos = read_offset + copy_len;
+ }
+ if (__lib_ring_buffer_copy_to_user(&buf->backend, read_offset,
+ &user_buf[read_count],
+ copy_len)) {
+ /*
+ * Leave the len_left and ppos values at their current
+ * state, as we currently have a valid event to read.
+ */
+ return -EFAULT;
+ }
+ read_count += copy_len;
+ }
+ goto put_record;
+
+nodata:
+ *ppos = 0;
+ chan->iter.len_left = 0;
+
+put_record:
+ lib_ring_buffer_put_current_record(buf);
+ return read_count;
+}
+
+/*
+ * If the ring buffer is non empty (even just a partial subbuffer), return that
+ * there is data available. Perform a ring buffer flush if we encounter a
+ * non-empty ring buffer which does not have any consumeable subbuffer available.
+ */
+/*
+ * poll(2) handler for the trigger notification stream. Reports readable
+ * data, end-of-stream (POLLHUP) or a disabled channel (POLLERR); flushes
+ * a partially-filled sub-buffer when nothing is consumable yet.
+ */
+static
+unsigned int lttng_trigger_group_notif_poll(struct file *filp,
+ poll_table *wait)
+{
+ unsigned int mask = 0;
+ struct lttng_trigger_group *trigger_group = filp->private_data;
+ struct channel *chan = trigger_group->chan;
+ struct lib_ring_buffer *buf = trigger_group->buf;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
+ int finalized, disabled;
+ unsigned long consumed, offset;
+ size_t subbuffer_header_size = config->cb.subbuffer_header_size();
+
+ if (filp->f_mode & FMODE_READ) {
+ poll_wait_set_exclusive(wait);
+ poll_wait(filp, &trigger_group->read_wait, wait);
+
+ finalized = lib_ring_buffer_is_finalized(config, buf);
+ disabled = lib_ring_buffer_channel_is_disabled(chan);
+
+ /*
+ * lib_ring_buffer_is_finalized() contains a smp_rmb() ordering
+ * finalized load before offsets loads.
+ */
+ WARN_ON(atomic_long_read(&buf->active_readers) != 1);
+retry:
+ if (disabled)
+ return POLLERR;
+
+ offset = lib_ring_buffer_get_offset(config, buf);
+ consumed = lib_ring_buffer_get_consumed(config, buf);
+
+ /*
+ * If there is no buffer available to consume.
+ */
+ if (subbuf_trunc(offset, chan) - subbuf_trunc(consumed, chan) == 0) {
+ /*
+ * If there is a non-empty subbuffer, flush and try again.
+ */
+ if (subbuf_offset(offset, chan) > subbuffer_header_size) {
+ lib_ring_buffer_switch_remote(buf);
+ goto retry;
+ }
+
+ if (finalized)
+ return POLLHUP;
+ else {
+ /*
+ * The memory barriers
+ * __wait_event()/wake_up_interruptible() take
+ * care of "raw_spin_is_locked" memory ordering.
+ */
+ if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
+ goto retry;
+ else
+ return 0;
+ }
+ } else {
+ /* Data available; POLLPRI when the buffer is full. */
+ if (subbuf_trunc(offset, chan) - subbuf_trunc(consumed, chan)
+ >= chan->backend.buf_size)
+ return POLLPRI | POLLRDBAND;
+ else
+ return POLLIN | POLLRDNORM;
+ }
+ }
+
+ return mask;
+}
+
+/**
+ * lttng_trigger_group_notif_open - trigger ring buffer open file operation
+ * @inode: opened inode
+ * @file: opened file
+ *
+ * Open implementation. Makes sure only one open instance of a buffer is
+ * done at a given moment.
+ */
+static int lttng_trigger_group_notif_open(struct inode *inode, struct file *file)
+{
+ struct lttng_trigger_group *trigger_group = inode->i_private;
+ struct lib_ring_buffer *buf = trigger_group->buf;
+
+ /* Expose the trigger group through private_data; the generic ring
+ * buffer open enforces the single-reader rule. */
+ file->private_data = trigger_group;
+ return lib_ring_buffer_open(inode, file, buf);
+}
+
+/**
+ * lttng_trigger_group_notif_release - trigger ring buffer release file operation
+ * @inode: opened inode
+ * @file: opened file
+ *
+ * Release implementation.
+ */
+static int lttng_trigger_group_notif_release(struct inode *inode, struct file *file)
+{
+ struct lttng_trigger_group *trigger_group = file->private_data;
+ struct lib_ring_buffer *buf = trigger_group->buf;
+ int ret;
+
+ ret = lib_ring_buffer_release(inode, file, buf);
+ if (ret)
+ return ret;
+ /* Drop the reference taken on the trigger group file when the
+ * notification stream was opened. */
+ fput(trigger_group->file);
+ return 0;
+}
+
+/* File operations for the trigger notification stream fd. */
+static const struct file_operations lttng_trigger_group_notif_fops = {
+ .owner = THIS_MODULE,
+ .open = lttng_trigger_group_notif_open,
+ .release = lttng_trigger_group_notif_release,
+ .read = lttng_trigger_group_notif_read,
+ .poll = lttng_trigger_group_notif_poll,
+};
+
/**
* lttng_metadata_ring_buffer_poll - LTTng ring buffer poll file operation
* @filp: the file
static
int lttng_abi_create_stream_fd(struct file *channel_file, void *stream_priv,
- const struct file_operations *fops)
+ const struct file_operations *fops, const char *name)
{
int stream_fd, ret;
struct file *stream_file;
ret = stream_fd;
goto fd_error;
}
- stream_file = anon_inode_getfile("[lttng_stream]", fops,
- stream_priv, O_RDWR);
+ stream_file = anon_inode_getfile(name, fops, stream_priv, O_RDWR);
if (IS_ERR(stream_file)) {
ret = PTR_ERR(stream_file);
goto file_error;
stream_priv = buf;
ret = lttng_abi_create_stream_fd(channel_file, stream_priv,
- <tng_stream_ring_buffer_file_operations);
+ <tng_stream_ring_buffer_file_operations,
+ "[lttng_stream]");
if (ret < 0)
goto fd_error;
}
ret = lttng_abi_create_stream_fd(channel_file, stream_priv,
- <tng_metadata_ring_buffer_file_operations);
+ <tng_metadata_ring_buffer_file_operations,
+ "[lttng_metadata_stream]");
if (ret < 0)
goto fd_error;
return ret;
}
+/*
+ * Open the notification stream of a trigger group and return its fd,
+ * or a negative errno. The stream fd pins the trigger group file.
+ */
+static
+int lttng_abi_open_trigger_group_stream(struct file *notif_file)
+{
+ struct lttng_trigger_group *trigger_group = notif_file->private_data;
+ struct channel *chan = trigger_group->chan;
+ struct lib_ring_buffer *buf;
+ int ret;
+ void *stream_priv;
+
+ buf = trigger_group->ops->buffer_read_open(chan);
+ if (!buf)
+ return -ENOENT;
+
+ /* The trigger notification fd holds a reference on the trigger group */
+ if (!atomic_long_add_unless(&notif_file->f_count, 1, LONG_MAX)) {
+ ret = -EOVERFLOW;
+ goto refcount_error;
+ }
+ trigger_group->buf = buf;
+ stream_priv = trigger_group;
+ ret = lttng_abi_create_stream_fd(notif_file, stream_priv,
+ &lttng_trigger_group_notif_fops,
+ "[lttng_trigger_stream]");
+ if (ret < 0)
+ goto fd_error;
+
+ return ret;
+
+fd_error:
+ /* Undo the reference taken above before closing the buffer. */
+ atomic_long_dec(&notif_file->f_count);
+refcount_error:
+ trigger_group->ops->buffer_read_close(buf);
+ return ret;
+}
+
static
int lttng_abi_validate_event_param(struct lttng_kernel_event *event_param)
{
goto event_error;
if (event_param->instrumentation == LTTNG_KERNEL_TRACEPOINT
|| event_param->instrumentation == LTTNG_KERNEL_SYSCALL) {
- struct lttng_enabler *enabler;
+ struct lttng_event_enabler *event_enabler;
if (strutils_is_star_glob_pattern(event_param->name)) {
/*
* If the event name is a star globbing pattern,
* we create the special star globbing enabler.
*/
- enabler = lttng_enabler_create(LTTNG_ENABLER_STAR_GLOB,
+ event_enabler = lttng_event_enabler_create(LTTNG_ENABLER_FORMAT_STAR_GLOB,
event_param, channel);
} else {
- enabler = lttng_enabler_create(LTTNG_ENABLER_NAME,
+ event_enabler = lttng_event_enabler_create(LTTNG_ENABLER_FORMAT_NAME,
event_param, channel);
}
- priv = enabler;
+ priv = event_enabler;
} else {
struct lttng_event *event;
return ret;
}
+/*
+ * ioctl handler for a trigger fd. private_data starts with an
+ * enum lttng_event_type discriminating a concrete trigger from an
+ * enabler; each command is dispatched accordingly.
+ */
+static
+long lttng_trigger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct lttng_trigger *trigger;
+ struct lttng_trigger_enabler *trigger_enabler;
+ enum lttng_event_type *evtype = file->private_data;
+
+ switch (cmd) {
+ case LTTNG_KERNEL_ENABLE:
+ switch (*evtype) {
+ case LTTNG_TYPE_EVENT:
+ trigger = file->private_data;
+ return lttng_trigger_enable(trigger);
+ case LTTNG_TYPE_ENABLER:
+ trigger_enabler = file->private_data;
+ return lttng_trigger_enabler_enable(trigger_enabler);
+ default:
+ WARN_ON_ONCE(1);
+ return -ENOSYS;
+ }
+ case LTTNG_KERNEL_DISABLE:
+ switch (*evtype) {
+ case LTTNG_TYPE_EVENT:
+ trigger = file->private_data;
+ return lttng_trigger_disable(trigger);
+ case LTTNG_TYPE_ENABLER:
+ trigger_enabler = file->private_data;
+ return lttng_trigger_enabler_disable(trigger_enabler);
+ default:
+ WARN_ON_ONCE(1);
+ return -ENOSYS;
+ }
+ case LTTNG_KERNEL_FILTER:
+ /* Filter bytecode can only be attached to an enabler. */
+ switch (*evtype) {
+ case LTTNG_TYPE_EVENT:
+ return -EINVAL;
+ case LTTNG_TYPE_ENABLER:
+ trigger_enabler = file->private_data;
+ return lttng_trigger_enabler_attach_filter_bytecode(
+ trigger_enabler,
+ (struct lttng_kernel_filter_bytecode __user *) arg);
+ default:
+ WARN_ON_ONCE(1);
+ return -ENOSYS;
+ }
+
+ case LTTNG_KERNEL_CAPTURE:
+ /* Capture bytecode can only be attached to an enabler. */
+ switch (*evtype) {
+ case LTTNG_TYPE_EVENT:
+ return -EINVAL;
+ case LTTNG_TYPE_ENABLER:
+ trigger_enabler = file->private_data;
+ return lttng_trigger_enabler_attach_capture_bytecode(
+ trigger_enabler,
+ (struct lttng_kernel_capture_bytecode __user *) arg);
+ default:
+ WARN_ON_ONCE(1);
+ return -ENOSYS;
+ }
+ case LTTNG_KERNEL_ADD_CALLSITE:
+ /* Callsites apply only to a concrete trigger. */
+ switch (*evtype) {
+ case LTTNG_TYPE_EVENT:
+ trigger = file->private_data;
+ return lttng_trigger_add_callsite(trigger,
+ (struct lttng_kernel_event_callsite __user *) arg);
+ case LTTNG_TYPE_ENABLER:
+ return -EINVAL;
+ default:
+ WARN_ON_ONCE(1);
+ return -ENOSYS;
+ }
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+/*
+ * Release hook for a trigger fd: drops the reference the trigger (or
+ * enabler) held on its trigger group file.
+ */
+static
+int lttng_trigger_release(struct inode *inode, struct file *file)
+{
+ struct lttng_trigger *trigger;
+ struct lttng_trigger_enabler *trigger_enabler;
+ enum lttng_event_type *evtype = file->private_data;
+
+ /* private_data is NULL when creation failed before assignment. */
+ if (!evtype)
+ return 0;
+
+ switch (*evtype) {
+ case LTTNG_TYPE_EVENT:
+ trigger = file->private_data;
+ if (trigger)
+ fput(trigger->group->file);
+ break;
+ case LTTNG_TYPE_ENABLER:
+ trigger_enabler = file->private_data;
+ if (trigger_enabler)
+ fput(trigger_enabler->group->file);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ return 0;
+}
+
+/* File operations for a trigger (or trigger enabler) fd. */
+static const struct file_operations lttng_trigger_fops = {
+ .owner = THIS_MODULE,
+ .release = lttng_trigger_release,
+ .unlocked_ioctl = lttng_trigger_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = lttng_trigger_ioctl,
+#endif
+};
+
+/*
+ * Create a trigger (or a trigger enabler for glob/name patterns) within
+ * a trigger group, returning its fd or a negative errno. The new fd
+ * holds a reference on the trigger group file.
+ */
+static
+int lttng_abi_create_trigger(struct file *trigger_group_file,
+ struct lttng_kernel_trigger *trigger_param)
+{
+ struct lttng_trigger_group *trigger_group = trigger_group_file->private_data;
+ int trigger_fd, ret;
+ struct file *trigger_file;
+ void *priv;
+
+ switch (trigger_param->instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ case LTTNG_KERNEL_UPROBE:
+ break;
+ case LTTNG_KERNEL_KPROBE:
+ /* Force NUL-termination of the user-supplied symbol name. */
+ trigger_param->u.kprobe.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
+ break;
+ case LTTNG_KERNEL_SYSCALL:
+ break;
+ case LTTNG_KERNEL_KRETPROBE:
+ /* Placing a trigger on kretprobe is not supported. */
+ case LTTNG_KERNEL_FUNCTION:
+ case LTTNG_KERNEL_NOOP:
+ default:
+ ret = -EINVAL;
+ goto inval_instr;
+ }
+
+ /* Force NUL-termination of the user-supplied trigger name. */
+ trigger_param->name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
+
+ trigger_fd = lttng_get_unused_fd();
+ if (trigger_fd < 0) {
+ ret = trigger_fd;
+ goto fd_error;
+ }
+
+ trigger_file = anon_inode_getfile("[lttng_trigger]",
+ &lttng_trigger_fops,
+ NULL, O_RDWR);
+ if (IS_ERR(trigger_file)) {
+ ret = PTR_ERR(trigger_file);
+ goto file_error;
+ }
+
+ /* The trigger holds a reference on the trigger group. */
+ if (!atomic_long_add_unless(&trigger_group_file->f_count, 1, LONG_MAX)) {
+ ret = -EOVERFLOW;
+ goto refcount_error;
+ }
+
+ if (trigger_param->instrumentation == LTTNG_KERNEL_TRACEPOINT
+ || trigger_param->instrumentation == LTTNG_KERNEL_SYSCALL) {
+ struct lttng_trigger_enabler *enabler;
+
+ if (strutils_is_star_glob_pattern(trigger_param->name)) {
+ /*
+ * If the event name is a star globbing pattern,
+ * we create the special star globbing enabler.
+ */
+ enabler = lttng_trigger_enabler_create(trigger_group,
+ LTTNG_ENABLER_FORMAT_STAR_GLOB, trigger_param);
+ } else {
+ enabler = lttng_trigger_enabler_create(trigger_group,
+ LTTNG_ENABLER_FORMAT_NAME, trigger_param);
+ }
+ priv = enabler;
+ } else {
+ struct lttng_trigger *trigger;
+
+ /*
+ * We tolerate no failure path after trigger creation. It
+ * will stay invariant for the rest of the session.
+ */
+ trigger = lttng_trigger_create(NULL, trigger_param->id,
+ trigger_param->error_counter_index,
+ trigger_group, trigger_param, NULL,
+ trigger_param->instrumentation);
+ WARN_ON_ONCE(!trigger);
+ if (IS_ERR(trigger)) {
+ ret = PTR_ERR(trigger);
+ goto trigger_error;
+ }
+ priv = trigger;
+ }
+ trigger_file->private_data = priv;
+ fd_install(trigger_fd, trigger_file);
+ return trigger_fd;
+
+trigger_error:
+ /* Undo the group reference; fput() runs the release with NULL priv. */
+ atomic_long_dec(&trigger_group_file->f_count);
+refcount_error:
+ fput(trigger_file);
+file_error:
+ put_unused_fd(trigger_fd);
+fd_error:
+inval_instr:
+ return ret;
+}
+
+/*
+ * Create the per-group error counter and return its fd, or a negative
+ * errno. Only a single one-dimensional, modular-arithmetic counter is
+ * supported per trigger group.
+ */
+static
+long lttng_abi_trigger_group_create_error_counter(
+ struct file *trigger_group_file,
+ const struct lttng_kernel_counter_conf *error_counter_conf)
+{
+ int counter_fd, ret;
+ char *counter_transport_name;
+ size_t counter_len;
+ struct lttng_counter *counter = NULL;
+ struct file *counter_file;
+ struct lttng_trigger_group *trigger_group =
+ (struct lttng_trigger_group *) trigger_group_file->private_data;
+
+ if (error_counter_conf->arithmetic != LTTNG_KERNEL_COUNTER_ARITHMETIC_MODULAR) {
+ printk(KERN_ERR "LTTng: Trigger: Error counter of the wrong arithmetic type.\n");
+ return -EINVAL;
+ }
+
+ if (error_counter_conf->number_dimensions != 1) {
+ printk(KERN_ERR "LTTng: Trigger: Error counter has more than one dimension.\n");
+ return -EINVAL;
+ }
+
+ switch (error_counter_conf->bitness) {
+ case LTTNG_KERNEL_COUNTER_BITNESS_64BITS:
+ counter_transport_name = "counter-per-cpu-64-modular";
+ break;
+ case LTTNG_KERNEL_COUNTER_BITNESS_32BITS:
+ counter_transport_name = "counter-per-cpu-32-modular";
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(&trigger_group_mutex);
+
+ /*
+ * Fix: refuse a second error counter for the same group; creating
+ * one would silently overwrite (and leak) the first.
+ */
+ if (trigger_group->error_counter) {
+ ret = -EBUSY;
+ goto fd_error;
+ }
+
+ counter_fd = lttng_get_unused_fd();
+ if (counter_fd < 0) {
+ ret = counter_fd;
+ goto fd_error;
+ }
+
+ /* FIXME: Does it need to be RDWR ?*/
+ counter_file = anon_inode_getfile("[lttng_counter]",
+ &lttng_counter_fops,
+ NULL, O_RDWR);
+ if (IS_ERR(counter_file)) {
+ ret = PTR_ERR(counter_file);
+ goto file_error;
+ }
+
+ counter_len = error_counter_conf->dimensions[0].size;
+
+ /* The counter fd keeps the trigger group file alive. */
+ if (!atomic_long_add_unless(&trigger_group_file->f_count, 1, LONG_MAX)) {
+ ret = -EOVERFLOW;
+ goto refcount_error;
+ }
+
+ counter = lttng_kernel_counter_create(counter_transport_name,
+ 1, &counter_len);
+ if (!counter) {
+ ret = -EINVAL;
+ goto counter_error;
+ }
+
+ trigger_group->error_counter = counter;
+ trigger_group->error_counter_len = counter_len;
+
+ counter->file = counter_file;
+ counter->owner = trigger_group->file;
+ counter_file->private_data = counter;
+ /* Ownership transferred. */
+ counter = NULL;
+
+ fd_install(counter_fd, counter_file);
+ mutex_unlock(&trigger_group_mutex);
+
+ return counter_fd;
+
+counter_error:
+ atomic_long_dec(&trigger_group_file->f_count);
+refcount_error:
+ fput(counter_file);
+file_error:
+ put_unused_fd(counter_fd);
+fd_error:
+ /*
+ * Fix: the original error paths returned without releasing
+ * trigger_group_mutex, deadlocking every later caller.
+ */
+ mutex_unlock(&trigger_group_mutex);
+ return ret;
+}
+
+/*
+ * ioctl handler for the trigger group fd: opens the notification
+ * stream, creates triggers, or creates the group's error counter.
+ */
+static
+long lttng_trigger_group_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case LTTNG_KERNEL_TRIGGER_GROUP_NOTIFICATION_FD:
+ {
+ return lttng_abi_open_trigger_group_stream(file);
+ }
+ case LTTNG_KERNEL_TRIGGER_CREATE:
+ {
+ struct lttng_kernel_trigger utrigger_param;
+
+ /* Copy the trigger description in from user space. */
+ if (copy_from_user(&utrigger_param,
+ (struct lttng_kernel_trigger __user *) arg,
+ sizeof(utrigger_param)))
+ return -EFAULT;
+ return lttng_abi_create_trigger(file, &utrigger_param);
+ }
+ case LTTNG_KERNEL_COUNTER:
+ {
+ struct lttng_kernel_counter_conf uerror_counter_conf;
+
+ /* Copy the counter configuration in from user space. */
+ if (copy_from_user(&uerror_counter_conf,
+ (struct lttng_kernel_counter_conf __user *) arg,
+ sizeof(uerror_counter_conf)))
+ return -EFAULT;
+ return lttng_abi_trigger_group_create_error_counter(file,
+ &uerror_counter_conf);
+ }
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+
+/* Release hook for the trigger group fd: tears down the whole group. */
+static
+int lttng_trigger_group_release(struct inode *inode, struct file *file)
+{
+ struct lttng_trigger_group *trigger_group = file->private_data;
+
+ if (trigger_group)
+ lttng_trigger_group_destroy(trigger_group);
+ return 0;
+}
+
+/* File operations for the trigger group fd. */
+static const struct file_operations lttng_trigger_group_fops = {
+ .owner = THIS_MODULE,
+ .release = lttng_trigger_group_release,
+ .unlocked_ioctl = lttng_trigger_group_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = lttng_trigger_group_ioctl,
+#endif
+};
+
/**
* lttng_channel_ioctl - lttng syscall through ioctl
*
long lttng_event_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct lttng_event *event;
- struct lttng_enabler *enabler;
+ struct lttng_event_enabler *event_enabler;
enum lttng_event_type *evtype = file->private_data;
switch (cmd) {
event = file->private_data;
return lttng_event_enable(event);
case LTTNG_TYPE_ENABLER:
- enabler = file->private_data;
- return lttng_enabler_enable(enabler);
+ event_enabler = file->private_data;
+ return lttng_event_enabler_enable(event_enabler);
default:
WARN_ON_ONCE(1);
return -ENOSYS;
event = file->private_data;
return lttng_event_disable(event);
case LTTNG_TYPE_ENABLER:
- enabler = file->private_data;
- return lttng_enabler_disable(enabler);
+ event_enabler = file->private_data;
+ return lttng_event_enabler_disable(event_enabler);
default:
WARN_ON_ONCE(1);
return -ENOSYS;
return -EINVAL;
case LTTNG_TYPE_ENABLER:
{
- enabler = file->private_data;
- return lttng_enabler_attach_bytecode(enabler,
+ event_enabler = file->private_data;
+ return lttng_event_enabler_attach_filter_bytecode(
+ event_enabler,
(struct lttng_kernel_filter_bytecode __user *) arg);
}
default:
int lttng_event_release(struct inode *inode, struct file *file)
{
struct lttng_event *event;
- struct lttng_enabler *enabler;
+ struct lttng_event_enabler *event_enabler;
enum lttng_event_type *evtype = file->private_data;
if (!evtype)
fput(event->chan->file);
break;
case LTTNG_TYPE_ENABLER:
- enabler = file->private_data;
- if (enabler)
- fput(enabler->chan->file);
+ event_enabler = file->private_data;
+ if (event_enabler)
+ fput(event_enabler->chan->file);
break;
default:
WARN_ON_ONCE(1);
--- /dev/null
+/* SPDX-License-Identifier: MIT
+ *
+ * lttng-bytecode-interpreter.c
+ *
+ * LTTng modules bytecode interpreter.
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <wrapper/uaccess.h>
+#include <wrapper/frame.h>
+#include <wrapper/types.h>
+#include <linux/swab.h>
+
+#include <lttng/lttng-bytecode.h>
+#include <lttng/string-utils.h>
+
+/*
+ * get_char should be called with page fault handler disabled if it is expected
+ * to handle user-space read.
+ */
+static
+char get_char(struct estack_entry *reg, size_t offset)
+{
+ /* Reads past the known length are mapped to the terminator. */
+ if (unlikely(offset >= reg->u.s.seq_len))
+ return '\0';
+ if (reg->u.s.user) {
+ char c;
+
+ /* Handle invalid access as end of string. */
+ if (unlikely(!lttng_access_ok(VERIFY_READ,
+ reg->u.s.user_str + offset,
+ sizeof(c))))
+ return '\0';
+ /* Handle fault (nonzero return value) as end of string. */
+ if (unlikely(__copy_from_user_inatomic(&c,
+ reg->u.s.user_str + offset,
+ sizeof(c))))
+ return '\0';
+ return c;
+ } else {
+ /* Kernel-space string: direct access. */
+ return reg->u.s.str[offset];
+ }
+}
+
+/*
+ * -1: wildcard found.
+ * -2: unknown escape char.
+ * 0: normal char.
+ */
+static
+int parse_char(struct estack_entry *reg, char *c, size_t *offset)
+{
+ switch (*c) {
+ case '\\':
+ /* Escape: consume the backslash, look at the next char. */
+ (*offset)++;
+ *c = get_char(reg, *offset);
+ switch (*c) {
+ case '\\':
+ case '*':
+ /* Escaped backslash or star: treat as a literal char. */
+ return 0;
+ default:
+ /* Unknown escape sequence. */
+ return -2;
+ }
+ case '*':
+ /* Unescaped star: wildcard. */
+ return -1;
+ default:
+ return 0;
+ }
+}
+
+/*
+ * Adapter callback for strutils_star_glob_match_char_cb(): @data is the
+ * struct estack_entry * holding the string being read.
+ */
+static
+char get_char_at_cb(size_t at, void *data)
+{
+ return get_char(data, at);
+}
+
+/*
+ * Match the star-glob pattern against the candidate string held in the
+ * top two estack entries (either side may be the pattern; the one whose
+ * literal_type is STAR_GLOB is taken as the pattern). Returns 0 on
+ * match, nonzero otherwise (strcmp-like convention). cmp_type is
+ * currently unused; presumably kept for symmetry with stack_strcmp() --
+ * TODO confirm.
+ */
+static
+int stack_star_glob_match(struct estack *stack, int top, const char *cmp_type)
+{
+ bool has_user = false;
+ int result;
+ struct estack_entry *pattern_reg;
+ struct estack_entry *candidate_reg;
+
+ /* Disable the page fault handler when reading from userspace. */
+ if (estack_bx(stack, top)->u.s.user
+ || estack_ax(stack, top)->u.s.user) {
+ has_user = true;
+ pagefault_disable();
+ }
+
+ /* Find out which side is the pattern vs. the candidate. */
+ if (estack_ax(stack, top)->u.s.literal_type == ESTACK_STRING_LITERAL_TYPE_STAR_GLOB) {
+ pattern_reg = estack_ax(stack, top);
+ candidate_reg = estack_bx(stack, top);
+ } else {
+ pattern_reg = estack_bx(stack, top);
+ candidate_reg = estack_ax(stack, top);
+ }
+
+ /* Perform the match operation. */
+ result = !strutils_star_glob_match_char_cb(get_char_at_cb,
+ pattern_reg, get_char_at_cb, candidate_reg);
+ if (has_user)
+ pagefault_enable();
+
+ return result;
+}
+
+/*
+ * Compare the two strings in the top two estack entries, honoring
+ * escape sequences and the '*' wildcard when a side is a plain string
+ * literal. Returns <0, 0 or >0 like strcmp(). cmp_type is currently
+ * unused by the comparison itself.
+ */
+static
+int stack_strcmp(struct estack *stack, int top, const char *cmp_type)
+{
+ size_t offset_bx = 0, offset_ax = 0;
+ int diff, has_user = 0;
+
+ /* Disable page faults while reading user-space strings. */
+ if (estack_bx(stack, top)->u.s.user
+ || estack_ax(stack, top)->u.s.user) {
+ has_user = 1;
+ pagefault_disable();
+ }
+
+ for (;;) {
+ int ret;
+ int escaped_r0 = 0;
+ char char_bx, char_ax;
+
+ char_bx = get_char(estack_bx(stack, top), offset_bx);
+ char_ax = get_char(estack_ax(stack, top), offset_ax);
+
+ /* bx exhausted: equal if ax also ends or is a trailing '*'. */
+ if (unlikely(char_bx == '\0')) {
+ if (char_ax == '\0') {
+ diff = 0;
+ break;
+ } else {
+ if (estack_ax(stack, top)->u.s.literal_type ==
+ ESTACK_STRING_LITERAL_TYPE_PLAIN) {
+ ret = parse_char(estack_ax(stack, top),
+ &char_ax, &offset_ax);
+ if (ret == -1) {
+ diff = 0;
+ break;
+ }
+ }
+ diff = -1;
+ break;
+ }
+ }
+ /* ax exhausted: equal only if bx holds a trailing '*'. */
+ if (unlikely(char_ax == '\0')) {
+ if (estack_bx(stack, top)->u.s.literal_type ==
+ ESTACK_STRING_LITERAL_TYPE_PLAIN) {
+ ret = parse_char(estack_bx(stack, top),
+ &char_bx, &offset_bx);
+ if (ret == -1) {
+ diff = 0;
+ break;
+ }
+ }
+ diff = 1;
+ break;
+ }
+ /* Decode escapes/wildcards on the bx side (literals only). */
+ if (estack_bx(stack, top)->u.s.literal_type ==
+ ESTACK_STRING_LITERAL_TYPE_PLAIN) {
+ ret = parse_char(estack_bx(stack, top),
+ &char_bx, &offset_bx);
+ if (ret == -1) {
+ diff = 0;
+ break;
+ } else if (ret == -2) {
+ escaped_r0 = 1;
+ }
+ /* else compare both char */
+ }
+ /* Decode escapes/wildcards on the ax side (literals only). */
+ if (estack_ax(stack, top)->u.s.literal_type ==
+ ESTACK_STRING_LITERAL_TYPE_PLAIN) {
+ ret = parse_char(estack_ax(stack, top),
+ &char_ax, &offset_ax);
+ if (ret == -1) {
+ diff = 0;
+ break;
+ } else if (ret == -2) {
+ /* Unknown escape on one side only orders the strings. */
+ if (!escaped_r0) {
+ diff = -1;
+ break;
+ }
+ } else {
+ if (escaped_r0) {
+ diff = 1;
+ break;
+ }
+ }
+ } else {
+ if (escaped_r0) {
+ diff = 1;
+ break;
+ }
+ }
+ /* Plain character comparison. */
+ diff = char_bx - char_ax;
+ if (diff != 0)
+ break;
+ offset_bx++;
+ offset_ax++;
+ }
+ if (has_user)
+ pagefault_enable();
+
+ return diff;
+}
+
+/*
+ * "Always false" filter interpreter: unconditionally discards the
+ * event. NOTE(review): presumably installed in place of the real
+ * interpreter when bytecode cannot be run (e.g. link failure) --
+ * confirm against callers.
+ */
+uint64_t lttng_bytecode_filter_interpret_false(void *filter_data,
+ struct lttng_probe_ctx *lttng_probe_ctx,
+ const char *filter_stack_data)
+{
+ return LTTNG_INTERPRETER_DISCARD;
+}
+
+/*
+ * "Always false" capture interpreter: never produces an output record,
+ * unconditionally discards. Counterpart of
+ * lttng_bytecode_filter_interpret_false() for the capture signature.
+ */
+uint64_t lttng_bytecode_capture_interpret_false(void *filter_data,
+ struct lttng_probe_ctx *lttng_probe_ctx,
+ const char *capture_stack_data,
+ struct lttng_interpreter_output *output)
+{
+ return LTTNG_INTERPRETER_DISCARD;
+}
+
+#ifdef INTERPRETER_USE_SWITCH
+
+/*
+ * Fallback for compilers that do not support taking address of labels.
+ *
+ * Instructions live in bytecode->code[]; bytecode->data[] is the
+ * separate data section referenced by get-index operations, so both
+ * START_OP variants must fetch opcodes from code[].
+ */
+
+#define START_OP \
+ start_pc = &bytecode->code[0]; \
+ for (pc = next_pc = start_pc; pc - start_pc < bytecode->len; \
+ pc = next_pc) { \
+ dbg_printk("LTTng: Executing op %s (%u)\n", \
+ lttng_bytecode_print_op((unsigned int) *(bytecode_opcode_t *) pc), \
+ (unsigned int) *(bytecode_opcode_t *) pc); \
+ switch (*(bytecode_opcode_t *) pc) {
+
+#define OP(name) case name
+
+#define PO break
+
+#define END_OP } \
+ }
+
+#else
+
+/*
+ * Dispatch-table based interpreter.
+ */
+
+#define START_OP \
+ start_pc = &bytecode->code[0]; \
+ pc = next_pc = start_pc; \
+ if (unlikely(pc - start_pc >= bytecode->len)) \
+ goto end; \
+ goto *dispatch[*(bytecode_opcode_t *) pc];
+
+#define OP(name) \
+LABEL_##name
+
+#define PO \
+ pc = next_pc; \
+ goto *dispatch[*(bytecode_opcode_t *) pc];
+
+#define END_OP
+
+#endif
+
+/* True when the register type holds an integer (signed or unsigned). */
+#define IS_INTEGER_REGISTER(reg_type) \
+ ((reg_type) == REG_S64 || (reg_type) == REG_U64)
+
+/*
+ * context_get_index - resolve context field @idx of lttng_static_ctx
+ * into @ptr as a loadable object.
+ *
+ * Integer and enumeration fields are read immediately through the
+ * field's get_value() callback and stored by value in @ptr; string-like
+ * fields (string, string arrays/sequences) are referenced by pointer.
+ * Structures and variants cannot be loaded.
+ * Returns 0 on success, -EINVAL on unsupported types.
+ */
+static int context_get_index(struct lttng_probe_ctx *lttng_probe_ctx,
+ struct load_ptr *ptr,
+ uint32_t idx)
+{
+
+ struct lttng_ctx_field *ctx_field;
+ struct lttng_event_field *field;
+ union lttng_ctx_value v;
+
+ /* Fix garbled address-of expression: take field @idx by reference. */
+ ctx_field = &lttng_static_ctx->fields[idx];
+ field = &ctx_field->event_field;
+ ptr->type = LOAD_OBJECT;
+ /* field is only used for types nested within variants. */
+ ptr->field = NULL;
+
+ switch (field->type.atype) {
+ case atype_integer:
+ ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+ if (field->type.u.integer.signedness) {
+ ptr->object_type = OBJECT_TYPE_S64;
+ ptr->u.s64 = v.s64;
+ ptr->ptr = &ptr->u.s64;
+ } else {
+ ptr->object_type = OBJECT_TYPE_U64;
+ ptr->u.u64 = v.s64; /* Cast. */
+ ptr->ptr = &ptr->u.u64;
+ }
+ break;
+ case atype_enum_nestable:
+ {
+ const struct lttng_integer_type *itype =
+ &field->type.u.enum_nestable.container_type->u.integer;
+
+ ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+ if (itype->signedness) {
+ ptr->object_type = OBJECT_TYPE_SIGNED_ENUM;
+ ptr->u.s64 = v.s64;
+ ptr->ptr = &ptr->u.s64;
+ } else {
+ ptr->object_type = OBJECT_TYPE_UNSIGNED_ENUM;
+ ptr->u.u64 = v.s64; /* Cast. */
+ ptr->ptr = &ptr->u.u64;
+ }
+ break;
+ }
+ case atype_array_nestable:
+ if (!lttng_is_bytewise_integer(field->type.u.array_nestable.elem_type)) {
+ printk(KERN_WARNING "LTTng: bytecode: Array nesting only supports integer types.\n");
+ return -EINVAL;
+ }
+ if (field->type.u.array_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
+ printk(KERN_WARNING "LTTng: bytecode: Only string arrays are supported for contexts.\n");
+ return -EINVAL;
+ }
+ ptr->object_type = OBJECT_TYPE_STRING;
+ ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+ ptr->ptr = v.str;
+ break;
+ case atype_sequence_nestable:
+ if (!lttng_is_bytewise_integer(field->type.u.sequence_nestable.elem_type)) {
+ printk(KERN_WARNING "LTTng: bytecode: Sequence nesting only supports integer types.\n");
+ return -EINVAL;
+ }
+ if (field->type.u.sequence_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
+ printk(KERN_WARNING "LTTng: bytecode: Only string sequences are supported for contexts.\n");
+ return -EINVAL;
+ }
+ ptr->object_type = OBJECT_TYPE_STRING;
+ ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+ ptr->ptr = v.str;
+ break;
+ case atype_string:
+ ptr->object_type = OBJECT_TYPE_STRING;
+ ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+ ptr->ptr = v.str;
+ break;
+ case atype_struct_nestable:
+ printk(KERN_WARNING "LTTng: bytecode: Structure type cannot be loaded.\n");
+ return -EINVAL;
+ case atype_variant_nestable:
+ printk(KERN_WARNING "LTTng: bytecode: Variant type cannot be loaded.\n");
+ return -EINVAL;
+ default:
+ printk(KERN_WARNING "LTTng: bytecode: Unknown type: %d\n", (int) field->type.atype);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * dynamic_get_index - apply a get-index operation to the object pointer
+ * at the top of the estack.
+ *
+ * The bytecode_get_index_data descriptor is fetched from the runtime's
+ * data section at @index. Depending on what the stack top currently
+ * references (a loaded object, a context root, or the payload root),
+ * the pointer is advanced to the selected array/sequence element or
+ * resolved to the selected context/payload field. On success the stack
+ * top becomes a REG_PTR. Returns 0 on success, negative errno on error.
+ */
+static int dynamic_get_index(struct lttng_probe_ctx *lttng_probe_ctx,
+ struct bytecode_runtime *runtime,
+ uint64_t index, struct estack_entry *stack_top)
+{
+ int ret;
+ const struct bytecode_get_index_data *gid;
+
+ gid = (const struct bytecode_get_index_data *) &runtime->data[index];
+ switch (stack_top->u.ptr.type) {
+ case LOAD_OBJECT:
+ switch (stack_top->u.ptr.object_type) {
+ case OBJECT_TYPE_ARRAY:
+ {
+ const char *ptr;
+
+ /* Offset was validated at link time; warn if out of range. */
+ WARN_ON_ONCE(gid->offset >= gid->array_len);
+ /* Skip count (unsigned long) */
+ ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
+ ptr = ptr + gid->offset;
+ stack_top->u.ptr.ptr = ptr;
+ stack_top->u.ptr.object_type = gid->elem.type;
+ stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
+ BUG_ON(stack_top->u.ptr.field->type.atype != atype_array_nestable);
+ stack_top->u.ptr.field = NULL;
+ break;
+ }
+ case OBJECT_TYPE_SEQUENCE:
+ {
+ const char *ptr;
+ size_t ptr_seq_len;
+
+ /* Sequence layout: { unsigned long len; const char *data; }. */
+ ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
+ ptr_seq_len = *(unsigned long *) stack_top->u.ptr.ptr;
+ /* Runtime bounds check against the actual sequence length. */
+ if (gid->offset >= gid->elem.len * ptr_seq_len) {
+ ret = -EINVAL;
+ goto end;
+ }
+ ptr = ptr + gid->offset;
+ stack_top->u.ptr.ptr = ptr;
+ stack_top->u.ptr.object_type = gid->elem.type;
+ stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
+ BUG_ON(stack_top->u.ptr.field->type.atype != atype_sequence_nestable);
+ stack_top->u.ptr.field = NULL;
+ break;
+ }
+ case OBJECT_TYPE_STRUCT:
+ printk(KERN_WARNING "LTTng: bytecode: Nested structures are not supported yet.\n");
+ ret = -EINVAL;
+ goto end;
+ case OBJECT_TYPE_VARIANT:
+ default:
+ printk(KERN_WARNING "LTTng: bytecode: Unexpected get index type %d",
+ (int) stack_top->u.ptr.object_type);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case LOAD_ROOT_CONTEXT:
+ case LOAD_ROOT_APP_CONTEXT: /* Fall-through */
+ {
+ /* Resolve the context field into a loadable object. */
+ ret = context_get_index(lttng_probe_ctx,
+ &stack_top->u.ptr,
+ gid->ctx_index);
+ if (ret) {
+ goto end;
+ }
+ break;
+ }
+ case LOAD_ROOT_PAYLOAD:
+ /* Advance into the event payload by the precomputed offset. */
+ stack_top->u.ptr.ptr += gid->offset;
+ if (gid->elem.type == OBJECT_TYPE_STRING)
+ stack_top->u.ptr.ptr = *(const char * const *) stack_top->u.ptr.ptr;
+ stack_top->u.ptr.object_type = gid->elem.type;
+ stack_top->u.ptr.type = LOAD_OBJECT;
+ stack_top->u.ptr.field = gid->field;
+ stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
+ break;
+ }
+
+ stack_top->type = REG_PTR;
+
+ return 0;
+
+end:
+ return ret;
+}
+
+/*
+ * dynamic_load_field - dereference the object pointer at the top of the
+ * estack and load its value into the stack entry, converting the typed
+ * object to an integer (REG_S64/REG_U64) or string (REG_STRING)
+ * register. Byte order is swapped when the object is marked reverse
+ * byte order. Returns 0 on success, -EINVAL for objects that cannot be
+ * loaded (roots, nested composite types, doubles, dynamic).
+ */
+static int dynamic_load_field(struct estack_entry *stack_top)
+{
+ int ret;
+
+ switch (stack_top->u.ptr.type) {
+ case LOAD_OBJECT:
+ break;
+ case LOAD_ROOT_CONTEXT:
+ case LOAD_ROOT_APP_CONTEXT:
+ case LOAD_ROOT_PAYLOAD:
+ default:
+ dbg_printk("Bytecode warning: cannot load root, missing field name.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (stack_top->u.ptr.object_type) {
+ case OBJECT_TYPE_S8:
+ dbg_printk("op load field s8\n");
+ stack_top->u.v = *(int8_t *) stack_top->u.ptr.ptr;
+ stack_top->type = REG_S64;
+ break;
+ case OBJECT_TYPE_S16:
+ {
+ int16_t tmp;
+
+ dbg_printk("op load field s16\n");
+ tmp = *(int16_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ __swab16s(&tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_S64;
+ break;
+ }
+ case OBJECT_TYPE_S32:
+ {
+ int32_t tmp;
+
+ dbg_printk("op load field s32\n");
+ tmp = *(int32_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ __swab32s(&tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_S64;
+ break;
+ }
+ case OBJECT_TYPE_S64:
+ {
+ int64_t tmp;
+
+ dbg_printk("op load field s64\n");
+ tmp = *(int64_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ __swab64s(&tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_S64;
+ break;
+ }
+ case OBJECT_TYPE_SIGNED_ENUM:
+ {
+ int64_t tmp;
+
+ dbg_printk("op load field signed enumeration\n");
+ tmp = *(int64_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ __swab64s(&tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_S64;
+ break;
+ }
+ case OBJECT_TYPE_U8:
+ dbg_printk("op load field u8\n");
+ stack_top->u.v = *(uint8_t *) stack_top->u.ptr.ptr;
+ stack_top->type = REG_U64;
+ break;
+ case OBJECT_TYPE_U16:
+ {
+ uint16_t tmp;
+
+ dbg_printk("op load field u16\n");
+ tmp = *(uint16_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ __swab16s(&tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_U64;
+ break;
+ }
+ case OBJECT_TYPE_U32:
+ {
+ uint32_t tmp;
+
+ dbg_printk("op load field u32\n");
+ tmp = *(uint32_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ __swab32s(&tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_U64;
+ break;
+ }
+ case OBJECT_TYPE_U64:
+ {
+ uint64_t tmp;
+
+ dbg_printk("op load field u64\n");
+ tmp = *(uint64_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ __swab64s(&tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_U64;
+ break;
+ }
+ case OBJECT_TYPE_UNSIGNED_ENUM:
+ {
+ uint64_t tmp;
+
+ dbg_printk("op load field unsigned enumeration\n");
+ tmp = *(uint64_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ __swab64s(&tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_U64;
+ break;
+ }
+ case OBJECT_TYPE_STRING:
+ {
+ const char *str;
+
+ dbg_printk("op load field string\n");
+ str = (const char *) stack_top->u.ptr.ptr;
+ stack_top->u.s.str = str;
+ if (unlikely(!stack_top->u.s.str)) {
+ dbg_printk("Bytecode warning: loading a NULL string.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ /* NUL-terminated string: length is unbounded. */
+ stack_top->u.s.seq_len = LTTNG_SIZE_MAX;
+ stack_top->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ stack_top->type = REG_STRING;
+ break;
+ }
+ case OBJECT_TYPE_STRING_SEQUENCE:
+ {
+ const char *ptr;
+
+ dbg_printk("op load field string sequence\n");
+ /* Sequence layout: { unsigned long len; const char *data; }. */
+ ptr = stack_top->u.ptr.ptr;
+ stack_top->u.s.seq_len = *(unsigned long *) ptr;
+ stack_top->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
+ if (unlikely(!stack_top->u.s.str)) {
+ dbg_printk("Bytecode warning: loading a NULL sequence.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ stack_top->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ stack_top->type = REG_STRING;
+ break;
+ }
+ case OBJECT_TYPE_DYNAMIC:
+ /*
+ * Dynamic types in context are looked up
+ * by context get index.
+ */
+ ret = -EINVAL;
+ goto end;
+ case OBJECT_TYPE_DOUBLE:
+ ret = -EINVAL;
+ goto end;
+ case OBJECT_TYPE_SEQUENCE:
+ case OBJECT_TYPE_ARRAY:
+ case OBJECT_TYPE_STRUCT:
+ case OBJECT_TYPE_VARIANT:
+ printk(KERN_WARNING "LTTng: bytecode: Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ return 0;
+
+end:
+ return ret;
+}
+
+/*
+ * lttng_bytecode_interpret_format_output - convert the interpreter's
+ * top-of-stack register @ax into a typed @output record for capture.
+ *
+ * Pointer registers to primitive objects are first materialized via
+ * dynamic_load_field() and then re-dispatched through the switch.
+ * Returns LTTNG_INTERPRETER_RECORD_FLAG on success, a negative error
+ * code for unsupported register/object types.
+ */
+static
+int lttng_bytecode_interpret_format_output(struct estack_entry *ax,
+ struct lttng_interpreter_output *output)
+{
+ int ret;
+
+again:
+ switch (ax->type) {
+ case REG_S64:
+ output->type = LTTNG_INTERPRETER_TYPE_S64;
+ output->u.s = ax->u.v;
+ break;
+ case REG_U64:
+ output->type = LTTNG_INTERPRETER_TYPE_U64;
+ output->u.u = (uint64_t) ax->u.v;
+ break;
+ case REG_STRING:
+ output->type = LTTNG_INTERPRETER_TYPE_STRING;
+ output->u.str.str = ax->u.s.str;
+ output->u.str.len = ax->u.s.seq_len;
+ break;
+ case REG_PTR:
+ switch (ax->u.ptr.object_type) {
+ case OBJECT_TYPE_S8:
+ case OBJECT_TYPE_S16:
+ case OBJECT_TYPE_S32:
+ case OBJECT_TYPE_S64:
+ case OBJECT_TYPE_U8:
+ case OBJECT_TYPE_U16:
+ case OBJECT_TYPE_U32:
+ case OBJECT_TYPE_U64:
+ case OBJECT_TYPE_DOUBLE:
+ case OBJECT_TYPE_STRING:
+ case OBJECT_TYPE_STRING_SEQUENCE:
+ ret = dynamic_load_field(ax);
+ if (ret)
+ return ret;
+ /* Retry after loading ptr into stack top. */
+ goto again;
+ case OBJECT_TYPE_SEQUENCE:
+ /* Sequence layout: { unsigned long len; const char *data; }. */
+ output->type = LTTNG_INTERPRETER_TYPE_SEQUENCE;
+ output->u.sequence.ptr = *(const char **) (ax->u.ptr.ptr + sizeof(unsigned long));
+ output->u.sequence.nr_elem = *(unsigned long *) ax->u.ptr.ptr;
+ output->u.sequence.nested_type = ax->u.ptr.field->type.u.sequence_nestable.elem_type;
+ break;
+ case OBJECT_TYPE_ARRAY:
+ /* Skip count (unsigned long) */
+ output->type = LTTNG_INTERPRETER_TYPE_SEQUENCE;
+ output->u.sequence.ptr = *(const char **) (ax->u.ptr.ptr + sizeof(unsigned long));
+ output->u.sequence.nr_elem = ax->u.ptr.field->type.u.array_nestable.length;
+ output->u.sequence.nested_type = ax->u.ptr.field->type.u.array_nestable.elem_type;
+ break;
+ case OBJECT_TYPE_SIGNED_ENUM:
+ ret = dynamic_load_field(ax);
+ if (ret)
+ return ret;
+ output->type = LTTNG_INTERPRETER_TYPE_SIGNED_ENUM;
+ output->u.s = ax->u.v;
+ break;
+ case OBJECT_TYPE_UNSIGNED_ENUM:
+ ret = dynamic_load_field(ax);
+ if (ret)
+ return ret;
+ output->type = LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM;
+ output->u.u = ax->u.v;
+ break;
+ case OBJECT_TYPE_STRUCT:
+ case OBJECT_TYPE_VARIANT:
+ default:
+ return -EINVAL;
+ }
+
+ break;
+ case REG_STAR_GLOB_STRING:
+ case REG_TYPE_UNKNOWN:
+ default:
+ return -EINVAL;
+ }
+
+ return LTTNG_INTERPRETER_RECORD_FLAG;
+}
+
+/*
+ * Return 0 (discard), or raise the 0x1 flag (log event).
+ * Currently, other flags are kept for future extensions and have no
+ * effect.
+ */
+static
+uint64_t bytecode_interpret(void *interpreter_data,
+ struct lttng_probe_ctx *lttng_probe_ctx,
+ const char *interpreter_stack_data,
+ struct lttng_interpreter_output *output)
+{
+ struct bytecode_runtime *bytecode = interpreter_data;
+ void *pc, *next_pc, *start_pc;
+ int ret = -EINVAL;
+ uint64_t retval = 0;
+ struct estack _stack;
+ struct estack *stack = &_stack;
+ register int64_t ax = 0, bx = 0;
+ register enum entry_type ax_t = REG_TYPE_UNKNOWN, bx_t = REG_TYPE_UNKNOWN;
+ register int top = INTERPRETER_STACK_EMPTY;
+#ifndef INTERPRETER_USE_SWITCH
+ static void *dispatch[NR_BYTECODE_OPS] = {
+ [ BYTECODE_OP_UNKNOWN ] = &&LABEL_BYTECODE_OP_UNKNOWN,
+
+ [ BYTECODE_OP_RETURN ] = &&LABEL_BYTECODE_OP_RETURN,
+
+ /* binary */
+ [ BYTECODE_OP_MUL ] = &&LABEL_BYTECODE_OP_MUL,
+ [ BYTECODE_OP_DIV ] = &&LABEL_BYTECODE_OP_DIV,
+ [ BYTECODE_OP_MOD ] = &&LABEL_BYTECODE_OP_MOD,
+ [ BYTECODE_OP_PLUS ] = &&LABEL_BYTECODE_OP_PLUS,
+ [ BYTECODE_OP_MINUS ] = &&LABEL_BYTECODE_OP_MINUS,
+ [ BYTECODE_OP_BIT_RSHIFT ] = &&LABEL_BYTECODE_OP_BIT_RSHIFT,
+ [ BYTECODE_OP_BIT_LSHIFT ] = &&LABEL_BYTECODE_OP_BIT_LSHIFT,
+ [ BYTECODE_OP_BIT_AND ] = &&LABEL_BYTECODE_OP_BIT_AND,
+ [ BYTECODE_OP_BIT_OR ] = &&LABEL_BYTECODE_OP_BIT_OR,
+ [ BYTECODE_OP_BIT_XOR ] = &&LABEL_BYTECODE_OP_BIT_XOR,
+
+ /* binary comparators */
+ [ BYTECODE_OP_EQ ] = &&LABEL_BYTECODE_OP_EQ,
+ [ BYTECODE_OP_NE ] = &&LABEL_BYTECODE_OP_NE,
+ [ BYTECODE_OP_GT ] = &&LABEL_BYTECODE_OP_GT,
+ [ BYTECODE_OP_LT ] = &&LABEL_BYTECODE_OP_LT,
+ [ BYTECODE_OP_GE ] = &&LABEL_BYTECODE_OP_GE,
+ [ BYTECODE_OP_LE ] = &&LABEL_BYTECODE_OP_LE,
+
+ /* string binary comparator */
+ [ BYTECODE_OP_EQ_STRING ] = &&LABEL_BYTECODE_OP_EQ_STRING,
+ [ BYTECODE_OP_NE_STRING ] = &&LABEL_BYTECODE_OP_NE_STRING,
+ [ BYTECODE_OP_GT_STRING ] = &&LABEL_BYTECODE_OP_GT_STRING,
+ [ BYTECODE_OP_LT_STRING ] = &&LABEL_BYTECODE_OP_LT_STRING,
+ [ BYTECODE_OP_GE_STRING ] = &&LABEL_BYTECODE_OP_GE_STRING,
+ [ BYTECODE_OP_LE_STRING ] = &&LABEL_BYTECODE_OP_LE_STRING,
+
+ /* globbing pattern binary comparator */
+ [ BYTECODE_OP_EQ_STAR_GLOB_STRING ] = &&LABEL_BYTECODE_OP_EQ_STAR_GLOB_STRING,
+ [ BYTECODE_OP_NE_STAR_GLOB_STRING ] = &&LABEL_BYTECODE_OP_NE_STAR_GLOB_STRING,
+
+ /* s64 binary comparator */
+ [ BYTECODE_OP_EQ_S64 ] = &&LABEL_BYTECODE_OP_EQ_S64,
+ [ BYTECODE_OP_NE_S64 ] = &&LABEL_BYTECODE_OP_NE_S64,
+ [ BYTECODE_OP_GT_S64 ] = &&LABEL_BYTECODE_OP_GT_S64,
+ [ BYTECODE_OP_LT_S64 ] = &&LABEL_BYTECODE_OP_LT_S64,
+ [ BYTECODE_OP_GE_S64 ] = &&LABEL_BYTECODE_OP_GE_S64,
+ [ BYTECODE_OP_LE_S64 ] = &&LABEL_BYTECODE_OP_LE_S64,
+
+ /* double binary comparator */
+ [ BYTECODE_OP_EQ_DOUBLE ] = &&LABEL_BYTECODE_OP_EQ_DOUBLE,
+ [ BYTECODE_OP_NE_DOUBLE ] = &&LABEL_BYTECODE_OP_NE_DOUBLE,
+ [ BYTECODE_OP_GT_DOUBLE ] = &&LABEL_BYTECODE_OP_GT_DOUBLE,
+ [ BYTECODE_OP_LT_DOUBLE ] = &&LABEL_BYTECODE_OP_LT_DOUBLE,
+ [ BYTECODE_OP_GE_DOUBLE ] = &&LABEL_BYTECODE_OP_GE_DOUBLE,
+ [ BYTECODE_OP_LE_DOUBLE ] = &&LABEL_BYTECODE_OP_LE_DOUBLE,
+
+ /* Mixed S64-double binary comparators */
+ [ BYTECODE_OP_EQ_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_EQ_DOUBLE_S64,
+ [ BYTECODE_OP_NE_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_NE_DOUBLE_S64,
+ [ BYTECODE_OP_GT_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_GT_DOUBLE_S64,
+ [ BYTECODE_OP_LT_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_LT_DOUBLE_S64,
+ [ BYTECODE_OP_GE_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_GE_DOUBLE_S64,
+ [ BYTECODE_OP_LE_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_LE_DOUBLE_S64,
+
+ [ BYTECODE_OP_EQ_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_EQ_S64_DOUBLE,
+ [ BYTECODE_OP_NE_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_NE_S64_DOUBLE,
+ [ BYTECODE_OP_GT_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_GT_S64_DOUBLE,
+ [ BYTECODE_OP_LT_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_LT_S64_DOUBLE,
+ [ BYTECODE_OP_GE_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_GE_S64_DOUBLE,
+ [ BYTECODE_OP_LE_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_LE_S64_DOUBLE,
+
+ /* unary */
+ [ BYTECODE_OP_UNARY_PLUS ] = &&LABEL_BYTECODE_OP_UNARY_PLUS,
+ [ BYTECODE_OP_UNARY_MINUS ] = &&LABEL_BYTECODE_OP_UNARY_MINUS,
+ [ BYTECODE_OP_UNARY_NOT ] = &&LABEL_BYTECODE_OP_UNARY_NOT,
+ [ BYTECODE_OP_UNARY_PLUS_S64 ] = &&LABEL_BYTECODE_OP_UNARY_PLUS_S64,
+ [ BYTECODE_OP_UNARY_MINUS_S64 ] = &&LABEL_BYTECODE_OP_UNARY_MINUS_S64,
+ [ BYTECODE_OP_UNARY_NOT_S64 ] = &&LABEL_BYTECODE_OP_UNARY_NOT_S64,
+ [ BYTECODE_OP_UNARY_PLUS_DOUBLE ] = &&LABEL_BYTECODE_OP_UNARY_PLUS_DOUBLE,
+ [ BYTECODE_OP_UNARY_MINUS_DOUBLE ] = &&LABEL_BYTECODE_OP_UNARY_MINUS_DOUBLE,
+ [ BYTECODE_OP_UNARY_NOT_DOUBLE ] = &&LABEL_BYTECODE_OP_UNARY_NOT_DOUBLE,
+
+ /* logical */
+ [ BYTECODE_OP_AND ] = &&LABEL_BYTECODE_OP_AND,
+ [ BYTECODE_OP_OR ] = &&LABEL_BYTECODE_OP_OR,
+
+ /* load field ref */
+ [ BYTECODE_OP_LOAD_FIELD_REF ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF,
+ [ BYTECODE_OP_LOAD_FIELD_REF_STRING ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_STRING,
+ [ BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE,
+ [ BYTECODE_OP_LOAD_FIELD_REF_S64 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_S64,
+ [ BYTECODE_OP_LOAD_FIELD_REF_DOUBLE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_DOUBLE,
+
+ /* load from immediate operand */
+ [ BYTECODE_OP_LOAD_STRING ] = &&LABEL_BYTECODE_OP_LOAD_STRING,
+ [ BYTECODE_OP_LOAD_STAR_GLOB_STRING ] = &&LABEL_BYTECODE_OP_LOAD_STAR_GLOB_STRING,
+ [ BYTECODE_OP_LOAD_S64 ] = &&LABEL_BYTECODE_OP_LOAD_S64,
+ [ BYTECODE_OP_LOAD_DOUBLE ] = &&LABEL_BYTECODE_OP_LOAD_DOUBLE,
+
+ /* cast */
+ [ BYTECODE_OP_CAST_TO_S64 ] = &&LABEL_BYTECODE_OP_CAST_TO_S64,
+ [ BYTECODE_OP_CAST_DOUBLE_TO_S64 ] = &&LABEL_BYTECODE_OP_CAST_DOUBLE_TO_S64,
+ [ BYTECODE_OP_CAST_NOP ] = &&LABEL_BYTECODE_OP_CAST_NOP,
+
+ /* get context ref */
+ [ BYTECODE_OP_GET_CONTEXT_REF ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF,
+ [ BYTECODE_OP_GET_CONTEXT_REF_STRING ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF_STRING,
+ [ BYTECODE_OP_GET_CONTEXT_REF_S64 ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF_S64,
+ [ BYTECODE_OP_GET_CONTEXT_REF_DOUBLE ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF_DOUBLE,
+
+ /* load userspace field ref */
+ [ BYTECODE_OP_LOAD_FIELD_REF_USER_STRING ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_USER_STRING,
+ [ BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE,
+
+ /* Instructions for recursive traversal through composed types. */
+ [ BYTECODE_OP_GET_CONTEXT_ROOT ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_ROOT,
+ [ BYTECODE_OP_GET_APP_CONTEXT_ROOT ] = &&LABEL_BYTECODE_OP_GET_APP_CONTEXT_ROOT,
+ [ BYTECODE_OP_GET_PAYLOAD_ROOT ] = &&LABEL_BYTECODE_OP_GET_PAYLOAD_ROOT,
+
+ [ BYTECODE_OP_GET_SYMBOL ] = &&LABEL_BYTECODE_OP_GET_SYMBOL,
+ [ BYTECODE_OP_GET_SYMBOL_FIELD ] = &&LABEL_BYTECODE_OP_GET_SYMBOL_FIELD,
+ [ BYTECODE_OP_GET_INDEX_U16 ] = &&LABEL_BYTECODE_OP_GET_INDEX_U16,
+ [ BYTECODE_OP_GET_INDEX_U64 ] = &&LABEL_BYTECODE_OP_GET_INDEX_U64,
+
+ [ BYTECODE_OP_LOAD_FIELD ] = &&LABEL_BYTECODE_OP_LOAD_FIELD,
+ [ BYTECODE_OP_LOAD_FIELD_S8 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S8,
+ [ BYTECODE_OP_LOAD_FIELD_S16 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S16,
+ [ BYTECODE_OP_LOAD_FIELD_S32 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S32,
+ [ BYTECODE_OP_LOAD_FIELD_S64 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S64,
+ [ BYTECODE_OP_LOAD_FIELD_U8 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U8,
+ [ BYTECODE_OP_LOAD_FIELD_U16 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U16,
+ [ BYTECODE_OP_LOAD_FIELD_U32 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U32,
+ [ BYTECODE_OP_LOAD_FIELD_U64 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U64,
+ [ BYTECODE_OP_LOAD_FIELD_STRING ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_STRING,
+ [ BYTECODE_OP_LOAD_FIELD_SEQUENCE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_SEQUENCE,
+ [ BYTECODE_OP_LOAD_FIELD_DOUBLE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_DOUBLE,
+
+ [ BYTECODE_OP_UNARY_BIT_NOT ] = &&LABEL_BYTECODE_OP_UNARY_BIT_NOT,
+
+ [ BYTECODE_OP_RETURN_S64 ] = &&LABEL_BYTECODE_OP_RETURN_S64,
+ };
+#endif /* #ifndef INTERPRETER_USE_SWITCH */
+
+ START_OP
+
+ OP(BYTECODE_OP_UNKNOWN):
+ OP(BYTECODE_OP_LOAD_FIELD_REF):
+ OP(BYTECODE_OP_GET_CONTEXT_REF):
+#ifdef INTERPRETER_USE_SWITCH
+ default:
+#endif /* INTERPRETER_USE_SWITCH */
+ printk(KERN_WARNING "LTTng: bytecode: unknown bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+
+ OP(BYTECODE_OP_RETURN):
+ OP(BYTECODE_OP_RETURN_S64):
+ /* LTTNG_INTERPRETER_DISCARD or LTTNG_INTERPRETER_RECORD_FLAG */
+ switch (estack_ax_t) {
+ case REG_S64:
+ case REG_U64:
+ retval = !!estack_ax_v;
+ break;
+ case REG_DOUBLE:
+ case REG_STRING:
+ case REG_PTR:
+ if (!output) {
+ ret = -EINVAL;
+ goto end;
+ }
+ retval = 0;
+ break;
+ case REG_STAR_GLOB_STRING:
+ case REG_TYPE_UNKNOWN:
+ ret = -EINVAL;
+ goto end;
+ }
+ ret = 0;
+ goto end;
+
+ /* binary */
+ OP(BYTECODE_OP_MUL):
+ OP(BYTECODE_OP_DIV):
+ OP(BYTECODE_OP_MOD):
+ OP(BYTECODE_OP_PLUS):
+ OP(BYTECODE_OP_MINUS):
+ printk(KERN_WARNING "LTTng: bytecode: unsupported bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+
+ OP(BYTECODE_OP_EQ):
+ OP(BYTECODE_OP_NE):
+ OP(BYTECODE_OP_GT):
+ OP(BYTECODE_OP_LT):
+ OP(BYTECODE_OP_GE):
+ OP(BYTECODE_OP_LE):
+ printk(KERN_WARNING "LTTng: bytecode: unsupported non-specialized bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+
+ OP(BYTECODE_OP_EQ_STRING):
+ {
+ int res;
+
+ res = (stack_strcmp(stack, top, "==") == 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_NE_STRING):
+ {
+ int res;
+
+ res = (stack_strcmp(stack, top, "!=") != 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GT_STRING):
+ {
+ int res;
+
+ res = (stack_strcmp(stack, top, ">") > 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LT_STRING):
+ {
+ int res;
+
+ res = (stack_strcmp(stack, top, "<") < 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GE_STRING):
+ {
+ int res;
+
+ res = (stack_strcmp(stack, top, ">=") >= 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LE_STRING):
+ {
+ int res;
+
+ res = (stack_strcmp(stack, top, "<=") <= 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_EQ_STAR_GLOB_STRING):
+ {
+ int res;
+
+ res = (stack_star_glob_match(stack, top, "==") == 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_NE_STAR_GLOB_STRING):
+ {
+ int res;
+
+ res = (stack_star_glob_match(stack, top, "!=") != 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_EQ_S64):
+ {
+ int res;
+
+ res = (estack_bx_v == estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_NE_S64):
+ {
+ int res;
+
+ res = (estack_bx_v != estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GT_S64):
+ {
+ int res;
+
+ res = (estack_bx_v > estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LT_S64):
+ {
+ int res;
+
+ res = (estack_bx_v < estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GE_S64):
+ {
+ int res;
+
+ res = (estack_bx_v >= estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LE_S64):
+ {
+ int res;
+
+ res = (estack_bx_v <= estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_EQ_DOUBLE):
+ OP(BYTECODE_OP_NE_DOUBLE):
+ OP(BYTECODE_OP_GT_DOUBLE):
+ OP(BYTECODE_OP_LT_DOUBLE):
+ OP(BYTECODE_OP_GE_DOUBLE):
+ OP(BYTECODE_OP_LE_DOUBLE):
+ {
+ BUG_ON(1);
+ PO;
+ }
+
+ /* Mixed S64-double binary comparators */
+ OP(BYTECODE_OP_EQ_DOUBLE_S64):
+ OP(BYTECODE_OP_NE_DOUBLE_S64):
+ OP(BYTECODE_OP_GT_DOUBLE_S64):
+ OP(BYTECODE_OP_LT_DOUBLE_S64):
+ OP(BYTECODE_OP_GE_DOUBLE_S64):
+ OP(BYTECODE_OP_LE_DOUBLE_S64):
+ OP(BYTECODE_OP_EQ_S64_DOUBLE):
+ OP(BYTECODE_OP_NE_S64_DOUBLE):
+ OP(BYTECODE_OP_GT_S64_DOUBLE):
+ OP(BYTECODE_OP_LT_S64_DOUBLE):
+ OP(BYTECODE_OP_GE_S64_DOUBLE):
+ OP(BYTECODE_OP_LE_S64_DOUBLE):
+ {
+ BUG_ON(1);
+ PO;
+ }
+ OP(BYTECODE_OP_BIT_RSHIFT):
+ {
+ int64_t res;
+
+ if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ /* Catch undefined behavior. */
+ if (unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ res = ((uint64_t) estack_bx_v >> (uint32_t) estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_BIT_LSHIFT):
+ {
+ int64_t res;
+
+ if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ /* Catch undefined behavior. */
+ if (unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ res = ((uint64_t) estack_bx_v << (uint32_t) estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_BIT_AND):
+ {
+ int64_t res;
+
+ if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ res = ((uint64_t) estack_bx_v & (uint64_t) estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_BIT_OR):
+ {
+ int64_t res;
+
+ if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ res = ((uint64_t) estack_bx_v | (uint64_t) estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_BIT_XOR):
+ {
+ int64_t res;
+
+ if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ res = ((uint64_t) estack_bx_v ^ (uint64_t) estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+
+ /* unary */
+ OP(BYTECODE_OP_UNARY_PLUS):
+ OP(BYTECODE_OP_UNARY_MINUS):
+ OP(BYTECODE_OP_UNARY_NOT):
+ printk(KERN_WARNING "LTTng: bytecode: unsupported non-specialized bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+
+
+ OP(BYTECODE_OP_UNARY_BIT_NOT):
+ {
+ estack_ax_v = ~(uint64_t) estack_ax_v;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct unary_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_UNARY_PLUS_S64):
+ {
+ next_pc += sizeof(struct unary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_UNARY_MINUS_S64):
+ {
+ estack_ax_v = -estack_ax_v;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct unary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_UNARY_PLUS_DOUBLE):
+ OP(BYTECODE_OP_UNARY_MINUS_DOUBLE):
+ {
+ BUG_ON(1);
+ PO;
+ }
+ OP(BYTECODE_OP_UNARY_NOT_S64):
+ {
+ estack_ax_v = !estack_ax_v;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct unary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_UNARY_NOT_DOUBLE):
+ {
+ BUG_ON(1);
+ PO;
+ }
+
+ /* logical */
+ OP(BYTECODE_OP_AND):
+ {
+ struct logical_op *insn = (struct logical_op *) pc;
+
+ /* If AX is 0, skip and evaluate to 0 */
+ if (unlikely(estack_ax_v == 0)) {
+ dbg_printk("Jumping to bytecode offset %u\n",
+ (unsigned int) insn->skip_offset);
+ next_pc = start_pc + insn->skip_offset;
+ } else {
+ /* Pop 1 when jump not taken */
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ next_pc += sizeof(struct logical_op);
+ }
+ PO;
+ }
+ OP(BYTECODE_OP_OR):
+ {
+ struct logical_op *insn = (struct logical_op *) pc;
+
+ /* If AX is nonzero, skip and evaluate to 1 */
+
+ if (unlikely(estack_ax_v != 0)) {
+ estack_ax_v = 1;
+ dbg_printk("Jumping to bytecode offset %u\n",
+ (unsigned int) insn->skip_offset);
+ next_pc = start_pc + insn->skip_offset;
+ } else {
+ /* Pop 1 when jump not taken */
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ next_pc += sizeof(struct logical_op);
+ }
+ PO;
+ }
+
+
+ /* load field ref */
+ OP(BYTECODE_OP_LOAD_FIELD_REF_STRING):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printk("load field ref offset %u type string\n",
+ ref->offset);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.s.str =
+ *(const char * const *) &interpreter_stack_data[ref->offset];
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printk("Bytecode warning: loading a NULL string.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ estack_ax(stack, top)->u.s.user = 0;
+ estack_ax(stack, top)->type = REG_STRING;
+ dbg_printk("ref load string %s\n", estack_ax(stack, top)->u.s.str);
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printk("load field ref offset %u type sequence\n",
+ ref->offset);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.s.seq_len =
+ *(unsigned long *) &interpreter_stack_data[ref->offset];
+ estack_ax(stack, top)->u.s.str =
+ *(const char **) (&interpreter_stack_data[ref->offset
+ + sizeof(unsigned long)]);
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printk("Bytecode warning: loading a NULL sequence.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ estack_ax(stack, top)->u.s.user = 0;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD_REF_S64):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printk("load field ref offset %u type s64\n",
+ ref->offset);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v =
+ ((struct literal_numeric *) &interpreter_stack_data[ref->offset])->v;
+ estack_ax_t = REG_S64;
+ dbg_printk("ref load s64 %lld\n",
+ (long long) estack_ax_v);
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD_REF_DOUBLE):
+ {
+ BUG_ON(1);
+ PO;
+ }
+
+ /* load from immediate operand */
+ OP(BYTECODE_OP_LOAD_STRING):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ dbg_printk("load string %s\n", insn->data);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.s.str = insn->data;
+ estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_PLAIN;
+ estack_ax(stack, top)->u.s.user = 0;
+ next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_STAR_GLOB_STRING):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ dbg_printk("load globbing pattern %s\n", insn->data);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.s.str = insn->data;
+ estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_STAR_GLOB;
+ estack_ax(stack, top)->u.s.user = 0;
+ next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_S64):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = ((struct literal_numeric *) insn->data)->v;
+ estack_ax_t = REG_S64;
+ dbg_printk("load s64 %lld\n",
+ (long long) estack_ax_v);
+ next_pc += sizeof(struct load_op)
+ + sizeof(struct literal_numeric);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_DOUBLE):
+ {
+ BUG_ON(1);
+ PO;
+ }
+
+ /* cast */
+ OP(BYTECODE_OP_CAST_TO_S64):
+ printk(KERN_WARNING "LTTng: bytecode: unsupported non-specialized bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+
+ OP(BYTECODE_OP_CAST_DOUBLE_TO_S64):
+ {
+ BUG_ON(1);
+ PO;
+ }
+
+ OP(BYTECODE_OP_CAST_NOP):
+ {
+ next_pc += sizeof(struct cast_op);
+ PO;
+ }
+
+ /* get context ref */
+ OP(BYTECODE_OP_GET_CONTEXT_REF_STRING):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+ struct lttng_ctx_field *ctx_field;
+ union lttng_ctx_value v;
+
+ dbg_printk("get context ref offset %u type string\n",
+ ref->offset);
+ ctx_field = <tng_static_ctx->fields[ref->offset];
+ ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.s.str = v.str;
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printk("Bytecode warning: loading a NULL string.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ estack_ax(stack, top)->u.s.user = 0;
+ estack_ax(stack, top)->type = REG_STRING;
+ dbg_printk("ref get context string %s\n", estack_ax(stack, top)->u.s.str);
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_CONTEXT_REF_S64):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+ struct lttng_ctx_field *ctx_field;
+ union lttng_ctx_value v;
+
+ dbg_printk("get context ref offset %u type s64\n",
+ ref->offset);
+ ctx_field = <tng_static_ctx->fields[ref->offset];
+ ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = v.s64;
+ estack_ax_t = REG_S64;
+ dbg_printk("ref get context s64 %lld\n",
+ (long long) estack_ax_v);
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_CONTEXT_REF_DOUBLE):
+ {
+ BUG_ON(1);
+ PO;
+ }
+
+ /* load userspace field ref */
+ OP(BYTECODE_OP_LOAD_FIELD_REF_USER_STRING):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printk("load field ref offset %u type user string\n",
+ ref->offset);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.s.user_str =
+ *(const char * const *) &interpreter_stack_data[ref->offset];
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printk("Bytecode warning: loading a NULL string.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ estack_ax(stack, top)->u.s.user = 1;
+ estack_ax(stack, top)->type = REG_STRING;
+ dbg_printk("ref load string %s\n", estack_ax(stack, top)->u.s.str);
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printk("load field ref offset %u type user sequence\n",
+ ref->offset);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.s.seq_len =
+ *(unsigned long *) &interpreter_stack_data[ref->offset];
+ estack_ax(stack, top)->u.s.user_str =
+ *(const char **) (&interpreter_stack_data[ref->offset
+ + sizeof(unsigned long)]);
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printk("Bytecode warning: loading a NULL sequence.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ estack_ax(stack, top)->u.s.user = 1;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_CONTEXT_ROOT):
+ {
+ dbg_printk("op get context root\n");
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_CONTEXT;
+ /* "field" only needed for variants. */
+ estack_ax(stack, top)->u.ptr.field = NULL;
+ estack_ax(stack, top)->type = REG_PTR;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_APP_CONTEXT_ROOT):
+ {
+ BUG_ON(1);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_PAYLOAD_ROOT):
+ {
+ dbg_printk("op get app payload root\n");
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_PAYLOAD;
+ estack_ax(stack, top)->u.ptr.ptr = interpreter_stack_data;
+ /* "field" only needed for variants. */
+ estack_ax(stack, top)->u.ptr.field = NULL;
+ estack_ax(stack, top)->type = REG_PTR;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_SYMBOL):
+ {
+ dbg_printk("op get symbol\n");
+ switch (estack_ax(stack, top)->u.ptr.type) {
+ case LOAD_OBJECT:
+ printk(KERN_WARNING "LTTng: bytecode: Nested fields not implemented yet.\n");
+ ret = -EINVAL;
+ goto end;
+ case LOAD_ROOT_CONTEXT:
+ case LOAD_ROOT_APP_CONTEXT:
+ case LOAD_ROOT_PAYLOAD:
+ /*
+ * symbol lookup is performed by
+ * specialization.
+ */
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_SYMBOL_FIELD):
+ {
+ /*
+ * Used for first variant encountered in a
+ * traversal. Variants are not implemented yet.
+ */
+ ret = -EINVAL;
+ goto end;
+ }
+
+ OP(BYTECODE_OP_GET_INDEX_U16):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
+
+ dbg_printk("op get index u16\n");
+ ret = dynamic_get_index(lttng_probe_ctx, bytecode, index->index, estack_ax(stack, top));
+ if (ret)
+ goto end;
+ estack_ax_v = estack_ax(stack, top)->u.v;
+ estack_ax_t = estack_ax(stack, top)->type;
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_INDEX_U64):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
+
+ dbg_printk("op get index u64\n");
+ ret = dynamic_get_index(lttng_probe_ctx, bytecode, index->index, estack_ax(stack, top));
+ if (ret)
+ goto end;
+ estack_ax_v = estack_ax(stack, top)->u.v;
+ estack_ax_t = estack_ax(stack, top)->type;
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD):
+ {
+ dbg_printk("op load field\n");
+ ret = dynamic_load_field(estack_ax(stack, top));
+ if (ret)
+ goto end;
+ estack_ax_v = estack_ax(stack, top)->u.v;
+ estack_ax_t = estack_ax(stack, top)->type;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD_S8):
+ {
+ dbg_printk("op load field s8\n");
+
+ estack_ax_v = *(int8_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_S16):
+ {
+ dbg_printk("op load field s16\n");
+
+ estack_ax_v = *(int16_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_S32):
+ {
+ dbg_printk("op load field s32\n");
+
+ estack_ax_v = *(int32_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_S64):
+ {
+ dbg_printk("op load field s64\n");
+
+ estack_ax_v = *(int64_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_U8):
+ {
+ dbg_printk("op load field u8\n");
+
+ estack_ax_v = *(uint8_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_U16):
+ {
+ dbg_printk("op load field u16\n");
+
+ estack_ax_v = *(uint16_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_U32):
+ {
+ dbg_printk("op load field u32\n");
+
+ estack_ax_v = *(uint32_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_U64):
+ {
+ dbg_printk("op load field u64\n");
+
+ estack_ax_v = *(uint64_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_DOUBLE):
+ {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD_STRING):
+ {
+ const char *str;
+
+ dbg_printk("op load field string\n");
+ str = (const char *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax(stack, top)->u.s.str = str;
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printk("Bytecode warning: loading a NULL string.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ estack_ax(stack, top)->type = REG_STRING;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD_SEQUENCE):
+ {
+ const char *ptr;
+
+ dbg_printk("op load field string sequence\n");
+ ptr = estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax(stack, top)->u.s.seq_len = *(unsigned long *) ptr;
+ estack_ax(stack, top)->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printk("Bytecode warning: loading a NULL sequence.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ estack_ax(stack, top)->type = REG_STRING;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ END_OP
+end:
+ /* Return _DISCARD on error. */
+ if (ret)
+ return LTTNG_INTERPRETER_DISCARD;
+
+ if (output) {
+ return lttng_bytecode_interpret_format_output(
+ estack_ax(stack, top), output);
+ }
+
+ return retval;
+}
+LTTNG_STACK_FRAME_NON_STANDARD(bytecode_interpret);
+
+/*
+ * Filter bytecode entry point: run the interpreter over the filter
+ * program with no capture output (passing a NULL output selects the
+ * plain filter-result return path of bytecode_interpret()).
+ */
+uint64_t lttng_bytecode_filter_interpret(void *filter_data,
+		struct lttng_probe_ctx *lttng_probe_ctx,
+		const char *filter_stack_data)
+{
+	uint64_t result;
+
+	result = bytecode_interpret(filter_data, lttng_probe_ctx,
+			filter_stack_data, NULL);
+	return result;
+}
+
+/*
+ * Capture bytecode entry point: run the interpreter over the capture
+ * program and format the top-of-stack register into @output (a
+ * non-NULL output selects the format-output path of
+ * bytecode_interpret()).
+ */
+uint64_t lttng_bytecode_capture_interpret(void *capture_data,
+		struct lttng_probe_ctx *lttng_probe_ctx,
+		const char *capture_stack_data,
+		struct lttng_interpreter_output *output)
+{
+	uint64_t result;
+
+	result = bytecode_interpret(capture_data, lttng_probe_ctx,
+			capture_stack_data, output);
+	return result;
+}
+
+#undef START_OP
+#undef OP
+#undef PO
+#undef END_OP
--- /dev/null
+/* SPDX-License-Identifier: MIT
+ *
+ * lttng-bytecode-specialize.c
+ *
+ * LTTng modules bytecode code specializer.
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/slab.h>
+#include <lttng/lttng-bytecode.h>
+#include <lttng/align.h>
+
+/*
+ * Reserve @len bytes in the runtime data area, aligned on @align.
+ * Grows the backing allocation (power-of-two sizing, at least
+ * doubling) when needed.
+ *
+ * Returns the byte offset of the reserved region within
+ * runtime->data, or a negative error: -EINVAL when the total would
+ * exceed INTERPRETER_MAX_DATA_LEN, -ENOMEM on allocation failure.
+ * On failure, runtime->data and runtime->data_len are left intact.
+ */
+static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
+		size_t align, size_t len)
+{
+	ssize_t ret;
+	/* Padding needed so the new region starts on an @align boundary. */
+	size_t padding = offset_align(runtime->data_len, align);
+	size_t new_len = runtime->data_len + padding + len;
+	size_t new_alloc_len = new_len;
+	size_t old_alloc_len = runtime->data_alloc_len;
+
+	if (new_len > INTERPRETER_MAX_DATA_LEN)
+		return -EINVAL;
+
+	if (new_alloc_len > old_alloc_len) {
+		char *newptr;
+
+		/* Round up to a power of two, at least doubling. */
+		new_alloc_len =
+			max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
+		newptr = krealloc(runtime->data, new_alloc_len, GFP_KERNEL);
+		if (!newptr)
+			return -ENOMEM;
+		runtime->data = newptr;
+		/* Zero the newly grown tail (krealloc does not zero it). */
+		memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
+		runtime->data_alloc_len = new_alloc_len;
+	}
+	/* Consume padding first; the returned offset is the aligned position. */
+	runtime->data_len += padding;
+	ret = runtime->data_len;
+	runtime->data_len += len;
+	return ret;
+}
+
+/*
+ * Copy @len bytes from @p into the runtime data area at an
+ * @align-aligned offset. Returns the offset of the copied data
+ * within runtime->data, or a negative error code.
+ */
+static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
+		const void *p, size_t align, size_t len)
+{
+	ssize_t offset;
+
+	offset = bytecode_reserve_data(runtime, align, len);
+	if (offset < 0)
+		return offset;	/* Propagate -EINVAL/-ENOMEM rather than masking as -ENOMEM. */
+	memcpy(&runtime->data[offset], p, len);
+	return offset;
+}
+
+/*
+ * Specialize a generic load-field instruction into a type-specific
+ * load opcode based on the object type known at specialization time,
+ * and record the resulting register type on the virtual stack top.
+ * Only objects can be loaded (roots lack a field name); enumerations
+ * keep REG_PTR. Returns 0 on success, negative error code otherwise.
+ */
+static int specialize_load_field(struct vstack_entry *stack_top,
+		struct load_op *insn)
+{
+	int ret;
+
+	switch (stack_top->load.type) {
+	case LOAD_OBJECT:
+		break;
+	case LOAD_ROOT_CONTEXT:
+	case LOAD_ROOT_APP_CONTEXT:
+	case LOAD_ROOT_PAYLOAD:
+	default:
+		dbg_printk("Bytecode warning: cannot load root, missing field name.\n");
+		ret = -EINVAL;
+		goto end;
+	}
+	switch (stack_top->load.object_type) {
+	case OBJECT_TYPE_S8:
+		dbg_printk("op load field s8\n");
+		stack_top->type = REG_S64;
+		/* Reverse byte order keeps the generic load opcode. */
+		if (!stack_top->load.rev_bo)
+			insn->op = BYTECODE_OP_LOAD_FIELD_S8;
+		break;
+	case OBJECT_TYPE_S16:
+		dbg_printk("op load field s16\n");
+		stack_top->type = REG_S64;
+		if (!stack_top->load.rev_bo)
+			insn->op = BYTECODE_OP_LOAD_FIELD_S16;
+		break;
+	case OBJECT_TYPE_S32:
+		dbg_printk("op load field s32\n");
+		stack_top->type = REG_S64;
+		if (!stack_top->load.rev_bo)
+			insn->op = BYTECODE_OP_LOAD_FIELD_S32;
+		break;
+	case OBJECT_TYPE_S64:
+		dbg_printk("op load field s64\n");
+		stack_top->type = REG_S64;
+		if (!stack_top->load.rev_bo)
+			insn->op = BYTECODE_OP_LOAD_FIELD_S64;
+		break;
+	case OBJECT_TYPE_SIGNED_ENUM:
+		dbg_printk("op load field signed enumeration\n");
+		stack_top->type = REG_PTR;
+		break;
+	case OBJECT_TYPE_U8:
+		dbg_printk("op load field u8\n");
+		stack_top->type = REG_S64;
+		/*
+		 * NOTE(review): unlike the S8 case, no rev_bo check here.
+		 * A single byte has no byte order, so this is harmless,
+		 * but confirm whether the S8 check is intentional.
+		 */
+		insn->op = BYTECODE_OP_LOAD_FIELD_U8;
+		break;
+	case OBJECT_TYPE_U16:
+		dbg_printk("op load field u16\n");
+		stack_top->type = REG_S64;
+		if (!stack_top->load.rev_bo)
+			insn->op = BYTECODE_OP_LOAD_FIELD_U16;
+		break;
+	case OBJECT_TYPE_U32:
+		dbg_printk("op load field u32\n");
+		stack_top->type = REG_S64;
+		if (!stack_top->load.rev_bo)
+			insn->op = BYTECODE_OP_LOAD_FIELD_U32;
+		break;
+	case OBJECT_TYPE_U64:
+		dbg_printk("op load field u64\n");
+		stack_top->type = REG_S64;
+		if (!stack_top->load.rev_bo)
+			insn->op = BYTECODE_OP_LOAD_FIELD_U64;
+		break;
+	case OBJECT_TYPE_UNSIGNED_ENUM:
+		dbg_printk("op load field unsigned enumeration\n");
+		stack_top->type = REG_PTR;
+		break;
+	case OBJECT_TYPE_DOUBLE:
+		/* Fixed stray double newline in the message. */
+		printk(KERN_WARNING "LTTng: bytecode: Double type unsupported\n");
+		ret = -EINVAL;
+		goto end;
+	case OBJECT_TYPE_STRING:
+		dbg_printk("op load field string\n");
+		stack_top->type = REG_STRING;
+		insn->op = BYTECODE_OP_LOAD_FIELD_STRING;
+		break;
+	case OBJECT_TYPE_STRING_SEQUENCE:
+		dbg_printk("op load field string sequence\n");
+		stack_top->type = REG_STRING;
+		insn->op = BYTECODE_OP_LOAD_FIELD_SEQUENCE;
+		break;
+	case OBJECT_TYPE_DYNAMIC:
+		ret = -EINVAL;
+		goto end;
+	case OBJECT_TYPE_SEQUENCE:
+	case OBJECT_TYPE_ARRAY:
+	case OBJECT_TYPE_STRUCT:
+	case OBJECT_TYPE_VARIANT:
+		printk(KERN_WARNING "LTTng: bytecode: Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
+		ret = -EINVAL;
+		goto end;
+	}
+	return 0;
+
+end:
+	return ret;
+}
+
+/*
+ * Map an element bit-width and signedness to the matching integer
+ * object type. Only 8/16/32/64-bit widths are accepted; any other
+ * width yields -EINVAL. Returns 0 on success.
+ */
+static int specialize_get_index_object_type(enum object_type *otype,
+		int signedness, uint32_t elem_len)
+{
+	if (signedness) {
+		switch (elem_len) {
+		case 8:
+			*otype = OBJECT_TYPE_S8;
+			break;
+		case 16:
+			*otype = OBJECT_TYPE_S16;
+			break;
+		case 32:
+			*otype = OBJECT_TYPE_S32;
+			break;
+		case 64:
+			*otype = OBJECT_TYPE_S64;
+			break;
+		default:
+			return -EINVAL;
+		}
+	} else {
+		switch (elem_len) {
+		case 8:
+			*otype = OBJECT_TYPE_U8;
+			break;
+		case 16:
+			*otype = OBJECT_TYPE_U16;
+			break;
+		case 32:
+			*otype = OBJECT_TYPE_U32;
+			break;
+		case 64:
+			*otype = OBJECT_TYPE_U64;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Specialize a get-index operation applied to the object on the
+ * virtual stack top: validate the indexed type (bytewise integer
+ * arrays and sequences only), compute the element byte offset and
+ * length, and append a struct bytecode_get_index_data record to the
+ * runtime data area. The record's data offset is patched into the
+ * instruction operand (u16 or u64 form, selected by @idx_len).
+ * Returns 0 on success, negative error code otherwise.
+ */
+static int specialize_get_index(struct bytecode_runtime *runtime,
+		struct load_op *insn, uint64_t index,
+		struct vstack_entry *stack_top,
+		int idx_len)
+{
+	int ret;
+	struct bytecode_get_index_data gid;
+	ssize_t data_offset;
+
+	memset(&gid, 0, sizeof(gid));
+	switch (stack_top->load.type) {
+	case LOAD_OBJECT:
+		switch (stack_top->load.object_type) {
+		case OBJECT_TYPE_ARRAY:
+		{
+			const struct lttng_integer_type *integer_type;
+			const struct lttng_event_field *field;
+			uint32_t elem_len, num_elems;
+			int signedness;
+
+			field = stack_top->load.field;
+			if (!lttng_is_bytewise_integer(field->type.u.array_nestable.elem_type)) {
+				ret = -EINVAL;
+				goto end;
+			}
+			integer_type = &field->type.u.array_nestable.elem_type->u.integer;
+			num_elems = field->type.u.array_nestable.length;
+			elem_len = integer_type->size;
+			signedness = integer_type->signedness;
+			/* Array length is static: bound-check the index now. */
+			if (index >= num_elems) {
+				ret = -EINVAL;
+				goto end;
+			}
+			ret = specialize_get_index_object_type(&stack_top->load.object_type,
+					signedness, elem_len);
+			if (ret)
+				goto end;
+			gid.offset = index * (elem_len / CHAR_BIT);
+			gid.array_len = num_elems * (elem_len / CHAR_BIT);
+			gid.elem.type = stack_top->load.object_type;
+			gid.elem.len = elem_len;
+			if (integer_type->reverse_byte_order)
+				gid.elem.rev_bo = true;
+			stack_top->load.rev_bo = gid.elem.rev_bo;
+			break;
+		}
+		case OBJECT_TYPE_SEQUENCE:
+		{
+			const struct lttng_integer_type *integer_type;
+			const struct lttng_event_field *field;
+			uint32_t elem_len;
+			int signedness;
+
+			field = stack_top->load.field;
+			if (!lttng_is_bytewise_integer(field->type.u.sequence_nestable.elem_type)) {
+				ret = -EINVAL;
+				goto end;
+			}
+			integer_type = &field->type.u.sequence_nestable.elem_type->u.integer;
+			elem_len = integer_type->size;
+			signedness = integer_type->signedness;
+			/*
+			 * Sequence length is only known at runtime: no
+			 * static bound check is possible here.
+			 */
+			ret = specialize_get_index_object_type(&stack_top->load.object_type,
+					signedness, elem_len);
+			if (ret)
+				goto end;
+			gid.offset = index * (elem_len / CHAR_BIT);
+			gid.elem.type = stack_top->load.object_type;
+			gid.elem.len = elem_len;
+			if (integer_type->reverse_byte_order)
+				gid.elem.rev_bo = true;
+			stack_top->load.rev_bo = gid.elem.rev_bo;
+			break;
+		}
+		case OBJECT_TYPE_STRUCT:
+			/* Only generated by the specialize phase. */
+		case OBJECT_TYPE_VARIANT:	/* Fall-through */
+		default:
+			/* Fixed missing trailing newline in the message. */
+			printk(KERN_WARNING "LTTng: bytecode: Unexpected get index type %d\n",
+				(int) stack_top->load.object_type);
+			ret = -EINVAL;
+			goto end;
+		}
+		break;
+	case LOAD_ROOT_CONTEXT:
+	case LOAD_ROOT_APP_CONTEXT:
+	case LOAD_ROOT_PAYLOAD:
+		printk(KERN_WARNING "LTTng: bytecode: Index lookup for root field not implemented yet.\n");
+		ret = -EINVAL;
+		goto end;
+	}
+	data_offset = bytecode_push_data(runtime, &gid,
+			__alignof__(gid), sizeof(gid));
+	if (data_offset < 0) {
+		ret = -EINVAL;
+		goto end;
+	}
+	/* Patch the data-area offset into the instruction operand. */
+	switch (idx_len) {
+	case 2:
+		((struct get_index_u16 *) insn->data)->index = data_offset;
+		break;
+	case 8:
+		((struct get_index_u64 *) insn->data)->index = data_offset;
+		break;
+	default:
+		ret = -EINVAL;
+		goto end;
+	}
+
+	return 0;
+
+end:
+	return ret;
+}
+
+/*
+ * Resolve the context field name referenced by a get_symbol operand
+ * (an offset into the bytecode relocation table) to its index in
+ * @ctx. Returns the context index, or negative if not found.
+ */
+static int specialize_context_lookup_name(struct lttng_ctx *ctx,
+		struct bytecode_runtime *bytecode,
+		struct load_op *insn)
+{
+	const struct get_symbol *sym = (struct get_symbol *) insn->data;
+	const char *name = bytecode->p.bc->bc.data
+			+ bytecode->p.bc->bc.reloc_offset + sym->offset;
+
+	return lttng_get_context_index(ctx, name);
+}
+
+/*
+ * Fill @load with the object type matching @field's event field type.
+ * Context fields (@is_context) expose arrays/sequences as strings.
+ * Structure and variant types cannot be loaded. Returns 0 on
+ * success, -EINVAL on unsupported types.
+ */
+static int specialize_load_object(const struct lttng_event_field *field,
+		struct vstack_load *load, bool is_context)
+{
+	load->type = LOAD_OBJECT;
+
+	switch (field->type.atype) {
+	case atype_integer:
+		if (field->type.u.integer.signedness)
+			load->object_type = OBJECT_TYPE_S64;
+		else
+			load->object_type = OBJECT_TYPE_U64;
+		load->rev_bo = false;
+		break;
+	case atype_enum_nestable:
+	{
+		const struct lttng_integer_type *itype =
+			&field->type.u.enum_nestable.container_type->u.integer;
+
+		if (itype->signedness)
+			load->object_type = OBJECT_TYPE_SIGNED_ENUM;
+		else
+			load->object_type = OBJECT_TYPE_UNSIGNED_ENUM;
+		load->rev_bo = false;
+		break;
+	}
+	case atype_array_nestable:
+		if (!lttng_is_bytewise_integer(field->type.u.array_nestable.elem_type)) {
+			printk(KERN_WARNING "LTTng: bytecode: Array nesting only supports integer types.\n");
+			return -EINVAL;
+		}
+		if (is_context) {
+			load->object_type = OBJECT_TYPE_STRING;
+		} else {
+			/* Unencoded integer elements: indexable array. */
+			if (field->type.u.array_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
+				load->object_type = OBJECT_TYPE_ARRAY;
+				load->field = field;
+			} else {
+				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
+			}
+		}
+		break;
+	case atype_sequence_nestable:
+		if (!lttng_is_bytewise_integer(field->type.u.sequence_nestable.elem_type)) {
+			printk(KERN_WARNING "LTTng: bytecode: Sequence nesting only supports integer types.\n");
+			return -EINVAL;
+		}
+		if (is_context) {
+			load->object_type = OBJECT_TYPE_STRING;
+		} else {
+			/* Unencoded integer elements: indexable sequence. */
+			if (field->type.u.sequence_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
+				load->object_type = OBJECT_TYPE_SEQUENCE;
+				load->field = field;
+			} else {
+				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
+			}
+		}
+		break;
+	case atype_string:
+		load->object_type = OBJECT_TYPE_STRING;
+		break;
+	case atype_struct_nestable:
+		printk(KERN_WARNING "LTTng: bytecode: Structure type cannot be loaded.\n");
+		return -EINVAL;
+	case atype_variant_nestable:
+		printk(KERN_WARNING "LTTng: bytecode: Variant type cannot be loaded.\n");
+		return -EINVAL;
+	default:
+		/* Fixed missing trailing newline in the message. */
+		printk(KERN_WARNING "LTTng: bytecode: Unknown type: %d\n", (int) field->type.atype);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * Specialize a get_symbol lookup against the context: resolve the
+ * symbol name to a context index, derive the load object type from
+ * the context field, and rewrite the instruction into a
+ * BYTECODE_OP_GET_INDEX_U16 whose operand points at a
+ * bytecode_get_index_data record pushed into the runtime data area.
+ * Returns 0 on success, negative error code otherwise.
+ */
+static int specialize_context_lookup(struct lttng_ctx *ctx,
+		struct bytecode_runtime *runtime,
+		struct load_op *insn,
+		struct vstack_load *load)
+{
+	int idx, ret;
+	struct lttng_ctx_field *ctx_field;
+	struct lttng_event_field *field;
+	struct bytecode_get_index_data gid;
+	ssize_t data_offset;
+
+	idx = specialize_context_lookup_name(ctx, runtime, insn);
+	if (idx < 0) {
+		return -ENOENT;
+	}
+	/* Fixed mangled "&lttng_static_ctx" (was corrupted to "<tng_static_ctx"). */
+	ctx_field = &lttng_static_ctx->fields[idx];
+	field = &ctx_field->event_field;
+	ret = specialize_load_object(field, load, true);
+	if (ret)
+		return ret;
+	/* Specialize each get_symbol into a get_index. */
+	insn->op = BYTECODE_OP_GET_INDEX_U16;
+	memset(&gid, 0, sizeof(gid));
+	gid.ctx_index = idx;
+	gid.elem.type = load->object_type;
+	gid.elem.rev_bo = load->rev_bo;
+	gid.field = field;
+	data_offset = bytecode_push_data(runtime, &gid,
+			__alignof__(gid), sizeof(gid));
+	if (data_offset < 0) {
+		return -EINVAL;
+	}
+	((struct get_index_u16 *) insn->data)->index = data_offset;
+	return 0;
+}
+
+/*
+ * Specialize a get_symbol lookup against the event payload: find the
+ * named field in @event_desc, compute its byte offset on the
+ * interpreter stack, and rewrite the instruction into a
+ * BYTECODE_OP_GET_INDEX_U16 whose operand points at a
+ * bytecode_get_index_data record pushed into the runtime data area.
+ * Returns 0 on success, negative error code otherwise.
+ */
+static int specialize_payload_lookup(const struct lttng_event_desc *event_desc,
+		struct bytecode_runtime *runtime,
+		struct load_op *insn,
+		struct vstack_load *load)
+{
+	const char *name;
+	uint16_t offset;
+	unsigned int i, nr_fields;
+	bool found = false;
+	uint32_t field_offset = 0;
+	const struct lttng_event_field *field;
+	int ret;
+	struct bytecode_get_index_data gid;
+	ssize_t data_offset;
+
+	nr_fields = event_desc->nr_fields;
+	/* The symbol operand is an offset into the relocation table. */
+	offset = ((struct get_symbol *) insn->data)->offset;
+	name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
+	for (i = 0; i < nr_fields; i++) {
+		field = &event_desc->fields[i];
+		/* Fields excluded from filtering take no stack slot. */
+		if (field->nofilter) {
+			continue;
+		}
+		if (!strcmp(field->name, name)) {
+			found = true;
+			break;
+		}
+		/*
+		 * compute field offset on stack: sizes mirror the
+		 * interpreter stack layout (integers/enums as int64_t;
+		 * sequences as an unsigned long length followed by a
+		 * pointer; strings as a pointer).
+		 */
+		switch (field->type.atype) {
+		case atype_integer:
+		case atype_enum_nestable:
+			field_offset += sizeof(int64_t);
+			break;
+		case atype_array_nestable:
+		case atype_sequence_nestable:
+			field_offset += sizeof(unsigned long);
+			field_offset += sizeof(void *);
+			break;
+		case atype_string:
+			field_offset += sizeof(void *);
+			break;
+		default:
+			ret = -EINVAL;
+			goto end;
+		}
+	}
+	if (!found) {
+		ret = -EINVAL;
+		goto end;
+	}
+
+	ret = specialize_load_object(field, load, false);
+	if (ret)
+		goto end;
+
+	/* Specialize each get_symbol into a get_index. */
+	insn->op = BYTECODE_OP_GET_INDEX_U16;
+	memset(&gid, 0, sizeof(gid));
+	gid.offset = field_offset;
+	gid.elem.type = load->object_type;
+	gid.elem.rev_bo = load->rev_bo;
+	gid.field = field;
+	data_offset = bytecode_push_data(runtime, &gid,
+			__alignof__(gid), sizeof(gid));
+	if (data_offset < 0) {
+		ret = -EINVAL;
+		goto end;
+	}
+	((struct get_index_u16 *) insn->data)->index = data_offset;
+	ret = 0;
+end:
+	return ret;
+}
+
+/*
+ * Specialization pass: walk the bytecode linearly, tracking operand
+ * types on a virtual stack, and rewrite each generic operation
+ * (e.g. BYTECODE_OP_EQ) into its type-specialized variant
+ * (e.g. BYTECODE_OP_EQ_S64) in place.
+ *
+ * @event_desc: event descriptor used for payload field lookups.
+ * @bytecode: runtime bytecode to specialize (modified in place).
+ *
+ * Returns 0 on success, negative error value on error.
+ */
+int lttng_bytecode_specialize(const struct lttng_event_desc *event_desc,
+		struct bytecode_runtime *bytecode)
+{
+	void *pc, *next_pc, *start_pc;
+	int ret = -EINVAL;
+	struct vstack _stack;
+	struct vstack *stack = &_stack;
+	struct lttng_ctx *ctx = bytecode->p.ctx;
+
+	vstack_init(stack);
+
+	start_pc = &bytecode->code[0];
+	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
+			pc = next_pc) {
+		switch (*(bytecode_opcode_t *) pc) {
+		case BYTECODE_OP_UNKNOWN:
+		default:
+			printk(KERN_WARNING "LTTng: bytecode: unknown bytecode op %u\n",
+				(unsigned int) *(bytecode_opcode_t *) pc);
+			ret = -EINVAL;
+			goto end;
+
+		case BYTECODE_OP_RETURN:
+		case BYTECODE_OP_RETURN_S64:
+			ret = 0;
+			goto end;
+
+		/* binary */
+		case BYTECODE_OP_MUL:
+		case BYTECODE_OP_DIV:
+		case BYTECODE_OP_MOD:
+		case BYTECODE_OP_PLUS:
+		case BYTECODE_OP_MINUS:
+			/* Arithmetic operators are not supported. */
+			printk(KERN_WARNING "LTTng: bytecode: unknown bytecode op %u\n",
+				(unsigned int) *(bytecode_opcode_t *) pc);
+			ret = -EINVAL;
+			goto end;
+
+		case BYTECODE_OP_EQ:
+		{
+			struct binary_op *insn = (struct binary_op *) pc;
+
+			switch(vstack_ax(stack)->type) {
+			default:
+				printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
+				ret = -EINVAL;
+				goto end;
+
+			case REG_STRING:
+				if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
+					insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING;
+				else
+					insn->op = BYTECODE_OP_EQ_STRING;
+				break;
+			case REG_STAR_GLOB_STRING:
+				insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING;
+				break;
+			case REG_S64:
+				if (vstack_bx(stack)->type == REG_S64)
+					insn->op = BYTECODE_OP_EQ_S64;
+				else
+					insn->op = BYTECODE_OP_EQ_DOUBLE_S64;
+				break;
+			case REG_DOUBLE:
+				if (vstack_bx(stack)->type == REG_S64)
+					insn->op = BYTECODE_OP_EQ_S64_DOUBLE;
+				else
+					insn->op = BYTECODE_OP_EQ_DOUBLE;
+				break;
+			}
+			/* Pop 2, push 1 */
+			if (vstack_pop(stack)) {
+				ret = -EINVAL;
+				goto end;
+			}
+			vstack_ax(stack)->type = REG_S64;
+			next_pc += sizeof(struct binary_op);
+			break;
+		}
+
+		case BYTECODE_OP_NE:
+		{
+			struct binary_op *insn = (struct binary_op *) pc;
+
+			switch(vstack_ax(stack)->type) {
+			default:
+				printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
+				ret = -EINVAL;
+				goto end;
+
+			case REG_STRING:
+				if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
+					insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING;
+				else
+					insn->op = BYTECODE_OP_NE_STRING;
+				break;
+			case REG_STAR_GLOB_STRING:
+				insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING;
+				break;
+			case REG_S64:
+				if (vstack_bx(stack)->type == REG_S64)
+					insn->op = BYTECODE_OP_NE_S64;
+				else
+					insn->op = BYTECODE_OP_NE_DOUBLE_S64;
+				break;
+			case REG_DOUBLE:
+				if (vstack_bx(stack)->type == REG_S64)
+					insn->op = BYTECODE_OP_NE_S64_DOUBLE;
+				else
+					insn->op = BYTECODE_OP_NE_DOUBLE;
+				break;
+			}
+			/* Pop 2, push 1 */
+			if (vstack_pop(stack)) {
+				ret = -EINVAL;
+				goto end;
+			}
+			vstack_ax(stack)->type = REG_S64;
+			next_pc += sizeof(struct binary_op);
+			break;
+		}
+
+		case BYTECODE_OP_GT:
+		{
+			struct binary_op *insn = (struct binary_op *) pc;
+
+			switch(vstack_ax(stack)->type) {
+			default:
+				printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
+				ret = -EINVAL;
+				goto end;
+
+			case REG_STAR_GLOB_STRING:
+				printk(KERN_WARNING "LTTng: bytecode: invalid register type for '>' binary operator\n");
+				ret = -EINVAL;
+				goto end;
+			case REG_STRING:
+				insn->op = BYTECODE_OP_GT_STRING;
+				break;
+			case REG_S64:
+				if (vstack_bx(stack)->type == REG_S64)
+					insn->op = BYTECODE_OP_GT_S64;
+				else
+					insn->op = BYTECODE_OP_GT_DOUBLE_S64;
+				break;
+			case REG_DOUBLE:
+				if (vstack_bx(stack)->type == REG_S64)
+					insn->op = BYTECODE_OP_GT_S64_DOUBLE;
+				else
+					insn->op = BYTECODE_OP_GT_DOUBLE;
+				break;
+			}
+			/* Pop 2, push 1 */
+			if (vstack_pop(stack)) {
+				ret = -EINVAL;
+				goto end;
+			}
+			vstack_ax(stack)->type = REG_S64;
+			next_pc += sizeof(struct binary_op);
+			break;
+		}
+
+		case BYTECODE_OP_LT:
+		{
+			struct binary_op *insn = (struct binary_op *) pc;
+
+			switch(vstack_ax(stack)->type) {
+			default:
+				printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
+				ret = -EINVAL;
+				goto end;
+
+			case REG_STAR_GLOB_STRING:
+				printk(KERN_WARNING "LTTng: bytecode: invalid register type for '<' binary operator\n");
+				ret = -EINVAL;
+				goto end;
+			case REG_STRING:
+				insn->op = BYTECODE_OP_LT_STRING;
+				break;
+			case REG_S64:
+				if (vstack_bx(stack)->type == REG_S64)
+					insn->op = BYTECODE_OP_LT_S64;
+				else
+					insn->op = BYTECODE_OP_LT_DOUBLE_S64;
+				break;
+			case REG_DOUBLE:
+				if (vstack_bx(stack)->type == REG_S64)
+					insn->op = BYTECODE_OP_LT_S64_DOUBLE;
+				else
+					insn->op = BYTECODE_OP_LT_DOUBLE;
+				break;
+			}
+			/* Pop 2, push 1 */
+			if (vstack_pop(stack)) {
+				ret = -EINVAL;
+				goto end;
+			}
+			vstack_ax(stack)->type = REG_S64;
+			next_pc += sizeof(struct binary_op);
+			break;
+		}
+
+		case BYTECODE_OP_GE:
+		{
+			struct binary_op *insn = (struct binary_op *) pc;
+
+			switch(vstack_ax(stack)->type) {
+			default:
+				printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
+				ret = -EINVAL;
+				goto end;
+
+			case REG_STAR_GLOB_STRING:
+				printk(KERN_WARNING "LTTng: bytecode: invalid register type for '>=' binary operator\n");
+				ret = -EINVAL;
+				goto end;
+			case REG_STRING:
+				insn->op = BYTECODE_OP_GE_STRING;
+				break;
+			case REG_S64:
+				if (vstack_bx(stack)->type == REG_S64)
+					insn->op = BYTECODE_OP_GE_S64;
+				else
+					insn->op = BYTECODE_OP_GE_DOUBLE_S64;
+				break;
+			case REG_DOUBLE:
+				if (vstack_bx(stack)->type == REG_S64)
+					insn->op = BYTECODE_OP_GE_S64_DOUBLE;
+				else
+					insn->op = BYTECODE_OP_GE_DOUBLE;
+				break;
+			}
+			/* Pop 2, push 1 */
+			if (vstack_pop(stack)) {
+				ret = -EINVAL;
+				goto end;
+			}
+			vstack_ax(stack)->type = REG_S64;
+			next_pc += sizeof(struct binary_op);
+			break;
+		}
+		case BYTECODE_OP_LE:
+		{
+			struct binary_op *insn = (struct binary_op *) pc;
+
+			switch(vstack_ax(stack)->type) {
+			default:
+				printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
+				ret = -EINVAL;
+				goto end;
+
+			case REG_STAR_GLOB_STRING:
+				printk(KERN_WARNING "LTTng: bytecode: invalid register type for '<=' binary operator\n");
+				ret = -EINVAL;
+				goto end;
+			case REG_STRING:
+				insn->op = BYTECODE_OP_LE_STRING;
+				break;
+			case REG_S64:
+				if (vstack_bx(stack)->type == REG_S64)
+					insn->op = BYTECODE_OP_LE_S64;
+				else
+					insn->op = BYTECODE_OP_LE_DOUBLE_S64;
+				break;
+			case REG_DOUBLE:
+				if (vstack_bx(stack)->type == REG_S64)
+					insn->op = BYTECODE_OP_LE_S64_DOUBLE;
+				else
+					insn->op = BYTECODE_OP_LE_DOUBLE;
+				break;
+			}
+			/*
+			 * Pop 2, push 1.
+			 * Fix: the pop was missing here, unlike all sibling
+			 * comparison operators, leaving the virtual stack
+			 * unbalanced after a '<=' comparison.
+			 */
+			if (vstack_pop(stack)) {
+				ret = -EINVAL;
+				goto end;
+			}
+			vstack_ax(stack)->type = REG_S64;
+			next_pc += sizeof(struct binary_op);
+			break;
+		}
+
+		case BYTECODE_OP_EQ_STRING:
+		case BYTECODE_OP_NE_STRING:
+		case BYTECODE_OP_GT_STRING:
+		case BYTECODE_OP_LT_STRING:
+		case BYTECODE_OP_GE_STRING:
+		case BYTECODE_OP_LE_STRING:
+		case BYTECODE_OP_EQ_STAR_GLOB_STRING:
+		case BYTECODE_OP_NE_STAR_GLOB_STRING:
+		case BYTECODE_OP_EQ_S64:
+		case BYTECODE_OP_NE_S64:
+		case BYTECODE_OP_GT_S64:
+		case BYTECODE_OP_LT_S64:
+		case BYTECODE_OP_GE_S64:
+		case BYTECODE_OP_LE_S64:
+		case BYTECODE_OP_EQ_DOUBLE:
+		case BYTECODE_OP_NE_DOUBLE:
+		case BYTECODE_OP_GT_DOUBLE:
+		case BYTECODE_OP_LT_DOUBLE:
+		case BYTECODE_OP_GE_DOUBLE:
+		case BYTECODE_OP_LE_DOUBLE:
+		case BYTECODE_OP_EQ_DOUBLE_S64:
+		case BYTECODE_OP_NE_DOUBLE_S64:
+		case BYTECODE_OP_GT_DOUBLE_S64:
+		case BYTECODE_OP_LT_DOUBLE_S64:
+		case BYTECODE_OP_GE_DOUBLE_S64:
+		case BYTECODE_OP_LE_DOUBLE_S64:
+		case BYTECODE_OP_EQ_S64_DOUBLE:
+		case BYTECODE_OP_NE_S64_DOUBLE:
+		case BYTECODE_OP_GT_S64_DOUBLE:
+		case BYTECODE_OP_LT_S64_DOUBLE:
+		case BYTECODE_OP_GE_S64_DOUBLE:
+		case BYTECODE_OP_LE_S64_DOUBLE:
+		case BYTECODE_OP_BIT_RSHIFT:
+		case BYTECODE_OP_BIT_LSHIFT:
+		case BYTECODE_OP_BIT_AND:
+		case BYTECODE_OP_BIT_OR:
+		case BYTECODE_OP_BIT_XOR:
+		{
+			/* Already specialized. Pop 2, push 1 */
+			if (vstack_pop(stack)) {
+				ret = -EINVAL;
+				goto end;
+			}
+			vstack_ax(stack)->type = REG_S64;
+			next_pc += sizeof(struct binary_op);
+			break;
+		}
+
+		/* unary */
+		case BYTECODE_OP_UNARY_PLUS:
+		{
+			struct unary_op *insn = (struct unary_op *) pc;
+
+			switch(vstack_ax(stack)->type) {
+			default:
+				printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
+				ret = -EINVAL;
+				goto end;
+
+			case REG_S64:
+				insn->op = BYTECODE_OP_UNARY_PLUS_S64;
+				break;
+			case REG_DOUBLE:
+				insn->op = BYTECODE_OP_UNARY_PLUS_DOUBLE;
+				break;
+			}
+			/* Pop 1, push 1 */
+			next_pc += sizeof(struct unary_op);
+			break;
+		}
+
+		case BYTECODE_OP_UNARY_MINUS:
+		{
+			struct unary_op *insn = (struct unary_op *) pc;
+
+			switch(vstack_ax(stack)->type) {
+			default:
+				printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
+				ret = -EINVAL;
+				goto end;
+
+			case REG_S64:
+				insn->op = BYTECODE_OP_UNARY_MINUS_S64;
+				break;
+			case REG_DOUBLE:
+				insn->op = BYTECODE_OP_UNARY_MINUS_DOUBLE;
+				break;
+			}
+			/* Pop 1, push 1 */
+			next_pc += sizeof(struct unary_op);
+			break;
+		}
+
+		case BYTECODE_OP_UNARY_NOT:
+		{
+			struct unary_op *insn = (struct unary_op *) pc;
+
+			switch(vstack_ax(stack)->type) {
+			default:
+				printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
+				ret = -EINVAL;
+				goto end;
+
+			case REG_S64:
+				insn->op = BYTECODE_OP_UNARY_NOT_S64;
+				break;
+			case REG_DOUBLE:
+				insn->op = BYTECODE_OP_UNARY_NOT_DOUBLE;
+				break;
+			}
+			/* Pop 1, push 1 */
+			next_pc += sizeof(struct unary_op);
+			break;
+		}
+
+		case BYTECODE_OP_UNARY_BIT_NOT:
+		{
+			/* Pop 1, push 1 */
+			next_pc += sizeof(struct unary_op);
+			break;
+		}
+
+		case BYTECODE_OP_UNARY_PLUS_S64:
+		case BYTECODE_OP_UNARY_MINUS_S64:
+		case BYTECODE_OP_UNARY_NOT_S64:
+		case BYTECODE_OP_UNARY_PLUS_DOUBLE:
+		case BYTECODE_OP_UNARY_MINUS_DOUBLE:
+		case BYTECODE_OP_UNARY_NOT_DOUBLE:
+		{
+			/* Pop 1, push 1 */
+			next_pc += sizeof(struct unary_op);
+			break;
+		}
+
+		/* logical */
+		case BYTECODE_OP_AND:
+		case BYTECODE_OP_OR:
+		{
+			/* Continue to next instruction */
+			/* Pop 1 when jump not taken */
+			if (vstack_pop(stack)) {
+				ret = -EINVAL;
+				goto end;
+			}
+			next_pc += sizeof(struct logical_op);
+			break;
+		}
+
+		/* load field ref */
+		case BYTECODE_OP_LOAD_FIELD_REF:
+		{
+			printk(KERN_WARNING "LTTng: bytecode: Unknown field ref type\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		/* get context ref */
+		case BYTECODE_OP_GET_CONTEXT_REF:
+		{
+			printk(KERN_WARNING "LTTng: bytecode: Unknown get context ref type\n");
+			ret = -EINVAL;
+			goto end;
+		}
+		case BYTECODE_OP_LOAD_FIELD_REF_STRING:
+		case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
+		case BYTECODE_OP_GET_CONTEXT_REF_STRING:
+		case BYTECODE_OP_LOAD_FIELD_REF_USER_STRING:
+		case BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE:
+		{
+			if (vstack_push(stack)) {
+				ret = -EINVAL;
+				goto end;
+			}
+			vstack_ax(stack)->type = REG_STRING;
+			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+			break;
+		}
+		case BYTECODE_OP_LOAD_FIELD_REF_S64:
+		case BYTECODE_OP_GET_CONTEXT_REF_S64:
+		{
+			if (vstack_push(stack)) {
+				ret = -EINVAL;
+				goto end;
+			}
+			vstack_ax(stack)->type = REG_S64;
+			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+			break;
+		}
+		case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
+		case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
+		{
+			if (vstack_push(stack)) {
+				ret = -EINVAL;
+				goto end;
+			}
+			vstack_ax(stack)->type = REG_DOUBLE;
+			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+			break;
+		}
+
+		/* load from immediate operand */
+		case BYTECODE_OP_LOAD_STRING:
+		{
+			struct load_op *insn = (struct load_op *) pc;
+
+			if (vstack_push(stack)) {
+				ret = -EINVAL;
+				goto end;
+			}
+			vstack_ax(stack)->type = REG_STRING;
+			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+			break;
+		}
+
+		case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
+		{
+			struct load_op *insn = (struct load_op *) pc;
+
+			if (vstack_push(stack)) {
+				ret = -EINVAL;
+				goto end;
+			}
+			vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
+			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+			break;
+		}
+
+		case BYTECODE_OP_LOAD_S64:
+		{
+			if (vstack_push(stack)) {
+				ret = -EINVAL;
+				goto end;
+			}
+			vstack_ax(stack)->type = REG_S64;
+			next_pc += sizeof(struct load_op)
+					+ sizeof(struct literal_numeric);
+			break;
+		}
+
+		case BYTECODE_OP_LOAD_DOUBLE:
+		{
+			if (vstack_push(stack)) {
+				ret = -EINVAL;
+				goto end;
+			}
+			vstack_ax(stack)->type = REG_DOUBLE;
+			next_pc += sizeof(struct load_op)
+					+ sizeof(struct literal_double);
+			break;
+		}
+
+		/* cast */
+		case BYTECODE_OP_CAST_TO_S64:
+		{
+			struct cast_op *insn = (struct cast_op *) pc;
+
+			switch (vstack_ax(stack)->type) {
+			default:
+				printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
+				ret = -EINVAL;
+				goto end;
+
+			case REG_STRING:
+			case REG_STAR_GLOB_STRING:
+				printk(KERN_WARNING "LTTng: bytecode: Cast op can only be applied to numeric or floating point registers\n");
+				ret = -EINVAL;
+				goto end;
+			case REG_S64:
+				insn->op = BYTECODE_OP_CAST_NOP;
+				break;
+			case REG_DOUBLE:
+				insn->op = BYTECODE_OP_CAST_DOUBLE_TO_S64;
+				break;
+			}
+			/* Pop 1, push 1 */
+			vstack_ax(stack)->type = REG_S64;
+			next_pc += sizeof(struct cast_op);
+			break;
+		}
+		case BYTECODE_OP_CAST_DOUBLE_TO_S64:
+		{
+			/* Pop 1, push 1 */
+			vstack_ax(stack)->type = REG_S64;
+			next_pc += sizeof(struct cast_op);
+			break;
+		}
+		case BYTECODE_OP_CAST_NOP:
+		{
+			next_pc += sizeof(struct cast_op);
+			break;
+		}
+
+		/*
+		 * Instructions for recursive traversal through composed types.
+		 */
+		case BYTECODE_OP_GET_CONTEXT_ROOT:
+		{
+			if (vstack_push(stack)) {
+				ret = -EINVAL;
+				goto end;
+			}
+			vstack_ax(stack)->type = REG_PTR;
+			vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
+			next_pc += sizeof(struct load_op);
+			break;
+		}
+		case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
+		{
+			if (vstack_push(stack)) {
+				ret = -EINVAL;
+				goto end;
+			}
+			vstack_ax(stack)->type = REG_PTR;
+			vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
+			next_pc += sizeof(struct load_op);
+			break;
+		}
+		case BYTECODE_OP_GET_PAYLOAD_ROOT:
+		{
+			if (vstack_push(stack)) {
+				ret = -EINVAL;
+				goto end;
+			}
+			vstack_ax(stack)->type = REG_PTR;
+			vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
+			next_pc += sizeof(struct load_op);
+			break;
+		}
+
+		case BYTECODE_OP_LOAD_FIELD:
+		{
+			struct load_op *insn = (struct load_op *) pc;
+
+			WARN_ON_ONCE(vstack_ax(stack)->type != REG_PTR);
+			/* Pop 1, push 1 */
+			ret = specialize_load_field(vstack_ax(stack), insn);
+			if (ret)
+				goto end;
+
+			next_pc += sizeof(struct load_op);
+			break;
+		}
+
+		case BYTECODE_OP_LOAD_FIELD_S8:
+		case BYTECODE_OP_LOAD_FIELD_S16:
+		case BYTECODE_OP_LOAD_FIELD_S32:
+		case BYTECODE_OP_LOAD_FIELD_S64:
+		case BYTECODE_OP_LOAD_FIELD_U8:
+		case BYTECODE_OP_LOAD_FIELD_U16:
+		case BYTECODE_OP_LOAD_FIELD_U32:
+		case BYTECODE_OP_LOAD_FIELD_U64:
+		{
+			/* Pop 1, push 1 */
+			vstack_ax(stack)->type = REG_S64;
+			next_pc += sizeof(struct load_op);
+			break;
+		}
+
+		case BYTECODE_OP_LOAD_FIELD_STRING:
+		case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
+		{
+			/* Pop 1, push 1 */
+			vstack_ax(stack)->type = REG_STRING;
+			next_pc += sizeof(struct load_op);
+			break;
+		}
+
+		case BYTECODE_OP_LOAD_FIELD_DOUBLE:
+		{
+			/* Pop 1, push 1 */
+			vstack_ax(stack)->type = REG_DOUBLE;
+			next_pc += sizeof(struct load_op);
+			break;
+		}
+
+		case BYTECODE_OP_GET_SYMBOL:
+		{
+			struct load_op *insn = (struct load_op *) pc;
+
+			dbg_printk("op get symbol\n");
+			switch (vstack_ax(stack)->load.type) {
+			case LOAD_OBJECT:
+				printk(KERN_WARNING "LTTng: bytecode: Nested fields not implemented yet.\n");
+				ret = -EINVAL;
+				goto end;
+			case LOAD_ROOT_CONTEXT:
+				/* Lookup context field. */
+				ret = specialize_context_lookup(ctx, bytecode, insn,
+					&vstack_ax(stack)->load);
+				if (ret)
+					goto end;
+				break;
+			case LOAD_ROOT_APP_CONTEXT:
+				/* App contexts are not supported by this runtime. */
+				ret = -EINVAL;
+				goto end;
+			case LOAD_ROOT_PAYLOAD:
+				/* Lookup event payload field. */
+				ret = specialize_payload_lookup(event_desc,
+					bytecode, insn,
+					&vstack_ax(stack)->load);
+				if (ret)
+					goto end;
+				break;
+			}
+			next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
+			break;
+		}
+
+		case BYTECODE_OP_GET_SYMBOL_FIELD:
+		{
+			/* Always generated by specialize phase. */
+			ret = -EINVAL;
+			goto end;
+		}
+
+		case BYTECODE_OP_GET_INDEX_U16:
+		{
+			struct load_op *insn = (struct load_op *) pc;
+			struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
+
+			dbg_printk("op get index u16\n");
+			/* Pop 1, push 1 */
+			ret = specialize_get_index(bytecode, insn, index->index,
+					vstack_ax(stack), sizeof(*index));
+			if (ret)
+				goto end;
+			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
+			break;
+		}
+
+		case BYTECODE_OP_GET_INDEX_U64:
+		{
+			struct load_op *insn = (struct load_op *) pc;
+			struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
+
+			dbg_printk("op get index u64\n");
+			/* Pop 1, push 1 */
+			ret = specialize_get_index(bytecode, insn, index->index,
+					vstack_ax(stack), sizeof(*index));
+			if (ret)
+				goto end;
+			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
+			break;
+		}
+
+		}
+	}
+end:
+	return ret;
+}
--- /dev/null
+/* SPDX-License-Identifier: MIT
+ *
+ * lttng-bytecode-validator.c
+ *
+ * LTTng modules bytecode bytecode validator.
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/types.h>
+#include <linux/jhash.h>
+#include <linux/slab.h>
+
+#include <wrapper/list.h>
+#include <lttng/lttng-bytecode.h>
+
+#define MERGE_POINT_TABLE_BITS 7
+#define MERGE_POINT_TABLE_SIZE (1U << MERGE_POINT_TABLE_BITS)
+
+/* merge point table node */
+struct mp_node {
+	struct hlist_node node;	/* chaining into an mp_table hash bucket */
+
+	/* Context at merge point */
+	struct vstack stack;	/* virtual stack typing snapshot to compare at merge */
+	unsigned long target_pc;	/* bytecode offset of merge point; hash key */
+};
+
+/* Hash table of merge points, indexed by hash of target_pc. */
+struct mp_table {
+	struct hlist_head mp_head[MERGE_POINT_TABLE_SIZE];
+};
+
+static
+int lttng_hash_match(struct mp_node *mp_node, unsigned long key_pc)
+{
+	/* A merge point matches when its target program counter is the key. */
+	return mp_node->target_pc == key_pc ? 1 : 0;
+}
+
+/*
+ * Compare two virtual stacks: returns 0 when depth and per-slot register
+ * types all agree, 1 otherwise.
+ */
+static
+int merge_points_compare(const struct vstack *stacka,
+		const struct vstack *stackb)
+{
+	int idx, depth;
+
+	if (stacka->top != stackb->top)
+		return 1;
+	depth = stacka->top + 1;
+	WARN_ON_ONCE(depth < 0);
+	for (idx = 0; idx < depth; idx++) {
+		if (stacka->e[idx].type != stackb->e[idx].type)
+			return 1;
+	}
+	return 0;
+}
+
+/*
+ * Record a merge point at @target_pc with the current virtual stack, or,
+ * if one is already recorded for that offset, check that the stacks agree.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL when the
+ * recorded merge point has a conflicting stack.
+ *
+ * Improvement: only allocate the node once we know the key is absent,
+ * instead of unconditionally allocating and freeing on the
+ * already-present path.
+ */
+static
+int merge_point_add_check(struct mp_table *mp_table, unsigned long target_pc,
+		const struct vstack *stack)
+{
+	struct mp_node *mp_node;
+	unsigned long hash = jhash_1word(target_pc, 0);
+	struct hlist_head *head;
+	struct mp_node *lookup_node;
+	int found = 0;
+
+	dbg_printk("Bytecode: adding merge point at offset %lu, hash %lu\n",
+			target_pc, hash);
+	head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
+	lttng_hlist_for_each_entry(lookup_node, head, node) {
+		if (lttng_hash_match(lookup_node, target_pc)) {
+			found = 1;
+			break;
+		}
+	}
+	if (found) {
+		/* Key already present */
+		dbg_printk("Bytecode: compare merge points for offset %lu, hash %lu\n",
+				target_pc, hash);
+		if (merge_points_compare(stack, &lookup_node->stack)) {
+			printk(KERN_WARNING "LTTng: bytecode: Merge points differ for offset %lu\n",
+				target_pc);
+			return -EINVAL;
+		}
+		return 0;
+	}
+	mp_node = kzalloc(sizeof(struct mp_node), GFP_KERNEL);
+	if (!mp_node)
+		return -ENOMEM;
+	mp_node->target_pc = target_pc;
+	memcpy(&mp_node->stack, stack, sizeof(mp_node->stack));
+	hlist_add_head(&mp_node->node, head);
+	return 0;
+}
+
+/*
+ * Binary comparators use top of stack (ax) and top of stack -1 (bx).
+ *
+ * Return 0 if the operand types are known to be compatible, 1 if at
+ * least one operand type is dynamic (REG_TYPE_UNKNOWN, checked at
+ * runtime instead), or -EINVAL on empty stack, type mismatch, or
+ * unknown/unsupported (e.g. REG_DOUBLE) register type.
+ * Star-glob patterns may only be compared with '==' and '!='.
+ */
+static
+int bin_op_compare_check(struct vstack *stack, const bytecode_opcode_t opcode,
+		const char *str)
+{
+	if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
+		goto error_empty;
+
+	switch (vstack_ax(stack)->type) {
+	default:
+	case REG_DOUBLE:
+		goto error_type;
+
+	case REG_STRING:
+		switch (vstack_bx(stack)->type) {
+		default:
+		case REG_DOUBLE:
+			goto error_type;
+		case REG_TYPE_UNKNOWN:
+			goto unknown;
+		case REG_STRING:
+			break;
+		case REG_STAR_GLOB_STRING:
+			/* string vs glob: only equality operators allowed */
+			if (opcode != BYTECODE_OP_EQ && opcode != BYTECODE_OP_NE) {
+				goto error_mismatch;
+			}
+			break;
+		case REG_S64:
+		case REG_U64:
+			goto error_mismatch;
+		}
+		break;
+	case REG_STAR_GLOB_STRING:
+		switch (vstack_bx(stack)->type) {
+		default:
+		case REG_DOUBLE:
+			goto error_type;
+		case REG_TYPE_UNKNOWN:
+			goto unknown;
+		case REG_STRING:
+			/* glob vs string: only equality operators allowed */
+			if (opcode != BYTECODE_OP_EQ && opcode != BYTECODE_OP_NE) {
+				goto error_mismatch;
+			}
+			break;
+		case REG_STAR_GLOB_STRING:
+		case REG_S64:
+		case REG_U64:
+			/* glob vs glob or glob vs number is never valid */
+			goto error_mismatch;
+		}
+		break;
+	case REG_S64:
+	case REG_U64:
+		switch (vstack_bx(stack)->type) {
+		default:
+		case REG_DOUBLE:
+			goto error_type;
+		case REG_TYPE_UNKNOWN:
+			goto unknown;
+		case REG_STRING:
+		case REG_STAR_GLOB_STRING:
+			goto error_mismatch;
+		case REG_S64:
+		case REG_U64:
+			break;
+		}
+		break;
+	case REG_TYPE_UNKNOWN:
+		switch (vstack_bx(stack)->type) {
+		default:
+		case REG_DOUBLE:
+			goto error_type;
+		case REG_TYPE_UNKNOWN:
+		case REG_STRING:
+		case REG_STAR_GLOB_STRING:
+		case REG_S64:
+		case REG_U64:
+			goto unknown;
+		}
+		break;
+	}
+	return 0;
+
+unknown:
+	return 1;
+
+error_empty:
+	printk(KERN_WARNING "LTTng: bytecode: empty stack for '%s' binary operator\n", str);
+	return -EINVAL;
+
+error_mismatch:
+	printk(KERN_WARNING "LTTng: bytecode: type mismatch for '%s' binary operator\n", str);
+	return -EINVAL;
+
+error_type:
+	printk(KERN_WARNING "LTTng: bytecode: unknown type for '%s' binary operator\n", str);
+	return -EINVAL;
+}
+
+/*
+ * Binary bitwise operators use top of stack and top of stack -1.
+ * Return 0 if typing is known to match, 1 if typing is dynamic
+ * (unknown), negative error value on error.
+ *
+ * Only integer (REG_S64/REG_U64) and unknown operands are accepted;
+ * any other type (including REG_DOUBLE) is rejected.
+ * NOTE(review): the 'opcode' parameter is currently unused; it is kept
+ * for signature symmetry with bin_op_compare_check.
+ */
+static
+int bin_op_bitwise_check(struct vstack *stack, bytecode_opcode_t opcode,
+		const char *str)
+{
+	if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
+		goto error_empty;
+
+	switch (vstack_ax(stack)->type) {
+	default:
+	case REG_DOUBLE:
+		goto error_type;
+
+	case REG_TYPE_UNKNOWN:
+		switch (vstack_bx(stack)->type) {
+		default:
+		case REG_DOUBLE:
+			goto error_type;
+		case REG_TYPE_UNKNOWN:
+		case REG_STRING:
+		case REG_STAR_GLOB_STRING:
+		case REG_S64:
+		case REG_U64:
+			goto unknown;
+		}
+		break;
+	case REG_S64:
+	case REG_U64:
+		switch (vstack_bx(stack)->type) {
+		default:
+		case REG_DOUBLE:
+			goto error_type;
+		case REG_TYPE_UNKNOWN:
+			goto unknown;
+		case REG_S64:
+		case REG_U64:
+			break;
+		}
+		break;
+	}
+	return 0;
+
+unknown:
+	return 1;
+
+error_empty:
+	printk(KERN_WARNING "LTTng: bytecode: empty stack for '%s' binary operator\n", str);
+	return -EINVAL;
+
+error_type:
+	printk(KERN_WARNING "LTTng: bytecode: unknown type for '%s' binary operator\n", str);
+	return -EINVAL;
+}
+
+/*
+ * Check that a get_symbol offset points at a NUL-terminated string
+ * located entirely within the bytecode buffer's symbol area.
+ * Returns 0 when valid, -EINVAL otherwise.
+ */
+static
+int validate_get_symbol(struct bytecode_runtime *bytecode,
+		const struct get_symbol *sym)
+{
+	const char *name, *data_end;
+	size_t max_len;
+
+	/* Symbol strings live after the relocation offset. */
+	if (sym->offset >= bytecode->p.bc->bc.len - bytecode->p.bc->bc.reloc_offset)
+		return -EINVAL;
+
+	name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + sym->offset;
+	data_end = bytecode->p.bc->bc.data + bytecode->p.bc->bc.len;
+	max_len = data_end - name;
+	/* Require a NUL terminator within the remaining buffer. */
+	return strnlen(name, max_len) == max_len ? -EINVAL : 0;
+}
+
+/*
+ * Validate bytecode range overflow within the validation pass.
+ * Called for each instruction encountered.
+ *
+ * Checks that the instruction's full encoding (opcode + operands,
+ * including inline strings) fits inside [start_pc, start_pc + len).
+ * Also rejects unknown, floating-point and arithmetic opcodes, which
+ * this kernel runtime does not support.
+ * Returns 0 if the instruction is in range, -ERANGE when it would
+ * overflow the bytecode buffer, -EINVAL for unsupported opcodes.
+ */
+static
+int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
+		char *start_pc, char *pc)
+{
+	int ret = 0;
+
+	switch (*(bytecode_opcode_t *) pc) {
+	case BYTECODE_OP_UNKNOWN:
+	default:
+	{
+		printk(KERN_WARNING "LTTng: bytecode: unknown bytecode op %u\n",
+			(unsigned int) *(bytecode_opcode_t *) pc);
+		ret = -EINVAL;
+		break;
+	}
+
+	case BYTECODE_OP_RETURN:
+	case BYTECODE_OP_RETURN_S64:
+	{
+		if (unlikely(pc + sizeof(struct return_op)
+				> start_pc + bytecode->len)) {
+			ret = -ERANGE;
+		}
+		break;
+	}
+
+	/* binary */
+	case BYTECODE_OP_MUL:
+	case BYTECODE_OP_DIV:
+	case BYTECODE_OP_MOD:
+	case BYTECODE_OP_PLUS:
+	case BYTECODE_OP_MINUS:
+	case BYTECODE_OP_EQ_DOUBLE:
+	case BYTECODE_OP_NE_DOUBLE:
+	case BYTECODE_OP_GT_DOUBLE:
+	case BYTECODE_OP_LT_DOUBLE:
+	case BYTECODE_OP_GE_DOUBLE:
+	case BYTECODE_OP_LE_DOUBLE:
+	/* Floating point */
+	case BYTECODE_OP_EQ_DOUBLE_S64:
+	case BYTECODE_OP_NE_DOUBLE_S64:
+	case BYTECODE_OP_GT_DOUBLE_S64:
+	case BYTECODE_OP_LT_DOUBLE_S64:
+	case BYTECODE_OP_GE_DOUBLE_S64:
+	case BYTECODE_OP_LE_DOUBLE_S64:
+	case BYTECODE_OP_EQ_S64_DOUBLE:
+	case BYTECODE_OP_NE_S64_DOUBLE:
+	case BYTECODE_OP_GT_S64_DOUBLE:
+	case BYTECODE_OP_LT_S64_DOUBLE:
+	case BYTECODE_OP_GE_S64_DOUBLE:
+	case BYTECODE_OP_LE_S64_DOUBLE:
+	case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
+	case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
+	case BYTECODE_OP_LOAD_DOUBLE:
+	case BYTECODE_OP_CAST_DOUBLE_TO_S64:
+	case BYTECODE_OP_UNARY_PLUS_DOUBLE:
+	case BYTECODE_OP_UNARY_MINUS_DOUBLE:
+	case BYTECODE_OP_UNARY_NOT_DOUBLE:
+	{
+		printk(KERN_WARNING "LTTng: bytecode: unsupported bytecode op %u\n",
+			(unsigned int) *(bytecode_opcode_t *) pc);
+		ret = -EINVAL;
+		break;
+	}
+
+	case BYTECODE_OP_EQ:
+	case BYTECODE_OP_NE:
+	case BYTECODE_OP_GT:
+	case BYTECODE_OP_LT:
+	case BYTECODE_OP_GE:
+	case BYTECODE_OP_LE:
+	case BYTECODE_OP_EQ_STRING:
+	case BYTECODE_OP_NE_STRING:
+	case BYTECODE_OP_GT_STRING:
+	case BYTECODE_OP_LT_STRING:
+	case BYTECODE_OP_GE_STRING:
+	case BYTECODE_OP_LE_STRING:
+	case BYTECODE_OP_EQ_STAR_GLOB_STRING:
+	case BYTECODE_OP_NE_STAR_GLOB_STRING:
+	case BYTECODE_OP_EQ_S64:
+	case BYTECODE_OP_NE_S64:
+	case BYTECODE_OP_GT_S64:
+	case BYTECODE_OP_LT_S64:
+	case BYTECODE_OP_GE_S64:
+	case BYTECODE_OP_LE_S64:
+	case BYTECODE_OP_BIT_RSHIFT:
+	case BYTECODE_OP_BIT_LSHIFT:
+	case BYTECODE_OP_BIT_AND:
+	case BYTECODE_OP_BIT_OR:
+	case BYTECODE_OP_BIT_XOR:
+	{
+		if (unlikely(pc + sizeof(struct binary_op)
+				> start_pc + bytecode->len)) {
+			ret = -ERANGE;
+		}
+		break;
+	}
+
+	/* unary */
+	case BYTECODE_OP_UNARY_PLUS:
+	case BYTECODE_OP_UNARY_MINUS:
+	case BYTECODE_OP_UNARY_NOT:
+	case BYTECODE_OP_UNARY_PLUS_S64:
+	case BYTECODE_OP_UNARY_MINUS_S64:
+	case BYTECODE_OP_UNARY_NOT_S64:
+	case BYTECODE_OP_UNARY_BIT_NOT:
+	{
+		if (unlikely(pc + sizeof(struct unary_op)
+				> start_pc + bytecode->len)) {
+			ret = -ERANGE;
+		}
+		break;
+	}
+
+	/* logical */
+	case BYTECODE_OP_AND:
+	case BYTECODE_OP_OR:
+	{
+		if (unlikely(pc + sizeof(struct logical_op)
+				> start_pc + bytecode->len)) {
+			ret = -ERANGE;
+		}
+		break;
+	}
+
+	/* load field ref */
+	case BYTECODE_OP_LOAD_FIELD_REF:
+	{
+		printk(KERN_WARNING "LTTng: bytecode: Unknown field ref type\n");
+		ret = -EINVAL;
+		break;
+	}
+
+	/* get context ref */
+	case BYTECODE_OP_GET_CONTEXT_REF:
+	{
+		printk(KERN_WARNING "LTTng: bytecode: Unknown field ref type\n");
+		ret = -EINVAL;
+		break;
+	}
+	case BYTECODE_OP_LOAD_FIELD_REF_STRING:
+	case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
+	case BYTECODE_OP_LOAD_FIELD_REF_USER_STRING:
+	case BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE:
+	case BYTECODE_OP_LOAD_FIELD_REF_S64:
+	case BYTECODE_OP_GET_CONTEXT_REF_STRING:
+	case BYTECODE_OP_GET_CONTEXT_REF_S64:
+	{
+		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref)
+				> start_pc + bytecode->len)) {
+			ret = -ERANGE;
+		}
+		break;
+	}
+
+	/* load from immediate operand */
+	case BYTECODE_OP_LOAD_STRING:
+	case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
+	{
+		struct load_op *insn = (struct load_op *) pc;
+		uint32_t str_len, maxlen;
+
+		if (unlikely(pc + sizeof(struct load_op)
+				> start_pc + bytecode->len)) {
+			ret = -ERANGE;
+			break;
+		}
+
+		/* The inline string must be NUL-terminated within the buffer. */
+		maxlen = start_pc + bytecode->len - pc - sizeof(struct load_op);
+		str_len = strnlen(insn->data, maxlen);
+		if (unlikely(str_len >= maxlen)) {
+			/* Final '\0' not found within range */
+			ret = -ERANGE;
+		}
+		break;
+	}
+
+	case BYTECODE_OP_LOAD_S64:
+	{
+		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric)
+				> start_pc + bytecode->len)) {
+			ret = -ERANGE;
+		}
+		break;
+	}
+
+	case BYTECODE_OP_CAST_TO_S64:
+	case BYTECODE_OP_CAST_NOP:
+	{
+		if (unlikely(pc + sizeof(struct cast_op)
+				> start_pc + bytecode->len)) {
+			ret = -ERANGE;
+		}
+		break;
+	}
+
+	/*
+	 * Instructions for recursive traversal through composed types.
+	 */
+	case BYTECODE_OP_GET_CONTEXT_ROOT:
+	case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
+	case BYTECODE_OP_GET_PAYLOAD_ROOT:
+	case BYTECODE_OP_LOAD_FIELD:
+	case BYTECODE_OP_LOAD_FIELD_S8:
+	case BYTECODE_OP_LOAD_FIELD_S16:
+	case BYTECODE_OP_LOAD_FIELD_S32:
+	case BYTECODE_OP_LOAD_FIELD_S64:
+	case BYTECODE_OP_LOAD_FIELD_U8:
+	case BYTECODE_OP_LOAD_FIELD_U16:
+	case BYTECODE_OP_LOAD_FIELD_U32:
+	case BYTECODE_OP_LOAD_FIELD_U64:
+	case BYTECODE_OP_LOAD_FIELD_STRING:
+	case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
+	case BYTECODE_OP_LOAD_FIELD_DOUBLE:
+		if (unlikely(pc + sizeof(struct load_op)
+				> start_pc + bytecode->len)) {
+			ret = -ERANGE;
+		}
+		break;
+
+	case BYTECODE_OP_GET_SYMBOL:
+	{
+		struct load_op *insn = (struct load_op *) pc;
+		struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_symbol)
+				> start_pc + bytecode->len)) {
+			ret = -ERANGE;
+			break;
+		}
+		/* Also validate the symbol string the operand points at. */
+		ret = validate_get_symbol(bytecode, sym);
+		break;
+	}
+
+	case BYTECODE_OP_GET_SYMBOL_FIELD:
+		printk(KERN_WARNING "LTTng: bytecode: Unexpected get symbol field\n");
+		ret = -EINVAL;
+		break;
+
+	case BYTECODE_OP_GET_INDEX_U16:
+		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u16)
+				> start_pc + bytecode->len)) {
+			ret = -ERANGE;
+		}
+		break;
+
+	case BYTECODE_OP_GET_INDEX_U64:
+		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u64)
+				> start_pc + bytecode->len)) {
+			ret = -ERANGE;
+		}
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * Free every merge point node in the table.
+ * Returns the number of nodes freed.
+ */
+static
+unsigned long delete_all_nodes(struct mp_table *mp_table)
+{
+	unsigned long count = 0;
+	int bucket;
+
+	for (bucket = 0; bucket < MERGE_POINT_TABLE_SIZE; bucket++) {
+		struct hlist_head *head = &mp_table->mp_head[bucket];
+		struct mp_node *mp_node;
+		struct hlist_node *tmp;
+
+		/* Safe iteration: nodes are freed while walking. */
+		lttng_hlist_for_each_entry_safe(mp_node, tmp, head, node) {
+			kfree(mp_node);
+			count++;
+		}
+	}
+	return count;
+}
+
+/*
+ * Return value:
+ * >=0: success
+ * <0: error
+ */
+static
+int validate_instruction_context(struct bytecode_runtime *bytecode,
+ struct vstack *stack,
+ char *start_pc,
+ char *pc)
+{
+ int ret = 0;
+ const bytecode_opcode_t opcode = *(bytecode_opcode_t *) pc;
+
+ switch (opcode) {
+ case BYTECODE_OP_UNKNOWN:
+ default:
+ {
+ printk(KERN_WARNING "LTTng: bytecode: unknown bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ case BYTECODE_OP_RETURN:
+ case BYTECODE_OP_RETURN_S64:
+ {
+ goto end;
+ }
+
+ /* binary */
+ case BYTECODE_OP_MUL:
+ case BYTECODE_OP_DIV:
+ case BYTECODE_OP_MOD:
+ case BYTECODE_OP_PLUS:
+ case BYTECODE_OP_MINUS:
+ /* Floating point */
+ case BYTECODE_OP_EQ_DOUBLE:
+ case BYTECODE_OP_NE_DOUBLE:
+ case BYTECODE_OP_GT_DOUBLE:
+ case BYTECODE_OP_LT_DOUBLE:
+ case BYTECODE_OP_GE_DOUBLE:
+ case BYTECODE_OP_LE_DOUBLE:
+ case BYTECODE_OP_EQ_DOUBLE_S64:
+ case BYTECODE_OP_NE_DOUBLE_S64:
+ case BYTECODE_OP_GT_DOUBLE_S64:
+ case BYTECODE_OP_LT_DOUBLE_S64:
+ case BYTECODE_OP_GE_DOUBLE_S64:
+ case BYTECODE_OP_LE_DOUBLE_S64:
+ case BYTECODE_OP_EQ_S64_DOUBLE:
+ case BYTECODE_OP_NE_S64_DOUBLE:
+ case BYTECODE_OP_GT_S64_DOUBLE:
+ case BYTECODE_OP_LT_S64_DOUBLE:
+ case BYTECODE_OP_GE_S64_DOUBLE:
+ case BYTECODE_OP_LE_S64_DOUBLE:
+ case BYTECODE_OP_UNARY_PLUS_DOUBLE:
+ case BYTECODE_OP_UNARY_MINUS_DOUBLE:
+ case BYTECODE_OP_UNARY_NOT_DOUBLE:
+ case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
+ case BYTECODE_OP_LOAD_DOUBLE:
+ case BYTECODE_OP_CAST_DOUBLE_TO_S64:
+ case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
+ {
+ printk(KERN_WARNING "LTTng: bytecode: unsupported bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ case BYTECODE_OP_EQ:
+ {
+ ret = bin_op_compare_check(stack, opcode, "==");
+ if (ret < 0)
+ goto end;
+ break;
+ }
+ case BYTECODE_OP_NE:
+ {
+ ret = bin_op_compare_check(stack, opcode, "!=");
+ if (ret < 0)
+ goto end;
+ break;
+ }
+ case BYTECODE_OP_GT:
+ {
+ ret = bin_op_compare_check(stack, opcode, ">");
+ if (ret < 0)
+ goto end;
+ break;
+ }
+ case BYTECODE_OP_LT:
+ {
+ ret = bin_op_compare_check(stack, opcode, "<");
+ if (ret < 0)
+ goto end;
+ break;
+ }
+ case BYTECODE_OP_GE:
+ {
+ ret = bin_op_compare_check(stack, opcode, ">=");
+ if (ret < 0)
+ goto end;
+ break;
+ }
+ case BYTECODE_OP_LE:
+ {
+ ret = bin_op_compare_check(stack, opcode, "<=");
+ if (ret < 0)
+ goto end;
+ break;
+ }
+
+ case BYTECODE_OP_EQ_STRING:
+ case BYTECODE_OP_NE_STRING:
+ case BYTECODE_OP_GT_STRING:
+ case BYTECODE_OP_LT_STRING:
+ case BYTECODE_OP_GE_STRING:
+ case BYTECODE_OP_LE_STRING:
+ {
+ if (!vstack_ax(stack) || !vstack_bx(stack)) {
+ printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_STRING
+ || vstack_bx(stack)->type != REG_STRING) {
+ printk(KERN_WARNING "LTTng: bytecode: Unexpected register type for string comparator\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
+
+ case BYTECODE_OP_EQ_STAR_GLOB_STRING:
+ case BYTECODE_OP_NE_STAR_GLOB_STRING:
+ {
+ if (!vstack_ax(stack) || !vstack_bx(stack)) {
+ printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_STAR_GLOB_STRING
+ && vstack_bx(stack)->type != REG_STAR_GLOB_STRING) {
+ printk(KERN_WARNING "LTTng: bytecode: Unexpected register type for globbing pattern comparator\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_EQ_S64:
+ case BYTECODE_OP_NE_S64:
+ case BYTECODE_OP_GT_S64:
+ case BYTECODE_OP_LT_S64:
+ case BYTECODE_OP_GE_S64:
+ case BYTECODE_OP_LE_S64:
+ {
+ if (!vstack_ax(stack) || !vstack_bx(stack)) {
+ printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ break;
+ default:
+ printk(KERN_WARNING "LTTng: bytecode: Unexpected register type for s64 comparator\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_bx(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ break;
+ default:
+ printk(KERN_WARNING "LTTng: bytecode: Unexpected register type for s64 comparator\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_BIT_RSHIFT:
+ ret = bin_op_bitwise_check(stack, opcode, ">>");
+ if (ret < 0)
+ goto end;
+ break;
+ case BYTECODE_OP_BIT_LSHIFT:
+ ret = bin_op_bitwise_check(stack, opcode, "<<");
+ if (ret < 0)
+ goto end;
+ break;
+ case BYTECODE_OP_BIT_AND:
+ ret = bin_op_bitwise_check(stack, opcode, "&");
+ if (ret < 0)
+ goto end;
+ break;
+ case BYTECODE_OP_BIT_OR:
+ ret = bin_op_bitwise_check(stack, opcode, "|");
+ if (ret < 0)
+ goto end;
+ break;
+ case BYTECODE_OP_BIT_XOR:
+ ret = bin_op_bitwise_check(stack, opcode, "^");
+ if (ret < 0)
+ goto end;
+ break;
+
+ /* unary */
+ case BYTECODE_OP_UNARY_PLUS:
+ case BYTECODE_OP_UNARY_MINUS:
+ case BYTECODE_OP_UNARY_NOT:
+ {
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ default:
+ case REG_DOUBLE:
+ printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ printk(KERN_WARNING "LTTng: bytecode: Unary op can only be applied to numeric or floating point registers\n");
+ ret = -EINVAL;
+ goto end;
+ case REG_S64:
+ case REG_U64:
+ case REG_TYPE_UNKNOWN:
+ break;
+ }
+ break;
+ }
+ case BYTECODE_OP_UNARY_BIT_NOT:
+ {
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ default:
+ printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ case REG_DOUBLE:
+ printk(KERN_WARNING "LTTng: bytecode: Unary bitwise op can only be applied to numeric registers\n");
+ ret = -EINVAL;
+ goto end;
+ case REG_S64:
+ case REG_U64:
+ case REG_TYPE_UNKNOWN:
+ break;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_PLUS_S64:
+ case BYTECODE_OP_UNARY_MINUS_S64:
+ case BYTECODE_OP_UNARY_NOT_S64:
+ {
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_S64 &&
+ vstack_ax(stack)->type != REG_U64) {
+ printk(KERN_WARNING "LTTng: bytecode: Invalid register type\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
+ /* logical */
+ case BYTECODE_OP_AND:
+ case BYTECODE_OP_OR:
+ {
+ struct logical_op *insn = (struct logical_op *) pc;
+
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_S64 &&
+ vstack_ax(stack)->type != REG_U64) {
+ printk(KERN_WARNING "LTTng: bytecode: Logical comparator expects S64 register\n");
+ ret = -EINVAL;
+ goto end;
+ }
+
+ dbg_printk("Validate jumping to bytecode offset %u\n",
+ (unsigned int) insn->skip_offset);
+ if (unlikely(start_pc + insn->skip_offset <= pc)) {
+ printk(KERN_WARNING "LTTng: bytecode: Loops are not allowed in bytecode\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
+ /* load field ref */
+ case BYTECODE_OP_LOAD_FIELD_REF:
+ {
+ printk(KERN_WARNING "LTTng: bytecode: Unknown field ref type\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_STRING:
+ case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
+ case BYTECODE_OP_LOAD_FIELD_REF_USER_STRING:
+ case BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printk("Validate load field ref offset %u type string\n",
+ ref->offset);
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_S64:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printk("Validate load field ref offset %u type s64\n",
+ ref->offset);
+ break;
+ }
+
+ /* load from immediate operand */
+ case BYTECODE_OP_LOAD_STRING:
+ case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
+ {
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_S64:
+ {
+ break;
+ }
+
+ case BYTECODE_OP_CAST_TO_S64:
+ {
+ struct cast_op *insn = (struct cast_op *) pc;
+
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ default:
+ case REG_DOUBLE:
+ printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ printk(KERN_WARNING "LTTng: bytecode: Cast op can only be applied to numeric or floating point registers\n");
+ ret = -EINVAL;
+ goto end;
+ case REG_S64:
+ break;
+ }
+ if (insn->op == BYTECODE_OP_CAST_DOUBLE_TO_S64) {
+ if (vstack_ax(stack)->type != REG_DOUBLE) {
+ printk(KERN_WARNING "LTTng: bytecode: Cast expects double\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ break;
+ }
+ case BYTECODE_OP_CAST_NOP:
+ {
+ break;
+ }
+
+ /* get context ref */
+ case BYTECODE_OP_GET_CONTEXT_REF:
+ {
+ printk(KERN_WARNING "LTTng: bytecode: Unknown get context ref type\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ case BYTECODE_OP_GET_CONTEXT_REF_STRING:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printk("Validate get context ref offset %u type string\n",
+ ref->offset);
+ break;
+ }
+ case BYTECODE_OP_GET_CONTEXT_REF_S64:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printk("Validate get context ref offset %u type s64\n",
+ ref->offset);
+ break;
+ }
+
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ case BYTECODE_OP_GET_CONTEXT_ROOT:
+ {
+ dbg_printk("Validate get context root\n");
+ break;
+ }
+ case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
+ {
+ dbg_printk("Validate get app context root\n");
+ break;
+ }
+ case BYTECODE_OP_GET_PAYLOAD_ROOT:
+ {
+ dbg_printk("Validate get payload root\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD:
+ {
+ /*
+ * We tolerate that field type is unknown at validation,
+ * because we are performing the load specialization in
+ * a phase after validation.
+ */
+ dbg_printk("Validate load field\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_S8:
+ {
+ dbg_printk("Validate load field s8\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_S16:
+ {
+ dbg_printk("Validate load field s16\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_S32:
+ {
+ dbg_printk("Validate load field s32\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_S64:
+ {
+ dbg_printk("Validate load field s64\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_U8:
+ {
+ dbg_printk("Validate load field u8\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_U16:
+ {
+ dbg_printk("Validate load field u16\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_U32:
+ {
+ dbg_printk("Validate load field u32\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_U64:
+ {
+ dbg_printk("Validate load field u64\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_STRING:
+ {
+ dbg_printk("Validate load field string\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
+ {
+ dbg_printk("Validate load field sequence\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_DOUBLE:
+ {
+ dbg_printk("Validate load field double\n");
+ break;
+ }
+
+ case BYTECODE_OP_GET_SYMBOL:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+ dbg_printk("Validate get symbol offset %u\n", sym->offset);
+ break;
+ }
+
+ case BYTECODE_OP_GET_SYMBOL_FIELD:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+ dbg_printk("Validate get symbol field offset %u\n", sym->offset);
+ break;
+ }
+
+ case BYTECODE_OP_GET_INDEX_U16:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u16 *get_index = (struct get_index_u16 *) insn->data;
+
+ dbg_printk("Validate get index u16 index %u\n", get_index->index);
+ break;
+ }
+
+ case BYTECODE_OP_GET_INDEX_U64:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u64 *get_index = (struct get_index_u64 *) insn->data;
+
+ dbg_printk("Validate get index u64 index %llu\n",
+ (unsigned long long) get_index->index);
+ break;
+ }
+ }
+end:
+ return ret;
+}
+
+/*
+ * Return value:
+ * 0: success
+ * <0: error
+ */
+static
+int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
+ struct mp_table *mp_table,
+ struct vstack *stack,
+ char *start_pc,
+ char *pc)
+{
+ int ret, found = 0;
+ unsigned long target_pc = pc - start_pc;
+ unsigned long hash;
+ struct hlist_head *head;
+ struct mp_node *mp_node;
+
+ /* Validate the context resulting from the previous instruction */
+ ret = validate_instruction_context(bytecode, stack, start_pc, pc);
+ if (ret < 0)
+ return ret;
+
+ /* Validate merge points */
+ hash = jhash_1word(target_pc, 0);
+ head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
+ lttng_hlist_for_each_entry(mp_node, head, node) {
+ if (lttng_hash_match(mp_node, target_pc)) {
+ found = 1;
+ break;
+ }
+ }
+ if (found) {
+ dbg_printk("Bytecode: validate merge point at offset %lu\n",
+ target_pc);
+ if (merge_points_compare(stack, &mp_node->stack)) {
+ printk(KERN_WARNING "LTTng: bytecode: Merge points differ for offset %lu\n",
+ target_pc);
+ return -EINVAL;
+ }
+ /* Once validated, we can remove the merge point */
+ dbg_printk("Bytecode: remove merge point at offset %lu\n",
+ target_pc);
+ hlist_del(&mp_node->node);
+ }
+ return 0;
+}
+
+/*
+ * Return value:
+ * >0: going to next insn.
+ * 0: success, stop iteration.
+ * <0: error
+ */
+static
+int exec_insn(struct bytecode_runtime *bytecode,
+ struct mp_table *mp_table,
+ struct vstack *stack,
+ char **_next_pc,
+ char *pc)
+{
+ int ret = 1;
+ char *next_pc = *_next_pc;
+
+ switch (*(bytecode_opcode_t *) pc) {
+ case BYTECODE_OP_UNKNOWN:
+ default:
+ {
+ printk(KERN_WARNING "LTTng: bytecode: unknown bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ case BYTECODE_OP_RETURN:
+ {
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ case REG_DOUBLE:
+ case REG_STRING:
+ case REG_PTR:
+ case REG_TYPE_UNKNOWN:
+ break;
+ default:
+ printk(KERN_WARNING "LTTng: bytecode: Unexpected register type %d at end of bytecode\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ ret = 0;
+ goto end;
+ }
+
+ case BYTECODE_OP_RETURN_S64:
+ {
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ break;
+ default:
+ case REG_TYPE_UNKNOWN:
+ printk(KERN_WARNING "LTTng: bytecode: Unexpected register type %d at end of bytecode\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ ret = 0;
+ goto end;
+ }
+
+ /* binary */
+ case BYTECODE_OP_MUL:
+ case BYTECODE_OP_DIV:
+ case BYTECODE_OP_MOD:
+ case BYTECODE_OP_PLUS:
+ case BYTECODE_OP_MINUS:
+ /* Floating point */
+ case BYTECODE_OP_EQ_DOUBLE:
+ case BYTECODE_OP_NE_DOUBLE:
+ case BYTECODE_OP_GT_DOUBLE:
+ case BYTECODE_OP_LT_DOUBLE:
+ case BYTECODE_OP_GE_DOUBLE:
+ case BYTECODE_OP_LE_DOUBLE:
+ case BYTECODE_OP_EQ_DOUBLE_S64:
+ case BYTECODE_OP_NE_DOUBLE_S64:
+ case BYTECODE_OP_GT_DOUBLE_S64:
+ case BYTECODE_OP_LT_DOUBLE_S64:
+ case BYTECODE_OP_GE_DOUBLE_S64:
+ case BYTECODE_OP_LE_DOUBLE_S64:
+ case BYTECODE_OP_EQ_S64_DOUBLE:
+ case BYTECODE_OP_NE_S64_DOUBLE:
+ case BYTECODE_OP_GT_S64_DOUBLE:
+ case BYTECODE_OP_LT_S64_DOUBLE:
+ case BYTECODE_OP_GE_S64_DOUBLE:
+ case BYTECODE_OP_LE_S64_DOUBLE:
+ case BYTECODE_OP_UNARY_PLUS_DOUBLE:
+ case BYTECODE_OP_UNARY_MINUS_DOUBLE:
+ case BYTECODE_OP_UNARY_NOT_DOUBLE:
+ case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
+ case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
+ case BYTECODE_OP_LOAD_DOUBLE:
+ case BYTECODE_OP_CAST_DOUBLE_TO_S64:
+ {
+ printk(KERN_WARNING "LTTng: bytecode: unsupported bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ case BYTECODE_OP_EQ:
+ case BYTECODE_OP_NE:
+ case BYTECODE_OP_GT:
+ case BYTECODE_OP_LT:
+ case BYTECODE_OP_GE:
+ case BYTECODE_OP_LE:
+ case BYTECODE_OP_EQ_STRING:
+ case BYTECODE_OP_NE_STRING:
+ case BYTECODE_OP_GT_STRING:
+ case BYTECODE_OP_LT_STRING:
+ case BYTECODE_OP_GE_STRING:
+ case BYTECODE_OP_LE_STRING:
+ case BYTECODE_OP_EQ_STAR_GLOB_STRING:
+ case BYTECODE_OP_NE_STAR_GLOB_STRING:
+ case BYTECODE_OP_EQ_S64:
+ case BYTECODE_OP_NE_S64:
+ case BYTECODE_OP_GT_S64:
+ case BYTECODE_OP_LT_S64:
+ case BYTECODE_OP_GE_S64:
+ case BYTECODE_OP_LE_S64:
+ {
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ case REG_DOUBLE:
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ case REG_TYPE_UNKNOWN:
+ break;
+ default:
+ printk(KERN_WARNING "Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+ case BYTECODE_OP_BIT_RSHIFT:
+ case BYTECODE_OP_BIT_LSHIFT:
+ case BYTECODE_OP_BIT_AND:
+ case BYTECODE_OP_BIT_OR:
+ case BYTECODE_OP_BIT_XOR:
+ {
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ case REG_DOUBLE:
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ case REG_TYPE_UNKNOWN:
+ break;
+ default:
+ printk(KERN_WARNING "LTTng: bytecode: Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ vstack_ax(stack)->type = REG_U64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ /* unary */
+ case BYTECODE_OP_UNARY_PLUS:
+ case BYTECODE_OP_UNARY_MINUS:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "LTTng: bytecode: Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ case REG_TYPE_UNKNOWN:
+ break;
+ default:
+ printk(KERN_WARNING "LTTng: bytecode: Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ vstack_ax(stack)->type = REG_TYPE_UNKNOWN;
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_PLUS_S64:
+ case BYTECODE_OP_UNARY_MINUS_S64:
+ case BYTECODE_OP_UNARY_NOT_S64:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "LTTng: bytecode: Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ break;
+ default:
+ printk(KERN_WARNING "LTTng: bytecode: Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_NOT:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "LTTng: bytecode: Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ case REG_TYPE_UNKNOWN:
+ break;
+ default:
+ printk(KERN_WARNING "LTTng: bytecode: Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_BIT_NOT:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ case REG_TYPE_UNKNOWN:
+ break;
+ case REG_DOUBLE:
+ default:
+ printk(KERN_WARNING "LTTng: bytecode: Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ vstack_ax(stack)->type = REG_U64;
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ /* logical */
+ case BYTECODE_OP_AND:
+ case BYTECODE_OP_OR:
+ {
+ struct logical_op *insn = (struct logical_op *) pc;
+ int merge_ret;
+
+ /* Add merge point to table */
+ merge_ret = merge_point_add_check(mp_table,
+ insn->skip_offset, stack);
+ if (merge_ret) {
+ ret = merge_ret;
+ goto end;
+ }
+
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "LTTng: bytecode: Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ /* There is always a cast-to-s64 operation before a or/and op. */
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ break;
+ default:
+ printk(KERN_WARNING "LTTng: bytecode: Incorrect register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ /* Continue to next instruction */
+ /* Pop 1 when jump not taken */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct logical_op);
+ break;
+ }
+
+ /* load field ref */
+ case BYTECODE_OP_LOAD_FIELD_REF:
+ {
+ printk(KERN_WARNING "LTTng: bytecode: Unknown field ref type\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ /* get context ref */
+ case BYTECODE_OP_GET_CONTEXT_REF:
+ {
+ printk(KERN_WARNING "LTTng: bytecode: Unknown get context ref type\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_STRING:
+ case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
+ case BYTECODE_OP_GET_CONTEXT_REF_STRING:
+ case BYTECODE_OP_LOAD_FIELD_REF_USER_STRING:
+ case BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STRING;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_S64:
+ case BYTECODE_OP_GET_CONTEXT_REF_S64:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ break;
+ }
+
+ /* load from immediate operand */
+ case BYTECODE_OP_LOAD_STRING:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STRING;
+ next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
+ next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_S64:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct load_op)
+ + sizeof(struct literal_numeric);
+ break;
+ }
+
+ case BYTECODE_OP_CAST_TO_S64:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "LTTng: bytecode: Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ case REG_DOUBLE:
+ case REG_TYPE_UNKNOWN:
+ break;
+ default:
+ printk(KERN_WARNING "LTTng: bytecode: Incorrect register type %d for cast\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct cast_op);
+ break;
+ }
+ case BYTECODE_OP_CAST_NOP:
+ {
+ next_pc += sizeof(struct cast_op);
+ break;
+ }
+
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ case BYTECODE_OP_GET_CONTEXT_ROOT:
+ case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
+ case BYTECODE_OP_GET_PAYLOAD_ROOT:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_PTR;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "LTTng: bytecode: Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ printk(KERN_WARNING "LTTng: bytecode: Expecting pointer on top of stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_TYPE_UNKNOWN;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD_S8:
+ case BYTECODE_OP_LOAD_FIELD_S16:
+ case BYTECODE_OP_LOAD_FIELD_S32:
+ case BYTECODE_OP_LOAD_FIELD_S64:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_U8:
+ case BYTECODE_OP_LOAD_FIELD_U16:
+ case BYTECODE_OP_LOAD_FIELD_U32:
+ case BYTECODE_OP_LOAD_FIELD_U64:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "LTTng: bytecode: Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ printk(KERN_WARNING "LTTng: bytecode: Expecting pointer on top of stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_U64;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_STRING:
+ case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "LTTng: bytecode: Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ printk(KERN_WARNING "LTTng: bytecode: Expecting pointer on top of stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STRING;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD_DOUBLE:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "LTTng: bytecode: Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ printk(KERN_WARNING "LTTng: bytecode: Expecting pointer on top of stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_DOUBLE;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_GET_SYMBOL:
+ case BYTECODE_OP_GET_SYMBOL_FIELD:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "LTTng: bytecode: Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ printk(KERN_WARNING "LTTng: bytecode: Expecting pointer on top of stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
+ break;
+ }
+
+ case BYTECODE_OP_GET_INDEX_U16:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "LTTng: bytecode: Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ printk(KERN_WARNING "LTTng: bytecode: Expecting pointer on top of stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
+ break;
+ }
+
+ case BYTECODE_OP_GET_INDEX_U64:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ printk(KERN_WARNING "LTTng: bytecode: Empty stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ printk(KERN_WARNING "LTTng: bytecode: Expecting pointer on top of stack\n\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
+ break;
+ }
+
+ }
+end:
+ *_next_pc = next_pc;
+ return ret;
+}
+
+/*
+ * Never called concurrently (hash seed is shared).
+ */
+int lttng_bytecode_validate(struct bytecode_runtime *bytecode)
+{
+ struct mp_table *mp_table;
+ char *pc, *next_pc, *start_pc;
+ int ret = -EINVAL;
+ struct vstack stack;
+
+ vstack_init(&stack);
+
+ mp_table = kzalloc(sizeof(*mp_table), GFP_KERNEL);
+ if (!mp_table) {
+ printk(KERN_WARNING "LTTng: bytecode: Error allocating hash table for bytecode validation\n");
+ return -ENOMEM;
+ }
+ start_pc = &bytecode->code[0];
+ for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
+ pc = next_pc) {
+ ret = bytecode_validate_overflow(bytecode, start_pc, pc);
+ if (ret != 0) {
+ if (ret == -ERANGE)
+ printk(KERN_WARNING "LTTng: bytecode: bytecode overflow\n");
+ goto end;
+ }
+ dbg_printk("Validating op %s (%u)\n",
+ lttng_bytecode_print_op((unsigned int) *(bytecode_opcode_t *) pc),
+ (unsigned int) *(bytecode_opcode_t *) pc);
+
+ /*
+ * For each instruction, validate the current context
+ * (traversal of entire execution flow), and validate
+ * all merge points targeting this instruction.
+ */
+ ret = validate_instruction_all_contexts(bytecode, mp_table,
+ &stack, start_pc, pc);
+ if (ret)
+ goto end;
+ ret = exec_insn(bytecode, mp_table, &stack, &next_pc, pc);
+ if (ret <= 0)
+ goto end;
+ }
+end:
+ if (delete_all_nodes(mp_table)) {
+ if (!ret) {
+ printk(KERN_WARNING "LTTng: bytecode: Unexpected merge points\n");
+ ret = -EINVAL;
+ }
+ }
+ kfree(mp_table);
+ return ret;
+}
--- /dev/null
+/* SPDX-License-Identifier: MIT
+ *
+ * lttng-bytecode.c
+ *
+ * LTTng modules bytecode code.
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include <lttng/lttng-bytecode.h>
+
/*
 * Human-readable name for each bytecode opcode, indexed by opcode value.
 * Designated initializers are used, so opcodes without an entry map to
 * NULL. Used by lttng_bytecode_print_op() for debug printout.
 */
static const char *opnames[] = {
	[ BYTECODE_OP_UNKNOWN ] = "UNKNOWN",

	[ BYTECODE_OP_RETURN ] = "RETURN",

	/* binary */
	[ BYTECODE_OP_MUL ] = "MUL",
	[ BYTECODE_OP_DIV ] = "DIV",
	[ BYTECODE_OP_MOD ] = "MOD",
	[ BYTECODE_OP_PLUS ] = "PLUS",
	[ BYTECODE_OP_MINUS ] = "MINUS",
	[ BYTECODE_OP_BIT_RSHIFT ] = "BIT_RSHIFT",
	[ BYTECODE_OP_BIT_LSHIFT ] = "BIT_LSHIFT",
	[ BYTECODE_OP_BIT_AND ] = "BIT_AND",
	[ BYTECODE_OP_BIT_OR ] = "BIT_OR",
	[ BYTECODE_OP_BIT_XOR ] = "BIT_XOR",

	/* binary comparators */
	[ BYTECODE_OP_EQ ] = "EQ",
	[ BYTECODE_OP_NE ] = "NE",
	[ BYTECODE_OP_GT ] = "GT",
	[ BYTECODE_OP_LT ] = "LT",
	[ BYTECODE_OP_GE ] = "GE",
	[ BYTECODE_OP_LE ] = "LE",

	/* string binary comparators */
	[ BYTECODE_OP_EQ_STRING ] = "EQ_STRING",
	[ BYTECODE_OP_NE_STRING ] = "NE_STRING",
	[ BYTECODE_OP_GT_STRING ] = "GT_STRING",
	[ BYTECODE_OP_LT_STRING ] = "LT_STRING",
	[ BYTECODE_OP_GE_STRING ] = "GE_STRING",
	[ BYTECODE_OP_LE_STRING ] = "LE_STRING",

	/* s64 binary comparators */
	[ BYTECODE_OP_EQ_S64 ] = "EQ_S64",
	[ BYTECODE_OP_NE_S64 ] = "NE_S64",
	[ BYTECODE_OP_GT_S64 ] = "GT_S64",
	[ BYTECODE_OP_LT_S64 ] = "LT_S64",
	[ BYTECODE_OP_GE_S64 ] = "GE_S64",
	[ BYTECODE_OP_LE_S64 ] = "LE_S64",

	/* double binary comparators */
	[ BYTECODE_OP_EQ_DOUBLE ] = "EQ_DOUBLE",
	[ BYTECODE_OP_NE_DOUBLE ] = "NE_DOUBLE",
	[ BYTECODE_OP_GT_DOUBLE ] = "GT_DOUBLE",
	[ BYTECODE_OP_LT_DOUBLE ] = "LT_DOUBLE",
	[ BYTECODE_OP_GE_DOUBLE ] = "GE_DOUBLE",
	[ BYTECODE_OP_LE_DOUBLE ] = "LE_DOUBLE",

	/* Mixed S64-double binary comparators */
	[ BYTECODE_OP_EQ_DOUBLE_S64 ] = "EQ_DOUBLE_S64",
	[ BYTECODE_OP_NE_DOUBLE_S64 ] = "NE_DOUBLE_S64",
	[ BYTECODE_OP_GT_DOUBLE_S64 ] = "GT_DOUBLE_S64",
	[ BYTECODE_OP_LT_DOUBLE_S64 ] = "LT_DOUBLE_S64",
	[ BYTECODE_OP_GE_DOUBLE_S64 ] = "GE_DOUBLE_S64",
	[ BYTECODE_OP_LE_DOUBLE_S64 ] = "LE_DOUBLE_S64",

	[ BYTECODE_OP_EQ_S64_DOUBLE ] = "EQ_S64_DOUBLE",
	[ BYTECODE_OP_NE_S64_DOUBLE ] = "NE_S64_DOUBLE",
	[ BYTECODE_OP_GT_S64_DOUBLE ] = "GT_S64_DOUBLE",
	[ BYTECODE_OP_LT_S64_DOUBLE ] = "LT_S64_DOUBLE",
	[ BYTECODE_OP_GE_S64_DOUBLE ] = "GE_S64_DOUBLE",
	[ BYTECODE_OP_LE_S64_DOUBLE ] = "LE_S64_DOUBLE",

	/* unary */
	[ BYTECODE_OP_UNARY_PLUS ] = "UNARY_PLUS",
	[ BYTECODE_OP_UNARY_MINUS ] = "UNARY_MINUS",
	[ BYTECODE_OP_UNARY_NOT ] = "UNARY_NOT",
	[ BYTECODE_OP_UNARY_PLUS_S64 ] = "UNARY_PLUS_S64",
	[ BYTECODE_OP_UNARY_MINUS_S64 ] = "UNARY_MINUS_S64",
	[ BYTECODE_OP_UNARY_NOT_S64 ] = "UNARY_NOT_S64",
	[ BYTECODE_OP_UNARY_PLUS_DOUBLE ] = "UNARY_PLUS_DOUBLE",
	[ BYTECODE_OP_UNARY_MINUS_DOUBLE ] = "UNARY_MINUS_DOUBLE",
	[ BYTECODE_OP_UNARY_NOT_DOUBLE ] = "UNARY_NOT_DOUBLE",

	/* logical */
	[ BYTECODE_OP_AND ] = "AND",
	[ BYTECODE_OP_OR ] = "OR",

	/* load field ref */
	[ BYTECODE_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF",
	[ BYTECODE_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING",
	[ BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE",
	[ BYTECODE_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64",
	[ BYTECODE_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE",

	/* load from immediate operand */
	[ BYTECODE_OP_LOAD_STRING ] = "LOAD_STRING",
	[ BYTECODE_OP_LOAD_S64 ] = "LOAD_S64",
	[ BYTECODE_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE",

	/* cast */
	[ BYTECODE_OP_CAST_TO_S64 ] = "CAST_TO_S64",
	[ BYTECODE_OP_CAST_DOUBLE_TO_S64 ] = "CAST_DOUBLE_TO_S64",
	[ BYTECODE_OP_CAST_NOP ] = "CAST_NOP",

	/* get context ref */
	[ BYTECODE_OP_GET_CONTEXT_REF ] = "GET_CONTEXT_REF",
	[ BYTECODE_OP_GET_CONTEXT_REF_STRING ] = "GET_CONTEXT_REF_STRING",
	[ BYTECODE_OP_GET_CONTEXT_REF_S64 ] = "GET_CONTEXT_REF_S64",
	[ BYTECODE_OP_GET_CONTEXT_REF_DOUBLE ] = "GET_CONTEXT_REF_DOUBLE",

	/* load userspace field ref */
	[ BYTECODE_OP_LOAD_FIELD_REF_USER_STRING ] = "LOAD_FIELD_REF_USER_STRING",
	[ BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = "LOAD_FIELD_REF_USER_SEQUENCE",

	/*
	 * load immediate star globbing pattern (literal string)
	 * from immediate.
	 */
	[ BYTECODE_OP_LOAD_STAR_GLOB_STRING ] = "LOAD_STAR_GLOB_STRING",

	/* globbing pattern binary comparators */
	[ BYTECODE_OP_EQ_STAR_GLOB_STRING ] = "EQ_STAR_GLOB_STRING",
	[ BYTECODE_OP_NE_STAR_GLOB_STRING ] = "NE_STAR_GLOB_STRING",

	/*
	 * Instructions for recursive traversal through composed types.
	 */
	[ BYTECODE_OP_GET_CONTEXT_ROOT ] = "GET_CONTEXT_ROOT",
	[ BYTECODE_OP_GET_APP_CONTEXT_ROOT ] = "GET_APP_CONTEXT_ROOT",
	[ BYTECODE_OP_GET_PAYLOAD_ROOT ] = "GET_PAYLOAD_ROOT",

	[ BYTECODE_OP_GET_SYMBOL ] = "GET_SYMBOL",
	[ BYTECODE_OP_GET_SYMBOL_FIELD ] = "GET_SYMBOL_FIELD",
	[ BYTECODE_OP_GET_INDEX_U16 ] = "GET_INDEX_U16",
	[ BYTECODE_OP_GET_INDEX_U64 ] = "GET_INDEX_U64",

	[ BYTECODE_OP_LOAD_FIELD ] = "LOAD_FIELD",
	[ BYTECODE_OP_LOAD_FIELD_S8 ] = "LOAD_FIELD_S8",
	[ BYTECODE_OP_LOAD_FIELD_S16 ] = "LOAD_FIELD_S16",
	[ BYTECODE_OP_LOAD_FIELD_S32 ] = "LOAD_FIELD_S32",
	[ BYTECODE_OP_LOAD_FIELD_S64 ] = "LOAD_FIELD_S64",
	[ BYTECODE_OP_LOAD_FIELD_U8 ] = "LOAD_FIELD_U8",
	[ BYTECODE_OP_LOAD_FIELD_U16 ] = "LOAD_FIELD_U16",
	[ BYTECODE_OP_LOAD_FIELD_U32 ] = "LOAD_FIELD_U32",
	[ BYTECODE_OP_LOAD_FIELD_U64 ] = "LOAD_FIELD_U64",
	[ BYTECODE_OP_LOAD_FIELD_STRING ] = "LOAD_FIELD_STRING",
	[ BYTECODE_OP_LOAD_FIELD_SEQUENCE ] = "LOAD_FIELD_SEQUENCE",
	[ BYTECODE_OP_LOAD_FIELD_DOUBLE ] = "LOAD_FIELD_DOUBLE",

	[ BYTECODE_OP_UNARY_BIT_NOT ] = "UNARY_BIT_NOT",

	[ BYTECODE_OP_RETURN_S64 ] = "RETURN_S64",
};
+
+/*
+ * Return the printable mnemonic of a bytecode opcode; any value at or
+ * beyond NR_BYTECODE_OPS maps to "UNKNOWN".
+ */
+const char *lttng_bytecode_print_op(enum bytecode_op op)
+{
+ return (op < NR_BYTECODE_OPS) ? opnames[op] : "UNKNOWN";
+}
+
+/*
+ * Relocate a LOAD_FIELD_REF instruction so it refers to the named event
+ * payload field: compute the field's byte offset from the sizes of the
+ * preceding filterable fields, then rewrite the generic opcode into the
+ * type-specific load opcode and store the offset in the instruction.
+ * Returns 0 on success, -EINVAL on lookup/type/range errors.
+ */
+static
+int apply_field_reloc(const struct lttng_event_desc *event_desc,
+ struct bytecode_runtime *runtime,
+ uint32_t runtime_len,
+ uint32_t reloc_offset,
+ const char *field_name,
+ enum bytecode_op bytecode_op)
+{
+ const struct lttng_event_field *fields, *field = NULL;
+ unsigned int nr_fields, i;
+ struct load_op *op;
+ uint32_t field_offset = 0;
+
+ dbg_printk("Apply field reloc: %u %s\n", reloc_offset, field_name);
+
+ /* Lookup event by name */
+ if (!event_desc)
+ return -EINVAL;
+ fields = event_desc->fields;
+ if (!fields)
+ return -EINVAL;
+ nr_fields = event_desc->nr_fields;
+ for (i = 0; i < nr_fields; i++) {
+ /* nofilter fields are invisible to filters and add no offset. */
+ if (fields[i].nofilter)
+ continue;
+ if (!strcmp(fields[i].name, field_name)) {
+ field = &fields[i];
+ break;
+ }
+ /* compute field offset (sizes of fields preceding the match) */
+ switch (fields[i].type.atype) {
+ case atype_integer:
+ case atype_enum_nestable:
+ field_offset += sizeof(int64_t);
+ break;
+ case atype_array_nestable:
+ if (!lttng_is_bytewise_integer(fields[i].type.u.array_nestable.elem_type))
+ return -EINVAL;
+ field_offset += sizeof(unsigned long);
+ field_offset += sizeof(void *);
+ break;
+ case atype_sequence_nestable:
+ if (!lttng_is_bytewise_integer(fields[i].type.u.sequence_nestable.elem_type))
+ return -EINVAL;
+ field_offset += sizeof(unsigned long);
+ field_offset += sizeof(void *);
+ break;
+ case atype_string:
+ field_offset += sizeof(void *);
+ break;
+ case atype_struct_nestable: /* Unsupported. */
+ case atype_variant_nestable: /* Unsupported. */
+ default:
+ return -EINVAL;
+ }
+ }
+ if (!field)
+ return -EINVAL;
+
+ /* Check if field offset is too large for 16-bit offset */
+ if (field_offset > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
+ return -EINVAL;
+
+ /* set type */
+ op = (struct load_op *) &runtime->code[reloc_offset];
+
+ switch (bytecode_op) {
+ case BYTECODE_OP_LOAD_FIELD_REF:
+ {
+ struct field_ref *field_ref;
+
+ field_ref = (struct field_ref *) op->data;
+ /* Select the type-specific opcode from the matched field's type. */
+ switch (field->type.atype) {
+ case atype_integer:
+ case atype_enum_nestable:
+ op->op = BYTECODE_OP_LOAD_FIELD_REF_S64;
+ break;
+ case atype_array_nestable:
+ case atype_sequence_nestable:
+ /* USER variants read through userspace accessors. */
+ if (field->user)
+ op->op = BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE;
+ else
+ op->op = BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE;
+ break;
+ case atype_string:
+ if (field->user)
+ op->op = BYTECODE_OP_LOAD_FIELD_REF_USER_STRING;
+ else
+ op->op = BYTECODE_OP_LOAD_FIELD_REF_STRING;
+ break;
+ case atype_struct_nestable: /* Unsupported. */
+ case atype_variant_nestable: /* Unsupported. */
+ default:
+ return -EINVAL;
+ }
+ /* set offset */
+ field_ref->offset = (uint16_t) field_offset;
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Relocate a GET_CONTEXT_REF instruction so it refers to the named
+ * context field: look up the field's index in the static context,
+ * rewrite the generic opcode into the type-specific one and store the
+ * index as the instruction offset. Returns 0, -ENOENT when the context
+ * is unknown, or -EINVAL on type/range errors.
+ */
+static
+int apply_context_reloc(struct bytecode_runtime *runtime,
+ uint32_t runtime_len,
+ uint32_t reloc_offset,
+ const char *context_name,
+ enum bytecode_op bytecode_op)
+{
+ struct load_op *op;
+ struct lttng_ctx_field *ctx_field;
+ int idx;
+
+ dbg_printk("Apply context reloc: %u %s\n", reloc_offset, context_name);
+
+ /* Get context index */
+ idx = lttng_get_context_index(lttng_static_ctx, context_name);
+ if (idx < 0)
+ return -ENOENT;
+
+ /* Check if idx is too large for 16-bit offset */
+ if (idx > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
+ return -EINVAL;
+
+ /* Get context return type (was mangled "<tng_static_ctx"; restored "&lttng_static_ctx") */
+ ctx_field = &lttng_static_ctx->fields[idx];
+ op = (struct load_op *) &runtime->code[reloc_offset];
+
+ switch (bytecode_op) {
+ case BYTECODE_OP_GET_CONTEXT_REF:
+ {
+ struct field_ref *field_ref;
+
+ field_ref = (struct field_ref *) op->data;
+ switch (ctx_field->event_field.type.atype) {
+ case atype_integer:
+ case atype_enum_nestable:
+ op->op = BYTECODE_OP_GET_CONTEXT_REF_S64;
+ break;
+ /* Sequence and array supported as string */
+ case atype_string:
+ BUG_ON(ctx_field->event_field.user);
+ op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
+ break;
+ case atype_array_nestable:
+ if (!lttng_is_bytewise_integer(ctx_field->event_field.type.u.array_nestable.elem_type))
+ return -EINVAL;
+ BUG_ON(ctx_field->event_field.user);
+ op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
+ break;
+ case atype_sequence_nestable:
+ if (!lttng_is_bytewise_integer(ctx_field->event_field.type.u.sequence_nestable.elem_type))
+ return -EINVAL;
+ BUG_ON(ctx_field->event_field.user);
+ op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
+ break;
+ case atype_struct_nestable: /* Unsupported. */
+ case atype_variant_nestable: /* Unsupported. */
+ default:
+ return -EINVAL;
+ }
+ /* set offset to context index within channel contexts */
+ field_ref->offset = (uint16_t) idx;
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Apply one relocation-table entry to the runtime bytecode: dispatch on
+ * the opcode found at reloc_offset. Symbol relocations are deferred to
+ * the specialize phase or the interpreter. Returns 0 or a negative
+ * errno. (Dropped the unreachable trailing "return 0;": every switch
+ * arm, including default, already returns.)
+ */
+static
+int apply_reloc(const struct lttng_event_desc *event_desc,
+ struct bytecode_runtime *runtime,
+ uint32_t runtime_len,
+ uint32_t reloc_offset,
+ const char *name)
+{
+ struct load_op *op;
+
+ dbg_printk("Apply reloc: %u %s\n", reloc_offset, name);
+
+ /* Ensure that the reloc is within the code */
+ if (runtime_len - reloc_offset < sizeof(uint16_t))
+ return -EINVAL;
+
+ op = (struct load_op *) &runtime->code[reloc_offset];
+ switch (op->op) {
+ case BYTECODE_OP_LOAD_FIELD_REF:
+ return apply_field_reloc(event_desc, runtime, runtime_len,
+ reloc_offset, name, op->op);
+ case BYTECODE_OP_GET_CONTEXT_REF:
+ return apply_context_reloc(runtime, runtime_len,
+ reloc_offset, name, op->op);
+ case BYTECODE_OP_GET_SYMBOL:
+ case BYTECODE_OP_GET_SYMBOL_FIELD:
+ /*
+ * Will be handled by load specialize phase or
+ * dynamically by interpreter.
+ */
+ return 0;
+ default:
+ printk(KERN_WARNING "LTTng: filter: Unknown reloc op type %u\n", op->op);
+ return -EINVAL;
+ }
+}
+
+/*
+ * Return 1 when the given bytecode program already has a runtime
+ * instance on the provided runtime list, 0 otherwise.
+ */
+static
+int bytecode_is_linked(struct lttng_bytecode_node *bytecode,
+ struct list_head *bytecode_runtime_head)
+{
+ struct lttng_bytecode_runtime *runtime;
+
+ list_for_each_entry(runtime, bytecode_runtime_head, node) {
+ if (runtime->bc == bytecode)
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Take a bytecode with reloc table and link it to an event to create a
+ * bytecode runtime. On success the runtime is inserted at insert_loc
+ * with a working interpreter. On link failure a runtime is still
+ * inserted, marked link_failed and wired to the "false" interpreter, so
+ * evaluation is safely disabled. Returns 0 or a negative errno.
+ */
+static
+int link_bytecode(const struct lttng_event_desc *event_desc,
+ struct lttng_ctx *ctx,
+ struct lttng_bytecode_node *bytecode,
+ struct list_head *insert_loc)
+{
+ int ret, offset, next_offset;
+ struct bytecode_runtime *runtime = NULL;
+ size_t runtime_alloc_len;
+
+ if (!bytecode)
+ return 0;
+ /* Bytecode already linked */
+ if (bytecode_is_linked(bytecode, insert_loc))
+ return 0;
+
+ dbg_printk("Linking...\n");
+
+ /* We don't need the reloc table in the runtime */
+ runtime_alloc_len = sizeof(*runtime) + bytecode->bc.reloc_offset;
+ runtime = kzalloc(runtime_alloc_len, GFP_KERNEL);
+ if (!runtime) {
+ ret = -ENOMEM;
+ goto alloc_error;
+ }
+ runtime->p.bc = bytecode;
+ runtime->p.ctx = ctx;
+ runtime->len = bytecode->bc.reloc_offset;
+ /* copy original bytecode (code only; relocs live past reloc_offset) */
+ memcpy(runtime->code, bytecode->bc.data, runtime->len);
+ /*
+ * apply relocs. Those are a uint16_t (offset in bytecode)
+ * followed by a string (field name).
+ */
+ for (offset = bytecode->bc.reloc_offset;
+ offset < bytecode->bc.len;
+ offset = next_offset) {
+ uint16_t reloc_offset =
+ *(uint16_t *) &bytecode->bc.data[offset];
+ const char *name =
+ (const char *) &bytecode->bc.data[offset + sizeof(uint16_t)];
+
+ ret = apply_reloc(event_desc, runtime, runtime->len, reloc_offset, name);
+ if (ret) {
+ goto link_error;
+ }
+ /* Next entry: skip the u16 offset and the NUL-terminated name. */
+ next_offset = offset + sizeof(uint16_t) + strlen(name) + 1;
+ }
+ /* Validate bytecode */
+ ret = lttng_bytecode_validate(runtime);
+ if (ret) {
+ goto link_error;
+ }
+ /* Specialize bytecode */
+ ret = lttng_bytecode_specialize(event_desc, runtime);
+ if (ret) {
+ goto link_error;
+ }
+
+ switch (bytecode->type) {
+ case LTTNG_BYTECODE_NODE_TYPE_FILTER:
+ runtime->p.interpreter_funcs.filter = lttng_bytecode_filter_interpret;
+ break;
+ case LTTNG_BYTECODE_NODE_TYPE_CAPTURE:
+ /*
+ * Capture starts on the "false" stub;
+ * lttng_bytecode_capture_sync_state() installs the real
+ * interpreter once the enabler is enabled.
+ */
+ runtime->p.interpreter_funcs.capture = lttng_bytecode_capture_interpret_false;
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ runtime->p.link_failed = 0;
+ list_add_rcu(&runtime->p.node, insert_loc);
+ dbg_printk("Linking successful.\n");
+ return 0;
+
+link_error:
+
+ switch (bytecode->type) {
+ case LTTNG_BYTECODE_NODE_TYPE_FILTER:
+ runtime->p.interpreter_funcs.filter = lttng_bytecode_filter_interpret_false;
+ break;
+ case LTTNG_BYTECODE_NODE_TYPE_CAPTURE:
+ runtime->p.interpreter_funcs.capture = lttng_bytecode_capture_interpret_false;
+ break;
+ default:
+ WARN_ON(1);
+ }
+ /* Keep the failed runtime on the list, but permanently disabled. */
+ runtime->p.link_failed = 1;
+ list_add_rcu(&runtime->p.node, insert_loc);
+alloc_error:
+ dbg_printk("Linking failed.\n");
+ return ret;
+}
+
+/*
+ * Sync the filter interpreter with enabler state: install the real
+ * interpreter only when the enabler is enabled and linking succeeded;
+ * otherwise install the always-false stub.
+ */
+void lttng_bytecode_filter_sync_state(struct lttng_bytecode_runtime *runtime)
+{
+ struct lttng_bytecode_node *node = runtime->bc;
+
+ if (node->enabler->enabled && !runtime->link_failed)
+ runtime->interpreter_funcs.filter = lttng_bytecode_filter_interpret;
+ else
+ runtime->interpreter_funcs.filter = lttng_bytecode_filter_interpret_false;
+}
+
+/*
+ * Sync the capture interpreter with enabler state: install the real
+ * interpreter only when the enabler is enabled and linking succeeded;
+ * otherwise install the always-false stub.
+ */
+void lttng_bytecode_capture_sync_state(struct lttng_bytecode_runtime *runtime)
+{
+ struct lttng_bytecode_node *node = runtime->bc;
+
+ if (node->enabler->enabled && !runtime->link_failed)
+ runtime->interpreter_funcs.capture = lttng_bytecode_capture_interpret;
+ else
+ runtime->interpreter_funcs.capture = lttng_bytecode_capture_interpret_false;
+}
+
+/*
+ * Given the lists of bytecode programs of an instance (trigger or event) and
+ * of a matching enabler, try to link all the enabler's bytecode programs with
+ * the instance.
+ *
+ * This function is called after we have confirmed that the enabler and
+ * the instance have matching names (or that the glob pattern matches).
+ */
+void lttng_enabler_link_bytecode(const struct lttng_event_desc *event_desc,
+ struct lttng_ctx *ctx,
+ struct list_head *instance_bytecode_head,
+ struct list_head *enabler_bytecode_head)
+{
+ struct lttng_bytecode_node *enabler_bc;
+ struct lttng_bytecode_runtime *runtime;
+
+ WARN_ON_ONCE(!event_desc);
+
+ /* Go over all the bytecode programs of the enabler. */
+ list_for_each_entry(enabler_bc, enabler_bytecode_head, node) {
+ int found = 0, ret;
+ struct list_head *insert_loc;
+
+ /*
+ * Check if the current enabler bytecode program is already
+ * linked with the instance.
+ */
+ list_for_each_entry(runtime, instance_bytecode_head, node) {
+ if (runtime->bc == enabler_bc) {
+ found = 1;
+ break;
+ }
+ }
+
+ /*
+ * Skip bytecode already linked, go to the next enabler
+ * bytecode program.
+ */
+ if (found)
+ continue;
+
+ /*
+ * Insert at specified priority (seqnum) in increasing
+ * order. If there already is a bytecode of the same priority,
+ * insert the new bytecode right after it. Reverse iteration
+ * finds the last runtime with seqnum <= the new one.
+ */
+ list_for_each_entry_reverse(runtime,
+ instance_bytecode_head, node) {
+ if (runtime->bc->bc.seqnum <= enabler_bc->bc.seqnum) {
+ /* insert here */
+ insert_loc = &runtime->node;
+ goto add_within;
+ }
+ }
+ /* No lower-or-equal seqnum found: insert at the list head. */
+ insert_loc = instance_bytecode_head;
+ add_within:
+ dbg_printk("linking bytecode\n");
+ ret = link_bytecode(event_desc, ctx, enabler_bc, insert_loc);
+ if (ret) {
+ dbg_printk("[lttng filter] warning: cannot link event bytecode\n");
+ }
+ }
+}
+
+/*
+ * We own the filter_bytecode if we return success.
+ */
+int lttng_filter_enabler_attach_bytecode(struct lttng_enabler *enabler,
+ struct lttng_bytecode_node *filter_bytecode)
+{
+ /* Take ownership: the node now lives on the enabler's bytecode list. */
+ list_add(&filter_bytecode->node, &enabler->filter_bytecode_head);
+ return 0;
+}
+
+/* Free every filter bytecode node attached to the enabler. */
+void lttng_free_enabler_filter_bytecode(struct lttng_enabler *enabler)
+{
+ struct lttng_bytecode_node *filter_bytecode, *tmp;
+
+ list_for_each_entry_safe(filter_bytecode, tmp,
+ &enabler->filter_bytecode_head, node) {
+ kfree(filter_bytecode);
+ }
+}
+
+/* Free the filter bytecode runtimes (and their data buffers) owned by an event. */
+void lttng_free_event_filter_runtime(struct lttng_event *event)
+{
+ struct bytecode_runtime *runtime, *tmp;
+
+ list_for_each_entry_safe(runtime, tmp,
+ &event->filter_bytecode_runtime_head, p.node) {
+ kfree(runtime->data);
+ kfree(runtime);
+ }
+}
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-counter-client-percpu-32-modular.c
+ *
+ * LTTng lib counter client. Per-cpu 32-bit counters in overflow
+ * arithmetic.
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+#include <counter/counter.h>
+#include <counter/counter-api.h>
+
+/*
+ * Counter configuration: per-CPU allocation and synchronization,
+ * 32-bit counters using modular (wrap-around) arithmetic.
+ */
+static const struct lib_counter_config client_config = {
+ .alloc = COUNTER_ALLOC_PER_CPU,
+ .sync = COUNTER_SYNC_PER_CPU,
+ .arithmetic = COUNTER_ARITHMETIC_MODULAR,
+ .counter_size = COUNTER_SIZE_32_BIT,
+};
+
+/* Create a counter with this client's config (thin wrapper over lttng_counter_create). */
+static struct lib_counter *counter_create(size_t nr_dimensions,
+ const size_t *max_nr_elem,
+ int64_t global_sum_step)
+{
+ return lttng_counter_create(&client_config, nr_dimensions, max_nr_elem,
+ global_sum_step);
+}
+
+/* Destroy a counter previously created by counter_create(). */
+static void counter_destroy(struct lib_counter *counter)
+{
+ /*
+ * Plain call: "return expr;" in a void function is a C constraint
+ * violation (C11 6.8.6.4p1), even when expr has void type.
+ */
+ lttng_counter_destroy(counter);
+}
+
+/* Add v to the counter element selected by dimension_indexes. */
+static int counter_add(struct lib_counter *counter, const size_t *dimension_indexes, int64_t v)
+{
+ return lttng_counter_add(&client_config, counter, dimension_indexes, v);
+}
+
+/* Read one element's value for a given cpu, reporting overflow/underflow flags. */
+static int counter_read(struct lib_counter *counter, const size_t *dimension_indexes, int cpu,
+ int64_t *value, bool *overflow, bool *underflow)
+{
+ return lttng_counter_read(&client_config, counter, dimension_indexes, cpu, value,
+ overflow, underflow);
+}
+
+/* Aggregate one element's value (no per-cpu selector; see lttng_counter_aggregate). */
+static int counter_aggregate(struct lib_counter *counter, const size_t *dimension_indexes,
+ int64_t *value, bool *overflow, bool *underflow)
+{
+ return lttng_counter_aggregate(&client_config, counter, dimension_indexes, value,
+ overflow, underflow);
+}
+
+/* Reset one counter element. */
+static int counter_clear(struct lib_counter *counter, const size_t *dimension_indexes)
+{
+ return lttng_counter_clear(&client_config, counter, dimension_indexes);
+}
+
+/* Counter transport descriptor registered with the LTTng core at init. */
+static struct lttng_counter_transport lttng_counter_transport = {
+ .name = "counter-per-cpu-32-modular",
+ .owner = THIS_MODULE,
+ .ops = {
+ .counter_create = counter_create,
+ .counter_destroy = counter_destroy,
+ .counter_add = counter_add,
+ .counter_read = counter_read,
+ .counter_aggregate = counter_aggregate,
+ .counter_clear = counter_clear,
+ },
+};
+
+/* Module init: sync vmalloc mappings, then register this counter transport. */
+static int __init lttng_counter_client_init(void)
+{
+ /*
+ * This vmalloc sync all also takes care of the lib counter
+ * vmalloc'd module pages when it is built as a module into LTTng.
+ */
+ wrapper_vmalloc_sync_mappings();
+ /* Restored "&lttng_counter_transport" (source was mangled to "<tng_..."). */
+ lttng_counter_transport_register(&lttng_counter_transport);
+ return 0;
+}
+
+module_init(lttng_counter_client_init);
+
+/* Module exit: unregister this counter transport. */
+static void __exit lttng_counter_client_exit(void)
+{
+ /* Restored "&lttng_counter_transport" (source was mangled to "<tng_..."). */
+ lttng_counter_transport_unregister(&lttng_counter_transport);
+}
+
+module_exit(lttng_counter_client_exit);
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
+MODULE_DESCRIPTION("LTTng counter per-cpu 32-bit overflow client");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+ __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+ __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+ LTTNG_MODULES_EXTRAVERSION);
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-counter-client-percpu-64-modular.c
+ *
+ * LTTng lib counter client. Per-cpu 64-bit counters in overflow
+ * arithmetic.
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+#include <counter/counter.h>
+#include <counter/counter-api.h>
+
+/*
+ * Counter configuration: per-CPU allocation and synchronization,
+ * 64-bit counters using modular (wrap-around) arithmetic.
+ */
+static const struct lib_counter_config client_config = {
+ .alloc = COUNTER_ALLOC_PER_CPU,
+ .sync = COUNTER_SYNC_PER_CPU,
+ .arithmetic = COUNTER_ARITHMETIC_MODULAR,
+ .counter_size = COUNTER_SIZE_64_BIT,
+};
+
+/* Create a counter with this client's config (thin wrapper over lttng_counter_create). */
+static struct lib_counter *counter_create(size_t nr_dimensions,
+ const size_t *max_nr_elem,
+ int64_t global_sum_step)
+{
+ return lttng_counter_create(&client_config, nr_dimensions, max_nr_elem,
+ global_sum_step);
+}
+
+/* Destroy a counter previously created by counter_create(). */
+static void counter_destroy(struct lib_counter *counter)
+{
+ /*
+ * Plain call: "return expr;" in a void function is a C constraint
+ * violation (C11 6.8.6.4p1), even when expr has void type.
+ */
+ lttng_counter_destroy(counter);
+}
+
+/* Add v to the counter element selected by dimension_indexes. */
+static int counter_add(struct lib_counter *counter, const size_t *dimension_indexes, int64_t v)
+{
+ return lttng_counter_add(&client_config, counter, dimension_indexes, v);
+}
+
+/* Read one element's value for a given cpu, reporting overflow/underflow flags. */
+static int counter_read(struct lib_counter *counter, const size_t *dimension_indexes, int cpu,
+ int64_t *value, bool *overflow, bool *underflow)
+{
+ return lttng_counter_read(&client_config, counter, dimension_indexes, cpu, value,
+ overflow, underflow);
+}
+
+/* Aggregate one element's value (no per-cpu selector; see lttng_counter_aggregate). */
+static int counter_aggregate(struct lib_counter *counter, const size_t *dimension_indexes,
+ int64_t *value, bool *overflow, bool *underflow)
+{
+ return lttng_counter_aggregate(&client_config, counter, dimension_indexes, value,
+ overflow, underflow);
+}
+
+/* Reset one counter element. */
+static int counter_clear(struct lib_counter *counter, const size_t *dimension_indexes)
+{
+ return lttng_counter_clear(&client_config, counter, dimension_indexes);
+}
+
+/* Counter transport descriptor registered with the LTTng core at init. */
+static struct lttng_counter_transport lttng_counter_transport = {
+ .name = "counter-per-cpu-64-modular",
+ .owner = THIS_MODULE,
+ .ops = {
+ .counter_create = counter_create,
+ .counter_destroy = counter_destroy,
+ .counter_add = counter_add,
+ .counter_read = counter_read,
+ .counter_aggregate = counter_aggregate,
+ .counter_clear = counter_clear,
+ },
+};
+
+/* Module init: sync vmalloc mappings, then register this counter transport. */
+static int __init lttng_counter_client_init(void)
+{
+ /*
+ * This vmalloc sync all also takes care of the lib counter
+ * vmalloc'd module pages when it is built as a module into LTTng.
+ */
+ wrapper_vmalloc_sync_mappings();
+ /* Restored "&lttng_counter_transport" (source was mangled to "<tng_..."). */
+ lttng_counter_transport_register(&lttng_counter_transport);
+ return 0;
+}
+
+module_init(lttng_counter_client_init);
+
+/* Module exit: unregister this counter transport. */
+static void __exit lttng_counter_client_exit(void)
+{
+ /* Restored "&lttng_counter_transport" (source was mangled to "<tng_..."). */
+ lttng_counter_transport_unregister(&lttng_counter_transport);
+}
+
+module_exit(lttng_counter_client_exit);
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
+/* Fixed copy-paste from the 32-bit client: this module is the 64-bit one. */
+MODULE_DESCRIPTION("LTTng counter per-cpu 64-bit overflow client");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+ __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+ __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+ LTTNG_MODULES_EXTRAVERSION);
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <wrapper/file.h>
-#include <linux/jhash.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/dmi.h>
#include <wrapper/types.h>
#include <lttng/kernel-version.h>
#include <lttng/events.h>
+#include <lttng/lttng-bytecode.h>
#include <lttng/tracer.h>
+#include <lttng/trigger-notification.h>
#include <lttng/abi-old.h>
#include <lttng/endian.h>
#include <lttng/string-utils.h>
+#include <lttng/utils.h>
#include <ringbuffer/backend.h>
#include <ringbuffer/frontend.h>
#include <wrapper/time.h>
#define METADATA_CACHE_DEFAULT_SIZE 4096
static LIST_HEAD(sessions);
+static LIST_HEAD(trigger_groups);
static LIST_HEAD(lttng_transport_list);
+static LIST_HEAD(lttng_counter_transport_list);
/*
* Protect the sessions and metadata caches.
*/
static DEFINE_MUTEX(sessions_mutex);
static struct kmem_cache *event_cache;
+static struct kmem_cache *trigger_cache;
-static void lttng_session_lazy_sync_enablers(struct lttng_session *session);
-static void lttng_session_sync_enablers(struct lttng_session *session);
-static void lttng_enabler_destroy(struct lttng_enabler *enabler);
+static void lttng_session_lazy_sync_event_enablers(struct lttng_session *session);
+static void lttng_session_sync_event_enablers(struct lttng_session *session);
+static void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler);
+static void lttng_trigger_enabler_destroy(struct lttng_trigger_enabler *trigger_enabler);
+static void lttng_trigger_group_sync_enablers(struct lttng_trigger_group *trigger_group);
static void _lttng_event_destroy(struct lttng_event *event);
+static void _lttng_trigger_destroy(struct lttng_trigger *trigger);
static void _lttng_channel_destroy(struct lttng_channel *chan);
static int _lttng_event_unregister(struct lttng_event *event);
+static int _lttng_trigger_unregister(struct lttng_trigger *trigger);
static
int _lttng_event_metadata_statedump(struct lttng_session *session,
struct lttng_channel *chan,
mutex_unlock(&sessions_mutex);
}
+/*
+ * Look up a registered ring-buffer transport by name; returns NULL when
+ * no transport matches. (Restored "&lttng_transport_list"; source was
+ * mangled to "<tng_transport_list".)
+ */
+static struct lttng_transport *lttng_transport_find(const char *name)
+{
+ struct lttng_transport *transport;
+
+ list_for_each_entry(transport, &lttng_transport_list, node) {
+ if (!strcmp(transport->name, name))
+ return transport;
+ }
+ return NULL;
+}
+
/*
* Called with sessions lock held.
*/
return NULL;
}
+/*
+ * Look up a registered counter transport by name; returns NULL when no
+ * transport matches. (Restored "&lttng_counter_transport_list"; source
+ * was mangled to "<tng_counter_transport_list".)
+ */
+static
+struct lttng_counter_transport *lttng_counter_transport_find(const char *name)
+{
+ struct lttng_counter_transport *transport;
+
+ list_for_each_entry(transport, &lttng_counter_transport_list, node) {
+ if (!strcmp(transport->name, name))
+ return transport;
+ }
+ return NULL;
+}
+
+/*
+ * Create a counter instance backed by the named counter transport:
+ * find the transport, pin its module, allocate the wrapper and create
+ * the underlying lib counter. Returns NULL on any failure, releasing
+ * whatever was acquired.
+ */
+struct lttng_counter *lttng_kernel_counter_create(
+ const char *counter_transport_name,
+ size_t number_dimensions, const size_t *dimensions_sizes)
+{
+ struct lttng_counter *counter = NULL;
+ struct lttng_counter_transport *counter_transport = NULL;
+
+ counter_transport = lttng_counter_transport_find(counter_transport_name);
+ if (!counter_transport) {
+ printk(KERN_WARNING "LTTng: counter transport %s not found.\n",
+ counter_transport_name);
+ goto notransport;
+ }
+ if (!try_module_get(counter_transport->owner)) {
+ printk(KERN_WARNING "LTTng: Can't lock counter transport module.\n");
+ goto notransport;
+ }
+
+ counter = kzalloc(sizeof(struct lttng_counter), GFP_KERNEL);
+ if (!counter)
+ goto nomem;
+
+ /* Borrow ops/transport from the matched counter transport. */
+ counter->ops = &counter_transport->ops;
+ counter->transport = counter_transport;
+
+ /* global_sum_step is 0: no per-cpu to global propagation threshold. */
+ counter->counter = counter->ops->counter_create(
+ number_dimensions, dimensions_sizes, 0);
+ if (!counter->counter) {
+ goto create_error;
+ }
+
+ return counter;
+
+create_error:
+ kfree(counter);
+nomem:
+ /* counter_transport is always non-NULL here; kept as a defensive check. */
+ if (counter_transport)
+ module_put(counter_transport->owner);
+notransport:
+ return NULL;
+}
+
+/*
+ * Create a trigger group: pin the "relay-trigger" transport, create its
+ * notification channel, initialize the enabler/trigger lists and hash
+ * table, and register the group on the global trigger_groups list.
+ * Returns NULL on failure. Takes sessions_mutex.
+ */
+struct lttng_trigger_group *lttng_trigger_group_create(void)
+{
+ struct lttng_transport *transport = NULL;
+ struct lttng_trigger_group *trigger_group;
+ const char *transport_name = "relay-trigger";
+ size_t subbuf_size = 4096; //TODO
+ size_t num_subbuf = 16; //TODO
+ unsigned int switch_timer_interval = 0;
+ unsigned int read_timer_interval = 0;
+ int i;
+
+ mutex_lock(&sessions_mutex);
+
+ transport = lttng_transport_find(transport_name);
+ if (!transport) {
+ printk(KERN_WARNING "LTTng transport %s not found\n",
+ transport_name);
+ goto notransport;
+ }
+ if (!try_module_get(transport->owner)) {
+ printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
+ goto notransport;
+ }
+
+ trigger_group = lttng_kvzalloc(sizeof(struct lttng_trigger_group),
+ GFP_KERNEL);
+ if (!trigger_group)
+ goto nomem;
+
+ trigger_group->ops = &transport->ops;
+ trigger_group->chan = transport->ops.channel_create(transport_name,
+ trigger_group, NULL, subbuf_size, num_subbuf,
+ switch_timer_interval, read_timer_interval);
+ if (!trigger_group->chan)
+ goto create_error;
+
+ trigger_group->transport = transport;
+ INIT_LIST_HEAD(&trigger_group->enablers_head);
+ INIT_LIST_HEAD(&trigger_group->triggers_head);
+ /* Empty per-group trigger hash table. */
+ for (i = 0; i < LTTNG_TRIGGER_HT_SIZE; i++)
+ INIT_HLIST_HEAD(&trigger_group->triggers_ht.table[i]);
+
+ list_add(&trigger_group->node, &trigger_groups);
+ mutex_unlock(&sessions_mutex);
+
+ return trigger_group;
+
+create_error:
+ lttng_kvfree(trigger_group);
+nomem:
+ if (transport)
+ module_put(transport->owner);
+notransport:
+ mutex_unlock(&sessions_mutex);
+ return NULL;
+}
+
void metadata_cache_destroy(struct kref *kref)
{
struct lttng_metadata_cache *cache =
struct lttng_channel *chan, *tmpchan;
struct lttng_event *event, *tmpevent;
struct lttng_metadata_stream *metadata_stream;
- struct lttng_enabler *enabler, *tmpenabler;
+ struct lttng_event_enabler *event_enabler, *tmp_event_enabler;
int ret;
mutex_lock(&sessions_mutex);
WRITE_ONCE(session->active, 0);
list_for_each_entry(chan, &session->chan, list) {
- ret = lttng_syscalls_unregister(chan);
+ ret = lttng_syscalls_unregister_event(chan);
WARN_ON(ret);
}
list_for_each_entry(event, &session->events, list) {
}
synchronize_trace(); /* Wait for in-flight events to complete */
list_for_each_entry(chan, &session->chan, list) {
- ret = lttng_syscalls_destroy(chan);
+ ret = lttng_syscalls_destroy_event(chan);
WARN_ON(ret);
}
- list_for_each_entry_safe(enabler, tmpenabler,
+ list_for_each_entry_safe(event_enabler, tmp_event_enabler,
&session->enablers_head, node)
- lttng_enabler_destroy(enabler);
+ lttng_event_enabler_destroy(event_enabler);
list_for_each_entry_safe(event, tmpevent, &session->events, list)
_lttng_event_destroy(event);
list_for_each_entry_safe(chan, tmpchan, &session->chan, list) {
lttng_kvfree(session);
}
+/*
+ * Tear down a trigger group: unregister triggers and syscall probes,
+ * wait for in-flight users, destroy enablers, triggers, the optional
+ * error counter and the notification channel, then release the
+ * transport module and free the group. Takes sessions_mutex.
+ */
+void lttng_trigger_group_destroy(struct lttng_trigger_group *trigger_group)
+{
+ struct lttng_trigger_enabler *trigger_enabler, *tmp_trigger_enabler;
+ struct lttng_trigger *trigger, *tmptrigger;
+ int ret;
+
+ if (!trigger_group)
+ return;
+
+ mutex_lock(&sessions_mutex);
+
+ ret = lttng_syscalls_unregister_trigger(trigger_group);
+ WARN_ON(ret);
+
+ /* Unregister first so no new trigger activations are started. */
+ list_for_each_entry_safe(trigger, tmptrigger,
+ &trigger_group->triggers_head, list) {
+ ret = _lttng_trigger_unregister(trigger);
+ WARN_ON(ret);
+ }
+
+ synchronize_trace(); /* Wait for in-flight triggers to complete */
+
+ irq_work_sync(&trigger_group->wakeup_pending);
+
+ list_for_each_entry_safe(trigger_enabler, tmp_trigger_enabler,
+ &trigger_group->enablers_head, node)
+ lttng_trigger_enabler_destroy(trigger_enabler);
+
+ list_for_each_entry_safe(trigger, tmptrigger,
+ &trigger_group->triggers_head, list)
+ _lttng_trigger_destroy(trigger);
+
+ /* The error counter is optional; destroy it only when present. */
+ if (trigger_group->error_counter) {
+ struct lttng_counter *error_counter = trigger_group->error_counter;
+ error_counter->ops->counter_destroy(error_counter->counter);
+ module_put(error_counter->transport->owner);
+ lttng_kvfree(error_counter);
+ trigger_group->error_counter = NULL;
+ }
+ trigger_group->ops->channel_destroy(trigger_group->chan);
+ module_put(trigger_group->transport->owner);
+ list_del(&trigger_group->node);
+ mutex_unlock(&sessions_mutex);
+ lttng_kvfree(trigger_group);
+}
+
int lttng_session_statedump(struct lttng_session *session)
{
int ret;
session->tstate = 1;
/* We need to sync enablers with session before activation. */
- lttng_session_sync_enablers(session);
+ lttng_session_sync_event_enablers(session);
/*
* Snapshot the number of events per channel to know the type of header
/* Set transient enabler state to "disabled" */
session->tstate = 0;
- lttng_session_sync_enablers(session);
+ lttng_session_sync_event_enablers(session);
/* Set each stream's quiescent state. */
list_for_each_entry(chan, &session->chan, list) {
}
/* Set transient enabler state to "enabled" */
channel->tstate = 1;
- lttng_session_sync_enablers(channel->session);
+ lttng_session_sync_event_enablers(channel->session);
/* Set atomically the state to "enabled" */
WRITE_ONCE(channel->enabled, 1);
end:
WRITE_ONCE(channel->enabled, 0);
/* Set transient enabler state to "enabled" */
channel->tstate = 0;
- lttng_session_sync_enablers(channel->session);
+ lttng_session_sync_event_enablers(channel->session);
end:
mutex_unlock(&sessions_mutex);
return ret;
return ret;
}
-static struct lttng_transport *lttng_transport_find(const char *name)
+/*
+ * Enable a trigger. Only kprobe and uprobe triggers can be toggled
+ * here; tracepoint and syscall triggers return -EINVAL, and an already
+ * enabled trigger returns -EEXIST. Takes sessions_mutex.
+ */
+int lttng_trigger_enable(struct lttng_trigger *trigger)
{
- struct lttng_transport *transport;
+ int ret = 0;
- list_for_each_entry(transport, <tng_transport_list, node) {
- if (!strcmp(transport->name, name))
- return transport;
+ mutex_lock(&sessions_mutex);
+ if (trigger->enabled) {
+ ret = -EEXIST;
+ goto end;
}
- return NULL;
+ switch (trigger->instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ case LTTNG_KERNEL_SYSCALL:
+ ret = -EINVAL;
+ break;
+ case LTTNG_KERNEL_KPROBE:
+ case LTTNG_KERNEL_UPROBE:
+ WRITE_ONCE(trigger->enabled, 1);
+ break;
+ case LTTNG_KERNEL_FUNCTION:
+ case LTTNG_KERNEL_NOOP:
+ case LTTNG_KERNEL_KRETPROBE:
+ default:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ }
+end:
+ mutex_unlock(&sessions_mutex);
+ return ret;
+}
+
+/*
+ * Disable a trigger. Mirrors lttng_trigger_enable: only kprobe and
+ * uprobe triggers can be toggled, and an already disabled trigger
+ * returns -EEXIST. Takes sessions_mutex.
+ */
+int lttng_trigger_disable(struct lttng_trigger *trigger)
+{
+ int ret = 0;
+
+ mutex_lock(&sessions_mutex);
+ if (!trigger->enabled) {
+ ret = -EEXIST;
+ goto end;
+ }
+ switch (trigger->instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ case LTTNG_KERNEL_SYSCALL:
+ ret = -EINVAL;
+ break;
+ case LTTNG_KERNEL_KPROBE:
+ case LTTNG_KERNEL_UPROBE:
+ WRITE_ONCE(trigger->enabled, 0);
+ break;
+ case LTTNG_KERNEL_FUNCTION:
+ case LTTNG_KERNEL_NOOP:
+ case LTTNG_KERNEL_KRETPROBE:
+ default:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ }
+end:
+ mutex_unlock(&sessions_mutex);
+ return ret;
}
struct lttng_channel *lttng_channel_create(struct lttng_session *session,
wake_up_interruptible(&stream->read_wait);
}
+
/*
* Supports event creation while tracing session is active.
* Needs to be called with sessions mutex held.
struct lttng_event *event;
const char *event_name;
struct hlist_head *head;
- size_t name_len;
- uint32_t hash;
int ret;
if (chan->free_event_id == -1U) {
ret = -EINVAL;
goto type_error;
}
- name_len = strlen(event_name);
- hash = jhash(event_name, name_len, 0);
- head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
+
+ head = utils_borrow_hash_table_bucket(session->events_ht.table,
+ LTTNG_EVENT_HT_SIZE, event_name);
lttng_hlist_for_each_entry(event, head, hlist) {
WARN_ON_ONCE(!event->desc);
if (!strncmp(event->desc->name, event_name,
event->id = chan->free_event_id++;
event->instrumentation = itype;
event->evtype = LTTNG_TYPE_EVENT;
- INIT_LIST_HEAD(&event->bytecode_runtime_head);
+ INIT_LIST_HEAD(&event->filter_bytecode_runtime_head);
INIT_LIST_HEAD(&event->enablers_ref_head);
switch (itype) {
/* Event will be enabled by enabler sync. */
event->enabled = 0;
event->registered = 0;
- event->desc = lttng_event_get(event_name);
+ event->desc = lttng_event_desc_get(event_name);
if (!event->desc) {
ret = -ENOENT;
goto register_error;
* registration.
*/
smp_wmb();
- ret = lttng_kprobes_register(event_name,
+ ret = lttng_kprobes_register_event(event_name,
event_param->u.kprobe.symbol_name,
event_param->u.kprobe.offset,
event_param->u.kprobe.addr,
*/
smp_wmb();
- ret = lttng_uprobes_register(event_param->name,
+ ret = lttng_uprobes_register_event(event_param->name,
event_param->u.uprobe.fd,
event);
if (ret)
return ERR_PTR(ret);
}
+struct lttng_trigger *_lttng_trigger_create(
+ const struct lttng_event_desc *event_desc,
+ uint64_t id, uint64_t error_counter_index,
+ struct lttng_trigger_group *trigger_group,
+ struct lttng_kernel_trigger *trigger_param, void *filter,
+ enum lttng_kernel_instrumentation itype)
+{
+ struct lttng_trigger *trigger;
+ const char *event_name;
+ struct hlist_head *head;
+ int ret;
+ size_t dimension_index[1];
+
+ switch (itype) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ event_name = event_desc->name;
+ break;
+ case LTTNG_KERNEL_KPROBE:
+ case LTTNG_KERNEL_UPROBE:
+ case LTTNG_KERNEL_SYSCALL:
+ event_name = trigger_param->name;
+ break;
+ case LTTNG_KERNEL_KRETPROBE:
+ case LTTNG_KERNEL_FUNCTION:
+ case LTTNG_KERNEL_NOOP:
+ default:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ goto type_error;
+ }
+
+ head = utils_borrow_hash_table_bucket(trigger_group->triggers_ht.table,
+ LTTNG_TRIGGER_HT_SIZE, event_name);
+ lttng_hlist_for_each_entry(trigger, head, hlist) {
+ WARN_ON_ONCE(!trigger->desc);
+ if (!strncmp(trigger->desc->name, event_name,
+ LTTNG_KERNEL_SYM_NAME_LEN - 1)
+ && trigger_group == trigger->group
+ && id == trigger->id) {
+ ret = -EEXIST;
+ goto exist;
+ }
+ }
+
+ trigger = kmem_cache_zalloc(trigger_cache, GFP_KERNEL);
+ if (!trigger) {
+ ret = -ENOMEM;
+ goto cache_error;
+ }
+ trigger->group = trigger_group;
+ trigger->id = id;
+ trigger->error_counter_index = error_counter_index;
+ trigger->num_captures = 0;
+ trigger->filter = filter;
+ trigger->instrumentation = itype;
+ trigger->evtype = LTTNG_TYPE_EVENT;
+ trigger->send_notification = lttng_trigger_notification_send;
+ INIT_LIST_HEAD(&trigger->filter_bytecode_runtime_head);
+ INIT_LIST_HEAD(&trigger->capture_bytecode_runtime_head);
+ INIT_LIST_HEAD(&trigger->enablers_ref_head);
+
+ switch (itype) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ /* Event will be enabled by enabler sync. */
+ trigger->enabled = 0;
+ trigger->registered = 0;
+ trigger->desc = lttng_event_desc_get(event_name);
+ if (!trigger->desc) {
+ ret = -ENOENT;
+ goto register_error;
+ }
+ /* Populate lttng_trigger structure before event registration. */
+ smp_wmb();
+ break;
+ case LTTNG_KERNEL_KPROBE:
+ /*
+ * Needs to be explicitly enabled after creation, since
+ * we may want to apply filters.
+ */
+ trigger->enabled = 0;
+ trigger->registered = 1;
+ /*
+ * Populate lttng_trigger structure before event
+ * registration.
+ */
+ smp_wmb();
+ ret = lttng_kprobes_register_trigger(
+ trigger_param->u.kprobe.symbol_name,
+ trigger_param->u.kprobe.offset,
+ trigger_param->u.kprobe.addr,
+ trigger);
+ if (ret) {
+ ret = -EINVAL;
+ goto register_error;
+ }
+ ret = try_module_get(trigger->desc->owner);
+ WARN_ON_ONCE(!ret);
+ break;
+ case LTTNG_KERNEL_NOOP:
+ case LTTNG_KERNEL_SYSCALL:
+ /*
+ * Needs to be explicitly enabled after creation, since
+ * we may want to apply filters.
+ */
+ trigger->enabled = 0;
+ trigger->registered = 0;
+ trigger->desc = event_desc;
+ if (!trigger->desc) {
+ ret = -EINVAL;
+ goto register_error;
+ }
+ break;
+ case LTTNG_KERNEL_UPROBE:
+ /*
+ * Needs to be explicitly enabled after creation, since
+ * we may want to apply filters.
+ */
+ trigger->enabled = 0;
+ trigger->registered = 1;
+
+ /*
+ * Populate lttng_trigger structure before trigger
+ * registration.
+ */
+ smp_wmb();
+
+ ret = lttng_uprobes_register_trigger(trigger_param->name,
+ trigger_param->u.uprobe.fd,
+ trigger);
+ if (ret)
+ goto register_error;
+ ret = try_module_get(trigger->desc->owner);
+ WARN_ON_ONCE(!ret);
+ break;
+ case LTTNG_KERNEL_KRETPROBE:
+ case LTTNG_KERNEL_FUNCTION:
+ default:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ goto register_error;
+ }
+
+ list_add(&trigger->list, &trigger_group->triggers_head);
+ hlist_add_head(&trigger->hlist, head);
+
+ /*
+ * Clear the error counter bucket. The sessiond keeps track of which
+ * bucket is currently in use. We trust it.
+ */
+ if (trigger_group->error_counter) {
+ /*
+ * Check that the index is within the boundary of the counter.
+ */
+ if (trigger->error_counter_index >= trigger_group->error_counter_len) {
+ printk(KERN_INFO "LTTng: Trigger: Error counter index out-of-bound: counter-len=%zu, index=%llu\n",
+ trigger_group->error_counter_len, trigger->error_counter_index);
+ ret = -EINVAL;
+ goto register_error;
+ }
+
+ dimension_index[0] = trigger->error_counter_index;
+ ret = trigger_group->error_counter->ops->counter_clear(
+ trigger_group->error_counter->counter,
+ dimension_index);
+ if (ret) {
+ printk(KERN_INFO "LTTng: Trigger: Unable to clear error counter bucket %llu\n",
+ trigger->error_counter_index);
+ goto register_error;
+ }
+ }
+
+ return trigger;
+
+register_error:
+ kmem_cache_free(trigger_cache, trigger);
+cache_error:
+exist:
+type_error:
+ return ERR_PTR(ret);
+}
+
+int lttng_kernel_counter_value(struct lttng_counter *counter,
+ const size_t *dim_indexes, int64_t *val)
+{
+ int ret;
+ bool overflow, underflow;
+
+ ret = counter->ops->counter_aggregate(counter->counter, dim_indexes,
+ val, &overflow, &underflow);
+ if (ret) {
+ printk(KERN_WARNING "LTTng: Error getting counter value.\n");
+ goto error;
+ }
+
+ if (overflow)
+ printk(KERN_WARNING "LTTng: counter overflow detected.\n");
+
+ if (underflow)
+ printk(KERN_WARNING "LTTng: counter underflow detected.\n");
+
+error:
+ return ret;
+}
+
struct lttng_event *lttng_event_create(struct lttng_channel *chan,
struct lttng_kernel_event *event_param,
void *filter,
return event;
}
+struct lttng_trigger *lttng_trigger_create(
+ const struct lttng_event_desc *event_desc,
+ uint64_t id, uint64_t error_counter_index,
+ struct lttng_trigger_group *trigger_group,
+ struct lttng_kernel_trigger *trigger_param, void *filter,
+ enum lttng_kernel_instrumentation itype)
+{
+ struct lttng_trigger *trigger;
+
+ mutex_lock(&sessions_mutex);
+ trigger = _lttng_trigger_create(event_desc, id, error_counter_index,
+ trigger_group, trigger_param, filter, itype);
+ mutex_unlock(&sessions_mutex);
+ return trigger;
+}
+
/* Only used for tracepoints for now. */
static
void register_event(struct lttng_event *event)
event);
break;
case LTTNG_KERNEL_SYSCALL:
- ret = lttng_syscall_filter_enable(event->chan, event);
+ ret = lttng_syscall_filter_enable_event(event->chan, event);
break;
case LTTNG_KERNEL_KPROBE:
case LTTNG_KERNEL_UPROBE:
event);
break;
case LTTNG_KERNEL_KPROBE:
- lttng_kprobes_unregister(event);
+ lttng_kprobes_unregister_event(event);
ret = 0;
break;
case LTTNG_KERNEL_KRETPROBE:
ret = 0;
break;
case LTTNG_KERNEL_SYSCALL:
- ret = lttng_syscall_filter_disable(event->chan, event);
+ ret = lttng_syscall_filter_disable_event(event->chan, event);
break;
case LTTNG_KERNEL_NOOP:
ret = 0;
break;
case LTTNG_KERNEL_UPROBE:
- lttng_uprobes_unregister(event);
+ lttng_uprobes_unregister_event(event);
ret = 0;
break;
case LTTNG_KERNEL_FUNCTION: /* Fall-through */
return ret;
}
+/* Only used for tracepoints for now. */
+static
+void register_trigger(struct lttng_trigger *trigger)
+{
+ const struct lttng_event_desc *desc;
+ int ret = -EINVAL;
+
+ if (trigger->registered)
+ return;
+
+ desc = trigger->desc;
+ switch (trigger->instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
+ desc->trigger_callback,
+ trigger);
+ break;
+ case LTTNG_KERNEL_SYSCALL:
+ ret = lttng_syscall_filter_enable_trigger(trigger);
+ break;
+ case LTTNG_KERNEL_KPROBE:
+ case LTTNG_KERNEL_UPROBE:
+ ret = 0;
+ break;
+ case LTTNG_KERNEL_KRETPROBE:
+ case LTTNG_KERNEL_FUNCTION:
+ case LTTNG_KERNEL_NOOP:
+ default:
+ WARN_ON_ONCE(1);
+ }
+ if (!ret)
+ trigger->registered = 1;
+}
+
+static
+int _lttng_trigger_unregister(struct lttng_trigger *trigger)
+{
+ const struct lttng_event_desc *desc;
+ int ret = -EINVAL;
+
+ if (!trigger->registered)
+ return 0;
+
+ desc = trigger->desc;
+ switch (trigger->instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ ret = lttng_wrapper_tracepoint_probe_unregister(desc->kname,
+ desc->trigger_callback,
+ trigger);
+ break;
+ case LTTNG_KERNEL_KPROBE:
+ lttng_kprobes_unregister_trigger(trigger);
+ ret = 0;
+ break;
+ case LTTNG_KERNEL_UPROBE:
+ lttng_uprobes_unregister_trigger(trigger);
+ ret = 0;
+ break;
+ case LTTNG_KERNEL_SYSCALL:
+ ret = lttng_syscall_filter_disable_trigger(trigger);
+ break;
+ case LTTNG_KERNEL_KRETPROBE:
+ case LTTNG_KERNEL_FUNCTION:
+ case LTTNG_KERNEL_NOOP:
+ default:
+ WARN_ON_ONCE(1);
+ }
+ if (!ret)
+ trigger->registered = 0;
+ return ret;
+}
+
/*
* Only used internally at session destruction.
*/
{
switch (event->instrumentation) {
case LTTNG_KERNEL_TRACEPOINT:
- lttng_event_put(event->desc);
+ lttng_event_desc_put(event->desc);
break;
case LTTNG_KERNEL_KPROBE:
module_put(event->desc->owner);
- lttng_kprobes_destroy_private(event);
+ lttng_kprobes_destroy_event_private(event);
break;
case LTTNG_KERNEL_KRETPROBE:
module_put(event->desc->owner);
break;
case LTTNG_KERNEL_UPROBE:
module_put(event->desc->owner);
- lttng_uprobes_destroy_private(event);
+ lttng_uprobes_destroy_event_private(event);
break;
case LTTNG_KERNEL_FUNCTION: /* Fall-through */
default:
kmem_cache_free(event_cache, event);
}
+/*
+ * Only used internally at session destruction.
+ */
+static
+void _lttng_trigger_destroy(struct lttng_trigger *trigger)
+{
+ switch (trigger->instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ lttng_event_desc_put(trigger->desc);
+ break;
+ case LTTNG_KERNEL_KPROBE:
+ module_put(trigger->desc->owner);
+ lttng_kprobes_destroy_trigger_private(trigger);
+ break;
+ case LTTNG_KERNEL_NOOP:
+ case LTTNG_KERNEL_SYSCALL:
+ break;
+ case LTTNG_KERNEL_UPROBE:
+ module_put(trigger->desc->owner);
+ lttng_uprobes_destroy_trigger_private(trigger);
+ break;
+ case LTTNG_KERNEL_KRETPROBE:
+ case LTTNG_KERNEL_FUNCTION:
+ default:
+ WARN_ON_ONCE(1);
+ }
+ list_del(&trigger->list);
+ kmem_cache_free(trigger_cache, trigger);
+}
+
struct lttng_id_tracker *get_tracker(struct lttng_session *session,
enum tracker_type tracker_type)
{
return 1;
}
-static
int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
struct lttng_enabler *enabler)
{
switch (enabler->event_param.instrumentation) {
case LTTNG_KERNEL_TRACEPOINT:
desc_name = desc->name;
- switch (enabler->type) {
- case LTTNG_ENABLER_STAR_GLOB:
+ switch (enabler->format_type) {
+ case LTTNG_ENABLER_FORMAT_STAR_GLOB:
return lttng_match_enabler_star_glob(desc_name, enabler_name);
- case LTTNG_ENABLER_NAME:
+ case LTTNG_ENABLER_FORMAT_NAME:
return lttng_match_enabler_name(desc_name, enabler_name);
default:
return -EINVAL;
}
switch (enabler->event_param.u.syscall.match) {
case LTTNG_SYSCALL_MATCH_NAME:
- switch (enabler->type) {
- case LTTNG_ENABLER_STAR_GLOB:
+ switch (enabler->format_type) {
+ case LTTNG_ENABLER_FORMAT_STAR_GLOB:
return lttng_match_enabler_star_glob(desc_name, enabler_name);
- case LTTNG_ENABLER_NAME:
+ case LTTNG_ENABLER_FORMAT_NAME:
return lttng_match_enabler_name(desc_name, enabler_name);
default:
return -EINVAL;
}
static
-int lttng_event_match_enabler(struct lttng_event *event,
- struct lttng_enabler *enabler)
+int lttng_event_enabler_match_event(struct lttng_event_enabler *event_enabler,
+ struct lttng_event *event)
{
- if (enabler->event_param.instrumentation != event->instrumentation)
+ struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(
+ event_enabler);
+
+ if (base_enabler->event_param.instrumentation != event->instrumentation)
return 0;
- if (lttng_desc_match_enabler(event->desc, enabler)
- && event->chan == enabler->chan)
+ if (lttng_desc_match_enabler(event->desc, base_enabler)
+ && event->chan == event_enabler->chan)
return 1;
else
return 0;
}
static
-struct lttng_enabler_ref *lttng_event_enabler_ref(struct lttng_event *event,
+int lttng_trigger_enabler_match_trigger(struct lttng_trigger_enabler *trigger_enabler,
+ struct lttng_trigger *trigger)
+{
+ struct lttng_enabler *base_enabler = lttng_trigger_enabler_as_enabler(
+ trigger_enabler);
+
+ if (base_enabler->event_param.instrumentation != trigger->instrumentation)
+ return 0;
+ if (lttng_desc_match_enabler(trigger->desc, base_enabler)
+ && trigger->group == trigger_enabler->group
+ && trigger->id == trigger_enabler->id)
+ return 1;
+ else
+ return 0;
+}
+
+static
+struct lttng_enabler_ref *lttng_enabler_ref(
+ struct list_head *enablers_ref_list,
struct lttng_enabler *enabler)
{
struct lttng_enabler_ref *enabler_ref;
- list_for_each_entry(enabler_ref,
- &event->enablers_ref_head, node) {
+ list_for_each_entry(enabler_ref, enablers_ref_list, node) {
if (enabler_ref->ref == enabler)
return enabler_ref;
}
}
static
-void lttng_create_tracepoint_if_missing(struct lttng_enabler *enabler)
+void lttng_create_tracepoint_event_if_missing(struct lttng_event_enabler *event_enabler)
{
- struct lttng_session *session = enabler->chan->session;
+ struct lttng_session *session = event_enabler->chan->session;
struct lttng_probe_desc *probe_desc;
const struct lttng_event_desc *desc;
int i;
for (i = 0; i < probe_desc->nr_events; i++) {
int found = 0;
struct hlist_head *head;
- const char *event_name;
- size_t name_len;
- uint32_t hash;
struct lttng_event *event;
desc = probe_desc->event_desc[i];
- if (!lttng_desc_match_enabler(desc, enabler))
+ if (!lttng_desc_match_enabler(desc,
+ lttng_event_enabler_as_enabler(event_enabler)))
continue;
- event_name = desc->name;
- name_len = strlen(event_name);
/*
* Check if already created.
*/
- hash = jhash(event_name, name_len, 0);
- head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
+ head = utils_borrow_hash_table_bucket(
+ session->events_ht.table, LTTNG_EVENT_HT_SIZE,
+ desc->name);
lttng_hlist_for_each_entry(event, head, hlist) {
if (event->desc == desc
- && event->chan == enabler->chan)
+ && event->chan == event_enabler->chan)
+ found = 1;
+ }
+ if (found)
+ continue;
+
+ /*
+ * We need to create an event for this
+ * event probe.
+ */
+ event = _lttng_event_create(event_enabler->chan,
+ NULL, NULL, desc,
+ LTTNG_KERNEL_TRACEPOINT);
+ if (!event) {
+ printk(KERN_INFO "LTTng: Unable to create event %s\n",
+ probe_desc->event_desc[i]->name);
+ }
+ }
+ }
+}
+
+static
+void lttng_create_tracepoint_trigger_if_missing(struct lttng_trigger_enabler *trigger_enabler)
+{
+ struct lttng_trigger_group *trigger_group = trigger_enabler->group;
+ struct lttng_probe_desc *probe_desc;
+ const struct lttng_event_desc *desc;
+ int i;
+ struct list_head *probe_list;
+
+ probe_list = lttng_get_probe_list_head();
+ /*
+ * For each probe event, if we find that a probe event matches
+ * our enabler, create an associated lttng_trigger if not
+ * already present.
+ */
+ list_for_each_entry(probe_desc, probe_list, head) {
+ for (i = 0; i < probe_desc->nr_events; i++) {
+ int found = 0;
+ struct hlist_head *head;
+ struct lttng_trigger *trigger;
+
+ desc = probe_desc->event_desc[i];
+ if (!lttng_desc_match_enabler(desc,
+ lttng_trigger_enabler_as_enabler(trigger_enabler)))
+ continue;
+
+ /*
+ * Check if already created.
+ */
+ head = utils_borrow_hash_table_bucket(
+ trigger_group->triggers_ht.table,
+ LTTNG_TRIGGER_HT_SIZE, desc->name);
+ lttng_hlist_for_each_entry(trigger, head, hlist) {
+ if (trigger->desc == desc
+ && trigger->id == trigger_enabler->id)
found = 1;
}
if (found)
continue;
/*
- * We need to create an event for this
- * event probe.
+ * We need to create a trigger for this event probe.
*/
- event = _lttng_event_create(enabler->chan,
- NULL, NULL, desc,
- LTTNG_KERNEL_TRACEPOINT);
- if (!event) {
- printk(KERN_INFO "LTTng: Unable to create event %s\n",
+ trigger = _lttng_trigger_create(desc,
+ trigger_enabler->id,
+ trigger_enabler->error_counter_index,
+ trigger_group, NULL, NULL,
+ LTTNG_KERNEL_TRACEPOINT);
+ if (IS_ERR(trigger)) {
+ printk(KERN_INFO "LTTng: Unable to create trigger %s\n",
probe_desc->event_desc[i]->name);
}
}
}
static
-void lttng_create_syscall_if_missing(struct lttng_enabler *enabler)
+void lttng_create_syscall_event_if_missing(struct lttng_event_enabler *event_enabler)
+{
+ int ret;
+
+ ret = lttng_syscalls_register_event(event_enabler->chan, NULL);
+ WARN_ON_ONCE(ret);
+}
+
+static
+void lttng_create_syscall_trigger_if_missing(struct lttng_trigger_enabler *trigger_enabler)
{
int ret;
- ret = lttng_syscalls_register(enabler->chan, NULL);
+ ret = lttng_syscalls_register_trigger(trigger_enabler, NULL);
+ WARN_ON_ONCE(ret);
+ ret = lttng_syscalls_create_matching_triggers(trigger_enabler, NULL);
WARN_ON_ONCE(ret);
}
* Should be called with sessions mutex held.
*/
static
-void lttng_create_event_if_missing(struct lttng_enabler *enabler)
+void lttng_create_event_if_missing(struct lttng_event_enabler *event_enabler)
{
- switch (enabler->event_param.instrumentation) {
+ switch (event_enabler->base.event_param.instrumentation) {
case LTTNG_KERNEL_TRACEPOINT:
- lttng_create_tracepoint_if_missing(enabler);
+ lttng_create_tracepoint_event_if_missing(event_enabler);
break;
case LTTNG_KERNEL_SYSCALL:
- lttng_create_syscall_if_missing(enabler);
+ lttng_create_syscall_event_if_missing(event_enabler);
break;
default:
WARN_ON_ONCE(1);
}
/*
- * Create events associated with an enabler (if not already present),
+ * Create events associated with an event_enabler (if not already present),
* and add backward reference from the event to the enabler.
* Should be called with sessions mutex held.
*/
static
-int lttng_enabler_ref_events(struct lttng_enabler *enabler)
+int lttng_event_enabler_ref_events(struct lttng_event_enabler *event_enabler)
{
- struct lttng_channel *chan = enabler->chan;
- struct lttng_session *session = chan->session;
+ struct lttng_channel *chan = event_enabler->chan;
+ struct lttng_session *session = event_enabler->chan->session;
+ struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(event_enabler);
struct lttng_event *event;
- if (enabler->event_param.instrumentation == LTTNG_KERNEL_SYSCALL &&
- enabler->event_param.u.syscall.entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT &&
- enabler->event_param.u.syscall.abi == LTTNG_KERNEL_SYSCALL_ABI_ALL &&
- enabler->event_param.u.syscall.match == LTTNG_SYSCALL_MATCH_NAME &&
- !strcmp(enabler->event_param.name, "*")) {
- if (enabler->enabled)
+ if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_SYSCALL &&
+ base_enabler->event_param.u.syscall.entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT &&
+ base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_SYSCALL_ABI_ALL &&
+ base_enabler->event_param.u.syscall.match == LTTNG_SYSCALL_MATCH_NAME &&
+ !strcmp(base_enabler->event_param.name, "*")) {
+ if (base_enabler->enabled)
WRITE_ONCE(chan->syscall_all, 1);
else
WRITE_ONCE(chan->syscall_all, 0);
}
/* First ensure that probe events are created for this enabler. */
- lttng_create_event_if_missing(enabler);
+ lttng_create_event_if_missing(event_enabler);
- /* For each event matching enabler in session event list. */
+ /* For each event matching event_enabler in session event list. */
list_for_each_entry(event, &session->events, list) {
struct lttng_enabler_ref *enabler_ref;
- if (!lttng_event_match_enabler(event, enabler))
+ if (!lttng_event_enabler_match_event(event_enabler, event))
continue;
- enabler_ref = lttng_event_enabler_ref(event, enabler);
+ enabler_ref = lttng_enabler_ref(&event->enablers_ref_head,
+ lttng_event_enabler_as_enabler(event_enabler));
if (!enabler_ref) {
/*
* If no backward ref, create it.
- * Add backward ref from event to enabler.
+ * Add backward ref from event to event_enabler.
*/
enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
if (!enabler_ref)
return -ENOMEM;
- enabler_ref->ref = enabler;
+ enabler_ref->ref = lttng_event_enabler_as_enabler(event_enabler);
list_add(&enabler_ref->node,
&event->enablers_ref_head);
}
/*
* Link filter bytecodes if not linked yet.
*/
- lttng_enabler_event_link_bytecode(event, enabler);
+ lttng_enabler_link_bytecode(event->desc,
+ lttng_static_ctx,
+ &event->filter_bytecode_runtime_head,
+ &lttng_event_enabler_as_enabler(event_enabler)->filter_bytecode_head);
/* TODO: merge event context. */
}
return 0;
}
+/*
+ * Create struct lttng_trigger if it is missing and present in the list of
+ * tracepoint probes.
+ * Should be called with sessions mutex held.
+ */
+static
+void lttng_create_trigger_if_missing(struct lttng_trigger_enabler *trigger_enabler)
+{
+ switch (trigger_enabler->base.event_param.instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ lttng_create_tracepoint_trigger_if_missing(trigger_enabler);
+ break;
+ case LTTNG_KERNEL_SYSCALL:
+ lttng_create_syscall_trigger_if_missing(trigger_enabler);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+}
+
+/*
+ * Create triggers associated with a trigger enabler (if not already present).
+ */
+static
+int lttng_trigger_enabler_ref_triggers(struct lttng_trigger_enabler *trigger_enabler)
+{
+ struct lttng_trigger_group *trigger_group = trigger_enabler->group;
+ struct lttng_trigger *trigger;
+
+ /* First ensure that probe triggers are created for this enabler. */
+ lttng_create_trigger_if_missing(trigger_enabler);
+
+ /* Link the created trigger with its associated enabler. */
+ list_for_each_entry(trigger, &trigger_group->triggers_head, list) {
+ struct lttng_enabler_ref *enabler_ref;
+
+ if (!lttng_trigger_enabler_match_trigger(trigger_enabler, trigger))
+ continue;
+
+ enabler_ref = lttng_enabler_ref(&trigger->enablers_ref_head,
+ lttng_trigger_enabler_as_enabler(trigger_enabler));
+ if (!enabler_ref) {
+ /*
+ * If no backward ref, create it.
+ * Add backward ref from trigger to enabler.
+ */
+ enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
+ if (!enabler_ref)
+ return -ENOMEM;
+
+ enabler_ref->ref = lttng_trigger_enabler_as_enabler(
+ trigger_enabler);
+ list_add(&enabler_ref->node,
+ &trigger->enablers_ref_head);
+ }
+
+ /*
+ * Link filter bytecodes if not linked yet.
+ */
+ lttng_enabler_link_bytecode(trigger->desc,
+ lttng_static_ctx, &trigger->filter_bytecode_runtime_head,
+ &lttng_trigger_enabler_as_enabler(trigger_enabler)->filter_bytecode_head);
+
+ /* Link capture bytecodes if not linked yet. */
+ lttng_enabler_link_bytecode(trigger->desc,
+ lttng_static_ctx, &trigger->capture_bytecode_runtime_head,
+ &trigger_enabler->capture_bytecode_head);
+
+ trigger->num_captures = trigger_enabler->num_captures;
+ }
+ return 0;
+}
+
/*
* Called at module load: connect the probe on all enablers matching
* this event.
struct lttng_session *session;
list_for_each_entry(session, &sessions, list)
- lttng_session_lazy_sync_enablers(session);
+ lttng_session_lazy_sync_event_enablers(session);
+ return 0;
+}
+
+static bool lttng_trigger_group_has_active_triggers(
+ struct lttng_trigger_group *trigger_group)
+{
+ struct lttng_trigger_enabler *trigger_enabler;
+
+ list_for_each_entry(trigger_enabler, &trigger_group->enablers_head,
+ node) {
+ if (trigger_enabler->base.enabled)
+ return true;
+ }
+ return false;
+}
+
+bool lttng_trigger_active(void)
+{
+ struct lttng_trigger_group *trigger_group;
+
+ list_for_each_entry(trigger_group, &trigger_groups, node) {
+ if (lttng_trigger_group_has_active_triggers(trigger_group))
+ return true;
+ }
+ return false;
+}
+
+int lttng_fix_pending_triggers(void)
+{
+ struct lttng_trigger_group *trigger_group;
+
+ list_for_each_entry(trigger_group, &trigger_groups, node)
+ lttng_trigger_group_sync_enablers(trigger_group);
return 0;
}
-struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
+struct lttng_event_enabler *lttng_event_enabler_create(
+ enum lttng_enabler_format_type format_type,
struct lttng_kernel_event *event_param,
struct lttng_channel *chan)
{
- struct lttng_enabler *enabler;
+ struct lttng_event_enabler *event_enabler;
- enabler = kzalloc(sizeof(*enabler), GFP_KERNEL);
- if (!enabler)
+ event_enabler = kzalloc(sizeof(*event_enabler), GFP_KERNEL);
+ if (!event_enabler)
return NULL;
- enabler->type = type;
- INIT_LIST_HEAD(&enabler->filter_bytecode_head);
- memcpy(&enabler->event_param, event_param,
- sizeof(enabler->event_param));
- enabler->chan = chan;
+ event_enabler->base.format_type = format_type;
+ INIT_LIST_HEAD(&event_enabler->base.filter_bytecode_head);
+ memcpy(&event_enabler->base.event_param, event_param,
+ sizeof(event_enabler->base.event_param));
+ event_enabler->chan = chan;
/* ctx left NULL */
- enabler->enabled = 0;
- enabler->evtype = LTTNG_TYPE_ENABLER;
+ event_enabler->base.enabled = 0;
+ event_enabler->base.evtype = LTTNG_TYPE_ENABLER;
mutex_lock(&sessions_mutex);
- list_add(&enabler->node, &enabler->chan->session->enablers_head);
- lttng_session_lazy_sync_enablers(enabler->chan->session);
+ list_add(&event_enabler->node, &event_enabler->chan->session->enablers_head);
+ lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
mutex_unlock(&sessions_mutex);
- return enabler;
+ return event_enabler;
}
-int lttng_enabler_enable(struct lttng_enabler *enabler)
+int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler)
{
mutex_lock(&sessions_mutex);
- enabler->enabled = 1;
- lttng_session_lazy_sync_enablers(enabler->chan->session);
+ lttng_event_enabler_as_enabler(event_enabler)->enabled = 1;
+ lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
mutex_unlock(&sessions_mutex);
return 0;
}
-int lttng_enabler_disable(struct lttng_enabler *enabler)
+int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler)
{
mutex_lock(&sessions_mutex);
- enabler->enabled = 0;
- lttng_session_lazy_sync_enablers(enabler->chan->session);
+ lttng_event_enabler_as_enabler(event_enabler)->enabled = 0;
+ lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
mutex_unlock(&sessions_mutex);
return 0;
}
-int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
+static
+int lttng_enabler_attach_filter_bytecode(struct lttng_enabler *enabler,
struct lttng_kernel_filter_bytecode __user *bytecode)
{
- struct lttng_filter_bytecode_node *bytecode_node;
+ struct lttng_bytecode_node *bytecode_node;
uint32_t bytecode_len;
int ret;
sizeof(*bytecode) + bytecode_len);
if (ret)
goto error_free;
+
+ bytecode_node->type = LTTNG_BYTECODE_NODE_TYPE_FILTER;
bytecode_node->enabler = enabler;
/* Enforce length based on allocated size */
bytecode_node->bc.len = bytecode_len;
list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
- lttng_session_lazy_sync_enablers(enabler->chan->session);
+
return 0;
error_free:
return ret;
}
+int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler *event_enabler,
+ struct lttng_kernel_filter_bytecode __user *bytecode)
+{
+ int ret;
+ ret = lttng_enabler_attach_filter_bytecode(
+ lttng_event_enabler_as_enabler(event_enabler), bytecode);
+ if (ret)
+ goto error;
+
+ lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
+ return 0;
+
+error:
+ return ret;
+}
+
int lttng_event_add_callsite(struct lttng_event *event,
struct lttng_kernel_event_callsite __user *callsite)
{
switch (event->instrumentation) {
case LTTNG_KERNEL_UPROBE:
- return lttng_uprobes_add_callsite(event, callsite);
+ return lttng_uprobes_event_add_callsite(event, callsite);
default:
return -EINVAL;
}
}
-int lttng_enabler_attach_context(struct lttng_enabler *enabler,
+int lttng_event_enabler_attach_context(struct lttng_event_enabler *event_enabler,
struct lttng_kernel_context *context_param)
{
return -ENOSYS;
static
void lttng_enabler_destroy(struct lttng_enabler *enabler)
{
- struct lttng_filter_bytecode_node *filter_node, *tmp_filter_node;
+ struct lttng_bytecode_node *filter_node, *tmp_filter_node;
/* Destroy filter bytecode */
list_for_each_entry_safe(filter_node, tmp_filter_node,
&enabler->filter_bytecode_head, node) {
kfree(filter_node);
}
+}
+
+static
+void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler)
+{
+ lttng_enabler_destroy(lttng_event_enabler_as_enabler(event_enabler));
/* Destroy contexts */
- lttng_destroy_context(enabler->ctx);
+ lttng_destroy_context(event_enabler->ctx);
+
+ list_del(&event_enabler->node);
+ kfree(event_enabler);
+}
+
+struct lttng_trigger_enabler *lttng_trigger_enabler_create(
+ struct lttng_trigger_group *trigger_group,
+ enum lttng_enabler_format_type format_type,
+ struct lttng_kernel_trigger *trigger_param)
+{
+ struct lttng_trigger_enabler *trigger_enabler;
+
+ trigger_enabler = kzalloc(sizeof(*trigger_enabler), GFP_KERNEL);
+ if (!trigger_enabler)
+ return NULL;
+
+ trigger_enabler->base.format_type = format_type;
+ INIT_LIST_HEAD(&trigger_enabler->base.filter_bytecode_head);
+ INIT_LIST_HEAD(&trigger_enabler->capture_bytecode_head);
+
+ trigger_enabler->id = trigger_param->id;
+ trigger_enabler->error_counter_index = trigger_param->error_counter_index;
+ trigger_enabler->num_captures = 0;
+
+ memcpy(&trigger_enabler->base.event_param.name, trigger_param->name,
+ sizeof(trigger_enabler->base.event_param.name));
+ trigger_enabler->base.event_param.instrumentation = trigger_param->instrumentation;
+ trigger_enabler->base.evtype = LTTNG_TYPE_ENABLER;
+
+ trigger_enabler->base.enabled = 0;
+ trigger_enabler->group = trigger_group;
+
+ mutex_lock(&sessions_mutex);
+ list_add(&trigger_enabler->node, &trigger_enabler->group->enablers_head);
+ lttng_trigger_group_sync_enablers(trigger_enabler->group);
+
+ mutex_unlock(&sessions_mutex);
+
+ return trigger_enabler;
+}
+
+int lttng_trigger_enabler_enable(struct lttng_trigger_enabler *trigger_enabler)
+{
+ mutex_lock(&sessions_mutex);
+ lttng_trigger_enabler_as_enabler(trigger_enabler)->enabled = 1;
+ lttng_trigger_group_sync_enablers(trigger_enabler->group);
+ mutex_unlock(&sessions_mutex);
+ return 0;
+}
+
+int lttng_trigger_enabler_disable(struct lttng_trigger_enabler *trigger_enabler)
+{
+ mutex_lock(&sessions_mutex);
+ lttng_trigger_enabler_as_enabler(trigger_enabler)->enabled = 0;
+ lttng_trigger_group_sync_enablers(trigger_enabler->group);
+ mutex_unlock(&sessions_mutex);
+ return 0;
+}
+
+int lttng_trigger_enabler_attach_filter_bytecode(
+ struct lttng_trigger_enabler *trigger_enabler,
+ struct lttng_kernel_filter_bytecode __user *bytecode)
+{
+ int ret;
+
+ ret = lttng_enabler_attach_filter_bytecode(
+ lttng_trigger_enabler_as_enabler(trigger_enabler), bytecode);
+ if (ret)
+ goto error;
+
+ lttng_trigger_group_sync_enablers(trigger_enabler->group);
+ return 0;
+
+error:
+ return ret;
+}
+
+int lttng_trigger_enabler_attach_capture_bytecode(
+ struct lttng_trigger_enabler *trigger_enabler,
+ struct lttng_kernel_capture_bytecode __user *bytecode)
+{
+ struct lttng_bytecode_node *bytecode_node;
+ struct lttng_enabler *enabler =
+ lttng_trigger_enabler_as_enabler(trigger_enabler);
+ uint32_t bytecode_len;
+ int ret;
+
+ ret = get_user(bytecode_len, &bytecode->len);
+ if (ret)
+ return ret;
+
+ bytecode_node = kzalloc(sizeof(*bytecode_node) + bytecode_len,
+ GFP_KERNEL);
+ if (!bytecode_node)
+ return -ENOMEM;
+
+ ret = copy_from_user(&bytecode_node->bc, bytecode,
+ sizeof(*bytecode) + bytecode_len);
+ if (ret)
+ goto error_free;
+
+ bytecode_node->type = LTTNG_BYTECODE_NODE_TYPE_CAPTURE;
+ bytecode_node->enabler = enabler;
+
+ /* Enforce length based on allocated size */
+ bytecode_node->bc.len = bytecode_len;
+ list_add_tail(&bytecode_node->node, &trigger_enabler->capture_bytecode_head);
+
+ trigger_enabler->num_captures++;
+
+ lttng_trigger_group_sync_enablers(trigger_enabler->group);
+ goto end;
+
+error_free:
+ kfree(bytecode_node);
+end:
+ return ret;
+}
+
+int lttng_trigger_add_callsite(struct lttng_trigger *trigger,
+ struct lttng_kernel_event_callsite __user *callsite)
+{
+
+ switch (trigger->instrumentation) {
+ case LTTNG_KERNEL_UPROBE:
+ return lttng_uprobes_trigger_add_callsite(trigger, callsite);
+ default:
+ return -EINVAL;
+ }
+}
+
+int lttng_trigger_enabler_attach_context(struct lttng_trigger_enabler *trigger_enabler,
+ struct lttng_kernel_context *context_param)
+{
+ return -ENOSYS;
+}
+
+static
+void lttng_trigger_enabler_destroy(struct lttng_trigger_enabler *trigger_enabler)
+{
+ if (!trigger_enabler) {
+ return;
+ }
- list_del(&enabler->node);
- kfree(enabler);
+ list_del(&trigger_enabler->node);
+
+ lttng_enabler_destroy(lttng_trigger_enabler_as_enabler(trigger_enabler));
+ kfree(trigger_enabler);
}
/*
- * lttng_session_sync_enablers should be called just before starting a
+ * lttng_session_sync_event_enablers should be called just before starting a
* session.
* Should be called with sessions mutex held.
*/
static
-void lttng_session_sync_enablers(struct lttng_session *session)
+void lttng_session_sync_event_enablers(struct lttng_session *session)
{
- struct lttng_enabler *enabler;
+ struct lttng_event_enabler *event_enabler;
struct lttng_event *event;
- list_for_each_entry(enabler, &session->enablers_head, node)
- lttng_enabler_ref_events(enabler);
+ list_for_each_entry(event_enabler, &session->enablers_head, node)
+ lttng_event_enabler_ref_events(event_enabler);
/*
* For each event, if at least one of its enablers is enabled,
* and its channel and session transient states are enabled, we
/* Enable filters */
list_for_each_entry(runtime,
- &event->bytecode_runtime_head, node)
- lttng_filter_sync_state(runtime);
+ &event->filter_bytecode_runtime_head, node)
+ lttng_bytecode_filter_sync_state(runtime);
}
}
* Should be called with sessions mutex held.
*/
+/*
+ * Lazy variant of the enabler sync: skip entirely when the session is
+ * not active, otherwise delegate to lttng_session_sync_event_enablers().
+ */
static
-void lttng_session_lazy_sync_enablers(struct lttng_session *session)
+void lttng_session_lazy_sync_event_enablers(struct lttng_session *session)
{
/* We can skip if session is not active */
if (!session->active)
return;
- lttng_session_sync_enablers(session);
+ lttng_session_sync_event_enablers(session);
+}
+
+/*
+ * Synchronize the enabled/registered state of every trigger in the
+ * group with the state of its enablers, then sync each trigger's
+ * filter and capture bytecode runtimes.
+ * NOTE(review): presumably called with sessions mutex held, like the
+ * session enabler sync variants — confirm against callers.
+ */
+static
+void lttng_trigger_group_sync_enablers(struct lttng_trigger_group *trigger_group)
+{
+ struct lttng_trigger_enabler *trigger_enabler;
+ struct lttng_trigger *trigger;
+
+ list_for_each_entry(trigger_enabler, &trigger_group->enablers_head, node)
+ lttng_trigger_enabler_ref_triggers(trigger_enabler);
+
+ /*
+ * For each trigger, if at least one of its enablers is enabled,
+ * we enable the trigger, else we disable it.
+ */
+ list_for_each_entry(trigger, &trigger_group->triggers_head, list) {
+ struct lttng_enabler_ref *enabler_ref;
+ struct lttng_bytecode_runtime *runtime;
+ int enabled = 0, has_enablers_without_bytecode = 0;
+
+ switch (trigger->instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ case LTTNG_KERNEL_SYSCALL:
+ /* Enable triggers */
+ list_for_each_entry(enabler_ref,
+ &trigger->enablers_ref_head, node) {
+ if (enabler_ref->ref->enabled) {
+ enabled = 1;
+ break;
+ }
+ }
+ break;
+ default:
+ /* Not handled with sync. */
+ continue;
+ }
+
+ WRITE_ONCE(trigger->enabled, enabled);
+ /*
+ * Sync tracepoint registration with trigger enabled
+ * state.
+ */
+ if (enabled) {
+ if (!trigger->registered)
+ register_trigger(trigger);
+ } else {
+ if (trigger->registered)
+ _lttng_trigger_unregister(trigger);
+ }
+
+ /* Check if has enablers without bytecode enabled */
+ list_for_each_entry(enabler_ref,
+ &trigger->enablers_ref_head, node) {
+ if (enabler_ref->ref->enabled
+ && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
+ has_enablers_without_bytecode = 1;
+ break;
+ }
+ }
+ trigger->has_enablers_without_bytecode =
+ has_enablers_without_bytecode;
+
+ /* Enable filters */
+ list_for_each_entry(runtime,
+ &trigger->filter_bytecode_runtime_head, node)
+ lttng_bytecode_filter_sync_state(runtime);
+
+ /* Enable captures */
+ list_for_each_entry(runtime,
+ &trigger->capture_bytecode_runtime_head, node)
+ lttng_bytecode_capture_sync_state(runtime);
+ }
+}
/*
}
EXPORT_SYMBOL_GPL(lttng_transport_unregister);
+/*
+ * Register a counter transport on the global counter transport list,
+ * under the sessions mutex.
+ */
+void lttng_counter_transport_register(struct lttng_counter_transport *transport)
+{
+ /*
+ * Make sure no page fault can be triggered by the module about to be
+ * registered. We deal with this here so we don't have to call
+ * vmalloc_sync_mappings() in each module's init.
+ */
+ wrapper_vmalloc_sync_mappings();
+
+ mutex_lock(&sessions_mutex);
+ /*
+ * Fixed mangled "&lt;tng_counter_transport_list" HTML-entity
+ * corruption: the list head is &lttng_counter_transport_list.
+ */
+ list_add_tail(&transport->node, &lttng_counter_transport_list);
+ mutex_unlock(&sessions_mutex);
+}
+EXPORT_SYMBOL_GPL(lttng_counter_transport_register);
+
+/*
+ * Remove a counter transport from the global counter transport list,
+ * under the sessions mutex.
+ */
+void lttng_counter_transport_unregister(struct lttng_counter_transport *transport)
+{
+ mutex_lock(&sessions_mutex);
+ list_del(&transport->node);
+ mutex_unlock(&sessions_mutex);
+}
+
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
enum cpuhp_state lttng_hp_prepare;
event_cache = KMEM_CACHE(lttng_event, 0);
if (!event_cache) {
ret = -ENOMEM;
- goto error_kmem;
+ goto error_kmem_event;
+ }
+ trigger_cache = KMEM_CACHE(lttng_trigger, 0);
+ if (!trigger_cache) {
+ ret = -ENOMEM;
+ goto error_kmem_trigger;
}
ret = lttng_abi_init();
if (ret)
error_logger:
lttng_abi_exit();
error_abi:
+ kmem_cache_destroy(trigger_cache);
+error_kmem_trigger:
kmem_cache_destroy(event_cache);
-error_kmem:
+error_kmem_event:
lttng_tracepoint_exit();
error_tp:
lttng_context_exit();
list_for_each_entry_safe(session, tmpsession, &sessions, list)
lttng_session_destroy(session);
kmem_cache_destroy(event_cache);
+ kmem_cache_destroy(trigger_cache);
lttng_tracepoint_exit();
lttng_context_exit();
printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
+++ /dev/null
-/* SPDX-License-Identifier: MIT
- *
- * lttng-filter-interpreter.c
- *
- * LTTng modules filter interpreter.
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <wrapper/uaccess.h>
-#include <wrapper/frame.h>
-#include <wrapper/types.h>
-#include <linux/swab.h>
-
-#include <lttng/filter.h>
-#include <lttng/string-utils.h>
-
-LTTNG_STACK_FRAME_NON_STANDARD(lttng_filter_interpret_bytecode);
-
-/*
- * get_char should be called with page fault handler disabled if it is expected
- * to handle user-space read.
- */
-static
-char get_char(struct estack_entry *reg, size_t offset)
-{
- if (unlikely(offset >= reg->u.s.seq_len))
- return '\0';
- if (reg->u.s.user) {
- char c;
-
- /* Handle invalid access as end of string. */
- if (unlikely(!lttng_access_ok(VERIFY_READ,
- reg->u.s.user_str + offset,
- sizeof(c))))
- return '\0';
- /* Handle fault (nonzero return value) as end of string. */
- if (unlikely(__copy_from_user_inatomic(&c,
- reg->u.s.user_str + offset,
- sizeof(c))))
- return '\0';
- return c;
- } else {
- return reg->u.s.str[offset];
- }
-}
-
-/*
- * -1: wildcard found.
- * -2: unknown escape char.
- * 0: normal char.
- */
-static
-int parse_char(struct estack_entry *reg, char *c, size_t *offset)
-{
- switch (*c) {
- case '\\':
- (*offset)++;
- *c = get_char(reg, *offset);
- switch (*c) {
- case '\\':
- case '*':
- return 0;
- default:
- return -2;
- }
- case '*':
- return -1;
- default:
- return 0;
- }
-}
-
-static
-char get_char_at_cb(size_t at, void *data)
-{
- return get_char(data, at);
-}
-
-static
-int stack_star_glob_match(struct estack *stack, int top, const char *cmp_type)
-{
- bool has_user = false;
- int result;
- struct estack_entry *pattern_reg;
- struct estack_entry *candidate_reg;
-
- /* Disable the page fault handler when reading from userspace. */
- if (estack_bx(stack, top)->u.s.user
- || estack_ax(stack, top)->u.s.user) {
- has_user = true;
- pagefault_disable();
- }
-
- /* Find out which side is the pattern vs. the candidate. */
- if (estack_ax(stack, top)->u.s.literal_type == ESTACK_STRING_LITERAL_TYPE_STAR_GLOB) {
- pattern_reg = estack_ax(stack, top);
- candidate_reg = estack_bx(stack, top);
- } else {
- pattern_reg = estack_bx(stack, top);
- candidate_reg = estack_ax(stack, top);
- }
-
- /* Perform the match operation. */
- result = !strutils_star_glob_match_char_cb(get_char_at_cb,
- pattern_reg, get_char_at_cb, candidate_reg);
- if (has_user)
- pagefault_enable();
-
- return result;
-}
-
-static
-int stack_strcmp(struct estack *stack, int top, const char *cmp_type)
-{
- size_t offset_bx = 0, offset_ax = 0;
- int diff, has_user = 0;
-
- if (estack_bx(stack, top)->u.s.user
- || estack_ax(stack, top)->u.s.user) {
- has_user = 1;
- pagefault_disable();
- }
-
- for (;;) {
- int ret;
- int escaped_r0 = 0;
- char char_bx, char_ax;
-
- char_bx = get_char(estack_bx(stack, top), offset_bx);
- char_ax = get_char(estack_ax(stack, top), offset_ax);
-
- if (unlikely(char_bx == '\0')) {
- if (char_ax == '\0') {
- diff = 0;
- break;
- } else {
- if (estack_ax(stack, top)->u.s.literal_type ==
- ESTACK_STRING_LITERAL_TYPE_PLAIN) {
- ret = parse_char(estack_ax(stack, top),
- &char_ax, &offset_ax);
- if (ret == -1) {
- diff = 0;
- break;
- }
- }
- diff = -1;
- break;
- }
- }
- if (unlikely(char_ax == '\0')) {
- if (estack_bx(stack, top)->u.s.literal_type ==
- ESTACK_STRING_LITERAL_TYPE_PLAIN) {
- ret = parse_char(estack_bx(stack, top),
- &char_bx, &offset_bx);
- if (ret == -1) {
- diff = 0;
- break;
- }
- }
- diff = 1;
- break;
- }
- if (estack_bx(stack, top)->u.s.literal_type ==
- ESTACK_STRING_LITERAL_TYPE_PLAIN) {
- ret = parse_char(estack_bx(stack, top),
- &char_bx, &offset_bx);
- if (ret == -1) {
- diff = 0;
- break;
- } else if (ret == -2) {
- escaped_r0 = 1;
- }
- /* else compare both char */
- }
- if (estack_ax(stack, top)->u.s.literal_type ==
- ESTACK_STRING_LITERAL_TYPE_PLAIN) {
- ret = parse_char(estack_ax(stack, top),
- &char_ax, &offset_ax);
- if (ret == -1) {
- diff = 0;
- break;
- } else if (ret == -2) {
- if (!escaped_r0) {
- diff = -1;
- break;
- }
- } else {
- if (escaped_r0) {
- diff = 1;
- break;
- }
- }
- } else {
- if (escaped_r0) {
- diff = 1;
- break;
- }
- }
- diff = char_bx - char_ax;
- if (diff != 0)
- break;
- offset_bx++;
- offset_ax++;
- }
- if (has_user)
- pagefault_enable();
-
- return diff;
-}
-
-uint64_t lttng_filter_false(void *filter_data,
- struct lttng_probe_ctx *lttng_probe_ctx,
- const char *filter_stack_data)
-{
- return LTTNG_FILTER_DISCARD;
-}
-
-#ifdef INTERPRETER_USE_SWITCH
-
-/*
- * Fallback for compilers that do not support taking address of labels.
- */
-
-#define START_OP \
- start_pc = &bytecode->data[0]; \
- for (pc = next_pc = start_pc; pc - start_pc < bytecode->len; \
- pc = next_pc) { \
- dbg_printk("LTTng: Executing op %s (%u)\n", \
- lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc), \
- (unsigned int) *(filter_opcode_t *) pc); \
- switch (*(filter_opcode_t *) pc) {
-
-#define OP(name) case name
-
-#define PO break
-
-#define END_OP } \
- }
-
-#else
-
-/*
- * Dispatch-table based interpreter.
- */
-
-#define START_OP \
- start_pc = &bytecode->code[0]; \
- pc = next_pc = start_pc; \
- if (unlikely(pc - start_pc >= bytecode->len)) \
- goto end; \
- goto *dispatch[*(filter_opcode_t *) pc];
-
-#define OP(name) \
-LABEL_##name
-
-#define PO \
- pc = next_pc; \
- goto *dispatch[*(filter_opcode_t *) pc];
-
-#define END_OP
-
-#endif
-
-static int context_get_index(struct lttng_probe_ctx *lttng_probe_ctx,
- struct load_ptr *ptr,
- uint32_t idx)
-{
-
- struct lttng_ctx_field *ctx_field;
- struct lttng_event_field *field;
- union lttng_ctx_value v;
-
- ctx_field = &lttng_static_ctx->fields[idx];
- field = &ctx_field->event_field;
- ptr->type = LOAD_OBJECT;
- /* field is only used for types nested within variants. */
- ptr->field = NULL;
-
- switch (field->type.atype) {
- case atype_integer:
- ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
- if (field->type.u.integer.signedness) {
- ptr->object_type = OBJECT_TYPE_S64;
- ptr->u.s64 = v.s64;
- ptr->ptr = &ptr->u.s64;
- } else {
- ptr->object_type = OBJECT_TYPE_U64;
- ptr->u.u64 = v.s64; /* Cast. */
- ptr->ptr = &ptr->u.u64;
- }
- break;
- case atype_enum_nestable:
- {
- const struct lttng_integer_type *itype =
- &field->type.u.enum_nestable.container_type->u.integer;
-
- ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
- if (itype->signedness) {
- ptr->object_type = OBJECT_TYPE_S64;
- ptr->u.s64 = v.s64;
- ptr->ptr = &ptr->u.s64;
- } else {
- ptr->object_type = OBJECT_TYPE_U64;
- ptr->u.u64 = v.s64; /* Cast. */
- ptr->ptr = &ptr->u.u64;
- }
- break;
- }
- case atype_array_nestable:
- if (!lttng_is_bytewise_integer(field->type.u.array_nestable.elem_type)) {
- printk(KERN_WARNING "LTTng: filter: Array nesting only supports integer types.\n");
- return -EINVAL;
- }
- if (field->type.u.array_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
- printk(KERN_WARNING "LTTng: filter: Only string arrays are supported for contexts.\n");
- return -EINVAL;
- }
- ptr->object_type = OBJECT_TYPE_STRING;
- ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
- ptr->ptr = v.str;
- break;
- case atype_sequence_nestable:
- if (!lttng_is_bytewise_integer(field->type.u.sequence_nestable.elem_type)) {
- printk(KERN_WARNING "LTTng: filter: Sequence nesting only supports integer types.\n");
- return -EINVAL;
- }
- if (field->type.u.sequence_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
- printk(KERN_WARNING "LTTng: filter: Only string sequences are supported for contexts.\n");
- return -EINVAL;
- }
- ptr->object_type = OBJECT_TYPE_STRING;
- ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
- ptr->ptr = v.str;
- break;
- case atype_string:
- ptr->object_type = OBJECT_TYPE_STRING;
- ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
- ptr->ptr = v.str;
- break;
- case atype_struct_nestable:
- printk(KERN_WARNING "LTTng: filter: Structure type cannot be loaded.\n");
- return -EINVAL;
- case atype_variant_nestable:
- printk(KERN_WARNING "LTTng: filter: Variant type cannot be loaded.\n");
- return -EINVAL;
- default:
- printk(KERN_WARNING "LTTng: filter: Unknown type: %d", (int) field->type.atype);
- return -EINVAL;
- }
- return 0;
-}
-
-static int dynamic_get_index(struct lttng_probe_ctx *lttng_probe_ctx,
- struct bytecode_runtime *runtime,
- uint64_t index, struct estack_entry *stack_top)
-{
- int ret;
- const struct filter_get_index_data *gid;
-
- /*
- * Types nested within variants need to perform dynamic lookup
- * based on the field descriptions. LTTng-UST does not implement
- * variants for now.
- */
- if (stack_top->u.ptr.field)
- return -EINVAL;
- gid = (const struct filter_get_index_data *) &runtime->data[index];
- switch (stack_top->u.ptr.type) {
- case LOAD_OBJECT:
- switch (stack_top->u.ptr.object_type) {
- case OBJECT_TYPE_ARRAY:
- {
- const char *ptr;
-
- WARN_ON_ONCE(gid->offset >= gid->array_len);
- /* Skip count (unsigned long) */
- ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
- ptr = ptr + gid->offset;
- stack_top->u.ptr.ptr = ptr;
- stack_top->u.ptr.object_type = gid->elem.type;
- stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
- /* field is only used for types nested within variants. */
- stack_top->u.ptr.field = NULL;
- break;
- }
- case OBJECT_TYPE_SEQUENCE:
- {
- const char *ptr;
- size_t ptr_seq_len;
-
- ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
- ptr_seq_len = *(unsigned long *) stack_top->u.ptr.ptr;
- if (gid->offset >= gid->elem.len * ptr_seq_len) {
- ret = -EINVAL;
- goto end;
- }
- ptr = ptr + gid->offset;
- stack_top->u.ptr.ptr = ptr;
- stack_top->u.ptr.object_type = gid->elem.type;
- stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
- /* field is only used for types nested within variants. */
- stack_top->u.ptr.field = NULL;
- break;
- }
- case OBJECT_TYPE_STRUCT:
- printk(KERN_WARNING "LTTng: filter: Nested structures are not supported yet.\n");
- ret = -EINVAL;
- goto end;
- case OBJECT_TYPE_VARIANT:
- default:
- printk(KERN_WARNING "LTTng: filter: Unexpected get index type %d",
- (int) stack_top->u.ptr.object_type);
- ret = -EINVAL;
- goto end;
- }
- break;
- case LOAD_ROOT_CONTEXT:
- case LOAD_ROOT_APP_CONTEXT: /* Fall-through */
- {
- ret = context_get_index(lttng_probe_ctx,
- &stack_top->u.ptr,
- gid->ctx_index);
- if (ret) {
- goto end;
- }
- break;
- }
- case LOAD_ROOT_PAYLOAD:
- stack_top->u.ptr.ptr += gid->offset;
- if (gid->elem.type == OBJECT_TYPE_STRING)
- stack_top->u.ptr.ptr = *(const char * const *) stack_top->u.ptr.ptr;
- stack_top->u.ptr.object_type = gid->elem.type;
- stack_top->u.ptr.type = LOAD_OBJECT;
- /* field is only used for types nested within variants. */
- stack_top->u.ptr.field = NULL;
- break;
- }
- return 0;
-
-end:
- return ret;
-}
-
-static int dynamic_load_field(struct estack_entry *stack_top)
-{
- int ret;
-
- switch (stack_top->u.ptr.type) {
- case LOAD_OBJECT:
- break;
- case LOAD_ROOT_CONTEXT:
- case LOAD_ROOT_APP_CONTEXT:
- case LOAD_ROOT_PAYLOAD:
- default:
- dbg_printk("Filter warning: cannot load root, missing field name.\n");
- ret = -EINVAL;
- goto end;
- }
- switch (stack_top->u.ptr.object_type) {
- case OBJECT_TYPE_S8:
- dbg_printk("op load field s8\n");
- stack_top->u.v = *(int8_t *) stack_top->u.ptr.ptr;
- break;
- case OBJECT_TYPE_S16:
- {
- int16_t tmp;
-
- dbg_printk("op load field s16\n");
- tmp = *(int16_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- __swab16s(&tmp);
- stack_top->u.v = tmp;
- break;
- }
- case OBJECT_TYPE_S32:
- {
- int32_t tmp;
-
- dbg_printk("op load field s32\n");
- tmp = *(int32_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- __swab32s(&tmp);
- stack_top->u.v = tmp;
- break;
- }
- case OBJECT_TYPE_S64:
- {
- int64_t tmp;
-
- dbg_printk("op load field s64\n");
- tmp = *(int64_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- __swab64s(&tmp);
- stack_top->u.v = tmp;
- break;
- }
- case OBJECT_TYPE_U8:
- dbg_printk("op load field u8\n");
- stack_top->u.v = *(uint8_t *) stack_top->u.ptr.ptr;
- break;
- case OBJECT_TYPE_U16:
- {
- uint16_t tmp;
-
- dbg_printk("op load field u16\n");
- tmp = *(uint16_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- __swab16s(&tmp);
- stack_top->u.v = tmp;
- break;
- }
- case OBJECT_TYPE_U32:
- {
- uint32_t tmp;
-
- dbg_printk("op load field u32\n");
- tmp = *(uint32_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- __swab32s(&tmp);
- stack_top->u.v = tmp;
- break;
- }
- case OBJECT_TYPE_U64:
- {
- uint64_t tmp;
-
- dbg_printk("op load field u64\n");
- tmp = *(uint64_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- __swab64s(&tmp);
- stack_top->u.v = tmp;
- break;
- }
- case OBJECT_TYPE_STRING:
- {
- const char *str;
-
- dbg_printk("op load field string\n");
- str = (const char *) stack_top->u.ptr.ptr;
- stack_top->u.s.str = str;
- if (unlikely(!stack_top->u.s.str)) {
- dbg_printk("Filter warning: loading a NULL string.\n");
- ret = -EINVAL;
- goto end;
- }
- stack_top->u.s.seq_len = LTTNG_SIZE_MAX;
- stack_top->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- break;
- }
- case OBJECT_TYPE_STRING_SEQUENCE:
- {
- const char *ptr;
-
- dbg_printk("op load field string sequence\n");
- ptr = stack_top->u.ptr.ptr;
- stack_top->u.s.seq_len = *(unsigned long *) ptr;
- stack_top->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
- if (unlikely(!stack_top->u.s.str)) {
- dbg_printk("Filter warning: loading a NULL sequence.\n");
- ret = -EINVAL;
- goto end;
- }
- stack_top->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- break;
- }
- case OBJECT_TYPE_DYNAMIC:
- /*
- * Dynamic types in context are looked up
- * by context get index.
- */
- ret = -EINVAL;
- goto end;
- case OBJECT_TYPE_DOUBLE:
- ret = -EINVAL;
- goto end;
- case OBJECT_TYPE_SEQUENCE:
- case OBJECT_TYPE_ARRAY:
- case OBJECT_TYPE_STRUCT:
- case OBJECT_TYPE_VARIANT:
- printk(KERN_WARNING "LTTng: filter: Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
- ret = -EINVAL;
- goto end;
- }
- return 0;
-
-end:
- return ret;
-}
-
-/*
- * Return 0 (discard), or raise the 0x1 flag (log event).
- * Currently, other flags are kept for future extensions and have no
- * effect.
- */
-uint64_t lttng_filter_interpret_bytecode(void *filter_data,
- struct lttng_probe_ctx *lttng_probe_ctx,
- const char *filter_stack_data)
-{
- struct bytecode_runtime *bytecode = filter_data;
- void *pc, *next_pc, *start_pc;
- int ret = -EINVAL;
- uint64_t retval = 0;
- struct estack _stack;
- struct estack *stack = &_stack;
- register int64_t ax = 0, bx = 0;
- register int top = FILTER_STACK_EMPTY;
-#ifndef INTERPRETER_USE_SWITCH
- static void *dispatch[NR_FILTER_OPS] = {
- [ FILTER_OP_UNKNOWN ] = &&LABEL_FILTER_OP_UNKNOWN,
-
- [ FILTER_OP_RETURN ] = &&LABEL_FILTER_OP_RETURN,
-
- /* binary */
- [ FILTER_OP_MUL ] = &&LABEL_FILTER_OP_MUL,
- [ FILTER_OP_DIV ] = &&LABEL_FILTER_OP_DIV,
- [ FILTER_OP_MOD ] = &&LABEL_FILTER_OP_MOD,
- [ FILTER_OP_PLUS ] = &&LABEL_FILTER_OP_PLUS,
- [ FILTER_OP_MINUS ] = &&LABEL_FILTER_OP_MINUS,
- [ FILTER_OP_BIT_RSHIFT ] = &&LABEL_FILTER_OP_BIT_RSHIFT,
- [ FILTER_OP_BIT_LSHIFT ] = &&LABEL_FILTER_OP_BIT_LSHIFT,
- [ FILTER_OP_BIT_AND ] = &&LABEL_FILTER_OP_BIT_AND,
- [ FILTER_OP_BIT_OR ] = &&LABEL_FILTER_OP_BIT_OR,
- [ FILTER_OP_BIT_XOR ] = &&LABEL_FILTER_OP_BIT_XOR,
-
- /* binary comparators */
- [ FILTER_OP_EQ ] = &&LABEL_FILTER_OP_EQ,
- [ FILTER_OP_NE ] = &&LABEL_FILTER_OP_NE,
- [ FILTER_OP_GT ] = &&LABEL_FILTER_OP_GT,
- [ FILTER_OP_LT ] = &&LABEL_FILTER_OP_LT,
- [ FILTER_OP_GE ] = &&LABEL_FILTER_OP_GE,
- [ FILTER_OP_LE ] = &&LABEL_FILTER_OP_LE,
-
- /* string binary comparator */
- [ FILTER_OP_EQ_STRING ] = &&LABEL_FILTER_OP_EQ_STRING,
- [ FILTER_OP_NE_STRING ] = &&LABEL_FILTER_OP_NE_STRING,
- [ FILTER_OP_GT_STRING ] = &&LABEL_FILTER_OP_GT_STRING,
- [ FILTER_OP_LT_STRING ] = &&LABEL_FILTER_OP_LT_STRING,
- [ FILTER_OP_GE_STRING ] = &&LABEL_FILTER_OP_GE_STRING,
- [ FILTER_OP_LE_STRING ] = &&LABEL_FILTER_OP_LE_STRING,
-
- /* globbing pattern binary comparator */
- [ FILTER_OP_EQ_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_EQ_STAR_GLOB_STRING,
- [ FILTER_OP_NE_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_NE_STAR_GLOB_STRING,
-
- /* s64 binary comparator */
- [ FILTER_OP_EQ_S64 ] = &&LABEL_FILTER_OP_EQ_S64,
- [ FILTER_OP_NE_S64 ] = &&LABEL_FILTER_OP_NE_S64,
- [ FILTER_OP_GT_S64 ] = &&LABEL_FILTER_OP_GT_S64,
- [ FILTER_OP_LT_S64 ] = &&LABEL_FILTER_OP_LT_S64,
- [ FILTER_OP_GE_S64 ] = &&LABEL_FILTER_OP_GE_S64,
- [ FILTER_OP_LE_S64 ] = &&LABEL_FILTER_OP_LE_S64,
-
- /* double binary comparator */
- [ FILTER_OP_EQ_DOUBLE ] = &&LABEL_FILTER_OP_EQ_DOUBLE,
- [ FILTER_OP_NE_DOUBLE ] = &&LABEL_FILTER_OP_NE_DOUBLE,
- [ FILTER_OP_GT_DOUBLE ] = &&LABEL_FILTER_OP_GT_DOUBLE,
- [ FILTER_OP_LT_DOUBLE ] = &&LABEL_FILTER_OP_LT_DOUBLE,
- [ FILTER_OP_GE_DOUBLE ] = &&LABEL_FILTER_OP_GE_DOUBLE,
- [ FILTER_OP_LE_DOUBLE ] = &&LABEL_FILTER_OP_LE_DOUBLE,
-
- /* Mixed S64-double binary comparators */
- [ FILTER_OP_EQ_DOUBLE_S64 ] = &&LABEL_FILTER_OP_EQ_DOUBLE_S64,
- [ FILTER_OP_NE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_NE_DOUBLE_S64,
- [ FILTER_OP_GT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GT_DOUBLE_S64,
- [ FILTER_OP_LT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LT_DOUBLE_S64,
- [ FILTER_OP_GE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GE_DOUBLE_S64,
- [ FILTER_OP_LE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LE_DOUBLE_S64,
-
- [ FILTER_OP_EQ_S64_DOUBLE ] = &&LABEL_FILTER_OP_EQ_S64_DOUBLE,
- [ FILTER_OP_NE_S64_DOUBLE ] = &&LABEL_FILTER_OP_NE_S64_DOUBLE,
- [ FILTER_OP_GT_S64_DOUBLE ] = &&LABEL_FILTER_OP_GT_S64_DOUBLE,
- [ FILTER_OP_LT_S64_DOUBLE ] = &&LABEL_FILTER_OP_LT_S64_DOUBLE,
- [ FILTER_OP_GE_S64_DOUBLE ] = &&LABEL_FILTER_OP_GE_S64_DOUBLE,
- [ FILTER_OP_LE_S64_DOUBLE ] = &&LABEL_FILTER_OP_LE_S64_DOUBLE,
-
- /* unary */
- [ FILTER_OP_UNARY_PLUS ] = &&LABEL_FILTER_OP_UNARY_PLUS,
- [ FILTER_OP_UNARY_MINUS ] = &&LABEL_FILTER_OP_UNARY_MINUS,
- [ FILTER_OP_UNARY_NOT ] = &&LABEL_FILTER_OP_UNARY_NOT,
- [ FILTER_OP_UNARY_PLUS_S64 ] = &&LABEL_FILTER_OP_UNARY_PLUS_S64,
- [ FILTER_OP_UNARY_MINUS_S64 ] = &&LABEL_FILTER_OP_UNARY_MINUS_S64,
- [ FILTER_OP_UNARY_NOT_S64 ] = &&LABEL_FILTER_OP_UNARY_NOT_S64,
- [ FILTER_OP_UNARY_PLUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_PLUS_DOUBLE,
- [ FILTER_OP_UNARY_MINUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_MINUS_DOUBLE,
- [ FILTER_OP_UNARY_NOT_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_NOT_DOUBLE,
-
- /* logical */
- [ FILTER_OP_AND ] = &&LABEL_FILTER_OP_AND,
- [ FILTER_OP_OR ] = &&LABEL_FILTER_OP_OR,
-
- /* load field ref */
- [ FILTER_OP_LOAD_FIELD_REF ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF,
- [ FILTER_OP_LOAD_FIELD_REF_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_STRING,
- [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_SEQUENCE,
- [ FILTER_OP_LOAD_FIELD_REF_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_S64,
- [ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_DOUBLE,
-
- /* load from immediate operand */
- [ FILTER_OP_LOAD_STRING ] = &&LABEL_FILTER_OP_LOAD_STRING,
- [ FILTER_OP_LOAD_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_LOAD_STAR_GLOB_STRING,
- [ FILTER_OP_LOAD_S64 ] = &&LABEL_FILTER_OP_LOAD_S64,
- [ FILTER_OP_LOAD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_DOUBLE,
-
- /* cast */
- [ FILTER_OP_CAST_TO_S64 ] = &&LABEL_FILTER_OP_CAST_TO_S64,
- [ FILTER_OP_CAST_DOUBLE_TO_S64 ] = &&LABEL_FILTER_OP_CAST_DOUBLE_TO_S64,
- [ FILTER_OP_CAST_NOP ] = &&LABEL_FILTER_OP_CAST_NOP,
-
- /* get context ref */
- [ FILTER_OP_GET_CONTEXT_REF ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF,
- [ FILTER_OP_GET_CONTEXT_REF_STRING ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_STRING,
- [ FILTER_OP_GET_CONTEXT_REF_S64 ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_S64,
- [ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_DOUBLE,
-
- /* load userspace field ref */
- [ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_STRING,
- [ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE,
-
- /* Instructions for recursive traversal through composed types. */
- [ FILTER_OP_GET_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_CONTEXT_ROOT,
- [ FILTER_OP_GET_APP_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_APP_CONTEXT_ROOT,
- [ FILTER_OP_GET_PAYLOAD_ROOT ] = &&LABEL_FILTER_OP_GET_PAYLOAD_ROOT,
-
- [ FILTER_OP_GET_SYMBOL ] = &&LABEL_FILTER_OP_GET_SYMBOL,
- [ FILTER_OP_GET_SYMBOL_FIELD ] = &&LABEL_FILTER_OP_GET_SYMBOL_FIELD,
- [ FILTER_OP_GET_INDEX_U16 ] = &&LABEL_FILTER_OP_GET_INDEX_U16,
- [ FILTER_OP_GET_INDEX_U64 ] = &&LABEL_FILTER_OP_GET_INDEX_U64,
-
- [ FILTER_OP_LOAD_FIELD ] = &&LABEL_FILTER_OP_LOAD_FIELD,
- [ FILTER_OP_LOAD_FIELD_S8 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S8,
- [ FILTER_OP_LOAD_FIELD_S16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S16,
- [ FILTER_OP_LOAD_FIELD_S32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S32,
- [ FILTER_OP_LOAD_FIELD_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S64,
- [ FILTER_OP_LOAD_FIELD_U8 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U8,
- [ FILTER_OP_LOAD_FIELD_U16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U16,
- [ FILTER_OP_LOAD_FIELD_U32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U32,
- [ FILTER_OP_LOAD_FIELD_U64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U64,
- [ FILTER_OP_LOAD_FIELD_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_STRING,
- [ FILTER_OP_LOAD_FIELD_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_SEQUENCE,
- [ FILTER_OP_LOAD_FIELD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_DOUBLE,
-
- [ FILTER_OP_UNARY_BIT_NOT ] = &&LABEL_FILTER_OP_UNARY_BIT_NOT,
-
- [ FILTER_OP_RETURN_S64 ] = &&LABEL_FILTER_OP_RETURN_S64,
- };
-#endif /* #ifndef INTERPRETER_USE_SWITCH */
-
- START_OP
-
- OP(FILTER_OP_UNKNOWN):
- OP(FILTER_OP_LOAD_FIELD_REF):
- OP(FILTER_OP_GET_CONTEXT_REF):
-#ifdef INTERPRETER_USE_SWITCH
- default:
-#endif /* INTERPRETER_USE_SWITCH */
- printk(KERN_WARNING "LTTng: filter: unknown bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
-
- OP(FILTER_OP_RETURN):
- OP(FILTER_OP_RETURN_S64):
- /* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */
- retval = !!estack_ax_v;
- ret = 0;
- goto end;
-
- /* binary */
- OP(FILTER_OP_MUL):
- OP(FILTER_OP_DIV):
- OP(FILTER_OP_MOD):
- OP(FILTER_OP_PLUS):
- OP(FILTER_OP_MINUS):
- printk(KERN_WARNING "LTTng: filter: unsupported bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
-
- OP(FILTER_OP_EQ):
- OP(FILTER_OP_NE):
- OP(FILTER_OP_GT):
- OP(FILTER_OP_LT):
- OP(FILTER_OP_GE):
- OP(FILTER_OP_LE):
- printk(KERN_WARNING "LTTng: filter: unsupported non-specialized bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
-
- OP(FILTER_OP_EQ_STRING):
- {
- int res;
-
- res = (stack_strcmp(stack, top, "==") == 0);
- estack_pop(stack, top, ax, bx);
- estack_ax_v = res;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_NE_STRING):
- {
- int res;
-
- res = (stack_strcmp(stack, top, "!=") != 0);
- estack_pop(stack, top, ax, bx);
- estack_ax_v = res;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_GT_STRING):
- {
- int res;
-
- res = (stack_strcmp(stack, top, ">") > 0);
- estack_pop(stack, top, ax, bx);
- estack_ax_v = res;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_LT_STRING):
- {
- int res;
-
- res = (stack_strcmp(stack, top, "<") < 0);
- estack_pop(stack, top, ax, bx);
- estack_ax_v = res;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_GE_STRING):
- {
- int res;
-
- res = (stack_strcmp(stack, top, ">=") >= 0);
- estack_pop(stack, top, ax, bx);
- estack_ax_v = res;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_LE_STRING):
- {
- int res;
-
- res = (stack_strcmp(stack, top, "<=") <= 0);
- estack_pop(stack, top, ax, bx);
- estack_ax_v = res;
- next_pc += sizeof(struct binary_op);
- PO;
- }
-
- OP(FILTER_OP_EQ_STAR_GLOB_STRING):
- {
- int res;
-
- res = (stack_star_glob_match(stack, top, "==") == 0);
- estack_pop(stack, top, ax, bx);
- estack_ax_v = res;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_NE_STAR_GLOB_STRING):
- {
- int res;
-
- res = (stack_star_glob_match(stack, top, "!=") != 0);
- estack_pop(stack, top, ax, bx);
- estack_ax_v = res;
- next_pc += sizeof(struct binary_op);
- PO;
- }
-
- OP(FILTER_OP_EQ_S64):
- {
- int res;
-
- res = (estack_bx_v == estack_ax_v);
- estack_pop(stack, top, ax, bx);
- estack_ax_v = res;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_NE_S64):
- {
- int res;
-
- res = (estack_bx_v != estack_ax_v);
- estack_pop(stack, top, ax, bx);
- estack_ax_v = res;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_GT_S64):
- {
- int res;
-
- res = (estack_bx_v > estack_ax_v);
- estack_pop(stack, top, ax, bx);
- estack_ax_v = res;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_LT_S64):
- {
- int res;
-
- res = (estack_bx_v < estack_ax_v);
- estack_pop(stack, top, ax, bx);
- estack_ax_v = res;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_GE_S64):
- {
- int res;
-
- res = (estack_bx_v >= estack_ax_v);
- estack_pop(stack, top, ax, bx);
- estack_ax_v = res;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_LE_S64):
- {
- int res;
-
- res = (estack_bx_v <= estack_ax_v);
- estack_pop(stack, top, ax, bx);
- estack_ax_v = res;
- next_pc += sizeof(struct binary_op);
- PO;
- }
-
- OP(FILTER_OP_EQ_DOUBLE):
- OP(FILTER_OP_NE_DOUBLE):
- OP(FILTER_OP_GT_DOUBLE):
- OP(FILTER_OP_LT_DOUBLE):
- OP(FILTER_OP_GE_DOUBLE):
- OP(FILTER_OP_LE_DOUBLE):
- {
- BUG_ON(1);
- PO;
- }
-
- /* Mixed S64-double binary comparators */
- OP(FILTER_OP_EQ_DOUBLE_S64):
- OP(FILTER_OP_NE_DOUBLE_S64):
- OP(FILTER_OP_GT_DOUBLE_S64):
- OP(FILTER_OP_LT_DOUBLE_S64):
- OP(FILTER_OP_GE_DOUBLE_S64):
- OP(FILTER_OP_LE_DOUBLE_S64):
- OP(FILTER_OP_EQ_S64_DOUBLE):
- OP(FILTER_OP_NE_S64_DOUBLE):
- OP(FILTER_OP_GT_S64_DOUBLE):
- OP(FILTER_OP_LT_S64_DOUBLE):
- OP(FILTER_OP_GE_S64_DOUBLE):
- OP(FILTER_OP_LE_S64_DOUBLE):
- {
- BUG_ON(1);
- PO;
- }
- OP(FILTER_OP_BIT_RSHIFT):
- {
- int64_t res;
-
- /* Catch undefined behavior. */
- if (unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
- ret = -EINVAL;
- goto end;
- }
- res = ((uint64_t) estack_bx_v >> (uint32_t) estack_ax_v);
- estack_pop(stack, top, ax, bx);
- estack_ax_v = res;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_BIT_LSHIFT):
- {
- int64_t res;
-
- /* Catch undefined behavior. */
- if (unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
- ret = -EINVAL;
- goto end;
- }
- res = ((uint64_t) estack_bx_v << (uint32_t) estack_ax_v);
- estack_pop(stack, top, ax, bx);
- estack_ax_v = res;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_BIT_AND):
- {
- int64_t res;
-
- res = ((uint64_t) estack_bx_v & (uint64_t) estack_ax_v);
- estack_pop(stack, top, ax, bx);
- estack_ax_v = res;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_BIT_OR):
- {
- int64_t res;
-
- res = ((uint64_t) estack_bx_v | (uint64_t) estack_ax_v);
- estack_pop(stack, top, ax, bx);
- estack_ax_v = res;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_BIT_XOR):
- {
- int64_t res;
-
- res = ((uint64_t) estack_bx_v ^ (uint64_t) estack_ax_v);
- estack_pop(stack, top, ax, bx);
- estack_ax_v = res;
- next_pc += sizeof(struct binary_op);
- PO;
- }
-
- /* unary */
- OP(FILTER_OP_UNARY_PLUS):
- OP(FILTER_OP_UNARY_MINUS):
- OP(FILTER_OP_UNARY_NOT):
- printk(KERN_WARNING "LTTng: filter: unsupported non-specialized bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
-
-
- OP(FILTER_OP_UNARY_BIT_NOT):
- {
- estack_ax_v = ~(uint64_t) estack_ax_v;
- next_pc += sizeof(struct unary_op);
- PO;
- }
-
- OP(FILTER_OP_UNARY_PLUS_S64):
- {
- next_pc += sizeof(struct unary_op);
- PO;
- }
- OP(FILTER_OP_UNARY_MINUS_S64):
- {
- estack_ax_v = -estack_ax_v;
- next_pc += sizeof(struct unary_op);
- PO;
- }
- OP(FILTER_OP_UNARY_PLUS_DOUBLE):
- OP(FILTER_OP_UNARY_MINUS_DOUBLE):
- {
- BUG_ON(1);
- PO;
- }
- OP(FILTER_OP_UNARY_NOT_S64):
- {
- estack_ax_v = !estack_ax_v;
- next_pc += sizeof(struct unary_op);
- PO;
- }
- OP(FILTER_OP_UNARY_NOT_DOUBLE):
- {
- BUG_ON(1);
- PO;
- }
-
- /* logical */
- OP(FILTER_OP_AND):
- {
- struct logical_op *insn = (struct logical_op *) pc;
-
- /* If AX is 0, skip and evaluate to 0 */
- if (unlikely(estack_ax_v == 0)) {
- dbg_printk("Jumping to bytecode offset %u\n",
- (unsigned int) insn->skip_offset);
- next_pc = start_pc + insn->skip_offset;
- } else {
- /* Pop 1 when jump not taken */
- estack_pop(stack, top, ax, bx);
- next_pc += sizeof(struct logical_op);
- }
- PO;
- }
- OP(FILTER_OP_OR):
- {
- struct logical_op *insn = (struct logical_op *) pc;
-
- /* If AX is nonzero, skip and evaluate to 1 */
-
- if (unlikely(estack_ax_v != 0)) {
- estack_ax_v = 1;
- dbg_printk("Jumping to bytecode offset %u\n",
- (unsigned int) insn->skip_offset);
- next_pc = start_pc + insn->skip_offset;
- } else {
- /* Pop 1 when jump not taken */
- estack_pop(stack, top, ax, bx);
- next_pc += sizeof(struct logical_op);
- }
- PO;
- }
-
-
- /* load field ref */
- OP(FILTER_OP_LOAD_FIELD_REF_STRING):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printk("load field ref offset %u type string\n",
- ref->offset);
- estack_push(stack, top, ax, bx);
- estack_ax(stack, top)->u.s.str =
- *(const char * const *) &filter_stack_data[ref->offset];
- if (unlikely(!estack_ax(stack, top)->u.s.str)) {
- dbg_printk("Filter warning: loading a NULL string.\n");
- ret = -EINVAL;
- goto end;
- }
- estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- estack_ax(stack, top)->u.s.user = 0;
- dbg_printk("ref load string %s\n", estack_ax(stack, top)->u.s.str);
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(FILTER_OP_LOAD_FIELD_REF_SEQUENCE):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printk("load field ref offset %u type sequence\n",
- ref->offset);
- estack_push(stack, top, ax, bx);
- estack_ax(stack, top)->u.s.seq_len =
- *(unsigned long *) &filter_stack_data[ref->offset];
- estack_ax(stack, top)->u.s.str =
- *(const char **) (&filter_stack_data[ref->offset
- + sizeof(unsigned long)]);
- if (unlikely(!estack_ax(stack, top)->u.s.str)) {
- dbg_printk("Filter warning: loading a NULL sequence.\n");
- ret = -EINVAL;
- goto end;
- }
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- estack_ax(stack, top)->u.s.user = 0;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(FILTER_OP_LOAD_FIELD_REF_S64):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printk("load field ref offset %u type s64\n",
- ref->offset);
- estack_push(stack, top, ax, bx);
- estack_ax_v =
- ((struct literal_numeric *) &filter_stack_data[ref->offset])->v;
- dbg_printk("ref load s64 %lld\n",
- (long long) estack_ax_v);
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(FILTER_OP_LOAD_FIELD_REF_DOUBLE):
- {
- BUG_ON(1);
- PO;
- }
-
- /* load from immediate operand */
- OP(FILTER_OP_LOAD_STRING):
- {
- struct load_op *insn = (struct load_op *) pc;
-
- dbg_printk("load string %s\n", insn->data);
- estack_push(stack, top, ax, bx);
- estack_ax(stack, top)->u.s.str = insn->data;
- estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_PLAIN;
- estack_ax(stack, top)->u.s.user = 0;
- next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
- PO;
- }
-
- OP(FILTER_OP_LOAD_STAR_GLOB_STRING):
- {
- struct load_op *insn = (struct load_op *) pc;
-
- dbg_printk("load globbing pattern %s\n", insn->data);
- estack_push(stack, top, ax, bx);
- estack_ax(stack, top)->u.s.str = insn->data;
- estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_STAR_GLOB;
- estack_ax(stack, top)->u.s.user = 0;
- next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
- PO;
- }
-
- OP(FILTER_OP_LOAD_S64):
- {
- struct load_op *insn = (struct load_op *) pc;
-
- estack_push(stack, top, ax, bx);
- estack_ax_v = ((struct literal_numeric *) insn->data)->v;
- dbg_printk("load s64 %lld\n",
- (long long) estack_ax_v);
- next_pc += sizeof(struct load_op)
- + sizeof(struct literal_numeric);
- PO;
- }
-
- OP(FILTER_OP_LOAD_DOUBLE):
- {
- BUG_ON(1);
- PO;
- }
-
- /* cast */
- OP(FILTER_OP_CAST_TO_S64):
- printk(KERN_WARNING "LTTng: filter: unsupported non-specialized bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
-
- OP(FILTER_OP_CAST_DOUBLE_TO_S64):
- {
- BUG_ON(1);
- PO;
- }
-
- OP(FILTER_OP_CAST_NOP):
- {
- next_pc += sizeof(struct cast_op);
- PO;
- }
-
- /* get context ref */
- OP(FILTER_OP_GET_CONTEXT_REF_STRING):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
- struct lttng_ctx_field *ctx_field;
- union lttng_ctx_value v;
-
- dbg_printk("get context ref offset %u type string\n",
- ref->offset);
- ctx_field = <tng_static_ctx->fields[ref->offset];
- ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
- estack_push(stack, top, ax, bx);
- estack_ax(stack, top)->u.s.str = v.str;
- if (unlikely(!estack_ax(stack, top)->u.s.str)) {
- dbg_printk("Filter warning: loading a NULL string.\n");
- ret = -EINVAL;
- goto end;
- }
- estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- estack_ax(stack, top)->u.s.user = 0;
- dbg_printk("ref get context string %s\n", estack_ax(stack, top)->u.s.str);
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(FILTER_OP_GET_CONTEXT_REF_S64):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
- struct lttng_ctx_field *ctx_field;
- union lttng_ctx_value v;
-
- dbg_printk("get context ref offset %u type s64\n",
- ref->offset);
- ctx_field = <tng_static_ctx->fields[ref->offset];
- ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
- estack_push(stack, top, ax, bx);
- estack_ax_v = v.s64;
- dbg_printk("ref get context s64 %lld\n",
- (long long) estack_ax_v);
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(FILTER_OP_GET_CONTEXT_REF_DOUBLE):
- {
- BUG_ON(1);
- PO;
- }
-
- /* load userspace field ref */
- OP(FILTER_OP_LOAD_FIELD_REF_USER_STRING):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printk("load field ref offset %u type user string\n",
- ref->offset);
- estack_push(stack, top, ax, bx);
- estack_ax(stack, top)->u.s.user_str =
- *(const char * const *) &filter_stack_data[ref->offset];
- if (unlikely(!estack_ax(stack, top)->u.s.str)) {
- dbg_printk("Filter warning: loading a NULL string.\n");
- ret = -EINVAL;
- goto end;
- }
- estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- estack_ax(stack, top)->u.s.user = 1;
- dbg_printk("ref load string %s\n", estack_ax(stack, top)->u.s.str);
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printk("load field ref offset %u type user sequence\n",
- ref->offset);
- estack_push(stack, top, ax, bx);
- estack_ax(stack, top)->u.s.seq_len =
- *(unsigned long *) &filter_stack_data[ref->offset];
- estack_ax(stack, top)->u.s.user_str =
- *(const char **) (&filter_stack_data[ref->offset
- + sizeof(unsigned long)]);
- if (unlikely(!estack_ax(stack, top)->u.s.str)) {
- dbg_printk("Filter warning: loading a NULL sequence.\n");
- ret = -EINVAL;
- goto end;
- }
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- estack_ax(stack, top)->u.s.user = 1;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(FILTER_OP_GET_CONTEXT_ROOT):
- {
- dbg_printk("op get context root\n");
- estack_push(stack, top, ax, bx);
- estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_CONTEXT;
- /* "field" only needed for variants. */
- estack_ax(stack, top)->u.ptr.field = NULL;
- next_pc += sizeof(struct load_op);
- PO;
- }
-
- OP(FILTER_OP_GET_APP_CONTEXT_ROOT):
- {
- BUG_ON(1);
- PO;
- }
-
- OP(FILTER_OP_GET_PAYLOAD_ROOT):
- {
- dbg_printk("op get app payload root\n");
- estack_push(stack, top, ax, bx);
- estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_PAYLOAD;
- estack_ax(stack, top)->u.ptr.ptr = filter_stack_data;
- /* "field" only needed for variants. */
- estack_ax(stack, top)->u.ptr.field = NULL;
- next_pc += sizeof(struct load_op);
- PO;
- }
-
- OP(FILTER_OP_GET_SYMBOL):
- {
- dbg_printk("op get symbol\n");
- switch (estack_ax(stack, top)->u.ptr.type) {
- case LOAD_OBJECT:
- printk(KERN_WARNING "LTTng: filter: Nested fields not implemented yet.\n");
- ret = -EINVAL;
- goto end;
- case LOAD_ROOT_CONTEXT:
- case LOAD_ROOT_APP_CONTEXT:
- case LOAD_ROOT_PAYLOAD:
- /*
- * symbol lookup is performed by
- * specialization.
- */
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
- PO;
- }
-
- OP(FILTER_OP_GET_SYMBOL_FIELD):
- {
- /*
- * Used for first variant encountered in a
- * traversal. Variants are not implemented yet.
- */
- ret = -EINVAL;
- goto end;
- }
-
- OP(FILTER_OP_GET_INDEX_U16):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
-
- dbg_printk("op get index u16\n");
- ret = dynamic_get_index(lttng_probe_ctx, bytecode, index->index, estack_ax(stack, top));
- if (ret)
- goto end;
- estack_ax_v = estack_ax(stack, top)->u.v;
- next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
- PO;
- }
-
- OP(FILTER_OP_GET_INDEX_U64):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
-
- dbg_printk("op get index u64\n");
- ret = dynamic_get_index(lttng_probe_ctx, bytecode, index->index, estack_ax(stack, top));
- if (ret)
- goto end;
- estack_ax_v = estack_ax(stack, top)->u.v;
- next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
- PO;
- }
-
- OP(FILTER_OP_LOAD_FIELD):
- {
- dbg_printk("op load field\n");
- ret = dynamic_load_field(estack_ax(stack, top));
- if (ret)
- goto end;
- estack_ax_v = estack_ax(stack, top)->u.v;
- next_pc += sizeof(struct load_op);
- PO;
- }
-
- OP(FILTER_OP_LOAD_FIELD_S8):
- {
- dbg_printk("op load field s8\n");
-
- estack_ax_v = *(int8_t *) estack_ax(stack, top)->u.ptr.ptr;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(FILTER_OP_LOAD_FIELD_S16):
- {
- dbg_printk("op load field s16\n");
-
- estack_ax_v = *(int16_t *) estack_ax(stack, top)->u.ptr.ptr;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(FILTER_OP_LOAD_FIELD_S32):
- {
- dbg_printk("op load field s32\n");
-
- estack_ax_v = *(int32_t *) estack_ax(stack, top)->u.ptr.ptr;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(FILTER_OP_LOAD_FIELD_S64):
- {
- dbg_printk("op load field s64\n");
-
- estack_ax_v = *(int64_t *) estack_ax(stack, top)->u.ptr.ptr;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(FILTER_OP_LOAD_FIELD_U8):
- {
- dbg_printk("op load field u8\n");
-
- estack_ax_v = *(uint8_t *) estack_ax(stack, top)->u.ptr.ptr;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(FILTER_OP_LOAD_FIELD_U16):
- {
- dbg_printk("op load field u16\n");
-
- estack_ax_v = *(uint16_t *) estack_ax(stack, top)->u.ptr.ptr;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(FILTER_OP_LOAD_FIELD_U32):
- {
- dbg_printk("op load field u32\n");
-
- estack_ax_v = *(uint32_t *) estack_ax(stack, top)->u.ptr.ptr;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(FILTER_OP_LOAD_FIELD_U64):
- {
- dbg_printk("op load field u64\n");
-
- estack_ax_v = *(uint64_t *) estack_ax(stack, top)->u.ptr.ptr;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(FILTER_OP_LOAD_FIELD_DOUBLE):
- {
- ret = -EINVAL;
- goto end;
- }
-
- OP(FILTER_OP_LOAD_FIELD_STRING):
- {
- const char *str;
-
- dbg_printk("op load field string\n");
- str = (const char *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax(stack, top)->u.s.str = str;
- if (unlikely(!estack_ax(stack, top)->u.s.str)) {
- dbg_printk("Filter warning: loading a NULL string.\n");
- ret = -EINVAL;
- goto end;
- }
- estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- next_pc += sizeof(struct load_op);
- PO;
- }
-
- OP(FILTER_OP_LOAD_FIELD_SEQUENCE):
- {
- const char *ptr;
-
- dbg_printk("op load field string sequence\n");
- ptr = estack_ax(stack, top)->u.ptr.ptr;
- estack_ax(stack, top)->u.s.seq_len = *(unsigned long *) ptr;
- estack_ax(stack, top)->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
- if (unlikely(!estack_ax(stack, top)->u.s.str)) {
- dbg_printk("Filter warning: loading a NULL sequence.\n");
- ret = -EINVAL;
- goto end;
- }
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- next_pc += sizeof(struct load_op);
- PO;
- }
-
- END_OP
-end:
- /* Return _DISCARD on error. */
- if (ret)
- return LTTNG_FILTER_DISCARD;
- return retval;
-}
-
-#undef START_OP
-#undef OP
-#undef PO
-#undef END_OP
+++ /dev/null
-/* SPDX-License-Identifier: MIT
- *
- * lttng-filter-specialize.c
- *
- * LTTng modules filter code specializer.
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/slab.h>
-#include <lttng/filter.h>
-#include <lttng/align.h>
-
-static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
- size_t align, size_t len)
-{
- ssize_t ret;
- size_t padding = offset_align(runtime->data_len, align);
- size_t new_len = runtime->data_len + padding + len;
- size_t new_alloc_len = new_len;
- size_t old_alloc_len = runtime->data_alloc_len;
-
- if (new_len > FILTER_MAX_DATA_LEN)
- return -EINVAL;
-
- if (new_alloc_len > old_alloc_len) {
- char *newptr;
-
- new_alloc_len =
- max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
- newptr = krealloc(runtime->data, new_alloc_len, GFP_KERNEL);
- if (!newptr)
- return -ENOMEM;
- runtime->data = newptr;
- /* We zero directly the memory from start of allocation. */
- memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
- runtime->data_alloc_len = new_alloc_len;
- }
- runtime->data_len += padding;
- ret = runtime->data_len;
- runtime->data_len += len;
- return ret;
-}
-
-static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
- const void *p, size_t align, size_t len)
-{
- ssize_t offset;
-
- offset = bytecode_reserve_data(runtime, align, len);
- if (offset < 0)
- return -ENOMEM;
- memcpy(&runtime->data[offset], p, len);
- return offset;
-}
-
-static int specialize_load_field(struct vstack_entry *stack_top,
- struct load_op *insn)
-{
- int ret;
-
- switch (stack_top->load.type) {
- case LOAD_OBJECT:
- break;
- case LOAD_ROOT_CONTEXT:
- case LOAD_ROOT_APP_CONTEXT:
- case LOAD_ROOT_PAYLOAD:
- default:
- dbg_printk("Filter warning: cannot load root, missing field name.\n");
- ret = -EINVAL;
- goto end;
- }
- switch (stack_top->load.object_type) {
- case OBJECT_TYPE_S8:
- dbg_printk("op load field s8\n");
- stack_top->type = REG_S64;
- if (!stack_top->load.rev_bo)
- insn->op = FILTER_OP_LOAD_FIELD_S8;
- break;
- case OBJECT_TYPE_S16:
- dbg_printk("op load field s16\n");
- stack_top->type = REG_S64;
- if (!stack_top->load.rev_bo)
- insn->op = FILTER_OP_LOAD_FIELD_S16;
- break;
- case OBJECT_TYPE_S32:
- dbg_printk("op load field s32\n");
- stack_top->type = REG_S64;
- if (!stack_top->load.rev_bo)
- insn->op = FILTER_OP_LOAD_FIELD_S32;
- break;
- case OBJECT_TYPE_S64:
- dbg_printk("op load field s64\n");
- stack_top->type = REG_S64;
- if (!stack_top->load.rev_bo)
- insn->op = FILTER_OP_LOAD_FIELD_S64;
- break;
- case OBJECT_TYPE_U8:
- dbg_printk("op load field u8\n");
- stack_top->type = REG_S64;
- insn->op = FILTER_OP_LOAD_FIELD_U8;
- break;
- case OBJECT_TYPE_U16:
- dbg_printk("op load field u16\n");
- stack_top->type = REG_S64;
- if (!stack_top->load.rev_bo)
- insn->op = FILTER_OP_LOAD_FIELD_U16;
- break;
- case OBJECT_TYPE_U32:
- dbg_printk("op load field u32\n");
- stack_top->type = REG_S64;
- if (!stack_top->load.rev_bo)
- insn->op = FILTER_OP_LOAD_FIELD_U32;
- break;
- case OBJECT_TYPE_U64:
- dbg_printk("op load field u64\n");
- stack_top->type = REG_S64;
- if (!stack_top->load.rev_bo)
- insn->op = FILTER_OP_LOAD_FIELD_U64;
- break;
- case OBJECT_TYPE_DOUBLE:
- printk(KERN_WARNING "LTTng: filter: Double type unsupported\n\n");
- ret = -EINVAL;
- goto end;
- case OBJECT_TYPE_STRING:
- dbg_printk("op load field string\n");
- stack_top->type = REG_STRING;
- insn->op = FILTER_OP_LOAD_FIELD_STRING;
- break;
- case OBJECT_TYPE_STRING_SEQUENCE:
- dbg_printk("op load field string sequence\n");
- stack_top->type = REG_STRING;
- insn->op = FILTER_OP_LOAD_FIELD_SEQUENCE;
- break;
- case OBJECT_TYPE_DYNAMIC:
- ret = -EINVAL;
- goto end;
- case OBJECT_TYPE_SEQUENCE:
- case OBJECT_TYPE_ARRAY:
- case OBJECT_TYPE_STRUCT:
- case OBJECT_TYPE_VARIANT:
- printk(KERN_WARNING "LTTng: filter: Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
- ret = -EINVAL;
- goto end;
- }
- return 0;
-
-end:
- return ret;
-}
-
-static int specialize_get_index_object_type(enum object_type *otype,
- int signedness, uint32_t elem_len)
-{
- switch (elem_len) {
- case 8:
- if (signedness)
- *otype = OBJECT_TYPE_S8;
- else
- *otype = OBJECT_TYPE_U8;
- break;
- case 16:
- if (signedness)
- *otype = OBJECT_TYPE_S16;
- else
- *otype = OBJECT_TYPE_U16;
- break;
- case 32:
- if (signedness)
- *otype = OBJECT_TYPE_S32;
- else
- *otype = OBJECT_TYPE_U32;
- break;
- case 64:
- if (signedness)
- *otype = OBJECT_TYPE_S64;
- else
- *otype = OBJECT_TYPE_U64;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int specialize_get_index(struct bytecode_runtime *runtime,
- struct load_op *insn, uint64_t index,
- struct vstack_entry *stack_top,
- int idx_len)
-{
- int ret;
- struct filter_get_index_data gid;
- ssize_t data_offset;
-
- memset(&gid, 0, sizeof(gid));
- switch (stack_top->load.type) {
- case LOAD_OBJECT:
- switch (stack_top->load.object_type) {
- case OBJECT_TYPE_ARRAY:
- {
- const struct lttng_integer_type *integer_type;
- const struct lttng_event_field *field;
- uint32_t elem_len, num_elems;
- int signedness;
-
- field = stack_top->load.field;
- if (!lttng_is_bytewise_integer(field->type.u.array_nestable.elem_type)) {
- ret = -EINVAL;
- goto end;
- }
- integer_type = &field->type.u.array_nestable.elem_type->u.integer;
- num_elems = field->type.u.array_nestable.length;
- elem_len = integer_type->size;
- signedness = integer_type->signedness;
- if (index >= num_elems) {
- ret = -EINVAL;
- goto end;
- }
- ret = specialize_get_index_object_type(&stack_top->load.object_type,
- signedness, elem_len);
- if (ret)
- goto end;
- gid.offset = index * (elem_len / CHAR_BIT);
- gid.array_len = num_elems * (elem_len / CHAR_BIT);
- gid.elem.type = stack_top->load.object_type;
- gid.elem.len = elem_len;
- if (integer_type->reverse_byte_order)
- gid.elem.rev_bo = true;
- stack_top->load.rev_bo = gid.elem.rev_bo;
- break;
- }
- case OBJECT_TYPE_SEQUENCE:
- {
- const struct lttng_integer_type *integer_type;
- const struct lttng_event_field *field;
- uint32_t elem_len;
- int signedness;
-
- field = stack_top->load.field;
- if (!lttng_is_bytewise_integer(field->type.u.sequence_nestable.elem_type)) {
- ret = -EINVAL;
- goto end;
- }
- integer_type = &field->type.u.sequence_nestable.elem_type->u.integer;
- elem_len = integer_type->size;
- signedness = integer_type->signedness;
- ret = specialize_get_index_object_type(&stack_top->load.object_type,
- signedness, elem_len);
- if (ret)
- goto end;
- gid.offset = index * (elem_len / CHAR_BIT);
- gid.elem.type = stack_top->load.object_type;
- gid.elem.len = elem_len;
- if (integer_type->reverse_byte_order)
- gid.elem.rev_bo = true;
- stack_top->load.rev_bo = gid.elem.rev_bo;
- break;
- }
- case OBJECT_TYPE_STRUCT:
- /* Only generated by the specialize phase. */
- case OBJECT_TYPE_VARIANT: /* Fall-through */
- default:
- printk(KERN_WARNING "LTTng: filter: Unexpected get index type %d",
- (int) stack_top->load.object_type);
- ret = -EINVAL;
- goto end;
- }
- break;
- case LOAD_ROOT_CONTEXT:
- case LOAD_ROOT_APP_CONTEXT:
- case LOAD_ROOT_PAYLOAD:
- printk(KERN_WARNING "LTTng: filter: Index lookup for root field not implemented yet.\n");
- ret = -EINVAL;
- goto end;
- }
- data_offset = bytecode_push_data(runtime, &gid,
- __alignof__(gid), sizeof(gid));
- if (data_offset < 0) {
- ret = -EINVAL;
- goto end;
- }
- switch (idx_len) {
- case 2:
- ((struct get_index_u16 *) insn->data)->index = data_offset;
- break;
- case 8:
- ((struct get_index_u64 *) insn->data)->index = data_offset;
- break;
- default:
- ret = -EINVAL;
- goto end;
- }
-
- return 0;
-
-end:
- return ret;
-}
-
-static int specialize_context_lookup_name(struct bytecode_runtime *bytecode,
- struct load_op *insn)
-{
- uint16_t offset;
- const char *name;
-
- offset = ((struct get_symbol *) insn->data)->offset;
- name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
- return lttng_get_context_index(lttng_static_ctx, name);
-}
-
-static int specialize_load_object(const struct lttng_event_field *field,
- struct vstack_load *load, bool is_context)
-{
- load->type = LOAD_OBJECT;
- /*
- * LTTng-UST layout all integer fields as s64 on the stack for the filter.
- */
- switch (field->type.atype) {
- case atype_integer:
- if (field->type.u.integer.signedness)
- load->object_type = OBJECT_TYPE_S64;
- else
- load->object_type = OBJECT_TYPE_U64;
- load->rev_bo = false;
- break;
- case atype_enum_nestable:
- {
- const struct lttng_integer_type *itype =
- &field->type.u.enum_nestable.container_type->u.integer;
-
- if (itype->signedness)
- load->object_type = OBJECT_TYPE_S64;
- else
- load->object_type = OBJECT_TYPE_U64;
- load->rev_bo = false;
- break;
- }
- case atype_array_nestable:
- if (!lttng_is_bytewise_integer(field->type.u.array_nestable.elem_type)) {
- printk(KERN_WARNING "LTTng: filter Array nesting only supports integer types.\n");
- return -EINVAL;
- }
- if (is_context) {
- load->object_type = OBJECT_TYPE_STRING;
- } else {
- if (field->type.u.array_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
- load->object_type = OBJECT_TYPE_ARRAY;
- load->field = field;
- } else {
- load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
- }
- }
- break;
- case atype_sequence_nestable:
- if (!lttng_is_bytewise_integer(field->type.u.sequence_nestable.elem_type)) {
- printk(KERN_WARNING "LTTng: filter Sequence nesting only supports integer types.\n");
- return -EINVAL;
- }
- if (is_context) {
- load->object_type = OBJECT_TYPE_STRING;
- } else {
- if (field->type.u.sequence_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
- load->object_type = OBJECT_TYPE_SEQUENCE;
- load->field = field;
- } else {
- load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
- }
- }
- break;
- case atype_string:
- load->object_type = OBJECT_TYPE_STRING;
- break;
- case atype_struct_nestable:
- printk(KERN_WARNING "LTTng: filter: Structure type cannot be loaded.\n");
- return -EINVAL;
- case atype_variant_nestable:
- printk(KERN_WARNING "LTTng: filter: Variant type cannot be loaded.\n");
- return -EINVAL;
- default:
- printk(KERN_WARNING "LTTng: filter: Unknown type: %d", (int) field->type.atype);
- return -EINVAL;
- }
- return 0;
-}
-
-static int specialize_context_lookup(struct bytecode_runtime *runtime,
- struct load_op *insn,
- struct vstack_load *load)
-{
- int idx, ret;
- struct lttng_ctx_field *ctx_field;
- struct lttng_event_field *field;
- struct filter_get_index_data gid;
- ssize_t data_offset;
-
- idx = specialize_context_lookup_name(runtime, insn);
- if (idx < 0) {
- return -ENOENT;
- }
- ctx_field = <tng_static_ctx->fields[idx];
- field = &ctx_field->event_field;
- ret = specialize_load_object(field, load, true);
- if (ret)
- return ret;
- /* Specialize each get_symbol into a get_index. */
- insn->op = FILTER_OP_GET_INDEX_U16;
- memset(&gid, 0, sizeof(gid));
- gid.ctx_index = idx;
- gid.elem.type = load->object_type;
- data_offset = bytecode_push_data(runtime, &gid,
- __alignof__(gid), sizeof(gid));
- if (data_offset < 0) {
- return -EINVAL;
- }
- ((struct get_index_u16 *) insn->data)->index = data_offset;
- return 0;
-}
-
-static int specialize_event_payload_lookup(struct lttng_event *event,
- struct bytecode_runtime *runtime,
- struct load_op *insn,
- struct vstack_load *load)
-{
- const char *name;
- uint16_t offset;
- const struct lttng_event_desc *desc = event->desc;
- unsigned int i, nr_fields;
- bool found = false;
- uint32_t field_offset = 0;
- const struct lttng_event_field *field;
- int ret;
- struct filter_get_index_data gid;
- ssize_t data_offset;
-
- nr_fields = desc->nr_fields;
- offset = ((struct get_symbol *) insn->data)->offset;
- name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
- for (i = 0; i < nr_fields; i++) {
- field = &desc->fields[i];
- if (field->nofilter) {
- continue;
- }
- if (!strcmp(field->name, name)) {
- found = true;
- break;
- }
- /* compute field offset on stack */
- switch (field->type.atype) {
- case atype_integer:
- case atype_enum_nestable:
- field_offset += sizeof(int64_t);
- break;
- case atype_array_nestable:
- case atype_sequence_nestable:
- field_offset += sizeof(unsigned long);
- field_offset += sizeof(void *);
- break;
- case atype_string:
- field_offset += sizeof(void *);
- break;
- default:
- ret = -EINVAL;
- goto end;
- }
- }
- if (!found) {
- ret = -EINVAL;
- goto end;
- }
-
- ret = specialize_load_object(field, load, false);
- if (ret)
- goto end;
-
- /* Specialize each get_symbol into a get_index. */
- insn->op = FILTER_OP_GET_INDEX_U16;
- memset(&gid, 0, sizeof(gid));
- gid.offset = field_offset;
- gid.elem.type = load->object_type;
- data_offset = bytecode_push_data(runtime, &gid,
- __alignof__(gid), sizeof(gid));
- if (data_offset < 0) {
- ret = -EINVAL;
- goto end;
- }
- ((struct get_index_u16 *) insn->data)->index = data_offset;
- ret = 0;
-end:
- return ret;
-}
-
-int lttng_filter_specialize_bytecode(struct lttng_event *event,
- struct bytecode_runtime *bytecode)
-{
- void *pc, *next_pc, *start_pc;
- int ret = -EINVAL;
- struct vstack _stack;
- struct vstack *stack = &_stack;
-
- vstack_init(stack);
-
- start_pc = &bytecode->code[0];
- for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
- pc = next_pc) {
- switch (*(filter_opcode_t *) pc) {
- case FILTER_OP_UNKNOWN:
- default:
- printk(KERN_WARNING "LTTng: filter: unknown bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
-
- case FILTER_OP_RETURN:
- case FILTER_OP_RETURN_S64:
- ret = 0;
- goto end;
-
- /* binary */
- case FILTER_OP_MUL:
- case FILTER_OP_DIV:
- case FILTER_OP_MOD:
- case FILTER_OP_PLUS:
- case FILTER_OP_MINUS:
- printk(KERN_WARNING "LTTng: filter: unsupported bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
-
- case FILTER_OP_EQ:
- {
- struct binary_op *insn = (struct binary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- printk(KERN_WARNING "LTTng: filter: unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STRING:
- if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
- insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
- else
- insn->op = FILTER_OP_EQ_STRING;
- break;
- case REG_STAR_GLOB_STRING:
- insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
- break;
- case REG_S64:
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_EQ_S64;
- else
- insn->op = FILTER_OP_EQ_DOUBLE_S64;
- break;
- case REG_DOUBLE:
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_EQ_S64_DOUBLE;
- else
- insn->op = FILTER_OP_EQ_DOUBLE;
- break;
- }
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- case FILTER_OP_NE:
- {
- struct binary_op *insn = (struct binary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- printk(KERN_WARNING "LTTng: filter: unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STRING:
- if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
- insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
- else
- insn->op = FILTER_OP_NE_STRING;
- break;
- case REG_STAR_GLOB_STRING:
- insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
- break;
- case REG_S64:
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_NE_S64;
- else
- insn->op = FILTER_OP_NE_DOUBLE_S64;
- break;
- case REG_DOUBLE:
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_NE_S64_DOUBLE;
- else
- insn->op = FILTER_OP_NE_DOUBLE;
- break;
- }
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- case FILTER_OP_GT:
- {
- struct binary_op *insn = (struct binary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- printk(KERN_WARNING "LTTng: filter: unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STAR_GLOB_STRING:
- printk(KERN_WARNING "LTTng: filter: invalid register type for '>' binary operator\n");
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- insn->op = FILTER_OP_GT_STRING;
- break;
- case REG_S64:
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_GT_S64;
- else
- insn->op = FILTER_OP_GT_DOUBLE_S64;
- break;
- case REG_DOUBLE:
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_GT_S64_DOUBLE;
- else
- insn->op = FILTER_OP_GT_DOUBLE;
- break;
- }
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- case FILTER_OP_LT:
- {
- struct binary_op *insn = (struct binary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- printk(KERN_WARNING "LTTng: filter: unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STAR_GLOB_STRING:
- printk(KERN_WARNING "LTTng: filter: invalid register type for '<' binary operator\n");
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- insn->op = FILTER_OP_LT_STRING;
- break;
- case REG_S64:
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_LT_S64;
- else
- insn->op = FILTER_OP_LT_DOUBLE_S64;
- break;
- case REG_DOUBLE:
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_LT_S64_DOUBLE;
- else
- insn->op = FILTER_OP_LT_DOUBLE;
- break;
- }
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- case FILTER_OP_GE:
- {
- struct binary_op *insn = (struct binary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- printk(KERN_WARNING "LTTng: filter: unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STAR_GLOB_STRING:
- printk(KERN_WARNING "LTTng: filter: invalid register type for '>=' binary operator\n");
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- insn->op = FILTER_OP_GE_STRING;
- break;
- case REG_S64:
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_GE_S64;
- else
- insn->op = FILTER_OP_GE_DOUBLE_S64;
- break;
- case REG_DOUBLE:
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_GE_S64_DOUBLE;
- else
- insn->op = FILTER_OP_GE_DOUBLE;
- break;
- }
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
- case FILTER_OP_LE:
- {
- struct binary_op *insn = (struct binary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- printk(KERN_WARNING "LTTng: filter: unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STAR_GLOB_STRING:
- printk(KERN_WARNING "LTTng: filter: invalid register type for '<=' binary operator\n");
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- insn->op = FILTER_OP_LE_STRING;
- break;
- case REG_S64:
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_LE_S64;
- else
- insn->op = FILTER_OP_LE_DOUBLE_S64;
- break;
- case REG_DOUBLE:
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_LE_S64_DOUBLE;
- else
- insn->op = FILTER_OP_LE_DOUBLE;
- break;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- case FILTER_OP_EQ_STRING:
- case FILTER_OP_NE_STRING:
- case FILTER_OP_GT_STRING:
- case FILTER_OP_LT_STRING:
- case FILTER_OP_GE_STRING:
- case FILTER_OP_LE_STRING:
- case FILTER_OP_EQ_STAR_GLOB_STRING:
- case FILTER_OP_NE_STAR_GLOB_STRING:
- case FILTER_OP_EQ_S64:
- case FILTER_OP_NE_S64:
- case FILTER_OP_GT_S64:
- case FILTER_OP_LT_S64:
- case FILTER_OP_GE_S64:
- case FILTER_OP_LE_S64:
- case FILTER_OP_EQ_DOUBLE:
- case FILTER_OP_NE_DOUBLE:
- case FILTER_OP_GT_DOUBLE:
- case FILTER_OP_LT_DOUBLE:
- case FILTER_OP_GE_DOUBLE:
- case FILTER_OP_LE_DOUBLE:
- case FILTER_OP_EQ_DOUBLE_S64:
- case FILTER_OP_NE_DOUBLE_S64:
- case FILTER_OP_GT_DOUBLE_S64:
- case FILTER_OP_LT_DOUBLE_S64:
- case FILTER_OP_GE_DOUBLE_S64:
- case FILTER_OP_LE_DOUBLE_S64:
- case FILTER_OP_EQ_S64_DOUBLE:
- case FILTER_OP_NE_S64_DOUBLE:
- case FILTER_OP_GT_S64_DOUBLE:
- case FILTER_OP_LT_S64_DOUBLE:
- case FILTER_OP_GE_S64_DOUBLE:
- case FILTER_OP_LE_S64_DOUBLE:
- case FILTER_OP_BIT_RSHIFT:
- case FILTER_OP_BIT_LSHIFT:
- case FILTER_OP_BIT_AND:
- case FILTER_OP_BIT_OR:
- case FILTER_OP_BIT_XOR:
- {
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- /* unary */
- case FILTER_OP_UNARY_PLUS:
- {
- struct unary_op *insn = (struct unary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- printk(KERN_WARNING "LTTng: filter: unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_S64:
- insn->op = FILTER_OP_UNARY_PLUS_S64;
- break;
- case REG_DOUBLE:
- insn->op = FILTER_OP_UNARY_PLUS_DOUBLE;
- break;
- }
- /* Pop 1, push 1 */
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case FILTER_OP_UNARY_MINUS:
- {
- struct unary_op *insn = (struct unary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- printk(KERN_WARNING "LTTng: filter: unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_S64:
- insn->op = FILTER_OP_UNARY_MINUS_S64;
- break;
- case REG_DOUBLE:
- insn->op = FILTER_OP_UNARY_MINUS_DOUBLE;
- break;
- }
- /* Pop 1, push 1 */
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case FILTER_OP_UNARY_NOT:
- {
- struct unary_op *insn = (struct unary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- printk(KERN_WARNING "LTTng: filter: unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_S64:
- insn->op = FILTER_OP_UNARY_NOT_S64;
- break;
- case REG_DOUBLE:
- insn->op = FILTER_OP_UNARY_NOT_DOUBLE;
- break;
- }
- /* Pop 1, push 1 */
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case FILTER_OP_UNARY_BIT_NOT:
- {
- /* Pop 1, push 1 */
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case FILTER_OP_UNARY_PLUS_S64:
- case FILTER_OP_UNARY_MINUS_S64:
- case FILTER_OP_UNARY_NOT_S64:
- case FILTER_OP_UNARY_PLUS_DOUBLE:
- case FILTER_OP_UNARY_MINUS_DOUBLE:
- case FILTER_OP_UNARY_NOT_DOUBLE:
- {
- /* Pop 1, push 1 */
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- /* logical */
- case FILTER_OP_AND:
- case FILTER_OP_OR:
- {
- /* Continue to next instruction */
- /* Pop 1 when jump not taken */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct logical_op);
- break;
- }
-
- /* load field ref */
- case FILTER_OP_LOAD_FIELD_REF:
- {
- printk(KERN_WARNING "LTTng: filter: Unknown field ref type\n");
- ret = -EINVAL;
- goto end;
- }
- /* get context ref */
- case FILTER_OP_GET_CONTEXT_REF:
- {
- printk(KERN_WARNING "LTTng: filter: Unknown get context ref type\n");
- ret = -EINVAL;
- goto end;
- }
- case FILTER_OP_LOAD_FIELD_REF_STRING:
- case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
- case FILTER_OP_GET_CONTEXT_REF_STRING:
- case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
- case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STRING;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
- case FILTER_OP_LOAD_FIELD_REF_S64:
- case FILTER_OP_GET_CONTEXT_REF_S64:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
- case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
- case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_DOUBLE;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
-
- /* load from immediate operand */
- case FILTER_OP_LOAD_STRING:
- {
- struct load_op *insn = (struct load_op *) pc;
-
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STRING;
- next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
- break;
- }
-
- case FILTER_OP_LOAD_STAR_GLOB_STRING:
- {
- struct load_op *insn = (struct load_op *) pc;
-
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
- next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
- break;
- }
-
- case FILTER_OP_LOAD_S64:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct load_op)
- + sizeof(struct literal_numeric);
- break;
- }
-
- case FILTER_OP_LOAD_DOUBLE:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_DOUBLE;
- next_pc += sizeof(struct load_op)
- + sizeof(struct literal_double);
- break;
- }
-
- /* cast */
- case FILTER_OP_CAST_TO_S64:
- {
- struct cast_op *insn = (struct cast_op *) pc;
-
- switch (vstack_ax(stack)->type) {
- default:
- printk(KERN_WARNING "LTTng: filter: unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- printk(KERN_WARNING "LTTng: filter: Cast op can only be applied to numeric or floating point registers\n");
- ret = -EINVAL;
- goto end;
- case REG_S64:
- insn->op = FILTER_OP_CAST_NOP;
- break;
- case REG_DOUBLE:
- insn->op = FILTER_OP_CAST_DOUBLE_TO_S64;
- break;
- }
- /* Pop 1, push 1 */
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct cast_op);
- break;
- }
- case FILTER_OP_CAST_DOUBLE_TO_S64:
- {
- /* Pop 1, push 1 */
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct cast_op);
- break;
- }
- case FILTER_OP_CAST_NOP:
- {
- next_pc += sizeof(struct cast_op);
- break;
- }
-
- /*
- * Instructions for recursive traversal through composed types.
- */
- case FILTER_OP_GET_CONTEXT_ROOT:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_PTR;
- vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
- next_pc += sizeof(struct load_op);
- break;
- }
- case FILTER_OP_GET_APP_CONTEXT_ROOT:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_PTR;
- vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
- next_pc += sizeof(struct load_op);
- break;
- }
- case FILTER_OP_GET_PAYLOAD_ROOT:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_PTR;
- vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case FILTER_OP_LOAD_FIELD:
- {
- struct load_op *insn = (struct load_op *) pc;
-
- WARN_ON_ONCE(vstack_ax(stack)->type != REG_PTR);
- /* Pop 1, push 1 */
- ret = specialize_load_field(vstack_ax(stack), insn);
- if (ret)
- goto end;
-
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case FILTER_OP_LOAD_FIELD_S8:
- case FILTER_OP_LOAD_FIELD_S16:
- case FILTER_OP_LOAD_FIELD_S32:
- case FILTER_OP_LOAD_FIELD_S64:
- case FILTER_OP_LOAD_FIELD_U8:
- case FILTER_OP_LOAD_FIELD_U16:
- case FILTER_OP_LOAD_FIELD_U32:
- case FILTER_OP_LOAD_FIELD_U64:
- {
- /* Pop 1, push 1 */
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case FILTER_OP_LOAD_FIELD_STRING:
- case FILTER_OP_LOAD_FIELD_SEQUENCE:
- {
- /* Pop 1, push 1 */
- vstack_ax(stack)->type = REG_STRING;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case FILTER_OP_LOAD_FIELD_DOUBLE:
- {
- /* Pop 1, push 1 */
- vstack_ax(stack)->type = REG_DOUBLE;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case FILTER_OP_GET_SYMBOL:
- {
- struct load_op *insn = (struct load_op *) pc;
-
- dbg_printk("op get symbol\n");
- switch (vstack_ax(stack)->load.type) {
- case LOAD_OBJECT:
- printk(KERN_WARNING "LTTng: filter: Nested fields not implemented yet.\n");
- ret = -EINVAL;
- goto end;
- case LOAD_ROOT_CONTEXT:
- /* Lookup context field. */
- ret = specialize_context_lookup(bytecode, insn,
- &vstack_ax(stack)->load);
- if (ret)
- goto end;
- break;
- case LOAD_ROOT_APP_CONTEXT:
- ret = -EINVAL;
- goto end;
- case LOAD_ROOT_PAYLOAD:
- /* Lookup event payload field. */
- ret = specialize_event_payload_lookup(event,
- bytecode, insn,
- &vstack_ax(stack)->load);
- if (ret)
- goto end;
- break;
- }
- next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
- break;
- }
-
- case FILTER_OP_GET_SYMBOL_FIELD:
- {
- /* Always generated by specialize phase. */
- ret = -EINVAL;
- goto end;
- }
-
- case FILTER_OP_GET_INDEX_U16:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
-
- dbg_printk("op get index u16\n");
- /* Pop 1, push 1 */
- ret = specialize_get_index(bytecode, insn, index->index,
- vstack_ax(stack), sizeof(*index));
- if (ret)
- goto end;
- next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
- break;
- }
-
- case FILTER_OP_GET_INDEX_U64:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
-
- dbg_printk("op get index u64\n");
- /* Pop 1, push 1 */
- ret = specialize_get_index(bytecode, insn, index->index,
- vstack_ax(stack), sizeof(*index));
- if (ret)
- goto end;
- next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
- break;
- }
-
- }
- }
-end:
- return ret;
-}
+++ /dev/null
-/* SPDX-License-Identifier: MIT
- *
- * lttng-filter-validator.c
- *
- * LTTng modules filter bytecode validator.
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/types.h>
-#include <linux/jhash.h>
-#include <linux/slab.h>
-
-#include <wrapper/list.h>
-#include <lttng/filter.h>
-
-#define MERGE_POINT_TABLE_BITS 7
-#define MERGE_POINT_TABLE_SIZE (1U << MERGE_POINT_TABLE_BITS)
-
-/* merge point table node */
-struct mp_node {
- struct hlist_node node;
-
- /* Context at merge point */
- struct vstack stack;
- unsigned long target_pc;
-};
-
-struct mp_table {
- struct hlist_head mp_head[MERGE_POINT_TABLE_SIZE];
-};
-
-static
-int lttng_hash_match(struct mp_node *mp_node, unsigned long key_pc)
-{
- if (mp_node->target_pc == key_pc)
- return 1;
- else
- return 0;
-}
-
-static
-int merge_points_compare(const struct vstack *stacka,
- const struct vstack *stackb)
-{
- int i, len;
-
- if (stacka->top != stackb->top)
- return 1;
- len = stacka->top + 1;
- WARN_ON_ONCE(len < 0);
- for (i = 0; i < len; i++) {
- if (stacka->e[i].type != stackb->e[i].type)
- return 1;
- }
- return 0;
-}
-
-static
-int merge_point_add_check(struct mp_table *mp_table, unsigned long target_pc,
- const struct vstack *stack)
-{
- struct mp_node *mp_node;
- unsigned long hash = jhash_1word(target_pc, 0);
- struct hlist_head *head;
- struct mp_node *lookup_node;
- int found = 0;
-
- dbg_printk("Filter: adding merge point at offset %lu, hash %lu\n",
- target_pc, hash);
- mp_node = kzalloc(sizeof(struct mp_node), GFP_KERNEL);
- if (!mp_node)
- return -ENOMEM;
- mp_node->target_pc = target_pc;
- memcpy(&mp_node->stack, stack, sizeof(mp_node->stack));
-
- head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
- lttng_hlist_for_each_entry(lookup_node, head, node) {
- if (lttng_hash_match(lookup_node, target_pc)) {
- found = 1;
- break;
- }
- }
- if (found) {
- /* Key already present */
- dbg_printk("Filter: compare merge points for offset %lu, hash %lu\n",
- target_pc, hash);
- kfree(mp_node);
- if (merge_points_compare(stack, &lookup_node->stack)) {
- printk(KERN_WARNING "LTTng: filter: Merge points differ for offset %lu\n",
- target_pc);
- return -EINVAL;
- }
- } else {
- hlist_add_head(&mp_node->node, head);
- }
- return 0;
-}
-
-/*
- * Binary comparators use top of stack and top of stack -1.
- */
-static
-int bin_op_compare_check(struct vstack *stack, const filter_opcode_t opcode,
- const char *str)
-{
- if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
- goto error_empty;
-
- switch (vstack_ax(stack)->type) {
- default:
- case REG_DOUBLE:
- goto error_type;
-
- case REG_STRING:
- switch (vstack_bx(stack)->type) {
- default:
- case REG_DOUBLE:
- goto error_type;
- case REG_TYPE_UNKNOWN:
- goto unknown;
- case REG_STRING:
- break;
- case REG_STAR_GLOB_STRING:
- if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) {
- goto error_mismatch;
- }
- break;
- case REG_S64:
- goto error_mismatch;
- }
- break;
- case REG_STAR_GLOB_STRING:
- switch (vstack_bx(stack)->type) {
- default:
- case REG_DOUBLE:
- goto error_type;
- case REG_TYPE_UNKNOWN:
- goto unknown;
- case REG_STRING:
- if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) {
- goto error_mismatch;
- }
- break;
- case REG_STAR_GLOB_STRING:
- case REG_S64:
- goto error_mismatch;
- }
- break;
- case REG_S64:
- switch (vstack_bx(stack)->type) {
- default:
- case REG_DOUBLE:
- goto error_type;
- case REG_TYPE_UNKNOWN:
- goto unknown;
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- goto error_mismatch;
- case REG_S64:
- break;
- }
- break;
- case REG_TYPE_UNKNOWN:
- switch (vstack_bx(stack)->type) {
- default:
- case REG_DOUBLE:
- goto error_type;
- case REG_TYPE_UNKNOWN:
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- case REG_S64:
- goto unknown;
- }
- break;
- }
- return 0;
-
-unknown:
- return 1;
-
-error_empty:
- printk(KERN_WARNING "LTTng: filter: empty stack for '%s' binary operator\n", str);
- return -EINVAL;
-
-error_mismatch:
- printk(KERN_WARNING "LTTng: filter: type mismatch for '%s' binary operator\n", str);
- return -EINVAL;
-
-error_type:
- printk(KERN_WARNING "LTTng: filter: unknown type for '%s' binary operator\n", str);
- return -EINVAL;
-}
-
-/*
- * Binary bitwise operators use top of stack and top of stack -1.
- * Return 0 if typing is known to match, 1 if typing is dynamic
- * (unknown), negative error value on error.
- */
-static
-int bin_op_bitwise_check(struct vstack *stack, filter_opcode_t opcode,
- const char *str)
-{
- if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
- goto error_empty;
-
- switch (vstack_ax(stack)->type) {
- default:
- case REG_DOUBLE:
- goto error_type;
-
- case REG_TYPE_UNKNOWN:
- switch (vstack_bx(stack)->type) {
- default:
- case REG_DOUBLE:
- goto error_type;
- case REG_TYPE_UNKNOWN:
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- case REG_S64:
- goto unknown;
- }
- break;
- case REG_S64:
- switch (vstack_bx(stack)->type) {
- default:
- case REG_DOUBLE:
- goto error_type;
- case REG_TYPE_UNKNOWN:
- goto unknown;
- case REG_S64:
- break;
- }
- break;
- }
- return 0;
-
-unknown:
- return 1;
-
-error_empty:
- printk(KERN_WARNING "LTTng: filter: empty stack for '%s' binary operator\n", str);
- return -EINVAL;
-
-error_type:
- printk(KERN_WARNING "LTTng: filter: unknown type for '%s' binary operator\n", str);
- return -EINVAL;
-}
-
-static
-int validate_get_symbol(struct bytecode_runtime *bytecode,
- const struct get_symbol *sym)
-{
- const char *str, *str_limit;
- size_t len_limit;
-
- if (sym->offset >= bytecode->p.bc->bc.len - bytecode->p.bc->bc.reloc_offset)
- return -EINVAL;
-
- str = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + sym->offset;
- str_limit = bytecode->p.bc->bc.data + bytecode->p.bc->bc.len;
- len_limit = str_limit - str;
- if (strnlen(str, len_limit) == len_limit)
- return -EINVAL;
- return 0;
-}
-
-/*
- * Validate bytecode range overflow within the validation pass.
- * Called for each instruction encountered.
- */
-static
-int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
- char *start_pc, char *pc)
-{
- int ret = 0;
-
- switch (*(filter_opcode_t *) pc) {
- case FILTER_OP_UNKNOWN:
- default:
- {
- printk(KERN_WARNING "LTTng: filter: unknown bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- break;
- }
-
- case FILTER_OP_RETURN:
- case FILTER_OP_RETURN_S64:
- {
- if (unlikely(pc + sizeof(struct return_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- /* binary */
- case FILTER_OP_MUL:
- case FILTER_OP_DIV:
- case FILTER_OP_MOD:
- case FILTER_OP_PLUS:
- case FILTER_OP_MINUS:
- case FILTER_OP_EQ_DOUBLE:
- case FILTER_OP_NE_DOUBLE:
- case FILTER_OP_GT_DOUBLE:
- case FILTER_OP_LT_DOUBLE:
- case FILTER_OP_GE_DOUBLE:
- case FILTER_OP_LE_DOUBLE:
- /* Floating point */
- case FILTER_OP_EQ_DOUBLE_S64:
- case FILTER_OP_NE_DOUBLE_S64:
- case FILTER_OP_GT_DOUBLE_S64:
- case FILTER_OP_LT_DOUBLE_S64:
- case FILTER_OP_GE_DOUBLE_S64:
- case FILTER_OP_LE_DOUBLE_S64:
- case FILTER_OP_EQ_S64_DOUBLE:
- case FILTER_OP_NE_S64_DOUBLE:
- case FILTER_OP_GT_S64_DOUBLE:
- case FILTER_OP_LT_S64_DOUBLE:
- case FILTER_OP_GE_S64_DOUBLE:
- case FILTER_OP_LE_S64_DOUBLE:
- case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
- case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
- case FILTER_OP_LOAD_DOUBLE:
- case FILTER_OP_CAST_DOUBLE_TO_S64:
- case FILTER_OP_UNARY_PLUS_DOUBLE:
- case FILTER_OP_UNARY_MINUS_DOUBLE:
- case FILTER_OP_UNARY_NOT_DOUBLE:
- {
- printk(KERN_WARNING "LTTng: filter: unsupported bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- break;
- }
-
- case FILTER_OP_EQ:
- case FILTER_OP_NE:
- case FILTER_OP_GT:
- case FILTER_OP_LT:
- case FILTER_OP_GE:
- case FILTER_OP_LE:
- case FILTER_OP_EQ_STRING:
- case FILTER_OP_NE_STRING:
- case FILTER_OP_GT_STRING:
- case FILTER_OP_LT_STRING:
- case FILTER_OP_GE_STRING:
- case FILTER_OP_LE_STRING:
- case FILTER_OP_EQ_STAR_GLOB_STRING:
- case FILTER_OP_NE_STAR_GLOB_STRING:
- case FILTER_OP_EQ_S64:
- case FILTER_OP_NE_S64:
- case FILTER_OP_GT_S64:
- case FILTER_OP_LT_S64:
- case FILTER_OP_GE_S64:
- case FILTER_OP_LE_S64:
- case FILTER_OP_BIT_RSHIFT:
- case FILTER_OP_BIT_LSHIFT:
- case FILTER_OP_BIT_AND:
- case FILTER_OP_BIT_OR:
- case FILTER_OP_BIT_XOR:
- {
- if (unlikely(pc + sizeof(struct binary_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- /* unary */
- case FILTER_OP_UNARY_PLUS:
- case FILTER_OP_UNARY_MINUS:
- case FILTER_OP_UNARY_NOT:
- case FILTER_OP_UNARY_PLUS_S64:
- case FILTER_OP_UNARY_MINUS_S64:
- case FILTER_OP_UNARY_NOT_S64:
- case FILTER_OP_UNARY_BIT_NOT:
- {
- if (unlikely(pc + sizeof(struct unary_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- /* logical */
- case FILTER_OP_AND:
- case FILTER_OP_OR:
- {
- if (unlikely(pc + sizeof(struct logical_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- /* load field ref */
- case FILTER_OP_LOAD_FIELD_REF:
- {
- printk(KERN_WARNING "LTTng: filter: Unknown field ref type\n");
- ret = -EINVAL;
- break;
- }
-
- /* get context ref */
- case FILTER_OP_GET_CONTEXT_REF:
- {
- printk(KERN_WARNING "LTTng: filter: Unknown field ref type\n");
- ret = -EINVAL;
- break;
- }
- case FILTER_OP_LOAD_FIELD_REF_STRING:
- case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
- case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
- case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
- case FILTER_OP_LOAD_FIELD_REF_S64:
- case FILTER_OP_GET_CONTEXT_REF_STRING:
- case FILTER_OP_GET_CONTEXT_REF_S64:
- {
- if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- /* load from immediate operand */
- case FILTER_OP_LOAD_STRING:
- case FILTER_OP_LOAD_STAR_GLOB_STRING:
- {
- struct load_op *insn = (struct load_op *) pc;
- uint32_t str_len, maxlen;
-
- if (unlikely(pc + sizeof(struct load_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- break;
- }
-
- maxlen = start_pc + bytecode->len - pc - sizeof(struct load_op);
- str_len = strnlen(insn->data, maxlen);
- if (unlikely(str_len >= maxlen)) {
- /* Final '\0' not found within range */
- ret = -ERANGE;
- }
- break;
- }
-
- case FILTER_OP_LOAD_S64:
- {
- if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- case FILTER_OP_CAST_TO_S64:
- case FILTER_OP_CAST_NOP:
- {
- if (unlikely(pc + sizeof(struct cast_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- /*
- * Instructions for recursive traversal through composed types.
- */
- case FILTER_OP_GET_CONTEXT_ROOT:
- case FILTER_OP_GET_APP_CONTEXT_ROOT:
- case FILTER_OP_GET_PAYLOAD_ROOT:
- case FILTER_OP_LOAD_FIELD:
- case FILTER_OP_LOAD_FIELD_S8:
- case FILTER_OP_LOAD_FIELD_S16:
- case FILTER_OP_LOAD_FIELD_S32:
- case FILTER_OP_LOAD_FIELD_S64:
- case FILTER_OP_LOAD_FIELD_U8:
- case FILTER_OP_LOAD_FIELD_U16:
- case FILTER_OP_LOAD_FIELD_U32:
- case FILTER_OP_LOAD_FIELD_U64:
- case FILTER_OP_LOAD_FIELD_STRING:
- case FILTER_OP_LOAD_FIELD_SEQUENCE:
- case FILTER_OP_LOAD_FIELD_DOUBLE:
- if (unlikely(pc + sizeof(struct load_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
-
- case FILTER_OP_GET_SYMBOL:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_symbol *sym = (struct get_symbol *) insn->data;
-
- if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_symbol)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- break;
- }
- ret = validate_get_symbol(bytecode, sym);
- break;
- }
-
- case FILTER_OP_GET_SYMBOL_FIELD:
- printk(KERN_WARNING "LTTng: filter: Unexpected get symbol field\n");
- ret = -EINVAL;
- break;
-
- case FILTER_OP_GET_INDEX_U16:
- if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u16)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
-
- case FILTER_OP_GET_INDEX_U64:
- if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u64)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- return ret;
-}
-
-static
-unsigned long delete_all_nodes(struct mp_table *mp_table)
-{
- struct mp_node *mp_node;
- struct hlist_node *tmp;
- unsigned long nr_nodes = 0;
- int i;
-
- for (i = 0; i < MERGE_POINT_TABLE_SIZE; i++) {
- struct hlist_head *head;
-
- head = &mp_table->mp_head[i];
- lttng_hlist_for_each_entry_safe(mp_node, tmp, head, node) {
- kfree(mp_node);
- nr_nodes++;
- }
- }
- return nr_nodes;
-}
-
-/*
- * Return value:
- * >=0: success
- * <0: error
- */
-static
-int validate_instruction_context(struct bytecode_runtime *bytecode,
- struct vstack *stack,
- char *start_pc,
- char *pc)
-{
- int ret = 0;
- const filter_opcode_t opcode = *(filter_opcode_t *) pc;
-
- switch (opcode) {
- case FILTER_OP_UNKNOWN:
- default:
- {
- printk(KERN_WARNING "LTTng: filter: unknown bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
- }
-
- case FILTER_OP_RETURN:
- case FILTER_OP_RETURN_S64:
- {
- goto end;
- }
-
- /* binary */
- case FILTER_OP_MUL:
- case FILTER_OP_DIV:
- case FILTER_OP_MOD:
- case FILTER_OP_PLUS:
- case FILTER_OP_MINUS:
- /* Floating point */
- case FILTER_OP_EQ_DOUBLE:
- case FILTER_OP_NE_DOUBLE:
- case FILTER_OP_GT_DOUBLE:
- case FILTER_OP_LT_DOUBLE:
- case FILTER_OP_GE_DOUBLE:
- case FILTER_OP_LE_DOUBLE:
- case FILTER_OP_EQ_DOUBLE_S64:
- case FILTER_OP_NE_DOUBLE_S64:
- case FILTER_OP_GT_DOUBLE_S64:
- case FILTER_OP_LT_DOUBLE_S64:
- case FILTER_OP_GE_DOUBLE_S64:
- case FILTER_OP_LE_DOUBLE_S64:
- case FILTER_OP_EQ_S64_DOUBLE:
- case FILTER_OP_NE_S64_DOUBLE:
- case FILTER_OP_GT_S64_DOUBLE:
- case FILTER_OP_LT_S64_DOUBLE:
- case FILTER_OP_GE_S64_DOUBLE:
- case FILTER_OP_LE_S64_DOUBLE:
- case FILTER_OP_UNARY_PLUS_DOUBLE:
- case FILTER_OP_UNARY_MINUS_DOUBLE:
- case FILTER_OP_UNARY_NOT_DOUBLE:
- case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
- case FILTER_OP_LOAD_DOUBLE:
- case FILTER_OP_CAST_DOUBLE_TO_S64:
- case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
- {
- printk(KERN_WARNING "LTTng: filter: unsupported bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
- }
-
- case FILTER_OP_EQ:
- {
- ret = bin_op_compare_check(stack, opcode, "==");
- if (ret < 0)
- goto end;
- break;
- }
- case FILTER_OP_NE:
- {
- ret = bin_op_compare_check(stack, opcode, "!=");
- if (ret < 0)
- goto end;
- break;
- }
- case FILTER_OP_GT:
- {
- ret = bin_op_compare_check(stack, opcode, ">");
- if (ret < 0)
- goto end;
- break;
- }
- case FILTER_OP_LT:
- {
- ret = bin_op_compare_check(stack, opcode, "<");
- if (ret < 0)
- goto end;
- break;
- }
- case FILTER_OP_GE:
- {
- ret = bin_op_compare_check(stack, opcode, ">=");
- if (ret < 0)
- goto end;
- break;
- }
- case FILTER_OP_LE:
- {
- ret = bin_op_compare_check(stack, opcode, "<=");
- if (ret < 0)
- goto end;
- break;
- }
-
- case FILTER_OP_EQ_STRING:
- case FILTER_OP_NE_STRING:
- case FILTER_OP_GT_STRING:
- case FILTER_OP_LT_STRING:
- case FILTER_OP_GE_STRING:
- case FILTER_OP_LE_STRING:
- {
- if (!vstack_ax(stack) || !vstack_bx(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_STRING
- || vstack_bx(stack)->type != REG_STRING) {
- printk(KERN_WARNING "LTTng: filter: Unexpected register type for string comparator\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
-
- case FILTER_OP_EQ_STAR_GLOB_STRING:
- case FILTER_OP_NE_STAR_GLOB_STRING:
- {
- if (!vstack_ax(stack) || !vstack_bx(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_STAR_GLOB_STRING
- && vstack_bx(stack)->type != REG_STAR_GLOB_STRING) {
- printk(KERN_WARNING "LTTng: filter: Unexpected register type for globbing pattern comparator\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- case FILTER_OP_EQ_S64:
- case FILTER_OP_NE_S64:
- case FILTER_OP_GT_S64:
- case FILTER_OP_LT_S64:
- case FILTER_OP_GE_S64:
- case FILTER_OP_LE_S64:
- {
- if (!vstack_ax(stack) || !vstack_bx(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_S64
- || vstack_bx(stack)->type != REG_S64) {
- printk(KERN_WARNING "LTTng: filter: Unexpected register type for s64 comparator\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- case FILTER_OP_BIT_RSHIFT:
- ret = bin_op_bitwise_check(stack, opcode, ">>");
- if (ret < 0)
- goto end;
- break;
- case FILTER_OP_BIT_LSHIFT:
- ret = bin_op_bitwise_check(stack, opcode, "<<");
- if (ret < 0)
- goto end;
- break;
- case FILTER_OP_BIT_AND:
- ret = bin_op_bitwise_check(stack, opcode, "&");
- if (ret < 0)
- goto end;
- break;
- case FILTER_OP_BIT_OR:
- ret = bin_op_bitwise_check(stack, opcode, "|");
- if (ret < 0)
- goto end;
- break;
- case FILTER_OP_BIT_XOR:
- ret = bin_op_bitwise_check(stack, opcode, "^");
- if (ret < 0)
- goto end;
- break;
-
- /* unary */
- case FILTER_OP_UNARY_PLUS:
- case FILTER_OP_UNARY_MINUS:
- case FILTER_OP_UNARY_NOT:
- {
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- default:
- case REG_DOUBLE:
- printk(KERN_WARNING "LTTng: filter: unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- printk(KERN_WARNING "LTTng: filter: Unary op can only be applied to numeric or floating point registers\n");
- ret = -EINVAL;
- goto end;
- case REG_S64:
- case REG_TYPE_UNKNOWN:
- break;
- }
- break;
- }
- case FILTER_OP_UNARY_BIT_NOT:
- {
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- default:
- printk(KERN_WARNING "LTTng: filter: unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- case REG_DOUBLE:
- printk(KERN_WARNING "LTTng: filter: Unary bitwise op can only be applied to numeric registers\n");
- ret = -EINVAL;
- goto end;
- case REG_S64:
- break;
- case REG_TYPE_UNKNOWN:
- break;
- }
- break;
- }
-
- case FILTER_OP_UNARY_PLUS_S64:
- case FILTER_OP_UNARY_MINUS_S64:
- case FILTER_OP_UNARY_NOT_S64:
- {
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_S64) {
- printk(KERN_WARNING "LTTng: filter: Invalid register type\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- /* logical */
- case FILTER_OP_AND:
- case FILTER_OP_OR:
- {
- struct logical_op *insn = (struct logical_op *) pc;
-
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_S64) {
- printk(KERN_WARNING "LTTng: filter: Logical comparator expects S64 register\n");
- ret = -EINVAL;
- goto end;
- }
-
- dbg_printk("Validate jumping to bytecode offset %u\n",
- (unsigned int) insn->skip_offset);
- if (unlikely(start_pc + insn->skip_offset <= pc)) {
- printk(KERN_WARNING "LTTng: filter: Loops are not allowed in bytecode\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- /* load field ref */
- case FILTER_OP_LOAD_FIELD_REF:
- {
- printk(KERN_WARNING "LTTng: filter: Unknown field ref type\n");
- ret = -EINVAL;
- goto end;
- }
- case FILTER_OP_LOAD_FIELD_REF_STRING:
- case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
- case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
- case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printk("Validate load field ref offset %u type string\n",
- ref->offset);
- break;
- }
- case FILTER_OP_LOAD_FIELD_REF_S64:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printk("Validate load field ref offset %u type s64\n",
- ref->offset);
- break;
- }
-
- /* load from immediate operand */
- case FILTER_OP_LOAD_STRING:
- case FILTER_OP_LOAD_STAR_GLOB_STRING:
- {
- break;
- }
-
- case FILTER_OP_LOAD_S64:
- {
- break;
- }
-
- case FILTER_OP_CAST_TO_S64:
- {
- struct cast_op *insn = (struct cast_op *) pc;
-
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- default:
- case REG_DOUBLE:
- printk(KERN_WARNING "LTTng: filter: unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- printk(KERN_WARNING "LTTng: filter: Cast op can only be applied to numeric or floating point registers\n");
- ret = -EINVAL;
- goto end;
- case REG_S64:
- break;
- }
- if (insn->op == FILTER_OP_CAST_DOUBLE_TO_S64) {
- if (vstack_ax(stack)->type != REG_DOUBLE) {
- printk(KERN_WARNING "LTTng: filter: Cast expects double\n");
- ret = -EINVAL;
- goto end;
- }
- }
- break;
- }
- case FILTER_OP_CAST_NOP:
- {
- break;
- }
-
- /* get context ref */
- case FILTER_OP_GET_CONTEXT_REF:
- {
- printk(KERN_WARNING "LTTng: filter: Unknown get context ref type\n");
- ret = -EINVAL;
- goto end;
- }
- case FILTER_OP_GET_CONTEXT_REF_STRING:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printk("Validate get context ref offset %u type string\n",
- ref->offset);
- break;
- }
- case FILTER_OP_GET_CONTEXT_REF_S64:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printk("Validate get context ref offset %u type s64\n",
- ref->offset);
- break;
- }
-
- /*
- * Instructions for recursive traversal through composed types.
- */
- case FILTER_OP_GET_CONTEXT_ROOT:
- {
- dbg_printk("Validate get context root\n");
- break;
- }
- case FILTER_OP_GET_APP_CONTEXT_ROOT:
- {
- dbg_printk("Validate get app context root\n");
- break;
- }
- case FILTER_OP_GET_PAYLOAD_ROOT:
- {
- dbg_printk("Validate get payload root\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD:
- {
- /*
- * We tolerate that field type is unknown at validation,
- * because we are performing the load specialization in
- * a phase after validation.
- */
- dbg_printk("Validate load field\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_S8:
- {
- dbg_printk("Validate load field s8\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_S16:
- {
- dbg_printk("Validate load field s16\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_S32:
- {
- dbg_printk("Validate load field s32\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_S64:
- {
- dbg_printk("Validate load field s64\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_U8:
- {
- dbg_printk("Validate load field u8\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_U16:
- {
- dbg_printk("Validate load field u16\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_U32:
- {
- dbg_printk("Validate load field u32\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_U64:
- {
- dbg_printk("Validate load field u64\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_STRING:
- {
- dbg_printk("Validate load field string\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_SEQUENCE:
- {
- dbg_printk("Validate load field sequence\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_DOUBLE:
- {
- dbg_printk("Validate load field double\n");
- break;
- }
-
- case FILTER_OP_GET_SYMBOL:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_symbol *sym = (struct get_symbol *) insn->data;
-
- dbg_printk("Validate get symbol offset %u\n", sym->offset);
- break;
- }
-
- case FILTER_OP_GET_SYMBOL_FIELD:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_symbol *sym = (struct get_symbol *) insn->data;
-
- dbg_printk("Validate get symbol field offset %u\n", sym->offset);
- break;
- }
-
- case FILTER_OP_GET_INDEX_U16:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_index_u16 *get_index = (struct get_index_u16 *) insn->data;
-
- dbg_printk("Validate get index u16 index %u\n", get_index->index);
- break;
- }
-
- case FILTER_OP_GET_INDEX_U64:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_index_u64 *get_index = (struct get_index_u64 *) insn->data;
-
- dbg_printk("Validate get index u64 index %llu\n",
- (unsigned long long) get_index->index);
- break;
- }
- }
-end:
- return ret;
-}
-
-/*
- * Return value:
- * 0: success
- * <0: error
- */
-static
-int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
- struct mp_table *mp_table,
- struct vstack *stack,
- char *start_pc,
- char *pc)
-{
- int ret, found = 0;
- unsigned long target_pc = pc - start_pc;
- unsigned long hash;
- struct hlist_head *head;
- struct mp_node *mp_node;
-
- /* Validate the context resulting from the previous instruction */
- ret = validate_instruction_context(bytecode, stack, start_pc, pc);
- if (ret < 0)
- return ret;
-
- /* Validate merge points */
- hash = jhash_1word(target_pc, 0);
- head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
- lttng_hlist_for_each_entry(mp_node, head, node) {
- if (lttng_hash_match(mp_node, target_pc)) {
- found = 1;
- break;
- }
- }
- if (found) {
- dbg_printk("Filter: validate merge point at offset %lu\n",
- target_pc);
- if (merge_points_compare(stack, &mp_node->stack)) {
- printk(KERN_WARNING "LTTng: filter: Merge points differ for offset %lu\n",
- target_pc);
- return -EINVAL;
- }
- /* Once validated, we can remove the merge point */
- dbg_printk("Filter: remove merge point at offset %lu\n",
- target_pc);
- hlist_del(&mp_node->node);
- }
- return 0;
-}
-
-/*
- * Return value:
- * >0: going to next insn.
- * 0: success, stop iteration.
- * <0: error
- */
-static
-int exec_insn(struct bytecode_runtime *bytecode,
- struct mp_table *mp_table,
- struct vstack *stack,
- char **_next_pc,
- char *pc)
-{
- int ret = 1;
- char *next_pc = *_next_pc;
-
- switch (*(filter_opcode_t *) pc) {
- case FILTER_OP_UNKNOWN:
- default:
- {
- printk(KERN_WARNING "LTTng: filter: unknown bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
- }
-
- case FILTER_OP_RETURN:
- {
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_TYPE_UNKNOWN:
- break;
- default:
- printk(KERN_WARNING "LTTng: filter: Unexpected register type %d at end of bytecode\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- ret = 0;
- goto end;
- }
-
- case FILTER_OP_RETURN_S64:
- {
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- break;
- default:
- case REG_TYPE_UNKNOWN:
- printk(KERN_WARNING "LTTng: filter: Unexpected register type %d at end of bytecode\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- ret = 0;
- goto end;
- }
-
- /* binary */
- case FILTER_OP_MUL:
- case FILTER_OP_DIV:
- case FILTER_OP_MOD:
- case FILTER_OP_PLUS:
- case FILTER_OP_MINUS:
- /* Floating point */
- case FILTER_OP_EQ_DOUBLE:
- case FILTER_OP_NE_DOUBLE:
- case FILTER_OP_GT_DOUBLE:
- case FILTER_OP_LT_DOUBLE:
- case FILTER_OP_GE_DOUBLE:
- case FILTER_OP_LE_DOUBLE:
- case FILTER_OP_EQ_DOUBLE_S64:
- case FILTER_OP_NE_DOUBLE_S64:
- case FILTER_OP_GT_DOUBLE_S64:
- case FILTER_OP_LT_DOUBLE_S64:
- case FILTER_OP_GE_DOUBLE_S64:
- case FILTER_OP_LE_DOUBLE_S64:
- case FILTER_OP_EQ_S64_DOUBLE:
- case FILTER_OP_NE_S64_DOUBLE:
- case FILTER_OP_GT_S64_DOUBLE:
- case FILTER_OP_LT_S64_DOUBLE:
- case FILTER_OP_GE_S64_DOUBLE:
- case FILTER_OP_LE_S64_DOUBLE:
- case FILTER_OP_UNARY_PLUS_DOUBLE:
- case FILTER_OP_UNARY_MINUS_DOUBLE:
- case FILTER_OP_UNARY_NOT_DOUBLE:
- case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
- case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
- case FILTER_OP_LOAD_DOUBLE:
- case FILTER_OP_CAST_DOUBLE_TO_S64:
- {
- printk(KERN_WARNING "LTTng: filter: unsupported bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
- }
-
- case FILTER_OP_EQ:
- case FILTER_OP_NE:
- case FILTER_OP_GT:
- case FILTER_OP_LT:
- case FILTER_OP_GE:
- case FILTER_OP_LE:
- case FILTER_OP_EQ_STRING:
- case FILTER_OP_NE_STRING:
- case FILTER_OP_GT_STRING:
- case FILTER_OP_LT_STRING:
- case FILTER_OP_GE_STRING:
- case FILTER_OP_LE_STRING:
- case FILTER_OP_EQ_STAR_GLOB_STRING:
- case FILTER_OP_NE_STAR_GLOB_STRING:
- case FILTER_OP_EQ_S64:
- case FILTER_OP_NE_S64:
- case FILTER_OP_GT_S64:
- case FILTER_OP_LT_S64:
- case FILTER_OP_GE_S64:
- case FILTER_OP_LE_S64:
- case FILTER_OP_BIT_RSHIFT:
- case FILTER_OP_BIT_LSHIFT:
- case FILTER_OP_BIT_AND:
- case FILTER_OP_BIT_OR:
- case FILTER_OP_BIT_XOR:
- {
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_DOUBLE:
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- case REG_TYPE_UNKNOWN:
- break;
- default:
- printk(KERN_WARNING "LTTng: filter: Unexpected register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- /* unary */
- case FILTER_OP_UNARY_PLUS:
- case FILTER_OP_UNARY_MINUS:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_TYPE_UNKNOWN:
- break;
- default:
- printk(KERN_WARNING "LTTng: filter: Unexpected register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- vstack_ax(stack)->type = REG_TYPE_UNKNOWN;
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case FILTER_OP_UNARY_PLUS_S64:
- case FILTER_OP_UNARY_MINUS_S64:
- case FILTER_OP_UNARY_NOT_S64:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- break;
- default:
- printk(KERN_WARNING "LTTng: filter: Unexpected register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case FILTER_OP_UNARY_NOT:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_TYPE_UNKNOWN:
- break;
- default:
- printk(KERN_WARNING "LTTng: filter: Unexpected register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case FILTER_OP_UNARY_BIT_NOT:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_TYPE_UNKNOWN:
- break;
- case REG_DOUBLE:
- default:
- printk(KERN_WARNING "LTTng: filter: Unexpected register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- /* logical */
- case FILTER_OP_AND:
- case FILTER_OP_OR:
- {
- struct logical_op *insn = (struct logical_op *) pc;
- int merge_ret;
-
- /* Add merge point to table */
- merge_ret = merge_point_add_check(mp_table,
- insn->skip_offset, stack);
- if (merge_ret) {
- ret = merge_ret;
- goto end;
- }
-
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- /* There is always a cast-to-s64 operation before a or/and op. */
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- break;
- default:
- printk(KERN_WARNING "LTTng: filter: Incorrect register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- /* Continue to next instruction */
- /* Pop 1 when jump not taken */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct logical_op);
- break;
- }
-
- /* load field ref */
- case FILTER_OP_LOAD_FIELD_REF:
- {
- printk(KERN_WARNING "LTTng: filter: Unknown field ref type\n");
- ret = -EINVAL;
- goto end;
- }
- /* get context ref */
- case FILTER_OP_GET_CONTEXT_REF:
- {
- printk(KERN_WARNING "LTTng: filter: Unknown get context ref type\n");
- ret = -EINVAL;
- goto end;
- }
- case FILTER_OP_LOAD_FIELD_REF_STRING:
- case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
- case FILTER_OP_GET_CONTEXT_REF_STRING:
- case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
- case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STRING;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
- case FILTER_OP_LOAD_FIELD_REF_S64:
- case FILTER_OP_GET_CONTEXT_REF_S64:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
-
- /* load from immediate operand */
- case FILTER_OP_LOAD_STRING:
- {
- struct load_op *insn = (struct load_op *) pc;
-
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STRING;
- next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
- break;
- }
-
- case FILTER_OP_LOAD_STAR_GLOB_STRING:
- {
- struct load_op *insn = (struct load_op *) pc;
-
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
- next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
- break;
- }
-
- case FILTER_OP_LOAD_S64:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct load_op)
- + sizeof(struct literal_numeric);
- break;
- }
-
- case FILTER_OP_CAST_TO_S64:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_DOUBLE:
- case REG_TYPE_UNKNOWN:
- break;
- default:
- printk(KERN_WARNING "LTTng: filter: Incorrect register type %d for cast\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct cast_op);
- break;
- }
- case FILTER_OP_CAST_NOP:
- {
- next_pc += sizeof(struct cast_op);
- break;
- }
-
- /*
- * Instructions for recursive traversal through composed types.
- */
- case FILTER_OP_GET_CONTEXT_ROOT:
- case FILTER_OP_GET_APP_CONTEXT_ROOT:
- case FILTER_OP_GET_PAYLOAD_ROOT:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_PTR;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case FILTER_OP_LOAD_FIELD:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- printk(KERN_WARNING "LTTng: filter: Expecting pointer on top of stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_TYPE_UNKNOWN;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case FILTER_OP_LOAD_FIELD_S8:
- case FILTER_OP_LOAD_FIELD_S16:
- case FILTER_OP_LOAD_FIELD_S32:
- case FILTER_OP_LOAD_FIELD_S64:
- case FILTER_OP_LOAD_FIELD_U8:
- case FILTER_OP_LOAD_FIELD_U16:
- case FILTER_OP_LOAD_FIELD_U32:
- case FILTER_OP_LOAD_FIELD_U64:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- printk(KERN_WARNING "LTTng: filter: Expecting pointer on top of stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case FILTER_OP_LOAD_FIELD_STRING:
- case FILTER_OP_LOAD_FIELD_SEQUENCE:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- printk(KERN_WARNING "LTTng: filter: Expecting pointer on top of stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STRING;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case FILTER_OP_LOAD_FIELD_DOUBLE:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- printk(KERN_WARNING "LTTng: filter: Expecting pointer on top of stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_DOUBLE;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case FILTER_OP_GET_SYMBOL:
- case FILTER_OP_GET_SYMBOL_FIELD:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- printk(KERN_WARNING "LTTng: filter: Expecting pointer on top of stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
- break;
- }
-
- case FILTER_OP_GET_INDEX_U16:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- printk(KERN_WARNING "LTTng: filter: Expecting pointer on top of stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
- break;
- }
-
- case FILTER_OP_GET_INDEX_U64:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- printk(KERN_WARNING "LTTng: filter: Empty stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- printk(KERN_WARNING "LTTng: filter: Expecting pointer on top of stack\n\n");
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
- break;
- }
-
- }
-end:
- *_next_pc = next_pc;
- return ret;
-}
-
-/*
- * Never called concurrently (hash seed is shared).
- */
-int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode)
-{
- struct mp_table *mp_table;
- char *pc, *next_pc, *start_pc;
- int ret = -EINVAL;
- struct vstack stack;
-
- vstack_init(&stack);
-
- mp_table = kzalloc(sizeof(*mp_table), GFP_KERNEL);
- if (!mp_table) {
- printk(KERN_WARNING "LTTng: filter: Error allocating hash table for bytecode validation\n");
- return -ENOMEM;
- }
- start_pc = &bytecode->code[0];
- for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
- pc = next_pc) {
- ret = bytecode_validate_overflow(bytecode, start_pc, pc);
- if (ret != 0) {
- if (ret == -ERANGE)
- printk(KERN_WARNING "LTTng: filter: filter bytecode overflow\n");
- goto end;
- }
- dbg_printk("Validating op %s (%u)\n",
- lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc),
- (unsigned int) *(filter_opcode_t *) pc);
-
- /*
- * For each instruction, validate the current context
- * (traversal of entire execution flow), and validate
- * all merge points targeting this instruction.
- */
- ret = validate_instruction_all_contexts(bytecode, mp_table,
- &stack, start_pc, pc);
- if (ret)
- goto end;
- ret = exec_insn(bytecode, mp_table, &stack, &next_pc, pc);
- if (ret <= 0)
- goto end;
- }
-end:
- if (delete_all_nodes(mp_table)) {
- if (!ret) {
- printk(KERN_WARNING "LTTng: filter: Unexpected merge points\n");
- ret = -EINVAL;
- }
- }
- kfree(mp_table);
- return ret;
-}
+++ /dev/null
-/* SPDX-License-Identifier: MIT
- *
- * lttng-filter.c
- *
- * LTTng modules filter code.
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <linux/list.h>
-#include <linux/slab.h>
-
-#include <lttng/filter.h>
-
-static const char *opnames[] = {
- [ FILTER_OP_UNKNOWN ] = "UNKNOWN",
-
- [ FILTER_OP_RETURN ] = "RETURN",
-
- /* binary */
- [ FILTER_OP_MUL ] = "MUL",
- [ FILTER_OP_DIV ] = "DIV",
- [ FILTER_OP_MOD ] = "MOD",
- [ FILTER_OP_PLUS ] = "PLUS",
- [ FILTER_OP_MINUS ] = "MINUS",
- [ FILTER_OP_BIT_RSHIFT ] = "BIT_RSHIFT",
- [ FILTER_OP_BIT_LSHIFT ] = "BIT_LSHIFT",
- [ FILTER_OP_BIT_AND ] = "BIT_AND",
- [ FILTER_OP_BIT_OR ] = "BIT_OR",
- [ FILTER_OP_BIT_XOR ] = "BIT_XOR",
-
- /* binary comparators */
- [ FILTER_OP_EQ ] = "EQ",
- [ FILTER_OP_NE ] = "NE",
- [ FILTER_OP_GT ] = "GT",
- [ FILTER_OP_LT ] = "LT",
- [ FILTER_OP_GE ] = "GE",
- [ FILTER_OP_LE ] = "LE",
-
- /* string binary comparators */
- [ FILTER_OP_EQ_STRING ] = "EQ_STRING",
- [ FILTER_OP_NE_STRING ] = "NE_STRING",
- [ FILTER_OP_GT_STRING ] = "GT_STRING",
- [ FILTER_OP_LT_STRING ] = "LT_STRING",
- [ FILTER_OP_GE_STRING ] = "GE_STRING",
- [ FILTER_OP_LE_STRING ] = "LE_STRING",
-
- /* s64 binary comparators */
- [ FILTER_OP_EQ_S64 ] = "EQ_S64",
- [ FILTER_OP_NE_S64 ] = "NE_S64",
- [ FILTER_OP_GT_S64 ] = "GT_S64",
- [ FILTER_OP_LT_S64 ] = "LT_S64",
- [ FILTER_OP_GE_S64 ] = "GE_S64",
- [ FILTER_OP_LE_S64 ] = "LE_S64",
-
- /* double binary comparators */
- [ FILTER_OP_EQ_DOUBLE ] = "EQ_DOUBLE",
- [ FILTER_OP_NE_DOUBLE ] = "NE_DOUBLE",
- [ FILTER_OP_GT_DOUBLE ] = "GT_DOUBLE",
- [ FILTER_OP_LT_DOUBLE ] = "LT_DOUBLE",
- [ FILTER_OP_GE_DOUBLE ] = "GE_DOUBLE",
- [ FILTER_OP_LE_DOUBLE ] = "LE_DOUBLE",
-
- /* Mixed S64-double binary comparators */
- [ FILTER_OP_EQ_DOUBLE_S64 ] = "EQ_DOUBLE_S64",
- [ FILTER_OP_NE_DOUBLE_S64 ] = "NE_DOUBLE_S64",
- [ FILTER_OP_GT_DOUBLE_S64 ] = "GT_DOUBLE_S64",
- [ FILTER_OP_LT_DOUBLE_S64 ] = "LT_DOUBLE_S64",
- [ FILTER_OP_GE_DOUBLE_S64 ] = "GE_DOUBLE_S64",
- [ FILTER_OP_LE_DOUBLE_S64 ] = "LE_DOUBLE_S64",
-
- [ FILTER_OP_EQ_S64_DOUBLE ] = "EQ_S64_DOUBLE",
- [ FILTER_OP_NE_S64_DOUBLE ] = "NE_S64_DOUBLE",
- [ FILTER_OP_GT_S64_DOUBLE ] = "GT_S64_DOUBLE",
- [ FILTER_OP_LT_S64_DOUBLE ] = "LT_S64_DOUBLE",
- [ FILTER_OP_GE_S64_DOUBLE ] = "GE_S64_DOUBLE",
- [ FILTER_OP_LE_S64_DOUBLE ] = "LE_S64_DOUBLE",
-
- /* unary */
- [ FILTER_OP_UNARY_PLUS ] = "UNARY_PLUS",
- [ FILTER_OP_UNARY_MINUS ] = "UNARY_MINUS",
- [ FILTER_OP_UNARY_NOT ] = "UNARY_NOT",
- [ FILTER_OP_UNARY_PLUS_S64 ] = "UNARY_PLUS_S64",
- [ FILTER_OP_UNARY_MINUS_S64 ] = "UNARY_MINUS_S64",
- [ FILTER_OP_UNARY_NOT_S64 ] = "UNARY_NOT_S64",
- [ FILTER_OP_UNARY_PLUS_DOUBLE ] = "UNARY_PLUS_DOUBLE",
- [ FILTER_OP_UNARY_MINUS_DOUBLE ] = "UNARY_MINUS_DOUBLE",
- [ FILTER_OP_UNARY_NOT_DOUBLE ] = "UNARY_NOT_DOUBLE",
-
- /* logical */
- [ FILTER_OP_AND ] = "AND",
- [ FILTER_OP_OR ] = "OR",
-
- /* load field ref */
- [ FILTER_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF",
- [ FILTER_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING",
- [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE",
- [ FILTER_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64",
- [ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE",
-
- /* load from immediate operand */
- [ FILTER_OP_LOAD_STRING ] = "LOAD_STRING",
- [ FILTER_OP_LOAD_S64 ] = "LOAD_S64",
- [ FILTER_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE",
-
- /* cast */
- [ FILTER_OP_CAST_TO_S64 ] = "CAST_TO_S64",
- [ FILTER_OP_CAST_DOUBLE_TO_S64 ] = "CAST_DOUBLE_TO_S64",
- [ FILTER_OP_CAST_NOP ] = "CAST_NOP",
-
- /* get context ref */
- [ FILTER_OP_GET_CONTEXT_REF ] = "GET_CONTEXT_REF",
- [ FILTER_OP_GET_CONTEXT_REF_STRING ] = "GET_CONTEXT_REF_STRING",
- [ FILTER_OP_GET_CONTEXT_REF_S64 ] = "GET_CONTEXT_REF_S64",
- [ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = "GET_CONTEXT_REF_DOUBLE",
-
- /* load userspace field ref */
- [ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = "LOAD_FIELD_REF_USER_STRING",
- [ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = "LOAD_FIELD_REF_USER_SEQUENCE",
-
- /*
- * load immediate star globbing pattern (literal string)
- * from immediate.
- */
- [ FILTER_OP_LOAD_STAR_GLOB_STRING ] = "LOAD_STAR_GLOB_STRING",
-
- /* globbing pattern binary operator: apply to */
- [ FILTER_OP_EQ_STAR_GLOB_STRING ] = "EQ_STAR_GLOB_STRING",
- [ FILTER_OP_NE_STAR_GLOB_STRING ] = "NE_STAR_GLOB_STRING",
-
- /*
- * Instructions for recursive traversal through composed types.
- */
- [ FILTER_OP_GET_CONTEXT_ROOT ] = "GET_CONTEXT_ROOT",
- [ FILTER_OP_GET_APP_CONTEXT_ROOT ] = "GET_APP_CONTEXT_ROOT",
- [ FILTER_OP_GET_PAYLOAD_ROOT ] = "GET_PAYLOAD_ROOT",
-
- [ FILTER_OP_GET_SYMBOL ] = "GET_SYMBOL",
- [ FILTER_OP_GET_SYMBOL_FIELD ] = "GET_SYMBOL_FIELD",
- [ FILTER_OP_GET_INDEX_U16 ] = "GET_INDEX_U16",
- [ FILTER_OP_GET_INDEX_U64 ] = "GET_INDEX_U64",
-
- [ FILTER_OP_LOAD_FIELD ] = "LOAD_FIELD",
- [ FILTER_OP_LOAD_FIELD_S8 ] = "LOAD_FIELD_S8",
- [ FILTER_OP_LOAD_FIELD_S16 ] = "LOAD_FIELD_S16",
- [ FILTER_OP_LOAD_FIELD_S32 ] = "LOAD_FIELD_S32",
- [ FILTER_OP_LOAD_FIELD_S64 ] = "LOAD_FIELD_S64",
- [ FILTER_OP_LOAD_FIELD_U8 ] = "LOAD_FIELD_U8",
- [ FILTER_OP_LOAD_FIELD_U16 ] = "LOAD_FIELD_U16",
- [ FILTER_OP_LOAD_FIELD_U32 ] = "LOAD_FIELD_U32",
- [ FILTER_OP_LOAD_FIELD_U64 ] = "LOAD_FIELD_U64",
- [ FILTER_OP_LOAD_FIELD_STRING ] = "LOAD_FIELD_STRING",
- [ FILTER_OP_LOAD_FIELD_SEQUENCE ] = "LOAD_FIELD_SEQUENCE",
- [ FILTER_OP_LOAD_FIELD_DOUBLE ] = "LOAD_FIELD_DOUBLE",
-
- [ FILTER_OP_UNARY_BIT_NOT ] = "UNARY_BIT_NOT",
-
- [ FILTER_OP_RETURN_S64 ] = "RETURN_S64",
-};
-
-const char *lttng_filter_print_op(enum filter_op op)
-{
- if (op >= NR_FILTER_OPS)
- return "UNKNOWN";
- else
- return opnames[op];
-}
-
-static
-int apply_field_reloc(struct lttng_event *event,
- struct bytecode_runtime *runtime,
- uint32_t runtime_len,
- uint32_t reloc_offset,
- const char *field_name,
- enum filter_op filter_op)
-{
- const struct lttng_event_desc *desc;
- const struct lttng_event_field *fields, *field = NULL;
- unsigned int nr_fields, i;
- struct load_op *op;
- uint32_t field_offset = 0;
-
- dbg_printk("Apply field reloc: %u %s\n", reloc_offset, field_name);
-
- /* Lookup event by name */
- desc = event->desc;
- if (!desc)
- return -EINVAL;
- fields = desc->fields;
- if (!fields)
- return -EINVAL;
- nr_fields = desc->nr_fields;
- for (i = 0; i < nr_fields; i++) {
- if (fields[i].nofilter)
- continue;
- if (!strcmp(fields[i].name, field_name)) {
- field = &fields[i];
- break;
- }
- /* compute field offset */
- switch (fields[i].type.atype) {
- case atype_integer:
- case atype_enum_nestable:
- field_offset += sizeof(int64_t);
- break;
- case atype_array_nestable:
- if (!lttng_is_bytewise_integer(fields[i].type.u.array_nestable.elem_type))
- return -EINVAL;
- field_offset += sizeof(unsigned long);
- field_offset += sizeof(void *);
- break;
- case atype_sequence_nestable:
- if (!lttng_is_bytewise_integer(fields[i].type.u.sequence_nestable.elem_type))
- return -EINVAL;
- field_offset += sizeof(unsigned long);
- field_offset += sizeof(void *);
- break;
- case atype_string:
- field_offset += sizeof(void *);
- break;
- case atype_struct_nestable: /* Unsupported. */
- case atype_variant_nestable: /* Unsupported. */
- default:
- return -EINVAL;
- }
- }
- if (!field)
- return -EINVAL;
-
- /* Check if field offset is too large for 16-bit offset */
- if (field_offset > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
- return -EINVAL;
-
- /* set type */
- op = (struct load_op *) &runtime->code[reloc_offset];
-
- switch (filter_op) {
- case FILTER_OP_LOAD_FIELD_REF:
- {
- struct field_ref *field_ref;
-
- field_ref = (struct field_ref *) op->data;
- switch (field->type.atype) {
- case atype_integer:
- case atype_enum_nestable:
- op->op = FILTER_OP_LOAD_FIELD_REF_S64;
- break;
- case atype_array_nestable:
- case atype_sequence_nestable:
- if (field->user)
- op->op = FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE;
- else
- op->op = FILTER_OP_LOAD_FIELD_REF_SEQUENCE;
- break;
- case atype_string:
- if (field->user)
- op->op = FILTER_OP_LOAD_FIELD_REF_USER_STRING;
- else
- op->op = FILTER_OP_LOAD_FIELD_REF_STRING;
- break;
- case atype_struct_nestable: /* Unsupported. */
- case atype_variant_nestable: /* Unsupported. */
- default:
- return -EINVAL;
- }
- /* set offset */
- field_ref->offset = (uint16_t) field_offset;
- break;
- }
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static
-int apply_context_reloc(struct lttng_event *event,
- struct bytecode_runtime *runtime,
- uint32_t runtime_len,
- uint32_t reloc_offset,
- const char *context_name,
- enum filter_op filter_op)
-{
- struct load_op *op;
- struct lttng_ctx_field *ctx_field;
- int idx;
-
- dbg_printk("Apply context reloc: %u %s\n", reloc_offset, context_name);
-
- /* Get context index */
- idx = lttng_get_context_index(lttng_static_ctx, context_name);
- if (idx < 0)
- return -ENOENT;
-
- /* Check if idx is too large for 16-bit offset */
- if (idx > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
- return -EINVAL;
-
- /* Get context return type */
- ctx_field = <tng_static_ctx->fields[idx];
- op = (struct load_op *) &runtime->code[reloc_offset];
-
- switch (filter_op) {
- case FILTER_OP_GET_CONTEXT_REF:
- {
- struct field_ref *field_ref;
-
- field_ref = (struct field_ref *) op->data;
- switch (ctx_field->event_field.type.atype) {
- case atype_integer:
- case atype_enum_nestable:
- op->op = FILTER_OP_GET_CONTEXT_REF_S64;
- break;
- /* Sequence and array supported as string */
- case atype_string:
- BUG_ON(ctx_field->event_field.user);
- op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
- break;
- case atype_array_nestable:
- if (!lttng_is_bytewise_integer(ctx_field->event_field.type.u.array_nestable.elem_type))
- return -EINVAL;
- BUG_ON(ctx_field->event_field.user);
- op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
- break;
- case atype_sequence_nestable:
- if (!lttng_is_bytewise_integer(ctx_field->event_field.type.u.sequence_nestable.elem_type))
- return -EINVAL;
- BUG_ON(ctx_field->event_field.user);
- op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
- break;
- case atype_struct_nestable: /* Unsupported. */
- case atype_variant_nestable: /* Unsupported. */
- default:
- return -EINVAL;
- }
- /* set offset to context index within channel contexts */
- field_ref->offset = (uint16_t) idx;
- break;
- }
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static
-int apply_reloc(struct lttng_event *event,
- struct bytecode_runtime *runtime,
- uint32_t runtime_len,
- uint32_t reloc_offset,
- const char *name)
-{
- struct load_op *op;
-
- dbg_printk("Apply reloc: %u %s\n", reloc_offset, name);
-
- /* Ensure that the reloc is within the code */
- if (runtime_len - reloc_offset < sizeof(uint16_t))
- return -EINVAL;
-
- op = (struct load_op *) &runtime->code[reloc_offset];
- switch (op->op) {
- case FILTER_OP_LOAD_FIELD_REF:
- return apply_field_reloc(event, runtime, runtime_len,
- reloc_offset, name, op->op);
- case FILTER_OP_GET_CONTEXT_REF:
- return apply_context_reloc(event, runtime, runtime_len,
- reloc_offset, name, op->op);
- case FILTER_OP_GET_SYMBOL:
- case FILTER_OP_GET_SYMBOL_FIELD:
- /*
- * Will be handled by load specialize phase or
- * dynamically by interpreter.
- */
- return 0;
- default:
- printk(KERN_WARNING "LTTng: filter: Unknown reloc op type %u\n", op->op);
- return -EINVAL;
- }
- return 0;
-}
-
-static
-int bytecode_is_linked(struct lttng_filter_bytecode_node *filter_bytecode,
- struct lttng_event *event)
-{
- struct lttng_bytecode_runtime *bc_runtime;
-
- list_for_each_entry(bc_runtime,
- &event->bytecode_runtime_head, node) {
- if (bc_runtime->bc == filter_bytecode)
- return 1;
- }
- return 0;
-}
-
-/*
- * Take a bytecode with reloc table and link it to an event to create a
- * bytecode runtime.
- */
-static
-int _lttng_filter_event_link_bytecode(struct lttng_event *event,
- struct lttng_filter_bytecode_node *filter_bytecode,
- struct list_head *insert_loc)
-{
- int ret, offset, next_offset;
- struct bytecode_runtime *runtime = NULL;
- size_t runtime_alloc_len;
-
- if (!filter_bytecode)
- return 0;
- /* Bytecode already linked */
- if (bytecode_is_linked(filter_bytecode, event))
- return 0;
-
- dbg_printk("Linking...\n");
-
- /* We don't need the reloc table in the runtime */
- runtime_alloc_len = sizeof(*runtime) + filter_bytecode->bc.reloc_offset;
- runtime = kzalloc(runtime_alloc_len, GFP_KERNEL);
- if (!runtime) {
- ret = -ENOMEM;
- goto alloc_error;
- }
- runtime->p.bc = filter_bytecode;
- runtime->p.event = event;
- runtime->len = filter_bytecode->bc.reloc_offset;
- /* copy original bytecode */
- memcpy(runtime->code, filter_bytecode->bc.data, runtime->len);
- /*
- * apply relocs. Those are a uint16_t (offset in bytecode)
- * followed by a string (field name).
- */
- for (offset = filter_bytecode->bc.reloc_offset;
- offset < filter_bytecode->bc.len;
- offset = next_offset) {
- uint16_t reloc_offset =
- *(uint16_t *) &filter_bytecode->bc.data[offset];
- const char *name =
- (const char *) &filter_bytecode->bc.data[offset + sizeof(uint16_t)];
-
- ret = apply_reloc(event, runtime, runtime->len, reloc_offset, name);
- if (ret) {
- goto link_error;
- }
- next_offset = offset + sizeof(uint16_t) + strlen(name) + 1;
- }
- /* Validate bytecode */
- ret = lttng_filter_validate_bytecode(runtime);
- if (ret) {
- goto link_error;
- }
- /* Specialize bytecode */
- ret = lttng_filter_specialize_bytecode(event, runtime);
- if (ret) {
- goto link_error;
- }
- runtime->p.filter = lttng_filter_interpret_bytecode;
- runtime->p.link_failed = 0;
- list_add_rcu(&runtime->p.node, insert_loc);
- dbg_printk("Linking successful.\n");
- return 0;
-
-link_error:
- runtime->p.filter = lttng_filter_false;
- runtime->p.link_failed = 1;
- list_add_rcu(&runtime->p.node, insert_loc);
-alloc_error:
- dbg_printk("Linking failed.\n");
- return ret;
-}
-
-void lttng_filter_sync_state(struct lttng_bytecode_runtime *runtime)
-{
- struct lttng_filter_bytecode_node *bc = runtime->bc;
-
- if (!bc->enabler->enabled || runtime->link_failed)
- runtime->filter = lttng_filter_false;
- else
- runtime->filter = lttng_filter_interpret_bytecode;
-}
-
-/*
- * Link bytecode for all enablers referenced by an event.
- */
-void lttng_enabler_event_link_bytecode(struct lttng_event *event,
- struct lttng_enabler *enabler)
-{
- struct lttng_filter_bytecode_node *bc;
- struct lttng_bytecode_runtime *runtime;
-
- /* Can only be called for events with desc attached */
- WARN_ON_ONCE(!event->desc);
-
- /* Link each bytecode. */
- list_for_each_entry(bc, &enabler->filter_bytecode_head, node) {
- int found = 0, ret;
- struct list_head *insert_loc;
-
- list_for_each_entry(runtime,
- &event->bytecode_runtime_head, node) {
- if (runtime->bc == bc) {
- found = 1;
- break;
- }
- }
- /* Skip bytecode already linked */
- if (found)
- continue;
-
- /*
- * Insert at specified priority (seqnum) in increasing
- * order. If there already is a bytecode of the same priority,
- * insert the new bytecode right after it.
- */
- list_for_each_entry_reverse(runtime,
- &event->bytecode_runtime_head, node) {
- if (runtime->bc->bc.seqnum <= bc->bc.seqnum) {
- /* insert here */
- insert_loc = &runtime->node;
- goto add_within;
- }
- }
- /* Add to head to list */
- insert_loc = &event->bytecode_runtime_head;
- add_within:
- dbg_printk("linking bytecode\n");
- ret = _lttng_filter_event_link_bytecode(event, bc,
- insert_loc);
- if (ret) {
- dbg_printk("[lttng filter] warning: cannot link event bytecode\n");
- }
- }
-}
-
-/*
- * We own the filter_bytecode if we return success.
- */
-int lttng_filter_enabler_attach_bytecode(struct lttng_enabler *enabler,
- struct lttng_filter_bytecode_node *filter_bytecode)
-{
- list_add(&filter_bytecode->node, &enabler->filter_bytecode_head);
- return 0;
-}
-
-void lttng_free_enabler_filter_bytecode(struct lttng_enabler *enabler)
-{
- struct lttng_filter_bytecode_node *filter_bytecode, *tmp;
-
- list_for_each_entry_safe(filter_bytecode, tmp,
- &enabler->filter_bytecode_head, node) {
- kfree(filter_bytecode);
- }
-}
-
-void lttng_free_event_filter_runtime(struct lttng_event *event)
-{
- struct bytecode_runtime *runtime, *tmp;
-
- list_for_each_entry_safe(runtime, tmp,
- &event->bytecode_runtime_head, p.node) {
- kfree(runtime->data);
- kfree(runtime);
- }
-}
}
ret = lttng_fix_pending_events();
WARN_ON_ONCE(ret);
+ ret = lttng_fix_pending_triggers();
+ WARN_ON_ONCE(ret);
lazy_nesting--;
}
* the probe immediately, since we cannot delay event
* registration because they are needed ASAP.
*/
- if (lttng_session_active())
+ if (lttng_session_active() || lttng_trigger_active())
fixup_lazy_probes();
end:
lttng_unlock_sessions();
* Called with sessions lock held.
*/
static
-const struct lttng_event_desc *find_event(const char *name)
+const struct lttng_event_desc *find_event_desc(const char *name)
{
struct lttng_probe_desc *probe_desc;
int i;
/*
* Called with sessions lock held.
*/
-const struct lttng_event_desc *lttng_event_get(const char *name)
+const struct lttng_event_desc *lttng_event_desc_get(const char *name)
{
- const struct lttng_event_desc *event;
+ const struct lttng_event_desc *event_desc;
int ret;
- event = find_event(name);
- if (!event)
+ event_desc = find_event_desc(name);
+ if (!event_desc)
return NULL;
- ret = try_module_get(event->owner);
+ ret = try_module_get(event_desc->owner);
WARN_ON_ONCE(!ret);
- return event;
+ return event_desc;
}
-EXPORT_SYMBOL_GPL(lttng_event_get);
+EXPORT_SYMBOL_GPL(lttng_event_desc_get);
/*
* Called with sessions lock held.
*/
-void lttng_event_put(const struct lttng_event_desc *event)
+void lttng_event_desc_put(const struct lttng_event_desc *event_desc)
{
- module_put(event->owner);
+ module_put(event_desc->owner);
}
-EXPORT_SYMBOL_GPL(lttng_event_put);
+EXPORT_SYMBOL_GPL(lttng_event_desc_put);
static
void *tp_list_start(struct seq_file *m, loff_t *pos)
static
struct channel *_channel_create(const char *name,
- struct lttng_channel *lttng_chan, void *buf_addr,
+ void *priv, void *buf_addr,
size_t subbuf_size, size_t num_subbuf,
unsigned int switch_timer_interval,
unsigned int read_timer_interval)
{
+ struct lttng_channel *lttng_chan = priv;
struct channel *chan;
chan = channel_create(&client_config, name, lttng_chan, buf_addr,
static
struct channel *_channel_create(const char *name,
- struct lttng_channel *lttng_chan, void *buf_addr,
+ void *priv, void *buf_addr,
size_t subbuf_size, size_t num_subbuf,
unsigned int switch_timer_interval,
unsigned int read_timer_interval)
{
+ struct lttng_channel *lttng_chan = priv;
struct channel *chan;
chan = channel_create(&client_config, name,
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
+ *
+ * lttng-ring-buffer-trigger-client.c
+ *
+ * LTTng lib ring buffer trigger client.
+ *
+ * Copyright (C) 2010-2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+
+#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
+#define RING_BUFFER_MODE_TEMPLATE_STRING "trigger"
+#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_NONE
+#include "lttng-ring-buffer-trigger-client.h"
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
+ *
+ * lttng-ring-buffer-trigger-client.h
+ *
+ * LTTng lib ring buffer trigger client template.
+ *
+ * Copyright (C) 2010-2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
+#include <lttng/abi.h>
+#include <lttng/events.h>
+#include <lttng/tracer.h>
+
+static struct lttng_transport lttng_relay_transport;
+
+/* Trigger packets carry no metadata: the packet header is empty. */
+struct trigger_packet_header {
+	uint8_t header_end[0];
+};
+
+/* Each record is prefixed by the byte size of its payload. */
+struct trigger_record_header {
+	uint32_t payload_len;		/* in bytes */
+	uint8_t header_end[0];		/* End of header */
+};
+
+static const struct lib_ring_buffer_config client_config;
+
+/* Trigger records are not timestamped: always report time 0. */
+static inline
+u64 lib_ring_buffer_clock_read(struct channel *chan)
+{
+	return 0;
+}
+
+/*
+ * Compute the space used by a record header starting at @offset:
+ * alignment padding for a uint32_t followed by the 32-bit payload
+ * length field (see struct trigger_record_header).
+ * Returns the total header size (padding included); the padding alone
+ * is also reported through *pre_header_padding.
+ */
+static inline
+size_t record_header_size(const struct lib_ring_buffer_config *config,
+		struct channel *chan, size_t offset,
+		size_t *pre_header_padding,
+		struct lib_ring_buffer_ctx *ctx,
+		void *client_ctx)
+{
+	size_t orig_offset = offset;
+	size_t padding;
+
+	padding = lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
+	offset += padding;
+
+	/* Payload length field. */
+	offset += sizeof(uint32_t);
+
+	*pre_header_padding = padding;
+
+	return offset - orig_offset;
+}
+
+#include <ringbuffer/api.h>
+
+/* Ring buffer callback: triggers carry no clock (always 0). */
+static u64 client_ring_buffer_clock_read(struct channel *chan)
+{
+	return 0;
+}
+
+/* Ring buffer callback: delegate to the inline record_header_size() above. */
+static
+size_t client_record_header_size(const struct lib_ring_buffer_config *config,
+		struct channel *chan, size_t offset,
+		size_t *pre_header_padding,
+		struct lib_ring_buffer_ctx *ctx,
+		void *client_ctx)
+{
+	return record_header_size(config, chan, offset,
+			pre_header_padding, ctx, client_ctx);
+}
+
+/**
+ * client_packet_header_size - called on buffer-switch to a new sub-buffer
+ *
+ * Return header size without padding after the structure. Don't use packed
+ * structure because gcc generates inefficient code on some architectures
+ * (powerpc, mips..)
+ */
+static size_t client_packet_header_size(void)
+{
+	return offsetof(struct trigger_packet_header, header_end);
+}
+
+/* Nothing to write in the (empty) packet header on sub-buffer switch. */
+static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
+		unsigned int subbuf_idx)
+{
+}
+
+/*
+ * offset is assumed to never be 0 here : never deliver a completely empty
+ * subbuffer. data_size is between 1 and subbuf_size.
+ */
+static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
+		unsigned int subbuf_idx, unsigned long data_size)
+{
+}
+
+/* No per-buffer private state to create. */
+static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
+		int cpu, const char *name)
+{
+	return 0;
+}
+
+/* No per-buffer private state to tear down. */
+static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
+{
+}
+
+/*
+ * The trigger channel keeps no packet-scope metadata (timestamps,
+ * discard counts, sizes, stream/instance ids): every accessor below
+ * reports -ENOSYS (operation not supported).
+ */
+static int client_timestamp_begin(const struct lib_ring_buffer_config *config,
+		struct lib_ring_buffer *buf, uint64_t *timestamp_begin)
+{
+	return -ENOSYS;
+}
+
+static int client_timestamp_end(const struct lib_ring_buffer_config *config,
+		struct lib_ring_buffer *bufb,
+		uint64_t *timestamp_end)
+{
+	return -ENOSYS;
+}
+
+static int client_events_discarded(const struct lib_ring_buffer_config *config,
+		struct lib_ring_buffer *bufb,
+		uint64_t *events_discarded)
+{
+	return -ENOSYS;
+}
+
+static int client_current_timestamp(const struct lib_ring_buffer_config *config,
+		struct lib_ring_buffer *bufb,
+		uint64_t *ts)
+{
+	return -ENOSYS;
+}
+
+static int client_content_size(const struct lib_ring_buffer_config *config,
+		struct lib_ring_buffer *bufb,
+		uint64_t *content_size)
+{
+	return -ENOSYS;
+}
+
+static int client_packet_size(const struct lib_ring_buffer_config *config,
+		struct lib_ring_buffer *bufb,
+		uint64_t *packet_size)
+{
+	return -ENOSYS;
+}
+
+static int client_stream_id(const struct lib_ring_buffer_config *config,
+		struct lib_ring_buffer *bufb,
+		uint64_t *stream_id)
+{
+	return -ENOSYS;
+}
+
+static int client_sequence_number(const struct lib_ring_buffer_config *config,
+		struct lib_ring_buffer *bufb,
+		uint64_t *seq)
+{
+	return -ENOSYS;
+}
+
+static
+int client_instance_id(const struct lib_ring_buffer_config *config,
+		struct lib_ring_buffer *bufb,
+		uint64_t *id)
+{
+	return -ENOSYS;
+}
+
+/*
+ * Read the record header at @offset from the buffer backend and report
+ * the header length, the payload length it encodes, and the (always 0)
+ * record timestamp.
+ */
+static void client_record_get(const struct lib_ring_buffer_config *config,
+		struct channel *chan, struct lib_ring_buffer *buf,
+		size_t offset, size_t *header_len,
+		size_t *payload_len, u64 *timestamp)
+{
+	struct trigger_record_header header;
+	int ret;
+
+	ret = lib_ring_buffer_read(&buf->backend, offset, &header,
+			offsetof(struct trigger_record_header, header_end));
+	CHAN_WARN_ON(chan, ret != offsetof(struct trigger_record_header, header_end));
+	*header_len = offsetof(struct trigger_record_header, header_end);
+	*payload_len = header.payload_len;
+	*timestamp = 0;
+}
+
+/*
+ * Trigger ring buffer configuration: a single global (non per-CPU)
+ * page-backed buffer, no timestamp bits in the record header
+ * (tsc_bits = 0), and writer-driven reader wakeup.
+ */
+static const struct lib_ring_buffer_config client_config = {
+	.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
+	.cb.record_header_size = client_record_header_size,
+	.cb.subbuffer_header_size = client_packet_header_size,
+	.cb.buffer_begin = client_buffer_begin,
+	.cb.buffer_end = client_buffer_end,
+	.cb.buffer_create = client_buffer_create,
+	.cb.buffer_finalize = client_buffer_finalize,
+	.cb.record_get = client_record_get,
+
+	.tsc_bits = 0,
+	.alloc = RING_BUFFER_ALLOC_GLOBAL,
+	.sync = RING_BUFFER_SYNC_GLOBAL,
+	.mode = RING_BUFFER_MODE_TEMPLATE,
+	.backend = RING_BUFFER_PAGE,
+	.output = RING_BUFFER_OUTPUT_TEMPLATE,
+	.oops = RING_BUFFER_OOPS_CONSISTENCY,
+	.ipi = RING_BUFFER_NO_IPI_BARRIER,
+	.wakeup = RING_BUFFER_WAKEUP_BY_WRITER,
+};
+
+/* Drop the module reference taken in _channel_create(). */
+static
+void release_priv_ops(void *priv_ops)
+{
+	module_put(THIS_MODULE);
+}
+
+static
+void lttng_channel_destroy(struct channel *chan)
+{
+	channel_destroy(chan);
+}
+
+/*
+ * Create the ring buffer channel backing a trigger group (@priv).
+ * Pins this module for as long as the channel references
+ * lttng_relay_transport.ops; release_priv_ops() drops the reference.
+ */
+static
+struct channel *_channel_create(const char *name,
+				void *priv, void *buf_addr,
+				size_t subbuf_size, size_t num_subbuf,
+				unsigned int switch_timer_interval,
+				unsigned int read_timer_interval)
+{
+	struct lttng_trigger_group *trigger_group = priv;
+	struct channel *chan;
+
+	chan = channel_create(&client_config, name,
+			      trigger_group, buf_addr,
+			      subbuf_size, num_subbuf, switch_timer_interval,
+			      read_timer_interval);
+	if (chan) {
+		/*
+		 * Ensure this module is not unloaded before we finish
+		 * using lttng_relay_transport.ops.
+		 */
+		if (!try_module_get(THIS_MODULE)) {
+			printk(KERN_WARNING "LTTng: Can't lock trigger transport module.\n");
+			goto error;
+		}
+		chan->backend.priv_ops = &lttng_relay_transport.ops;
+		chan->backend.release_priv_ops = release_priv_ops;
+	}
+	return chan;
+
+error:
+	lttng_channel_destroy(chan);
+	return NULL;
+}
+
+/* Grab read-side ownership of the single global buffer, or NULL if busy. */
+static
+struct lib_ring_buffer *lttng_buffer_read_open(struct channel *chan)
+{
+	struct lib_ring_buffer *buf;
+
+	buf = channel_get_ring_buffer(&client_config, chan, 0);
+	if (!lib_ring_buffer_open_read(buf))
+		return buf;
+	return NULL;
+}
+
+/* Return 1 if any of the channel's buffers currently has no active reader. */
+static
+int lttng_buffer_has_read_closed_stream(struct channel *chan)
+{
+	struct lib_ring_buffer *buf;
+	int cpu;
+
+	for_each_channel_cpu(cpu, chan) {
+		buf = channel_get_ring_buffer(&client_config, chan, cpu);
+		if (!atomic_long_read(&buf->active_readers))
+			return 1;
+	}
+	return 0;
+}
+
+/* Release read-side ownership taken by lttng_buffer_read_open(). */
+static
+void lttng_buffer_read_close(struct lib_ring_buffer *buf)
+{
+	lib_ring_buffer_release_read(buf);
+}
+
+/*
+ * Write the record header (32-bit payload length) and align the write
+ * context on the record's largest member so the payload can follow.
+ */
+static
+void lttng_write_trigger_header(const struct lib_ring_buffer_config *config,
+		struct lib_ring_buffer_ctx *ctx)
+{
+	uint32_t data_size;
+
+	/* The payload_len header field is only 32-bit wide. */
+	WARN_ON_ONCE(ctx->data_size > U32_MAX);
+
+	data_size = (uint32_t) ctx->data_size;
+
+	lib_ring_buffer_write(config, ctx, &data_size, sizeof(data_size));
+
+	lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
+}
+
+/*
+ * Reserve space for one trigger record and write its header.
+ * @event_id is unused by this client.
+ */
+static
+int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx, uint32_t event_id)
+{
+	int ret;
+
+	ret = lib_ring_buffer_reserve(&client_config, ctx, NULL);
+	if (ret)
+		return ret;
+	lib_ring_buffer_backend_get_pages(&client_config, ctx,
+			&ctx->backend_pages);
+
+	lttng_write_trigger_header(&client_config, ctx);
+	return 0;
+}
+
+/* Commit the record reserved by lttng_event_reserve(). */
+static
+void lttng_event_commit(struct lib_ring_buffer_ctx *ctx)
+{
+	lib_ring_buffer_commit(&client_config, ctx);
+}
+
+/* Copy @len bytes of kernel memory into the current record. */
+static
+void lttng_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
+		size_t len)
+{
+	lib_ring_buffer_write(&client_config, ctx, src, len);
+}
+
+/* Copy @len bytes of user-space memory into the current record. */
+static
+void lttng_event_write_from_user(struct lib_ring_buffer_ctx *ctx,
+		const void __user *src, size_t len)
+{
+	lib_ring_buffer_copy_from_user_inatomic(&client_config, ctx, src, len);
+}
+
+/* Fill @len bytes of the current record with byte @c. */
+static
+void lttng_event_memset(struct lib_ring_buffer_ctx *ctx,
+		int c, size_t len)
+{
+	lib_ring_buffer_memset(&client_config, ctx, c, len);
+}
+
+/* Copy a string into the record; '#' is passed to lib_ring_buffer_strcpy() as the fill character. */
+static
+void lttng_event_strcpy(struct lib_ring_buffer_ctx *ctx, const char *src,
+		size_t len)
+{
+	lib_ring_buffer_strcpy(&client_config, ctx, src, len, '#');
+}
+
+/*
+ * Space left for record data in the current sub-buffer of the global
+ * buffer. When the sub-buffer is empty, also subtract the packet header
+ * a fresh sub-buffer would carry.
+ */
+static
+size_t lttng_packet_avail_size(struct channel *chan)
+{
+	unsigned long o_begin;
+	struct lib_ring_buffer *buf;
+
+	buf = chan->backend.buf;	/* Only for global buffer ! */
+	o_begin = v_read(&client_config, &buf->offset);
+	if (subbuf_offset(o_begin, chan) != 0) {
+		return chan->backend.subbuf_size - subbuf_offset(o_begin, chan);
+	} else {
+		return chan->backend.subbuf_size - subbuf_offset(o_begin, chan)
+			- sizeof(struct trigger_packet_header);
+	}
+}
+
+/* Writer-side wait queue of the buffer for @cpu. */
+static
+wait_queue_head_t *lttng_get_writer_buf_wait_queue(struct channel *chan, int cpu)
+{
+	struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
+			chan, cpu);
+	return &buf->write_wait;
+}
+
+/* CPU-hotplug wait queue of the channel. */
+static
+wait_queue_head_t *lttng_get_hp_wait_queue(struct channel *chan)
+{
+	return &chan->hp_wait;
+}
+
+static
+int lttng_is_finalized(struct channel *chan)
+{
+	return lib_ring_buffer_channel_is_finalized(chan);
+}
+
+static
+int lttng_is_disabled(struct channel *chan)
+{
+	return lib_ring_buffer_channel_is_disabled(chan);
+}
+
+/* Operations table exposed to lttng-events for the trigger transport. */
+static struct lttng_transport lttng_relay_transport = {
+	.name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
+	.owner = THIS_MODULE,
+	.ops = {
+		.channel_create = _channel_create,
+		.channel_destroy = lttng_channel_destroy,
+		.buffer_read_open = lttng_buffer_read_open,
+		.buffer_has_read_closed_stream =
+			lttng_buffer_has_read_closed_stream,
+		.buffer_read_close = lttng_buffer_read_close,
+		.event_reserve = lttng_event_reserve,
+		.event_commit = lttng_event_commit,
+		.event_write_from_user = lttng_event_write_from_user,
+		.event_memset = lttng_event_memset,
+		.event_write = lttng_event_write,
+		.event_strcpy = lttng_event_strcpy,
+		.packet_avail_size = lttng_packet_avail_size,
+		.get_writer_buf_wait_queue = lttng_get_writer_buf_wait_queue,
+		.get_hp_wait_queue = lttng_get_hp_wait_queue,
+		.is_finalized = lttng_is_finalized,
+		.is_disabled = lttng_is_disabled,
+		.timestamp_begin = client_timestamp_begin,
+		.timestamp_end = client_timestamp_end,
+		.events_discarded = client_events_discarded,
+		.content_size = client_content_size,
+		.packet_size = client_packet_size,
+		.stream_id = client_stream_id,
+		.current_timestamp = client_current_timestamp,
+		.sequence_number = client_sequence_number,
+		.instance_id = client_instance_id,
+	},
+};
+
+static int __init lttng_ring_buffer_trigger_client_init(void)
+{
+	/*
+	 * This vmalloc sync all also takes care of the lib ring buffer
+	 * vmalloc'd module pages when it is built as a module into LTTng.
+	 */
+	wrapper_vmalloc_sync_mappings();
+	lttng_transport_register(&lttng_relay_transport);
+	return 0;
+}
+
+module_init(lttng_ring_buffer_trigger_client_init);
+
+static void __exit lttng_ring_buffer_trigger_client_exit(void)
+{
+	lttng_transport_unregister(&lttng_relay_transport);
+}
+
+module_exit(lttng_ring_buffer_trigger_client_exit);
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
+MODULE_DESCRIPTION("LTTng ring buffer " RING_BUFFER_MODE_TEMPLATE_STRING
+		" client");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+	__stringify(LTTNG_MODULES_MINOR_VERSION) "."
+	__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+	LTTNG_MODULES_EXTRAVERSION);
#include <wrapper/rcu.h>
#include <wrapper/syscall.h>
#include <lttng/events.h>
+#include <lttng/utils.h>
#ifndef CONFIG_COMPAT
# ifndef is_compat_task
#define COMPAT_SYSCALL_EXIT_STR __stringify(COMPAT_SYSCALL_EXIT_TOK)
static
-void syscall_entry_probe(void *__data, struct pt_regs *regs, long id);
+void syscall_entry_event_probe(void *__data, struct pt_regs *regs, long id);
static
-void syscall_exit_probe(void *__data, struct pt_regs *regs, long ret);
+void syscall_exit_event_probe(void *__data, struct pt_regs *regs, long ret);
/*
* Forward declarations for old kernels.
/* Hijack probe callback for system call enter */
#undef TP_PROBE_CB
-#define TP_PROBE_CB(_template) &syscall_entry_probe
+#define TP_PROBE_CB(_template) &syscall_entry_event_probe
#define SC_LTTNG_TRACEPOINT_EVENT(_name, _proto, _args, _fields) \
LTTNG_TRACEPOINT_EVENT(syscall_entry_##_name, PARAMS(_proto), PARAMS(_args), \
PARAMS(_fields))
#undef _TRACE_SYSCALLS_POINTERS_H
/* Hijack probe callback for compat system call enter */
-#define TP_PROBE_CB(_template) &syscall_entry_probe
+#define TP_PROBE_CB(_template) &syscall_entry_event_probe
#define LTTNG_SC_COMPAT
#define SC_LTTNG_TRACEPOINT_EVENT(_name, _proto, _args, _fields) \
LTTNG_TRACEPOINT_EVENT(compat_syscall_entry_##_name, PARAMS(_proto), PARAMS(_args), \
#define sc_inout(...) __VA_ARGS__
/* Hijack probe callback for system call exit */
-#define TP_PROBE_CB(_template) &syscall_exit_probe
+#define TP_PROBE_CB(_template) &syscall_exit_event_probe
#define SC_LTTNG_TRACEPOINT_EVENT(_name, _proto, _args, _fields) \
LTTNG_TRACEPOINT_EVENT(syscall_exit_##_name, PARAMS(_proto), PARAMS(_args), \
PARAMS(_fields))
/* Hijack probe callback for compat system call exit */
-#define TP_PROBE_CB(_template) &syscall_exit_probe
+#define TP_PROBE_CB(_template) &syscall_exit_event_probe
#define LTTNG_SC_COMPAT
#define SC_LTTNG_TRACEPOINT_EVENT(_name, _proto, _args, _fields) \
LTTNG_TRACEPOINT_EVENT(compat_syscall_exit_##_name, PARAMS(_proto), PARAMS(_args), \
#undef CREATE_TRACE_POINTS
struct trace_syscall_entry {
- void *func;
+ void *event_func;
+ void *trigger_func;
const struct lttng_event_desc *desc;
const struct lttng_event_field *fields;
unsigned int nrargs;
#undef TRACE_SYSCALL_TABLE
#define TRACE_SYSCALL_TABLE(_template, _name, _nr, _nrargs) \
[ _nr ] = { \
- .func = __event_probe__syscall_entry_##_template, \
+ .event_func = __event_probe__syscall_entry_##_template, \
+ .trigger_func = __trigger_probe__syscall_entry_##_template, \
.nrargs = (_nrargs), \
.fields = __event_fields___syscall_entry_##_template, \
.desc = &__event_desc___syscall_entry_##_name, \
},
-/* Syscall enter tracing table */
+/* Event syscall enter tracing table */
static const struct trace_syscall_entry sc_table[] = {
#include <instrumentation/syscalls/headers/syscalls_integers.h>
#include <instrumentation/syscalls/headers/syscalls_pointers.h>
#undef TRACE_SYSCALL_TABLE
#define TRACE_SYSCALL_TABLE(_template, _name, _nr, _nrargs) \
[ _nr ] = { \
- .func = __event_probe__compat_syscall_entry_##_template, \
+ .event_func = __event_probe__compat_syscall_entry_##_template, \
+ .trigger_func = __trigger_probe__compat_syscall_entry_##_template, \
.nrargs = (_nrargs), \
.fields = __event_fields___compat_syscall_entry_##_template, \
.desc = &__event_desc___compat_syscall_entry_##_name, \
},
-/* Compat syscall enter table */
+/* Event compat syscall enter table */
const struct trace_syscall_entry compat_sc_table[] = {
#include <instrumentation/syscalls/headers/compat_syscalls_integers.h>
#include <instrumentation/syscalls/headers/compat_syscalls_pointers.h>
#undef TRACE_SYSCALL_TABLE
#define TRACE_SYSCALL_TABLE(_template, _name, _nr, _nrargs) \
[ _nr ] = { \
- .func = __event_probe__syscall_exit_##_template, \
+ .event_func = __event_probe__syscall_exit_##_template, \
+ .trigger_func = __trigger_probe__syscall_exit_##_template, \
.nrargs = (_nrargs), \
.fields = __event_fields___syscall_exit_##_template, \
.desc = &__event_desc___syscall_exit_##_name, \
},
-/* Syscall exit table */
+/* Event syscall exit table */
static const struct trace_syscall_entry sc_exit_table[] = {
#include <instrumentation/syscalls/headers/syscalls_integers.h>
#include <instrumentation/syscalls/headers/syscalls_pointers.h>
#undef TRACE_SYSCALL_TABLE
#define TRACE_SYSCALL_TABLE(_template, _name, _nr, _nrargs) \
[ _nr ] = { \
- .func = __event_probe__compat_syscall_exit_##_template, \
+ .event_func = __event_probe__compat_syscall_exit_##_template, \
+ .trigger_func = __trigger_probe__compat_syscall_exit_##_template, \
.nrargs = (_nrargs), \
.fields = __event_fields___compat_syscall_exit_##_template, \
.desc = &__event_desc___compat_syscall_exit_##_name, \
},
-/* Compat syscall exit table */
+/* Event compat syscall exit table */
const struct trace_syscall_entry compat_sc_exit_table[] = {
#include <instrumentation/syscalls/headers/compat_syscalls_integers.h>
#include <instrumentation/syscalls/headers/compat_syscalls_pointers.h>
DECLARE_BITMAP(sc_compat_exit, NR_compat_syscalls);
};
-static void syscall_entry_unknown(struct lttng_event *event,
+static void syscall_entry_event_unknown(struct lttng_event *event,
struct pt_regs *regs, unsigned int id)
{
unsigned long args[LTTNG_SYSCALL_NR_ARGS];
__event_probe__syscall_entry_unknown(event, id, args);
}
-void syscall_entry_probe(void *__data, struct pt_regs *regs, long id)
+static __always_inline
+void syscall_entry_call_func(void *func, unsigned int nrargs, void *data,
+ struct pt_regs *regs)
{
- struct lttng_channel *chan = __data;
- struct lttng_event *event, *unknown_event;
- const struct trace_syscall_entry *table, *entry;
- size_t table_len;
-
- if (unlikely(in_compat_syscall())) {
- struct lttng_syscall_filter *filter = chan->sc_filter;
-
- if (id < 0 || id >= NR_compat_syscalls
- || (!READ_ONCE(chan->syscall_all) && !test_bit(id, filter->sc_compat_entry))) {
- /* System call filtered out. */
- return;
- }
- table = compat_sc_table;
- table_len = ARRAY_SIZE(compat_sc_table);
- unknown_event = chan->sc_compat_unknown;
- } else {
- struct lttng_syscall_filter *filter = chan->sc_filter;
-
- if (id < 0 || id >= NR_syscalls
- || (!READ_ONCE(chan->syscall_all) && !test_bit(id, filter->sc_entry))) {
- /* System call filtered out. */
- return;
- }
- table = sc_table;
- table_len = ARRAY_SIZE(sc_table);
- unknown_event = chan->sc_unknown;
- }
- if (unlikely(id < 0 || id >= table_len)) {
- syscall_entry_unknown(unknown_event, regs, id);
- return;
- }
- if (unlikely(in_compat_syscall()))
- event = chan->compat_sc_table[id];
- else
- event = chan->sc_table[id];
- if (unlikely(!event)) {
- syscall_entry_unknown(unknown_event, regs, id);
- return;
- }
- entry = &table[id];
- WARN_ON_ONCE(!entry);
-
- switch (entry->nrargs) {
+ switch (nrargs) {
case 0:
{
- void (*fptr)(void *__data) = entry->func;
+ void (*fptr)(void *__data) = func;
- fptr(event);
+ fptr(data);
break;
}
case 1:
{
- void (*fptr)(void *__data, unsigned long arg0) = entry->func;
+ void (*fptr)(void *__data, unsigned long arg0) = func;
unsigned long args[LTTNG_SYSCALL_NR_ARGS];
lttng_syscall_get_arguments(current, regs, args);
- fptr(event, args[0]);
+ fptr(data, args[0]);
break;
}
case 2:
{
void (*fptr)(void *__data,
unsigned long arg0,
- unsigned long arg1) = entry->func;
+ unsigned long arg1) = func;
unsigned long args[LTTNG_SYSCALL_NR_ARGS];
lttng_syscall_get_arguments(current, regs, args);
- fptr(event, args[0], args[1]);
+ fptr(data, args[0], args[1]);
break;
}
case 3:
void (*fptr)(void *__data,
unsigned long arg0,
unsigned long arg1,
- unsigned long arg2) = entry->func;
+ unsigned long arg2) = func;
unsigned long args[LTTNG_SYSCALL_NR_ARGS];
lttng_syscall_get_arguments(current, regs, args);
- fptr(event, args[0], args[1], args[2]);
+ fptr(data, args[0], args[1], args[2]);
break;
}
case 4:
unsigned long arg0,
unsigned long arg1,
unsigned long arg2,
- unsigned long arg3) = entry->func;
+ unsigned long arg3) = func;
unsigned long args[LTTNG_SYSCALL_NR_ARGS];
lttng_syscall_get_arguments(current, regs, args);
- fptr(event, args[0], args[1], args[2], args[3]);
+ fptr(data, args[0], args[1], args[2], args[3]);
break;
}
case 5:
unsigned long arg1,
unsigned long arg2,
unsigned long arg3,
- unsigned long arg4) = entry->func;
+ unsigned long arg4) = func;
unsigned long args[LTTNG_SYSCALL_NR_ARGS];
lttng_syscall_get_arguments(current, regs, args);
- fptr(event, args[0], args[1], args[2], args[3], args[4]);
+ fptr(data, args[0], args[1], args[2], args[3], args[4]);
break;
}
case 6:
unsigned long arg2,
unsigned long arg3,
unsigned long arg4,
- unsigned long arg5) = entry->func;
+ unsigned long arg5) = func;
unsigned long args[LTTNG_SYSCALL_NR_ARGS];
lttng_syscall_get_arguments(current, regs, args);
- fptr(event, args[0], args[1], args[2],
+ fptr(data, args[0], args[1], args[2],
args[3], args[4], args[5]);
break;
}
}
}
-static void syscall_exit_unknown(struct lttng_event *event,
+/*
+ * Probe hooked on the "sys_enter" tracepoint for event recording.
+ * @__data is the lttng_channel registered with the tracepoint. Filters
+ * the syscall id against the channel's syscall filter, then dispatches
+ * to the per-syscall event probe (or the "unknown" event fallback).
+ */
+void syscall_entry_event_probe(void *__data, struct pt_regs *regs, long id)
+{
+	struct lttng_channel *chan = __data;
+	struct lttng_event *event, *unknown_event;
+	const struct trace_syscall_entry *table, *entry;
+	size_t table_len;
+
+	if (unlikely(in_compat_syscall())) {
+		struct lttng_syscall_filter *filter = chan->sc_filter;
+
+		if (id < 0 || id >= NR_compat_syscalls
+			|| (!READ_ONCE(chan->syscall_all) && !test_bit(id, filter->sc_compat_entry))) {
+			/* System call filtered out. */
+			return;
+		}
+		table = compat_sc_table;
+		table_len = ARRAY_SIZE(compat_sc_table);
+		unknown_event = chan->sc_compat_unknown;
+	} else {
+		struct lttng_syscall_filter *filter = chan->sc_filter;
+
+		if (id < 0 || id >= NR_syscalls
+			|| (!READ_ONCE(chan->syscall_all) && !test_bit(id, filter->sc_entry))) {
+			/* System call filtered out. */
+			return;
+		}
+		table = sc_table;
+		table_len = ARRAY_SIZE(sc_table);
+		unknown_event = chan->sc_unknown;
+	}
+	/* Syscall id not described by the instrumentation tables. */
+	if (unlikely(id < 0 || id >= table_len)) {
+		syscall_entry_event_unknown(unknown_event, regs, id);
+		return;
+	}
+	if (unlikely(in_compat_syscall()))
+		event = chan->compat_sc_table[id];
+	else
+		event = chan->sc_table[id];
+	/* No event was created for this syscall in this channel. */
+	if (unlikely(!event)) {
+		syscall_entry_event_unknown(unknown_event, regs, id);
+		return;
+	}
+	entry = &table[id];
+	WARN_ON_ONCE(!entry);
+	syscall_entry_call_func(entry->event_func, entry->nrargs, event, regs);
+}
+
+/*
+ * Probe hooked on the "sys_enter" tracepoint for trigger firing.
+ * @__data is the lttng_trigger_group registered with the tracepoint.
+ * Fires every trigger attached to the dispatch list of this syscall id,
+ * under RCU read-side protection (list_for_each_entry_rcu pairs with the
+ * list_add_rcu/list_del_rcu done by the enable/disable paths).
+ */
+void syscall_entry_trigger_probe(void *__data, struct pt_regs *regs, long id)
+{
+	struct lttng_trigger_group *trigger_group = __data;
+	const struct trace_syscall_entry *entry;
+	struct list_head *dispatch_list;
+	struct lttng_trigger *iter;
+	size_t table_len;
+
+
+	if (unlikely(in_compat_syscall())) {
+		table_len = ARRAY_SIZE(compat_sc_table);
+		if (unlikely(id < 0 || id >= table_len)) {
+			return;
+		}
+		entry = &compat_sc_table[id];
+		dispatch_list = &trigger_group->trigger_compat_syscall_dispatch[id];
+	} else {
+		table_len = ARRAY_SIZE(sc_table);
+		if (unlikely(id < 0 || id >= table_len)) {
+			return;
+		}
+		entry = &sc_table[id];
+		dispatch_list = &trigger_group->trigger_syscall_dispatch[id];
+	}
+
+	/* TODO handle unknown syscall */
+
+	list_for_each_entry_rcu(iter, dispatch_list, u.syscall.node) {
+		BUG_ON(iter->u.syscall.syscall_id != id);
+		syscall_entry_call_func(entry->trigger_func, entry->nrargs, iter, regs);
+	}
+}
+
+static void syscall_exit_event_unknown(struct lttng_event *event,
struct pt_regs *regs, int id, long ret)
{
unsigned long args[LTTNG_SYSCALL_NR_ARGS];
__event_probe__syscall_exit_unknown(event, id, ret, args);
}
-void syscall_exit_probe(void *__data, struct pt_regs *regs, long ret)
+void syscall_exit_event_probe(void *__data, struct pt_regs *regs, long ret)
{
struct lttng_channel *chan = __data;
struct lttng_event *event, *unknown_event;
unknown_event = chan->sc_exit_unknown;
}
if (unlikely(id < 0 || id >= table_len)) {
- syscall_exit_unknown(unknown_event, regs, id, ret);
+ syscall_exit_event_unknown(unknown_event, regs, id, ret);
return;
}
if (unlikely(in_compat_syscall()))
else
event = chan->sc_exit_table[id];
if (unlikely(!event)) {
- syscall_exit_unknown(unknown_event, regs, id, ret);
+ syscall_exit_event_unknown(unknown_event, regs, id, ret);
return;
}
entry = &table[id];
switch (entry->nrargs) {
case 0:
{
- void (*fptr)(void *__data, long ret) = entry->func;
+ void (*fptr)(void *__data, long ret) = entry->event_func;
fptr(event, ret);
break;
{
void (*fptr)(void *__data,
long ret,
- unsigned long arg0) = entry->func;
+ unsigned long arg0) = entry->event_func;
unsigned long args[LTTNG_SYSCALL_NR_ARGS];
lttng_syscall_get_arguments(current, regs, args);
void (*fptr)(void *__data,
long ret,
unsigned long arg0,
- unsigned long arg1) = entry->func;
+ unsigned long arg1) = entry->event_func;
unsigned long args[LTTNG_SYSCALL_NR_ARGS];
lttng_syscall_get_arguments(current, regs, args);
long ret,
unsigned long arg0,
unsigned long arg1,
- unsigned long arg2) = entry->func;
+ unsigned long arg2) = entry->event_func;
unsigned long args[LTTNG_SYSCALL_NR_ARGS];
lttng_syscall_get_arguments(current, regs, args);
unsigned long arg0,
unsigned long arg1,
unsigned long arg2,
- unsigned long arg3) = entry->func;
+ unsigned long arg3) = entry->event_func;
unsigned long args[LTTNG_SYSCALL_NR_ARGS];
lttng_syscall_get_arguments(current, regs, args);
unsigned long arg1,
unsigned long arg2,
unsigned long arg3,
- unsigned long arg4) = entry->func;
+ unsigned long arg4) = entry->event_func;
unsigned long args[LTTNG_SYSCALL_NR_ARGS];
lttng_syscall_get_arguments(current, regs, args);
unsigned long arg2,
unsigned long arg3,
unsigned long arg4,
- unsigned long arg5) = entry->func;
+ unsigned long arg5) = entry->event_func;
unsigned long args[LTTNG_SYSCALL_NR_ARGS];
lttng_syscall_get_arguments(current, regs, args);
* Should be called with sessions lock held.
*/
static
-int fill_table(const struct trace_syscall_entry *table, size_t table_len,
+int fill_event_table(const struct trace_syscall_entry *table, size_t table_len,
struct lttng_event **chan_table, struct lttng_channel *chan,
void *filter, enum sc_type type)
{
/*
* Should be called with sessions lock held.
*/
-int lttng_syscalls_register(struct lttng_channel *chan, void *filter)
+int lttng_syscalls_register_event(struct lttng_channel *chan, void *filter)
{
struct lttng_kernel_event ev;
int ret;
}
}
- ret = fill_table(sc_table, ARRAY_SIZE(sc_table),
+ ret = fill_event_table(sc_table, ARRAY_SIZE(sc_table),
chan->sc_table, chan, filter, SC_TYPE_ENTRY);
if (ret)
return ret;
- ret = fill_table(sc_exit_table, ARRAY_SIZE(sc_exit_table),
+ ret = fill_event_table(sc_exit_table, ARRAY_SIZE(sc_exit_table),
chan->sc_exit_table, chan, filter, SC_TYPE_EXIT);
if (ret)
return ret;
#ifdef CONFIG_COMPAT
- ret = fill_table(compat_sc_table, ARRAY_SIZE(compat_sc_table),
+ ret = fill_event_table(compat_sc_table, ARRAY_SIZE(compat_sc_table),
chan->compat_sc_table, chan, filter,
SC_TYPE_COMPAT_ENTRY);
if (ret)
return ret;
- ret = fill_table(compat_sc_exit_table, ARRAY_SIZE(compat_sc_exit_table),
+ ret = fill_event_table(compat_sc_exit_table, ARRAY_SIZE(compat_sc_exit_table),
chan->compat_sc_exit_table, chan, filter,
SC_TYPE_COMPAT_EXIT);
if (ret)
if (!chan->sys_enter_registered) {
ret = lttng_wrapper_tracepoint_probe_register("sys_enter",
- (void *) syscall_entry_probe, chan);
+ (void *) syscall_entry_event_probe, chan);
if (ret)
return ret;
chan->sys_enter_registered = 1;
*/
if (!chan->sys_exit_registered) {
ret = lttng_wrapper_tracepoint_probe_register("sys_exit",
- (void *) syscall_exit_probe, chan);
+ (void *) syscall_exit_event_probe, chan);
if (ret) {
WARN_ON_ONCE(lttng_wrapper_tracepoint_probe_unregister("sys_enter",
- (void *) syscall_entry_probe, chan));
+ (void *) syscall_entry_event_probe, chan));
return ret;
}
chan->sys_exit_registered = 1;
}
/*
- * Only called at session destruction.
+ * Should be called with sessions lock held.
+ */
+int lttng_syscalls_register_trigger(struct lttng_trigger_enabler *trigger_enabler, void *filter)
+{
+	struct lttng_trigger_group *group = trigger_enabler->group;
+	unsigned int i;
+	int ret = 0;
+
+	wrapper_vmalloc_sync_mappings();
+
+	if (!group->trigger_syscall_dispatch) {
+		/* kcalloc checks the array-size multiplication for overflow. */
+		group->trigger_syscall_dispatch = kcalloc(ARRAY_SIZE(sc_table),
+				sizeof(struct list_head), GFP_KERNEL);
+		if (!group->trigger_syscall_dispatch)
+			return -ENOMEM;
+
+		/* Initialize all list_head */
+		for (i = 0; i < ARRAY_SIZE(sc_table); i++)
+			INIT_LIST_HEAD(&group->trigger_syscall_dispatch[i]);
+	}
+
+#ifdef CONFIG_COMPAT
+	if (!group->trigger_compat_syscall_dispatch) {
+		group->trigger_compat_syscall_dispatch = kcalloc(ARRAY_SIZE(compat_sc_table),
+				sizeof(struct list_head), GFP_KERNEL);
+		/*
+		 * Check the compat table we just allocated, not the native
+		 * one (the original checked trigger_syscall_dispatch here,
+		 * a copy-paste bug that defeated the OOM check).
+		 */
+		if (!group->trigger_compat_syscall_dispatch)
+			return -ENOMEM;
+
+		/* Initialize all list_head */
+		for (i = 0; i < ARRAY_SIZE(compat_sc_table); i++)
+			INIT_LIST_HEAD(&group->trigger_compat_syscall_dispatch[i]);
+	}
+#endif
+
+	if (!group->sys_enter_registered) {
+		ret = lttng_wrapper_tracepoint_probe_register("sys_enter",
+				(void *) syscall_entry_trigger_probe, group);
+		if (ret)
+			return ret;
+		group->sys_enter_registered = 1;
+	}
+
+	return ret;
+}
+
+/*
+ * Walk one syscall instrumentation table and create a trigger for every
+ * described syscall that matches @trigger_enabler, skipping syscalls for
+ * which a trigger with the same id already exists in the group's hash
+ * table. Returns 0 on success, -ENOMEM on trigger creation failure.
+ */
+static int create_matching_triggers(struct lttng_trigger_enabler *trigger_enabler,
+		void *filter, const struct trace_syscall_entry *table,
+		size_t table_len, bool is_compat)
+{
+	struct lttng_trigger_group *group = trigger_enabler->group;
+	const struct lttng_event_desc *desc;
+	uint64_t id = trigger_enabler->id;
+	uint64_t error_counter_index = trigger_enabler->error_counter_index;
+	unsigned int i;
+	int ret = 0;
+
+	/* iterate over all syscall and create trigger that match */
+	for (i = 0; i < table_len; i++) {
+		struct lttng_trigger *trigger;
+		struct lttng_kernel_trigger trigger_param;
+		struct hlist_head *head;
+		int found = 0;
+
+		desc = table[i].desc;
+		if (!desc) {
+			/* Unknown syscall */
+			continue;
+		}
+
+		if (!lttng_desc_match_enabler(desc,
+				lttng_trigger_enabler_as_enabler(trigger_enabler)))
+			continue;
+
+		/*
+		 * Check if already created.
+		 */
+		head = utils_borrow_hash_table_bucket(group->triggers_ht.table,
+			LTTNG_TRIGGER_HT_SIZE, desc->name);
+		lttng_hlist_for_each_entry(trigger, head, hlist) {
+			if (trigger->desc == desc
+				&& trigger->id == trigger_enabler->id)
+				found = 1;
+		}
+		if (found)
+			continue;
+
+		/* Build the trigger parameters from the event description. */
+		memset(&trigger_param, 0, sizeof(trigger_param));
+		strncat(trigger_param.name, desc->name,
+			LTTNG_KERNEL_SYM_NAME_LEN - strlen(trigger_param.name) - 1);
+		trigger_param.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
+		trigger_param.instrumentation = LTTNG_KERNEL_SYSCALL;
+
+		trigger = _lttng_trigger_create(desc, id, error_counter_index,
+			group, &trigger_param, filter,
+			trigger_param.instrumentation);
+		if (IS_ERR(trigger)) {
+			printk(KERN_INFO "Unable to create trigger %s\n",
+				desc->name);
+			ret = -ENOMEM;
+			goto end;
+		}
+
+		/* Record which table entry the trigger dispatches from. */
+		trigger->u.syscall.syscall_id = i;
+		trigger->u.syscall.is_compat = is_compat;
+	}
+end:
+	return ret;
+
+}
+
+/*
+ * Create triggers for every native and compat syscall matching the
+ * enabler.
+ * NOTE(review): "syscals" in the name is a typo; it must stay in sync
+ * with the declaration in the corresponding header before being renamed.
+ */
+int lttng_syscals_create_matching_triggers(struct lttng_trigger_enabler *trigger_enabler, void *filter)
+{
+	int ret;
+
+	ret = create_matching_triggers(trigger_enabler, filter, sc_table,
+		ARRAY_SIZE(sc_table), false);
+	if (ret)
+		goto end;
+
+	ret = create_matching_triggers(trigger_enabler, filter, compat_sc_table,
+		ARRAY_SIZE(compat_sc_table), true);
+end:
+	return ret;
+}
+
+/*
+ * Unregister the syscall trigger probes from the callsites.
*/
-int lttng_syscalls_unregister(struct lttng_channel *chan)
+int lttng_syscalls_unregister_trigger(struct lttng_trigger_group *trigger_group)
+{
+	int ret;
+
+	/*
+	 * Only register the trigger probe on the `sys_enter` callsite for now.
+	 * At the moment, we don't think it's desirable to have one fired
+	 * trigger for the entry and one for the exit of a syscall.
+	 */
+	if (trigger_group->sys_enter_registered) {
+		ret = lttng_wrapper_tracepoint_probe_unregister("sys_enter",
+				(void *) syscall_entry_trigger_probe, trigger_group);
+		if (ret)
+			return ret;
+		trigger_group->sys_enter_registered = 0;
+	}
+
+	/*
+	 * NULL the dispatch tables after freeing them so that a later
+	 * lttng_syscalls_register_trigger() reallocates them instead of
+	 * reusing freed memory, and so this function is idempotent.
+	 */
+	kfree(trigger_group->trigger_syscall_dispatch);
+	trigger_group->trigger_syscall_dispatch = NULL;
+#ifdef CONFIG_COMPAT
+	kfree(trigger_group->trigger_compat_syscall_dispatch);
+	trigger_group->trigger_compat_syscall_dispatch = NULL;
+#endif
+	return 0;
+}
+
+int lttng_syscalls_unregister_event(struct lttng_channel *chan)
{
int ret;
return 0;
if (chan->sys_enter_registered) {
ret = lttng_wrapper_tracepoint_probe_unregister("sys_enter",
- (void *) syscall_entry_probe, chan);
+ (void *) syscall_entry_event_probe, chan);
if (ret)
return ret;
chan->sys_enter_registered = 0;
}
if (chan->sys_exit_registered) {
ret = lttng_wrapper_tracepoint_probe_unregister("sys_exit",
- (void *) syscall_exit_probe, chan);
+ (void *) syscall_exit_event_probe, chan);
if (ret)
return ret;
chan->sys_exit_registered = 0;
return 0;
}
-int lttng_syscalls_destroy(struct lttng_channel *chan)
+int lttng_syscalls_destroy_event(struct lttng_channel *chan)
{
kfree(chan->sc_table);
kfree(chan->sc_exit_table);
return event->desc->name + prefix_len;
}
-int lttng_syscall_filter_enable(struct lttng_channel *chan,
+int lttng_syscall_filter_enable_event(struct lttng_channel *chan,
struct lttng_event *event)
{
struct lttng_syscall_filter *filter = chan->sc_filter;
return 0;
}
-int lttng_syscall_filter_disable(struct lttng_channel *chan,
+/*
+ * Attach a syscall trigger to its group's per-syscall dispatch list so the
+ * syscall entry probe starts firing notifications for it.  Always returns 0.
+ */
+int lttng_syscall_filter_enable_trigger(struct lttng_trigger *trigger)
+{
+ struct lttng_trigger_group *group = trigger->group;
+ unsigned int syscall_id = trigger->u.syscall.syscall_id;
+ struct list_head *dispatch_list;
+
+ /* Compat syscalls have their own dispatch table. */
+ if (trigger->u.syscall.is_compat)
+ dispatch_list = &group->trigger_compat_syscall_dispatch[syscall_id];
+ else
+ dispatch_list = &group->trigger_syscall_dispatch[syscall_id];
+
+ /* Publish to RCU readers traversing the dispatch list. */
+ list_add_rcu(&trigger->u.syscall.node, dispatch_list);
+
+ return 0;
+}
+
+int lttng_syscall_filter_disable_event(struct lttng_channel *chan,
struct lttng_event *event)
{
struct lttng_syscall_filter *filter = chan->sc_filter;
return 0;
}
+/*
+ * Detach a syscall trigger from its dispatch list.  Always returns 0.
+ * NOTE(review): readers traverse this list under RCU; the trigger must not
+ * be freed before a grace period elapses — confirm at the call sites.
+ */
+int lttng_syscall_filter_disable_trigger(struct lttng_trigger *trigger)
+{
+ list_del_rcu(&trigger->u.syscall.node);
+ return 0;
+}
+
static
const struct trace_syscall_entry *syscall_list_get_entry(loff_t *pos)
{
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-trigger-notification.c
+ *
+ * Copyright (C) 2020 Francis Deslauriers <francis.deslauriers@efficios.com>
+ */
+
+#include <linux/bug.h>
+
+#include <lttng/lttng-bytecode.h>
+#include <lttng/events.h>
+#include <lttng/msgpack.h>
+#include <lttng/trigger-notification.h>
+
+/*
+ * FIXME: This size is probably too low, but it must stay below 1024 bytes so
+ * that the stack frame does not exceed the 1024-byte limit enforced by the
+ * kernel.
+ */
+#define CAPTURE_BUFFER_SIZE 512
+
+/* State for one in-flight trigger notification (lives on the stack). */
+struct lttng_trigger_notification {
+ int notification_fd; /* presumably the notification channel fd — unused in this file, TODO confirm */
+ uint64_t trigger_id; /* presumably the id of the trigger that fired — unused in this file, TODO confirm */
+ uint8_t capture_buf[CAPTURE_BUFFER_SIZE]; /* msgpack-serialized capture values */
+ struct lttng_msgpack_writer writer; /* writer positioned over capture_buf */
+ bool has_captures; /* true when capture_buf holds serialized data */
+};
+
+/*
+ * Append an enumeration capture to the msgpack buffer.
+ * Returns 0 on success, negative value on msgpack serialization error.
+ */
+static
+int capture_enum(struct lttng_msgpack_writer *writer,
+		struct lttng_interpreter_output *output)
+{
+	int ret;
+
+	/*
+	 * Enums are captured as a map containing 2 key-value pairs. Such as:
+	 * - type: enum
+	 *   value: 177
+	 */
+	ret = lttng_msgpack_begin_map(writer, 2);
+	if (ret) {
+		WARN_ON_ONCE(1);
+		goto end;
+	}
+
+	ret = lttng_msgpack_write_str(writer, "type");
+	if (ret) {
+		WARN_ON_ONCE(1);
+		goto end;
+	}
+
+	ret = lttng_msgpack_write_str(writer, "enum");
+	if (ret) {
+		WARN_ON_ONCE(1);
+		goto end;
+	}
+
+	ret = lttng_msgpack_write_str(writer, "value");
+	if (ret) {
+		WARN_ON_ONCE(1);
+		goto end;
+	}
+
+	switch (output->type) {
+	case LTTNG_INTERPRETER_TYPE_SIGNED_ENUM:
+		ret = lttng_msgpack_write_signed_integer(writer, output->u.s);
+		if (ret) {
+			WARN_ON_ONCE(1);
+			goto end;
+		}
+		break;
+	case LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM:
+		/*
+		 * Fix: unsigned enum containers must be serialized with the
+		 * unsigned writer; the signed writer would misrepresent
+		 * values above INT64_MAX.
+		 */
+		ret = lttng_msgpack_write_unsigned_integer(writer, output->u.u);
+		if (ret) {
+			WARN_ON_ONCE(1);
+			goto end;
+		}
+		break;
+	default:
+		WARN_ON(1);
+	}
+
+	ret = lttng_msgpack_end_map(writer);
+	if (ret)
+		WARN_ON_ONCE(1);
+
+end:
+	return ret;
+}
+
+/*
+ * Load one signed integer element of `type->size` bits from ptr, swapping
+ * byte order when the field's byte order differs from the host's, and
+ * sign-extend it to int64_t.
+ */
+static
+int64_t capture_sequence_element_signed(uint8_t *ptr,
+		const struct lttng_integer_type *type)
+{
+	int64_t value = 0;
+	unsigned int size = type->size;	/* size in bits */
+	bool byte_order_reversed = type->reverse_byte_order;
+
+	switch (size) {
+	case 8:
+		/*
+		 * Fix: read through a signed pointer so negative 8-bit
+		 * values sign-extend; `*ptr` (uint8_t) would yield 0..255
+		 * instead of -128..127.
+		 */
+		value = *(int8_t *) ptr;
+		break;
+	case 16:
+	{
+		int16_t tmp;
+		tmp = *(int16_t *) ptr;
+		if (byte_order_reversed)
+			__swab16s(&tmp);
+
+		value = tmp;
+		break;
+	}
+	case 32:
+	{
+		int32_t tmp;
+		tmp = *(int32_t *) ptr;
+		if (byte_order_reversed)
+			__swab32s(&tmp);
+
+		value = tmp;
+		break;
+	}
+	case 64:
+	{
+		int64_t tmp;
+		tmp = *(int64_t *) ptr;
+		if (byte_order_reversed)
+			__swab64s(&tmp);
+
+		value = tmp;
+		break;
+	}
+	default:
+		WARN_ON(1);
+	}
+
+	return value;
+}
+
+/*
+ * Load one unsigned integer element of `type->size` bits from ptr, swapping
+ * byte order when the field's byte order differs from the host's, and widen
+ * it to uint64_t.
+ */
+static
+uint64_t capture_sequence_element_unsigned(uint8_t *ptr,
+ const struct lttng_integer_type *type)
+{
+ uint64_t value = 0;
+ unsigned int size = type->size; /* size in bits */
+ bool byte_order_reversed = type->reverse_byte_order;
+
+ switch (size) {
+ case 8:
+ value = *ptr;
+ break;
+ case 16:
+ {
+ uint16_t tmp;
+ tmp = *(uint16_t *) ptr;
+ if (byte_order_reversed)
+ __swab16s(&tmp);
+
+ value = tmp;
+ break;
+ }
+ case 32:
+ {
+ uint32_t tmp;
+ tmp = *(uint32_t *) ptr;
+ if (byte_order_reversed)
+ __swab32s(&tmp);
+
+ value = tmp;
+ break;
+ }
+ case 64:
+ {
+ uint64_t tmp;
+ tmp = *(uint64_t *) ptr;
+ if (byte_order_reversed)
+ __swab64s(&tmp);
+
+ value = tmp;
+ break;
+ }
+ default:
+ WARN_ON(1);
+ }
+
+ return value;
+}
+
+/*
+ * Append a sequence capture (array of integers or of enums backed by an
+ * integer container) to the msgpack buffer as a msgpack array.
+ * Returns 0 on success, negative value on error or unsupported type.
+ */
+int capture_sequence(struct lttng_msgpack_writer *writer,
+		struct lttng_interpreter_output *output)
+{
+	const struct lttng_integer_type *integer_type = NULL;
+	const struct lttng_type *nested_type;
+	uint8_t *ptr;
+	bool signedness;
+	int ret, i;
+
+	ret = lttng_msgpack_begin_array(writer, output->u.sequence.nr_elem);
+	if (ret) {
+		WARN_ON_ONCE(1);
+		goto end;
+	}
+
+	ptr = (uint8_t *) output->u.sequence.ptr;
+	nested_type = output->u.sequence.nested_type;
+	switch (nested_type->atype) {
+	case atype_integer:
+		integer_type = &nested_type->u.integer;
+		break;
+	case atype_enum_nestable:
+		/* Treat enumeration as an integer. */
+		integer_type = &nested_type->u.enum_nestable.container_type->u.integer;
+		break;
+	default:
+		/*
+		 * Capture of array of non-integer is not supported.  Fix:
+		 * bail out with an error instead of falling through and
+		 * dereferencing the NULL integer_type below.
+		 */
+		WARN_ON(1);
+		ret = -1;
+		goto end;
+	}
+	signedness = integer_type->signedness;
+	for (i = 0; i < output->u.sequence.nr_elem; i++) {
+		if (signedness) {
+			ret = lttng_msgpack_write_signed_integer(writer,
+				capture_sequence_element_signed(ptr, integer_type));
+			if (ret) {
+				WARN_ON_ONCE(1);
+				goto end;
+			}
+		} else {
+			ret = lttng_msgpack_write_unsigned_integer(writer,
+				capture_sequence_element_unsigned(ptr, integer_type));
+			if (ret) {
+				WARN_ON_ONCE(1);
+				goto end;
+			}
+		}
+
+		/*
+		 * We assume that alignment is smaller or equal to the size.
+		 * This currently holds true but if it changes in the future,
+		 * we will want to change the pointer arithmetics below to
+		 * take into account that the next element might be further
+		 * away.
+		 */
+		WARN_ON(integer_type->alignment > integer_type->size);
+
+		/* Size is in number of bits. */
+		ptr += (integer_type->size / CHAR_BIT);
+	}
+
+	ret = lttng_msgpack_end_array(writer);
+	if (ret)
+		WARN_ON_ONCE(1);
+end:
+	return ret;
+}
+
+/*
+ * Serialize one interpreter output value into the notification's msgpack
+ * capture buffer.  Returns 0 on success, negative value on error.
+ *
+ * Fix: `&notif` had been mangled into `¬if` by an HTML-entity encoding
+ * error ("&not" -> U+00AC); restored the address-of expression.
+ */
+static
+int notification_append_capture(
+		struct lttng_trigger_notification *notif,
+		struct lttng_interpreter_output *output)
+{
+	struct lttng_msgpack_writer *writer = &notif->writer;
+	int ret = 0;
+
+	switch (output->type) {
+	case LTTNG_INTERPRETER_TYPE_S64:
+		ret = lttng_msgpack_write_signed_integer(writer, output->u.s);
+		if (ret) {
+			WARN_ON_ONCE(1);
+			goto end;
+		}
+		break;
+	case LTTNG_INTERPRETER_TYPE_U64:
+		ret = lttng_msgpack_write_unsigned_integer(writer, output->u.u);
+		if (ret) {
+			WARN_ON_ONCE(1);
+			goto end;
+		}
+		break;
+	case LTTNG_INTERPRETER_TYPE_STRING:
+		ret = lttng_msgpack_write_str(writer, output->u.str.str);
+		if (ret) {
+			WARN_ON_ONCE(1);
+			goto end;
+		}
+		break;
+	case LTTNG_INTERPRETER_TYPE_SEQUENCE:
+		ret = capture_sequence(writer, output);
+		if (ret) {
+			WARN_ON_ONCE(1);
+			goto end;
+		}
+		break;
+	case LTTNG_INTERPRETER_TYPE_SIGNED_ENUM:
+	case LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM:
+		ret = capture_enum(writer, output);
+		if (ret) {
+			WARN_ON_ONCE(1);
+			goto end;
+		}
+		break;
+	default:
+		/* Unknown interpreter output type. */
+		ret = -1;
+		WARN_ON(1);
+	}
+end:
+	return ret;
+}
+
+/*
+ * Append a msgpack nil for a capture whose bytecode evaluation failed.
+ * Fix: restored `&notif` (was mangled into `¬if` by an HTML-entity
+ * encoding error).
+ */
+static
+int notification_append_empty_capture(
+		struct lttng_trigger_notification *notif)
+{
+	int ret = lttng_msgpack_write_nil(&notif->writer);
+	if (ret)
+		WARN_ON_ONCE(1);
+
+	return ret;
+}
+
+/*
+ * Initialize the on-stack notification state: when the trigger has capture
+ * expressions, set up the msgpack writer over the capture buffer and open
+ * the top-level array that will hold one entry per capture.
+ * Fix: restored `&notif` (was mangled into `¬if` by an HTML-entity
+ * encoding error).
+ */
+static
+int notification_init(struct lttng_trigger_notification *notif,
+		struct lttng_trigger *trigger)
+{
+	struct lttng_msgpack_writer *writer = &notif->writer;
+	int ret = 0;
+
+	notif->has_captures = false;
+
+	if (trigger->num_captures > 0) {
+		lttng_msgpack_writer_init(writer, notif->capture_buf,
+				CAPTURE_BUFFER_SIZE);
+
+		ret = lttng_msgpack_begin_array(writer, trigger->num_captures);
+		if (ret) {
+			WARN_ON_ONCE(1);
+			goto end;
+		}
+
+		notif->has_captures = true;
+	}
+
+end:
+	return ret;
+}
+
+/*
+ * Increment this trigger's slot in the group's error counter.  Called when
+ * a notification cannot be written to the ring buffer (e.g. buffer full).
+ */
+static
+void record_error(struct lttng_trigger *trigger)
+{
+
+ struct lttng_trigger_group *trigger_group = trigger->group;
+ size_t dimension_index[1];
+ int ret;
+
+ /* The counter is 1-dimensional, indexed by the trigger's slot. */
+ dimension_index[0] = trigger->error_counter_index;
+
+ ret = trigger_group->error_counter->ops->counter_add(
+ trigger_group->error_counter->counter,
+ dimension_index, 1);
+ if (ret)
+ WARN_ON_ONCE(1);
+}
+
+/*
+ * Reserve space in the trigger group's notification channel, write the
+ * fixed-size notification header followed by the raw msgpack capture
+ * buffer, and commit.  On reservation failure, bump the trigger's error
+ * counter instead of dropping silently.
+ * Fix: restored `&notif` (was mangled into `¬if` by an HTML-entity
+ * encoding error).
+ */
+static
+void notification_send(struct lttng_trigger_notification *notif,
+		struct lttng_trigger *trigger)
+{
+	struct lttng_trigger_group *trigger_group = trigger->group;
+	struct lib_ring_buffer_ctx ctx;
+	struct lttng_kernel_trigger_notification kernel_notif;
+	size_t capture_buffer_content_len, reserve_size;
+	int ret;
+
+	reserve_size = sizeof(kernel_notif);
+	kernel_notif.id = trigger->id;
+
+	if (notif->has_captures) {
+		/* Number of bytes actually serialized by the msgpack writer. */
+		capture_buffer_content_len = notif->writer.write_pos - notif->writer.buffer;
+	} else {
+		capture_buffer_content_len = 0;
+	}
+
+	WARN_ON_ONCE(capture_buffer_content_len > CAPTURE_BUFFER_SIZE);
+
+	reserve_size += capture_buffer_content_len;
+	kernel_notif.capture_buf_size = capture_buffer_content_len;
+
+	lib_ring_buffer_ctx_init(&ctx, trigger_group->chan, NULL, reserve_size,
+			lttng_alignof(kernel_notif), -1);
+	ret = trigger_group->ops->event_reserve(&ctx, 0);
+	if (ret < 0) {
+		/* Reservation failed: account the lost notification. */
+		record_error(trigger);
+		return;
+	}
+
+	lib_ring_buffer_align_ctx(&ctx, lttng_alignof(kernel_notif));
+
+	/* Write the notif structure. */
+	trigger_group->ops->event_write(&ctx, &kernel_notif,
+			sizeof(kernel_notif));
+
+	/*
+	 * Write the capture buffer.  No need to re-align: what follows is a
+	 * raw byte buffer.
+	 */
+	trigger_group->ops->event_write(&ctx, &notif->capture_buf,
+			capture_buffer_content_len);
+
+	trigger_group->ops->event_commit(&ctx);
+	/* Defer reader wakeup out of probe context. */
+	irq_work_queue(&trigger_group->wakeup_pending);
+}
+
+/*
+ * Entry point called from instrumentation probes when a trigger fires:
+ * evaluate all capture bytecodes (if any), then send the notification
+ * (header + capture buffer) to the sessiond through the group's channel.
+ * Fixes: restored `&notif` (HTML-entity mangling) and added the missing
+ * trailing newline in the printk message.
+ */
+void lttng_trigger_notification_send(struct lttng_trigger *trigger,
+		struct lttng_probe_ctx *lttng_probe_ctx,
+		const char *stack_data)
+{
+	struct lttng_trigger_notification notif = {0};
+	int ret;
+
+	if (unlikely(!READ_ONCE(trigger->enabled)))
+		return;
+
+	ret = notification_init(&notif, trigger);
+	if (ret) {
+		WARN_ON_ONCE(1);
+		goto end;
+	}
+
+	if (unlikely(!list_empty(&trigger->capture_bytecode_runtime_head))) {
+		struct lttng_bytecode_runtime *capture_bc_runtime;
+
+		/*
+		 * Iterate over all the capture bytecodes. If the interpreter
+		 * functions returns successfully, append the value of the
+		 * `output` parameter to the capture buffer. If the interpreter
+		 * fails, append an empty capture to the buffer.
+		 */
+		list_for_each_entry(capture_bc_runtime,
+				&trigger->capture_bytecode_runtime_head, node) {
+			struct lttng_interpreter_output output;
+
+			if (capture_bc_runtime->interpreter_funcs.capture(capture_bc_runtime,
+					lttng_probe_ctx, stack_data, &output) & LTTNG_INTERPRETER_RECORD_FLAG)
+				ret = notification_append_capture(&notif, &output);
+			else
+				ret = notification_append_empty_capture(&notif);
+
+			if (ret)
+				printk(KERN_WARNING "Error appending capture to notification\n");
+		}
+	}
+
+	/*
+	 * Send the notification (including the capture buffer) to the
+	 * sessiond.
+	 */
+	notification_send(&notif, trigger);
+end:
+	return;
+}
#include <blacklist/kprobes.h>
static
-int lttng_kprobes_handler_pre(struct kprobe *p, struct pt_regs *regs)
+int lttng_kprobes_event_handler_pre(struct kprobe *p, struct pt_regs *regs)
{
struct lttng_event *event =
container_of(p, struct lttng_event, u.kprobe.kp);
return 0;
}
+/*
+ * kprobe pre-handler for triggers: fire a notification (no payload) when
+ * the probed symbol is hit and the trigger is enabled.  Always returns 0
+ * so the probed instruction executes normally.
+ */
+static
+int lttng_kprobes_trigger_handler_pre(struct kprobe *p, struct pt_regs *regs)
+{
+ struct lttng_trigger *trigger =
+ container_of(p, struct lttng_trigger, u.kprobe.kp);
+
+ if (unlikely(!READ_ONCE(trigger->enabled)))
+ return 0;
+
+ trigger->send_notification(trigger, NULL, NULL);
+
+ return 0;
+}
+
/*
* Create event description
*/
return ret;
}
-int lttng_kprobes_register(const char *name,
- const char *symbol_name,
+/*
+ * Create trigger description
+ */
+/*
+ * Allocate and fill the event description attached to a kprobe trigger.
+ * On success, ownership of the description is transferred to the trigger
+ * (freed by lttng_kprobes_destroy_trigger_private()).
+ */
+static
+int lttng_create_kprobe_trigger(const char *name, struct lttng_trigger *trigger)
+{
+ struct lttng_event_desc *desc;
+ int ret;
+
+ desc = kzalloc(sizeof(*trigger->desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+ desc->name = kstrdup(name, GFP_KERNEL);
+ if (!desc->name) {
+ ret = -ENOMEM;
+ goto error_str;
+ }
+ /* Triggers carry no event payload fields. */
+ desc->nr_fields = 0;
+
+ desc->owner = THIS_MODULE;
+ trigger->desc = desc;
+
+ return 0;
+
+error_str:
+ kfree(desc);
+ return ret;
+}
+
+/*
+ * Common kprobe registration helper shared by the event and trigger paths.
+ * Fix: restored `&lttng_kp` in the memset() and register_kprobe() calls
+ * (it had been mangled into `<tng_kp` by an HTML-entity encoding error).
+ */
+static
+int _lttng_kprobes_register(const char *symbol_name,
 uint64_t offset,
 uint64_t addr,
- struct lttng_event *event)
+ struct lttng_kprobe *lttng_kp,
+ kprobe_pre_handler_t pre_handler)
{
 int ret;
 if (symbol_name[0] == '\0')
 symbol_name = NULL;
- ret = lttng_create_kprobe_event(name, event);
- if (ret)
- goto error;
- memset(&event->u.kprobe.kp, 0, sizeof(event->u.kprobe.kp));
- event->u.kprobe.kp.pre_handler = lttng_kprobes_handler_pre;
+ memset(&lttng_kp->kp, 0, sizeof(lttng_kp->kp));
+ lttng_kp->kp.pre_handler = pre_handler;
+
 if (symbol_name) {
- event->u.kprobe.symbol_name =
+ lttng_kp->symbol_name =
 kzalloc(LTTNG_KERNEL_SYM_NAME_LEN * sizeof(char),
 GFP_KERNEL);
- if (!event->u.kprobe.symbol_name) {
+ if (!lttng_kp->symbol_name) {
 ret = -ENOMEM;
 goto name_error;
 }
- memcpy(event->u.kprobe.symbol_name, symbol_name,
+ memcpy(lttng_kp->symbol_name, symbol_name,
 LTTNG_KERNEL_SYM_NAME_LEN * sizeof(char));
- event->u.kprobe.kp.symbol_name =
- event->u.kprobe.symbol_name;
+ lttng_kp->kp.symbol_name = lttng_kp->symbol_name;
 }
- event->u.kprobe.kp.offset = offset;
- event->u.kprobe.kp.addr = (void *) (unsigned long) addr;
+
+ lttng_kp->kp.offset = offset;
+ lttng_kp->kp.addr = (void *) (unsigned long) addr;
 /*
 * Ensure the memory we just allocated don't trigger page faults.
 */
 wrapper_vmalloc_sync_mappings();
- ret = register_kprobe(&event->u.kprobe.kp);
+ ret = register_kprobe(&lttng_kp->kp);
 if (ret)
 goto register_error;
+
 return 0;
register_error:
- kfree(event->u.kprobe.symbol_name);
+ kfree(lttng_kp->symbol_name);
name_error:
+ return ret;
+}
+
+/*
+ * Register a kprobe-backed event: build the event description, then arm
+ * the kprobe with the event pre-handler.  On kprobe failure the description
+ * is torn down before returning.
+ */
+int lttng_kprobes_register_event(const char *name,
+ const char *symbol_name,
+ uint64_t offset,
+ uint64_t addr,
+ struct lttng_event *event)
+{
+ int ret;
+
+ ret = lttng_create_kprobe_event(name, event);
+ if (ret)
+ goto error;
+
+ ret = _lttng_kprobes_register(symbol_name, offset, addr,
+ &event->u.kprobe, lttng_kprobes_event_handler_pre);
+ if (ret)
+ goto register_error;
+
+ return 0;
+
+register_error:
 kfree(event->desc->fields);
 kfree(event->desc->name);
 kfree(event->desc);
error:
 return ret;
}
-EXPORT_SYMBOL_GPL(lttng_kprobes_register);
+EXPORT_SYMBOL_GPL(lttng_kprobes_register_event);
-void lttng_kprobes_unregister(struct lttng_event *event)
+/*
+ * Register a kprobe-backed trigger.  The probed symbol name doubles as the
+ * trigger description's name.
+ * NOTE(review): on register_error, trigger->desc is freed but left
+ * dangling (not reset to NULL) — confirm callers do not touch it after a
+ * failed registration.
+ */
+int lttng_kprobes_register_trigger(const char *symbol_name,
+ uint64_t offset,
+ uint64_t addr,
+ struct lttng_trigger *trigger)
+{
+ int ret;
+ ret = lttng_create_kprobe_trigger(symbol_name, trigger);
+ if (ret)
+ goto error;
+
+ ret = _lttng_kprobes_register(symbol_name, offset, addr,
+ &trigger->u.kprobe, lttng_kprobes_trigger_handler_pre);
+ if (ret)
+ goto register_error;
+
+ return 0;
+
+register_error:
+ kfree(trigger->desc->name);
+ kfree(trigger->desc);
+error:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lttng_kprobes_register_trigger);
+EXPORT_SYMBOL_GPL(lttng_kprobes_register_trigger);
+
+/* Disarm the kprobe backing this event. */
+void lttng_kprobes_unregister_event(struct lttng_event *event)
{
 unregister_kprobe(&event->u.kprobe.kp);
}
-EXPORT_SYMBOL_GPL(lttng_kprobes_unregister);
+EXPORT_SYMBOL_GPL(lttng_kprobes_unregister_event);
+
+/* Disarm the kprobe backing this trigger. */
+void lttng_kprobes_unregister_trigger(struct lttng_trigger *trigger)
+{
+ unregister_kprobe(&trigger->u.kprobe.kp);
+}
+EXPORT_SYMBOL_GPL(lttng_kprobes_unregister_trigger);
-void lttng_kprobes_destroy_private(struct lttng_event *event)
+/*
+ * Free kprobe-private event memory.
+ * NOTE(review): call only after lttng_kprobes_unregister_event() — confirm
+ * at the call sites.
+ */
+void lttng_kprobes_destroy_event_private(struct lttng_event *event)
{
 kfree(event->u.kprobe.symbol_name);
 kfree(event->desc->fields);
 kfree(event->desc->name);
 kfree(event->desc);
}
-EXPORT_SYMBOL_GPL(lttng_kprobes_destroy_private);
+EXPORT_SYMBOL_GPL(lttng_kprobes_destroy_event_private);
+
+/*
+ * Free kprobe-private trigger memory.
+ * NOTE(review): call only after lttng_kprobes_unregister_trigger() —
+ * confirm at the call sites.
+ */
+void lttng_kprobes_destroy_trigger_private(struct lttng_trigger *trigger)
+{
+ kfree(trigger->u.kprobe.symbol_name);
+ kfree(trigger->desc->name);
+ kfree(trigger->desc);
+}
+EXPORT_SYMBOL_GPL(lttng_kprobes_destroy_trigger_private);
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
#include <wrapper/vmalloc.h>
static
-int lttng_uprobes_handler_pre(struct uprobe_consumer *uc, struct pt_regs *regs)
+int lttng_uprobes_event_handler_pre(struct uprobe_consumer *uc, struct pt_regs *regs)
{
struct lttng_uprobe_handler *uprobe_handler =
container_of(uc, struct lttng_uprobe_handler, up_consumer);
- struct lttng_event *event = uprobe_handler->event;
+ struct lttng_event *event = uprobe_handler->u.event;
struct lttng_probe_ctx lttng_probe_ctx = {
.event = event,
.interruptible = !lttng_regs_irqs_disabled(regs),
return 0;
}
+/*
+ * uprobe handler for triggers: fire a notification (no payload) when the
+ * probed callsite is hit and the trigger is enabled.  Always returns 0 so
+ * the probed instruction executes normally.
+ */
+static
+int lttng_uprobes_trigger_handler_pre(struct uprobe_consumer *uc, struct pt_regs *regs)
+{
+ struct lttng_uprobe_handler *uprobe_handler =
+ container_of(uc, struct lttng_uprobe_handler, up_consumer);
+ struct lttng_trigger *trigger = uprobe_handler->u.trigger;
+
+ if (unlikely(!READ_ONCE(trigger->enabled)))
+ return 0;
+
+ trigger->send_notification(trigger, NULL, NULL);
+ return 0;
+}
+
/*
* Create event description.
*/
return ret;
}
+/*
+ * Create trigger description.
+ */
+/*
+ * Allocate and fill the event description attached to a uprobe trigger.
+ * On success, ownership of the description is transferred to the trigger
+ * (freed by lttng_uprobes_destroy_trigger_private()).
+ */
+static
+int lttng_create_uprobe_trigger(const char *name, struct lttng_trigger *trigger)
+{
+ struct lttng_event_desc *desc;
+ int ret;
+
+ desc = kzalloc(sizeof(*trigger->desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+ desc->name = kstrdup(name, GFP_KERNEL);
+ if (!desc->name) {
+ ret = -ENOMEM;
+ goto error_str;
+ }
+
+ /* Triggers carry no event payload fields. */
+ desc->nr_fields = 0;
+
+ desc->owner = THIS_MODULE;
+ trigger->desc = desc;
+
+ return 0;
+
+error_str:
+ kfree(desc);
+ return ret;
+}
+
/*
* Returns the inode struct from the current task and an fd. The inode is
* grabbed by this function and must be put once we are done with it using
return inode;
}
-int lttng_uprobes_add_callsite(struct lttng_event *event,
- struct lttng_kernel_event_callsite __user *callsite)
+
+static
+int lttng_uprobes_add_callsite(struct lttng_uprobe *uprobe,
+ struct lttng_kernel_event_callsite __user *callsite,
+ int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs),
+ void *priv_data)
{
int ret = 0;
struct lttng_uprobe_handler *uprobe_handler;
- if (!event) {
+ if (!priv_data) {
ret = -EINVAL;
goto end;
}
/* Ensure the memory we just allocated don't trigger page faults. */
wrapper_vmalloc_sync_mappings();
- uprobe_handler->event = event;
- uprobe_handler->up_consumer.handler = lttng_uprobes_handler_pre;
+ uprobe_handler->u.event = priv_data;
+ uprobe_handler->up_consumer.handler = handler;
ret = copy_from_user(&uprobe_handler->offset, &callsite->u.uprobe.offset, sizeof(uint64_t));
if (ret) {
goto register_error;
}
- ret = wrapper_uprobe_register(event->u.uprobe.inode,
+ ret = wrapper_uprobe_register(uprobe->inode,
uprobe_handler->offset, &uprobe_handler->up_consumer);
if (ret) {
printk(KERN_WARNING "LTTng: Error registering probe on inode %lu "
- "and offset 0x%llx\n", event->u.uprobe.inode->i_ino,
+ "and offset 0x%llx\n", uprobe->inode->i_ino,
uprobe_handler->offset);
ret = -1;
goto register_error;
}
- list_add(&uprobe_handler->node, &event->u.uprobe.head);
+ list_add(&uprobe_handler->node, &uprobe->head);
return ret;
end:
return ret;
}
-EXPORT_SYMBOL_GPL(lttng_uprobes_add_callsite);
-int lttng_uprobes_register(const char *name, int fd, struct lttng_event *event)
+/* Add a user-space callsite to an event, armed with the event handler. */
+int lttng_uprobes_event_add_callsite(struct lttng_event *event,
+ struct lttng_kernel_event_callsite __user *callsite)
+{
+ return lttng_uprobes_add_callsite(&event->u.uprobe, callsite,
+ lttng_uprobes_event_handler_pre, event);
+}
+EXPORT_SYMBOL_GPL(lttng_uprobes_event_add_callsite);
+
+/* Add a user-space callsite to a trigger, armed with the trigger handler. */
+int lttng_uprobes_trigger_add_callsite(struct lttng_trigger *trigger,
+ struct lttng_kernel_event_callsite __user *callsite)
+{
+ return lttng_uprobes_add_callsite(&trigger->u.uprobe, callsite,
+ lttng_uprobes_trigger_handler_pre, trigger);
+}
+EXPORT_SYMBOL_GPL(lttng_uprobes_trigger_add_callsite);
+
+/*
+ * Common uprobe setup shared by the event and trigger paths: resolve the
+ * target file's inode from the caller-supplied fd and initialize the
+ * callsite list.  The inode reference is held until the matching
+ * destroy-private function calls iput().
+ */
+static
+int lttng_uprobes_register(struct lttng_uprobe *uprobe, int fd)
{
 int ret = 0;
 struct inode *inode;
- ret = lttng_create_uprobe_event(name, event);
- if (ret)
- goto error;
-
 inode = get_inode_from_fd(fd);
 if (!inode) {
 printk(KERN_WARNING "LTTng: Cannot get inode from fd\n");
 ret = -EBADF;
 goto inode_error;
 }
+ uprobe->inode = inode;
+ INIT_LIST_HEAD(&uprobe->head);
+
+inode_error:
+ return ret;
+}
+
+/*
+ * Register a uprobe-backed event: build the event description, then
+ * resolve the target inode.  On failure the description is torn down
+ * before returning.
+ */
+int lttng_uprobes_register_event(const char *name, int fd, struct lttng_event *event)
+{
+ int ret = 0;
+
+ ret = lttng_create_uprobe_event(name, event);
+ if (ret)
+ goto error;
+
+ ret = lttng_uprobes_register(&event->u.uprobe, fd);
+ if (ret)
+ goto register_error;
 return 0;
-inode_error:
+register_error:
 kfree(event->desc->name);
 kfree(event->desc);
error:
 return ret;
}
-EXPORT_SYMBOL_GPL(lttng_uprobes_register);
+EXPORT_SYMBOL_GPL(lttng_uprobes_register_event);
-void lttng_uprobes_unregister(struct lttng_event *event)
+/*
+ * Register a uprobe-backed trigger: build the trigger description, then
+ * resolve the target inode.  On failure the description is torn down
+ * before returning.
+ */
+int lttng_uprobes_register_trigger(const char *name, int fd,
+ struct lttng_trigger *trigger)
+{
+ int ret = 0;
+
+ ret = lttng_create_uprobe_trigger(name, trigger);
+ if (ret)
+ goto error;
+
+ ret = lttng_uprobes_register(&trigger->u.uprobe, fd);
+ if (ret)
+ goto register_error;
+
+ return 0;
+
+register_error:
+ kfree(trigger->desc->name);
+ kfree(trigger->desc);
+error:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lttng_uprobes_register_trigger);
+EXPORT_SYMBOL_GPL(lttng_uprobes_register_trigger);
+
+static
+void lttng_uprobes_unregister(struct inode *inode, struct list_head *head)
{
struct lttng_uprobe_handler *iter, *tmp;
* Iterate over the list of handler, remove each handler from the list
* and free the struct.
*/
- list_for_each_entry_safe(iter, tmp, &event->u.uprobe.head, node) {
- wrapper_uprobe_unregister(event->u.uprobe.inode, iter->offset,
- &iter->up_consumer);
+ list_for_each_entry_safe(iter, tmp, head, node) {
+ wrapper_uprobe_unregister(inode, iter->offset, &iter->up_consumer);
list_del(&iter->node);
kfree(iter);
}
+
+}
+
+/* Unregister every uprobe callsite attached to this event. */
+void lttng_uprobes_unregister_event(struct lttng_event *event)
+{
+ lttng_uprobes_unregister(event->u.uprobe.inode, &event->u.uprobe.head);
+}
-EXPORT_SYMBOL_GPL(lttng_uprobes_unregister);
+EXPORT_SYMBOL_GPL(lttng_uprobes_unregister_event);
-void lttng_uprobes_destroy_private(struct lttng_event *event)
+/* Unregister every uprobe callsite attached to this trigger. */
+void lttng_uprobes_unregister_trigger(struct lttng_trigger *trigger)
+{
+ lttng_uprobes_unregister(trigger->u.uprobe.inode, &trigger->u.uprobe.head);
+}
+EXPORT_SYMBOL_GPL(lttng_uprobes_unregister_trigger);
+
+/*
+ * Release the inode reference and free uprobe-private event memory.
+ * NOTE(review): call only after lttng_uprobes_unregister_event() — confirm
+ * at the call sites.
+ */
+void lttng_uprobes_destroy_event_private(struct lttng_event *event)
{
 iput(event->u.uprobe.inode);
 kfree(event->desc->name);
 kfree(event->desc);
}
-EXPORT_SYMBOL_GPL(lttng_uprobes_destroy_private);
+EXPORT_SYMBOL_GPL(lttng_uprobes_destroy_event_private);
+
+/*
+ * Release the inode reference and free uprobe-private trigger memory.
+ * NOTE(review): call only after lttng_uprobes_unregister_trigger() —
+ * confirm at the call sites.
+ */
+void lttng_uprobes_destroy_trigger_private(struct lttng_trigger *trigger)
+{
+ iput(trigger->u.uprobe.inode);
+ kfree(trigger->desc->name);
+ kfree(trigger->desc);
+}
+EXPORT_SYMBOL_GPL(lttng_uprobes_destroy_trigger_private);
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Yannick Brosseau");