ACLOCAL_AMFLAGS = -I m4
SUBDIRS = . include snprintf libringbuffer liblttng-ust-comm \
+ libcounter \
+ libmsgpack \
liblttng-ust \
liblttng-ust-ctl \
liblttng-ust-fd \
dnl Version information
m4_define([V_MAJOR], [2])
-m4_define([V_MINOR], [12])
+m4_define([V_MINOR], [13])
m4_define([V_PATCH], [0])
-m4_define([V_EXTRA], [rc1])
-m4_define([V_NAME], [[(Ta) Meilleure]])
-m4_define([V_DESC], [[Ta Meilleure is a Northeast IPA beer brewed by Lagabière. Translating to "Your best one", this beer gives out strong aromas of passion fruit, lemon, and peaches. Tastewise, expect a lot of fruit, a creamy texture, and a smooth lingering hop bitterness.]])
+m4_define([V_EXTRA], [pre])
+m4_define([V_NAME], [[Codename TBD]])
+m4_define([V_DESC], [[Description TBD]])
m4_define([V_STRING], [V_MAJOR.V_MINOR.V_PATCH])
m4_ifdef([V_EXTRA], [m4_append([V_STRING], [-V_EXTRA])])
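dnl With the definitions above, V_STRING expands to "2.13.0-pre": the
dnl "-V_EXTRA" suffix is only appended when V_EXTRA is defined.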
include/Makefile
include/lttng/ust-version.h
snprintf/Makefile
+ libcounter/Makefile
+ libmsgpack/Makefile
libringbuffer/Makefile
liblttng-ust-comm/Makefile
liblttng-ust/Makefile
tests/snprintf/Makefile
tests/ust-elf/Makefile
tests/benchmark/Makefile
+ tests/libmsgpack/Makefile
tests/utils/Makefile
tests/test-app-ctx/Makefile
tests/gcc-weak-hidden/Makefile
lttng/lttng-ust-tracelog.h \
lttng/ust-clock.h \
lttng/ust-getcpu.h \
- lttng/ust-elf.h
+ lttng/ust-elf.h \
+ lttng/counter-config.h \
+ lttng/bitmap.h
# note: usterr-signal-safe.h, core.h and share.h need namespace cleanup.
--- /dev/null
+/*
+ * lttng/bitmap.h
+ *
+ * LTTng Bitmap API
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _LTTNG_BITMAP_H
+#define _LTTNG_BITMAP_H
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+#include <urcu/uatomic.h>
+#include <stdbool.h>
+
+static inline void lttng_bitmap_index(unsigned int index, unsigned int *word,
+ unsigned int *bit)
+{
+ *word = index / CAA_BITS_PER_LONG;
+ *bit = index % CAA_BITS_PER_LONG;
+}
+
+static inline void lttng_bitmap_set_bit(unsigned int index, unsigned long *p)
+{
+ unsigned int word, bit;
+ unsigned long val;
+
+ lttng_bitmap_index(index, &word, &bit);
+ val = 1UL << bit;
+ uatomic_or(p + word, val);
+}
+
+static inline void lttng_bitmap_clear_bit(unsigned int index, unsigned long *p)
+{
+ unsigned int word, bit;
+ unsigned long val;
+
+ lttng_bitmap_index(index, &word, &bit);
+ val = ~(1UL << bit);
+ uatomic_and(p + word, val);
+}
+
+static inline bool lttng_bitmap_test_bit(unsigned int index, unsigned long *p)
+{
+ unsigned int word, bit;
+
+ lttng_bitmap_index(index, &word, &bit);
+ return (CMM_LOAD_SHARED(p[word]) >> bit) & 0x1;
+}
+
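+/*
+ * Example (illustrative sketch, not part of the API): set, test, then
+ * clear a single bit. The caller is responsible for sizing the bitmap
+ * to at least ALIGN(nr_bits, CAA_BITS_PER_LONG) bits.
+ *
+ *	unsigned long bits[1] = { 0 };
+ *
+ *	lttng_bitmap_set_bit(3, bits);
+ *	if (lttng_bitmap_test_bit(3, bits))
+ *		lttng_bitmap_clear_bit(3, bits);
+ */
+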
+#endif /* _LTTNG_BITMAP_H */
--- /dev/null
+/*
+ * lttng/counter-config.h
+ *
+ * LTTng Counters Configuration
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _LTTNG_COUNTER_CONFIG_H
+#define _LTTNG_COUNTER_CONFIG_H
+
+#include <stdint.h>
+
+enum lib_counter_config_alloc {
+ COUNTER_ALLOC_PER_CPU = (1 << 0),
+ COUNTER_ALLOC_GLOBAL = (1 << 1),
+};
+
+enum lib_counter_config_sync {
+ COUNTER_SYNC_PER_CPU,
+ COUNTER_SYNC_GLOBAL,
+};
+
+struct lib_counter_config {
+ uint32_t alloc; /* enum lib_counter_config_alloc flags */
+ enum lib_counter_config_sync sync;
+ enum {
+ COUNTER_ARITHMETIC_OVERFLOW,
+ COUNTER_ARITHMETIC_SATURATE, /* TODO */
+ } arithmetic;
+ enum {
+ COUNTER_SIZE_8_BIT = 1,
+ COUNTER_SIZE_16_BIT = 2,
+ COUNTER_SIZE_32_BIT = 4,
+ COUNTER_SIZE_64_BIT = 8,
+ } counter_size;
+};
+
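+/*
+ * Illustrative sketch (not part of this header): a configuration a
+ * client could declare for per-cpu 32-bit modular counters backed by a
+ * global counter to aggregate into.
+ *
+ *	static const struct lib_counter_config client_config = {
+ *		.alloc = COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL,
+ *		.sync = COUNTER_SYNC_PER_CPU,
+ *		.arithmetic = COUNTER_ARITHMETIC_OVERFLOW,
+ *		.counter_size = COUNTER_SIZE_32_BIT,
+ *	};
+ */
+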
+#endif /* _LTTNG_COUNTER_CONFIG_H */
*/
} LTTNG_PACKED;
+#define LTTNG_UST_TRIGGER_PADDING1 16
+#define LTTNG_UST_TRIGGER_PADDING2 (LTTNG_UST_SYM_NAME_LEN + 32)
+struct lttng_ust_trigger {
+ uint64_t id;
+ uint64_t error_counter_index;
+ enum lttng_ust_instrumentation instrumentation;
+ char name[LTTNG_UST_SYM_NAME_LEN]; /* event name */
+
+ enum lttng_ust_loglevel_type loglevel_type;
+ int loglevel; /* value, -1: all */
+ char padding[LTTNG_UST_TRIGGER_PADDING1];
+
+ /* Per instrumentation type configuration */
+ union {
+ char padding[LTTNG_UST_TRIGGER_PADDING2];
+ } u;
+} LTTNG_PACKED;
+
+enum lttng_ust_counter_arithmetic {
+ LTTNG_UST_COUNTER_ARITHMETIC_MODULAR = 0,
+ LTTNG_UST_COUNTER_ARITHMETIC_SATURATION = 1,
+};
+
+enum lttng_ust_counter_bitness {
+ LTTNG_UST_COUNTER_BITNESS_32BITS = 4,
+ LTTNG_UST_COUNTER_BITNESS_64BITS = 8,
+};
+
+struct lttng_ust_counter_dimension {
+ uint64_t size;
+ uint64_t underflow_index;
+ uint64_t overflow_index;
+ uint8_t has_underflow;
+ uint8_t has_overflow;
+} LTTNG_PACKED;
+
+#define LTTNG_UST_COUNTER_DIMENSION_MAX 8
+struct lttng_ust_counter_conf {
+ uint32_t arithmetic; /* enum lttng_ust_counter_arithmetic */
+ uint32_t bitness; /* enum lttng_ust_counter_bitness */
+ uint32_t number_dimensions;
+ int64_t global_sum_step;
+ struct lttng_ust_counter_dimension dimensions[LTTNG_UST_COUNTER_DIMENSION_MAX];
+} LTTNG_PACKED;
+
+struct lttng_ust_counter_value {
+ uint32_t number_dimensions;
+ uint64_t dimension_indexes[LTTNG_UST_COUNTER_DIMENSION_MAX];
+ int64_t value;
+} LTTNG_PACKED;
+
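+/*
+ * Illustrative sketch: a one-dimension, 64-bit, modular counter
+ * configuration with 128 slots, as a session daemon could fill it
+ * before sending it over the command socket.
+ *
+ *	struct lttng_ust_counter_conf conf = {
+ *		.arithmetic = LTTNG_UST_COUNTER_ARITHMETIC_MODULAR,
+ *		.bitness = LTTNG_UST_COUNTER_BITNESS_64BITS,
+ *		.number_dimensions = 1,
+ *		.global_sum_step = 0,
+ *		.dimensions[0] = { .size = 128 },
+ *	};
+ */
+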
+#define LTTNG_TRIGGER_NOTIFICATION_PADDING 32
+struct lttng_ust_trigger_notification {
+ uint64_t id;
+ uint16_t capture_buf_size;
+ char padding[LTTNG_TRIGGER_NOTIFICATION_PADDING];
+} LTTNG_PACKED;
+
#define LTTNG_UST_EVENT_PADDING1 16
#define LTTNG_UST_EVENT_PADDING2 (LTTNG_UST_SYM_NAME_LEN + 32)
struct lttng_ust_event {
} u;
} LTTNG_PACKED;
+#define LTTNG_UST_COUNTER_PADDING1 (LTTNG_UST_SYM_NAME_LEN + 32)
+#define LTTNG_UST_COUNTER_DATA_MAX_LEN 4096U
+struct lttng_ust_counter {
+ uint64_t len;
+ char padding[LTTNG_UST_COUNTER_PADDING1];
+ char data[]; /* variable sized data */
+} LTTNG_PACKED;
+
+#define LTTNG_UST_COUNTER_GLOBAL_PADDING1 (LTTNG_UST_SYM_NAME_LEN + 32)
+struct lttng_ust_counter_global {
+ uint64_t len; /* shm len */
+ char padding[LTTNG_UST_COUNTER_GLOBAL_PADDING1];
+} LTTNG_PACKED;
+
+#define LTTNG_UST_COUNTER_CPU_PADDING1 (LTTNG_UST_SYM_NAME_LEN + 32)
+struct lttng_ust_counter_cpu {
+ uint64_t len; /* shm len */
+ uint32_t cpu_nr;
+ char padding[LTTNG_UST_COUNTER_CPU_PADDING1];
+} LTTNG_PACKED;
+
enum lttng_ust_field_type {
LTTNG_UST_FIELD_OTHER = 0,
LTTNG_UST_FIELD_INTEGER = 1,
LTTNG_UST_OBJECT_TYPE_STREAM = 1,
LTTNG_UST_OBJECT_TYPE_EVENT = 2,
LTTNG_UST_OBJECT_TYPE_CONTEXT = 3,
+ LTTNG_UST_OBJECT_TYPE_TRIGGER_GROUP = 4,
+ LTTNG_UST_OBJECT_TYPE_TRIGGER = 5,
+ LTTNG_UST_OBJECT_TYPE_COUNTER = 6,
+ LTTNG_UST_OBJECT_TYPE_COUNTER_GLOBAL = 7,
+ LTTNG_UST_OBJECT_TYPE_COUNTER_CPU = 8,
};
#define LTTNG_UST_OBJECT_DATA_PADDING1 32
int wakeup_fd;
uint32_t stream_nr;
} stream;
+ struct {
+ void *data;
+ } counter;
+ struct {
+ int shm_fd;
+ } counter_global;
+ struct {
+ int shm_fd;
+ uint32_t cpu_nr;
+ } counter_cpu;
char padding2[LTTNG_UST_OBJECT_DATA_PADDING2];
} u;
} LTTNG_PACKED;
char data[0];
} LTTNG_PACKED;
+#define CAPTURE_BYTECODE_MAX_LEN 65536
+#define LTTNG_UST_CAPTURE_PADDING 32
+struct lttng_ust_capture_bytecode {
+ uint32_t len;
+ uint32_t reloc_offset;
+ uint64_t seqnum;
+ char padding[LTTNG_UST_CAPTURE_PADDING];
+ char data[0];
+} LTTNG_PACKED;
+
#define LTTNG_UST_EXCLUSION_PADDING 32
struct lttng_ust_event_exclusion {
uint32_t count;
#define LTTNG_UST_FILTER _UST_CMD(0xA0)
#define LTTNG_UST_EXCLUSION _UST_CMD(0xA1)
+/* Trigger commands */
+#define LTTNG_UST_TRIGGER_GROUP_CREATE _UST_CMD(0xB0)
+#define LTTNG_UST_TRIGGER_CREATE \
+ _UST_CMDW(0xB1, struct lttng_ust_trigger)
+#define LTTNG_UST_CAPTURE _UST_CMD(0xB2)
+
+/* Session and Trigger group FD commands */
+#define LTTNG_UST_COUNTER \
+ _UST_CMDW(0xB3, struct lttng_ust_counter)
+#define LTTNG_UST_COUNTER_GLOBAL \
+ _UST_CMDW(0xB4, struct lttng_ust_counter_global)
+#define LTTNG_UST_COUNTER_CPU \
+ _UST_CMDW(0xB5, struct lttng_ust_counter_cpu)
+
#define LTTNG_UST_ROOT_HANDLE 0
struct lttng_ust_obj;
struct {
char *ctxname;
} app_context;
+ struct {
+ int trigger_notif_fd;
+ } trigger_handle;
+ struct {
+ void *counter_data;
+ } counter;
+ struct {
+ int shm_fd;
+ } counter_shm;
};
struct lttng_ust_objd_ops {
#include <limits.h>
#include <stddef.h>
#include <stdint.h>
+#include <stdbool.h>
#include <sys/types.h>
#include <lttng/ust-abi.h>
struct lttng_ust_object_data **context_data);
int ustctl_set_filter(int sock, struct lttng_ust_filter_bytecode *bytecode,
struct lttng_ust_object_data *obj_data);
+int ustctl_set_capture(int sock, struct lttng_ust_capture_bytecode *bytecode,
+ struct lttng_ust_object_data *obj_data);
int ustctl_set_exclusion(int sock, struct lttng_ust_event_exclusion *exclusion,
struct lttng_ust_object_data *obj_data);
int ustctl_start_session(int sock, int handle);
int ustctl_stop_session(int sock, int handle);
+/*
+ * ustctl_create_trigger_group creates a trigger group. It establishes the
+ * connection with the application by providing a file descriptor of the pipe
+ * to be used by the application when a trigger of that group is fired. It
+ * returns a handle to be used when creating triggers in that group.
+ */
+int ustctl_create_trigger_group(int sock, int pipe_fd,
+ struct lttng_ust_object_data **trigger_group);
+
+/*
+ * ustctl_create_trigger creates a trigger in a trigger group, given a trigger
+ * description and a trigger group handle. It returns a trigger handle to be
+ * used when enabling the trigger, attaching filters or exclusions, and
+ * disabling the trigger.
+ */
+int ustctl_create_trigger(int sock, struct lttng_ust_trigger *trigger,
+ struct lttng_ust_object_data *trigger_group,
+ struct lttng_ust_object_data **trigger_data);
+
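+/*
+ * Illustrative call sequence (sketch: error handling elided; `sock' and
+ * the pipe are assumed to be set up by the caller):
+ *
+ *	struct lttng_ust_object_data *group, *trigger_data;
+ *	struct lttng_ust_trigger trigger = {
+ *		.id = 42,
+ *		.instrumentation = LTTNG_UST_TRACEPOINT,
+ *	};
+ *	int ret;
+ *
+ *	strncpy(trigger.name, "my_app:my_event", LTTNG_UST_SYM_NAME_LEN - 1);
+ *	ret = ustctl_create_trigger_group(sock, pipe_fd, &group);
+ *	ret = ustctl_create_trigger(sock, &trigger, group, &trigger_data);
+ */
+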
/*
* ustctl_tracepoint_list returns a tracepoint list handle, or negative
* error value.
enum ustctl_channel_header header_type,
int ret_code); /* return code. 0 ok, negative error */
+/*
+ * Counter API.
+ */
+
+enum ustctl_counter_bitness {
+ USTCTL_COUNTER_BITNESS_32 = 4,
+ USTCTL_COUNTER_BITNESS_64 = 8,
+};
+
+enum ustctl_counter_arithmetic {
+ USTCTL_COUNTER_ARITHMETIC_MODULAR = 0,
+ USTCTL_COUNTER_ARITHMETIC_SATURATION = 1,
+};
+
+/* Used as alloc flags. */
+enum ustctl_counter_alloc {
+ USTCTL_COUNTER_ALLOC_PER_CPU = (1 << 0),
+ USTCTL_COUNTER_ALLOC_GLOBAL = (1 << 1),
+};
+
+struct ustctl_daemon_counter;
+
+int ustctl_get_nr_cpu_per_counter(void);
+
+struct ustctl_counter_dimension {
+ uint64_t size;
+ uint64_t underflow_index;
+ uint64_t overflow_index;
+ uint8_t has_underflow;
+ uint8_t has_overflow;
+};
+
+struct ustctl_daemon_counter *
+ ustctl_create_counter(size_t nr_dimensions,
+ const struct ustctl_counter_dimension *dimensions,
+ int64_t global_sum_step,
+ int global_counter_fd,
+ int nr_counter_cpu_fds,
+ const int *counter_cpu_fds,
+ enum ustctl_counter_bitness bitness,
+ enum ustctl_counter_arithmetic arithmetic,
+ uint32_t alloc_flags);
+
+int ustctl_create_counter_data(struct ustctl_daemon_counter *counter,
+ struct lttng_ust_object_data **counter_data);
+
+int ustctl_create_counter_global_data(struct ustctl_daemon_counter *counter,
+ struct lttng_ust_object_data **counter_global_data);
+int ustctl_create_counter_cpu_data(struct ustctl_daemon_counter *counter, int cpu,
+ struct lttng_ust_object_data **counter_cpu_data);
+
+/*
+ * Each counter data and counter cpu data object created needs to be
+ * destroyed before calling ustctl_destroy_counter().
+ */
+void ustctl_destroy_counter(struct ustctl_daemon_counter *counter);
+
+int ustctl_send_counter_data_to_ust(int sock, int parent_handle,
+ struct lttng_ust_object_data *counter_data);
+int ustctl_send_counter_global_data_to_ust(int sock,
+ struct lttng_ust_object_data *counter_data,
+ struct lttng_ust_object_data *counter_global_data);
+int ustctl_send_counter_cpu_data_to_ust(int sock,
+ struct lttng_ust_object_data *counter_data,
+ struct lttng_ust_object_data *counter_cpu_data);
+
+int ustctl_counter_read(struct ustctl_daemon_counter *counter,
+ const size_t *dimension_indexes,
+ int cpu, int64_t *value,
+ bool *overflow, bool *underflow);
+int ustctl_counter_aggregate(struct ustctl_daemon_counter *counter,
+ const size_t *dimension_indexes,
+ int64_t *value,
+ bool *overflow, bool *underflow);
+int ustctl_counter_clear(struct ustctl_daemon_counter *counter,
+ const size_t *dimension_indexes);
+
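+/*
+ * Illustrative sketch of the daemon-side counter lifetime (assumes a
+ * one-dimension, per-cpu, 64-bit modular counter; error handling and
+ * the ustctl_send_*_to_ust() exchange are elided):
+ *
+ *	struct ustctl_counter_dimension dim = { .size = 128 };
+ *	struct ustctl_daemon_counter *counter;
+ *	size_t index = 3;
+ *	int64_t value;
+ *	bool overflow, underflow;
+ *
+ *	counter = ustctl_create_counter(1, &dim, 0, -1,
+ *			ustctl_get_nr_cpu_per_counter(), NULL,
+ *			USTCTL_COUNTER_BITNESS_64,
+ *			USTCTL_COUNTER_ARITHMETIC_MODULAR,
+ *			USTCTL_COUNTER_ALLOC_PER_CPU);
+ *	ustctl_counter_aggregate(counter, &index, &value,
+ *			&overflow, &underflow);
+ *	ustctl_destroy_counter(counter);
+ */
+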
#endif /* _LTTNG_UST_CTL_H */
struct lttng_ust_lib_ring_buffer_ctx;
struct lttng_ust_context_app;
struct lttng_event_field;
+struct lttng_trigger_group;
/*
* Data structures used by tracepoint event declarations, and by the
enum lttng_ust_dynamic_type sel;
union {
int64_t s64;
+ uint64_t u64;
const char *str;
double d;
} u;
union {
struct {
const char **model_emf_uri;
+ void (*trigger_callback)(void);
} ext;
char padding[LTTNG_UST_EVENT_DESC_PADDING];
} u;
/* Data structures used by the tracer. */
-enum lttng_enabler_type {
- LTTNG_ENABLER_STAR_GLOB,
- LTTNG_ENABLER_EVENT,
+enum lttng_enabler_format_type {
+ LTTNG_ENABLER_FORMAT_STAR_GLOB,
+ LTTNG_ENABLER_FORMAT_EVENT,
};
/*
* backward reference.
*/
struct lttng_enabler {
- enum lttng_enabler_type type;
+ enum lttng_enabler_format_type format_type;
/* head list of struct lttng_ust_filter_bytecode_node */
struct cds_list_head filter_bytecode_head;
/* head list of struct lttng_ust_excluder_node */
struct cds_list_head excluder_head;
- struct cds_list_head node; /* per-session list of enablers */
struct lttng_ust_event event_param;
- struct lttng_channel *chan;
- /*
- * Unused, but kept around to make it explicit that the tracer can do
- * it.
- */
- struct lttng_ctx *ctx;
unsigned int enabled:1;
};
struct ust_pending_probe;
struct lttng_event;
-struct lttng_ust_filter_bytecode_node {
- struct cds_list_head node;
- struct lttng_enabler *enabler;
- /*
- * struct lttng_ust_filter_bytecode has var. sized array, must
- * be last field.
- */
- struct lttng_ust_filter_bytecode bc;
-};
-
-struct lttng_ust_excluder_node {
- struct cds_list_head node;
- struct lttng_enabler *enabler;
- /*
- * struct lttng_ust_event_exclusion had variable sized array,
- * must be last field.
- */
- struct lttng_ust_event_exclusion excluder;
-};
/*
- * Filter return value masks.
+ * Bytecode interpreter return value masks.
*/
-enum lttng_filter_ret {
- LTTNG_FILTER_DISCARD = 0,
- LTTNG_FILTER_RECORD_FLAG = (1ULL << 0),
+enum lttng_bytecode_interpreter_ret {
+ LTTNG_INTERPRETER_DISCARD = 0,
+ LTTNG_INTERPRETER_RECORD_FLAG = (1ULL << 0),
/* Other bits are kept for future use. */
};
+struct lttng_interpreter_output;
+
/*
* This structure is used in the probes. More specifically, the `filter` and
 * `node` fields are explicitly used in the probes. When modifying this
*/
struct lttng_bytecode_runtime {
/* Associated bytecode */
- struct lttng_ust_filter_bytecode_node *bc;
- uint64_t (*filter)(void *filter_data, const char *filter_stack_data);
+ struct lttng_ust_bytecode_node *bc;
+ union {
+ uint64_t (*filter)(void *interpreter_data,
+ const char *interpreter_stack_data);
+ uint64_t (*capture)(void *interpreter_data,
+ const char *interpreter_stack_data,
+ struct lttng_interpreter_output *interpreter_output);
+ } interpreter_funcs;
int link_failed;
struct cds_list_head node; /* list of bytecode runtime in event */
- struct lttng_session *session;
+ /*
+ * Pointer to a URCU-protected pointer owned by a `struct
+ * lttng_session` or `struct lttng_trigger_group`.
+ */
+ struct lttng_ctx **pctx;
};
/*
- * Objects in a linked-list of enablers, owned by an event.
+ * Objects in a linked-list of enablers, owned by an event or trigger.
+ * This is used because an event (or a trigger) can be enabled by more than one
+ * enabler and we want a quick way to iterate over all enablers of an object.
+ *
+ * For example, event rules "my_app:a*" and "my_app:ab*" will both match the
+ * event with the name "my_app:abc".
*/
struct lttng_enabler_ref {
struct cds_list_head node; /* enabler ref list */
/* LTTng-UST 2.1 starts here */
/* list of struct lttng_bytecode_runtime, sorted by seqnum */
- struct cds_list_head bytecode_runtime_head;
+ struct cds_list_head filter_bytecode_runtime_head;
int has_enablers_without_bytecode;
/* Backward references: list of lttng_enabler_ref (ref to enablers) */
struct cds_list_head enablers_ref_head;
int registered; /* has reg'd tracepoint probe */
};
+struct lttng_trigger {
+ uint64_t id;
+ uint64_t error_counter_index;
+ int enabled;
+ int registered; /* has reg'd tracepoint probe */
+ size_t num_captures; /* Needed to allocate the msgpack array. */
+ struct cds_list_head filter_bytecode_runtime_head;
+ struct cds_list_head capture_bytecode_runtime_head;
+ int has_enablers_without_bytecode;
+ struct cds_list_head enablers_ref_head;
+ const struct lttng_event_desc *desc;
+ struct cds_hlist_node hlist; /* hashtable of triggers */
+ struct cds_list_head node; /* Node in the trigger group's trigger list */
+ struct lttng_trigger_group *group; /* weak ref */
+};
+
struct lttng_enum {
const struct lttng_enum_desc *desc;
struct lttng_session *session;
int tstate:1; /* Transient enable state */
};
+#define LTTNG_COUNTER_DIMENSION_MAX 8
+
+struct lttng_counter_dimension {
+ uint64_t size;
+ uint64_t underflow_index;
+ uint64_t overflow_index;
+ uint8_t has_underflow;
+ uint8_t has_overflow;
+};
+
+struct lttng_counter_ops {
+ struct lib_counter *(*counter_create)(size_t nr_dimensions,
+ const struct lttng_counter_dimension *dimensions,
+ int64_t global_sum_step,
+ int global_counter_fd,
+ int nr_counter_cpu_fds,
+ const int *counter_cpu_fds,
+ bool is_daemon);
+ void (*counter_destroy)(struct lib_counter *counter);
+ int (*counter_add)(struct lib_counter *counter,
+ const size_t *dimension_indexes, int64_t v);
+ int (*counter_read)(struct lib_counter *counter,
+ const size_t *dimension_indexes, int cpu,
+ int64_t *value, bool *overflow, bool *underflow);
+ int (*counter_aggregate)(struct lib_counter *counter,
+ const size_t *dimension_indexes, int64_t *value,
+ bool *overflow, bool *underflow);
+ int (*counter_clear)(struct lib_counter *counter, const size_t *dimension_indexes);
+};
+
#define LTTNG_UST_STACK_CTX_PADDING 32
struct lttng_stack_ctx {
struct lttng_event *event;
struct cds_hlist_head table[LTTNG_UST_EVENT_HT_SIZE];
};
+#define LTTNG_UST_TRIGGER_HT_BITS 12
+#define LTTNG_UST_TRIGGER_HT_SIZE (1U << LTTNG_UST_TRIGGER_HT_BITS)
+struct lttng_ust_trigger_ht {
+ struct cds_hlist_head table[LTTNG_UST_TRIGGER_HT_SIZE];
+};
+
#define LTTNG_UST_ENUM_HT_BITS 12
#define LTTNG_UST_ENUM_HT_SIZE (1U << LTTNG_UST_ENUM_HT_BITS)
struct lttng_ctx *ctx; /* contexts for filters. */
};
+struct lttng_counter {
+ int objd;
+ struct lttng_trigger_group *trigger_group; /* owner */
+ struct lttng_counter_transport *transport;
+ struct lib_counter *counter;
+ struct lttng_counter_ops *ops;
+};
+
+struct lttng_trigger_group {
+ int objd;
+ void *owner;
+ int notification_fd;
+ struct cds_list_head node; /* Trigger group handle list */
+ struct cds_list_head enablers_head;
+ struct cds_list_head triggers_head; /* list of triggers */
+ struct lttng_ust_trigger_ht triggers_ht; /* hashtable of triggers */
+ struct lttng_ctx *ctx; /* contexts for filters. */
+
+ struct lttng_counter *error_counter;
+ size_t error_counter_len;
+};
+
struct lttng_transport {
char *name;
struct cds_list_head node;
const struct lttng_ust_lib_ring_buffer_config *client_config;
};
+struct lttng_counter_transport {
+ char *name;
+ struct cds_list_head node;
+ struct lttng_counter_ops ops;
+ const struct lib_counter_config *client_config;
+};
+
struct lttng_session *lttng_session_create(void);
int lttng_session_enable(struct lttng_session *session);
int lttng_session_disable(struct lttng_session *session);
int lttng_session_statedump(struct lttng_session *session);
void lttng_session_destroy(struct lttng_session *session);
+void lttng_trigger_notification_send(struct lttng_trigger *trigger, const char *stack_data);
+
struct lttng_channel *lttng_channel_create(struct lttng_session *session,
const char *transport_name,
void *buf_addr,
int lttng_channel_enable(struct lttng_channel *channel);
int lttng_channel_disable(struct lttng_channel *channel);
-struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
- struct lttng_ust_event *event_param,
- struct lttng_channel *chan);
-int lttng_enabler_enable(struct lttng_enabler *enabler);
-int lttng_enabler_disable(struct lttng_enabler *enabler);
-int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
- struct lttng_ust_filter_bytecode_node *bytecode);
-int lttng_enabler_attach_context(struct lttng_enabler *enabler,
- struct lttng_ust_context *ctx);
-int lttng_enabler_attach_exclusion(struct lttng_enabler *enabler,
- struct lttng_ust_excluder_node *excluder);
-
int lttng_attach_context(struct lttng_ust_context *context_param,
union ust_args *uargs,
struct lttng_ctx **ctx, struct lttng_session *session);
-int lttng_session_context_init(struct lttng_ctx **ctx);
-
void lttng_transport_register(struct lttng_transport *transport);
void lttng_transport_unregister(struct lttng_transport *transport);
+void lttng_counter_transport_register(struct lttng_counter_transport *transport);
+void lttng_counter_transport_unregister(struct lttng_counter_transport *transport);
+
+struct lttng_counter *lttng_ust_counter_create(
+ const char *counter_transport_name,
+ size_t number_dimensions, const struct lttng_counter_dimension *dimensions);
+
void synchronize_trace(void);
int lttng_probe_register(struct lttng_probe_desc *desc);
extern const struct lttng_ust_client_lib_ring_buffer_client_cb *lttng_client_callbacks_overwrite;
struct lttng_transport *lttng_transport_find(const char *name);
+struct lttng_counter_transport *lttng_counter_transport_find(const char *name);
int lttng_probes_get_event_list(struct lttng_ust_tracepoint_list *list);
void lttng_probes_prune_event_list(struct lttng_ust_tracepoint_list *list);
struct lttng_ust_field_iter *
lttng_ust_field_list_get_iter_next(struct lttng_ust_field_list *list);
-void lttng_enabler_event_link_bytecode(struct lttng_event *event,
- struct lttng_enabler *enabler);
void lttng_free_event_filter_runtime(struct lttng_event *event);
-void lttng_filter_sync_state(struct lttng_bytecode_runtime *runtime);
struct cds_list_head *lttng_get_probe_list_head(void);
int lttng_session_active(void);
/* For backward compatibility. Leave those exported symbols in place. */
extern struct lttng_ctx *lttng_static_ctx;
+struct lttng_ust_filter_bytecode_node;
+struct lttng_ust_excluder_node;
void lttng_context_init(void);
void lttng_context_exit(void);
void lttng_filter_event_link_bytecode(struct lttng_event *event);
+struct lttng_enabler *lttng_enabler_create(
+ enum lttng_enabler_format_type format_type,
+ struct lttng_ust_event *event_param,
+ struct lttng_channel *chan);
+int lttng_enabler_enable(struct lttng_enabler *enabler);
+int lttng_enabler_disable(struct lttng_enabler *enabler);
+int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
+ struct lttng_ust_filter_bytecode_node *bytecode);
+int lttng_enabler_attach_context(struct lttng_enabler *enabler,
+ struct lttng_ust_context *ctx);
+int lttng_enabler_attach_exclusion(struct lttng_enabler *enabler,
+ struct lttng_ust_excluder_node *excluder);
+void lttng_enabler_event_link_bytecode(struct lttng_event *event,
+ struct lttng_enabler *enabler);
+void lttng_filter_sync_state(struct lttng_bytecode_runtime *runtime);
+int lttng_session_context_init(struct lttng_ctx **ctx);
+
#ifdef __cplusplus
}
#include TRACEPOINT_INCLUDE
+/*
+ * Stage 2.1 of tracepoint event generation.
+ *
+ * Create trigger probe callback prototypes.
+ */
+
+/* Reset all macros within TRACEPOINT_EVENT */
+#include <lttng/ust-tracepoint-event-reset.h>
+
+#undef TP_ARGS
+#define TP_ARGS(...) __VA_ARGS__
+
+#undef TRACEPOINT_EVENT_CLASS
+#define TRACEPOINT_EVENT_CLASS(_provider, _name, _args, _fields) \
+static void __trigger_probe__##_provider##___##_name(_TP_ARGS_DATA_PROTO(_args));
+
+#include TRACEPOINT_INCLUDE
+
/*
* Stage 3.0 of tracepoint event generation.
*
#undef TRACEPOINT_EVENT_CLASS
#define TRACEPOINT_EVENT_CLASS(_provider, _name, _args, _fields) \
static inline \
-void __event_prepare_filter_stack__##_provider##___##_name(char *__stack_data,\
+void __event_prepare_interpreter_stack__##_provider##___##_name(char *__stack_data,\
_TP_ARGS_DATA_PROTO(_args)) \
{ \
_fields \
return; \
if (caa_unlikely(!TP_RCU_LINK_TEST())) \
return; \
- if (caa_unlikely(!cds_list_empty(&__event->bytecode_runtime_head))) { \
- struct lttng_bytecode_runtime *bc_runtime; \
+ if (caa_unlikely(!cds_list_empty(&__event->filter_bytecode_runtime_head))) { \
+ struct lttng_bytecode_runtime *__filter_bc_runtime; \
int __filter_record = __event->has_enablers_without_bytecode; \
\
- __event_prepare_filter_stack__##_provider##___##_name(__stackvar.__filter_stack_data, \
+ __event_prepare_interpreter_stack__##_provider##___##_name(__stackvar.__filter_stack_data, \
_TP_ARGS_DATA_VAR(_args)); \
- tp_list_for_each_entry_rcu(bc_runtime, &__event->bytecode_runtime_head, node) { \
- if (caa_unlikely(bc_runtime->filter(bc_runtime, \
- __stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) { \
+ tp_list_for_each_entry_rcu(__filter_bc_runtime, &__event->filter_bytecode_runtime_head, node) { \
+ if (caa_unlikely(__filter_bc_runtime->interpreter_funcs.filter(__filter_bc_runtime, \
+ __stackvar.__filter_stack_data) & LTTNG_INTERPRETER_RECORD_FLAG)) { \
__filter_record = 1; \
break; \
} \
#undef _TP_EXTRACT_STRING2
+/*
+ * Stage 5.2 of tracepoint event generation.
+ *
+ * Create the trigger probe function.
+ */
+#undef TRACEPOINT_EVENT_CLASS
+#define TRACEPOINT_EVENT_CLASS(_provider, _name, _args, _fields) \
+static lttng_ust_notrace \
+void __trigger_probe__##_provider##___##_name(_TP_ARGS_DATA_PROTO(_args)); \
+static \
+void __trigger_probe__##_provider##___##_name(_TP_ARGS_DATA_PROTO(_args)) \
+{ \
+ struct lttng_trigger *__trigger = (struct lttng_trigger *) __tp_data; \
+ const size_t __num_fields = _TP_ARRAY_SIZE(__event_fields___##_provider##___##_name) - 1;\
+ union { \
+ size_t __dynamic_len[__num_fields]; \
+ char __interpreter_stack_data[2 * sizeof(unsigned long) * __num_fields]; \
+ } __stackvar; \
+ if (caa_unlikely(!CMM_ACCESS_ONCE(__trigger->enabled))) \
+ return; \
+ if (caa_unlikely(!TP_RCU_LINK_TEST())) \
+ return; \
+ if (caa_unlikely(!cds_list_empty(&__trigger->filter_bytecode_runtime_head))) { \
+ struct lttng_bytecode_runtime *__filter_bc_runtime; \
+ int __filter_record = __trigger->has_enablers_without_bytecode; \
+ \
+ __event_prepare_interpreter_stack__##_provider##___##_name(__stackvar.__interpreter_stack_data, \
+ _TP_ARGS_DATA_VAR(_args)); \
+ tp_list_for_each_entry_rcu(__filter_bc_runtime, &__trigger->filter_bytecode_runtime_head, node) { \
+ if (caa_unlikely(__filter_bc_runtime->interpreter_funcs.filter(__filter_bc_runtime, \
+ __stackvar.__interpreter_stack_data) & LTTNG_INTERPRETER_RECORD_FLAG)) \
+ __filter_record = 1; \
+ } \
+ if (caa_likely(!__filter_record)) \
+ return; \
+ } \
+ if (caa_unlikely(!cds_list_empty(&__trigger->capture_bytecode_runtime_head))) \
+ __event_prepare_interpreter_stack__##_provider##___##_name(__stackvar.__interpreter_stack_data, \
+ _TP_ARGS_DATA_VAR(_args)); \
+ \
+ lttng_trigger_notification_send(__trigger, __stackvar.__interpreter_stack_data); \
+}
+
+#include TRACEPOINT_INCLUDE
+
/*
* Stage 6 of tracepoint event generation.
*
.u = { \
.ext = { \
.model_emf_uri = &__ref_model_emf_uri___##_provider##___##_name, \
+ .trigger_callback = (void (*)(void)) &__trigger_probe__##_provider##___##_template,\
}, \
}, \
};
*/
#include <stdlib.h>
+#include <sys/uio.h>
ssize_t patient_write(int fd, const void *buf, size_t count);
+ssize_t patient_writev(int fd, struct iovec *iov, int iovcnt);
ssize_t patient_send(int fd, const void *buf, size_t count, int flags);
#endif /* _LTTNG_SHARE_H */
uint32_t cmd;
char padding[USTCOMM_MSG_PADDING1];
union {
+ struct lttng_ust_trigger trigger;
struct lttng_ust_channel channel;
struct lttng_ust_stream stream;
struct lttng_ust_event event;
struct {
uint32_t count; /* how many names follow */
} LTTNG_PACKED exclusion;
+ struct {
+ uint32_t data_size; /* following capture data */
+ uint32_t reloc_offset;
+ uint64_t seqnum;
+ } LTTNG_PACKED capture;
+ struct lttng_ust_counter counter;
+ struct lttng_ust_counter_global counter_global;
+ struct lttng_ust_counter_cpu counter_cpu;
char padding[USTCOMM_MSG_PADDING2];
} u;
} LTTNG_PACKED;
int ustcomm_recv_stream_from_sessiond(int sock,
uint64_t *memory_map_size,
int *shm_fd, int *wakeup_fd);
+ssize_t ustcomm_recv_trigger_notif_fd_from_sessiond(int sock,
+ int *trigger_notif_fd);
+
+ssize_t ustcomm_recv_counter_from_sessiond(int sock,
+ void **counter_data, uint64_t len);
+int ustcomm_recv_counter_shm_from_sessiond(int sock,
+ int *shm_fd);
/*
* Returns 0 on success, negative error value on error.
--- /dev/null
+AM_CPPFLAGS = -I$(top_srcdir)/include -I$(top_builddir)/include
+AM_CFLAGS += -fno-strict-aliasing
+
+noinst_LTLIBRARIES = libcounter.la
+
+libcounter_la_SOURCES = \
+ counter.c smp.c smp.h shm.c shm.h shm_internal.h shm_types.h \
+ counter-api.h counter.h counter-internal.h counter-types.h
+
+libcounter_la_LIBADD = \
+ -lpthread \
+ -lrt
+
+if HAVE_LIBNUMA
+libcounter_la_LIBADD += -lnuma
+endif
+
+libcounter_la_CFLAGS = -DUST_COMPONENT="libcounter" $(AM_CFLAGS)
--- /dev/null
+/*
+ * counter/counter-api.h
+ *
+ * LTTng Counters API, requiring lttng/counter-config.h
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _LTTNG_COUNTER_API_H
+#define _LTTNG_COUNTER_API_H
+
+#include <stdint.h>
+#include <limits.h>
+#include "counter.h"
+#include "counter-internal.h"
+#include <urcu/compiler.h>
+#include <urcu/uatomic.h>
+#include <lttng/bitmap.h>
+#include "../libringbuffer/getcpu.h"
+
+/*
+ * Use unsigned arithmetic internally: unsigned integer overflow has
+ * well-defined (wrap-around) semantics in C.
+ */
+static inline int __lttng_counter_add(const struct lib_counter_config *config,
+ enum lib_counter_config_alloc alloc,
+ enum lib_counter_config_sync sync,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes, int64_t v,
+ int64_t *remainder)
+{
+ size_t index;
+ bool overflow = false, underflow = false;
+ struct lib_counter_layout *layout;
+ int64_t move_sum = 0;
+
+ if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
+ return -EOVERFLOW;
+ index = lttng_counter_get_index(config, counter, dimension_indexes);
+
+ switch (alloc) {
+ case COUNTER_ALLOC_PER_CPU:
+ layout = &counter->percpu_counters[lttng_ust_get_cpu()];
+ break;
+ case COUNTER_ALLOC_GLOBAL:
+ layout = &counter->global_counters;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (caa_unlikely(!layout->counters))
+ return -ENODEV;
+
+ switch (config->counter_size) {
+ case COUNTER_SIZE_8_BIT:
+ {
+ int8_t *int_p = (int8_t *) layout->counters + index;
+ int8_t old, n, res;
+ int8_t global_sum_step = counter->global_sum_step.s8;
+
+ res = *int_p;
+ switch (sync) {
+ case COUNTER_SYNC_PER_CPU:
+ {
+ do {
+ move_sum = 0;
+ old = res;
+ n = (int8_t) ((uint8_t) old + (uint8_t) v);
+ if (caa_unlikely(n > (int8_t) global_sum_step))
+ move_sum = (int8_t) global_sum_step / 2;
+ else if (caa_unlikely(n < -(int8_t) global_sum_step))
+ move_sum = -((int8_t) global_sum_step / 2);
+ n -= move_sum;
+ res = uatomic_cmpxchg(int_p, old, n);
+ } while (old != res);
+ break;
+ }
+ case COUNTER_SYNC_GLOBAL:
+ {
+ do {
+ old = res;
+ n = (int8_t) ((uint8_t) old + (uint8_t) v);
+ res = uatomic_cmpxchg(int_p, old, n);
+ } while (old != res);
+ break;
+ }
+ }
+ if (v > 0 && (v >= UINT8_MAX || n < old))
+ overflow = true;
+ else if (v < 0 && (v <= -UINT8_MAX || n > old))
+ underflow = true;
+ break;
+ }
+ case COUNTER_SIZE_16_BIT:
+ {
+ int16_t *int_p = (int16_t *) layout->counters + index;
+ int16_t old, n, res;
+ int16_t global_sum_step = counter->global_sum_step.s16;
+
+ res = *int_p;
+ switch (sync) {
+ case COUNTER_SYNC_PER_CPU:
+ {
+ do {
+ move_sum = 0;
+ old = res;
+ n = (int16_t) ((uint16_t) old + (uint16_t) v);
+ if (caa_unlikely(n > (int16_t) global_sum_step))
+ move_sum = (int16_t) global_sum_step / 2;
+ else if (caa_unlikely(n < -(int16_t) global_sum_step))
+ move_sum = -((int16_t) global_sum_step / 2);
+ n -= move_sum;
+ res = uatomic_cmpxchg(int_p, old, n);
+ } while (old != res);
+ break;
+ }
+ case COUNTER_SYNC_GLOBAL:
+ {
+ do {
+ old = res;
+ n = (int16_t) ((uint16_t) old + (uint16_t) v);
+ res = uatomic_cmpxchg(int_p, old, n);
+ } while (old != res);
+ break;
+ }
+ }
+ if (v > 0 && (v >= UINT16_MAX || n < old))
+ overflow = true;
+ else if (v < 0 && (v <= -UINT16_MAX || n > old))
+ underflow = true;
+ break;
+ }
+ case COUNTER_SIZE_32_BIT:
+ {
+ int32_t *int_p = (int32_t *) layout->counters + index;
+ int32_t old, n, res;
+ int32_t global_sum_step = counter->global_sum_step.s32;
+
+ res = *int_p;
+ switch (sync) {
+ case COUNTER_SYNC_PER_CPU:
+ {
+ do {
+ move_sum = 0;
+ old = res;
+ n = (int32_t) ((uint32_t) old + (uint32_t) v);
+ if (caa_unlikely(n > (int32_t) global_sum_step))
+ move_sum = (int32_t) global_sum_step / 2;
+ else if (caa_unlikely(n < -(int32_t) global_sum_step))
+ move_sum = -((int32_t) global_sum_step / 2);
+ n -= move_sum;
+ res = uatomic_cmpxchg(int_p, old, n);
+ } while (old != res);
+ break;
+ }
+ case COUNTER_SYNC_GLOBAL:
+ {
+ do {
+ old = res;
+ n = (int32_t) ((uint32_t) old + (uint32_t) v);
+ res = uatomic_cmpxchg(int_p, old, n);
+ } while (old != res);
+ break;
+ }
+ }
+ if (v > 0 && (v >= UINT32_MAX || n < old))
+ overflow = true;
+ else if (v < 0 && (v <= -UINT32_MAX || n > old))
+ underflow = true;
+ break;
+ }
+#if CAA_BITS_PER_LONG == 64
+ case COUNTER_SIZE_64_BIT:
+ {
+ int64_t *int_p = (int64_t *) layout->counters + index;
+ int64_t old, n, res;
+ int64_t global_sum_step = counter->global_sum_step.s64;
+
+ res = *int_p;
+ switch (sync) {
+ case COUNTER_SYNC_PER_CPU:
+ {
+ do {
+ move_sum = 0;
+ old = res;
+ n = (int64_t) ((uint64_t) old + (uint64_t) v);
+ if (caa_unlikely(n > (int64_t) global_sum_step))
+ move_sum = (int64_t) global_sum_step / 2;
+ else if (caa_unlikely(n < -(int64_t) global_sum_step))
+ move_sum = -((int64_t) global_sum_step / 2);
+ n -= move_sum;
+ res = uatomic_cmpxchg(int_p, old, n);
+ } while (old != res);
+ break;
+ }
+ case COUNTER_SYNC_GLOBAL:
+ {
+ do {
+ old = res;
+ n = (int64_t) ((uint64_t) old + (uint64_t) v);
+ res = uatomic_cmpxchg(int_p, old, n);
+ } while (old != res);
+ break;
+ }
+ }
+ if (v > 0 && n < old)
+ overflow = true;
+ else if (v < 0 && n > old)
+ underflow = true;
+ break;
+ }
+#endif
+ default:
+ return -EINVAL;
+ }
+ if (caa_unlikely(overflow && !lttng_bitmap_test_bit(index, layout->overflow_bitmap)))
+ lttng_bitmap_set_bit(index, layout->overflow_bitmap);
+ else if (caa_unlikely(underflow && !lttng_bitmap_test_bit(index, layout->underflow_bitmap)))
+ lttng_bitmap_set_bit(index, layout->underflow_bitmap);
+ if (remainder)
+ *remainder = move_sum;
+ return 0;
+}
+
+static inline int __lttng_counter_add_percpu(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes, int64_t v)
+{
+ int64_t move_sum;
+ int ret;
+
+ ret = __lttng_counter_add(config, COUNTER_ALLOC_PER_CPU, config->sync,
+ counter, dimension_indexes, v, &move_sum);
+ if (caa_unlikely(ret))
+ return ret;
+ if (caa_unlikely(move_sum))
+ return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, COUNTER_SYNC_GLOBAL,
+ counter, dimension_indexes, move_sum, NULL);
+ return 0;
+}
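+
+/*
+ * Worked example of the global sum step (illustrative): with an 8-bit
+ * counter and global_sum_step = 64, a per-cpu slot accumulates updates
+ * until its value would leave [-64, 64]; the update then moves
+ * global_sum_step / 2 = 32 towards the global counter, keeping the
+ * per-cpu remainder small while bounding contention on the global
+ * counter.
+ */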
+
+static inline int __lttng_counter_add_global(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes, int64_t v)
+{
+ return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, config->sync, counter,
+ dimension_indexes, v, NULL);
+}
+
+static inline int lttng_counter_add(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes, int64_t v)
+{
+ switch (config->alloc) {
+ case COUNTER_ALLOC_PER_CPU: /* Fallthrough */
+ case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+ return __lttng_counter_add_percpu(config, counter, dimension_indexes, v);
+ case COUNTER_ALLOC_GLOBAL:
+ return __lttng_counter_add_global(config, counter, dimension_indexes, v);
+ default:
+ return -EINVAL;
+ }
+}
+
+static inline int lttng_counter_inc(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes)
+{
+ return lttng_counter_add(config, counter, dimension_indexes, 1);
+}
+
+static inline int lttng_counter_dec(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes)
+{
+ return lttng_counter_add(config, counter, dimension_indexes, -1);
+}
+
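+/*
+ * Illustrative fast-path usage (sketch: `client_config', `counter' and
+ * the index are assumed to be owned by the caller):
+ *
+ *	size_t index = trigger->error_counter_index;
+ *
+ *	(void) lttng_counter_add(&client_config, counter, &index, 1);
+ *
+ * A nonzero return denotes an out-of-bound index or a layout without
+ * mapped counters.
+ */
+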
+#endif /* _LTTNG_COUNTER_API_H */
--- /dev/null
+/*
+ * counter/counter-internal.h
+ *
+ * LTTng Counters Internal Header
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _LTTNG_COUNTER_INTERNAL_H
+#define _LTTNG_COUNTER_INTERNAL_H
+
+#include <stdint.h>
+#include <lttng/ust-config.h>
+#include <urcu/compiler.h>
+#include "counter-types.h"
+
+static inline int lttng_counter_validate_indexes(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes)
+{
+ size_t nr_dimensions = counter->nr_dimensions, i;
+
+ for (i = 0; i < nr_dimensions; i++) {
+ if (caa_unlikely(dimension_indexes[i] >= counter->dimensions[i].max_nr_elem))
+ return -EOVERFLOW;
+ }
+ return 0;
+}
+
+static inline size_t lttng_counter_get_index(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes)
+{
+ size_t nr_dimensions = counter->nr_dimensions, i;
+ size_t index = 0;
+
+ for (i = 0; i < nr_dimensions; i++) {
+ struct lib_counter_dimension *dimension = &counter->dimensions[i];
+ const size_t *dimension_index = &dimension_indexes[i];
+
+ index += *dimension_index * dimension->stride;
+ }
+ return index;
+}
+
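+/*
+ * Worked example (illustrative): for a two-dimension counter with
+ * max_nr_elem = { 4, 8 }, the strides are { 8, 1 }, so element (i, j)
+ * lives at linear index i * 8 + j, for 32 elements total.
+ */
+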
+#endif /* _LTTNG_COUNTER_INTERNAL_H */
--- /dev/null
+/*
+ * counter/counter-types.h
+ *
+ * LTTng Counters Types
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _LTTNG_COUNTER_TYPES_H
+#define _LTTNG_COUNTER_TYPES_H
+
+#include <stdint.h>
+#include <stddef.h>
+#include <stdbool.h>
+#include <sys/types.h>
+#include <lttng/counter-config.h>
+#include <lttng/ust-config.h>
+#include "shm_types.h"
+
+struct lib_counter_dimension {
+ /*
+ * Max. number of indexable elements.
+ */
+ size_t max_nr_elem;
+ /*
+ * The stride for a dimension is the multiplication factor which
+ * should be applied to its index to take into account other
+ * dimensions nested inside.
+ */
+ size_t stride;
+};
+
+struct lib_counter_layout {
+ void *counters;
+ unsigned long *overflow_bitmap;
+ unsigned long *underflow_bitmap;
+ int shm_fd;
+ size_t shm_len;
+ struct lttng_counter_shm_handle handle;
+};
+
+enum lib_counter_arithmetic {
+ LIB_COUNTER_ARITHMETIC_MODULAR,
+ LIB_COUNTER_ARITHMETIC_SATURATE,
+};
+
+struct lib_counter {
+ size_t nr_dimensions;
+ int64_t allocated_elem;
+ struct lib_counter_dimension *dimensions;
+ enum lib_counter_arithmetic arithmetic;
+ union {
+ struct {
+ int32_t max, min;
+ } limits_32_bit;
+ struct {
+ int64_t max, min;
+ } limits_64_bit;
+ } saturation;
+ union {
+ int8_t s8;
+ int16_t s16;
+ int32_t s32;
+ int64_t s64;
+ } global_sum_step; /* 0 if unused */
+ struct lib_counter_config config;
+
+ struct lib_counter_layout global_counters;
+ struct lib_counter_layout *percpu_counters;
+
+ bool is_daemon;
+ struct lttng_counter_shm_object_table *object_table;
+};
+
+#endif /* _LTTNG_COUNTER_TYPES_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * counter.c
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include "counter.h"
+#include "counter-internal.h"
+#include <lttng/bitmap.h>
+#include <urcu/system.h>
+#include <urcu/compiler.h>
+#include <stdbool.h>
+#include <helper.h>
+#include <lttng/align.h>
+#include "smp.h"
+#include "shm.h"
+
+static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension *dimension)
+{
+ return dimension->max_nr_elem;
+}
+
+static int lttng_counter_init_stride(const struct lib_counter_config *config,
+ struct lib_counter *counter)
+{
+ size_t nr_dimensions = counter->nr_dimensions;
+ size_t stride = 1;
+ ssize_t i;
+
+ for (i = nr_dimensions - 1; i >= 0; i--) {
+ struct lib_counter_dimension *dimension = &counter->dimensions[i];
+ size_t nr_elem;
+
+ nr_elem = lttng_counter_get_dimension_nr_elements(dimension);
+ dimension->stride = stride;
+ /* nr_elem must be at least 1 for each dimension. */
+ if (!nr_elem)
+ return -EINVAL;
+ /* Check for multiplication overflow before updating the stride. */
+ if (stride > SIZE_MAX / nr_elem)
+ return -EINVAL;
+ stride *= nr_elem;
+ }
+ return 0;
+}
+
+static int lttng_counter_layout_init(struct lib_counter *counter, int cpu, int shm_fd)
+{
+ struct lib_counter_layout *layout;
+ size_t counter_size;
+ size_t nr_elem = counter->allocated_elem;
+ size_t shm_length = 0, counters_offset, overflow_offset, underflow_offset;
+ struct lttng_counter_shm_object *shm_object;
+
+ if (shm_fd < 0)
+ return 0; /* Skip, will be populated later. */
+
+ if (cpu == -1)
+ layout = &counter->global_counters;
+ else
+ layout = &counter->percpu_counters[cpu];
+ switch (counter->config.counter_size) {
+ case COUNTER_SIZE_8_BIT:
+ case COUNTER_SIZE_16_BIT:
+ case COUNTER_SIZE_32_BIT:
+ case COUNTER_SIZE_64_BIT:
+ counter_size = (size_t) counter->config.counter_size;
+ break;
+ default:
+ return -EINVAL;
+ }
+ layout->shm_fd = shm_fd;
+ counters_offset = shm_length;
+ shm_length += counter_size * nr_elem;
+ overflow_offset = shm_length;
+ shm_length += ALIGN(nr_elem, 8) / 8;
+ underflow_offset = shm_length;
+ shm_length += ALIGN(nr_elem, 8) / 8;
+ layout->shm_len = shm_length;
+ if (counter->is_daemon) {
+ /* Allocate and clear shared memory. */
+ shm_object = lttng_counter_shm_object_table_alloc(counter->object_table,
+ shm_length, LTTNG_COUNTER_SHM_OBJECT_SHM, shm_fd, cpu);
+ if (!shm_object)
+ return -ENOMEM;
+ } else {
+ /* Map pre-existing shared memory. */
+ shm_object = lttng_counter_shm_object_table_append_shm(counter->object_table,
+ shm_fd, shm_length);
+ if (!shm_object)
+ return -ENOMEM;
+ }
+ layout->counters = shm_object->memory_map + counters_offset;
+ layout->overflow_bitmap = (unsigned long *)(shm_object->memory_map + overflow_offset);
+ layout->underflow_bitmap = (unsigned long *)(shm_object->memory_map + underflow_offset);
+ return 0;
+}
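+
+/*
+ * Worked example of the layout above (illustrative): for 128 elements
+ * of 32-bit counters, the mapping holds 512 bytes of counters, then a
+ * 16-byte overflow bitmap, then a 16-byte underflow bitmap, for a
+ * shm_len of 544 bytes.
+ */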
+
+int lttng_counter_set_global_shm(struct lib_counter *counter, int fd)
+{
+ struct lib_counter_config *config = &counter->config;
+ struct lib_counter_layout *layout;
+
+ if (!(config->alloc & COUNTER_ALLOC_GLOBAL))
+ return -EINVAL;
+ layout = &counter->global_counters;
+ if (layout->shm_fd >= 0)
+ return -EBUSY;
+ return lttng_counter_layout_init(counter, -1, fd);
+}
+
+int lttng_counter_set_cpu_shm(struct lib_counter *counter, int cpu, int fd)
+{
+ struct lib_counter_config *config = &counter->config;
+ struct lib_counter_layout *layout;
+
+ if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
+ return -EINVAL;
+
+ if (!(config->alloc & COUNTER_ALLOC_PER_CPU))
+ return -EINVAL;
+ layout = &counter->percpu_counters[cpu];
+ if (layout->shm_fd >= 0)
+ return -EBUSY;
+ return lttng_counter_layout_init(counter, cpu, fd);
+}
+
+static
+int lttng_counter_set_global_sum_step(struct lib_counter *counter,
+ int64_t global_sum_step)
+{
+ if (global_sum_step < 0)
+ return -EINVAL;
+
+ switch (counter->config.counter_size) {
+ case COUNTER_SIZE_8_BIT:
+ if (global_sum_step > INT8_MAX)
+ return -EINVAL;
+ counter->global_sum_step.s8 = (int8_t) global_sum_step;
+ break;
+ case COUNTER_SIZE_16_BIT:
+ if (global_sum_step > INT16_MAX)
+ return -EINVAL;
+ counter->global_sum_step.s16 = (int16_t) global_sum_step;
+ break;
+ case COUNTER_SIZE_32_BIT:
+ if (global_sum_step > INT32_MAX)
+ return -EINVAL;
+ counter->global_sum_step.s32 = (int32_t) global_sum_step;
+ break;
+ case COUNTER_SIZE_64_BIT:
+ counter->global_sum_step.s64 = global_sum_step;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static
+int validate_args(const struct lib_counter_config *config,
+ size_t nr_dimensions,
+ const size_t *max_nr_elem,
+ int64_t global_sum_step,
+ int global_counter_fd,
+ int nr_counter_cpu_fds,
+ const int *counter_cpu_fds)
+{
+ int nr_cpus = lttng_counter_num_possible_cpus();
+
+ if (CAA_BITS_PER_LONG != 64 && config->counter_size == COUNTER_SIZE_64_BIT) {
+ WARN_ON_ONCE(1);
+ return -1;
+ }
+ if (!max_nr_elem)
+ return -1;
+ /*
+ * The global sum step is only useful when allocating both per-cpu
+ * and global counters.
+ */
+ if (global_sum_step && (!(config->alloc & COUNTER_ALLOC_GLOBAL) ||
+ !(config->alloc & COUNTER_ALLOC_PER_CPU)))
+ return -1;
+ if (!(config->alloc & COUNTER_ALLOC_GLOBAL) && global_counter_fd >= 0)
+ return -1;
+ if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds)
+ return -1;
+ if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && nr_counter_cpu_fds >= 0)
+ return -1;
+ if (counter_cpu_fds && nr_cpus != nr_counter_cpu_fds)
+ return -1;
+ return 0;
+}
+
+struct lib_counter *lttng_counter_create(const struct lib_counter_config *config,
+ size_t nr_dimensions,
+ const size_t *max_nr_elem,
+ int64_t global_sum_step,
+ int global_counter_fd,
+ int nr_counter_cpu_fds,
+ const int *counter_cpu_fds,
+ bool is_daemon)
+{
+ struct lib_counter *counter;
+ size_t dimension, nr_elem = 1;
+ int cpu, ret;
+ int nr_handles = 0;
+ int nr_cpus = lttng_counter_num_possible_cpus();
+
+ if (validate_args(config, nr_dimensions, max_nr_elem,
+ global_sum_step, global_counter_fd, nr_counter_cpu_fds,
+ counter_cpu_fds))
+ return NULL;
+ counter = zmalloc(sizeof(struct lib_counter));
+ if (!counter)
+ return NULL;
+ counter->global_counters.shm_fd = -1;
+ counter->config = *config;
+ counter->is_daemon = is_daemon;
+ if (lttng_counter_set_global_sum_step(counter, global_sum_step))
+ goto error_sum_step;
+ counter->nr_dimensions = nr_dimensions;
+ counter->dimensions = zmalloc(nr_dimensions * sizeof(*counter->dimensions));
+ if (!counter->dimensions)
+ goto error_dimensions;
+ for (dimension = 0; dimension < nr_dimensions; dimension++)
+ counter->dimensions[dimension].max_nr_elem = max_nr_elem[dimension];
+ if (config->alloc & COUNTER_ALLOC_PER_CPU) {
+ counter->percpu_counters = zmalloc(sizeof(struct lib_counter_layout) * nr_cpus);
+ if (!counter->percpu_counters)
+ goto error_alloc_percpu;
+ lttng_counter_for_each_possible_cpu(cpu)
+ counter->percpu_counters[cpu].shm_fd = -1;
+ }
+
+ if (lttng_counter_init_stride(config, counter))
+ goto error_init_stride;
+ /* TODO: set saturation values. */
+ for (dimension = 0; dimension < counter->nr_dimensions; dimension++)
+ nr_elem *= lttng_counter_get_dimension_nr_elements(&counter->dimensions[dimension]);
+ counter->allocated_elem = nr_elem;
+
+ if (config->alloc & COUNTER_ALLOC_GLOBAL)
+ nr_handles++;
+ if (config->alloc & COUNTER_ALLOC_PER_CPU)
+ nr_handles += nr_cpus;
+ /* Allocate table for global and per-cpu counters. */
+ counter->object_table = lttng_counter_shm_object_table_create(nr_handles);
+ if (!counter->object_table)
+ goto error_alloc_object_table;
+
+ if (config->alloc & COUNTER_ALLOC_GLOBAL) {
+ ret = lttng_counter_layout_init(counter, -1, global_counter_fd); /* global */
+ if (ret)
+ goto layout_init_error;
+ }
+ if ((config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds) {
+ lttng_counter_for_each_possible_cpu(cpu) {
+ ret = lttng_counter_layout_init(counter, cpu, counter_cpu_fds[cpu]);
+ if (ret)
+ goto layout_init_error;
+ }
+ }
+ return counter;
+
+layout_init_error:
+ lttng_counter_shm_object_table_destroy(counter->object_table, is_daemon);
+error_alloc_object_table:
+error_init_stride:
+ free(counter->percpu_counters);
+error_alloc_percpu:
+ free(counter->dimensions);
+error_dimensions:
+error_sum_step:
+ free(counter);
+ return NULL;
+}
+
+void lttng_counter_destroy(struct lib_counter *counter)
+{
+ struct lib_counter_config *config = &counter->config;
+
+ if (config->alloc & COUNTER_ALLOC_PER_CPU)
+ free(counter->percpu_counters);
+ lttng_counter_shm_object_table_destroy(counter->object_table, counter->is_daemon);
+ free(counter->dimensions);
+ free(counter);
+}
+
+int lttng_counter_get_global_shm(struct lib_counter *counter, int *fd, size_t *len)
+{
+ int shm_fd;
+
+ shm_fd = counter->global_counters.shm_fd;
+ if (shm_fd < 0)
+ return -1;
+ *fd = shm_fd;
+ *len = counter->global_counters.shm_len;
+ return 0;
+}
+
+int lttng_counter_get_cpu_shm(struct lib_counter *counter, int cpu, int *fd, size_t *len)
+{
+ struct lib_counter_layout *layout;
+ int shm_fd;
+
+ if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
+ return -1;
+ layout = &counter->percpu_counters[cpu];
+ shm_fd = layout->shm_fd;
+ if (shm_fd < 0)
+ return -1;
+ *fd = shm_fd;
+ *len = layout->shm_len;
+ return 0;
+}
+
+int lttng_counter_read(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes,
+ int cpu, int64_t *value, bool *overflow,
+ bool *underflow)
+{
+ size_t index;
+ struct lib_counter_layout *layout;
+
+ if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
+ return -EOVERFLOW;
+ index = lttng_counter_get_index(config, counter, dimension_indexes);
+
+ switch (config->alloc) {
+ case COUNTER_ALLOC_PER_CPU:
+ if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
+ return -EINVAL;
+ layout = &counter->percpu_counters[cpu];
+ break;
+ case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+ if (cpu >= 0) {
+ if (cpu >= lttng_counter_num_possible_cpus())
+ return -EINVAL;
+ layout = &counter->percpu_counters[cpu];
+ } else {
+ layout = &counter->global_counters;
+ }
+ break;
+ case COUNTER_ALLOC_GLOBAL:
+ if (cpu >= 0)
+ return -EINVAL;
+ layout = &counter->global_counters;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (caa_unlikely(!layout->counters))
+ return -ENODEV;
+
+ switch (config->counter_size) {
+ case COUNTER_SIZE_8_BIT:
+ {
+ int8_t *int_p = (int8_t *) layout->counters + index;
+ *value = (int64_t) CMM_LOAD_SHARED(*int_p);
+ break;
+ }
+ case COUNTER_SIZE_16_BIT:
+ {
+ int16_t *int_p = (int16_t *) layout->counters + index;
+ *value = (int64_t) CMM_LOAD_SHARED(*int_p);
+ break;
+ }
+ case COUNTER_SIZE_32_BIT:
+ {
+ int32_t *int_p = (int32_t *) layout->counters + index;
+ *value = (int64_t) CMM_LOAD_SHARED(*int_p);
+ break;
+ }
+#if CAA_BITS_PER_LONG == 64
+ case COUNTER_SIZE_64_BIT:
+ {
+ int64_t *int_p = (int64_t *) layout->counters + index;
+ *value = CMM_LOAD_SHARED(*int_p);
+ break;
+ }
+#endif
+ default:
+ return -EINVAL;
+ }
+ *overflow = lttng_bitmap_test_bit(index, layout->overflow_bitmap);
+ *underflow = lttng_bitmap_test_bit(index, layout->underflow_bitmap);
+ return 0;
+}
+
+int lttng_counter_aggregate(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes,
+ int64_t *value, bool *overflow,
+ bool *underflow)
+{
+ int cpu, ret;
+ int64_t v, sum = 0;
+ bool of, uf;
+
+ *overflow = false;
+ *underflow = false;
+
+ switch (config->alloc) {
+ case COUNTER_ALLOC_GLOBAL: /* Fallthrough */
+ case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+ /* Read global counter. */
+ ret = lttng_counter_read(config, counter, dimension_indexes,
+ -1, &v, &of, &uf);
+ if (ret < 0)
+ return ret;
+ sum += v;
+ *overflow |= of;
+ *underflow |= uf;
+ break;
+ case COUNTER_ALLOC_PER_CPU:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (config->alloc) {
+ case COUNTER_ALLOC_GLOBAL:
+ break;
+ case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL: /* Fallthrough */
+ case COUNTER_ALLOC_PER_CPU:
+ lttng_counter_for_each_possible_cpu(cpu) {
+ int64_t old = sum;
+
+ ret = lttng_counter_read(config, counter, dimension_indexes,
+ cpu, &v, &of, &uf);
+ if (ret < 0)
+ return ret;
+ *overflow |= of;
+ *underflow |= uf;
+ /* Overflow is defined on unsigned types. */
+ sum = (int64_t) ((uint64_t) old + (uint64_t) v);
+ if (v > 0 && sum < old)
+ *overflow = true;
+ else if (v < 0 && sum > old)
+ *underflow = true;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ *value = sum;
+ return 0;
+}
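+
+/*
+ * The wrap detection above performs the signed addition on unsigned types,
+ * where wrap-around is well defined, then infers overflow or underflow from
+ * the operand sign and the direction of the wrap. Standalone sketch of the
+ * same check (hypothetical helper, for illustration only):
+ *
+ *	static bool example_add_wraps(int64_t old, int64_t v, int64_t *sum)
+ *	{
+ *		*sum = (int64_t) ((uint64_t) old + (uint64_t) v);
+ *		return (v > 0 && *sum < old) || (v < 0 && *sum > old);
+ *	}
+ *
+ * e.g. old = INT64_MAX, v = 1 wraps *sum to INT64_MIN < old: overflow.
+ */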
+
+static
+int lttng_counter_clear_cpu(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes,
+ int cpu)
+{
+ size_t index;
+ struct lib_counter_layout *layout;
+
+ if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
+ return -EOVERFLOW;
+ index = lttng_counter_get_index(config, counter, dimension_indexes);
+
+ switch (config->alloc) {
+ case COUNTER_ALLOC_PER_CPU:
+ if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
+ return -EINVAL;
+ layout = &counter->percpu_counters[cpu];
+ break;
+ case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+ if (cpu >= 0) {
+ if (cpu >= lttng_counter_num_possible_cpus())
+ return -EINVAL;
+ layout = &counter->percpu_counters[cpu];
+ } else {
+ layout = &counter->global_counters;
+ }
+ break;
+ case COUNTER_ALLOC_GLOBAL:
+ if (cpu >= 0)
+ return -EINVAL;
+ layout = &counter->global_counters;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (caa_unlikely(!layout->counters))
+ return -ENODEV;
+
+ switch (config->counter_size) {
+ case COUNTER_SIZE_8_BIT:
+ {
+ int8_t *int_p = (int8_t *) layout->counters + index;
+ CMM_STORE_SHARED(*int_p, 0);
+ break;
+ }
+ case COUNTER_SIZE_16_BIT:
+ {
+ int16_t *int_p = (int16_t *) layout->counters + index;
+ CMM_STORE_SHARED(*int_p, 0);
+ break;
+ }
+ case COUNTER_SIZE_32_BIT:
+ {
+ int32_t *int_p = (int32_t *) layout->counters + index;
+ CMM_STORE_SHARED(*int_p, 0);
+ break;
+ }
+#if CAA_BITS_PER_LONG == 64
+ case COUNTER_SIZE_64_BIT:
+ {
+ int64_t *int_p = (int64_t *) layout->counters + index;
+ CMM_STORE_SHARED(*int_p, 0);
+ break;
+ }
+#endif
+ default:
+ return -EINVAL;
+ }
+ lttng_bitmap_clear_bit(index, layout->overflow_bitmap);
+ lttng_bitmap_clear_bit(index, layout->underflow_bitmap);
+ return 0;
+}
+
+int lttng_counter_clear(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes)
+{
+ int cpu, ret;
+
+ switch (config->alloc) {
+ case COUNTER_ALLOC_PER_CPU:
+ break;
+ case COUNTER_ALLOC_GLOBAL: /* Fallthrough */
+ case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+ /* Clear global counter. */
+ ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, -1);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (config->alloc) {
+ case COUNTER_ALLOC_PER_CPU: /* Fallthrough */
+ case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+ lttng_counter_for_each_possible_cpu(cpu) {
+ ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, cpu);
+ if (ret < 0)
+ return ret;
+ }
+ break;
+ case COUNTER_ALLOC_GLOBAL:
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
--- /dev/null
+/*
+ * lttng/counter.h
+ *
+ * LTTng Counters API
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _LTTNG_COUNTER_H
+#define _LTTNG_COUNTER_H
+
+#include <stdint.h>
+#include <lttng/ust-config.h>
+#include "counter-types.h"
+
+/* max_nr_elem is for each dimension. */
+struct lib_counter *lttng_counter_create(const struct lib_counter_config *config,
+ size_t nr_dimensions,
+ const size_t *max_nr_elem,
+ int64_t global_sum_step,
+ int global_counter_fd,
+ int nr_counter_cpu_fds,
+ const int *counter_cpu_fds,
+ bool is_daemon);
+void lttng_counter_destroy(struct lib_counter *counter);
+
+int lttng_counter_set_global_shm(struct lib_counter *counter, int fd);
+int lttng_counter_set_cpu_shm(struct lib_counter *counter, int cpu, int fd);
+
+int lttng_counter_get_global_shm(struct lib_counter *counter, int *fd, size_t *len);
+int lttng_counter_get_cpu_shm(struct lib_counter *counter, int cpu, int *fd, size_t *len);
+
+int lttng_counter_read(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes,
+ int cpu, int64_t *value,
+ bool *overflow, bool *underflow);
+int lttng_counter_aggregate(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes,
+ int64_t *value,
+ bool *overflow, bool *underflow);
+int lttng_counter_clear(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes);
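+
+/*
+ * Usage sketch of the API above (illustration only, hypothetical names;
+ * the lib_counter_config comes from the counter clients in counter-types.h
+ * and is assumed given, and the fd-less create call only shows the call
+ * shape, since accepted fd combinations depend on the allocation flags):
+ *
+ *	static int example_counter_roundtrip(const struct lib_counter_config *config)
+ *	{
+ *		const size_t max_nr_elem[] = { 128 };
+ *		const size_t index[] = { 42 };
+ *		struct lib_counter *counter;
+ *		int64_t value;
+ *		bool overflow, underflow;
+ *		int ret;
+ *
+ *		counter = lttng_counter_create(config, 1, max_nr_elem,
+ *				0, -1, 0, NULL, false);
+ *		if (!counter)
+ *			return -1;
+ *		ret = lttng_counter_aggregate(config, counter, index,
+ *				&value, &overflow, &underflow);
+ *		if (!ret)
+ *			ret = lttng_counter_clear(config, counter, index);
+ *		lttng_counter_destroy(counter);
+ *		return ret;
+ *	}
+ */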
+
+#endif /* _LTTNG_COUNTER_H */
--- /dev/null
+/*
+ * libcounter/shm.c
+ *
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define _LGPL_SOURCE
+#include <config.h>
+#include "shm.h"
+#include <unistd.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h> /* For mode constants */
+#include <fcntl.h> /* For O_* constants */
+#include <assert.h>
+#include <stdio.h>
+#include <signal.h>
+#include <dirent.h>
+#include <lttng/align.h>
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#ifdef HAVE_LIBNUMA
+#include <numa.h>
+#include <numaif.h>
+#endif
+#include <helper.h>
+#include <ust-fd.h>
+#include "../libringbuffer/mmap.h"
+
+/*
+ * Ensure we have the required amount of space available by writing 0
+ * into the entire buffer. Not doing so can trigger SIGBUS when going
+ * beyond the available shm space.
+ */
+static
+int zero_file(int fd, size_t len)
+{
+ ssize_t retlen;
+ size_t written = 0;
+ char *zeropage;
+ long pagelen;
+ int ret;
+
+ pagelen = sysconf(_SC_PAGESIZE);
+ if (pagelen < 0)
+ return (int) pagelen;
+ zeropage = calloc(pagelen, 1);
+ if (!zeropage)
+ return -ENOMEM;
+
+ while (len > written) {
+ do {
+ retlen = write(fd, zeropage,
+ min_t(size_t, pagelen, len - written));
+ } while (retlen == -1UL && errno == EINTR);
+ if (retlen < 0) {
+ ret = (int) retlen;
+ goto error;
+ }
+ written += retlen;
+ }
+ ret = 0;
+error:
+ free(zeropage);
+ return ret;
+}
+
+struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj)
+{
+ struct lttng_counter_shm_object_table *table;
+
+ table = zmalloc(sizeof(struct lttng_counter_shm_object_table) +
+ max_nb_obj * sizeof(table->objects[0]));
+ if (!table)
+ return NULL;
+ table->size = max_nb_obj;
+ return table;
+}
+
+static
+struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_shm(struct lttng_counter_shm_object_table *table,
+ size_t memory_map_size,
+ int cpu_fd)
+{
+ int shmfd, ret;
+ struct lttng_counter_shm_object *obj;
+ char *memory_map;
+
+ if (cpu_fd < 0)
+ return NULL;
+ if (table->allocated_len >= table->size)
+ return NULL;
+ obj = &table->objects[table->allocated_len];
+
+ /* create shm */
+
+ shmfd = cpu_fd;
+ ret = zero_file(shmfd, memory_map_size);
+ if (ret) {
+ PERROR("zero_file");
+ goto error_zero_file;
+ }
+ ret = ftruncate(shmfd, memory_map_size);
+ if (ret) {
+ PERROR("ftruncate");
+ goto error_ftruncate;
+ }
+ /*
+ * Also ensure the file metadata is synced with the storage by using
+ * fsync(2).
+ */
+ ret = fsync(shmfd);
+ if (ret) {
+ PERROR("fsync");
+ goto error_fsync;
+ }
+ obj->shm_fd_ownership = 0;
+ obj->shm_fd = shmfd;
+
+ /* memory_map: mmap */
+ memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | LTTNG_MAP_POPULATE, shmfd, 0);
+ if (memory_map == MAP_FAILED) {
+ PERROR("mmap");
+ goto error_mmap;
+ }
+ obj->type = LTTNG_COUNTER_SHM_OBJECT_SHM;
+ obj->memory_map = memory_map;
+ obj->memory_map_size = memory_map_size;
+ obj->allocated_len = 0;
+ obj->index = table->allocated_len++;
+
+ return obj;
+
+error_mmap:
+error_fsync:
+error_ftruncate:
+error_zero_file:
+ return NULL;
+}
+
+static
+struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_mem(struct lttng_counter_shm_object_table *table,
+ size_t memory_map_size)
+{
+ struct lttng_counter_shm_object *obj;
+ void *memory_map;
+
+ if (table->allocated_len >= table->size)
+ return NULL;
+ obj = &table->objects[table->allocated_len];
+
+ memory_map = zmalloc(memory_map_size);
+ if (!memory_map)
+ goto alloc_error;
+
+ /* no shm_fd */
+ obj->shm_fd = -1;
+ obj->shm_fd_ownership = 0;
+
+ obj->type = LTTNG_COUNTER_SHM_OBJECT_MEM;
+ obj->memory_map = memory_map;
+ obj->memory_map_size = memory_map_size;
+ obj->allocated_len = 0;
+ obj->index = table->allocated_len++;
+
+ return obj;
+
+alloc_error:
+ return NULL;
+}
+
+/*
+ * libnuma prints errors on the console even for numa_available().
+ * Work-around this limitation by using get_mempolicy() directly to
+ * check whether the kernel supports mempolicy.
+ */
+#ifdef HAVE_LIBNUMA
+static bool lttng_is_numa_available(void)
+{
+ int ret;
+
+ ret = get_mempolicy(NULL, NULL, 0, NULL, 0);
+ if (ret && errno == ENOSYS) {
+ return false;
+ }
+	/* numa_available() returns -1 when NUMA is unavailable, 0 otherwise. */
+	return numa_available() >= 0;
+}
+#endif
+
+struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table,
+ size_t memory_map_size,
+ enum lttng_counter_shm_object_type type,
+ int cpu_fd,
+ int cpu)
+{
+ struct lttng_counter_shm_object *shm_object;
+#ifdef HAVE_LIBNUMA
+	int oldnode = 0, node = -1;
+ bool numa_avail;
+
+ numa_avail = lttng_is_numa_available();
+ if (numa_avail) {
+ oldnode = numa_preferred();
+ if (cpu >= 0) {
+ node = numa_node_of_cpu(cpu);
+ if (node >= 0)
+ numa_set_preferred(node);
+ }
+ if (cpu < 0 || node < 0)
+ numa_set_localalloc();
+ }
+#endif /* HAVE_LIBNUMA */
+ switch (type) {
+ case LTTNG_COUNTER_SHM_OBJECT_SHM:
+ shm_object = _lttng_counter_shm_object_table_alloc_shm(table, memory_map_size,
+ cpu_fd);
+ break;
+ case LTTNG_COUNTER_SHM_OBJECT_MEM:
+ shm_object = _lttng_counter_shm_object_table_alloc_mem(table, memory_map_size);
+ break;
+	default:
+		assert(0);
+		shm_object = NULL;
+	}
+#ifdef HAVE_LIBNUMA
+ if (numa_avail)
+ numa_set_preferred(oldnode);
+#endif /* HAVE_LIBNUMA */
+ return shm_object;
+}
+
+struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table,
+ int shm_fd,
+ size_t memory_map_size)
+{
+ struct lttng_counter_shm_object *obj;
+ char *memory_map;
+
+ if (table->allocated_len >= table->size)
+ return NULL;
+
+ obj = &table->objects[table->allocated_len];
+
+ obj->shm_fd = shm_fd;
+ obj->shm_fd_ownership = 1;
+
+ /* memory_map: mmap */
+ memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | LTTNG_MAP_POPULATE, shm_fd, 0);
+ if (memory_map == MAP_FAILED) {
+ PERROR("mmap");
+ goto error_mmap;
+ }
+ obj->type = LTTNG_COUNTER_SHM_OBJECT_SHM;
+ obj->memory_map = memory_map;
+ obj->memory_map_size = memory_map_size;
+ obj->allocated_len = memory_map_size;
+ obj->index = table->allocated_len++;
+
+ return obj;
+
+error_mmap:
+ return NULL;
+}
+
+/*
+ * Passing ownership of mem to object.
+ */
+struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_mem(struct lttng_counter_shm_object_table *table,
+ void *mem, size_t memory_map_size)
+{
+ struct lttng_counter_shm_object *obj;
+
+ if (table->allocated_len >= table->size)
+ return NULL;
+ obj = &table->objects[table->allocated_len];
+
+ obj->shm_fd = -1;
+ obj->shm_fd_ownership = 0;
+
+ obj->type = LTTNG_COUNTER_SHM_OBJECT_MEM;
+ obj->memory_map = mem;
+ obj->memory_map_size = memory_map_size;
+ obj->allocated_len = memory_map_size;
+ obj->index = table->allocated_len++;
+
+ return obj;
+}
+
+static
+void lttng_counter_shmp_object_destroy(struct lttng_counter_shm_object *obj, int consumer)
+{
+ switch (obj->type) {
+ case LTTNG_COUNTER_SHM_OBJECT_SHM:
+ {
+ int ret;
+
+ ret = munmap(obj->memory_map, obj->memory_map_size);
+ if (ret) {
+			PERROR("munmap");
+ assert(0);
+ }
+
+ if (obj->shm_fd_ownership) {
+ /* Delete FDs only if called from app (not consumer). */
+ if (!consumer) {
+ lttng_ust_lock_fd_tracker();
+ ret = close(obj->shm_fd);
+ if (!ret) {
+ lttng_ust_delete_fd_from_tracker(obj->shm_fd);
+ } else {
+ PERROR("close");
+ assert(0);
+ }
+ lttng_ust_unlock_fd_tracker();
+ } else {
+ ret = close(obj->shm_fd);
+ if (ret) {
+ PERROR("close");
+ assert(0);
+ }
+ }
+ }
+ break;
+ }
+ case LTTNG_COUNTER_SHM_OBJECT_MEM:
+ {
+ free(obj->memory_map);
+ break;
+ }
+ default:
+ assert(0);
+ }
+}
+
+void lttng_counter_shm_object_table_destroy(struct lttng_counter_shm_object_table *table, int consumer)
+{
+	size_t i;
+
+ for (i = 0; i < table->allocated_len; i++)
+ lttng_counter_shmp_object_destroy(&table->objects[i], consumer);
+ free(table);
+}
+
+/*
+ * lttng_counter_zalloc_shm - allocate memory within a shm object.
+ *
+ * Shared memory is already zeroed at allocation (zero_file() or zmalloc()).
+ * *NOT* multithread-safe (should be protected by mutex).
+ * Returns a -1, -1 tuple on error.
+ */
+struct lttng_counter_shm_ref lttng_counter_zalloc_shm(struct lttng_counter_shm_object *obj, size_t len)
+{
+ struct lttng_counter_shm_ref ref;
+ struct lttng_counter_shm_ref shm_ref_error = { -1, -1 };
+
+ if (obj->memory_map_size - obj->allocated_len < len)
+ return shm_ref_error;
+ ref.index = obj->index;
+ ref.offset = obj->allocated_len;
+ obj->allocated_len += len;
+ return ref;
+}
+
+void lttng_counter_align_shm(struct lttng_counter_shm_object *obj, size_t align)
+{
+ size_t offset_len = offset_align(obj->allocated_len, align);
+ obj->allocated_len += offset_len;
+}
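+
+/*
+ * Usage sketch combining the two helpers above (hypothetical function,
+ * error handling abbreviated): align first, then carve the allocation out
+ * of the object. A { -1, -1 } ref signals an out-of-space failure.
+ *
+ *	static struct lttng_counter_shm_ref example_alloc_counters(
+ *			struct lttng_counter_shm_object *obj, size_t nr_elem)
+ *	{
+ *		lttng_counter_align_shm(obj, __alignof__(int64_t));
+ *		return lttng_counter_zalloc_shm(obj, nr_elem * sizeof(int64_t));
+ *	}
+ */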
--- /dev/null
+#ifndef _LIBCOUNTER_SHM_H
+#define _LIBCOUNTER_SHM_H
+
+/*
+ * libcounter/shm.h
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <usterr-signal-safe.h>
+#include <urcu/compiler.h>
+#include "shm_types.h"
+
+/* lttng_counter_handle_create - for UST. */
+extern
+struct lttng_counter_shm_handle *lttng_counter_handle_create(void *data,
+ uint64_t memory_map_size, int wakeup_fd);
+/* lttng_counter_handle_add_cpu - for UST. */
+extern
+int lttng_counter_handle_add_cpu(struct lttng_counter_shm_handle *handle,
+ int shm_fd, uint32_t cpu_nr,
+ uint64_t memory_map_size);
+unsigned int lttng_counter_handle_get_nr_cpus(struct lttng_counter_shm_handle *handle);
+
+/*
+ * Pointer dereferencing. We don't trust the shm_ref, so we validate
+ * both the index and offset with known boundaries.
+ *
+ * "shmp" and "shmp_index" guarantee that it's safe to use the pointer
+ * target type, even in the occurrence of shm_ref modification by an
+ * untrusted process having write access to the shm_ref. We return a
+ * NULL pointer if the ranges are invalid.
+ */
+static inline
+char *_lttng_counter_shmp_offset(struct lttng_counter_shm_object_table *table,
+ struct lttng_counter_shm_ref *ref,
+ size_t idx, size_t elem_size)
+{
+ struct lttng_counter_shm_object *obj;
+ size_t objindex, ref_offset;
+
+ objindex = (size_t) ref->index;
+ if (caa_unlikely(objindex >= table->allocated_len))
+ return NULL;
+ obj = &table->objects[objindex];
+ ref_offset = (size_t) ref->offset;
+ ref_offset += idx * elem_size;
+ /* Check if part of the element returned would exceed the limits. */
+ if (caa_unlikely(ref_offset + elem_size > obj->memory_map_size))
+ return NULL;
+ return &obj->memory_map[ref_offset];
+}
+
+#define lttng_counter_shmp_index(handle, ref, index) \
+ ({ \
+ __typeof__((ref)._type) ____ptr_ret; \
+ ____ptr_ret = (__typeof__(____ptr_ret)) _lttng_counter_shmp_offset((handle)->table, &(ref)._ref, index, sizeof(*____ptr_ret)); \
+ ____ptr_ret; \
+ })
+
+#define lttng_counter_shmp(handle, ref) lttng_counter_shmp_index(handle, ref, 0)
+
+static inline
+void _lttng_counter_set_shmp(struct lttng_counter_shm_ref *ref, struct lttng_counter_shm_ref src)
+{
+ *ref = src;
+}
+
+#define lttng_counter_set_shmp(ref, src) _lttng_counter_set_shmp(&(ref)._ref, src)
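+
+/*
+ * Hypothetical illustration of the macros above (names invented): declare a
+ * typed shm reference, then dereference it with bounds validation.
+ *
+ *	struct example_layout {
+ *		DECLARE_LTTNG_COUNTER_SHMP(int64_t, counters);
+ *	};
+ *
+ *	static int64_t example_load(struct lttng_counter_shm_handle *handle,
+ *			struct example_layout *layout, size_t idx)
+ *	{
+ *		int64_t *p;
+ *
+ *		p = lttng_counter_shmp_index(handle, layout->counters, idx);
+ *		return p ? *p : 0;
+ *	}
+ *
+ * lttng_counter_shmp_index() returns NULL when the ref index/offset escape
+ * the known boundaries.
+ */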
+
+struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj);
+struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table,
+ size_t memory_map_size,
+ enum lttng_counter_shm_object_type type,
+ const int cpu_fd,
+ int cpu);
+struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table,
+ int shm_fd, size_t memory_map_size);
+/* mem ownership is passed to lttng_counter_shm_object_table_append_mem(). */
+struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_mem(struct lttng_counter_shm_object_table *table,
+ void *mem, size_t memory_map_size);
+void lttng_counter_shm_object_table_destroy(struct lttng_counter_shm_object_table *table, int consumer);
+
+/*
+ * lttng_counter_zalloc_shm - allocate memory within a shm object.
+ *
+ * Shared memory is already zeroed at allocation (zero_file() or zmalloc()).
+ * *NOT* multithread-safe (should be protected by mutex).
+ * Returns a -1, -1 tuple on error.
+ */
+struct lttng_counter_shm_ref lttng_counter_zalloc_shm(struct lttng_counter_shm_object *obj, size_t len);
+void lttng_counter_align_shm(struct lttng_counter_shm_object *obj, size_t align);
+
+static inline
+int lttng_counter_shm_get_shm_fd(struct lttng_counter_shm_handle *handle, struct lttng_counter_shm_ref *ref)
+{
+ struct lttng_counter_shm_object_table *table = handle->table;
+ struct lttng_counter_shm_object *obj;
+ size_t index;
+
+ index = (size_t) ref->index;
+ if (caa_unlikely(index >= table->allocated_len))
+ return -EPERM;
+ obj = &table->objects[index];
+ return obj->shm_fd;
+}
+
+static inline
+int lttng_counter_shm_get_shm_size(struct lttng_counter_shm_handle *handle, struct lttng_counter_shm_ref *ref,
+ uint64_t *size)
+{
+ struct lttng_counter_shm_object_table *table = handle->table;
+ struct lttng_counter_shm_object *obj;
+ size_t index;
+
+ index = (size_t) ref->index;
+ if (caa_unlikely(index >= table->allocated_len))
+ return -EPERM;
+ obj = &table->objects[index];
+ *size = obj->memory_map_size;
+ return 0;
+}
+
+#endif /* _LIBCOUNTER_SHM_H */
--- /dev/null
+#ifndef _LIBCOUNTER_SHM_INTERNAL_H
+#define _LIBCOUNTER_SHM_INTERNAL_H
+
+/*
+ * libcounter/shm_internal.h
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+struct lttng_counter_shm_ref {
+ volatile ssize_t index; /* within the object table */
+ volatile ssize_t offset; /* within the object */
+};
+
+#define DECLARE_LTTNG_COUNTER_SHMP(type, name) \
+ union { \
+ struct lttng_counter_shm_ref _ref; \
+ type *_type; \
+ } name
+
+#endif /* _LIBCOUNTER_SHM_INTERNAL_H */
--- /dev/null
+#ifndef _LIBCOUNTER_SHM_TYPES_H
+#define _LIBCOUNTER_SHM_TYPES_H
+
+/*
+ * libcounter/shm_types.h
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+#include <stddef.h>
+#include <limits.h>
+#include "shm_internal.h"
+
+enum lttng_counter_shm_object_type {
+ LTTNG_COUNTER_SHM_OBJECT_SHM,
+ LTTNG_COUNTER_SHM_OBJECT_MEM,
+};
+
+struct lttng_counter_shm_object {
+ enum lttng_counter_shm_object_type type;
+ size_t index; /* within the object table */
+ int shm_fd; /* shm fd */
+ char *memory_map;
+ size_t memory_map_size;
+ uint64_t allocated_len;
+ int shm_fd_ownership;
+};
+
+struct lttng_counter_shm_object_table {
+ size_t size;
+ size_t allocated_len;
+ struct lttng_counter_shm_object objects[];
+};
+
+struct lttng_counter_shm_handle {
+ struct lttng_counter_shm_object_table *table;
+};
+
+#endif /* _LIBCOUNTER_SHM_TYPES_H */
--- /dev/null
+/*
+ * libcounter/smp.c
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define _GNU_SOURCE
+#define _LGPL_SOURCE
+#include <unistd.h>
+#include <pthread.h>
+#include "smp.h"
+
+int __lttng_counter_num_possible_cpus;
+
+#if (defined(__GLIBC__) || defined(__UCLIBC__))
+void _lttng_counter_get_num_possible_cpus(void)
+{
+ int result;
+
+	/*
+	 * On Linux, when some processors are offline, _SC_NPROCESSORS_CONF
+	 * counts the offline processors, whereas _SC_NPROCESSORS_ONLN does
+	 * not. If we used _SC_NPROCESSORS_ONLN, getcpu() could return a
+	 * value greater than this sysconf, in which case the arrays indexed
+	 * by processor would overflow.
+	 */
+ result = sysconf(_SC_NPROCESSORS_CONF);
+ if (result == -1)
+ return;
+ __lttng_counter_num_possible_cpus = result;
+}
+
+#else
+
+/*
+ * The MUSL libc implementation of the _SC_NPROCESSORS_CONF sysconf does not
+ * return the number of configured CPUs in the system but relies on the cpu
+ * affinity mask of the current task.
+ *
+ * So instead we use a strategy similar to GLIBC's: count the cpu
+ * directories in "/sys/devices/system/cpu" and fall back on the value
+ * from sysconf if that fails.
+ */
+
+#include <dirent.h>
+#include <limits.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+
+#define __max(a,b) ((a)>(b)?(a):(b))
+
+void _lttng_counter_get_num_possible_cpus(void)
+{
+ int result, count = 0;
+ DIR *cpudir;
+ struct dirent *entry;
+
+ cpudir = opendir("/sys/devices/system/cpu");
+ if (cpudir == NULL)
+ goto end;
+
+ /*
+	 * Count the number of directories named "cpu" followed by an
+ * integer. This is the same strategy as glibc uses.
+ */
+ while ((entry = readdir(cpudir))) {
+ if (entry->d_type == DT_DIR &&
+ strncmp(entry->d_name, "cpu", 3) == 0) {
+
+ char *endptr;
+ unsigned long cpu_num;
+
+ cpu_num = strtoul(entry->d_name + 3, &endptr, 10);
+ if ((cpu_num < ULONG_MAX) && (endptr != entry->d_name + 3)
+ && (*endptr == '\0')) {
+ count++;
+ }
+ }
+ }
+
+end:
+ /*
+ * Get the sysconf value as a fallback. Keep the highest number.
+ */
+ result = __max(sysconf(_SC_NPROCESSORS_CONF), count);
+
+ /*
+ * If both methods failed, don't store the value.
+ */
+ if (result < 1)
+ return;
+ __lttng_counter_num_possible_cpus = result;
+}
+#endif
--- /dev/null
+#ifndef _LIBCOUNTER_SMP_H
+#define _LIBCOUNTER_SMP_H
+
+/*
+ * libcounter/smp.h
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * 4kB of per-cpu data available.
+ */
+#define LTTNG_COUNTER_PER_CPU_MEM_SIZE 4096
+
+extern int __lttng_counter_num_possible_cpus;
+extern void _lttng_counter_get_num_possible_cpus(void);
+
+static inline
+int lttng_counter_num_possible_cpus(void)
+{
+ if (!__lttng_counter_num_possible_cpus)
+ _lttng_counter_get_num_possible_cpus();
+ return __lttng_counter_num_possible_cpus;
+}
+
+#define lttng_counter_for_each_possible_cpu(cpu) \
+ for ((cpu) = 0; (cpu) < lttng_counter_num_possible_cpus(); (cpu)++)
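+
+/*
+ * Trivial usage sketch of the iteration helper above:
+ *
+ *	static int example_count_possible_cpus(void)
+ *	{
+ *		int cpu, n = 0;
+ *
+ *		lttng_counter_for_each_possible_cpu(cpu)
+ *			n++;
+ *		return n;
+ *	}
+ *
+ * where n ends up equal to lttng_counter_num_possible_cpus().
+ */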
+
+#endif /* _LIBCOUNTER_SMP_H */
return len;
}
+ssize_t ustcomm_recv_trigger_notif_fd_from_sessiond(int sock,
+ int *_trigger_notif_fd)
+{
+ ssize_t nr_fd;
+ int trigger_notif_fd, ret;
+
+ /* Receive trigger notification fd */
+ lttng_ust_lock_fd_tracker();
+ nr_fd = ustcomm_recv_fds_unix_sock(sock, &trigger_notif_fd, 1);
+ if (nr_fd <= 0) {
+ lttng_ust_unlock_fd_tracker();
+ if (nr_fd < 0) {
+ ret = nr_fd;
+ goto error;
+ } else {
+ ret = -EIO;
+ goto error;
+ }
+ }
+
+ ret = lttng_ust_add_fd_to_tracker(trigger_notif_fd);
+ if (ret < 0) {
+ ret = close(trigger_notif_fd);
+ if (ret) {
+ PERROR("close on trigger notif fd");
+ }
+ ret = -EIO;
+ lttng_ust_unlock_fd_tracker();
+ goto error;
+ }
+
+ *_trigger_notif_fd = ret;
+ lttng_ust_unlock_fd_tracker();
+
+ ret = nr_fd;
+
+error:
+ return ret;
+}
+
int ustcomm_recv_stream_from_sessiond(int sock,
uint64_t *memory_map_size,
int *shm_fd, int *wakeup_fd)
return ret;
}
+ssize_t ustcomm_recv_counter_from_sessiond(int sock,
+ void **_counter_data, uint64_t var_len)
+{
+ void *counter_data;
+ ssize_t len;
+
+ if (var_len > LTTNG_UST_COUNTER_DATA_MAX_LEN) {
+ len = -EINVAL;
+ goto error_check;
+ }
+ /* Receive variable length data */
+ counter_data = zmalloc(var_len);
+ if (!counter_data) {
+ len = -ENOMEM;
+ goto error_alloc;
+ }
+ len = ustcomm_recv_unix_sock(sock, counter_data, var_len);
+	if (len != (ssize_t) var_len) {
+		if (len >= 0)
+			len = -EIO;
+		goto error_recv;
+	}
+ *_counter_data = counter_data;
+ return len;
+
+error_recv:
+ free(counter_data);
+error_alloc:
+error_check:
+ return len;
+}
+
+int ustcomm_recv_counter_shm_from_sessiond(int sock,
+ int *shm_fd)
+{
+ ssize_t len;
+ int ret;
+ int fds[1];
+
+	/* Receive the shm fd. */
+ lttng_ust_lock_fd_tracker();
+ len = ustcomm_recv_fds_unix_sock(sock, fds, 1);
+ if (len <= 0) {
+ lttng_ust_unlock_fd_tracker();
+ if (len < 0) {
+ ret = len;
+ goto error;
+ } else {
+ ret = -EIO;
+ goto error;
+ }
+ }
+
+ ret = lttng_ust_add_fd_to_tracker(fds[0]);
+ if (ret < 0) {
+ ret = close(fds[0]);
+ if (ret) {
+ PERROR("close on received shm_fd");
+ }
+ ret = -EIO;
+ lttng_ust_unlock_fd_tracker();
+ goto error;
+ }
+ *shm_fd = ret;
+ lttng_ust_unlock_fd_tracker();
+ return 0;
+
+error:
+ return ret;
+}
+
/*
* Returns 0 on success, negative error value on error.
*/
#include "../liblttng-ust/clock.h"
#include "../liblttng-ust/getenv.h"
+#include "../libcounter/shm.h"
+#include "../libcounter/smp.h"
+#include "../libcounter/counter.h"
+
/*
* Number of milliseconds to retry before failing metadata writes on
* buffer full condition. (10 seconds)
uint64_t memory_map_size;
};
+#define USTCTL_COUNTER_ATTR_DIMENSION_MAX 8
+struct ustctl_counter_attr {
+ enum ustctl_counter_arithmetic arithmetic;
+ enum ustctl_counter_bitness bitness;
+ uint32_t nr_dimensions;
+ int64_t global_sum_step;
+ struct ustctl_counter_dimension dimensions[USTCTL_COUNTER_ATTR_DIMENSION_MAX];
+};
+
+/*
+ * Counter representation within daemon.
+ */
+struct ustctl_daemon_counter {
+ struct lib_counter *counter;
+ const struct lttng_counter_ops *ops;
+ struct ustctl_counter_attr *attr; /* initial attributes */
+};
+
extern void lttng_ring_buffer_client_overwrite_init(void);
extern void lttng_ring_buffer_client_overwrite_rt_init(void);
extern void lttng_ring_buffer_client_discard_init(void);
extern void lttng_ring_buffer_client_discard_exit(void);
extern void lttng_ring_buffer_client_discard_rt_exit(void);
extern void lttng_ring_buffer_metadata_client_exit(void);
+extern void lttng_counter_client_percpu_32_overflow_init(void);
+extern void lttng_counter_client_percpu_32_overflow_exit(void);
+extern void lttng_counter_client_percpu_64_overflow_init(void);
+extern void lttng_counter_client_percpu_64_overflow_exit(void);
int ustctl_release_handle(int sock, int handle)
{
ret = -errno;
return ret;
}
+ data->u.channel.wakeup_fd = -1;
}
free(data->u.channel.data);
+ data->u.channel.data = NULL;
break;
case LTTNG_UST_OBJECT_TYPE_STREAM:
if (data->u.stream.shm_fd >= 0) {
ret = -errno;
return ret;
}
+ data->u.stream.shm_fd = -1;
}
if (data->u.stream.wakeup_fd >= 0) {
ret = close(data->u.stream.wakeup_fd);
ret = -errno;
return ret;
}
+ data->u.stream.wakeup_fd = -1;
}
break;
case LTTNG_UST_OBJECT_TYPE_EVENT:
case LTTNG_UST_OBJECT_TYPE_CONTEXT:
+ case LTTNG_UST_OBJECT_TYPE_TRIGGER_GROUP:
+ case LTTNG_UST_OBJECT_TYPE_TRIGGER:
+ break;
+ case LTTNG_UST_OBJECT_TYPE_COUNTER:
+ free(data->u.counter.data);
+ data->u.counter.data = NULL;
+ break;
+ case LTTNG_UST_OBJECT_TYPE_COUNTER_GLOBAL:
+ if (data->u.counter_global.shm_fd >= 0) {
+ ret = close(data->u.counter_global.shm_fd);
+ if (ret < 0) {
+ ret = -errno;
+ return ret;
+ }
+ data->u.counter_global.shm_fd = -1;
+ }
+ break;
+ case LTTNG_UST_OBJECT_TYPE_COUNTER_CPU:
+ if (data->u.counter_cpu.shm_fd >= 0) {
+ ret = close(data->u.counter_cpu.shm_fd);
+ if (ret < 0) {
+ ret = -errno;
+ return ret;
+ }
+ data->u.counter_cpu.shm_fd = -1;
+ }
break;
default:
assert(0);
return ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
}
+int ustctl_set_capture(int sock, struct lttng_ust_capture_bytecode *bytecode,
+ struct lttng_ust_object_data *obj_data)
+{
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ int ret;
+
+ if (!obj_data)
+ return -EINVAL;
+
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = obj_data->handle;
+ lum.cmd = LTTNG_UST_CAPTURE;
+ lum.u.capture.data_size = bytecode->len;
+ lum.u.capture.reloc_offset = bytecode->reloc_offset;
+ lum.u.capture.seqnum = bytecode->seqnum;
+
+ ret = ustcomm_send_app_msg(sock, &lum);
+ if (ret)
+ return ret;
+ /* send var len bytecode */
+ ret = ustcomm_send_unix_sock(sock, bytecode->data,
+ bytecode->len);
+ if (ret < 0) {
+ return ret;
+ }
+ if (ret != bytecode->len)
+ return -EINVAL;
+ return ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
+}
+
int ustctl_set_exclusion(int sock, struct lttng_ust_event_exclusion *exclusion,
struct lttng_ust_object_data *obj_data)
{
return ustctl_disable(sock, &obj);
}
+int ustctl_create_trigger_group(int sock, int pipe_fd,
+ struct lttng_ust_object_data **_trigger_group_data)
+{
+ struct lttng_ust_object_data *trigger_group_data;
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ ssize_t len;
+ int ret;
+
+ if (!_trigger_group_data)
+ return -EINVAL;
+
+ trigger_group_data = zmalloc(sizeof(*trigger_group_data));
+ if (!trigger_group_data)
+ return -ENOMEM;
+
+ trigger_group_data->type = LTTNG_UST_OBJECT_TYPE_TRIGGER_GROUP;
+
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = LTTNG_UST_ROOT_HANDLE;
+ lum.cmd = LTTNG_UST_TRIGGER_GROUP_CREATE;
+
+ ret = ustcomm_send_app_msg(sock, &lum);
+ if (ret)
+ goto error;
+
+ /* Send trigger notification pipe. */
+ len = ustcomm_send_fds_unix_sock(sock, &pipe_fd, 1);
+	if (len <= 0) {
+		if (len < 0)
+			ret = len;
+		else
+			ret = -EIO;
+		goto error;
+	}
+
+ ret = ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
+ if (ret)
+ goto error;
+
+ trigger_group_data->handle = lur.ret_val;
+ DBG("received trigger group handle %d", trigger_group_data->handle);
+
+ *_trigger_group_data = trigger_group_data;
+
+ ret = 0;
+ goto end;
+error:
+ free(trigger_group_data);
+
+end:
+ return ret;
+}
+
+int ustctl_create_trigger(int sock, struct lttng_ust_trigger *trigger,
+ struct lttng_ust_object_data *trigger_group,
+ struct lttng_ust_object_data **_trigger_data)
+{
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ struct lttng_ust_object_data *trigger_data;
+ int ret;
+
+ if (!trigger_group || !_trigger_data)
+ return -EINVAL;
+
+ trigger_data = zmalloc(sizeof(*trigger_data));
+ if (!trigger_data)
+ return -ENOMEM;
+
+ trigger_data->type = LTTNG_UST_OBJECT_TYPE_TRIGGER;
+
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = trigger_group->handle;
+ lum.cmd = LTTNG_UST_TRIGGER_CREATE;
+
+	strncpy(lum.u.trigger.name, trigger->name,
+			LTTNG_UST_SYM_NAME_LEN);
+	lum.u.trigger.name[LTTNG_UST_SYM_NAME_LEN - 1] = '\0';
+ lum.u.trigger.instrumentation = trigger->instrumentation;
+ lum.u.trigger.loglevel_type = trigger->loglevel_type;
+ lum.u.trigger.loglevel = trigger->loglevel;
+ lum.u.trigger.id = trigger->id;
+ ret = ustcomm_send_app_cmd(sock, &lum, &lur);
+ if (ret) {
+ free(trigger_data);
+ return ret;
+ }
+ trigger_data->handle = lur.ret_val;
+	DBG("received trigger handle %d", trigger_data->handle);
+ *_trigger_data = trigger_data;
+
+ return ret;
+}
+
int ustctl_tracepoint_list(int sock)
{
struct ustcomm_ust_msg lum;
goto error_type;
}
+ case LTTNG_UST_OBJECT_TYPE_COUNTER:
+ {
+ obj->u.counter.data = zmalloc(obj->size);
+ if (!obj->u.counter.data) {
+ ret = -ENOMEM;
+ goto error_type;
+ }
+ memcpy(obj->u.counter.data, src->u.counter.data, obj->size);
+ break;
+ }
+
+ case LTTNG_UST_OBJECT_TYPE_COUNTER_GLOBAL:
+ {
+ if (src->u.counter_global.shm_fd >= 0) {
+ obj->u.counter_global.shm_fd =
+ dup(src->u.counter_global.shm_fd);
+ if (obj->u.counter_global.shm_fd < 0) {
+				ret = -errno;
+ goto error_type;
+ }
+ }
+ break;
+ }
+
+ case LTTNG_UST_OBJECT_TYPE_COUNTER_CPU:
+ {
+ obj->u.counter_cpu.cpu_nr = src->u.counter_cpu.cpu_nr;
+ if (src->u.counter_cpu.shm_fd >= 0) {
+ obj->u.counter_cpu.shm_fd =
+ dup(src->u.counter_cpu.shm_fd);
+ if (obj->u.counter_cpu.shm_fd < 0) {
+				ret = -errno;
+ goto error_type;
+ }
+ }
+ break;
+ }
+
default:
ret = -EINVAL;
goto error_type;
return 0;
}
+/* counter operations */
+
+int ustctl_get_nr_cpu_per_counter(void)
+{
+ return lttng_counter_num_possible_cpus();
+}
+
+struct ustctl_daemon_counter *
+ ustctl_create_counter(size_t nr_dimensions,
+ const struct ustctl_counter_dimension *dimensions,
+ int64_t global_sum_step,
+ int global_counter_fd,
+ int nr_counter_cpu_fds,
+ const int *counter_cpu_fds,
+ enum ustctl_counter_bitness bitness,
+ enum ustctl_counter_arithmetic arithmetic,
+ uint32_t alloc_flags)
+{
+ const char *transport_name;
+ struct ustctl_daemon_counter *counter;
+ struct lttng_counter_transport *transport;
+ struct lttng_counter_dimension ust_dim[LTTNG_COUNTER_DIMENSION_MAX];
+ size_t i;
+
+ if (nr_dimensions > LTTNG_COUNTER_DIMENSION_MAX)
+ return NULL;
+ /* Currently, only per-cpu allocation is supported. */
+ switch (alloc_flags) {
+ case USTCTL_COUNTER_ALLOC_PER_CPU:
+ break;
+
+ case USTCTL_COUNTER_ALLOC_PER_CPU | USTCTL_COUNTER_ALLOC_GLOBAL:
+ case USTCTL_COUNTER_ALLOC_GLOBAL:
+ default:
+ return NULL;
+ }
+ switch (bitness) {
+ case USTCTL_COUNTER_BITNESS_32:
+ switch (arithmetic) {
+ case USTCTL_COUNTER_ARITHMETIC_MODULAR:
+ transport_name = "counter-per-cpu-32-modular";
+ break;
+ case USTCTL_COUNTER_ARITHMETIC_SATURATION:
+ transport_name = "counter-per-cpu-32-saturation";
+ break;
+ default:
+ return NULL;
+ }
+ break;
+ case USTCTL_COUNTER_BITNESS_64:
+ switch (arithmetic) {
+ case USTCTL_COUNTER_ARITHMETIC_MODULAR:
+ transport_name = "counter-per-cpu-64-modular";
+ break;
+ case USTCTL_COUNTER_ARITHMETIC_SATURATION:
+ transport_name = "counter-per-cpu-64-saturation";
+ break;
+ default:
+ return NULL;
+ }
+ break;
+ default:
+ return NULL;
+ }
+
+ transport = lttng_counter_transport_find(transport_name);
+ if (!transport) {
+ DBG("LTTng transport %s not found\n",
+ transport_name);
+ return NULL;
+ }
+
+ counter = zmalloc(sizeof(*counter));
+ if (!counter)
+ return NULL;
+ counter->attr = zmalloc(sizeof(*counter->attr));
+ if (!counter->attr)
+ goto free_counter;
+ counter->attr->bitness = bitness;
+ counter->attr->arithmetic = arithmetic;
+ counter->attr->nr_dimensions = nr_dimensions;
+ counter->attr->global_sum_step = global_sum_step;
+ for (i = 0; i < nr_dimensions; i++)
+ counter->attr->dimensions[i] = dimensions[i];
+
+ for (i = 0; i < nr_dimensions; i++) {
+ ust_dim[i].size = dimensions[i].size;
+ ust_dim[i].underflow_index = dimensions[i].underflow_index;
+ ust_dim[i].overflow_index = dimensions[i].overflow_index;
+ ust_dim[i].has_underflow = dimensions[i].has_underflow;
+ ust_dim[i].has_overflow = dimensions[i].has_overflow;
+ }
+ counter->counter = transport->ops.counter_create(nr_dimensions,
+ ust_dim, global_sum_step, global_counter_fd,
+ nr_counter_cpu_fds, counter_cpu_fds, true);
+ if (!counter->counter)
+ goto free_attr;
+ counter->ops = &transport->ops;
+ return counter;
+
+free_attr:
+ free(counter->attr);
+free_counter:
+ free(counter);
+ return NULL;
+}
+
+int ustctl_create_counter_data(struct ustctl_daemon_counter *counter,
+ struct lttng_ust_object_data **_counter_data)
+{
+ struct lttng_ust_object_data *counter_data;
+ struct lttng_ust_counter_conf counter_conf;
+ size_t i;
+ int ret;
+
+	memset(&counter_conf, 0, sizeof(counter_conf));
+ switch (counter->attr->arithmetic) {
+ case USTCTL_COUNTER_ARITHMETIC_MODULAR:
+ counter_conf.arithmetic = LTTNG_UST_COUNTER_ARITHMETIC_MODULAR;
+ break;
+ case USTCTL_COUNTER_ARITHMETIC_SATURATION:
+ counter_conf.arithmetic = LTTNG_UST_COUNTER_ARITHMETIC_SATURATION;
+ break;
+ default:
+ return -EINVAL;
+ }
+ switch (counter->attr->bitness) {
+ case USTCTL_COUNTER_BITNESS_32:
+ counter_conf.bitness = LTTNG_UST_COUNTER_BITNESS_32BITS;
+ break;
+ case USTCTL_COUNTER_BITNESS_64:
+ counter_conf.bitness = LTTNG_UST_COUNTER_BITNESS_64BITS;
+ break;
+ default:
+ return -EINVAL;
+ }
+ counter_conf.number_dimensions = counter->attr->nr_dimensions;
+ counter_conf.global_sum_step = counter->attr->global_sum_step;
+ for (i = 0; i < counter->attr->nr_dimensions; i++) {
+ counter_conf.dimensions[i].size = counter->attr->dimensions[i].size;
+ counter_conf.dimensions[i].underflow_index = counter->attr->dimensions[i].underflow_index;
+ counter_conf.dimensions[i].overflow_index = counter->attr->dimensions[i].overflow_index;
+ counter_conf.dimensions[i].has_underflow = counter->attr->dimensions[i].has_underflow;
+ counter_conf.dimensions[i].has_overflow = counter->attr->dimensions[i].has_overflow;
+ }
+
+ counter_data = zmalloc(sizeof(*counter_data));
+ if (!counter_data) {
+ ret = -ENOMEM;
+ goto error_alloc;
+ }
+ counter_data->type = LTTNG_UST_OBJECT_TYPE_COUNTER;
+ counter_data->handle = -1;
+
+ counter_data->size = sizeof(counter_conf);
+ counter_data->u.counter.data = zmalloc(sizeof(counter_conf));
+ if (!counter_data->u.counter.data) {
+ ret = -ENOMEM;
+ goto error_alloc_data;
+ }
+
+ memcpy(counter_data->u.counter.data, &counter_conf, sizeof(counter_conf));
+ *_counter_data = counter_data;
+
+ return 0;
+
+error_alloc_data:
+ free(counter_data);
+error_alloc:
+ return ret;
+}
+
+int ustctl_create_counter_global_data(struct ustctl_daemon_counter *counter,
+ struct lttng_ust_object_data **_counter_global_data)
+{
+ struct lttng_ust_object_data *counter_global_data;
+ int ret, fd;
+ size_t len;
+
+ if (lttng_counter_get_global_shm(counter->counter, &fd, &len))
+ return -EINVAL;
+ counter_global_data = zmalloc(sizeof(*counter_global_data));
+ if (!counter_global_data) {
+ ret = -ENOMEM;
+ goto error_alloc;
+ }
+ counter_global_data->type = LTTNG_UST_OBJECT_TYPE_COUNTER_GLOBAL;
+ counter_global_data->handle = -1;
+ counter_global_data->size = len;
+ counter_global_data->u.counter_global.shm_fd = fd;
+ *_counter_global_data = counter_global_data;
+ return 0;
+
+error_alloc:
+ return ret;
+}
+
+int ustctl_create_counter_cpu_data(struct ustctl_daemon_counter *counter, int cpu,
+ struct lttng_ust_object_data **_counter_cpu_data)
+{
+ struct lttng_ust_object_data *counter_cpu_data;
+ int ret, fd;
+ size_t len;
+
+ if (lttng_counter_get_cpu_shm(counter->counter, cpu, &fd, &len))
+ return -EINVAL;
+ counter_cpu_data = zmalloc(sizeof(*counter_cpu_data));
+ if (!counter_cpu_data) {
+ ret = -ENOMEM;
+ goto error_alloc;
+ }
+ counter_cpu_data->type = LTTNG_UST_OBJECT_TYPE_COUNTER_CPU;
+ counter_cpu_data->handle = -1;
+ counter_cpu_data->size = len;
+ counter_cpu_data->u.counter_cpu.shm_fd = fd;
+ counter_cpu_data->u.counter_cpu.cpu_nr = cpu;
+ *_counter_cpu_data = counter_cpu_data;
+ return 0;
+
+error_alloc:
+ return ret;
+}
+
+void ustctl_destroy_counter(struct ustctl_daemon_counter *counter)
+{
+ counter->ops->counter_destroy(counter->counter);
+ free(counter->attr);
+ free(counter);
+}
+
+int ustctl_send_counter_data_to_ust(int sock, int parent_handle,
+ struct lttng_ust_object_data *counter_data)
+{
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ int ret;
+ size_t size;
+ ssize_t len;
+
+ if (!counter_data)
+ return -EINVAL;
+
+ size = counter_data->size;
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = parent_handle;
+ lum.cmd = LTTNG_UST_COUNTER;
+ lum.u.counter.len = size;
+ ret = ustcomm_send_app_msg(sock, &lum);
+ if (ret)
+ return ret;
+
+ /* Send counter data */
+ len = ustcomm_send_unix_sock(sock, counter_data->u.counter.data, size);
+ if (len != size) {
+ if (len < 0)
+ return len;
+ else
+ return -EIO;
+ }
+
+ ret = ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
+ if (!ret) {
+ counter_data->handle = lur.ret_val;
+ }
+ return ret;
+}
+
+int ustctl_send_counter_global_data_to_ust(int sock,
+ struct lttng_ust_object_data *counter_data,
+ struct lttng_ust_object_data *counter_global_data)
+{
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ int ret, shm_fd[1];
+ size_t size;
+ ssize_t len;
+
+ if (!counter_data || !counter_global_data)
+ return -EINVAL;
+
+ size = counter_global_data->size;
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = counter_data->handle; /* parent handle */
+ lum.cmd = LTTNG_UST_COUNTER_GLOBAL;
+ lum.u.counter_global.len = size;
+ ret = ustcomm_send_app_msg(sock, &lum);
+ if (ret)
+ return ret;
+
+ shm_fd[0] = counter_global_data->u.counter_global.shm_fd;
+ len = ustcomm_send_fds_unix_sock(sock, shm_fd, 1);
+ if (len <= 0) {
+ if (len < 0)
+ return len;
+ else
+ return -EIO;
+ }
+
+ ret = ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
+ if (!ret) {
+ counter_global_data->handle = lur.ret_val;
+ }
+ return ret;
+}
+
+int ustctl_send_counter_cpu_data_to_ust(int sock,
+ struct lttng_ust_object_data *counter_data,
+ struct lttng_ust_object_data *counter_cpu_data)
+{
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ int ret, shm_fd[1];
+ size_t size;
+ ssize_t len;
+
+ if (!counter_data || !counter_cpu_data)
+ return -EINVAL;
+
+ size = counter_cpu_data->size;
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = counter_data->handle; /* parent handle */
+ lum.cmd = LTTNG_UST_COUNTER_CPU;
+ lum.u.counter_cpu.len = size;
+ lum.u.counter_cpu.cpu_nr = counter_cpu_data->u.counter_cpu.cpu_nr;
+ ret = ustcomm_send_app_msg(sock, &lum);
+ if (ret)
+ return ret;
+
+	shm_fd[0] = counter_cpu_data->u.counter_cpu.shm_fd;
+ len = ustcomm_send_fds_unix_sock(sock, shm_fd, 1);
+ if (len <= 0) {
+ if (len < 0)
+ return len;
+ else
+ return -EIO;
+ }
+
+ ret = ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
+ if (!ret) {
+ counter_cpu_data->handle = lur.ret_val;
+ }
+ return ret;
+}
+
+int ustctl_counter_read(struct ustctl_daemon_counter *counter,
+ const size_t *dimension_indexes,
+ int cpu, int64_t *value,
+ bool *overflow, bool *underflow)
+{
+ return counter->ops->counter_read(counter->counter, dimension_indexes, cpu,
+ value, overflow, underflow);
+}
+
+int ustctl_counter_aggregate(struct ustctl_daemon_counter *counter,
+ const size_t *dimension_indexes,
+ int64_t *value,
+ bool *overflow, bool *underflow)
+{
+ return counter->ops->counter_aggregate(counter->counter, dimension_indexes,
+ value, overflow, underflow);
+}
+
+int ustctl_counter_clear(struct ustctl_daemon_counter *counter,
+ const size_t *dimension_indexes)
+{
+ return counter->ops->counter_clear(counter->counter, dimension_indexes);
+}
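+
+/*
+ * Rough session-daemon-side flow tying the counter operations together
+ * (illustration only: hypothetical names, error handling elided, and the
+ * NULL counter_cpu_fds argument only shows the call shape; real callers
+ * provide one shm fd per possible cpu):
+ *
+ *	static int example_setup_counter(int sock, int parent_handle)
+ *	{
+ *		struct ustctl_counter_dimension dim = { .size = 128 };
+ *		struct ustctl_daemon_counter *counter;
+ *		struct lttng_ust_object_data *counter_data = NULL;
+ *		int ret;
+ *
+ *		counter = ustctl_create_counter(1, &dim, 0, -1,
+ *				ustctl_get_nr_cpu_per_counter(), NULL,
+ *				USTCTL_COUNTER_BITNESS_32,
+ *				USTCTL_COUNTER_ARITHMETIC_MODULAR,
+ *				USTCTL_COUNTER_ALLOC_PER_CPU);
+ *		if (!counter)
+ *			return -1;
+ *		ret = ustctl_create_counter_data(counter, &counter_data);
+ *		if (!ret)
+ *			ret = ustctl_send_counter_data_to_ust(sock,
+ *					parent_handle, counter_data);
+ *		ustctl_destroy_counter(counter);
+ *		return ret;
+ *	}
+ */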
+
static __attribute__((constructor))
void ustctl_init(void)
{
lttng_ring_buffer_client_overwrite_rt_init();
lttng_ring_buffer_client_discard_init();
lttng_ring_buffer_client_discard_rt_init();
+ lttng_counter_client_percpu_32_overflow_init();
+ lttng_counter_client_percpu_64_overflow_init();
lib_ringbuffer_signal_init();
}
lttng_ring_buffer_client_overwrite_rt_exit();
lttng_ring_buffer_client_overwrite_exit();
lttng_ring_buffer_metadata_client_exit();
+ lttng_counter_client_percpu_32_overflow_exit();
+ lttng_counter_client_percpu_64_overflow_exit();
}
liblttng_ust_tracepoint_la_CFLAGS = -DUST_COMPONENT="liblttng_ust_tracepoint" $(AM_CFLAGS)
liblttng_ust_runtime_la_SOURCES = \
+ bytecode.h \
lttng-ust-comm.c \
lttng-ust-abi.c \
lttng-probes.c \
+ lttng-bytecode.c \
+ lttng-bytecode.h \
+ lttng-bytecode-validator.c \
+ lttng-bytecode-specialize.c \
+ lttng-bytecode-interpreter.c \
lttng-context-provider.c \
lttng-context-vtid.c \
lttng-context-vpid.c \
lttng-context-vsgid.c \
lttng-context.c \
lttng-events.c \
- lttng-filter.c \
- lttng-filter.h \
- lttng-filter-validator.c \
- lttng-filter-specialize.c \
- lttng-filter-interpreter.c \
- filter-bytecode.h \
lttng-hash-helper.h \
lttng-ust-elf.c \
lttng-ust-statedump.c \
lttng-ust-statedump-provider.h \
ust_lib.c \
ust_lib.h \
+ context-internal.h \
+ context-provider-internal.h \
tracepoint-internal.h \
+ ust-events-internal.h \
clock.h \
compat.h \
wait.h \
getenv.h \
string-utils.c \
string-utils.h \
+ trigger-notification.c \
ns.h \
creds.h
lttng-ring-buffer-client-overwrite-rt.c \
lttng-ring-buffer-metadata-client.h \
lttng-ring-buffer-metadata-client.c \
+ lttng-counter-client-percpu-32-modular.c \
+ lttng-counter-client-percpu-64-modular.c \
lttng-clock.c lttng-getcpu.c
liblttng_ust_la_SOURCES =
liblttng_ust_la_LDFLAGS = -no-undefined -version-info $(LTTNG_UST_LIBRARY_VERSION)
liblttng_ust_support_la_LIBADD = \
- $(top_builddir)/libringbuffer/libringbuffer.la
+ $(top_builddir)/libringbuffer/libringbuffer.la \
+ $(top_builddir)/libcounter/libcounter.la
liblttng_ust_la_LIBADD = \
-lpthread \
$(top_builddir)/liblttng-ust-comm/liblttng-ust-comm.la \
liblttng-ust-tracepoint.la \
liblttng-ust-runtime.la liblttng-ust-support.la \
+ $(top_builddir)/libmsgpack/libmsgpack.la \
$(DL_LIBS)
liblttng_ust_la_CFLAGS = -DUST_COMPONENT="liblttng_ust" $(AM_CFLAGS)
--- /dev/null
+#ifndef _BYTECODE_H
+#define _BYTECODE_H
+
+/*
+ * bytecode.h
+ *
+ * LTTng bytecode
+ *
+ * Copyright 2012-2016 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <lttng/ust-abi.h>
+
+#ifndef LTTNG_PACKED
+#error "LTTNG_PACKED should be defined"
+#endif
+
+/*
+ * offsets are absolute from start of bytecode.
+ */
+
+struct field_ref {
+ /* Initially, symbol offset. After link, field offset. */
+ uint16_t offset;
+} __attribute__((packed));
+
+struct get_symbol {
+ /* Symbol offset. */
+ uint16_t offset;
+} LTTNG_PACKED;
+
+struct get_index_u16 {
+ uint16_t index;
+} LTTNG_PACKED;
+
+struct get_index_u64 {
+ uint64_t index;
+} LTTNG_PACKED;
+
+struct literal_numeric {
+ int64_t v;
+} __attribute__((packed));
+
+struct literal_double {
+ double v;
+} __attribute__((packed));
+
+struct literal_string {
+ char string[0];
+} __attribute__((packed));
+
+enum bytecode_op {
+ BYTECODE_OP_UNKNOWN = 0,
+
+ BYTECODE_OP_RETURN = 1,
+
+ /* binary */
+ BYTECODE_OP_MUL = 2,
+ BYTECODE_OP_DIV = 3,
+ BYTECODE_OP_MOD = 4,
+ BYTECODE_OP_PLUS = 5,
+ BYTECODE_OP_MINUS = 6,
+ BYTECODE_OP_BIT_RSHIFT = 7,
+ BYTECODE_OP_BIT_LSHIFT = 8,
+ BYTECODE_OP_BIT_AND = 9,
+ BYTECODE_OP_BIT_OR = 10,
+ BYTECODE_OP_BIT_XOR = 11,
+
+ /* binary comparators */
+ BYTECODE_OP_EQ = 12,
+ BYTECODE_OP_NE = 13,
+ BYTECODE_OP_GT = 14,
+ BYTECODE_OP_LT = 15,
+ BYTECODE_OP_GE = 16,
+ BYTECODE_OP_LE = 17,
+
+	/* string binary comparators */
+ BYTECODE_OP_EQ_STRING = 18,
+ BYTECODE_OP_NE_STRING = 19,
+ BYTECODE_OP_GT_STRING = 20,
+ BYTECODE_OP_LT_STRING = 21,
+ BYTECODE_OP_GE_STRING = 22,
+ BYTECODE_OP_LE_STRING = 23,
+
+ /* s64 binary comparator */
+ BYTECODE_OP_EQ_S64 = 24,
+ BYTECODE_OP_NE_S64 = 25,
+ BYTECODE_OP_GT_S64 = 26,
+ BYTECODE_OP_LT_S64 = 27,
+ BYTECODE_OP_GE_S64 = 28,
+ BYTECODE_OP_LE_S64 = 29,
+
+ /* double binary comparator */
+ BYTECODE_OP_EQ_DOUBLE = 30,
+ BYTECODE_OP_NE_DOUBLE = 31,
+ BYTECODE_OP_GT_DOUBLE = 32,
+ BYTECODE_OP_LT_DOUBLE = 33,
+ BYTECODE_OP_GE_DOUBLE = 34,
+ BYTECODE_OP_LE_DOUBLE = 35,
+
+ /* Mixed S64-double binary comparators */
+ BYTECODE_OP_EQ_DOUBLE_S64 = 36,
+ BYTECODE_OP_NE_DOUBLE_S64 = 37,
+ BYTECODE_OP_GT_DOUBLE_S64 = 38,
+ BYTECODE_OP_LT_DOUBLE_S64 = 39,
+ BYTECODE_OP_GE_DOUBLE_S64 = 40,
+ BYTECODE_OP_LE_DOUBLE_S64 = 41,
+
+ BYTECODE_OP_EQ_S64_DOUBLE = 42,
+ BYTECODE_OP_NE_S64_DOUBLE = 43,
+ BYTECODE_OP_GT_S64_DOUBLE = 44,
+ BYTECODE_OP_LT_S64_DOUBLE = 45,
+ BYTECODE_OP_GE_S64_DOUBLE = 46,
+ BYTECODE_OP_LE_S64_DOUBLE = 47,
+
+ /* unary */
+ BYTECODE_OP_UNARY_PLUS = 48,
+ BYTECODE_OP_UNARY_MINUS = 49,
+ BYTECODE_OP_UNARY_NOT = 50,
+ BYTECODE_OP_UNARY_PLUS_S64 = 51,
+ BYTECODE_OP_UNARY_MINUS_S64 = 52,
+ BYTECODE_OP_UNARY_NOT_S64 = 53,
+ BYTECODE_OP_UNARY_PLUS_DOUBLE = 54,
+ BYTECODE_OP_UNARY_MINUS_DOUBLE = 55,
+ BYTECODE_OP_UNARY_NOT_DOUBLE = 56,
+
+ /* logical */
+ BYTECODE_OP_AND = 57,
+ BYTECODE_OP_OR = 58,
+
+ /* load field ref */
+ BYTECODE_OP_LOAD_FIELD_REF = 59,
+ BYTECODE_OP_LOAD_FIELD_REF_STRING = 60,
+ BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE = 61,
+ BYTECODE_OP_LOAD_FIELD_REF_S64 = 62,
+ BYTECODE_OP_LOAD_FIELD_REF_DOUBLE = 63,
+
+ /* load immediate from operand */
+ BYTECODE_OP_LOAD_STRING = 64,
+ BYTECODE_OP_LOAD_S64 = 65,
+ BYTECODE_OP_LOAD_DOUBLE = 66,
+
+ /* cast */
+ BYTECODE_OP_CAST_TO_S64 = 67,
+ BYTECODE_OP_CAST_DOUBLE_TO_S64 = 68,
+ BYTECODE_OP_CAST_NOP = 69,
+
+ /* get context ref */
+ BYTECODE_OP_GET_CONTEXT_REF = 70,
+ BYTECODE_OP_GET_CONTEXT_REF_STRING = 71,
+ BYTECODE_OP_GET_CONTEXT_REF_S64 = 72,
+ BYTECODE_OP_GET_CONTEXT_REF_DOUBLE = 73,
+
+ /* load userspace field ref */
+ BYTECODE_OP_LOAD_FIELD_REF_USER_STRING = 74,
+ BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE = 75,
+
+ /*
+ * load immediate star globbing pattern (literal string)
+ * from immediate
+ */
+ BYTECODE_OP_LOAD_STAR_GLOB_STRING = 76,
+
+	/* globbing pattern binary comparators */
+ BYTECODE_OP_EQ_STAR_GLOB_STRING = 77,
+ BYTECODE_OP_NE_STAR_GLOB_STRING = 78,
+
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ BYTECODE_OP_GET_CONTEXT_ROOT = 79,
+ BYTECODE_OP_GET_APP_CONTEXT_ROOT = 80,
+ BYTECODE_OP_GET_PAYLOAD_ROOT = 81,
+
+ BYTECODE_OP_GET_SYMBOL = 82,
+ BYTECODE_OP_GET_SYMBOL_FIELD = 83,
+ BYTECODE_OP_GET_INDEX_U16 = 84,
+ BYTECODE_OP_GET_INDEX_U64 = 85,
+
+ BYTECODE_OP_LOAD_FIELD = 86,
+ BYTECODE_OP_LOAD_FIELD_S8 = 87,
+ BYTECODE_OP_LOAD_FIELD_S16 = 88,
+ BYTECODE_OP_LOAD_FIELD_S32 = 89,
+ BYTECODE_OP_LOAD_FIELD_S64 = 90,
+ BYTECODE_OP_LOAD_FIELD_U8 = 91,
+ BYTECODE_OP_LOAD_FIELD_U16 = 92,
+ BYTECODE_OP_LOAD_FIELD_U32 = 93,
+ BYTECODE_OP_LOAD_FIELD_U64 = 94,
+ BYTECODE_OP_LOAD_FIELD_STRING = 95,
+ BYTECODE_OP_LOAD_FIELD_SEQUENCE = 96,
+ BYTECODE_OP_LOAD_FIELD_DOUBLE = 97,
+
+ BYTECODE_OP_UNARY_BIT_NOT = 98,
+
+ BYTECODE_OP_RETURN_S64 = 99,
+
+ NR_BYTECODE_OPS,
+};
+
+typedef uint8_t bytecode_opcode_t;
+
+struct load_op {
+ bytecode_opcode_t op;
+ /*
+	 * data to load. Size known by enum bytecode_op and null-term char.
+ */
+ char data[0];
+} __attribute__((packed));
+
+struct binary_op {
+ bytecode_opcode_t op;
+} __attribute__((packed));
+
+struct unary_op {
+ bytecode_opcode_t op;
+} __attribute__((packed));
+
+/* skip_offset is absolute from start of bytecode */
+struct logical_op {
+ bytecode_opcode_t op;
+ uint16_t skip_offset; /* bytecode insn, if skip second test */
+} __attribute__((packed));
+
+struct cast_op {
+ bytecode_opcode_t op;
+} __attribute__((packed));
+
+struct return_op {
+ bytecode_opcode_t op;
+} __attribute__((packed));
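+
+/*
+ * Each instruction starts with a one-byte opcode; the operand size is
+ * implied by the opcode (or by null-termination for strings), so a
+ * consumer advances through the stream one instruction at a time. Minimal
+ * sketch covering a few opcodes (illustration only; the interpreter
+ * handles all of them):
+ *
+ *	static size_t example_insn_len(const char *pc)
+ *	{
+ *		switch (*(const bytecode_opcode_t *) pc) {
+ *		case BYTECODE_OP_LOAD_S64:
+ *			return sizeof(struct load_op)
+ *				+ sizeof(struct literal_numeric);
+ *		case BYTECODE_OP_AND:
+ *		case BYTECODE_OP_OR:
+ *			return sizeof(struct logical_op);
+ *		case BYTECODE_OP_RETURN:
+ *		case BYTECODE_OP_RETURN_S64:
+ *			return sizeof(struct return_op);
+ *		default:
+ *			return 0;
+ *		}
+ *	}
+ */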
+
+#endif /* _BYTECODE_H */
--- /dev/null
+#ifndef _LTTNG_UST_CONTEXT_INTERNAL_H
+#define _LTTNG_UST_CONTEXT_INTERNAL_H
+
+/*
+ * context-internal.h
+ *
+ * Copyright 2020 (c) - Francis Deslauriers <francis.deslauriers@efficios.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <lttng/ust-events.h>
+
+int lttng_context_init_all(struct lttng_ctx **ctx);
+
+#endif /* _LTTNG_UST_CONTEXT_INTERNAL_H */
--- /dev/null
+#ifndef _LTTNG_UST_CONTEXT_PROVIDER_INTERNAL_H
+#define _LTTNG_UST_CONTEXT_PROVIDER_INTERNAL_H
+
+/*
+ * Copyright 2019 - Francis Deslauriers <francis.deslauriers@efficios.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <stddef.h>
+#include <lttng/ust-events.h>
+
+void lttng_ust_context_set_trigger_group_provider(const char *name,
+ size_t (*get_size)(struct lttng_ctx_field *field, size_t offset),
+ void (*record)(struct lttng_ctx_field *field,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_channel *chan),
+ void (*get_value)(struct lttng_ctx_field *field,
+ struct lttng_ctx_value *value));
+
+#endif /* _LTTNG_UST_CONTEXT_PROVIDER_INTERNAL_H */
+++ /dev/null
-#ifndef _FILTER_BYTECODE_H
-#define _FILTER_BYTECODE_H
-
-/*
- * filter-bytecode.h
- *
- * LTTng filter bytecode
- *
- * Copyright 2012-2016 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <stdint.h>
-#include <lttng/ust-abi.h>
-
-#ifndef LTTNG_PACKED
-#error "LTTNG_PACKED should be defined"
-#endif
-
-/*
- * offsets are absolute from start of bytecode.
- */
-
-struct field_ref {
- /* Initially, symbol offset. After link, field offset. */
- uint16_t offset;
-} __attribute__((packed));
-
-struct get_symbol {
- /* Symbol offset. */
- uint16_t offset;
-} LTTNG_PACKED;
-
-struct get_index_u16 {
- uint16_t index;
-} LTTNG_PACKED;
-
-struct get_index_u64 {
- uint64_t index;
-} LTTNG_PACKED;
-
-struct literal_numeric {
- int64_t v;
-} __attribute__((packed));
-
-struct literal_double {
- double v;
-} __attribute__((packed));
-
-struct literal_string {
- char string[0];
-} __attribute__((packed));
-
-enum filter_op {
- FILTER_OP_UNKNOWN = 0,
-
- FILTER_OP_RETURN = 1,
-
- /* binary */
- FILTER_OP_MUL = 2,
- FILTER_OP_DIV = 3,
- FILTER_OP_MOD = 4,
- FILTER_OP_PLUS = 5,
- FILTER_OP_MINUS = 6,
- FILTER_OP_BIT_RSHIFT = 7,
- FILTER_OP_BIT_LSHIFT = 8,
- FILTER_OP_BIT_AND = 9,
- FILTER_OP_BIT_OR = 10,
- FILTER_OP_BIT_XOR = 11,
-
- /* binary comparators */
- FILTER_OP_EQ = 12,
- FILTER_OP_NE = 13,
- FILTER_OP_GT = 14,
- FILTER_OP_LT = 15,
- FILTER_OP_GE = 16,
- FILTER_OP_LE = 17,
-
- /* string binary comparator: apply to */
- FILTER_OP_EQ_STRING = 18,
- FILTER_OP_NE_STRING = 19,
- FILTER_OP_GT_STRING = 20,
- FILTER_OP_LT_STRING = 21,
- FILTER_OP_GE_STRING = 22,
- FILTER_OP_LE_STRING = 23,
-
- /* s64 binary comparator */
- FILTER_OP_EQ_S64 = 24,
- FILTER_OP_NE_S64 = 25,
- FILTER_OP_GT_S64 = 26,
- FILTER_OP_LT_S64 = 27,
- FILTER_OP_GE_S64 = 28,
- FILTER_OP_LE_S64 = 29,
-
- /* double binary comparator */
- FILTER_OP_EQ_DOUBLE = 30,
- FILTER_OP_NE_DOUBLE = 31,
- FILTER_OP_GT_DOUBLE = 32,
- FILTER_OP_LT_DOUBLE = 33,
- FILTER_OP_GE_DOUBLE = 34,
- FILTER_OP_LE_DOUBLE = 35,
-
- /* Mixed S64-double binary comparators */
- FILTER_OP_EQ_DOUBLE_S64 = 36,
- FILTER_OP_NE_DOUBLE_S64 = 37,
- FILTER_OP_GT_DOUBLE_S64 = 38,
- FILTER_OP_LT_DOUBLE_S64 = 39,
- FILTER_OP_GE_DOUBLE_S64 = 40,
- FILTER_OP_LE_DOUBLE_S64 = 41,
-
- FILTER_OP_EQ_S64_DOUBLE = 42,
- FILTER_OP_NE_S64_DOUBLE = 43,
- FILTER_OP_GT_S64_DOUBLE = 44,
- FILTER_OP_LT_S64_DOUBLE = 45,
- FILTER_OP_GE_S64_DOUBLE = 46,
- FILTER_OP_LE_S64_DOUBLE = 47,
-
- /* unary */
- FILTER_OP_UNARY_PLUS = 48,
- FILTER_OP_UNARY_MINUS = 49,
- FILTER_OP_UNARY_NOT = 50,
- FILTER_OP_UNARY_PLUS_S64 = 51,
- FILTER_OP_UNARY_MINUS_S64 = 52,
- FILTER_OP_UNARY_NOT_S64 = 53,
- FILTER_OP_UNARY_PLUS_DOUBLE = 54,
- FILTER_OP_UNARY_MINUS_DOUBLE = 55,
- FILTER_OP_UNARY_NOT_DOUBLE = 56,
-
- /* logical */
- FILTER_OP_AND = 57,
- FILTER_OP_OR = 58,
-
- /* load field ref */
- FILTER_OP_LOAD_FIELD_REF = 59,
- FILTER_OP_LOAD_FIELD_REF_STRING = 60,
- FILTER_OP_LOAD_FIELD_REF_SEQUENCE = 61,
- FILTER_OP_LOAD_FIELD_REF_S64 = 62,
- FILTER_OP_LOAD_FIELD_REF_DOUBLE = 63,
-
- /* load immediate from operand */
- FILTER_OP_LOAD_STRING = 64,
- FILTER_OP_LOAD_S64 = 65,
- FILTER_OP_LOAD_DOUBLE = 66,
-
- /* cast */
- FILTER_OP_CAST_TO_S64 = 67,
- FILTER_OP_CAST_DOUBLE_TO_S64 = 68,
- FILTER_OP_CAST_NOP = 69,
-
- /* get context ref */
- FILTER_OP_GET_CONTEXT_REF = 70,
- FILTER_OP_GET_CONTEXT_REF_STRING = 71,
- FILTER_OP_GET_CONTEXT_REF_S64 = 72,
- FILTER_OP_GET_CONTEXT_REF_DOUBLE = 73,
-
- /* load userspace field ref */
- FILTER_OP_LOAD_FIELD_REF_USER_STRING = 74,
- FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE = 75,
-
- /*
- * load immediate star globbing pattern (literal string)
- * from immediate
- */
- FILTER_OP_LOAD_STAR_GLOB_STRING = 76,
-
- /* globbing pattern binary operator: apply to */
- FILTER_OP_EQ_STAR_GLOB_STRING = 77,
- FILTER_OP_NE_STAR_GLOB_STRING = 78,
-
- /*
- * Instructions for recursive traversal through composed types.
- */
- FILTER_OP_GET_CONTEXT_ROOT = 79,
- FILTER_OP_GET_APP_CONTEXT_ROOT = 80,
- FILTER_OP_GET_PAYLOAD_ROOT = 81,
-
- FILTER_OP_GET_SYMBOL = 82,
- FILTER_OP_GET_SYMBOL_FIELD = 83,
- FILTER_OP_GET_INDEX_U16 = 84,
- FILTER_OP_GET_INDEX_U64 = 85,
-
- FILTER_OP_LOAD_FIELD = 86,
- FILTER_OP_LOAD_FIELD_S8 = 87,
- FILTER_OP_LOAD_FIELD_S16 = 88,
- FILTER_OP_LOAD_FIELD_S32 = 89,
- FILTER_OP_LOAD_FIELD_S64 = 90,
- FILTER_OP_LOAD_FIELD_U8 = 91,
- FILTER_OP_LOAD_FIELD_U16 = 92,
- FILTER_OP_LOAD_FIELD_U32 = 93,
- FILTER_OP_LOAD_FIELD_U64 = 94,
- FILTER_OP_LOAD_FIELD_STRING = 95,
- FILTER_OP_LOAD_FIELD_SEQUENCE = 96,
- FILTER_OP_LOAD_FIELD_DOUBLE = 97,
-
- FILTER_OP_UNARY_BIT_NOT = 98,
-
- FILTER_OP_RETURN_S64 = 99,
-
- NR_FILTER_OPS,
-};
-
-typedef uint8_t filter_opcode_t;
-
-struct load_op {
- filter_opcode_t op;
- char data[0];
- /* data to load. Size known by enum filter_opcode and null-term char. */
-} __attribute__((packed));
-
-struct binary_op {
- filter_opcode_t op;
-} __attribute__((packed));
-
-struct unary_op {
- filter_opcode_t op;
-} __attribute__((packed));
-
-/* skip_offset is absolute from start of bytecode */
-struct logical_op {
- filter_opcode_t op;
- uint16_t skip_offset; /* bytecode insn, if skip second test */
-} __attribute__((packed));
-
-struct cast_op {
- filter_opcode_t op;
-} __attribute__((packed));
-
-struct return_op {
- filter_opcode_t op;
-} __attribute__((packed));
-
-#endif /* _FILTER_BYTECODE_H */
--- /dev/null
+/*
+ * lttng-bytecode-interpreter.c
+ *
+ * LTTng UST bytecode interpreter.
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define _LGPL_SOURCE
+#include <stddef.h>
+#include <stdint.h>
+#include <urcu-pointer.h>
+#include <byteswap.h>
+
+#include <lttng/ust-events.h>
+
+#include "lttng-bytecode.h"
+#include "string-utils.h"
+
+
+/*
+ * -1: wildcard found.
+ * -2: unknown escape char.
+ * 0: normal char.
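+ *
+ * For example, on input "\\*" this returns 0 and leaves *p on the
+ * escaped '*', which the caller then compares literally; on input
+ * "*" it returns -1 to signal a wildcard.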
+ */
+
+static
+int parse_char(const char **p)
+{
+ switch (**p) {
+ case '\\':
+ (*p)++;
+ switch (**p) {
+ case '\\':
+ case '*':
+ return 0;
+ default:
+ return -2;
+ }
+ case '*':
+ return -1;
+ default:
+ return 0;
+ }
+}
+
+/*
+ * Returns SIZE_MAX if the string is null-terminated, or the number of
+ * characters if not.
+ */
+static
+size_t get_str_or_seq_len(const struct estack_entry *entry)
+{
+ return entry->u.s.seq_len;
+}
+
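+/*
+ * Compare the two strings at the top of the stack, where one side is
+ * a star-glob pattern literal; return 0 when the candidate matches
+ * the pattern, mirroring strcmp()-style equality tests.
+ */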
+static
+int stack_star_glob_match(struct estack *stack, int top, const char *cmp_type)
+{
+ const char *pattern;
+ const char *candidate;
+ size_t pattern_len;
+ size_t candidate_len;
+
+ /* Find out which side is the pattern vs. the candidate. */
+ if (estack_ax(stack, top)->u.s.literal_type == ESTACK_STRING_LITERAL_TYPE_STAR_GLOB) {
+ pattern = estack_ax(stack, top)->u.s.str;
+ pattern_len = get_str_or_seq_len(estack_ax(stack, top));
+ candidate = estack_bx(stack, top)->u.s.str;
+ candidate_len = get_str_or_seq_len(estack_bx(stack, top));
+ } else {
+ pattern = estack_bx(stack, top)->u.s.str;
+ pattern_len = get_str_or_seq_len(estack_bx(stack, top));
+ candidate = estack_ax(stack, top)->u.s.str;
+ candidate_len = get_str_or_seq_len(estack_ax(stack, top));
+ }
+
+	/* Perform the match; return 0 when the candidate matches the pattern. */
+ return !strutils_star_glob_match(pattern, pattern_len, candidate,
+ candidate_len);
+}
+
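+/*
+ * strcmp()-like comparison of the two stack-top strings, honouring
+ * explicit sequence lengths (SIZE_MAX means null-terminated); within
+ * a plain string literal, an unescaped '*' matches the remainder of
+ * the other string.
+ */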
+static
+int stack_strcmp(struct estack *stack, int top, const char *cmp_type)
+{
+ const char *p = estack_bx(stack, top)->u.s.str, *q = estack_ax(stack, top)->u.s.str;
+ int ret;
+ int diff;
+
+ for (;;) {
+ int escaped_r0 = 0;
+
+ if (unlikely(p - estack_bx(stack, top)->u.s.str >= estack_bx(stack, top)->u.s.seq_len || *p == '\0')) {
+ if (q - estack_ax(stack, top)->u.s.str >= estack_ax(stack, top)->u.s.seq_len || *q == '\0') {
+ return 0;
+ } else {
+ if (estack_ax(stack, top)->u.s.literal_type ==
+ ESTACK_STRING_LITERAL_TYPE_PLAIN) {
+ ret = parse_char(&q);
+ if (ret == -1)
+ return 0;
+ }
+ return -1;
+ }
+ }
+ if (unlikely(q - estack_ax(stack, top)->u.s.str >= estack_ax(stack, top)->u.s.seq_len || *q == '\0')) {
+ if (estack_bx(stack, top)->u.s.literal_type ==
+ ESTACK_STRING_LITERAL_TYPE_PLAIN) {
+ ret = parse_char(&p);
+ if (ret == -1)
+ return 0;
+ }
+ return 1;
+ }
+ if (estack_bx(stack, top)->u.s.literal_type ==
+ ESTACK_STRING_LITERAL_TYPE_PLAIN) {
+ ret = parse_char(&p);
+ if (ret == -1) {
+ return 0;
+ } else if (ret == -2) {
+ escaped_r0 = 1;
+ }
+ /* else compare both char */
+ }
+ if (estack_ax(stack, top)->u.s.literal_type ==
+ ESTACK_STRING_LITERAL_TYPE_PLAIN) {
+ ret = parse_char(&q);
+ if (ret == -1) {
+ return 0;
+ } else if (ret == -2) {
+ if (!escaped_r0)
+ return -1;
+ } else {
+ if (escaped_r0)
+ return 1;
+ }
+ } else {
+ if (escaped_r0)
+ return 1;
+ }
+ diff = *p - *q;
+ if (diff != 0)
+ break;
+ p++;
+ q++;
+ }
+ return diff;
+}
+
+uint64_t lttng_bytecode_filter_interpret_false(void *filter_data,
+ const char *filter_stack_data)
+{
+ return LTTNG_INTERPRETER_DISCARD;
+}
+
+uint64_t lttng_bytecode_capture_interpret_false(void *capture_data,
+ const char *capture_stack_data,
+ struct lttng_interpreter_output *output)
+{
+ return LTTNG_INTERPRETER_DISCARD;
+}
+
+#ifdef INTERPRETER_USE_SWITCH
+
+/*
+ * Fallback for compilers that do not support taking address of labels.
+ */
+
+#define START_OP \
+	start_pc = &bytecode->code[0];					\
+ for (pc = next_pc = start_pc; pc - start_pc < bytecode->len; \
+ pc = next_pc) { \
+ dbg_printf("Executing op %s (%u)\n", \
+ print_op((unsigned int) *(bytecode_opcode_t *) pc), \
+ (unsigned int) *(bytecode_opcode_t *) pc); \
+ switch (*(bytecode_opcode_t *) pc) {
+
+#define OP(name) jump_target_##name: __attribute__((unused)); \
+ case name
+
+#define PO break
+
+#define END_OP } \
+ }
+
+#define JUMP_TO(name) \
+ goto jump_target_##name
+
+#else
+
+/*
+ * Dispatch-table based interpreter.
+ */
+
+#define START_OP \
+ start_pc = &bytecode->code[0]; \
+ pc = next_pc = start_pc; \
+ if (unlikely(pc - start_pc >= bytecode->len)) \
+ goto end; \
+ goto *dispatch[*(bytecode_opcode_t *) pc];
+
+#define OP(name) \
+LABEL_##name
+
+#define PO \
+ pc = next_pc; \
+ goto *dispatch[*(bytecode_opcode_t *) pc];
+
+#define END_OP
+
+#define JUMP_TO(name) \
+ goto LABEL_##name
+
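+/*
+ * With this variant, OP(name) expands to the label LABEL_name and PO
+ * ends each handler by jumping through dispatch[] indexed by the next
+ * opcode, threading control directly from one instruction handler to
+ * the next instead of looping back to a central switch.
+ */
+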
+#endif
+
+#define IS_INTEGER_REGISTER(reg_type) \
+ (reg_type == REG_U64 || reg_type == REG_S64)
+
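+/*
+ * Load the context field at the given index into the load_ptr,
+ * converting the context value to the matching object type (integer,
+ * enum, string, double or dynamic).
+ */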
+static int context_get_index(struct lttng_ctx *ctx,
+ struct load_ptr *ptr,
+ uint32_t idx)
+{
+ struct lttng_ctx_field *ctx_field;
+ struct lttng_event_field *field;
+ struct lttng_ctx_value v;
+
+ ctx_field = &ctx->fields[idx];
+ field = &ctx_field->event_field;
+ ptr->type = LOAD_OBJECT;
+ ptr->field = field;
+
+ switch (field->type.atype) {
+ case atype_integer:
+ ctx_field->get_value(ctx_field, &v);
+ if (field->type.u.integer.signedness) {
+ ptr->object_type = OBJECT_TYPE_S64;
+ ptr->u.s64 = v.u.s64;
+ ptr->ptr = &ptr->u.s64;
+ } else {
+ ptr->object_type = OBJECT_TYPE_U64;
+ ptr->u.u64 = v.u.s64; /* Cast. */
+ ptr->ptr = &ptr->u.u64;
+ }
+ break;
+ case atype_enum: /* Fall-through */
+ case atype_enum_nestable:
+ {
+ const struct lttng_integer_type *itype;
+
+ if (field->type.atype == atype_enum) {
+ itype = &field->type.u.legacy.basic.enumeration.container_type;
+ } else {
+ itype = &field->type.u.enum_nestable.container_type->u.integer;
+ }
+ ctx_field->get_value(ctx_field, &v);
+ if (itype->signedness) {
+ ptr->object_type = OBJECT_TYPE_SIGNED_ENUM;
+ ptr->u.s64 = v.u.s64;
+ ptr->ptr = &ptr->u.s64;
+ } else {
+ ptr->object_type = OBJECT_TYPE_UNSIGNED_ENUM;
+ ptr->u.u64 = v.u.s64; /* Cast. */
+ ptr->ptr = &ptr->u.u64;
+ }
+ break;
+ }
+ case atype_array:
+ if (field->type.u.legacy.array.elem_type.atype != atype_integer) {
+ ERR("Array nesting only supports integer types.");
+ return -EINVAL;
+ }
+ if (field->type.u.legacy.array.elem_type.u.basic.integer.encoding == lttng_encode_none) {
+ ERR("Only string arrays are supported for contexts.");
+ return -EINVAL;
+ }
+ ptr->object_type = OBJECT_TYPE_STRING;
+ ctx_field->get_value(ctx_field, &v);
+ ptr->ptr = v.u.str;
+ break;
+ case atype_array_nestable:
+ if (field->type.u.array_nestable.elem_type->atype != atype_integer) {
+ ERR("Array nesting only supports integer types.");
+ return -EINVAL;
+ }
+ if (field->type.u.array_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
+ ERR("Only string arrays are supported for contexts.");
+ return -EINVAL;
+ }
+ ptr->object_type = OBJECT_TYPE_STRING;
+ ctx_field->get_value(ctx_field, &v);
+ ptr->ptr = v.u.str;
+ break;
+ case atype_sequence:
+ if (field->type.u.legacy.sequence.elem_type.atype != atype_integer) {
+ ERR("Sequence nesting only supports integer types.");
+ return -EINVAL;
+ }
+ if (field->type.u.legacy.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) {
+ ERR("Only string sequences are supported for contexts.");
+ return -EINVAL;
+ }
+ ptr->object_type = OBJECT_TYPE_STRING;
+ ctx_field->get_value(ctx_field, &v);
+ ptr->ptr = v.u.str;
+ break;
+ case atype_sequence_nestable:
+ if (field->type.u.sequence_nestable.elem_type->atype != atype_integer) {
+ ERR("Sequence nesting only supports integer types.");
+ return -EINVAL;
+ }
+ if (field->type.u.sequence_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
+ ERR("Only string sequences are supported for contexts.");
+ return -EINVAL;
+ }
+ ptr->object_type = OBJECT_TYPE_STRING;
+ ctx_field->get_value(ctx_field, &v);
+ ptr->ptr = v.u.str;
+ break;
+ case atype_string:
+ ptr->object_type = OBJECT_TYPE_STRING;
+ ctx_field->get_value(ctx_field, &v);
+ ptr->ptr = v.u.str;
+ break;
+ case atype_float:
+ ptr->object_type = OBJECT_TYPE_DOUBLE;
+ ctx_field->get_value(ctx_field, &v);
+ ptr->u.d = v.u.d;
+ ptr->ptr = &ptr->u.d;
+ break;
+ case atype_dynamic:
+ ctx_field->get_value(ctx_field, &v);
+ switch (v.sel) {
+ case LTTNG_UST_DYNAMIC_TYPE_NONE:
+ return -EINVAL;
+ case LTTNG_UST_DYNAMIC_TYPE_U8:
+ case LTTNG_UST_DYNAMIC_TYPE_U16:
+ case LTTNG_UST_DYNAMIC_TYPE_U32:
+ case LTTNG_UST_DYNAMIC_TYPE_U64:
+ ptr->object_type = OBJECT_TYPE_U64;
+ ptr->u.u64 = v.u.u64;
+ ptr->ptr = &ptr->u.u64;
+		dbg_printf("context get index dynamic u64 %" PRIu64 "\n", ptr->u.u64);
+ break;
+ case LTTNG_UST_DYNAMIC_TYPE_S8:
+ case LTTNG_UST_DYNAMIC_TYPE_S16:
+ case LTTNG_UST_DYNAMIC_TYPE_S32:
+ case LTTNG_UST_DYNAMIC_TYPE_S64:
+ ptr->object_type = OBJECT_TYPE_S64;
+ ptr->u.s64 = v.u.s64;
+ ptr->ptr = &ptr->u.s64;
+ dbg_printf("context get index dynamic s64 %" PRIi64 "\n", ptr->u.s64);
+ break;
+ case LTTNG_UST_DYNAMIC_TYPE_FLOAT:
+ case LTTNG_UST_DYNAMIC_TYPE_DOUBLE:
+ ptr->object_type = OBJECT_TYPE_DOUBLE;
+ ptr->u.d = v.u.d;
+ ptr->ptr = &ptr->u.d;
+ dbg_printf("context get index dynamic double %g\n", ptr->u.d);
+ break;
+ case LTTNG_UST_DYNAMIC_TYPE_STRING:
+ ptr->object_type = OBJECT_TYPE_STRING;
+ ptr->ptr = v.u.str;
+ dbg_printf("context get index dynamic string %s\n", (const char *) ptr->ptr);
+ break;
+ default:
+ dbg_printf("Interpreter warning: unknown dynamic type (%d).\n", (int) v.sel);
+ return -EINVAL;
+ }
+ break;
+ case atype_struct:
+ ERR("Structure type cannot be loaded.");
+ return -EINVAL;
+ default:
+ ERR("Unknown type: %d", (int) field->type.atype);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int dynamic_get_index(struct lttng_ctx *ctx,
+ struct bytecode_runtime *runtime,
+ uint64_t index, struct estack_entry *stack_top)
+{
+ int ret;
+ const struct bytecode_get_index_data *gid;
+
+ gid = (const struct bytecode_get_index_data *) &runtime->data[index];
+ switch (stack_top->u.ptr.type) {
+ case LOAD_OBJECT:
+ switch (stack_top->u.ptr.object_type) {
+ case OBJECT_TYPE_ARRAY:
+ {
+ const char *ptr;
+
+ assert(gid->offset < gid->array_len);
+ /* Skip count (unsigned long) */
+ ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
+ ptr = ptr + gid->offset;
+ stack_top->u.ptr.ptr = ptr;
+ stack_top->u.ptr.object_type = gid->elem.type;
+ stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
+ assert(stack_top->u.ptr.field->type.atype == atype_array ||
+ stack_top->u.ptr.field->type.atype == atype_array_nestable);
+ stack_top->u.ptr.field = NULL;
+ break;
+ }
+ case OBJECT_TYPE_SEQUENCE:
+ {
+ const char *ptr;
+ size_t ptr_seq_len;
+
+ ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
+ ptr_seq_len = *(unsigned long *) stack_top->u.ptr.ptr;
+ if (gid->offset >= gid->elem.len * ptr_seq_len) {
+ ret = -EINVAL;
+ goto end;
+ }
+ ptr = ptr + gid->offset;
+ stack_top->u.ptr.ptr = ptr;
+ stack_top->u.ptr.object_type = gid->elem.type;
+ stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
+ assert(stack_top->u.ptr.field->type.atype == atype_sequence ||
+ stack_top->u.ptr.field->type.atype == atype_sequence_nestable);
+ stack_top->u.ptr.field = NULL;
+ break;
+ }
+ case OBJECT_TYPE_STRUCT:
+ ERR("Nested structures are not supported yet.");
+ ret = -EINVAL;
+ goto end;
+ case OBJECT_TYPE_VARIANT:
+ default:
+ ERR("Unexpected get index type %d",
+ (int) stack_top->u.ptr.object_type);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case LOAD_ROOT_CONTEXT:
+ case LOAD_ROOT_APP_CONTEXT: /* Fall-through */
+ {
+ ret = context_get_index(ctx,
+ &stack_top->u.ptr,
+ gid->ctx_index);
+ if (ret) {
+ goto end;
+ }
+ break;
+ }
+ case LOAD_ROOT_PAYLOAD:
+ stack_top->u.ptr.ptr += gid->offset;
+ if (gid->elem.type == OBJECT_TYPE_STRING)
+ stack_top->u.ptr.ptr = *(const char * const *) stack_top->u.ptr.ptr;
+ stack_top->u.ptr.object_type = gid->elem.type;
+ stack_top->u.ptr.type = LOAD_OBJECT;
+ stack_top->u.ptr.field = gid->field;
+ stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
+ break;
+ }
+
+ stack_top->type = REG_PTR;
+
+ return 0;
+
+end:
+ return ret;
+}
+
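+/*
+ * Dereference the object pointed to by the stack-top entry and
+ * replace the entry in place with the loaded value, converted to the
+ * matching register type (REG_S64, REG_U64, REG_DOUBLE or
+ * REG_STRING).
+ */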
+static int dynamic_load_field(struct estack_entry *stack_top)
+{
+ int ret;
+
+ switch (stack_top->u.ptr.type) {
+ case LOAD_OBJECT:
+ break;
+ case LOAD_ROOT_CONTEXT:
+ case LOAD_ROOT_APP_CONTEXT:
+ case LOAD_ROOT_PAYLOAD:
+ default:
+ dbg_printf("Interpreter warning: cannot load root, missing field name.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (stack_top->u.ptr.object_type) {
+ case OBJECT_TYPE_S8:
+ dbg_printf("op load field s8\n");
+ stack_top->u.v = *(int8_t *) stack_top->u.ptr.ptr;
+ stack_top->type = REG_S64;
+ break;
+ case OBJECT_TYPE_S16:
+ {
+ int16_t tmp;
+
+ dbg_printf("op load field s16\n");
+ tmp = *(int16_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ tmp = bswap_16(tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_S64;
+ break;
+ }
+ case OBJECT_TYPE_S32:
+ {
+ int32_t tmp;
+
+ dbg_printf("op load field s32\n");
+ tmp = *(int32_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ tmp = bswap_32(tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_S64;
+ break;
+ }
+ case OBJECT_TYPE_S64:
+ {
+ int64_t tmp;
+
+ dbg_printf("op load field s64\n");
+ tmp = *(int64_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ tmp = bswap_64(tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_S64;
+ break;
+ }
+ case OBJECT_TYPE_SIGNED_ENUM:
+ {
+ int64_t tmp;
+
+ dbg_printf("op load field signed enumeration\n");
+ tmp = *(int64_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ tmp = bswap_64(tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_S64;
+ break;
+ }
+ case OBJECT_TYPE_U8:
+ dbg_printf("op load field u8\n");
+ stack_top->u.v = *(uint8_t *) stack_top->u.ptr.ptr;
+ stack_top->type = REG_U64;
+ break;
+ case OBJECT_TYPE_U16:
+ {
+ uint16_t tmp;
+
+ dbg_printf("op load field u16\n");
+ tmp = *(uint16_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ tmp = bswap_16(tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_U64;
+ break;
+ }
+ case OBJECT_TYPE_U32:
+ {
+ uint32_t tmp;
+
+ dbg_printf("op load field u32\n");
+ tmp = *(uint32_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ tmp = bswap_32(tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_U64;
+ break;
+ }
+ case OBJECT_TYPE_U64:
+ {
+ uint64_t tmp;
+
+ dbg_printf("op load field u64\n");
+ tmp = *(uint64_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ tmp = bswap_64(tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_U64;
+ break;
+ }
+ case OBJECT_TYPE_UNSIGNED_ENUM:
+ {
+ uint64_t tmp;
+
+ dbg_printf("op load field unsigned enumeration\n");
+ tmp = *(uint64_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ tmp = bswap_64(tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_U64;
+ break;
+ }
+ case OBJECT_TYPE_DOUBLE:
+ memcpy(&stack_top->u.d,
+ stack_top->u.ptr.ptr,
+ sizeof(struct literal_double));
+ stack_top->type = REG_DOUBLE;
+ break;
+ case OBJECT_TYPE_STRING:
+ {
+ const char *str;
+
+ dbg_printf("op load field string\n");
+ str = (const char *) stack_top->u.ptr.ptr;
+ stack_top->u.s.str = str;
+ if (unlikely(!stack_top->u.s.str)) {
+ dbg_printf("Interpreter warning: loading a NULL string.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ stack_top->u.s.seq_len = SIZE_MAX;
+ stack_top->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ stack_top->type = REG_STRING;
+ break;
+ }
+ case OBJECT_TYPE_STRING_SEQUENCE:
+ {
+ const char *ptr;
+
+ dbg_printf("op load field string sequence\n");
+ ptr = stack_top->u.ptr.ptr;
+ stack_top->u.s.seq_len = *(unsigned long *) ptr;
+ stack_top->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
+ stack_top->type = REG_STRING;
+ if (unlikely(!stack_top->u.s.str)) {
+ dbg_printf("Interpreter warning: loading a NULL sequence.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ stack_top->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ break;
+ }
+ case OBJECT_TYPE_DYNAMIC:
+ /*
+ * Dynamic types in context are looked up
+ * by context get index.
+ */
+ ret = -EINVAL;
+ goto end;
+ case OBJECT_TYPE_SEQUENCE:
+ case OBJECT_TYPE_ARRAY:
+ case OBJECT_TYPE_STRUCT:
+ case OBJECT_TYPE_VARIANT:
+ ERR("Sequences, arrays, struct and variant cannot be loaded (nested types).");
+ ret = -EINVAL;
+ goto end;
+ }
+ return 0;
+
+end:
+ return ret;
+}
+
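+/*
+ * Convert the stack-top entry into a struct lttng_interpreter_output,
+ * loading through REG_PTR objects as needed. Returns
+ * LTTNG_INTERPRETER_RECORD_FLAG on success, negative error value on
+ * error.
+ */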
+static
+int lttng_bytecode_interpret_format_output(struct estack_entry *ax,
+ struct lttng_interpreter_output *output)
+{
+ int ret;
+
+again:
+ switch (ax->type) {
+ case REG_S64:
+ output->type = LTTNG_INTERPRETER_TYPE_S64;
+ output->u.s = ax->u.v;
+ break;
+ case REG_U64:
+ output->type = LTTNG_INTERPRETER_TYPE_U64;
+ output->u.u = (uint64_t) ax->u.v;
+ break;
+ case REG_DOUBLE:
+ output->type = LTTNG_INTERPRETER_TYPE_DOUBLE;
+ output->u.d = ax->u.d;
+ break;
+ case REG_STRING:
+ output->type = LTTNG_INTERPRETER_TYPE_STRING;
+ output->u.str.str = ax->u.s.str;
+ output->u.str.len = ax->u.s.seq_len;
+ break;
+ case REG_PTR:
+ switch (ax->u.ptr.object_type) {
+ case OBJECT_TYPE_S8:
+ case OBJECT_TYPE_S16:
+ case OBJECT_TYPE_S32:
+ case OBJECT_TYPE_S64:
+ case OBJECT_TYPE_U8:
+ case OBJECT_TYPE_U16:
+ case OBJECT_TYPE_U32:
+ case OBJECT_TYPE_U64:
+ case OBJECT_TYPE_DOUBLE:
+ case OBJECT_TYPE_STRING:
+ case OBJECT_TYPE_STRING_SEQUENCE:
+ ret = dynamic_load_field(ax);
+ if (ret)
+ return ret;
+ /* Retry after loading ptr into stack top. */
+ goto again;
+ case OBJECT_TYPE_SEQUENCE:
+ output->type = LTTNG_INTERPRETER_TYPE_SEQUENCE;
+ output->u.sequence.ptr = *(const char **) (ax->u.ptr.ptr + sizeof(unsigned long));
+ output->u.sequence.nr_elem = *(unsigned long *) ax->u.ptr.ptr;
+ output->u.sequence.nested_type = ax->u.ptr.field->type.u.sequence_nestable.elem_type;
+ break;
+ case OBJECT_TYPE_ARRAY:
+ /* Skip count (unsigned long) */
+ output->type = LTTNG_INTERPRETER_TYPE_SEQUENCE;
+ output->u.sequence.ptr = *(const char **) (ax->u.ptr.ptr + sizeof(unsigned long));
+ output->u.sequence.nr_elem = ax->u.ptr.field->type.u.array_nestable.length;
+ output->u.sequence.nested_type = ax->u.ptr.field->type.u.array_nestable.elem_type;
+ break;
+ case OBJECT_TYPE_SIGNED_ENUM:
+ ret = dynamic_load_field(ax);
+ if (ret)
+ return ret;
+ output->type = LTTNG_INTERPRETER_TYPE_SIGNED_ENUM;
+ output->u.s = ax->u.v;
+ break;
+ case OBJECT_TYPE_UNSIGNED_ENUM:
+ ret = dynamic_load_field(ax);
+ if (ret)
+ return ret;
+ output->type = LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM;
+ output->u.u = ax->u.v;
+ break;
+ case OBJECT_TYPE_STRUCT:
+ case OBJECT_TYPE_VARIANT:
+ default:
+ return -EINVAL;
+ }
+
+ break;
+ case REG_STAR_GLOB_STRING:
+ case REG_UNKNOWN:
+ default:
+ return -EINVAL;
+ }
+
+ return LTTNG_INTERPRETER_RECORD_FLAG;
+}
+
+/*
+ * For `output` equal to NULL:
+ * Return 0 (discard), or raise the 0x1 flag (log event).
+ * Currently, other flags are kept for future extensions and have no
+ * effect.
+ * For `output` not equal to NULL:
+ * Return 0 on success, negative error value on error.
+ */
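+/*
+ * Note (inferred from the interface above): the filter path invokes
+ * this with a NULL output and consumes only the record flag of the
+ * return value, while the capture path passes a non-NULL output that
+ * receives the formatted stack-top value.
+ */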
+static
+uint64_t bytecode_interpret(void *interpreter_data,
+ const char *interpreter_stack_data,
+ struct lttng_interpreter_output *output)
+{
+ struct bytecode_runtime *bytecode = interpreter_data;
+ struct lttng_ctx *ctx = rcu_dereference(*bytecode->p.pctx);
+ void *pc, *next_pc, *start_pc;
+ int ret = -EINVAL;
+ uint64_t retval = 0;
+ struct estack _stack;
+ struct estack *stack = &_stack;
+ register int64_t ax = 0, bx = 0;
+ register enum entry_type ax_t = REG_UNKNOWN, bx_t = REG_UNKNOWN;
+ register int top = INTERPRETER_STACK_EMPTY;
+#ifndef INTERPRETER_USE_SWITCH
+ static void *dispatch[NR_BYTECODE_OPS] = {
+ [ BYTECODE_OP_UNKNOWN ] = &&LABEL_BYTECODE_OP_UNKNOWN,
+
+ [ BYTECODE_OP_RETURN ] = &&LABEL_BYTECODE_OP_RETURN,
+
+ /* binary */
+ [ BYTECODE_OP_MUL ] = &&LABEL_BYTECODE_OP_MUL,
+ [ BYTECODE_OP_DIV ] = &&LABEL_BYTECODE_OP_DIV,
+ [ BYTECODE_OP_MOD ] = &&LABEL_BYTECODE_OP_MOD,
+ [ BYTECODE_OP_PLUS ] = &&LABEL_BYTECODE_OP_PLUS,
+ [ BYTECODE_OP_MINUS ] = &&LABEL_BYTECODE_OP_MINUS,
+ [ BYTECODE_OP_BIT_RSHIFT ] = &&LABEL_BYTECODE_OP_BIT_RSHIFT,
+ [ BYTECODE_OP_BIT_LSHIFT ] = &&LABEL_BYTECODE_OP_BIT_LSHIFT,
+ [ BYTECODE_OP_BIT_AND ] = &&LABEL_BYTECODE_OP_BIT_AND,
+ [ BYTECODE_OP_BIT_OR ] = &&LABEL_BYTECODE_OP_BIT_OR,
+ [ BYTECODE_OP_BIT_XOR ] = &&LABEL_BYTECODE_OP_BIT_XOR,
+
+ /* binary comparators */
+ [ BYTECODE_OP_EQ ] = &&LABEL_BYTECODE_OP_EQ,
+ [ BYTECODE_OP_NE ] = &&LABEL_BYTECODE_OP_NE,
+ [ BYTECODE_OP_GT ] = &&LABEL_BYTECODE_OP_GT,
+ [ BYTECODE_OP_LT ] = &&LABEL_BYTECODE_OP_LT,
+ [ BYTECODE_OP_GE ] = &&LABEL_BYTECODE_OP_GE,
+ [ BYTECODE_OP_LE ] = &&LABEL_BYTECODE_OP_LE,
+
+ /* string binary comparator */
+ [ BYTECODE_OP_EQ_STRING ] = &&LABEL_BYTECODE_OP_EQ_STRING,
+ [ BYTECODE_OP_NE_STRING ] = &&LABEL_BYTECODE_OP_NE_STRING,
+ [ BYTECODE_OP_GT_STRING ] = &&LABEL_BYTECODE_OP_GT_STRING,
+ [ BYTECODE_OP_LT_STRING ] = &&LABEL_BYTECODE_OP_LT_STRING,
+ [ BYTECODE_OP_GE_STRING ] = &&LABEL_BYTECODE_OP_GE_STRING,
+ [ BYTECODE_OP_LE_STRING ] = &&LABEL_BYTECODE_OP_LE_STRING,
+
+ /* globbing pattern binary comparator */
+ [ BYTECODE_OP_EQ_STAR_GLOB_STRING ] = &&LABEL_BYTECODE_OP_EQ_STAR_GLOB_STRING,
+ [ BYTECODE_OP_NE_STAR_GLOB_STRING ] = &&LABEL_BYTECODE_OP_NE_STAR_GLOB_STRING,
+
+ /* s64 binary comparator */
+ [ BYTECODE_OP_EQ_S64 ] = &&LABEL_BYTECODE_OP_EQ_S64,
+ [ BYTECODE_OP_NE_S64 ] = &&LABEL_BYTECODE_OP_NE_S64,
+ [ BYTECODE_OP_GT_S64 ] = &&LABEL_BYTECODE_OP_GT_S64,
+ [ BYTECODE_OP_LT_S64 ] = &&LABEL_BYTECODE_OP_LT_S64,
+ [ BYTECODE_OP_GE_S64 ] = &&LABEL_BYTECODE_OP_GE_S64,
+ [ BYTECODE_OP_LE_S64 ] = &&LABEL_BYTECODE_OP_LE_S64,
+
+ /* double binary comparator */
+ [ BYTECODE_OP_EQ_DOUBLE ] = &&LABEL_BYTECODE_OP_EQ_DOUBLE,
+ [ BYTECODE_OP_NE_DOUBLE ] = &&LABEL_BYTECODE_OP_NE_DOUBLE,
+ [ BYTECODE_OP_GT_DOUBLE ] = &&LABEL_BYTECODE_OP_GT_DOUBLE,
+ [ BYTECODE_OP_LT_DOUBLE ] = &&LABEL_BYTECODE_OP_LT_DOUBLE,
+ [ BYTECODE_OP_GE_DOUBLE ] = &&LABEL_BYTECODE_OP_GE_DOUBLE,
+ [ BYTECODE_OP_LE_DOUBLE ] = &&LABEL_BYTECODE_OP_LE_DOUBLE,
+
+ /* Mixed S64-double binary comparators */
+ [ BYTECODE_OP_EQ_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_EQ_DOUBLE_S64,
+ [ BYTECODE_OP_NE_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_NE_DOUBLE_S64,
+ [ BYTECODE_OP_GT_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_GT_DOUBLE_S64,
+ [ BYTECODE_OP_LT_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_LT_DOUBLE_S64,
+ [ BYTECODE_OP_GE_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_GE_DOUBLE_S64,
+ [ BYTECODE_OP_LE_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_LE_DOUBLE_S64,
+
+ [ BYTECODE_OP_EQ_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_EQ_S64_DOUBLE,
+ [ BYTECODE_OP_NE_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_NE_S64_DOUBLE,
+ [ BYTECODE_OP_GT_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_GT_S64_DOUBLE,
+ [ BYTECODE_OP_LT_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_LT_S64_DOUBLE,
+ [ BYTECODE_OP_GE_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_GE_S64_DOUBLE,
+ [ BYTECODE_OP_LE_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_LE_S64_DOUBLE,
+
+ /* unary */
+ [ BYTECODE_OP_UNARY_PLUS ] = &&LABEL_BYTECODE_OP_UNARY_PLUS,
+ [ BYTECODE_OP_UNARY_MINUS ] = &&LABEL_BYTECODE_OP_UNARY_MINUS,
+ [ BYTECODE_OP_UNARY_NOT ] = &&LABEL_BYTECODE_OP_UNARY_NOT,
+ [ BYTECODE_OP_UNARY_PLUS_S64 ] = &&LABEL_BYTECODE_OP_UNARY_PLUS_S64,
+ [ BYTECODE_OP_UNARY_MINUS_S64 ] = &&LABEL_BYTECODE_OP_UNARY_MINUS_S64,
+ [ BYTECODE_OP_UNARY_NOT_S64 ] = &&LABEL_BYTECODE_OP_UNARY_NOT_S64,
+ [ BYTECODE_OP_UNARY_PLUS_DOUBLE ] = &&LABEL_BYTECODE_OP_UNARY_PLUS_DOUBLE,
+ [ BYTECODE_OP_UNARY_MINUS_DOUBLE ] = &&LABEL_BYTECODE_OP_UNARY_MINUS_DOUBLE,
+ [ BYTECODE_OP_UNARY_NOT_DOUBLE ] = &&LABEL_BYTECODE_OP_UNARY_NOT_DOUBLE,
+
+ /* logical */
+ [ BYTECODE_OP_AND ] = &&LABEL_BYTECODE_OP_AND,
+ [ BYTECODE_OP_OR ] = &&LABEL_BYTECODE_OP_OR,
+
+ /* load field ref */
+ [ BYTECODE_OP_LOAD_FIELD_REF ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF,
+ [ BYTECODE_OP_LOAD_FIELD_REF_STRING ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_STRING,
+ [ BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE,
+ [ BYTECODE_OP_LOAD_FIELD_REF_S64 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_S64,
+ [ BYTECODE_OP_LOAD_FIELD_REF_DOUBLE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_DOUBLE,
+
+ /* load from immediate operand */
+ [ BYTECODE_OP_LOAD_STRING ] = &&LABEL_BYTECODE_OP_LOAD_STRING,
+ [ BYTECODE_OP_LOAD_STAR_GLOB_STRING ] = &&LABEL_BYTECODE_OP_LOAD_STAR_GLOB_STRING,
+ [ BYTECODE_OP_LOAD_S64 ] = &&LABEL_BYTECODE_OP_LOAD_S64,
+ [ BYTECODE_OP_LOAD_DOUBLE ] = &&LABEL_BYTECODE_OP_LOAD_DOUBLE,
+
+ /* cast */
+ [ BYTECODE_OP_CAST_TO_S64 ] = &&LABEL_BYTECODE_OP_CAST_TO_S64,
+ [ BYTECODE_OP_CAST_DOUBLE_TO_S64 ] = &&LABEL_BYTECODE_OP_CAST_DOUBLE_TO_S64,
+ [ BYTECODE_OP_CAST_NOP ] = &&LABEL_BYTECODE_OP_CAST_NOP,
+
+ /* get context ref */
+ [ BYTECODE_OP_GET_CONTEXT_REF ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF,
+ [ BYTECODE_OP_GET_CONTEXT_REF_STRING ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF_STRING,
+ [ BYTECODE_OP_GET_CONTEXT_REF_S64 ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF_S64,
+ [ BYTECODE_OP_GET_CONTEXT_REF_DOUBLE ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF_DOUBLE,
+
+ /* Instructions for recursive traversal through composed types. */
+ [ BYTECODE_OP_GET_CONTEXT_ROOT ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_ROOT,
+ [ BYTECODE_OP_GET_APP_CONTEXT_ROOT ] = &&LABEL_BYTECODE_OP_GET_APP_CONTEXT_ROOT,
+ [ BYTECODE_OP_GET_PAYLOAD_ROOT ] = &&LABEL_BYTECODE_OP_GET_PAYLOAD_ROOT,
+
+ [ BYTECODE_OP_GET_SYMBOL ] = &&LABEL_BYTECODE_OP_GET_SYMBOL,
+ [ BYTECODE_OP_GET_SYMBOL_FIELD ] = &&LABEL_BYTECODE_OP_GET_SYMBOL_FIELD,
+ [ BYTECODE_OP_GET_INDEX_U16 ] = &&LABEL_BYTECODE_OP_GET_INDEX_U16,
+ [ BYTECODE_OP_GET_INDEX_U64 ] = &&LABEL_BYTECODE_OP_GET_INDEX_U64,
+
+ [ BYTECODE_OP_LOAD_FIELD ] = &&LABEL_BYTECODE_OP_LOAD_FIELD,
+ [ BYTECODE_OP_LOAD_FIELD_S8 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S8,
+ [ BYTECODE_OP_LOAD_FIELD_S16 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S16,
+ [ BYTECODE_OP_LOAD_FIELD_S32 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S32,
+ [ BYTECODE_OP_LOAD_FIELD_S64 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S64,
+ [ BYTECODE_OP_LOAD_FIELD_U8 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U8,
+ [ BYTECODE_OP_LOAD_FIELD_U16 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U16,
+ [ BYTECODE_OP_LOAD_FIELD_U32 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U32,
+ [ BYTECODE_OP_LOAD_FIELD_U64 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U64,
+ [ BYTECODE_OP_LOAD_FIELD_STRING ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_STRING,
+ [ BYTECODE_OP_LOAD_FIELD_SEQUENCE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_SEQUENCE,
+ [ BYTECODE_OP_LOAD_FIELD_DOUBLE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_DOUBLE,
+
+ [ BYTECODE_OP_UNARY_BIT_NOT ] = &&LABEL_BYTECODE_OP_UNARY_BIT_NOT,
+
+ [ BYTECODE_OP_RETURN_S64 ] = &&LABEL_BYTECODE_OP_RETURN_S64,
+ };
+#endif /* #ifndef INTERPRETER_USE_SWITCH */
+
+ START_OP
+
+ OP(BYTECODE_OP_UNKNOWN):
+ OP(BYTECODE_OP_LOAD_FIELD_REF):
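+		/*
+		 * The generic BYTECODE_OP_LOAD_FIELD_REF opcode is expected
+		 * to have been specialized into one of the typed variants
+		 * below before interpretation, so reaching it here is an
+		 * error.
+		 */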
+#ifdef INTERPRETER_USE_SWITCH
+ default:
+#endif /* INTERPRETER_USE_SWITCH */
+ ERR("unknown bytecode op %u",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+
+ OP(BYTECODE_OP_RETURN):
+ /* LTTNG_INTERPRETER_DISCARD or LTTNG_INTERPRETER_RECORD_FLAG */
+ /* Handle dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64:
+ case REG_U64:
+ retval = !!estack_ax_v;
+ break;
+ case REG_DOUBLE:
+ case REG_STRING:
+ case REG_PTR:
+ if (!output) {
+ ret = -EINVAL;
+ goto end;
+ }
+ retval = 0;
+ break;
+ case REG_STAR_GLOB_STRING:
+ case REG_UNKNOWN:
+ default:
+ ret = -EINVAL;
+ goto end;
+ }
+ ret = 0;
+ goto end;
+
+ OP(BYTECODE_OP_RETURN_S64):
+ /* LTTNG_INTERPRETER_DISCARD or LTTNG_INTERPRETER_RECORD_FLAG */
+ retval = !!estack_ax_v;
+ ret = 0;
+ goto end;
+
+ /* binary */
+ OP(BYTECODE_OP_MUL):
+ OP(BYTECODE_OP_DIV):
+ OP(BYTECODE_OP_MOD):
+ OP(BYTECODE_OP_PLUS):
+ OP(BYTECODE_OP_MINUS):
+ ERR("unsupported bytecode op %u",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+
+ OP(BYTECODE_OP_EQ):
+ {
+ /* Dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_EQ_S64);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_EQ_DOUBLE_S64);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_DOUBLE:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_EQ_S64_DOUBLE);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_EQ_DOUBLE);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_STRING:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64: /* Fall-through */
+ case REG_DOUBLE:
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ JUMP_TO(BYTECODE_OP_EQ_STRING);
+ case REG_STAR_GLOB_STRING:
+ JUMP_TO(BYTECODE_OP_EQ_STAR_GLOB_STRING);
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_STAR_GLOB_STRING:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64: /* Fall-through */
+ case REG_DOUBLE:
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ JUMP_TO(BYTECODE_OP_EQ_STAR_GLOB_STRING);
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_ax_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ OP(BYTECODE_OP_NE):
+ {
+ /* Dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_NE_S64);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_NE_DOUBLE_S64);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_DOUBLE:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_NE_S64_DOUBLE);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_NE_DOUBLE);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_STRING:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ case REG_DOUBLE:
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ JUMP_TO(BYTECODE_OP_NE_STRING);
+ case REG_STAR_GLOB_STRING:
+ JUMP_TO(BYTECODE_OP_NE_STAR_GLOB_STRING);
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_STAR_GLOB_STRING:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ case REG_DOUBLE:
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ JUMP_TO(BYTECODE_OP_NE_STAR_GLOB_STRING);
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_ax_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ OP(BYTECODE_OP_GT):
+ {
+ /* Dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_GT_S64);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_GT_DOUBLE_S64);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_DOUBLE:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_GT_S64_DOUBLE);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_GT_DOUBLE);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_STRING:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64: /* Fall-through */
+ case REG_DOUBLE: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ JUMP_TO(BYTECODE_OP_GT_STRING);
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_ax_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ OP(BYTECODE_OP_LT):
+ {
+ /* Dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_LT_S64);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_LT_DOUBLE_S64);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_DOUBLE:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_LT_S64_DOUBLE);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_LT_DOUBLE);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_STRING:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64: /* Fall-through */
+ case REG_DOUBLE: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ JUMP_TO(BYTECODE_OP_LT_STRING);
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_ax_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ OP(BYTECODE_OP_GE):
+ {
+ /* Dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_GE_S64);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_GE_DOUBLE_S64);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_DOUBLE:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_GE_S64_DOUBLE);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_GE_DOUBLE);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_STRING:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64: /* Fall-through */
+ case REG_DOUBLE: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ JUMP_TO(BYTECODE_OP_GE_STRING);
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_ax_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ OP(BYTECODE_OP_LE):
+ {
+ /* Dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_LE_S64);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_LE_DOUBLE_S64);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_DOUBLE:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_LE_S64_DOUBLE);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_LE_DOUBLE);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_STRING:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64: /* Fall-through */
+ case REG_DOUBLE: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ JUMP_TO(BYTECODE_OP_LE_STRING);
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_ax_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+
+ OP(BYTECODE_OP_EQ_STRING):
+ {
+ int res;
+
+ res = (stack_strcmp(stack, top, "==") == 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_NE_STRING):
+ {
+ int res;
+
+ res = (stack_strcmp(stack, top, "!=") != 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GT_STRING):
+ {
+ int res;
+
+ res = (stack_strcmp(stack, top, ">") > 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LT_STRING):
+ {
+ int res;
+
+ res = (stack_strcmp(stack, top, "<") < 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GE_STRING):
+ {
+ int res;
+
+ res = (stack_strcmp(stack, top, ">=") >= 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LE_STRING):
+ {
+ int res;
+
+ res = (stack_strcmp(stack, top, "<=") <= 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_EQ_STAR_GLOB_STRING):
+ {
+ int res;
+
+ res = (stack_star_glob_match(stack, top, "==") == 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_NE_STAR_GLOB_STRING):
+ {
+ int res;
+
+ res = (stack_star_glob_match(stack, top, "!=") != 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_EQ_S64):
+ {
+ int res;
+
+ res = (estack_bx_v == estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_NE_S64):
+ {
+ int res;
+
+ res = (estack_bx_v != estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GT_S64):
+ {
+ int res;
+
+ res = (estack_bx_v > estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LT_S64):
+ {
+ int res;
+
+ res = (estack_bx_v < estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GE_S64):
+ {
+ int res;
+
+ res = (estack_bx_v >= estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LE_S64):
+ {
+ int res;
+
+ res = (estack_bx_v <= estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_EQ_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d == estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_NE_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d != estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GT_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d > estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LT_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d < estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GE_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d >= estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LE_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d <= estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+
+ /* Mixed S64-double binary comparators */
+ OP(BYTECODE_OP_EQ_DOUBLE_S64):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d == estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_NE_DOUBLE_S64):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d != estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GT_DOUBLE_S64):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d > estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LT_DOUBLE_S64):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d < estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GE_DOUBLE_S64):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d >= estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LE_DOUBLE_S64):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d <= estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_EQ_S64_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx_v == estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_NE_S64_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx_v != estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GT_S64_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx_v > estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LT_S64_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx_v < estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GE_S64_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx_v >= estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LE_S64_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx_v <= estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_BIT_RSHIFT):
+ {
+ int64_t res;
+
+ if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ /* Catch undefined behavior. */
+ if (caa_unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ res = ((uint64_t) estack_bx_v >> (uint32_t) estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_BIT_LSHIFT):
+ {
+ int64_t res;
+
+ if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ /* Catch undefined behavior. */
+ if (caa_unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ res = ((uint64_t) estack_bx_v << (uint32_t) estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_BIT_AND):
+ {
+ int64_t res;
+
+ if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ res = ((uint64_t) estack_bx_v & (uint64_t) estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_BIT_OR):
+ {
+ int64_t res;
+
+ if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ res = ((uint64_t) estack_bx_v | (uint64_t) estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_BIT_XOR):
+ {
+ int64_t res;
+
+ if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ res = ((uint64_t) estack_bx_v ^ (uint64_t) estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+
+ /* unary */
+ OP(BYTECODE_OP_UNARY_PLUS):
+ {
+ /* Dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64: /* Fall-through. */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_UNARY_PLUS_S64);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_UNARY_PLUS_DOUBLE);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_ax_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ OP(BYTECODE_OP_UNARY_MINUS):
+ {
+ /* Dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64: /* Fall-through. */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_UNARY_MINUS_S64);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_UNARY_MINUS_DOUBLE);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_ax_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ OP(BYTECODE_OP_UNARY_NOT):
+ {
+ /* Dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64: /* Fall-through. */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_UNARY_NOT_S64);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_UNARY_NOT_DOUBLE);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_ax_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+
+ OP(BYTECODE_OP_UNARY_BIT_NOT):
+ {
+ /* Dynamic typing. */
+ if (!IS_INTEGER_REGISTER(estack_ax_t)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ estack_ax_v = ~(uint64_t) estack_ax_v;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct unary_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_UNARY_PLUS_S64):
+ OP(BYTECODE_OP_UNARY_PLUS_DOUBLE):
+ {
+ next_pc += sizeof(struct unary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_UNARY_MINUS_S64):
+ {
+ estack_ax_v = -estack_ax_v;
+ next_pc += sizeof(struct unary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_UNARY_MINUS_DOUBLE):
+ {
+ estack_ax(stack, top)->u.d = -estack_ax(stack, top)->u.d;
+ next_pc += sizeof(struct unary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_UNARY_NOT_S64):
+ {
+ estack_ax_v = !estack_ax_v;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct unary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_UNARY_NOT_DOUBLE):
+ {
+ estack_ax_v = !estack_ax(stack, top)->u.d;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct unary_op);
+ PO;
+ }
+
+ /* logical */
+ OP(BYTECODE_OP_AND):
+ {
+ struct logical_op *insn = (struct logical_op *) pc;
+
+ if (estack_ax_t != REG_S64 && estack_ax_t != REG_U64) {
+ ret = -EINVAL;
+ goto end;
+ }
+ /* If AX is 0, skip and evaluate to 0 */
+ if (unlikely(estack_ax_v == 0)) {
+ dbg_printf("Jumping to bytecode offset %u\n",
+ (unsigned int) insn->skip_offset);
+ next_pc = start_pc + insn->skip_offset;
+ } else {
+ /* Pop 1 when jump not taken */
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ next_pc += sizeof(struct logical_op);
+ }
+ PO;
+ }
+ OP(BYTECODE_OP_OR):
+ {
+ struct logical_op *insn = (struct logical_op *) pc;
+
+ if (estack_ax_t != REG_S64 && estack_ax_t != REG_U64) {
+ ret = -EINVAL;
+ goto end;
+ }
+ /* If AX is nonzero, skip and evaluate to 1 */
+ if (unlikely(estack_ax_v != 0)) {
+ estack_ax_v = 1;
+ dbg_printf("Jumping to bytecode offset %u\n",
+ (unsigned int) insn->skip_offset);
+ next_pc = start_pc + insn->skip_offset;
+ } else {
+ /* Pop 1 when jump not taken */
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ next_pc += sizeof(struct logical_op);
+ }
+ PO;
+ }
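+
+ /*
+ * The logical ops implement short-circuit evaluation: the compiled
+ * skip_offset points past the right-hand operand. For a filter such as
+ * "a == 1 && b == 2" (illustrative), when "a == 1" yields 0 the AND op
+ * jumps straight past the "b == 2" sub-expression with the 0 result
+ * left in AX; otherwise AX is popped and evaluation continues.
+ */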
+
+ /* load field ref */
+ OP(BYTECODE_OP_LOAD_FIELD_REF_STRING):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("load field ref offset %u type string\n",
+ ref->offset);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.s.str =
+ *(const char * const *) &interpreter_stack_data[ref->offset];
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printf("Interpreter warning: loading a NULL string.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ estack_ax_t = REG_STRING;
+ dbg_printf("ref load string %s\n", estack_ax(stack, top)->u.s.str);
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("load field ref offset %u type sequence\n",
+ ref->offset);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.s.seq_len =
+ *(unsigned long *) &interpreter_stack_data[ref->offset];
+ estack_ax(stack, top)->u.s.str =
+ *(const char **) (&interpreter_stack_data[ref->offset
+ + sizeof(unsigned long)]);
+ estack_ax_t = REG_STRING;
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printf("Interpreter warning: loading a NULL sequence.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD_REF_S64):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("load field ref offset %u type s64\n",
+ ref->offset);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v =
+ ((struct literal_numeric *) &interpreter_stack_data[ref->offset])->v;
+ estack_ax_t = REG_S64;
+ dbg_printf("ref load s64 %" PRIi64 "\n", estack_ax_v);
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD_REF_DOUBLE):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("load field ref offset %u type double\n",
+ ref->offset);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ memcpy(&estack_ax(stack, top)->u.d, &interpreter_stack_data[ref->offset],
+ sizeof(struct literal_double));
+ estack_ax_t = REG_DOUBLE;
+ dbg_printf("ref load double %g\n", estack_ax(stack, top)->u.d);
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ /* load from immediate operand */
+ OP(BYTECODE_OP_LOAD_STRING):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ dbg_printf("load string %s\n", insn->data);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.s.str = insn->data;
+ estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_PLAIN;
+ estack_ax_t = REG_STRING;
+ next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_STAR_GLOB_STRING):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ dbg_printf("load globbing pattern %s\n", insn->data);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.s.str = insn->data;
+ estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_STAR_GLOB;
+ estack_ax_t = REG_STAR_GLOB_STRING;
+ next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_S64):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = ((struct literal_numeric *) insn->data)->v;
+ estack_ax_t = REG_S64;
+ dbg_printf("load s64 %" PRIi64 "\n", estack_ax_v);
+ next_pc += sizeof(struct load_op)
+ + sizeof(struct literal_numeric);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_DOUBLE):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ memcpy(&estack_ax(stack, top)->u.d, insn->data,
+ sizeof(struct literal_double));
+ estack_ax_t = REG_DOUBLE;
+ dbg_printf("load double %g\n", estack_ax(stack, top)->u.d);
+ next_pc += sizeof(struct load_op)
+ + sizeof(struct literal_double);
+ PO;
+ }
+
+ /* cast */
+ OP(BYTECODE_OP_CAST_TO_S64):
+ {
+ /* Dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64:
+ JUMP_TO(BYTECODE_OP_CAST_NOP);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_CAST_DOUBLE_TO_S64);
+ case REG_U64:
+ estack_ax_t = REG_S64;
+ JUMP_TO(BYTECODE_OP_CAST_NOP);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_ax_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+
+ OP(BYTECODE_OP_CAST_DOUBLE_TO_S64):
+ {
+ estack_ax_v = (int64_t) estack_ax(stack, top)->u.d;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct cast_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_CAST_NOP):
+ {
+ next_pc += sizeof(struct cast_op);
+ PO;
+ }
+
+ /* get context ref */
+ OP(BYTECODE_OP_GET_CONTEXT_REF):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+ struct lttng_ctx_field *ctx_field;
+ struct lttng_ctx_value v;
+
+ dbg_printf("get context ref offset %u type dynamic\n",
+ ref->offset);
+ ctx_field = &ctx->fields[ref->offset];
+ ctx_field->get_value(ctx_field, &v);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ switch (v.sel) {
+ case LTTNG_UST_DYNAMIC_TYPE_NONE:
+ ret = -EINVAL;
+ goto end;
+ case LTTNG_UST_DYNAMIC_TYPE_S64:
+ estack_ax_v = v.u.s64;
+ estack_ax_t = REG_S64;
+ dbg_printf("ref get context dynamic s64 %" PRIi64 "\n", estack_ax_v);
+ break;
+ case LTTNG_UST_DYNAMIC_TYPE_DOUBLE:
+ estack_ax(stack, top)->u.d = v.u.d;
+ estack_ax_t = REG_DOUBLE;
+ dbg_printf("ref get context dynamic double %g\n", estack_ax(stack, top)->u.d);
+ break;
+ case LTTNG_UST_DYNAMIC_TYPE_STRING:
+ estack_ax(stack, top)->u.s.str = v.u.str;
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printf("Interpreter warning: loading a NULL string.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ dbg_printf("ref get context dynamic string %s\n", estack_ax(stack, top)->u.s.str);
+ estack_ax_t = REG_STRING;
+ break;
+ default:
+ dbg_printf("Interpreter warning: unknown dynamic type (%d).\n", (int) v.sel);
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_CONTEXT_REF_STRING):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+ struct lttng_ctx_field *ctx_field;
+ struct lttng_ctx_value v;
+
+ dbg_printf("get context ref offset %u type string\n",
+ ref->offset);
+ ctx_field = &ctx->fields[ref->offset];
+ ctx_field->get_value(ctx_field, &v);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.s.str = v.u.str;
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printf("Interpreter warning: loading a NULL string.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ estack_ax_t = REG_STRING;
+ dbg_printf("ref get context string %s\n", estack_ax(stack, top)->u.s.str);
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_CONTEXT_REF_S64):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+ struct lttng_ctx_field *ctx_field;
+ struct lttng_ctx_value v;
+
+ dbg_printf("get context ref offset %u type s64\n",
+ ref->offset);
+ ctx_field = &ctx->fields[ref->offset];
+ ctx_field->get_value(ctx_field, &v);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = v.u.s64;
+ estack_ax_t = REG_S64;
+ dbg_printf("ref get context s64 %" PRIi64 "\n", estack_ax_v);
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_CONTEXT_REF_DOUBLE):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+ struct lttng_ctx_field *ctx_field;
+ struct lttng_ctx_value v;
+
+ dbg_printf("get context ref offset %u type double\n",
+ ref->offset);
+ ctx_field = &ctx->fields[ref->offset];
+ ctx_field->get_value(ctx_field, &v);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ memcpy(&estack_ax(stack, top)->u.d, &v.u.d, sizeof(struct literal_double));
+ estack_ax_t = REG_DOUBLE;
+ dbg_printf("ref get context double %g\n", estack_ax(stack, top)->u.d);
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_CONTEXT_ROOT):
+ {
+ dbg_printf("op get context root\n");
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_CONTEXT;
+ /* "field" only needed for variants. */
+ estack_ax(stack, top)->u.ptr.field = NULL;
+ estack_ax_t = REG_PTR;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_APP_CONTEXT_ROOT):
+ {
+ dbg_printf("op get app context root\n");
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_APP_CONTEXT;
+ /* "field" only needed for variants. */
+ estack_ax(stack, top)->u.ptr.field = NULL;
+ estack_ax_t = REG_PTR;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_PAYLOAD_ROOT):
+ {
+ dbg_printf("op get app payload root\n");
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_PAYLOAD;
+ estack_ax(stack, top)->u.ptr.ptr = interpreter_stack_data;
+ /* "field" only needed for variants. */
+ estack_ax(stack, top)->u.ptr.field = NULL;
+ estack_ax_t = REG_PTR;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_SYMBOL):
+ {
+ dbg_printf("op get symbol\n");
+ switch (estack_ax(stack, top)->u.ptr.type) {
+ case LOAD_OBJECT:
+ ERR("Nested fields not implemented yet.");
+ ret = -EINVAL;
+ goto end;
+ case LOAD_ROOT_CONTEXT:
+ case LOAD_ROOT_APP_CONTEXT:
+ case LOAD_ROOT_PAYLOAD:
+ /*
+ * Symbol lookup is performed by the
+ * specialization phase.
+ */
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_SYMBOL_FIELD):
+ {
+ /*
+ * Used for first variant encountered in a
+ * traversal. Variants are not implemented yet.
+ */
+ ret = -EINVAL;
+ goto end;
+ }
+
+ OP(BYTECODE_OP_GET_INDEX_U16):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
+
+ dbg_printf("op get index u16\n");
+ ret = dynamic_get_index(ctx, bytecode, index->index, estack_ax(stack, top));
+ if (ret)
+ goto end;
+ estack_ax_v = estack_ax(stack, top)->u.v;
+ estack_ax_t = estack_ax(stack, top)->type;
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_INDEX_U64):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
+
+ dbg_printf("op get index u64\n");
+ ret = dynamic_get_index(ctx, bytecode, index->index, estack_ax(stack, top));
+ if (ret)
+ goto end;
+ estack_ax_v = estack_ax(stack, top)->u.v;
+ estack_ax_t = estack_ax(stack, top)->type;
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD):
+ {
+ dbg_printf("op load field\n");
+ ret = dynamic_load_field(estack_ax(stack, top));
+ if (ret)
+ goto end;
+ estack_ax_v = estack_ax(stack, top)->u.v;
+ estack_ax_t = estack_ax(stack, top)->type;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD_S8):
+ {
+ dbg_printf("op load field s8\n");
+
+ estack_ax_v = *(int8_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_S16):
+ {
+ dbg_printf("op load field s16\n");
+
+ estack_ax_v = *(int16_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_S32):
+ {
+ dbg_printf("op load field s32\n");
+
+ estack_ax_v = *(int32_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_S64):
+ {
+ dbg_printf("op load field s64\n");
+
+ estack_ax_v = *(int64_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_U8):
+ {
+ dbg_printf("op load field u8\n");
+
+ estack_ax_v = *(uint8_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_U16):
+ {
+ dbg_printf("op load field u16\n");
+
+ estack_ax_v = *(uint16_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_U32):
+ {
+ dbg_printf("op load field u32\n");
+
+ estack_ax_v = *(uint32_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_U64):
+ {
+ dbg_printf("op load field u64\n");
+
+ estack_ax_v = *(uint64_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_DOUBLE):
+ {
+ dbg_printf("op load field double\n");
+
+ memcpy(&estack_ax(stack, top)->u.d,
+ estack_ax(stack, top)->u.ptr.ptr,
+ sizeof(struct literal_double));
+ estack_ax(stack, top)->type = REG_DOUBLE;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD_STRING):
+ {
+ const char *str;
+
+ dbg_printf("op load field string\n");
+ str = (const char *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax(stack, top)->u.s.str = str;
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printf("Interpreter warning: loading a NULL string.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ estack_ax(stack, top)->type = REG_STRING;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD_SEQUENCE):
+ {
+ const char *ptr;
+
+ dbg_printf("op load field string sequence\n");
+ ptr = estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax(stack, top)->u.s.seq_len = *(unsigned long *) ptr;
+ estack_ax(stack, top)->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
+ estack_ax(stack, top)->type = REG_STRING;
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printf("Interpreter warning: loading a NULL sequence.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ END_OP
+end:
+ /* Return _DISCARD on error. */
+ if (ret)
+ return LTTNG_INTERPRETER_DISCARD;
+
+ if (output) {
+ return lttng_bytecode_interpret_format_output(estack_ax(stack, top),
+ output);
+ }
+
+ return retval;
+}
+
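+/*
+ * The two entry points below are thin wrappers around
+ * bytecode_interpret(). Assuming the usual record/discard return
+ * convention, a caller evaluating a filter attached to an event would
+ * proceed roughly as follows (illustrative sketch):
+ *
+ * ret = lttng_bytecode_filter_interpret(runtime, stack_data);
+ * if (ret & LTTNG_INTERPRETER_RECORD_FLAG)
+ * record the event;
+ * else
+ * discard it;
+ */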
+uint64_t lttng_bytecode_filter_interpret(void *filter_data,
+ const char *filter_stack_data)
+{
+ return bytecode_interpret(filter_data, filter_stack_data, NULL);
+}
+
+uint64_t lttng_bytecode_capture_interpret(void *capture_data,
+ const char *capture_stack_data,
+ struct lttng_interpreter_output *output)
+{
+ return bytecode_interpret(capture_data, capture_stack_data,
+ (struct lttng_interpreter_output *) output);
+}
+
+#undef START_OP
+#undef OP
+#undef PO
+#undef END_OP
--- /dev/null
+/*
+ * lttng-bytecode-specialize.c
+ *
+ * LTTng UST bytecode specializer.
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define _LGPL_SOURCE
+#include <stddef.h>
+#include <stdint.h>
+
+#include "lttng-bytecode.h"
+#include <lttng/align.h>
+#include "ust-events-internal.h"
+
+static int lttng_fls(int val)
+{
+ int r = 32;
+ unsigned int x = (unsigned int) val;
+
+ if (!x)
+ return 0;
+ if (!(x & 0xFFFF0000U)) {
+ x <<= 16;
+ r -= 16;
+ }
+ if (!(x & 0xFF000000U)) {
+ x <<= 8;
+ r -= 8;
+ }
+ if (!(x & 0xF0000000U)) {
+ x <<= 4;
+ r -= 4;
+ }
+ if (!(x & 0xC0000000U)) {
+ x <<= 2;
+ r -= 2;
+ }
+ if (!(x & 0x80000000U)) {
+ r -= 1;
+ }
+ return r;
+}
+
+static int get_count_order(unsigned int count)
+{
+ int order;
+
+ order = lttng_fls(count) - 1;
+ if (count & (count - 1))
+ order++;
+ return order;
+}
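+
+/*
+ * Worked example (illustrative): lttng_fls(5) == 3, since the highest
+ * set bit of 5 is bit 2; get_count_order(5) == 3 because 5 is not a
+ * power of two and rounds up to 2^3 == 8. For an exact power of two,
+ * get_count_order(8) == 3.
+ */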
+
+static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
+ size_t align, size_t len)
+{
+ ssize_t ret;
+ size_t padding = offset_align(runtime->data_len, align);
+ size_t new_len = runtime->data_len + padding + len;
+ size_t new_alloc_len = new_len;
+ size_t old_alloc_len = runtime->data_alloc_len;
+
+ if (new_len > BYTECODE_MAX_DATA_LEN)
+ return -EINVAL;
+
+ if (new_alloc_len > old_alloc_len) {
+ char *newptr;
+
+ new_alloc_len =
+ max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
+ newptr = realloc(runtime->data, new_alloc_len);
+ if (!newptr)
+ return -ENOMEM;
+ runtime->data = newptr;
+ /* Zero only the newly allocated memory range. */
+ memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
+ runtime->data_alloc_len = new_alloc_len;
+ }
+ runtime->data_len += padding;
+ ret = runtime->data_len;
+ runtime->data_len += len;
+ return ret;
+}
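+
+/*
+ * Growth policy illustration: on overflow the data area grows to at
+ * least double its previous size, so pushing N bytes costs O(N)
+ * amortized. E.g. with data_alloc_len == 16 and a request bringing
+ * new_len to 20, the new allocation is
+ * max(1U << get_count_order(20), 16 << 1) == 32 bytes.
+ */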
+
+static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
+ const void *p, size_t align, size_t len)
+{
+ ssize_t offset;
+
+ offset = bytecode_reserve_data(runtime, align, len);
+ if (offset < 0)
+ return -ENOMEM;
+ memcpy(&runtime->data[offset], p, len);
+ return offset;
+}
+
+static int specialize_load_field(struct vstack_entry *stack_top,
+ struct load_op *insn)
+{
+ int ret;
+
+ switch (stack_top->load.type) {
+ case LOAD_OBJECT:
+ break;
+ case LOAD_ROOT_CONTEXT:
+ case LOAD_ROOT_APP_CONTEXT:
+ case LOAD_ROOT_PAYLOAD:
+ default:
+ dbg_printf("Bytecode warning: cannot load root, missing field name.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (stack_top->load.object_type) {
+ case OBJECT_TYPE_S8:
+ dbg_printf("op load field s8\n");
+ stack_top->type = REG_S64;
+ if (!stack_top->load.rev_bo)
+ insn->op = BYTECODE_OP_LOAD_FIELD_S8;
+ break;
+ case OBJECT_TYPE_S16:
+ dbg_printf("op load field s16\n");
+ stack_top->type = REG_S64;
+ if (!stack_top->load.rev_bo)
+ insn->op = BYTECODE_OP_LOAD_FIELD_S16;
+ break;
+ case OBJECT_TYPE_S32:
+ dbg_printf("op load field s32\n");
+ stack_top->type = REG_S64;
+ if (!stack_top->load.rev_bo)
+ insn->op = BYTECODE_OP_LOAD_FIELD_S32;
+ break;
+ case OBJECT_TYPE_S64:
+ dbg_printf("op load field s64\n");
+ stack_top->type = REG_S64;
+ if (!stack_top->load.rev_bo)
+ insn->op = BYTECODE_OP_LOAD_FIELD_S64;
+ break;
+ case OBJECT_TYPE_SIGNED_ENUM:
+ dbg_printf("op load field signed enumeration\n");
+ stack_top->type = REG_PTR;
+ break;
+ case OBJECT_TYPE_U8:
+ dbg_printf("op load field u8\n");
+ stack_top->type = REG_U64;
+ insn->op = BYTECODE_OP_LOAD_FIELD_U8;
+ break;
+ case OBJECT_TYPE_U16:
+ dbg_printf("op load field u16\n");
+ stack_top->type = REG_U64;
+ if (!stack_top->load.rev_bo)
+ insn->op = BYTECODE_OP_LOAD_FIELD_U16;
+ break;
+ case OBJECT_TYPE_U32:
+ dbg_printf("op load field u32\n");
+ stack_top->type = REG_U64;
+ if (!stack_top->load.rev_bo)
+ insn->op = BYTECODE_OP_LOAD_FIELD_U32;
+ break;
+ case OBJECT_TYPE_U64:
+ dbg_printf("op load field u64\n");
+ stack_top->type = REG_U64;
+ if (!stack_top->load.rev_bo)
+ insn->op = BYTECODE_OP_LOAD_FIELD_U64;
+ break;
+ case OBJECT_TYPE_UNSIGNED_ENUM:
+ dbg_printf("op load field unsigned enumeration\n");
+ stack_top->type = REG_PTR;
+ break;
+ case OBJECT_TYPE_DOUBLE:
+ stack_top->type = REG_DOUBLE;
+ insn->op = BYTECODE_OP_LOAD_FIELD_DOUBLE;
+ break;
+ case OBJECT_TYPE_STRING:
+ dbg_printf("op load field string\n");
+ stack_top->type = REG_STRING;
+ insn->op = BYTECODE_OP_LOAD_FIELD_STRING;
+ break;
+ case OBJECT_TYPE_STRING_SEQUENCE:
+ dbg_printf("op load field string sequence\n");
+ stack_top->type = REG_STRING;
+ insn->op = BYTECODE_OP_LOAD_FIELD_SEQUENCE;
+ break;
+ case OBJECT_TYPE_DYNAMIC:
+ dbg_printf("op load field dynamic\n");
+ stack_top->type = REG_UNKNOWN;
+ /* Don't specialize load op. */
+ break;
+ case OBJECT_TYPE_SEQUENCE:
+ case OBJECT_TYPE_ARRAY:
+ case OBJECT_TYPE_STRUCT:
+ case OBJECT_TYPE_VARIANT:
+ ERR("Sequences, arrays, struct and variant cannot be loaded (nested types).");
+ ret = -EINVAL;
+ goto end;
+ }
+ return 0;
+
+end:
+ return ret;
+}
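+
+/*
+ * Illustration: for a payload field declared as a 32-bit unsigned
+ * integer, the generic BYTECODE_OP_LOAD_FIELD opcode is rewritten in
+ * place to BYTECODE_OP_LOAD_FIELD_U32 and the virtual stack top becomes
+ * REG_U64, so the interpreter performs no type test at trace time.
+ */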
+
+static int specialize_get_index_object_type(enum object_type *otype,
+ int signedness, uint32_t elem_len)
+{
+ switch (elem_len) {
+ case 8:
+ if (signedness)
+ *otype = OBJECT_TYPE_S8;
+ else
+ *otype = OBJECT_TYPE_U8;
+ break;
+ case 16:
+ if (signedness)
+ *otype = OBJECT_TYPE_S16;
+ else
+ *otype = OBJECT_TYPE_U16;
+ break;
+ case 32:
+ if (signedness)
+ *otype = OBJECT_TYPE_S32;
+ else
+ *otype = OBJECT_TYPE_U32;
+ break;
+ case 64:
+ if (signedness)
+ *otype = OBJECT_TYPE_S64;
+ else
+ *otype = OBJECT_TYPE_U64;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
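+
+/*
+ * E.g. a 32-bit signed element yields OBJECT_TYPE_S32, which
+ * specialize_load_field() later lowers to BYTECODE_OP_LOAD_FIELD_S32.
+ */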
+
+static int specialize_get_index(struct bytecode_runtime *runtime,
+ struct load_op *insn, uint64_t index,
+ struct vstack_entry *stack_top,
+ int idx_len)
+{
+ int ret;
+ struct bytecode_get_index_data gid;
+ ssize_t data_offset;
+
+ memset(&gid, 0, sizeof(gid));
+ switch (stack_top->load.type) {
+ case LOAD_OBJECT:
+ switch (stack_top->load.object_type) {
+ case OBJECT_TYPE_ARRAY:
+ {
+ const struct lttng_integer_type *integer_type;
+ const struct lttng_event_field *field;
+ uint32_t elem_len, num_elems;
+ int signedness;
+
+ field = stack_top->load.field;
+ switch (field->type.atype) {
+ case atype_array:
+ integer_type = &field->type.u.legacy.array.elem_type.u.basic.integer;
+ num_elems = field->type.u.legacy.array.length;
+ break;
+ case atype_array_nestable:
+ if (field->type.u.array_nestable.elem_type->atype != atype_integer) {
+ ret = -EINVAL;
+ goto end;
+ }
+ integer_type = &field->type.u.array_nestable.elem_type->u.integer;
+ num_elems = field->type.u.array_nestable.length;
+ break;
+ default:
+ ret = -EINVAL;
+ goto end;
+ }
+ elem_len = integer_type->size;
+ signedness = integer_type->signedness;
+ if (index >= num_elems) {
+ ret = -EINVAL;
+ goto end;
+ }
+ ret = specialize_get_index_object_type(&stack_top->load.object_type,
+ signedness, elem_len);
+ if (ret)
+ goto end;
+ gid.offset = index * (elem_len / CHAR_BIT);
+ gid.array_len = num_elems * (elem_len / CHAR_BIT);
+ gid.elem.type = stack_top->load.object_type;
+ gid.elem.len = elem_len;
+ if (integer_type->reverse_byte_order)
+ gid.elem.rev_bo = true;
+ stack_top->load.rev_bo = gid.elem.rev_bo;
+ break;
+ }
+ case OBJECT_TYPE_SEQUENCE:
+ {
+ const struct lttng_integer_type *integer_type;
+ const struct lttng_event_field *field;
+ uint32_t elem_len;
+ int signedness;
+
+ field = stack_top->load.field;
+ switch (field->type.atype) {
+ case atype_sequence:
+ integer_type = &field->type.u.legacy.sequence.elem_type.u.basic.integer;
+ break;
+ case atype_sequence_nestable:
+ if (field->type.u.sequence_nestable.elem_type->atype != atype_integer) {
+ ret = -EINVAL;
+ goto end;
+ }
+ integer_type = &field->type.u.sequence_nestable.elem_type->u.integer;
+ break;
+ default:
+ ret = -EINVAL;
+ goto end;
+ }
+ elem_len = integer_type->size;
+ signedness = integer_type->signedness;
+ ret = specialize_get_index_object_type(&stack_top->load.object_type,
+ signedness, elem_len);
+ if (ret)
+ goto end;
+ gid.offset = index * (elem_len / CHAR_BIT);
+ gid.elem.type = stack_top->load.object_type;
+ gid.elem.len = elem_len;
+ if (integer_type->reverse_byte_order)
+ gid.elem.rev_bo = true;
+ stack_top->load.rev_bo = gid.elem.rev_bo;
+ break;
+ }
+ case OBJECT_TYPE_STRUCT:
+ /* Only generated by the specialize phase. */
+ case OBJECT_TYPE_VARIANT: /* Fall-through */
+ default:
+ ERR("Unexpected get index type %d",
+ (int) stack_top->load.object_type);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case LOAD_ROOT_CONTEXT:
+ case LOAD_ROOT_APP_CONTEXT:
+ case LOAD_ROOT_PAYLOAD:
+ ERR("Index lookup for root field not implemented yet.");
+ ret = -EINVAL;
+ goto end;
+ }
+ data_offset = bytecode_push_data(runtime, &gid,
+ __alignof__(gid), sizeof(gid));
+ if (data_offset < 0) {
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (idx_len) {
+ case 2:
+ ((struct get_index_u16 *) insn->data)->index = data_offset;
+ break;
+ case 8:
+ ((struct get_index_u64 *) insn->data)->index = data_offset;
+ break;
+ default:
+ ret = -EINVAL;
+ goto end;
+ }
+
+ return 0;
+
+end:
+ return ret;
+}
+
+static int specialize_context_lookup_name(struct lttng_ctx *ctx,
+ struct bytecode_runtime *bytecode,
+ struct load_op *insn)
+{
+ uint16_t offset;
+ const char *name;
+
+ offset = ((struct get_symbol *) insn->data)->offset;
+ name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
+ return lttng_get_context_index(ctx, name);
+}
+
+static int specialize_load_object(const struct lttng_event_field *field,
+ struct vstack_load *load, bool is_context)
+{
+ load->type = LOAD_OBJECT;
+
+ switch (field->type.atype) {
+ case atype_integer:
+ if (field->type.u.integer.signedness)
+ load->object_type = OBJECT_TYPE_S64;
+ else
+ load->object_type = OBJECT_TYPE_U64;
+ load->rev_bo = false;
+ break;
+ case atype_enum:
+ case atype_enum_nestable:
+ {
+ const struct lttng_integer_type *itype;
+
+ if (field->type.atype == atype_enum) {
+ itype = &field->type.u.legacy.basic.enumeration.container_type;
+ } else {
+ itype = &field->type.u.enum_nestable.container_type->u.integer;
+ }
+ if (itype->signedness)
+ load->object_type = OBJECT_TYPE_SIGNED_ENUM;
+ else
+ load->object_type = OBJECT_TYPE_UNSIGNED_ENUM;
+ load->rev_bo = false;
+ break;
+ }
+ case atype_array:
+ if (field->type.u.legacy.array.elem_type.atype != atype_integer) {
+ ERR("Array nesting only supports integer types.");
+ return -EINVAL;
+ }
+ if (is_context) {
+ load->object_type = OBJECT_TYPE_STRING;
+ } else {
+ if (field->type.u.legacy.array.elem_type.u.basic.integer.encoding == lttng_encode_none) {
+ load->object_type = OBJECT_TYPE_ARRAY;
+ load->field = field;
+ } else {
+ load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
+ }
+ }
+ break;
+ case atype_array_nestable:
+ if (field->type.u.array_nestable.elem_type->atype != atype_integer) {
+ ERR("Array nesting only supports integer types.");
+ return -EINVAL;
+ }
+ if (is_context) {
+ load->object_type = OBJECT_TYPE_STRING;
+ } else {
+ if (field->type.u.array_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
+ load->object_type = OBJECT_TYPE_ARRAY;
+ load->field = field;
+ } else {
+ load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
+ }
+ }
+ break;
+ case atype_sequence:
+ if (field->type.u.legacy.sequence.elem_type.atype != atype_integer) {
+ ERR("Sequence nesting only supports integer types.");
+ return -EINVAL;
+ }
+ if (is_context) {
+ load->object_type = OBJECT_TYPE_STRING;
+ } else {
+ if (field->type.u.legacy.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) {
+ load->object_type = OBJECT_TYPE_SEQUENCE;
+ load->field = field;
+ } else {
+ load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
+ }
+ }
+ break;
+ case atype_sequence_nestable:
+ if (field->type.u.sequence_nestable.elem_type->atype != atype_integer) {
+ ERR("Sequence nesting only supports integer types.");
+ return -EINVAL;
+ }
+ if (is_context) {
+ load->object_type = OBJECT_TYPE_STRING;
+ } else {
+ if (field->type.u.sequence_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
+ load->object_type = OBJECT_TYPE_SEQUENCE;
+ load->field = field;
+ } else {
+ load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
+ }
+ }
+ break;
+
+ case atype_string:
+ load->object_type = OBJECT_TYPE_STRING;
+ break;
+ case atype_float:
+ load->object_type = OBJECT_TYPE_DOUBLE;
+ break;
+ case atype_dynamic:
+ load->object_type = OBJECT_TYPE_DYNAMIC;
+ break;
+ case atype_struct:
+ ERR("Structure type cannot be loaded.");
+ return -EINVAL;
+ default:
+ ERR("Unknown type: %d", (int) field->type.atype);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int specialize_context_lookup(struct lttng_ctx *ctx,
+ struct bytecode_runtime *runtime,
+ struct load_op *insn,
+ struct vstack_load *load)
+{
+ int idx, ret;
+ struct lttng_ctx_field *ctx_field;
+ struct lttng_event_field *field;
+ struct bytecode_get_index_data gid;
+ ssize_t data_offset;
+
+ idx = specialize_context_lookup_name(ctx, runtime, insn);
+ if (idx < 0) {
+ return -ENOENT;
+ }
+ ctx_field = &ctx->fields[idx];
+ field = &ctx_field->event_field;
+ ret = specialize_load_object(field, load, true);
+ if (ret)
+ return ret;
+ /* Specialize each get_symbol into a get_index. */
+ insn->op = BYTECODE_OP_GET_INDEX_U16;
+ memset(&gid, 0, sizeof(gid));
+ gid.ctx_index = idx;
+ gid.elem.type = load->object_type;
+ gid.elem.rev_bo = load->rev_bo;
+ gid.field = field;
+ data_offset = bytecode_push_data(runtime, &gid,
+ __alignof__(gid), sizeof(gid));
+ if (data_offset < 0) {
+ return -EINVAL;
+ }
+ ((struct get_index_u16 *) insn->data)->index = data_offset;
+ return 0;
+}
+
+static int specialize_app_context_lookup(struct lttng_ctx **pctx,
+ struct bytecode_runtime *runtime,
+ struct load_op *insn,
+ struct vstack_load *load)
+{
+ uint16_t offset;
+ const char *orig_name;
+ char *name = NULL;
+ int idx, ret;
+ struct lttng_ctx_field *ctx_field;
+ struct lttng_event_field *field;
+ struct bytecode_get_index_data gid;
+ ssize_t data_offset;
+
+ offset = ((struct get_symbol *) insn->data)->offset;
+ orig_name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
+ name = zmalloc(strlen(orig_name) + strlen("$app.") + 1);
+ if (!name) {
+ ret = -ENOMEM;
+ goto end;
+ }
+ strcpy(name, "$app.");
+ strcat(name, orig_name);
+ idx = lttng_get_context_index(*pctx, name);
+ if (idx < 0) {
+ assert(lttng_context_is_app(name));
+ ret = lttng_ust_add_app_context_to_ctx_rcu(name,
+ pctx);
+ if (ret)
+ goto end; /* Do not leak "name". */
+ idx = lttng_get_context_index(*pctx, name);
+ if (idx < 0) {
+ ret = -ENOENT;
+ goto end;
+ }
+ }
+ ctx_field = &(*pctx)->fields[idx];
+ field = &ctx_field->event_field;
+ ret = specialize_load_object(field, load, true);
+ if (ret)
+ goto end;
+ /* Specialize each get_symbol into a get_index. */
+ insn->op = BYTECODE_OP_GET_INDEX_U16;
+ memset(&gid, 0, sizeof(gid));
+ gid.ctx_index = idx;
+ gid.elem.type = load->object_type;
+ gid.elem.rev_bo = load->rev_bo;
+ gid.field = field;
+ data_offset = bytecode_push_data(runtime, &gid,
+ __alignof__(gid), sizeof(gid));
+ if (data_offset < 0) {
+ ret = -EINVAL;
+ goto end;
+ }
+ ((struct get_index_u16 *) insn->data)->index = data_offset;
+ ret = 0;
+end:
+ free(name);
+ return ret;
+}
+
+static int specialize_payload_lookup(const struct lttng_event_desc *event_desc,
+ struct bytecode_runtime *runtime,
+ struct load_op *insn,
+ struct vstack_load *load)
+{
+ const char *name;
+ uint16_t offset;
+ unsigned int i, nr_fields;
+ bool found = false;
+ uint32_t field_offset = 0;
+ const struct lttng_event_field *field;
+ int ret;
+ struct bytecode_get_index_data gid;
+ ssize_t data_offset;
+
+ nr_fields = event_desc->nr_fields;
+ offset = ((struct get_symbol *) insn->data)->offset;
+ name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
+ for (i = 0; i < nr_fields; i++) {
+ field = &event_desc->fields[i];
+ if (field->u.ext.nofilter) {
+ continue;
+ }
+ if (!strcmp(field->name, name)) {
+ found = true;
+ break;
+ }
+ /* compute field offset on stack */
+ switch (field->type.atype) {
+ case atype_integer:
+ case atype_enum:
+ case atype_enum_nestable:
+ field_offset += sizeof(int64_t);
+ break;
+ case atype_array:
+ case atype_array_nestable:
+ case atype_sequence:
+ case atype_sequence_nestable:
+ field_offset += sizeof(unsigned long);
+ field_offset += sizeof(void *);
+ break;
+ case atype_string:
+ field_offset += sizeof(void *);
+ break;
+ case atype_float:
+ field_offset += sizeof(double);
+ break;
+ default:
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ if (!found) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ ret = specialize_load_object(field, load, false);
+ if (ret)
+ goto end;
+
+ /* Specialize each get_symbol into a get_index. */
+ insn->op = BYTECODE_OP_GET_INDEX_U16;
+ memset(&gid, 0, sizeof(gid));
+ gid.offset = field_offset;
+ gid.elem.type = load->object_type;
+ gid.elem.rev_bo = load->rev_bo;
+ gid.field = field;
+ data_offset = bytecode_push_data(runtime, &gid,
+ __alignof__(gid), sizeof(gid));
+ if (data_offset < 0) {
+ ret = -EINVAL;
+ goto end;
+ }
+ ((struct get_index_u16 *) insn->data)->index = data_offset;
+ ret = 0;
+end:
+ return ret;
+}
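+
+/*
+ * Offset computation example (illustrative, LP64): for an event with
+ * fields { u32 a; string msg; u64 count; }, looking up "count" first
+ * skips "a" (sizeof(int64_t), integers are promoted on the interpreter
+ * stack) and "msg" (sizeof(void *)), giving gid.offset == 16.
+ */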
+
+int lttng_bytecode_specialize(const struct lttng_event_desc *event_desc,
+ struct bytecode_runtime *bytecode)
+{
+ void *pc, *next_pc, *start_pc;
+ int ret = -EINVAL;
+ struct vstack _stack;
+ struct vstack *stack = &_stack;
+ struct lttng_ctx **pctx = bytecode->p.pctx;
+
+ vstack_init(stack);
+
+ start_pc = &bytecode->code[0];
+ for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
+ pc = next_pc) {
+ switch (*(bytecode_opcode_t *) pc) {
+ case BYTECODE_OP_UNKNOWN:
+ default:
+ ERR("unknown bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+
+ case BYTECODE_OP_RETURN:
+ if (vstack_ax(stack)->type == REG_S64 ||
+ vstack_ax(stack)->type == REG_U64)
+ *(bytecode_opcode_t *) pc = BYTECODE_OP_RETURN_S64;
+ ret = 0;
+ goto end;
+
+ case BYTECODE_OP_RETURN_S64:
+ if (vstack_ax(stack)->type != REG_S64 &&
+ vstack_ax(stack)->type != REG_U64) {
+ ERR("Unexpected register type\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ ret = 0;
+ goto end;
+
+ /* binary */
+ case BYTECODE_OP_MUL:
+ case BYTECODE_OP_DIV:
+ case BYTECODE_OP_MOD:
+ case BYTECODE_OP_PLUS:
+ case BYTECODE_OP_MINUS:
+ ERR("unsupported bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+
+ case BYTECODE_OP_EQ:
+ {
+ struct binary_op *insn = (struct binary_op *) pc;
+
+ switch (vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STRING:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
+ insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING;
+ else
+ insn->op = BYTECODE_OP_EQ_STRING;
+ break;
+ case REG_STAR_GLOB_STRING:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING;
+ break;
+ case REG_S64:
+ case REG_U64:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_EQ_S64;
+ else
+ insn->op = BYTECODE_OP_EQ_DOUBLE_S64;
+ break;
+ case REG_DOUBLE:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_EQ_S64_DOUBLE;
+ else
+ insn->op = BYTECODE_OP_EQ_DOUBLE;
+ break;
+ case REG_UNKNOWN:
+ break; /* Dynamic typing. */
+ }
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ case BYTECODE_OP_NE:
+ {
+ struct binary_op *insn = (struct binary_op *) pc;
+
+ switch (vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STRING:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
+ insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING;
+ else
+ insn->op = BYTECODE_OP_NE_STRING;
+ break;
+ case REG_STAR_GLOB_STRING:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING;
+ break;
+ case REG_S64:
+ case REG_U64:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_NE_S64;
+ else
+ insn->op = BYTECODE_OP_NE_DOUBLE_S64;
+ break;
+ case REG_DOUBLE:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_NE_S64_DOUBLE;
+ else
+ insn->op = BYTECODE_OP_NE_DOUBLE;
+ break;
+ case REG_UNKNOWN:
+ break; /* Dynamic typing. */
+ }
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ case BYTECODE_OP_GT:
+ {
+ struct binary_op *insn = (struct binary_op *) pc;
+
+ switch (vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STAR_GLOB_STRING:
+ ERR("invalid register type for > binary operator\n");
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ insn->op = BYTECODE_OP_GT_STRING;
+ break;
+ case REG_S64:
+ case REG_U64:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_GT_S64;
+ else
+ insn->op = BYTECODE_OP_GT_DOUBLE_S64;
+ break;
+ case REG_DOUBLE:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_GT_S64_DOUBLE;
+ else
+ insn->op = BYTECODE_OP_GT_DOUBLE;
+ break;
+ case REG_UNKNOWN:
+ break; /* Dynamic typing. */
+ }
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ case BYTECODE_OP_LT:
+ {
+ struct binary_op *insn = (struct binary_op *) pc;
+
+ switch (vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STAR_GLOB_STRING:
+ ERR("invalid register type for < binary operator\n");
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ insn->op = BYTECODE_OP_LT_STRING;
+ break;
+ case REG_S64:
+ case REG_U64:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_LT_S64;
+ else
+ insn->op = BYTECODE_OP_LT_DOUBLE_S64;
+ break;
+ case REG_DOUBLE:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_LT_S64_DOUBLE;
+ else
+ insn->op = BYTECODE_OP_LT_DOUBLE;
+ break;
+ case REG_UNKNOWN:
+ break; /* Dynamic typing. */
+ }
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ case BYTECODE_OP_GE:
+ {
+ struct binary_op *insn = (struct binary_op *) pc;
+
+ switch (vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STAR_GLOB_STRING:
+ ERR("invalid register type for >= binary operator\n");
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ insn->op = BYTECODE_OP_GE_STRING;
+ break;
+ case REG_S64:
+ case REG_U64:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_GE_S64;
+ else
+ insn->op = BYTECODE_OP_GE_DOUBLE_S64;
+ break;
+ case REG_DOUBLE:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_GE_S64_DOUBLE;
+ else
+ insn->op = BYTECODE_OP_GE_DOUBLE;
+ break;
+ case REG_UNKNOWN:
+ break; /* Dynamic typing. */
+ }
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+ case BYTECODE_OP_LE:
+ {
+ struct binary_op *insn = (struct binary_op *) pc;
+
+ switch (vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STAR_GLOB_STRING:
+ ERR("invalid register type for <= binary operator\n");
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ insn->op = BYTECODE_OP_LE_STRING;
+ break;
+ case REG_S64:
+ case REG_U64:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_LE_S64;
+ else
+ insn->op = BYTECODE_OP_LE_DOUBLE_S64;
+ break;
+ case REG_DOUBLE:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_LE_S64_DOUBLE;
+ else
+ insn->op = BYTECODE_OP_LE_DOUBLE;
+ break;
+ case REG_UNKNOWN:
+ break; /* Dynamic typing. */
+ }
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ case BYTECODE_OP_EQ_STRING:
+ case BYTECODE_OP_NE_STRING:
+ case BYTECODE_OP_GT_STRING:
+ case BYTECODE_OP_LT_STRING:
+ case BYTECODE_OP_GE_STRING:
+ case BYTECODE_OP_LE_STRING:
+ case BYTECODE_OP_EQ_STAR_GLOB_STRING:
+ case BYTECODE_OP_NE_STAR_GLOB_STRING:
+ case BYTECODE_OP_EQ_S64:
+ case BYTECODE_OP_NE_S64:
+ case BYTECODE_OP_GT_S64:
+ case BYTECODE_OP_LT_S64:
+ case BYTECODE_OP_GE_S64:
+ case BYTECODE_OP_LE_S64:
+ case BYTECODE_OP_EQ_DOUBLE:
+ case BYTECODE_OP_NE_DOUBLE:
+ case BYTECODE_OP_GT_DOUBLE:
+ case BYTECODE_OP_LT_DOUBLE:
+ case BYTECODE_OP_GE_DOUBLE:
+ case BYTECODE_OP_LE_DOUBLE:
+ case BYTECODE_OP_EQ_DOUBLE_S64:
+ case BYTECODE_OP_NE_DOUBLE_S64:
+ case BYTECODE_OP_GT_DOUBLE_S64:
+ case BYTECODE_OP_LT_DOUBLE_S64:
+ case BYTECODE_OP_GE_DOUBLE_S64:
+ case BYTECODE_OP_LE_DOUBLE_S64:
+ case BYTECODE_OP_EQ_S64_DOUBLE:
+ case BYTECODE_OP_NE_S64_DOUBLE:
+ case BYTECODE_OP_GT_S64_DOUBLE:
+ case BYTECODE_OP_LT_S64_DOUBLE:
+ case BYTECODE_OP_GE_S64_DOUBLE:
+ case BYTECODE_OP_LE_S64_DOUBLE:
+ {
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ case BYTECODE_OP_BIT_RSHIFT:
+ case BYTECODE_OP_BIT_LSHIFT:
+ case BYTECODE_OP_BIT_AND:
+ case BYTECODE_OP_BIT_OR:
+ case BYTECODE_OP_BIT_XOR:
+ {
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_U64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ /* unary */
+ case BYTECODE_OP_UNARY_PLUS:
+ {
+ struct unary_op *insn = (struct unary_op *) pc;
+
+ switch (vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_S64:
+ case REG_U64:
+ insn->op = BYTECODE_OP_UNARY_PLUS_S64;
+ break;
+ case REG_DOUBLE:
+ insn->op = BYTECODE_OP_UNARY_PLUS_DOUBLE;
+ break;
+ case REG_UNKNOWN: /* Dynamic typing. */
+ break;
+ }
+ /* Pop 1, push 1 */
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_MINUS:
+ {
+ struct unary_op *insn = (struct unary_op *) pc;
+
+ switch (vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_S64:
+ case REG_U64:
+ insn->op = BYTECODE_OP_UNARY_MINUS_S64;
+ break;
+ case REG_DOUBLE:
+ insn->op = BYTECODE_OP_UNARY_MINUS_DOUBLE;
+ break;
+ case REG_UNKNOWN: /* Dynamic typing. */
+ break;
+ }
+ /* Pop 1, push 1 */
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_NOT:
+ {
+ struct unary_op *insn = (struct unary_op *) pc;
+
+ switch (vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_S64:
+ case REG_U64:
+ insn->op = BYTECODE_OP_UNARY_NOT_S64;
+ break;
+ case REG_DOUBLE:
+ insn->op = BYTECODE_OP_UNARY_NOT_DOUBLE;
+ break;
+ case REG_UNKNOWN: /* Dynamic typing. */
+ break;
+ }
+ /* Pop 1, push 1 */
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_BIT_NOT:
+ {
+ /* Pop 1, push 1 */
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_PLUS_S64:
+ case BYTECODE_OP_UNARY_MINUS_S64:
+ case BYTECODE_OP_UNARY_NOT_S64:
+ case BYTECODE_OP_UNARY_PLUS_DOUBLE:
+ case BYTECODE_OP_UNARY_MINUS_DOUBLE:
+ case BYTECODE_OP_UNARY_NOT_DOUBLE:
+ {
+ /* Pop 1, push 1 */
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ /* logical */
+ case BYTECODE_OP_AND:
+ case BYTECODE_OP_OR:
+ {
+ /* Continue to next instruction */
+ /* Pop 1 when jump not taken */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct logical_op);
+ break;
+ }
+
+ /* load field ref */
+ case BYTECODE_OP_LOAD_FIELD_REF:
+ {
+ ERR("Unknown field ref type\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ /* get context ref */
+ case BYTECODE_OP_GET_CONTEXT_REF:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_UNKNOWN;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_STRING:
+ case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
+ case BYTECODE_OP_GET_CONTEXT_REF_STRING:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STRING;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_S64:
+ case BYTECODE_OP_GET_CONTEXT_REF_S64:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
+ case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_DOUBLE;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ break;
+ }
+
+ /* load from immediate operand */
+ case BYTECODE_OP_LOAD_STRING:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STRING;
+ next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
+ next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_S64:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct load_op)
+ + sizeof(struct literal_numeric);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_DOUBLE:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_DOUBLE;
+ next_pc += sizeof(struct load_op)
+ + sizeof(struct literal_double);
+ break;
+ }
+
+ /* cast */
+ case BYTECODE_OP_CAST_TO_S64:
+ {
+ struct cast_op *insn = (struct cast_op *) pc;
+
+ switch (vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ ERR("Cast op can only be applied to numeric or floating point registers\n");
+ ret = -EINVAL;
+ goto end;
+ case REG_S64:
+ insn->op = BYTECODE_OP_CAST_NOP;
+ break;
+ case REG_DOUBLE:
+ insn->op = BYTECODE_OP_CAST_DOUBLE_TO_S64;
+ break;
+ case REG_UNKNOWN:
+ case REG_U64:
+ break;
+ }
+ /* Pop 1, push 1 */
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct cast_op);
+ break;
+ }
+ case BYTECODE_OP_CAST_DOUBLE_TO_S64:
+ {
+ /* Pop 1, push 1 */
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct cast_op);
+ break;
+ }
+ case BYTECODE_OP_CAST_NOP:
+ {
+ next_pc += sizeof(struct cast_op);
+ break;
+ }
+
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ case BYTECODE_OP_GET_CONTEXT_ROOT:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_PTR;
+ vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+ case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_PTR;
+ vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+ case BYTECODE_OP_GET_PAYLOAD_ROOT:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_PTR;
+ vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ assert(vstack_ax(stack)->type == REG_PTR);
+ /* Pop 1, push 1 */
+ ret = specialize_load_field(vstack_ax(stack), insn);
+ if (ret)
+ goto end;
+
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD_S8:
+ case BYTECODE_OP_LOAD_FIELD_S16:
+ case BYTECODE_OP_LOAD_FIELD_S32:
+ case BYTECODE_OP_LOAD_FIELD_S64:
+ {
+ /* Pop 1, push 1 */
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD_U8:
+ case BYTECODE_OP_LOAD_FIELD_U16:
+ case BYTECODE_OP_LOAD_FIELD_U32:
+ case BYTECODE_OP_LOAD_FIELD_U64:
+ {
+ /* Pop 1, push 1 */
+ vstack_ax(stack)->type = REG_U64;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD_STRING:
+ case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
+ {
+ /* Pop 1, push 1 */
+ vstack_ax(stack)->type = REG_STRING;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD_DOUBLE:
+ {
+ /* Pop 1, push 1 */
+ vstack_ax(stack)->type = REG_DOUBLE;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_GET_SYMBOL:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ dbg_printf("op get symbol\n");
+ switch (vstack_ax(stack)->load.type) {
+ case LOAD_OBJECT:
+ ERR("Nested fields not implemented yet.");
+ ret = -EINVAL;
+ goto end;
+ case LOAD_ROOT_CONTEXT:
+ /* Lookup context field. */
+ ret = specialize_context_lookup(*pctx,
+ bytecode, insn,
+ &vstack_ax(stack)->load);
+ if (ret)
+ goto end;
+ break;
+ case LOAD_ROOT_APP_CONTEXT:
+ /* Lookup app context field. */
+ ret = specialize_app_context_lookup(pctx,
+ bytecode, insn,
+ &vstack_ax(stack)->load);
+ if (ret)
+ goto end;
+ break;
+ case LOAD_ROOT_PAYLOAD:
+ /* Lookup event payload field. */
+ ret = specialize_payload_lookup(event_desc,
+ bytecode, insn,
+ &vstack_ax(stack)->load);
+ if (ret)
+ goto end;
+ break;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
+ break;
+ }
+
+ case BYTECODE_OP_GET_SYMBOL_FIELD:
+ {
+ /* Always generated by specialize phase. */
+ ret = -EINVAL;
+ goto end;
+ }
+
+ case BYTECODE_OP_GET_INDEX_U16:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
+
+ dbg_printf("op get index u16\n");
+ /* Pop 1, push 1 */
+ ret = specialize_get_index(bytecode, insn, index->index,
+ vstack_ax(stack), sizeof(*index));
+ if (ret)
+ goto end;
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
+ break;
+ }
+
+ case BYTECODE_OP_GET_INDEX_U64:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
+
+ dbg_printf("op get index u64\n");
+ /* Pop 1, push 1 */
+ ret = specialize_get_index(bytecode, insn, index->index,
+ vstack_ax(stack), sizeof(*index));
+ if (ret)
+ goto end;
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
+ break;
+ }
+
+ }
+ }
+end:
+ return ret;
+}
--- /dev/null
+/*
+ * lttng-bytecode-validator.c
+ *
+ * LTTng UST bytecode validator.
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define _LGPL_SOURCE
+#include <stddef.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <string.h>
+#include <time.h>
+
+#include <urcu-bp.h>
+#include <urcu/rculfhash.h>
+
+#include "lttng-bytecode.h"
+#include "lttng-hash-helper.h"
+#include "string-utils.h"
+#include "ust-events-internal.h"
+
+/*
+ * Number of merge points used to size the hash table. The hash table
+ * is initialized to that size and never resized, because resizing
+ * would trigger RCU worker thread execution: lookups fall back on
+ * linear traversal if the number of merge points exceeds this value.
+ */
+#define DEFAULT_NR_MERGE_POINTS 128
+#define MIN_NR_BUCKETS 128
+#define MAX_NR_BUCKETS 128
+
+/* merge point table node */
+struct lfht_mp_node {
+ struct cds_lfht_node node;
+
+ /* Context at merge point */
+ struct vstack stack;
+ unsigned long target_pc;
+};
+
+static unsigned long lttng_hash_seed;
+static unsigned int lttng_hash_seed_ready;
+
+static
+int lttng_hash_match(struct cds_lfht_node *node, const void *key)
+{
+ struct lfht_mp_node *mp_node =
+ caa_container_of(node, struct lfht_mp_node, node);
+ unsigned long key_pc = (unsigned long) key;
+
+ if (mp_node->target_pc == key_pc)
+ return 1;
+ else
+ return 0;
+}
+
+static
+int merge_points_compare(const struct vstack *stacka,
+ const struct vstack *stackb)
+{
+ int i, len;
+
+ if (stacka->top != stackb->top)
+ return 1;
+ len = stacka->top + 1;
+ assert(len >= 0);
+ for (i = 0; i < len; i++) {
+ if (stacka->e[i].type != REG_UNKNOWN
+ && stackb->e[i].type != REG_UNKNOWN
+ && stacka->e[i].type != stackb->e[i].type)
+ return 1;
+ }
+ return 0;
+}
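+
+/*
+ * For example (register types illustrative): the stacks
+ * { REG_S64, REG_UNKNOWN } and { REG_S64, REG_DOUBLE } compare as
+ * matching, because a dynamically typed (REG_UNKNOWN) slot is
+ * compatible with any type on the other side, whereas { REG_S64 }
+ * vs { REG_STRING } differ, and stacks of different depth always
+ * differ.
+ */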
+
+static
+int merge_point_add_check(struct cds_lfht *ht, unsigned long target_pc,
+ const struct vstack *stack)
+{
+ struct lfht_mp_node *node;
+ unsigned long hash = lttng_hash_mix((const char *) target_pc,
+ sizeof(target_pc),
+ lttng_hash_seed);
+ struct cds_lfht_node *ret;
+
+ dbg_printf("Bytecode: adding merge point at offset %lu, hash %lu\n",
+ target_pc, hash);
+ node = zmalloc(sizeof(struct lfht_mp_node));
+ if (!node)
+ return -ENOMEM;
+ node->target_pc = target_pc;
+ memcpy(&node->stack, stack, sizeof(node->stack));
+ ret = cds_lfht_add_unique(ht, hash, lttng_hash_match,
+ (const char *) target_pc, &node->node);
+ if (ret != &node->node) {
+ struct lfht_mp_node *ret_mp =
+ caa_container_of(ret, struct lfht_mp_node, node);
+
+ /* Key already present */
+ dbg_printf("Bytecode: compare merge points for offset %lu, hash %lu\n",
+ target_pc, hash);
+ free(node);
+ if (merge_points_compare(stack, &ret_mp->stack)) {
+ ERR("Merge points differ for offset %lu\n",
+ target_pc);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
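+
+/*
+ * Sketch of intended use (offsets assumed for illustration): while
+ * validating "a && b", the AND instruction at offset 16 with
+ * skip_offset 32 registers merge point 32 together with the current
+ * stack snapshot. When another path later registers the same target,
+ * cds_lfht_add_unique() detects the duplicate and the two recorded
+ * stacks must agree, otherwise -EINVAL is returned.
+ */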
+
+/*
+ * Binary comparators use top of stack and top of stack -1.
+ * Return 0 if typing is known to match, 1 if typing is dynamic
+ * (unknown), negative error value on error.
+ */
+static
+int bin_op_compare_check(struct vstack *stack, bytecode_opcode_t opcode,
+ const char *str)
+{
+ if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
+ goto error_empty;
+
+ switch (vstack_ax(stack)->type) {
+ default:
+ goto error_type;
+
+ case REG_UNKNOWN:
+ goto unknown;
+ case REG_STRING:
+ switch (vstack_bx(stack)->type) {
+ default:
+ goto error_type;
+
+ case REG_UNKNOWN:
+ goto unknown;
+ case REG_STRING:
+ break;
+ case REG_STAR_GLOB_STRING:
+ if (opcode != BYTECODE_OP_EQ && opcode != BYTECODE_OP_NE) {
+ goto error_mismatch;
+ }
+ break;
+ case REG_S64:
+ case REG_U64:
+ case REG_DOUBLE:
+ goto error_mismatch;
+ }
+ break;
+ case REG_STAR_GLOB_STRING:
+ switch (vstack_bx(stack)->type) {
+ default:
+ goto error_type;
+
+ case REG_UNKNOWN:
+ goto unknown;
+ case REG_STRING:
+ if (opcode != BYTECODE_OP_EQ && opcode != BYTECODE_OP_NE) {
+ goto error_mismatch;
+ }
+ break;
+ case REG_STAR_GLOB_STRING:
+ case REG_S64:
+ case REG_U64:
+ case REG_DOUBLE:
+ goto error_mismatch;
+ }
+ break;
+ case REG_S64:
+ case REG_U64:
+ case REG_DOUBLE:
+ switch (vstack_bx(stack)->type) {
+ default:
+ goto error_type;
+
+ case REG_UNKNOWN:
+ goto unknown;
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ goto error_mismatch;
+ case REG_S64:
+ case REG_U64:
+ case REG_DOUBLE:
+ break;
+ }
+ break;
+ }
+ return 0;
+
+unknown:
+ return 1;
+
+error_mismatch:
+ ERR("type mismatch for '%s' binary operator\n", str);
+ return -EINVAL;
+
+error_empty:
+ ERR("empty stack for '%s' binary operator\n", str);
+ return -EINVAL;
+
+error_type:
+ ERR("unknown type for '%s' binary operator\n", str);
+ return -EINVAL;
+}
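+
+/*
+ * For instance, comparing a string register with a star-glob pattern
+ * only type-checks for equality operators: 'msg == "foo*"' is
+ * accepted, whereas 'msg < "foo*"' takes the error_mismatch path
+ * above.
+ */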
+
+/*
+ * Binary bitwise operators use top of stack and top of stack -1.
+ * Return 0 if typing is known to match, 1 if typing is dynamic
+ * (unknown), negative error value on error.
+ */
+static
+int bin_op_bitwise_check(struct vstack *stack, bytecode_opcode_t opcode,
+ const char *str)
+{
+ if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
+ goto error_empty;
+
+ switch (vstack_ax(stack)->type) {
+ default:
+ goto error_type;
+
+ case REG_UNKNOWN:
+ goto unknown;
+ case REG_S64:
+ case REG_U64:
+ switch (vstack_bx(stack)->type) {
+ default:
+ goto error_type;
+
+ case REG_UNKNOWN:
+ goto unknown;
+ case REG_S64:
+ case REG_U64:
+ break;
+ }
+ break;
+ }
+ return 0;
+
+unknown:
+ return 1;
+
+error_empty:
+ ERR("empty stack for '%s' binary operator\n", str);
+ return -EINVAL;
+
+error_type:
+ ERR("unknown type for '%s' binary operator\n", str);
+ return -EINVAL;
+}
+
+static
+int validate_get_symbol(struct bytecode_runtime *bytecode,
+ const struct get_symbol *sym)
+{
+ const char *str, *str_limit;
+ size_t len_limit;
+
+ if (sym->offset >= bytecode->p.bc->bc.len - bytecode->p.bc->bc.reloc_offset)
+ return -EINVAL;
+
+ str = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + sym->offset;
+ str_limit = bytecode->p.bc->bc.data + bytecode->p.bc->bc.len;
+ len_limit = str_limit - str;
+ if (strnlen(str, len_limit) == len_limit)
+ return -EINVAL;
+ return 0;
+}
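+
+/*
+ * Bytecode layout assumed by the check above (offsets illustrative):
+ *
+ *   bc.data:  [ instructions ... | "some_field\0" "other\0" ... ]
+ *             ^0                   ^bc.reloc_offset          ^bc.len
+ *
+ * A get_symbol offset is relative to bc.reloc_offset; a symbol whose
+ * terminating NUL would fall at or beyond bc.len is rejected.
+ */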
+
+/*
+ * Validate that an instruction and its operands do not overflow the
+ * bytecode range. Called for each instruction encountered.
+ */
+static
+int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
+ char *start_pc, char *pc)
+{
+ int ret = 0;
+
+ switch (*(bytecode_opcode_t *) pc) {
+ case BYTECODE_OP_UNKNOWN:
+ default:
+ {
+ ERR("unknown bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ break;
+ }
+
+ case BYTECODE_OP_RETURN:
+ case BYTECODE_OP_RETURN_S64:
+ {
+ if (unlikely(pc + sizeof(struct return_op)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+ }
+
+ /* binary */
+ case BYTECODE_OP_MUL:
+ case BYTECODE_OP_DIV:
+ case BYTECODE_OP_MOD:
+ case BYTECODE_OP_PLUS:
+ case BYTECODE_OP_MINUS:
+ {
+ ERR("unsupported bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ break;
+ }
+
+ case BYTECODE_OP_EQ:
+ case BYTECODE_OP_NE:
+ case BYTECODE_OP_GT:
+ case BYTECODE_OP_LT:
+ case BYTECODE_OP_GE:
+ case BYTECODE_OP_LE:
+ case BYTECODE_OP_EQ_STRING:
+ case BYTECODE_OP_NE_STRING:
+ case BYTECODE_OP_GT_STRING:
+ case BYTECODE_OP_LT_STRING:
+ case BYTECODE_OP_GE_STRING:
+ case BYTECODE_OP_LE_STRING:
+ case BYTECODE_OP_EQ_STAR_GLOB_STRING:
+ case BYTECODE_OP_NE_STAR_GLOB_STRING:
+ case BYTECODE_OP_EQ_S64:
+ case BYTECODE_OP_NE_S64:
+ case BYTECODE_OP_GT_S64:
+ case BYTECODE_OP_LT_S64:
+ case BYTECODE_OP_GE_S64:
+ case BYTECODE_OP_LE_S64:
+ case BYTECODE_OP_EQ_DOUBLE:
+ case BYTECODE_OP_NE_DOUBLE:
+ case BYTECODE_OP_GT_DOUBLE:
+ case BYTECODE_OP_LT_DOUBLE:
+ case BYTECODE_OP_GE_DOUBLE:
+ case BYTECODE_OP_LE_DOUBLE:
+ case BYTECODE_OP_EQ_DOUBLE_S64:
+ case BYTECODE_OP_NE_DOUBLE_S64:
+ case BYTECODE_OP_GT_DOUBLE_S64:
+ case BYTECODE_OP_LT_DOUBLE_S64:
+ case BYTECODE_OP_GE_DOUBLE_S64:
+ case BYTECODE_OP_LE_DOUBLE_S64:
+ case BYTECODE_OP_EQ_S64_DOUBLE:
+ case BYTECODE_OP_NE_S64_DOUBLE:
+ case BYTECODE_OP_GT_S64_DOUBLE:
+ case BYTECODE_OP_LT_S64_DOUBLE:
+ case BYTECODE_OP_GE_S64_DOUBLE:
+ case BYTECODE_OP_LE_S64_DOUBLE:
+ case BYTECODE_OP_BIT_RSHIFT:
+ case BYTECODE_OP_BIT_LSHIFT:
+ case BYTECODE_OP_BIT_AND:
+ case BYTECODE_OP_BIT_OR:
+ case BYTECODE_OP_BIT_XOR:
+ {
+ if (unlikely(pc + sizeof(struct binary_op)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+ }
+
+ /* unary */
+ case BYTECODE_OP_UNARY_PLUS:
+ case BYTECODE_OP_UNARY_MINUS:
+ case BYTECODE_OP_UNARY_NOT:
+ case BYTECODE_OP_UNARY_PLUS_S64:
+ case BYTECODE_OP_UNARY_MINUS_S64:
+ case BYTECODE_OP_UNARY_NOT_S64:
+ case BYTECODE_OP_UNARY_PLUS_DOUBLE:
+ case BYTECODE_OP_UNARY_MINUS_DOUBLE:
+ case BYTECODE_OP_UNARY_NOT_DOUBLE:
+ case BYTECODE_OP_UNARY_BIT_NOT:
+ {
+ if (unlikely(pc + sizeof(struct unary_op)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+ }
+
+ /* logical */
+ case BYTECODE_OP_AND:
+ case BYTECODE_OP_OR:
+ {
+ if (unlikely(pc + sizeof(struct logical_op)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+ }
+
+ /* load field ref */
+ case BYTECODE_OP_LOAD_FIELD_REF:
+ {
+ ERR("Unknown field ref type\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ /* get context ref */
+ case BYTECODE_OP_GET_CONTEXT_REF:
+ case BYTECODE_OP_LOAD_FIELD_REF_STRING:
+ case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
+ case BYTECODE_OP_LOAD_FIELD_REF_S64:
+ case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
+ case BYTECODE_OP_GET_CONTEXT_REF_STRING:
+ case BYTECODE_OP_GET_CONTEXT_REF_S64:
+ case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
+ {
+ if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+ }
+
+ /* load from immediate operand */
+ case BYTECODE_OP_LOAD_STRING:
+ case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ uint32_t str_len, maxlen;
+
+ if (unlikely(pc + sizeof(struct load_op)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ break;
+ }
+
+ maxlen = start_pc + bytecode->len - pc - sizeof(struct load_op);
+ str_len = strnlen(insn->data, maxlen);
+ if (unlikely(str_len >= maxlen)) {
+ /* Final '\0' not found within range */
+ ret = -ERANGE;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_S64:
+ {
+ if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_DOUBLE:
+ {
+ if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_double)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_CAST_TO_S64:
+ case BYTECODE_OP_CAST_DOUBLE_TO_S64:
+ case BYTECODE_OP_CAST_NOP:
+ {
+ if (unlikely(pc + sizeof(struct cast_op)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+ }
+
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ case BYTECODE_OP_GET_CONTEXT_ROOT:
+ case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
+ case BYTECODE_OP_GET_PAYLOAD_ROOT:
+ case BYTECODE_OP_LOAD_FIELD:
+ case BYTECODE_OP_LOAD_FIELD_S8:
+ case BYTECODE_OP_LOAD_FIELD_S16:
+ case BYTECODE_OP_LOAD_FIELD_S32:
+ case BYTECODE_OP_LOAD_FIELD_S64:
+ case BYTECODE_OP_LOAD_FIELD_U8:
+ case BYTECODE_OP_LOAD_FIELD_U16:
+ case BYTECODE_OP_LOAD_FIELD_U32:
+ case BYTECODE_OP_LOAD_FIELD_U64:
+ case BYTECODE_OP_LOAD_FIELD_STRING:
+ case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
+ case BYTECODE_OP_LOAD_FIELD_DOUBLE:
+ if (unlikely(pc + sizeof(struct load_op)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+
+ case BYTECODE_OP_GET_SYMBOL:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+ if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_symbol)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ break;
+ }
+ ret = validate_get_symbol(bytecode, sym);
+ break;
+ }
+
+ case BYTECODE_OP_GET_SYMBOL_FIELD:
+		ERR("Unexpected get symbol field\n");
+ ret = -EINVAL;
+ break;
+
+ case BYTECODE_OP_GET_INDEX_U16:
+ if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u16)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+
+ case BYTECODE_OP_GET_INDEX_U64:
+ if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u64)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+ }
+
+ return ret;
+}
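+
+/*
+ * e.g. for BYTECODE_OP_LOAD_STRING above, the opcode header must fit
+ * within the bytecode, and the immediate string operand must reach a
+ * terminating NUL before start_pc + bytecode->len: a string truncated
+ * by the end of the bytecode yields -ERANGE.
+ */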
+
+static
+unsigned long delete_all_nodes(struct cds_lfht *ht)
+{
+ struct cds_lfht_iter iter;
+ struct lfht_mp_node *node;
+ unsigned long nr_nodes = 0;
+
+ cds_lfht_for_each_entry(ht, &iter, node, node) {
+ int ret;
+
+ ret = cds_lfht_del(ht, cds_lfht_iter_get_node(&iter));
+ assert(!ret);
+ /* note: this hash table is never used concurrently */
+ free(node);
+ nr_nodes++;
+ }
+ return nr_nodes;
+}
+
+/*
+ * Return value:
+ * >=0: success
+ * <0: error
+ */
+static
+int validate_instruction_context(struct bytecode_runtime *bytecode,
+ struct vstack *stack,
+ char *start_pc,
+ char *pc)
+{
+ int ret = 0;
+ const bytecode_opcode_t opcode = *(bytecode_opcode_t *) pc;
+
+ switch (opcode) {
+ case BYTECODE_OP_UNKNOWN:
+ default:
+ {
+ ERR("unknown bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ case BYTECODE_OP_RETURN:
+ case BYTECODE_OP_RETURN_S64:
+ {
+ goto end;
+ }
+
+ /* binary */
+ case BYTECODE_OP_MUL:
+ case BYTECODE_OP_DIV:
+ case BYTECODE_OP_MOD:
+ case BYTECODE_OP_PLUS:
+ case BYTECODE_OP_MINUS:
+ {
+ ERR("unsupported bytecode op %u\n",
+ (unsigned int) opcode);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ case BYTECODE_OP_EQ:
+ {
+ ret = bin_op_compare_check(stack, opcode, "==");
+ if (ret < 0)
+ goto end;
+ break;
+ }
+ case BYTECODE_OP_NE:
+ {
+ ret = bin_op_compare_check(stack, opcode, "!=");
+ if (ret < 0)
+ goto end;
+ break;
+ }
+ case BYTECODE_OP_GT:
+ {
+ ret = bin_op_compare_check(stack, opcode, ">");
+ if (ret < 0)
+ goto end;
+ break;
+ }
+ case BYTECODE_OP_LT:
+ {
+ ret = bin_op_compare_check(stack, opcode, "<");
+ if (ret < 0)
+ goto end;
+ break;
+ }
+ case BYTECODE_OP_GE:
+ {
+ ret = bin_op_compare_check(stack, opcode, ">=");
+ if (ret < 0)
+ goto end;
+ break;
+ }
+ case BYTECODE_OP_LE:
+ {
+ ret = bin_op_compare_check(stack, opcode, "<=");
+ if (ret < 0)
+ goto end;
+ break;
+ }
+
+ case BYTECODE_OP_EQ_STRING:
+ case BYTECODE_OP_NE_STRING:
+ case BYTECODE_OP_GT_STRING:
+ case BYTECODE_OP_LT_STRING:
+ case BYTECODE_OP_GE_STRING:
+ case BYTECODE_OP_LE_STRING:
+ {
+ if (!vstack_ax(stack) || !vstack_bx(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_STRING
+ || vstack_bx(stack)->type != REG_STRING) {
+ ERR("Unexpected register type for string comparator\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_EQ_STAR_GLOB_STRING:
+ case BYTECODE_OP_NE_STAR_GLOB_STRING:
+ {
+ if (!vstack_ax(stack) || !vstack_bx(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_STAR_GLOB_STRING
+ && vstack_bx(stack)->type != REG_STAR_GLOB_STRING) {
+ ERR("Unexpected register type for globbing pattern comparator\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_EQ_S64:
+ case BYTECODE_OP_NE_S64:
+ case BYTECODE_OP_GT_S64:
+ case BYTECODE_OP_LT_S64:
+ case BYTECODE_OP_GE_S64:
+ case BYTECODE_OP_LE_S64:
+ {
+ if (!vstack_ax(stack) || !vstack_bx(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ break;
+ default:
+ ERR("Unexpected register type for s64 comparator\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_bx(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ break;
+ default:
+ ERR("Unexpected register type for s64 comparator\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_EQ_DOUBLE:
+ case BYTECODE_OP_NE_DOUBLE:
+ case BYTECODE_OP_GT_DOUBLE:
+ case BYTECODE_OP_LT_DOUBLE:
+ case BYTECODE_OP_GE_DOUBLE:
+ case BYTECODE_OP_LE_DOUBLE:
+ {
+ if (!vstack_ax(stack) || !vstack_bx(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+		if (vstack_ax(stack)->type != REG_DOUBLE || vstack_bx(stack)->type != REG_DOUBLE) {
+ ERR("Double operator should have two double registers\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_EQ_DOUBLE_S64:
+ case BYTECODE_OP_NE_DOUBLE_S64:
+ case BYTECODE_OP_GT_DOUBLE_S64:
+ case BYTECODE_OP_LT_DOUBLE_S64:
+ case BYTECODE_OP_GE_DOUBLE_S64:
+ case BYTECODE_OP_LE_DOUBLE_S64:
+ {
+ if (!vstack_ax(stack) || !vstack_bx(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ break;
+ default:
+ ERR("Double-S64 operator has unexpected register types\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_bx(stack)->type) {
+ case REG_DOUBLE:
+ break;
+ default:
+ ERR("Double-S64 operator has unexpected register types\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_EQ_S64_DOUBLE:
+ case BYTECODE_OP_NE_S64_DOUBLE:
+ case BYTECODE_OP_GT_S64_DOUBLE:
+ case BYTECODE_OP_LT_S64_DOUBLE:
+ case BYTECODE_OP_GE_S64_DOUBLE:
+ case BYTECODE_OP_LE_S64_DOUBLE:
+ {
+ if (!vstack_ax(stack) || !vstack_bx(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_DOUBLE:
+ break;
+ default:
+ ERR("S64-Double operator has unexpected register types\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_bx(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ break;
+ default:
+ ERR("S64-Double operator has unexpected register types\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_BIT_RSHIFT:
+ ret = bin_op_bitwise_check(stack, opcode, ">>");
+ if (ret < 0)
+ goto end;
+ break;
+ case BYTECODE_OP_BIT_LSHIFT:
+ ret = bin_op_bitwise_check(stack, opcode, "<<");
+ if (ret < 0)
+ goto end;
+ break;
+ case BYTECODE_OP_BIT_AND:
+ ret = bin_op_bitwise_check(stack, opcode, "&");
+ if (ret < 0)
+ goto end;
+ break;
+ case BYTECODE_OP_BIT_OR:
+ ret = bin_op_bitwise_check(stack, opcode, "|");
+ if (ret < 0)
+ goto end;
+ break;
+ case BYTECODE_OP_BIT_XOR:
+ ret = bin_op_bitwise_check(stack, opcode, "^");
+ if (ret < 0)
+ goto end;
+ break;
+
+ /* unary */
+ case BYTECODE_OP_UNARY_PLUS:
+ case BYTECODE_OP_UNARY_MINUS:
+ case BYTECODE_OP_UNARY_NOT:
+ {
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ ERR("Unary op can only be applied to numeric or floating point registers\n");
+ ret = -EINVAL;
+ goto end;
+		case REG_S64:
+		case REG_U64:
+		case REG_DOUBLE:
+		case REG_UNKNOWN:
+			break;
+ }
+ break;
+ }
+ case BYTECODE_OP_UNARY_BIT_NOT:
+ {
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ case REG_DOUBLE:
+ ERR("Unary bitwise op can only be applied to numeric registers\n");
+ ret = -EINVAL;
+ goto end;
+		case REG_S64:
+		case REG_U64:
+		case REG_UNKNOWN:
+			break;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_PLUS_S64:
+ case BYTECODE_OP_UNARY_MINUS_S64:
+ case BYTECODE_OP_UNARY_NOT_S64:
+ {
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_S64 &&
+ vstack_ax(stack)->type != REG_U64) {
+ ERR("Invalid register type\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_PLUS_DOUBLE:
+ case BYTECODE_OP_UNARY_MINUS_DOUBLE:
+ case BYTECODE_OP_UNARY_NOT_DOUBLE:
+ {
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_DOUBLE) {
+ ERR("Invalid register type\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
+ /* logical */
+ case BYTECODE_OP_AND:
+ case BYTECODE_OP_OR:
+ {
+ struct logical_op *insn = (struct logical_op *) pc;
+
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_S64
+ && vstack_ax(stack)->type != REG_U64
+ && vstack_ax(stack)->type != REG_UNKNOWN) {
+ ERR("Logical comparator expects S64, U64 or dynamic register\n");
+ ret = -EINVAL;
+ goto end;
+ }
+
+ dbg_printf("Validate jumping to bytecode offset %u\n",
+ (unsigned int) insn->skip_offset);
+ if (unlikely(start_pc + insn->skip_offset <= pc)) {
+ ERR("Loops are not allowed in bytecode\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
+ /* load field ref */
+ case BYTECODE_OP_LOAD_FIELD_REF:
+ {
+ ERR("Unknown field ref type\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_STRING:
+ case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("Validate load field ref offset %u type string\n",
+ ref->offset);
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_S64:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("Validate load field ref offset %u type s64\n",
+ ref->offset);
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("Validate load field ref offset %u type double\n",
+ ref->offset);
+ break;
+ }
+
+ /* load from immediate operand */
+ case BYTECODE_OP_LOAD_STRING:
+ case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
+ {
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_S64:
+ {
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_DOUBLE:
+ {
+ break;
+ }
+
+ case BYTECODE_OP_CAST_TO_S64:
+ case BYTECODE_OP_CAST_DOUBLE_TO_S64:
+ {
+ struct cast_op *insn = (struct cast_op *) pc;
+
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ ERR("Cast op can only be applied to numeric or floating point registers\n");
+ ret = -EINVAL;
+ goto end;
+		case REG_S64:
+		case REG_U64:
+		case REG_DOUBLE:
+		case REG_UNKNOWN:
+			break;
+ }
+ if (insn->op == BYTECODE_OP_CAST_DOUBLE_TO_S64) {
+ if (vstack_ax(stack)->type != REG_DOUBLE) {
+ ERR("Cast expects double\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ break;
+ }
+ case BYTECODE_OP_CAST_NOP:
+ {
+ break;
+ }
+
+ /* get context ref */
+ case BYTECODE_OP_GET_CONTEXT_REF:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("Validate get context ref offset %u type dynamic\n",
+ ref->offset);
+ break;
+ }
+ case BYTECODE_OP_GET_CONTEXT_REF_STRING:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("Validate get context ref offset %u type string\n",
+ ref->offset);
+ break;
+ }
+ case BYTECODE_OP_GET_CONTEXT_REF_S64:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("Validate get context ref offset %u type s64\n",
+ ref->offset);
+ break;
+ }
+ case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("Validate get context ref offset %u type double\n",
+ ref->offset);
+ break;
+ }
+
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ case BYTECODE_OP_GET_CONTEXT_ROOT:
+ {
+ dbg_printf("Validate get context root\n");
+ break;
+ }
+ case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
+ {
+ dbg_printf("Validate get app context root\n");
+ break;
+ }
+ case BYTECODE_OP_GET_PAYLOAD_ROOT:
+ {
+ dbg_printf("Validate get payload root\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD:
+ {
+ /*
+ * We tolerate that field type is unknown at validation,
+ * because we are performing the load specialization in
+ * a phase after validation.
+ */
+ dbg_printf("Validate load field\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_S8:
+ {
+ dbg_printf("Validate load field s8\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_S16:
+ {
+ dbg_printf("Validate load field s16\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_S32:
+ {
+ dbg_printf("Validate load field s32\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_S64:
+ {
+ dbg_printf("Validate load field s64\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_U8:
+ {
+ dbg_printf("Validate load field u8\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_U16:
+ {
+ dbg_printf("Validate load field u16\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_U32:
+ {
+ dbg_printf("Validate load field u32\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_U64:
+ {
+ dbg_printf("Validate load field u64\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_STRING:
+ {
+ dbg_printf("Validate load field string\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
+ {
+ dbg_printf("Validate load field sequence\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_DOUBLE:
+ {
+ dbg_printf("Validate load field double\n");
+ break;
+ }
+
+ case BYTECODE_OP_GET_SYMBOL:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+ dbg_printf("Validate get symbol offset %u\n", sym->offset);
+ break;
+ }
+
+ case BYTECODE_OP_GET_SYMBOL_FIELD:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+ dbg_printf("Validate get symbol field offset %u\n", sym->offset);
+ break;
+ }
+
+ case BYTECODE_OP_GET_INDEX_U16:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u16 *get_index = (struct get_index_u16 *) insn->data;
+
+ dbg_printf("Validate get index u16 index %u\n", get_index->index);
+ break;
+ }
+
+ case BYTECODE_OP_GET_INDEX_U64:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u64 *get_index = (struct get_index_u64 *) insn->data;
+
+ dbg_printf("Validate get index u64 index %" PRIu64 "\n", get_index->index);
+ break;
+ }
+ }
+end:
+ return ret;
+}
+
+/*
+ * Return value:
+ * 0: success
+ * <0: error
+ */
+static
+int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
+ struct cds_lfht *merge_points,
+ struct vstack *stack,
+ char *start_pc,
+ char *pc)
+{
+ int ret;
+ unsigned long target_pc = pc - start_pc;
+ struct cds_lfht_iter iter;
+ struct cds_lfht_node *node;
+ struct lfht_mp_node *mp_node;
+ unsigned long hash;
+
+ /* Validate the context resulting from the previous instruction */
+ ret = validate_instruction_context(bytecode, stack, start_pc, pc);
+ if (ret < 0)
+ return ret;
+
+ /* Validate merge points */
+ hash = lttng_hash_mix((const char *) target_pc, sizeof(target_pc),
+ lttng_hash_seed);
+ cds_lfht_lookup(merge_points, hash, lttng_hash_match,
+ (const char *) target_pc, &iter);
+ node = cds_lfht_iter_get_node(&iter);
+ if (node) {
+ mp_node = caa_container_of(node, struct lfht_mp_node, node);
+
+ dbg_printf("Bytecode: validate merge point at offset %lu\n",
+ target_pc);
+ if (merge_points_compare(stack, &mp_node->stack)) {
+ ERR("Merge points differ for offset %lu\n",
+ target_pc);
+ return -EINVAL;
+ }
+ /* Once validated, we can remove the merge point */
+ dbg_printf("Bytecode: remove merge point at offset %lu\n",
+ target_pc);
+ ret = cds_lfht_del(merge_points, node);
+ assert(!ret);
+ }
+ return 0;
+}
+
+/*
+ * Return value:
+ * >0: going to next insn.
+ * 0: success, stop iteration.
+ * <0: error
+ */
+static
+int exec_insn(struct bytecode_runtime *bytecode,
+ struct cds_lfht *merge_points,
+ struct vstack *stack,
+ char **_next_pc,
+ char *pc)
+{
+ int ret = 1;
+ char *next_pc = *_next_pc;
+
+ switch (*(bytecode_opcode_t *) pc) {
+ case BYTECODE_OP_UNKNOWN:
+ default:
+ {
+ ERR("unknown bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ case BYTECODE_OP_RETURN:
+ {
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ case REG_DOUBLE:
+ case REG_STRING:
+ case REG_PTR:
+ case REG_UNKNOWN:
+ break;
+ default:
+ ERR("Unexpected register type %d at end of bytecode\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ ret = 0;
+ goto end;
+ }
+ case BYTECODE_OP_RETURN_S64:
+ {
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ break;
+		case REG_UNKNOWN:
+		default:
+ ERR("Unexpected register type %d at end of bytecode\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ ret = 0;
+ goto end;
+ }
+
+ /* binary */
+ case BYTECODE_OP_MUL:
+ case BYTECODE_OP_DIV:
+ case BYTECODE_OP_MOD:
+ case BYTECODE_OP_PLUS:
+ case BYTECODE_OP_MINUS:
+ {
+ ERR("unsupported bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ case BYTECODE_OP_EQ:
+ case BYTECODE_OP_NE:
+ case BYTECODE_OP_GT:
+ case BYTECODE_OP_LT:
+ case BYTECODE_OP_GE:
+ case BYTECODE_OP_LE:
+ case BYTECODE_OP_EQ_STRING:
+ case BYTECODE_OP_NE_STRING:
+ case BYTECODE_OP_GT_STRING:
+ case BYTECODE_OP_LT_STRING:
+ case BYTECODE_OP_GE_STRING:
+ case BYTECODE_OP_LE_STRING:
+ case BYTECODE_OP_EQ_STAR_GLOB_STRING:
+ case BYTECODE_OP_NE_STAR_GLOB_STRING:
+ case BYTECODE_OP_EQ_S64:
+ case BYTECODE_OP_NE_S64:
+ case BYTECODE_OP_GT_S64:
+ case BYTECODE_OP_LT_S64:
+ case BYTECODE_OP_GE_S64:
+ case BYTECODE_OP_LE_S64:
+ case BYTECODE_OP_EQ_DOUBLE:
+ case BYTECODE_OP_NE_DOUBLE:
+ case BYTECODE_OP_GT_DOUBLE:
+ case BYTECODE_OP_LT_DOUBLE:
+ case BYTECODE_OP_GE_DOUBLE:
+ case BYTECODE_OP_LE_DOUBLE:
+ case BYTECODE_OP_EQ_DOUBLE_S64:
+ case BYTECODE_OP_NE_DOUBLE_S64:
+ case BYTECODE_OP_GT_DOUBLE_S64:
+ case BYTECODE_OP_LT_DOUBLE_S64:
+ case BYTECODE_OP_GE_DOUBLE_S64:
+ case BYTECODE_OP_LE_DOUBLE_S64:
+ case BYTECODE_OP_EQ_S64_DOUBLE:
+ case BYTECODE_OP_NE_S64_DOUBLE:
+ case BYTECODE_OP_GT_S64_DOUBLE:
+ case BYTECODE_OP_LT_S64_DOUBLE:
+ case BYTECODE_OP_GE_S64_DOUBLE:
+ case BYTECODE_OP_LE_S64_DOUBLE:
+ {
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ case REG_DOUBLE:
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ case REG_UNKNOWN:
+ break;
+ default:
+ ERR("Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ case BYTECODE_OP_BIT_RSHIFT:
+ case BYTECODE_OP_BIT_LSHIFT:
+ case BYTECODE_OP_BIT_AND:
+ case BYTECODE_OP_BIT_OR:
+ case BYTECODE_OP_BIT_XOR:
+ {
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ case REG_DOUBLE:
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ case REG_UNKNOWN:
+ break;
+ default:
+ ERR("Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ vstack_ax(stack)->type = REG_U64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ /* unary */
+ case BYTECODE_OP_UNARY_PLUS:
+ case BYTECODE_OP_UNARY_MINUS:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_UNKNOWN:
+ case REG_DOUBLE:
+ case REG_S64:
+ case REG_U64:
+ break;
+ default:
+ ERR("Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_UNKNOWN;
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_PLUS_S64:
+ case BYTECODE_OP_UNARY_MINUS_S64:
+ case BYTECODE_OP_UNARY_NOT_S64:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ break;
+ default:
+ ERR("Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_NOT:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_UNKNOWN:
+ case REG_DOUBLE:
+ case REG_S64:
+ case REG_U64:
+ break;
+ default:
+ ERR("Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_BIT_NOT:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_UNKNOWN:
+ case REG_S64:
+ case REG_U64:
+ break;
+ case REG_DOUBLE:
+ default:
+ ERR("Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ vstack_ax(stack)->type = REG_U64;
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_NOT_DOUBLE:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_DOUBLE:
+ break;
+ default:
+ ERR("Incorrect register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_PLUS_DOUBLE:
+ case BYTECODE_OP_UNARY_MINUS_DOUBLE:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_DOUBLE:
+ break;
+ default:
+ ERR("Incorrect register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ vstack_ax(stack)->type = REG_DOUBLE;
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ /* logical */
+ case BYTECODE_OP_AND:
+ case BYTECODE_OP_OR:
+ {
+ struct logical_op *insn = (struct logical_op *) pc;
+ int merge_ret;
+
+ /* Add merge point to table */
+ merge_ret = merge_point_add_check(merge_points,
+ insn->skip_offset, stack);
+ if (merge_ret) {
+ ret = merge_ret;
+ goto end;
+ }
+
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+		/* There is always a cast-to-s64 operation before an and/or op. */
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ break;
+ default:
+ ERR("Incorrect register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ /* Continue to next instruction */
+ /* Pop 1 when jump not taken */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct logical_op);
+ break;
+ }
+
+ /* load field ref */
+ case BYTECODE_OP_LOAD_FIELD_REF:
+ {
+ ERR("Unknown field ref type\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ /* get context ref */
+ case BYTECODE_OP_GET_CONTEXT_REF:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_UNKNOWN;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_STRING:
+ case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
+ case BYTECODE_OP_GET_CONTEXT_REF_STRING:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STRING;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_S64:
+ case BYTECODE_OP_GET_CONTEXT_REF_S64:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
+ case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_DOUBLE;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ break;
+ }
+
+ /* load from immediate operand */
+ case BYTECODE_OP_LOAD_STRING:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STRING;
+ next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
+ next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_S64:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct load_op)
+ + sizeof(struct literal_numeric);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_DOUBLE:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_DOUBLE;
+ next_pc += sizeof(struct load_op)
+ + sizeof(struct literal_double);
+ break;
+ }
+
+ case BYTECODE_OP_CAST_TO_S64:
+ case BYTECODE_OP_CAST_DOUBLE_TO_S64:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ case REG_DOUBLE:
+ case REG_UNKNOWN:
+ break;
+ default:
+ ERR("Incorrect register type %d for cast\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct cast_op);
+ break;
+ }
+ case BYTECODE_OP_CAST_NOP:
+ {
+ next_pc += sizeof(struct cast_op);
+ break;
+ }
+
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ case BYTECODE_OP_GET_CONTEXT_ROOT:
+ case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
+ case BYTECODE_OP_GET_PAYLOAD_ROOT:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_PTR;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_UNKNOWN;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD_S8:
+ case BYTECODE_OP_LOAD_FIELD_S16:
+ case BYTECODE_OP_LOAD_FIELD_S32:
+ case BYTECODE_OP_LOAD_FIELD_S64:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD_U8:
+ case BYTECODE_OP_LOAD_FIELD_U16:
+ case BYTECODE_OP_LOAD_FIELD_U32:
+ case BYTECODE_OP_LOAD_FIELD_U64:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_U64;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD_STRING:
+ case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STRING;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD_DOUBLE:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_DOUBLE;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_GET_SYMBOL:
+ case BYTECODE_OP_GET_SYMBOL_FIELD:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
+ break;
+ }
+
+ case BYTECODE_OP_GET_INDEX_U16:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
+ break;
+ }
+
+ case BYTECODE_OP_GET_INDEX_U64:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
+ break;
+ }
+
+ }
+end:
+ *_next_pc = next_pc;
+ return ret;
+}
+
+/*
+ * Never called concurrently (hash seed is shared).
+ */
+int lttng_bytecode_validate(struct bytecode_runtime *bytecode)
+{
+ struct cds_lfht *merge_points;
+ char *pc, *next_pc, *start_pc;
+ int ret = -EINVAL;
+ struct vstack stack;
+
+ vstack_init(&stack);
+
+ if (!lttng_hash_seed_ready) {
+ lttng_hash_seed = time(NULL);
+ lttng_hash_seed_ready = 1;
+ }
+ /*
+ * Note: merge_points hash table used by single thread, and
+ * never concurrently resized. Therefore, we can use it without
+ * holding RCU read-side lock and free nodes without using
+ * call_rcu.
+ */
+ merge_points = cds_lfht_new(DEFAULT_NR_MERGE_POINTS,
+ MIN_NR_BUCKETS, MAX_NR_BUCKETS,
+ 0, NULL);
+ if (!merge_points) {
+ ERR("Error allocating hash table for bytecode validation\n");
+ return -ENOMEM;
+ }
+ start_pc = &bytecode->code[0];
+ for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
+ pc = next_pc) {
+ ret = bytecode_validate_overflow(bytecode, start_pc, pc);
+ if (ret != 0) {
+ if (ret == -ERANGE)
+ ERR("Bytecode overflow\n");
+ goto end;
+ }
+ dbg_printf("Validating op %s (%u)\n",
+ print_op((unsigned int) *(bytecode_opcode_t *) pc),
+ (unsigned int) *(bytecode_opcode_t *) pc);
+
+ /*
+ * For each instruction, validate the current context
+ * (traversal of entire execution flow), and validate
+ * all merge points targeting this instruction.
+ */
+ ret = validate_instruction_all_contexts(bytecode, merge_points,
+ &stack, start_pc, pc);
+ if (ret)
+ goto end;
+ ret = exec_insn(bytecode, merge_points, &stack, &next_pc, pc);
+ if (ret <= 0)
+ goto end;
+ }
+end:
+ if (delete_all_nodes(merge_points)) {
+ if (!ret) {
+ ERR("Unexpected merge points\n");
+ ret = -EINVAL;
+ }
+ }
+ if (cds_lfht_destroy(merge_points, NULL)) {
+ ERR("Error destroying hash table\n");
+ }
+ return ret;
+}
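+
+/*
+ * Validation pass sketch for a filter like 'intfield > 10' (opcode
+ * sequence assumed for illustration):
+ *
+ *   LOAD_FIELD_REF_S64    push REG_S64           stack: [S64]
+ *   LOAD_S64 10           push REG_S64           stack: [S64, S64]
+ *   GT_S64                pop 2, push REG_S64    stack: [S64]
+ *   RETURN                accepts S64            stop, ret = 0
+ *
+ * Any merge point left over after the walk means a logical-op skip
+ * target was never reached, reported as -EINVAL above.
+ */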
--- /dev/null
+/*
+ * lttng-bytecode.c
+ *
+ * LTTng UST bytecode handling code.
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define _LGPL_SOURCE
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <urcu/rculist.h>
+
+#include "lttng-bytecode.h"
+#include "ust-events-internal.h"
+
+static const char *opnames[] = {
+ [ BYTECODE_OP_UNKNOWN ] = "UNKNOWN",
+
+ [ BYTECODE_OP_RETURN ] = "RETURN",
+
+ /* binary */
+ [ BYTECODE_OP_MUL ] = "MUL",
+ [ BYTECODE_OP_DIV ] = "DIV",
+ [ BYTECODE_OP_MOD ] = "MOD",
+ [ BYTECODE_OP_PLUS ] = "PLUS",
+ [ BYTECODE_OP_MINUS ] = "MINUS",
+ [ BYTECODE_OP_BIT_RSHIFT ] = "BIT_RSHIFT",
+ [ BYTECODE_OP_BIT_LSHIFT ] = "BIT_LSHIFT",
+ [ BYTECODE_OP_BIT_AND ] = "BIT_AND",
+ [ BYTECODE_OP_BIT_OR ] = "BIT_OR",
+ [ BYTECODE_OP_BIT_XOR ] = "BIT_XOR",
+
+ /* binary comparators */
+ [ BYTECODE_OP_EQ ] = "EQ",
+ [ BYTECODE_OP_NE ] = "NE",
+ [ BYTECODE_OP_GT ] = "GT",
+ [ BYTECODE_OP_LT ] = "LT",
+ [ BYTECODE_OP_GE ] = "GE",
+ [ BYTECODE_OP_LE ] = "LE",
+
+ /* string binary comparators */
+ [ BYTECODE_OP_EQ_STRING ] = "EQ_STRING",
+ [ BYTECODE_OP_NE_STRING ] = "NE_STRING",
+ [ BYTECODE_OP_GT_STRING ] = "GT_STRING",
+ [ BYTECODE_OP_LT_STRING ] = "LT_STRING",
+ [ BYTECODE_OP_GE_STRING ] = "GE_STRING",
+ [ BYTECODE_OP_LE_STRING ] = "LE_STRING",
+
+ /* s64 binary comparators */
+ [ BYTECODE_OP_EQ_S64 ] = "EQ_S64",
+ [ BYTECODE_OP_NE_S64 ] = "NE_S64",
+ [ BYTECODE_OP_GT_S64 ] = "GT_S64",
+ [ BYTECODE_OP_LT_S64 ] = "LT_S64",
+ [ BYTECODE_OP_GE_S64 ] = "GE_S64",
+ [ BYTECODE_OP_LE_S64 ] = "LE_S64",
+
+ /* double binary comparators */
+ [ BYTECODE_OP_EQ_DOUBLE ] = "EQ_DOUBLE",
+ [ BYTECODE_OP_NE_DOUBLE ] = "NE_DOUBLE",
+ [ BYTECODE_OP_GT_DOUBLE ] = "GT_DOUBLE",
+ [ BYTECODE_OP_LT_DOUBLE ] = "LT_DOUBLE",
+ [ BYTECODE_OP_GE_DOUBLE ] = "GE_DOUBLE",
+ [ BYTECODE_OP_LE_DOUBLE ] = "LE_DOUBLE",
+
+ /* Mixed S64-double binary comparators */
+ [ BYTECODE_OP_EQ_DOUBLE_S64 ] = "EQ_DOUBLE_S64",
+ [ BYTECODE_OP_NE_DOUBLE_S64 ] = "NE_DOUBLE_S64",
+ [ BYTECODE_OP_GT_DOUBLE_S64 ] = "GT_DOUBLE_S64",
+ [ BYTECODE_OP_LT_DOUBLE_S64 ] = "LT_DOUBLE_S64",
+ [ BYTECODE_OP_GE_DOUBLE_S64 ] = "GE_DOUBLE_S64",
+ [ BYTECODE_OP_LE_DOUBLE_S64 ] = "LE_DOUBLE_S64",
+
+ [ BYTECODE_OP_EQ_S64_DOUBLE ] = "EQ_S64_DOUBLE",
+ [ BYTECODE_OP_NE_S64_DOUBLE ] = "NE_S64_DOUBLE",
+ [ BYTECODE_OP_GT_S64_DOUBLE ] = "GT_S64_DOUBLE",
+ [ BYTECODE_OP_LT_S64_DOUBLE ] = "LT_S64_DOUBLE",
+ [ BYTECODE_OP_GE_S64_DOUBLE ] = "GE_S64_DOUBLE",
+ [ BYTECODE_OP_LE_S64_DOUBLE ] = "LE_S64_DOUBLE",
+
+ /* unary */
+ [ BYTECODE_OP_UNARY_PLUS ] = "UNARY_PLUS",
+ [ BYTECODE_OP_UNARY_MINUS ] = "UNARY_MINUS",
+ [ BYTECODE_OP_UNARY_NOT ] = "UNARY_NOT",
+ [ BYTECODE_OP_UNARY_PLUS_S64 ] = "UNARY_PLUS_S64",
+ [ BYTECODE_OP_UNARY_MINUS_S64 ] = "UNARY_MINUS_S64",
+ [ BYTECODE_OP_UNARY_NOT_S64 ] = "UNARY_NOT_S64",
+ [ BYTECODE_OP_UNARY_PLUS_DOUBLE ] = "UNARY_PLUS_DOUBLE",
+ [ BYTECODE_OP_UNARY_MINUS_DOUBLE ] = "UNARY_MINUS_DOUBLE",
+ [ BYTECODE_OP_UNARY_NOT_DOUBLE ] = "UNARY_NOT_DOUBLE",
+
+ /* logical */
+ [ BYTECODE_OP_AND ] = "AND",
+ [ BYTECODE_OP_OR ] = "OR",
+
+ /* load field ref */
+ [ BYTECODE_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF",
+ [ BYTECODE_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING",
+ [ BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE",
+ [ BYTECODE_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64",
+ [ BYTECODE_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE",
+
+ /* load from immediate operand */
+ [ BYTECODE_OP_LOAD_STRING ] = "LOAD_STRING",
+ [ BYTECODE_OP_LOAD_S64 ] = "LOAD_S64",
+ [ BYTECODE_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE",
+
+ /* cast */
+ [ BYTECODE_OP_CAST_TO_S64 ] = "CAST_TO_S64",
+ [ BYTECODE_OP_CAST_DOUBLE_TO_S64 ] = "CAST_DOUBLE_TO_S64",
+ [ BYTECODE_OP_CAST_NOP ] = "CAST_NOP",
+
+ /* get context ref */
+ [ BYTECODE_OP_GET_CONTEXT_REF ] = "GET_CONTEXT_REF",
+ [ BYTECODE_OP_GET_CONTEXT_REF_STRING ] = "GET_CONTEXT_REF_STRING",
+ [ BYTECODE_OP_GET_CONTEXT_REF_S64 ] = "GET_CONTEXT_REF_S64",
+ [ BYTECODE_OP_GET_CONTEXT_REF_DOUBLE ] = "GET_CONTEXT_REF_DOUBLE",
+
+ /* load userspace field ref */
+ [ BYTECODE_OP_LOAD_FIELD_REF_USER_STRING ] = "LOAD_FIELD_REF_USER_STRING",
+ [ BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = "LOAD_FIELD_REF_USER_SEQUENCE",
+
+ /*
+ * load immediate star globbing pattern (literal string)
+ * from immediate.
+ */
+ [ BYTECODE_OP_LOAD_STAR_GLOB_STRING ] = "LOAD_STAR_GLOB_STRING",
+
+	/* globbing pattern binary comparators */
+ [ BYTECODE_OP_EQ_STAR_GLOB_STRING ] = "EQ_STAR_GLOB_STRING",
+ [ BYTECODE_OP_NE_STAR_GLOB_STRING ] = "NE_STAR_GLOB_STRING",
+
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ [ BYTECODE_OP_GET_CONTEXT_ROOT ] = "GET_CONTEXT_ROOT",
+ [ BYTECODE_OP_GET_APP_CONTEXT_ROOT ] = "GET_APP_CONTEXT_ROOT",
+ [ BYTECODE_OP_GET_PAYLOAD_ROOT ] = "GET_PAYLOAD_ROOT",
+
+ [ BYTECODE_OP_GET_SYMBOL ] = "GET_SYMBOL",
+ [ BYTECODE_OP_GET_SYMBOL_FIELD ] = "GET_SYMBOL_FIELD",
+ [ BYTECODE_OP_GET_INDEX_U16 ] = "GET_INDEX_U16",
+ [ BYTECODE_OP_GET_INDEX_U64 ] = "GET_INDEX_U64",
+
+ [ BYTECODE_OP_LOAD_FIELD ] = "LOAD_FIELD",
+ [ BYTECODE_OP_LOAD_FIELD_S8 ] = "LOAD_FIELD_S8",
+ [ BYTECODE_OP_LOAD_FIELD_S16 ] = "LOAD_FIELD_S16",
+ [ BYTECODE_OP_LOAD_FIELD_S32 ] = "LOAD_FIELD_S32",
+ [ BYTECODE_OP_LOAD_FIELD_S64 ] = "LOAD_FIELD_S64",
+ [ BYTECODE_OP_LOAD_FIELD_U8 ] = "LOAD_FIELD_U8",
+ [ BYTECODE_OP_LOAD_FIELD_U16 ] = "LOAD_FIELD_U16",
+ [ BYTECODE_OP_LOAD_FIELD_U32 ] = "LOAD_FIELD_U32",
+ [ BYTECODE_OP_LOAD_FIELD_U64 ] = "LOAD_FIELD_U64",
+ [ BYTECODE_OP_LOAD_FIELD_STRING ] = "LOAD_FIELD_STRING",
+ [ BYTECODE_OP_LOAD_FIELD_SEQUENCE ] = "LOAD_FIELD_SEQUENCE",
+ [ BYTECODE_OP_LOAD_FIELD_DOUBLE ] = "LOAD_FIELD_DOUBLE",
+
+ [ BYTECODE_OP_UNARY_BIT_NOT ] = "UNARY_BIT_NOT",
+
+ [ BYTECODE_OP_RETURN_S64 ] = "RETURN_S64",
+};
+
+const char *print_op(enum bytecode_op op)
+{
+ if (op >= NR_BYTECODE_OPS)
+ return "UNKNOWN";
+ else
+ return opnames[op];
+}
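+
+/*
+ * e.g. print_op(BYTECODE_OP_AND) returns "AND"; values at or above
+ * NR_BYTECODE_OPS map to "UNKNOWN" rather than indexing past
+ * opnames[].
+ */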
+
+static
+int apply_field_reloc(const struct lttng_event_desc *event_desc,
+ struct bytecode_runtime *runtime,
+ uint32_t runtime_len,
+ uint32_t reloc_offset,
+ const char *field_name,
+ enum bytecode_op bytecode_op)
+{
+ const struct lttng_event_field *fields, *field = NULL;
+ unsigned int nr_fields, i;
+ struct load_op *op;
+ uint32_t field_offset = 0;
+
+ dbg_printf("Apply field reloc: %u %s\n", reloc_offset, field_name);
+
+ /* Lookup event by name */
+ if (!event_desc)
+ return -EINVAL;
+ fields = event_desc->fields;
+ if (!fields)
+ return -EINVAL;
+ nr_fields = event_desc->nr_fields;
+ for (i = 0; i < nr_fields; i++) {
+ if (fields[i].u.ext.nofilter) {
+ continue;
+ }
+ if (!strcmp(fields[i].name, field_name)) {
+ field = &fields[i];
+ break;
+ }
+ /* compute field offset */
+ switch (fields[i].type.atype) {
+ case atype_integer:
+ case atype_enum:
+ case atype_enum_nestable:
+ field_offset += sizeof(int64_t);
+ break;
+ case atype_array:
+ case atype_array_nestable:
+ case atype_sequence:
+ case atype_sequence_nestable:
+ field_offset += sizeof(unsigned long);
+ field_offset += sizeof(void *);
+ break;
+ case atype_string:
+ field_offset += sizeof(void *);
+ break;
+ case atype_float:
+ field_offset += sizeof(double);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ if (!field)
+ return -EINVAL;
+
+ /* Check if field offset is too large for 16-bit offset */
+ if (field_offset > FILTER_BYTECODE_MAX_LEN - 1)
+ return -EINVAL;
+
+ /* set type */
+ op = (struct load_op *) &runtime->code[reloc_offset];
+
+ switch (bytecode_op) {
+ case BYTECODE_OP_LOAD_FIELD_REF:
+ {
+ struct field_ref *field_ref;
+
+ field_ref = (struct field_ref *) op->data;
+ switch (field->type.atype) {
+ case atype_integer:
+ case atype_enum:
+ case atype_enum_nestable:
+ op->op = BYTECODE_OP_LOAD_FIELD_REF_S64;
+ break;
+ case atype_array:
+ case atype_array_nestable:
+ case atype_sequence:
+ case atype_sequence_nestable:
+ op->op = BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE;
+ break;
+ case atype_string:
+ op->op = BYTECODE_OP_LOAD_FIELD_REF_STRING;
+ break;
+ case atype_float:
+ op->op = BYTECODE_OP_LOAD_FIELD_REF_DOUBLE;
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* set offset */
+ field_ref->offset = (uint16_t) field_offset;
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
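+
+/*
+ * Offset computation sketch (64-bit target and field set assumed for
+ * illustration): for an event with fields { s64 a; string b; float c; },
+ * a reloc on "c" accumulates sizeof(int64_t) for a and sizeof(void *)
+ * for b, so field_offset = 16 becomes the 16-bit field_ref offset of
+ * the specialized LOAD_FIELD_REF_DOUBLE.
+ */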
+
+static
+int apply_context_reloc(struct bytecode_runtime *runtime,
+ uint32_t runtime_len,
+ uint32_t reloc_offset,
+ const char *context_name,
+ enum bytecode_op bytecode_op)
+{
+ struct load_op *op;
+ struct lttng_ctx_field *ctx_field;
+ int idx;
+ struct lttng_ctx *ctx = *runtime->p.pctx;
+
+ dbg_printf("Apply context reloc: %u %s\n", reloc_offset, context_name);
+
+ /* Get context index */
+ idx = lttng_get_context_index(ctx, context_name);
+ if (idx < 0) {
+ if (lttng_context_is_app(context_name)) {
+ int ret;
+
+ ret = lttng_ust_add_app_context_to_ctx_rcu(context_name,
+ &ctx);
+ if (ret)
+ return ret;
+ idx = lttng_get_context_index(ctx, context_name);
+ if (idx < 0)
+ return -ENOENT;
+ } else {
+ return -ENOENT;
+ }
+ }
+ /* Check if idx is too large for 16-bit offset */
+ if (idx > FILTER_BYTECODE_MAX_LEN - 1)
+ return -EINVAL;
+
+ /* Get context return type */
+ ctx_field = &ctx->fields[idx];
+ op = (struct load_op *) &runtime->code[reloc_offset];
+
+ switch (bytecode_op) {
+ case BYTECODE_OP_GET_CONTEXT_REF:
+ {
+ struct field_ref *field_ref;
+
+ field_ref = (struct field_ref *) op->data;
+ switch (ctx_field->event_field.type.atype) {
+ case atype_integer:
+ case atype_enum:
+ case atype_enum_nestable:
+ op->op = BYTECODE_OP_GET_CONTEXT_REF_S64;
+ break;
+ /* Sequence and array supported as string */
+ case atype_string:
+ case atype_array:
+ case atype_array_nestable:
+ case atype_sequence:
+ case atype_sequence_nestable:
+ op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
+ break;
+ case atype_float:
+ op->op = BYTECODE_OP_GET_CONTEXT_REF_DOUBLE;
+ break;
+ case atype_dynamic:
+ op->op = BYTECODE_OP_GET_CONTEXT_REF;
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* set offset to context index within channel contexts */
+ field_ref->offset = (uint16_t) idx;
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
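+
+/*
+ * e.g. (context name assumed): a reloc naming the "vpid" context, an
+ * integer context field, resolves to its index within the channel
+ * contexts; the generic GET_CONTEXT_REF at the reloc offset is then
+ * rewritten into GET_CONTEXT_REF_S64 carrying that index as its
+ * 16-bit offset.
+ */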
+
+static
+int apply_reloc(const struct lttng_event_desc *event_desc,
+ struct bytecode_runtime *runtime,
+ uint32_t runtime_len,
+ uint32_t reloc_offset,
+ const char *name)
+{
+ struct load_op *op;
+
+ dbg_printf("Apply reloc: %u %s\n", reloc_offset, name);
+
+ /* Ensure that the reloc is within the code */
+ if (runtime_len - reloc_offset < sizeof(uint16_t))
+ return -EINVAL;
+
+ op = (struct load_op *) &runtime->code[reloc_offset];
+ switch (op->op) {
+ case BYTECODE_OP_LOAD_FIELD_REF:
+ return apply_field_reloc(event_desc, runtime, runtime_len,
+ reloc_offset, name, op->op);
+ case BYTECODE_OP_GET_CONTEXT_REF:
+ return apply_context_reloc(runtime, runtime_len,
+ reloc_offset, name, op->op);
+ case BYTECODE_OP_GET_SYMBOL:
+ case BYTECODE_OP_GET_SYMBOL_FIELD:
+ /*
+ * Will be handled by load specialize phase or
+ * dynamically by interpreter.
+ */
+ return 0;
+ default:
+ ERR("Unknown reloc op type %u\n", op->op);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static
+int bytecode_is_linked(struct lttng_ust_bytecode_node *bytecode,
+ struct cds_list_head *bytecode_runtime_head)
+{
+ struct lttng_bytecode_runtime *bc_runtime;
+
+ cds_list_for_each_entry(bc_runtime, bytecode_runtime_head, node) {
+ if (bc_runtime->bc == bytecode)
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Take a bytecode with reloc table and link it to an event to create a
+ * bytecode runtime.
+ */
+static
+int link_bytecode(const struct lttng_event_desc *event_desc,
+ struct lttng_ctx **ctx,
+ struct lttng_ust_bytecode_node *bytecode,
+ struct cds_list_head *insert_loc)
+{
+ int ret, offset, next_offset;
+ struct bytecode_runtime *runtime = NULL;
+ size_t runtime_alloc_len;
+
+ if (!bytecode)
+ return 0;
+ /* Bytecode already linked */
+ if (bytecode_is_linked(bytecode, insert_loc))
+ return 0;
+
+ dbg_printf("Linking...\n");
+
+ /* We don't need the reloc table in the runtime */
+ runtime_alloc_len = sizeof(*runtime) + bytecode->bc.reloc_offset;
+ runtime = zmalloc(runtime_alloc_len);
+ if (!runtime) {
+ ret = -ENOMEM;
+ goto alloc_error;
+ }
+ runtime->p.bc = bytecode;
+ runtime->p.pctx = ctx;
+ runtime->len = bytecode->bc.reloc_offset;
+ /* copy original bytecode */
+ memcpy(runtime->code, bytecode->bc.data, runtime->len);
+ /*
+	 * Apply relocs. Each reloc entry is a uint16_t (offset in the
+	 * bytecode) followed by a NUL-terminated string (field name).
+ */
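+	/*
+	 * e.g. an entry { 0x0004, "intfield\0" } (values illustrative)
+	 * rewrites the load_op at code offset 4 into its typed variant.
+	 */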
+ for (offset = bytecode->bc.reloc_offset;
+ offset < bytecode->bc.len;
+ offset = next_offset) {
+ uint16_t reloc_offset =
+ *(uint16_t *) &bytecode->bc.data[offset];
+ const char *name =
+ (const char *) &bytecode->bc.data[offset + sizeof(uint16_t)];
+
+ ret = apply_reloc(event_desc, runtime, runtime->len, reloc_offset, name);
+ if (ret) {
+ goto link_error;
+ }
+ next_offset = offset + sizeof(uint16_t) + strlen(name) + 1;
+ }
+ /* Validate bytecode */
+ ret = lttng_bytecode_validate(runtime);
+ if (ret) {
+ goto link_error;
+ }
+ /* Specialize bytecode */
+ ret = lttng_bytecode_specialize(event_desc, runtime);
+ if (ret) {
+ goto link_error;
+ }
+
+ switch (bytecode->type) {
+ case LTTNG_UST_BYTECODE_NODE_TYPE_FILTER:
+ runtime->p.interpreter_funcs.filter = lttng_bytecode_filter_interpret;
+ break;
+ case LTTNG_UST_BYTECODE_NODE_TYPE_CAPTURE:
+ runtime->p.interpreter_funcs.capture = lttng_bytecode_capture_interpret;
+ break;
+ default:
+ abort();
+ }
+
+ runtime->p.link_failed = 0;
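+	/* Publish with RCU: interpreters may walk the runtime list concurrently. */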
+ cds_list_add_rcu(&runtime->p.node, insert_loc);
+ dbg_printf("Linking successful.\n");
+ return 0;
+
+link_error:
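+	/*
+	 * On link error, keep the runtime in the list, but wire the
+	 * "false" interpreter so evaluating this bytecode always fails
+	 * safely instead of running unvalidated code.
+	 */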
+ switch (bytecode->type) {
+ case LTTNG_UST_BYTECODE_NODE_TYPE_FILTER:
+ runtime->p.interpreter_funcs.filter = lttng_bytecode_filter_interpret_false;
+ break;
+ case LTTNG_UST_BYTECODE_NODE_TYPE_CAPTURE:
+ runtime->p.interpreter_funcs.capture = lttng_bytecode_capture_interpret_false;
+ break;
+ default:
+ abort();
+ }
+
+ runtime->p.link_failed = 1;
+ cds_list_add_rcu(&runtime->p.node, insert_loc);
+alloc_error:
+ dbg_printf("Linking failed.\n");
+ return ret;
+}
+
+void lttng_bytecode_filter_sync_state(struct lttng_bytecode_runtime *runtime)
+{
+ struct lttng_ust_bytecode_node *bc = runtime->bc;
+
+ if (!bc->enabler->enabled || runtime->link_failed)
+ runtime->interpreter_funcs.filter = lttng_bytecode_filter_interpret_false;
+ else
+ runtime->interpreter_funcs.filter = lttng_bytecode_filter_interpret;
+}
+
+void lttng_bytecode_capture_sync_state(struct lttng_bytecode_runtime *runtime)
+{
+ struct lttng_ust_bytecode_node *bc = runtime->bc;
+
+ if (!bc->enabler->enabled || runtime->link_failed)
+ runtime->interpreter_funcs.capture = lttng_bytecode_capture_interpret_false;
+ else
+ runtime->interpreter_funcs.capture = lttng_bytecode_capture_interpret;
+}
+
+/*
+ * Given the lists of bytecode programs of an instance (trigger or event) and
+ * of a matching enabler, try to link all the enabler's bytecode programs with
+ * the instance.
+ *
+ * This function is called after we have confirmed that the enabler and
+ * the instance match by name (or by glob pattern).
+ */
+void lttng_enabler_link_bytecode(const struct lttng_event_desc *event_desc,
+ struct lttng_ctx **ctx,
+ struct cds_list_head *instance_bytecode_head,
+ struct cds_list_head *enabler_bytecode_head)
+{
+ struct lttng_ust_bytecode_node *enabler_bc;
+ struct lttng_bytecode_runtime *runtime;
+
+ assert(event_desc);
+
+ /* Go over all the bytecode programs of the enabler. */
+ cds_list_for_each_entry(enabler_bc, enabler_bytecode_head, node) {
+ int found = 0, ret;
+ struct cds_list_head *insert_loc;
+
+ /*
+ * Check if the current enabler bytecode program is already
+ * linked with the instance.
+ */
+ cds_list_for_each_entry(runtime, instance_bytecode_head, node) {
+ if (runtime->bc == enabler_bc) {
+ found = 1;
+ break;
+ }
+ }
+
+ /*
+ * Skip bytecode already linked, go to the next enabler
+ * bytecode program.
+ */
+ if (found)
+ continue;
+
+ /*
+ * Insert at specified priority (seqnum) in increasing
+ * order. If there already is a bytecode of the same priority,
+ * insert the new bytecode right after it.
+ */
+ cds_list_for_each_entry_reverse(runtime,
+ instance_bytecode_head, node) {
+ if (runtime->bc->bc.seqnum <= enabler_bc->bc.seqnum) {
+ /* insert here */
+ insert_loc = &runtime->node;
+ goto add_within;
+ }
+ }
+
+		/* Add to head of list */
+ insert_loc = instance_bytecode_head;
+ add_within:
+ dbg_printf("linking bytecode\n");
+ ret = link_bytecode(event_desc, ctx, enabler_bc, insert_loc);
+ if (ret) {
+ dbg_printf("[lttng filter] warning: cannot link event bytecode\n");
+ }
+ }
+}
+
+/*
+ * We own the bytecode if we return success.
+ */
+int lttng_filter_enabler_attach_bytecode(struct lttng_enabler *enabler,
+ struct lttng_ust_bytecode_node *bytecode)
+{
+ cds_list_add(&bytecode->node, &enabler->filter_bytecode_head);
+ return 0;
+}
+
+static
+void free_filter_runtime(struct cds_list_head *bytecode_runtime_head)
+{
+ struct bytecode_runtime *runtime, *tmp;
+
+ cds_list_for_each_entry_safe(runtime, tmp, bytecode_runtime_head,
+ p.node) {
+ free(runtime->data);
+ free(runtime);
+ }
+}
+
+void lttng_free_event_filter_runtime(struct lttng_event *event)
+{
+ free_filter_runtime(&event->filter_bytecode_runtime_head);
+}
+
+void lttng_free_trigger_filter_runtime(struct lttng_trigger *trigger)
+{
+ free_filter_runtime(&trigger->filter_bytecode_runtime_head);
+}
+
+/* For backward compatibility. Leave those exported symbols in place. */
+void lttng_filter_sync_state(struct lttng_bytecode_runtime *runtime)
+{
+}
--- /dev/null
+#ifndef _LTTNG_BYTECODE_H
+#define _LTTNG_BYTECODE_H
+
+/*
+ * lttng-bytecode.h
+ *
+ * LTTng UST bytecode header.
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <helper.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-context-provider.h>
+#include <stdint.h>
+#include <assert.h>
+#include <string.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <usterr-signal-safe.h>
+#include "bytecode.h"
+
+/* Interpreter stack length, in number of entries */
+#define INTERPRETER_STACK_LEN	10	/* includes 2 dummy entries */
+#define INTERPRETER_STACK_EMPTY 1
+
+#define BYTECODE_MAX_DATA_LEN 65536
+
+#ifndef min_t
+#define min_t(type, a, b) \
+ ((type) (a) < (type) (b) ? (type) (a) : (type) (b))
+#endif
+
+#ifndef likely
+#define likely(x) __builtin_expect(!!(x), 1)
+#endif
+
+#ifndef unlikely
+#define unlikely(x) __builtin_expect(!!(x), 0)
+#endif
+
+#ifdef DEBUG
+#define dbg_printf(fmt, args...) \
+ printf("[debug bytecode in %s:%s@%u] " fmt, \
+ __FILE__, __func__, __LINE__, ## args)
+#else
+#define dbg_printf(fmt, args...) \
+do { \
+ /* do nothing but check printf format */ \
+ if (0) \
+ printf("[debug bytecode in %s:%s@%u] " fmt, \
+ __FILE__, __func__, __LINE__, ## args); \
+} while (0)
+#endif
+
+/* Linked bytecode. Child of struct lttng_bytecode_runtime. */
+struct bytecode_runtime {
+ struct lttng_bytecode_runtime p;
+ size_t data_len;
+ size_t data_alloc_len;
+ char *data;
+ uint16_t len;
+ char code[0];
+};
+
+enum entry_type {
+ REG_S64,
+ REG_U64,
+ REG_DOUBLE,
+ REG_STRING,
+ REG_STAR_GLOB_STRING,
+ REG_UNKNOWN,
+ REG_PTR,
+};
+
+enum load_type {
+ LOAD_ROOT_CONTEXT,
+ LOAD_ROOT_APP_CONTEXT,
+ LOAD_ROOT_PAYLOAD,
+ LOAD_OBJECT,
+};
+
+enum object_type {
+ OBJECT_TYPE_S8,
+ OBJECT_TYPE_S16,
+ OBJECT_TYPE_S32,
+ OBJECT_TYPE_S64,
+ OBJECT_TYPE_U8,
+ OBJECT_TYPE_U16,
+ OBJECT_TYPE_U32,
+ OBJECT_TYPE_U64,
+
+ OBJECT_TYPE_SIGNED_ENUM,
+ OBJECT_TYPE_UNSIGNED_ENUM,
+
+ OBJECT_TYPE_DOUBLE,
+ OBJECT_TYPE_STRING,
+ OBJECT_TYPE_STRING_SEQUENCE,
+
+ OBJECT_TYPE_SEQUENCE,
+ OBJECT_TYPE_ARRAY,
+ OBJECT_TYPE_STRUCT,
+ OBJECT_TYPE_VARIANT,
+
+ OBJECT_TYPE_DYNAMIC,
+};
+
+struct bytecode_get_index_data {
+ uint64_t offset; /* in bytes */
+ size_t ctx_index;
+ size_t array_len;
+ /*
+ * Field is only populated for LOAD_ROOT_CONTEXT, LOAD_ROOT_APP_CONTEXT
+ * and LOAD_ROOT_PAYLOAD. Left NULL for LOAD_OBJECT, considering that the
+ * interpreter needs to find it from the event fields and types to
+ * support variants.
+ */
+ const struct lttng_event_field *field;
+ struct {
+ size_t len;
+ enum object_type type;
+ bool rev_bo; /* reverse byte order */
+ } elem;
+};
+
+/* Validation stack */
+struct vstack_load {
+ enum load_type type;
+ enum object_type object_type;
+ const struct lttng_event_field *field;
+ bool rev_bo; /* reverse byte order */
+};
+
+struct vstack_entry {
+ enum entry_type type;
+ struct vstack_load load;
+};
+
+struct vstack {
+ int top; /* top of stack */
+ struct vstack_entry e[INTERPRETER_STACK_LEN];
+};
+
+static inline
+void vstack_init(struct vstack *stack)
+{
+ stack->top = -1;
+}
+
+static inline
+struct vstack_entry *vstack_ax(struct vstack *stack)
+{
+ if (unlikely(stack->top < 0))
+ return NULL;
+ return &stack->e[stack->top];
+}
+
+static inline
+struct vstack_entry *vstack_bx(struct vstack *stack)
+{
+ if (unlikely(stack->top < 1))
+ return NULL;
+ return &stack->e[stack->top - 1];
+}
+
+static inline
+int vstack_push(struct vstack *stack)
+{
+ if (stack->top >= INTERPRETER_STACK_LEN - 1) {
+ ERR("Stack full\n");
+ return -EINVAL;
+ }
+ ++stack->top;
+ return 0;
+}
+
+static inline
+int vstack_pop(struct vstack *stack)
+{
+ if (unlikely(stack->top < 0)) {
+ ERR("Stack empty\n");
+ return -EINVAL;
+ }
+ stack->top--;
+ return 0;
+}
+
+/* Execution stack */
+enum estack_string_literal_type {
+ ESTACK_STRING_LITERAL_TYPE_NONE,
+ ESTACK_STRING_LITERAL_TYPE_PLAIN,
+ ESTACK_STRING_LITERAL_TYPE_STAR_GLOB,
+};
+
+struct load_ptr {
+ enum load_type type;
+ enum object_type object_type;
+ const void *ptr;
+ size_t nr_elem;
+ bool rev_bo;
+ /* Temporary place-holders for contexts. */
+ union {
+ int64_t s64;
+ uint64_t u64;
+ double d;
+ } u;
+ const struct lttng_event_field *field;
+};
+
+struct estack_entry {
+ enum entry_type type; /* For dynamic typing. */
+ union {
+ int64_t v;
+ double d;
+
+ struct {
+ const char *str;
+ size_t seq_len;
+ enum estack_string_literal_type literal_type;
+ } s;
+ struct load_ptr ptr;
+ } u;
+};
+
+struct estack {
+ int top; /* top of stack */
+ struct estack_entry e[INTERPRETER_STACK_LEN];
+};
+
+/*
+ * Always use aliased type for ax/bx (top of stack).
+ * When ax/bx are S64, use aliased value.
+ */
+#define estack_ax_v ax
+#define estack_bx_v bx
+#define estack_ax_t ax_t
+#define estack_bx_t bx_t
+
+/*
+ * ax and bx registers can hold either integer, double or string.
+ */
+#define estack_ax(stack, top) \
+ ({ \
+ assert((top) > INTERPRETER_STACK_EMPTY); \
+ &(stack)->e[top]; \
+ })
+
+#define estack_bx(stack, top) \
+ ({ \
+ assert((top) > INTERPRETER_STACK_EMPTY + 1); \
+ &(stack)->e[(top) - 1]; \
+ })
+
+/*
+ * Currently, only integers (REG_S64) can be pushed into the stack.
+ */
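+/*
+ * The two top-of-stack entries are cached in the interpreter's ax/bx
+ * "registers": a push spills bx into the backing array and shifts ax
+ * into bx, leaving ax free for the new top; a pop performs the reverse
+ * shuffle, reloading bx from the array.
+ */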
+#define estack_push(stack, top, ax, bx, ax_t, bx_t) \
+ do { \
+ assert((top) < INTERPRETER_STACK_LEN - 1); \
+ (stack)->e[(top) - 1].u.v = (bx); \
+ (stack)->e[(top) - 1].type = (bx_t); \
+ (bx) = (ax); \
+ (bx_t) = (ax_t); \
+ ++(top); \
+ } while (0)
+
+#define estack_pop(stack, top, ax, bx, ax_t, bx_t) \
+ do { \
+ assert((top) > INTERPRETER_STACK_EMPTY); \
+ (ax) = (bx); \
+ (ax_t) = (bx_t); \
+ (bx) = (stack)->e[(top) - 2].u.v; \
+ (bx_t) = (stack)->e[(top) - 2].type; \
+ (top)--; \
+ } while (0)
+
+enum lttng_interpreter_type {
+ LTTNG_INTERPRETER_TYPE_S64,
+ LTTNG_INTERPRETER_TYPE_U64,
+ LTTNG_INTERPRETER_TYPE_SIGNED_ENUM,
+ LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM,
+ LTTNG_INTERPRETER_TYPE_DOUBLE,
+ LTTNG_INTERPRETER_TYPE_STRING,
+ LTTNG_INTERPRETER_TYPE_SEQUENCE,
+};
+
+/*
+ * Represents the output parameter of the lttng interpreter.
+ * Currently capturable field classes are integer, double, string and sequence
+ * of integer.
+ */
+struct lttng_interpreter_output {
+ enum lttng_interpreter_type type;
+ union {
+ int64_t s;
+ uint64_t u;
+ double d;
+
+ struct {
+ const char *str;
+ size_t len;
+ } str;
+ struct {
+ const void *ptr;
+ size_t nr_elem;
+
+ /* Inner type. */
+ const struct lttng_type *nested_type;
+ } sequence;
+ } u;
+};
+
+const char *print_op(enum bytecode_op op);
+
+void lttng_bytecode_filter_sync_state(struct lttng_bytecode_runtime *runtime);
+void lttng_bytecode_capture_sync_state(struct lttng_bytecode_runtime *runtime);
+
+int lttng_bytecode_validate(struct bytecode_runtime *bytecode);
+int lttng_bytecode_specialize(const struct lttng_event_desc *event_desc,
+ struct bytecode_runtime *bytecode);
+
+uint64_t lttng_bytecode_filter_interpret_false(void *filter_data,
+ const char *filter_stack_data);
+uint64_t lttng_bytecode_filter_interpret(void *filter_data,
+ const char *filter_stack_data);
+
+uint64_t lttng_bytecode_capture_interpret_false(void *capture_data,
+ const char *capture_stack_data,
+ struct lttng_interpreter_output *output);
+uint64_t lttng_bytecode_capture_interpret(void *capture_data,
+ const char *capture_stack_data,
+ struct lttng_interpreter_output *output);
+
+#endif /* _LTTNG_BYTECODE_H */
#include <unistd.h>
#include <lttng/ust-context-provider.h>
+
#include "lttng-tracer-core.h"
#include "jhash.h"
+#include "context-provider-internal.h"
#include <helper.h>
#define CONTEXT_PROVIDER_HT_BITS 12
hash = jhash(provider->name, name_len, 0);
head = &context_provider_ht.table[hash & (CONTEXT_PROVIDER_HT_SIZE - 1)];
cds_hlist_add_head(&provider->node, head);
+
lttng_ust_context_set_session_provider(provider->name,
provider->get_size, provider->record,
provider->get_value);
+
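+	/*
+	 * Also wire the provider into all existing trigger group
+	 * contexts, so triggers observe the same application context as
+	 * sessions.
+	 */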
+ lttng_ust_context_set_trigger_group_provider(provider->name,
+ provider->get_size, provider->record,
+ provider->get_value);
end:
ust_unlock();
return ret;
lttng_ust_context_set_session_provider(provider->name,
lttng_ust_dummy_get_size, lttng_ust_dummy_record,
lttng_ust_dummy_get_value);
+
+ lttng_ust_context_set_trigger_group_provider(provider->name,
+ lttng_ust_dummy_get_size, lttng_ust_dummy_record,
+ lttng_ust_dummy_get_value);
+
cds_hlist_del(&provider->node);
end:
ust_unlock();
#include <string.h>
#include <assert.h>
+#include "context-internal.h"
+
/*
* The filter implementation requires that two consecutive "get" for the
* same context performed by the same thread return the same result.
return ret;
}
-int lttng_session_context_init(struct lttng_ctx **ctx)
+int lttng_context_init_all(struct lttng_ctx **ctx)
{
int ret;
void lttng_context_exit(void)
{
}
+
+int lttng_session_context_init(struct lttng_ctx **ctx)
+{
+ return 0;
+}
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-counter-client-percpu-32-modular.c
+ *
+ * LTTng lib counter client. Per-cpu 32-bit counters in overflow
+ * arithmetic.
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _GNU_SOURCE
+#include <lttng/ust-events.h>
+#include "../libcounter/counter.h"
+#include "../libcounter/counter-api.h"
+
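+/*
+ * Per-CPU allocation with per-CPU synchronization: each CPU updates its
+ * own 32-bit counter, and values wrap around on overflow.
+ */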
+static const struct lib_counter_config client_config = {
+ .alloc = COUNTER_ALLOC_PER_CPU,
+ .sync = COUNTER_SYNC_PER_CPU,
+ .arithmetic = COUNTER_ARITHMETIC_OVERFLOW,
+ .counter_size = COUNTER_SIZE_32_BIT,
+};
+
+static struct lib_counter *counter_create(size_t nr_dimensions,
+ const struct lttng_counter_dimension *dimensions,
+ int64_t global_sum_step,
+ int global_counter_fd,
+ int nr_counter_cpu_fds,
+ const int *counter_cpu_fds,
+ bool is_daemon)
+{
+ size_t max_nr_elem[LTTNG_COUNTER_DIMENSION_MAX], i;
+
+ if (nr_dimensions > LTTNG_COUNTER_DIMENSION_MAX)
+ return NULL;
+ for (i = 0; i < nr_dimensions; i++) {
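+		/*
+		 * Dimensions requesting underflow/overflow handling are
+		 * not supported by this client.
+		 */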
+ if (dimensions[i].has_underflow || dimensions[i].has_overflow)
+ return NULL;
+ max_nr_elem[i] = dimensions[i].size;
+ }
+ return lttng_counter_create(&client_config, nr_dimensions, max_nr_elem,
+ global_sum_step, global_counter_fd, nr_counter_cpu_fds,
+ counter_cpu_fds, is_daemon);
+}
+
+static void counter_destroy(struct lib_counter *counter)
+{
+ lttng_counter_destroy(counter);
+}
+
+static int counter_add(struct lib_counter *counter, const size_t *dimension_indexes, int64_t v)
+{
+ return lttng_counter_add(&client_config, counter, dimension_indexes, v);
+}
+
+static int counter_read(struct lib_counter *counter, const size_t *dimension_indexes, int cpu,
+ int64_t *value, bool *overflow, bool *underflow)
+{
+ return lttng_counter_read(&client_config, counter, dimension_indexes, cpu, value,
+ overflow, underflow);
+}
+
+static int counter_aggregate(struct lib_counter *counter, const size_t *dimension_indexes,
+ int64_t *value, bool *overflow, bool *underflow)
+{
+ return lttng_counter_aggregate(&client_config, counter, dimension_indexes, value,
+ overflow, underflow);
+}
+
+static int counter_clear(struct lib_counter *counter, const size_t *dimension_indexes)
+{
+ return lttng_counter_clear(&client_config, counter, dimension_indexes);
+}
+
+static struct lttng_counter_transport lttng_counter_transport = {
+ .name = "counter-per-cpu-32-modular",
+ .ops = {
+ .counter_create = counter_create,
+ .counter_destroy = counter_destroy,
+ .counter_add = counter_add,
+ .counter_read = counter_read,
+ .counter_aggregate = counter_aggregate,
+ .counter_clear = counter_clear,
+ },
+ .client_config = &client_config,
+};
+
+void lttng_counter_client_percpu_32_overflow_init(void)
+{
+ lttng_counter_transport_register(<tng_counter_transport);
+}
+
+void lttng_counter_client_percpu_32_overflow_exit(void)
+{
+ lttng_counter_transport_unregister(<tng_counter_transport);
+}
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-counter-client-percpu-64-modular.c
+ *
+ * LTTng lib counter client. Per-cpu 64-bit counters in overflow
+ * arithmetic.
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _GNU_SOURCE
+#include <lttng/ust-events.h>
+#include "../libcounter/counter.h"
+#include "../libcounter/counter-api.h"
+
+static const struct lib_counter_config client_config = {
+ .alloc = COUNTER_ALLOC_PER_CPU,
+ .sync = COUNTER_SYNC_PER_CPU,
+ .arithmetic = COUNTER_ARITHMETIC_OVERFLOW,
+ .counter_size = COUNTER_SIZE_64_BIT,
+};
+
+static struct lib_counter *counter_create(size_t nr_dimensions,
+ const struct lttng_counter_dimension *dimensions,
+ int64_t global_sum_step,
+ int global_counter_fd,
+ int nr_counter_cpu_fds,
+ const int *counter_cpu_fds,
+ bool is_daemon)
+{
+ size_t max_nr_elem[LTTNG_COUNTER_DIMENSION_MAX], i;
+
+ if (nr_dimensions > LTTNG_COUNTER_DIMENSION_MAX)
+ return NULL;
+ for (i = 0; i < nr_dimensions; i++) {
+ if (dimensions[i].has_underflow || dimensions[i].has_overflow)
+ return NULL;
+ max_nr_elem[i] = dimensions[i].size;
+ }
+ return lttng_counter_create(&client_config, nr_dimensions, max_nr_elem,
+ global_sum_step, global_counter_fd, nr_counter_cpu_fds,
+ counter_cpu_fds, is_daemon);
+}
+
+static void counter_destroy(struct lib_counter *counter)
+{
+ lttng_counter_destroy(counter);
+}
+
+static int counter_add(struct lib_counter *counter, const size_t *dimension_indexes, int64_t v)
+{
+ return lttng_counter_add(&client_config, counter, dimension_indexes, v);
+}
+
+static int counter_read(struct lib_counter *counter, const size_t *dimension_indexes, int cpu,
+ int64_t *value, bool *overflow, bool *underflow)
+{
+ return lttng_counter_read(&client_config, counter, dimension_indexes, cpu, value,
+ overflow, underflow);
+}
+
+static int counter_aggregate(struct lib_counter *counter, const size_t *dimension_indexes,
+ int64_t *value, bool *overflow, bool *underflow)
+{
+ return lttng_counter_aggregate(&client_config, counter, dimension_indexes, value,
+ overflow, underflow);
+}
+
+static int counter_clear(struct lib_counter *counter, const size_t *dimension_indexes)
+{
+ return lttng_counter_clear(&client_config, counter, dimension_indexes);
+}
+
+static struct lttng_counter_transport lttng_counter_transport = {
+ .name = "counter-per-cpu-64-modular",
+ .ops = {
+ .counter_create = counter_create,
+ .counter_destroy = counter_destroy,
+ .counter_add = counter_add,
+ .counter_read = counter_read,
+ .counter_aggregate = counter_aggregate,
+ .counter_clear = counter_clear,
+ },
+ .client_config = &client_config,
+};
+
+void lttng_counter_client_percpu_64_overflow_init(void)
+{
+ lttng_counter_transport_register(<tng_counter_transport);
+}
+
+void lttng_counter_client_percpu_64_overflow_exit(void)
+{
+ lttng_counter_transport_unregister(<tng_counter_transport);
+}
#define _GNU_SOURCE
#define _LGPL_SOURCE
#include <stdio.h>
-#include <urcu/list.h>
-#include <urcu/hlist.h>
-#include <pthread.h>
+#include <assert.h>
#include <errno.h>
+#include <limits.h>
+#include <pthread.h>
#include <sys/shm.h>
#include <sys/ipc.h>
#include <stdint.h>
#include <inttypes.h>
#include <time.h>
#include <stdbool.h>
+#include <unistd.h>
#include <lttng/ust-endian.h>
-#include "clock.h"
#include <urcu-bp.h>
+#include <urcu/arch.h>
#include <urcu/compiler.h>
+#include <urcu/hlist.h>
+#include <urcu/list.h>
#include <urcu/uatomic.h>
-#include <urcu/arch.h>
#include <lttng/tracepoint.h>
#include <lttng/ust-events.h>
#include <helper.h>
#include <lttng/ust-ctl.h>
#include <ust-comm.h>
+#include <ust-fd.h>
#include <lttng/ust-dynamic-type.h>
#include <lttng/ust-context-provider.h>
#include "error.h"
#include "tracepoint-internal.h"
#include "string-utils.h"
+#include "lttng-bytecode.h"
#include "lttng-tracer.h"
#include "lttng-tracer-core.h"
#include "lttng-ust-statedump.h"
+#include "context-internal.h"
+#include "ust-events-internal.h"
#include "wait.h"
#include "../libringbuffer/shm.h"
+#include "../libcounter/counter.h"
#include "jhash.h"
+#include <lttng/ust-abi.h>
/*
* All operations within this file are called by the communication
*/
static CDS_LIST_HEAD(sessions);
+static CDS_LIST_HEAD(trigger_groups);
struct cds_list_head *_lttng_get_sessions(void)
{
}
static void _lttng_event_destroy(struct lttng_event *event);
+static void _lttng_trigger_destroy(struct lttng_trigger *trigger);
static void _lttng_enum_destroy(struct lttng_enum *_enum);
static
-void lttng_session_lazy_sync_enablers(struct lttng_session *session);
+void lttng_session_lazy_sync_event_enablers(struct lttng_session *session);
+static
+void lttng_session_sync_event_enablers(struct lttng_session *session);
static
-void lttng_session_sync_enablers(struct lttng_session *session);
+void lttng_trigger_group_sync_enablers(struct lttng_trigger_group *trigger_group);
static
void lttng_enabler_destroy(struct lttng_enabler *enabler);
session = zmalloc(sizeof(struct lttng_session));
if (!session)
return NULL;
- if (lttng_session_context_init(&session->ctx)) {
+ if (lttng_context_init_all(&session->ctx)) {
free(session);
return NULL;
}
return session;
}
+struct lttng_counter *lttng_ust_counter_create(
+ const char *counter_transport_name,
+ size_t number_dimensions, const struct lttng_counter_dimension *dimensions)
+{
+ struct lttng_counter_transport *counter_transport = NULL;
+ struct lttng_counter *counter = NULL;
+
+ counter_transport = lttng_counter_transport_find(counter_transport_name);
+ if (!counter_transport)
+ goto notransport;
+ counter = zmalloc(sizeof(struct lttng_counter));
+ if (!counter)
+ goto nomem;
+
+ /* Create trigger error counter. */
+ counter->ops = &counter_transport->ops;
+ counter->transport = counter_transport;
+
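+	/*
+	 * Created with global_sum_step = 0, without pre-existing counter
+	 * fds (-1, 0, NULL), and with is_daemon = false.
+	 */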
+ counter->counter = counter->ops->counter_create(
+ number_dimensions, dimensions, 0,
+ -1, 0, NULL, false);
+ if (!counter->counter) {
+ goto create_error;
+ }
+
+ return counter;
+
+create_error:
+ free(counter);
+nomem:
+notransport:
+ return NULL;
+}
+
+static
+void lttng_ust_counter_destroy(struct lttng_counter *counter)
+{
+ counter->ops->counter_destroy(counter->counter);
+ free(counter);
+}
+
+struct lttng_trigger_group *lttng_trigger_group_create(void)
+{
+ struct lttng_trigger_group *trigger_group;
+ int i;
+
+ trigger_group = zmalloc(sizeof(struct lttng_trigger_group));
+ if (!trigger_group)
+ return NULL;
+
+ /* Add all contexts. */
+ if (lttng_context_init_all(&trigger_group->ctx)) {
+ free(trigger_group);
+ return NULL;
+ }
+
+ CDS_INIT_LIST_HEAD(&trigger_group->enablers_head);
+ CDS_INIT_LIST_HEAD(&trigger_group->triggers_head);
+ for (i = 0; i < LTTNG_UST_TRIGGER_HT_SIZE; i++)
+ CDS_INIT_HLIST_HEAD(&trigger_group->triggers_ht.table[i]);
+
+ cds_list_add(&trigger_group->node, &trigger_groups);
+
+ return trigger_group;
+}
+
/*
* Only used internally at session destruction.
*/
event->registered = 1;
}
+static
+void register_trigger(struct lttng_trigger *trigger)
+{
+ int ret;
+ const struct lttng_event_desc *desc;
+
+ assert(trigger->registered == 0);
+ desc = trigger->desc;
+ ret = __tracepoint_probe_register_queue_release(desc->name,
+ desc->u.ext.trigger_callback, trigger, desc->signature);
+ WARN_ON_ONCE(ret);
+ if (!ret)
+ trigger->registered = 1;
+}
+
static
void unregister_event(struct lttng_event *event)
{
event->registered = 0;
}
+static
+void unregister_trigger(struct lttng_trigger *trigger)
+{
+ int ret;
+ const struct lttng_event_desc *desc;
+
+ assert(trigger->registered == 1);
+ desc = trigger->desc;
+ ret = __tracepoint_probe_unregister_queue_release(desc->name,
+ desc->u.ext.trigger_callback, trigger);
+ WARN_ON_ONCE(ret);
+ if (!ret)
+ trigger->registered = 0;
+}
+
/*
* Only used internally at session destruction.
*/
unregister_event(event);
}
+/*
+ * Only used internally at session destruction.
+ */
+static
+void _lttng_trigger_unregister(struct lttng_trigger *trigger)
+{
+ if (trigger->registered)
+ unregister_trigger(trigger);
+}
+
void lttng_session_destroy(struct lttng_session *session)
{
struct lttng_channel *chan, *tmpchan;
struct lttng_event *event, *tmpevent;
struct lttng_enum *_enum, *tmp_enum;
- struct lttng_enabler *enabler, *tmpenabler;
+ struct lttng_event_enabler *event_enabler, *event_tmpenabler;
CMM_ACCESS_ONCE(session->active) = 0;
cds_list_for_each_entry(event, &session->events_head, node) {
}
synchronize_trace(); /* Wait for in-flight events to complete */
__tracepoint_probe_prune_release_queue();
- cds_list_for_each_entry_safe(enabler, tmpenabler,
+ cds_list_for_each_entry_safe(event_enabler, event_tmpenabler,
&session->enablers_head, node)
- lttng_enabler_destroy(enabler);
+ lttng_event_enabler_destroy(event_enabler);
cds_list_for_each_entry_safe(event, tmpevent,
&session->events_head, node)
_lttng_event_destroy(event);
free(session);
}
+void lttng_trigger_group_destroy(
+ struct lttng_trigger_group *trigger_group)
+{
+ int close_ret;
+ struct lttng_trigger_enabler *trigger_enabler, *tmptrigger_enabler;
+ struct lttng_trigger *trigger, *tmptrigger;
+
+ if (!trigger_group) {
+ return;
+ }
+
+ cds_list_for_each_entry(trigger, &trigger_group->triggers_head, node)
+ _lttng_trigger_unregister(trigger);
+
+ synchronize_trace();
+
+ cds_list_for_each_entry_safe(trigger_enabler, tmptrigger_enabler,
+ &trigger_group->enablers_head, node)
+ lttng_trigger_enabler_destroy(trigger_enabler);
+
+ cds_list_for_each_entry_safe(trigger, tmptrigger,
+ &trigger_group->triggers_head, node)
+ _lttng_trigger_destroy(trigger);
+
+ if (trigger_group->error_counter)
+ lttng_ust_counter_destroy(trigger_group->error_counter);
+
+ /* Close the notification fd to the listener of triggers. */
+
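+	/*
+	 * Hold the fd tracker lock across close() so the tracker's view
+	 * of internally-owned fds stays coherent.
+	 */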
+ lttng_ust_lock_fd_tracker();
+ close_ret = close(trigger_group->notification_fd);
+ if (!close_ret) {
+ lttng_ust_delete_fd_from_tracker(trigger_group->notification_fd);
+ } else {
+ PERROR("close");
+ abort();
+ }
+ lttng_ust_unlock_fd_tracker();
+
+ cds_list_del(&trigger_group->node);
+
+ free(trigger_group);
+}
+
+static
+void lttng_enabler_destroy(struct lttng_enabler *enabler)
+{
+ struct lttng_ust_bytecode_node *filter_node, *tmp_filter_node;
+ struct lttng_ust_excluder_node *excluder_node, *tmp_excluder_node;
+
+ if (!enabler) {
+ return;
+ }
+
+ /* Destroy filter bytecode */
+ cds_list_for_each_entry_safe(filter_node, tmp_filter_node,
+ &enabler->filter_bytecode_head, node) {
+ free(filter_node);
+ }
+
+ /* Destroy excluders */
+ cds_list_for_each_entry_safe(excluder_node, tmp_excluder_node,
+ &enabler->excluder_head, node) {
+ free(excluder_node);
+ }
+}
+
+void lttng_trigger_enabler_destroy(struct lttng_trigger_enabler *trigger_enabler)
+{
+ if (!trigger_enabler) {
+ return;
+ }
+
+ cds_list_del(&trigger_enabler->node);
+
+ lttng_enabler_destroy(lttng_trigger_enabler_as_enabler(trigger_enabler));
+
+ free(trigger_enabler);
+}
+
static
int lttng_enum_create(const struct lttng_enum_desc *desc,
struct lttng_session *session)
session->tstate = 1;
/* We need to sync enablers with session before activation. */
- lttng_session_sync_enablers(session);
+ lttng_session_sync_event_enablers(session);
/*
* Snapshot the number of events per channel to know the type of header
/* Set transient enabler state to "disabled" */
session->tstate = 0;
- lttng_session_sync_enablers(session);
+ lttng_session_sync_event_enablers(session);
end:
return ret;
}
}
/* Set transient enabler state to "enabled" */
channel->tstate = 1;
- lttng_session_sync_enablers(channel->session);
+ lttng_session_sync_event_enablers(channel->session);
/* Set atomically the state to "enabled" */
CMM_ACCESS_ONCE(channel->enabled) = 1;
end:
CMM_ACCESS_ONCE(channel->enabled) = 0;
/* Set transient enabler state to "enabled" */
channel->tstate = 0;
- lttng_session_sync_enablers(channel->session);
+ lttng_session_sync_event_enablers(channel->session);
end:
return ret;
}
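+/*
+ * Borrow the hash table bucket matching an event description's name.
+ * The table size must be a power of two, since the hash is reduced
+ * with a mask rather than a modulo.
+ */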
+static inline
+struct cds_hlist_head *borrow_hash_table_bucket(
+ struct cds_hlist_head *hash_table,
+ unsigned int hash_table_size,
+ const struct lttng_event_desc *desc)
+{
+ const char *event_name;
+ size_t name_len;
+ uint32_t hash;
+
+ event_name = desc->name;
+ name_len = strlen(event_name);
+
+ hash = jhash(event_name, name_len, 0);
+ return &hash_table[hash & (hash_table_size - 1)];
+}
+
/*
* Supports event creation while tracing session is active.
*/
int lttng_event_create(const struct lttng_event_desc *desc,
struct lttng_channel *chan)
{
- const char *event_name = desc->name;
struct lttng_event *event;
struct lttng_session *session = chan->session;
struct cds_hlist_head *head;
int ret = 0;
- size_t name_len = strlen(event_name);
- uint32_t hash;
int notify_socket, loglevel;
const char *uri;
- hash = jhash(event_name, name_len, 0);
- head = &chan->session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];
+ head = borrow_hash_table_bucket(chan->session->events_ht.table,
+ LTTNG_UST_EVENT_HT_SIZE, desc);
notify_socket = lttng_get_notify_socket(session->owner);
if (notify_socket < 0) {
/* Event will be enabled by enabler sync. */
event->enabled = 0;
event->registered = 0;
- CDS_INIT_LIST_HEAD(&event->bytecode_runtime_head);
+ CDS_INIT_LIST_HEAD(&event->filter_bytecode_runtime_head);
CDS_INIT_LIST_HEAD(&event->enablers_ref_head);
event->desc = desc;
session,
session->objd,
chan->objd,
- event_name,
+ desc->name,
loglevel,
desc->signature,
desc->nr_fields,
return ret;
}
+static
+int lttng_trigger_create(const struct lttng_event_desc *desc,
+ uint64_t id, uint64_t error_counter_index,
+ struct lttng_trigger_group *trigger_group)
+{
+ struct lttng_trigger *trigger;
+ struct cds_hlist_head *head;
+ int ret = 0;
+
+ /*
+ * Get the hashtable bucket the created lttng_trigger object should be
+ * inserted.
+ */
+ head = borrow_hash_table_bucket(trigger_group->triggers_ht.table,
+ LTTNG_UST_TRIGGER_HT_SIZE, desc);
+
+ trigger = zmalloc(sizeof(struct lttng_trigger));
+ if (!trigger) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ trigger->group = trigger_group;
+ trigger->id = id;
+ trigger->error_counter_index = error_counter_index;
+
+ /* Trigger will be enabled by enabler sync. */
+ trigger->enabled = 0;
+ trigger->registered = 0;
+
+ CDS_INIT_LIST_HEAD(&trigger->filter_bytecode_runtime_head);
+ CDS_INIT_LIST_HEAD(&trigger->capture_bytecode_runtime_head);
+ CDS_INIT_LIST_HEAD(&trigger->enablers_ref_head);
+ trigger->desc = desc;
+
+ cds_list_add(&trigger->node, &trigger_group->triggers_head);
+ cds_hlist_add_head(&trigger->hlist, head);
+
+ return 0;
+
+error:
+ return ret;
+}
+
+static
+void _lttng_trigger_destroy(struct lttng_trigger *trigger)
+{
+ struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;
+
+ /* Remove from trigger list. */
+ cds_list_del(&trigger->node);
+ /* Remove from trigger hash table. */
+ cds_hlist_del(&trigger->hlist);
+
+ lttng_free_trigger_filter_runtime(trigger);
+
+ /* Free trigger enabler refs */
+ cds_list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
+ &trigger->enablers_ref_head, node)
+ free(enabler_ref);
+ free(trigger);
+}
+
static
int lttng_desc_match_star_glob_enabler(const struct lttng_event_desc *desc,
struct lttng_enabler *enabler)
int loglevel = 0;
unsigned int has_loglevel = 0;
- assert(enabler->type == LTTNG_ENABLER_STAR_GLOB);
+ assert(enabler->format_type == LTTNG_ENABLER_FORMAT_STAR_GLOB);
if (!strutils_star_glob_match(enabler->event_param.name, SIZE_MAX,
desc->name, SIZE_MAX))
return 0;
int loglevel = 0;
unsigned int has_loglevel = 0;
- assert(enabler->type == LTTNG_ENABLER_EVENT);
+ assert(enabler->format_type == LTTNG_ENABLER_FORMAT_EVENT);
if (strcmp(desc->name, enabler->event_param.name))
return 0;
if (desc->loglevel) {
int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
struct lttng_enabler *enabler)
{
- switch (enabler->type) {
- case LTTNG_ENABLER_STAR_GLOB:
+ switch (enabler->format_type) {
+ case LTTNG_ENABLER_FORMAT_STAR_GLOB:
{
struct lttng_ust_excluder_node *excluder;
}
return 1;
}
- case LTTNG_ENABLER_EVENT:
+ case LTTNG_ENABLER_FORMAT_EVENT:
return lttng_desc_match_event_enabler(desc, enabler);
default:
return -EINVAL;
}
static
-int lttng_event_match_enabler(struct lttng_event *event,
- struct lttng_enabler *enabler)
+int lttng_event_enabler_match_event(struct lttng_event_enabler *event_enabler,
+ struct lttng_event *event)
{
- if (lttng_desc_match_enabler(event->desc, enabler)
- && event->chan == enabler->chan)
+ if (lttng_desc_match_enabler(event->desc,
+ lttng_event_enabler_as_enabler(event_enabler))
+ && event->chan == event_enabler->chan)
return 1;
else
return 0;
}
static
-struct lttng_enabler_ref * lttng_event_enabler_ref(struct lttng_event *event,
+int lttng_trigger_enabler_match_trigger(
+ struct lttng_trigger_enabler *trigger_enabler,
+ struct lttng_trigger *trigger)
+{
+ int desc_matches = lttng_desc_match_enabler(trigger->desc,
+ lttng_trigger_enabler_as_enabler(trigger_enabler));
+
+ if (desc_matches && trigger->group == trigger_enabler->group &&
+ trigger->id == trigger_enabler->id)
+ return 1;
+ else
+ return 0;
+}
+
+static
+struct lttng_enabler_ref *lttng_enabler_ref(
+ struct cds_list_head *enabler_ref_list,
struct lttng_enabler *enabler)
{
struct lttng_enabler_ref *enabler_ref;
- cds_list_for_each_entry(enabler_ref,
- &event->enablers_ref_head, node) {
+ cds_list_for_each_entry(enabler_ref, enabler_ref_list, node) {
if (enabler_ref->ref == enabler)
return enabler_ref;
}
* tracepoint probes.
*/
static
-void lttng_create_event_if_missing(struct lttng_enabler *enabler)
+void lttng_create_event_if_missing(struct lttng_event_enabler *event_enabler)
{
- struct lttng_session *session = enabler->chan->session;
+ struct lttng_session *session = event_enabler->chan->session;
struct lttng_probe_desc *probe_desc;
const struct lttng_event_desc *desc;
struct lttng_event *event;
bool found = false;
struct cds_hlist_head *head;
struct cds_hlist_node *node;
- const char *event_name;
- size_t name_len;
- uint32_t hash;
desc = probe_desc->event_desc[i];
- if (!lttng_desc_match_enabler(desc, enabler))
+ if (!lttng_desc_match_enabler(desc,
+ lttng_event_enabler_as_enabler(event_enabler)))
continue;
- event_name = desc->name;
- name_len = strlen(event_name);
- /*
- * Check if already created.
- */
- hash = jhash(event_name, name_len, 0);
- head = &session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];
+ head = borrow_hash_table_bucket(
+ session->events_ht.table,
+ LTTNG_UST_EVENT_HT_SIZE, desc);
+
cds_hlist_for_each_entry(event, node, head, hlist) {
if (event->desc == desc
- && event->chan == enabler->chan) {
+ && event->chan == event_enabler->chan) {
found = true;
break;
}
* event probe.
*/
ret = lttng_event_create(probe_desc->event_desc[i],
- enabler->chan);
+ event_enabler->chan);
if (ret) {
DBG("Unable to create event %s, error %d\n",
probe_desc->event_desc[i]->name, ret);
}
}
-/*
- * Iterate over all the UST sessions to unregister and destroy all probes from
- * the probe provider descriptor received as argument. Must me called with the
- * ust_lock held.
- */
-void lttng_probe_provider_unregister_events(struct lttng_probe_desc *provider_desc)
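+/*
+ * For each event description of the probe provider, invoke event_func
+ * on every matching event in every session, and trigger_func on every
+ * matching trigger in every trigger group.
+ */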
+static
+void probe_provider_event_for_each(struct lttng_probe_desc *provider_desc,
+ void (*event_func)(struct lttng_session *session,
+ struct lttng_event *event),
+ void (*trigger_func)(struct lttng_trigger *trigger))
{
struct cds_hlist_node *node, *tmp_node;
struct cds_list_head *sessionsp;
- struct lttng_session *session;
- struct cds_hlist_head *head;
- struct lttng_event *event;
- unsigned int i, j;
+ unsigned int i;
/* Get handle on list of sessions. */
sessionsp = _lttng_get_sessions();
/*
- * Iterate over all events in the probe provider descriptions and sessions
- * to queue the unregistration of the events.
+ * Iterate over all events in the probe provider descriptions and
+ * sessions to queue the unregistration of the events.
*/
for (i = 0; i < provider_desc->nr_events; i++) {
const struct lttng_event_desc *event_desc;
- const char *event_name;
- size_t name_len;
- uint32_t hash;
+ struct lttng_trigger_group *trigger_group;
+ struct lttng_trigger *trigger;
+ struct lttng_session *session;
+ struct cds_hlist_head *head;
+ struct lttng_event *event;
event_desc = provider_desc->event_desc[i];
- event_name = event_desc->name;
- name_len = strlen(event_name);
- hash = jhash(event_name, name_len, 0);
- /* Iterate over all session to find the current event description. */
+ /*
+		 * Iterate over all sessions to find the current event
+ * description.
+ */
cds_list_for_each_entry(session, sessionsp, node) {
/*
- * Get the list of events in the hashtable bucket and iterate to
- * find the event matching this descriptor.
+ * Get the list of events in the hashtable bucket and
+ * iterate to find the event matching this descriptor.
*/
- head = &session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];
- cds_hlist_for_each_entry(event, node, head, hlist) {
+ head = borrow_hash_table_bucket(
+ session->events_ht.table,
+ LTTNG_UST_EVENT_HT_SIZE, event_desc);
+
+ cds_hlist_for_each_entry_safe(event, node, tmp_node, head, hlist) {
if (event_desc == event->desc) {
- /* Queue the unregistration of this event. */
- _lttng_event_unregister(event);
+ event_func(session, event);
+ break;
+ }
+ }
+ }
+
+ /*
+ * Iterate over all trigger groups to find the current event
+ * description.
+ */
+ cds_list_for_each_entry(trigger_group, &trigger_groups, node) {
+ /*
+ * Get the list of triggers in the hashtable bucket and
+ * iterate to find the trigger matching this
+ * descriptor.
+ */
+ head = borrow_hash_table_bucket(
+ trigger_group->triggers_ht.table,
+ LTTNG_UST_TRIGGER_HT_SIZE, event_desc);
+
+ cds_hlist_for_each_entry_safe(trigger, node, tmp_node, head, hlist) {
+ if (event_desc == trigger->desc) {
+ trigger_func(trigger);
break;
}
}
}
}
+}
+
+static
+void _unregister_event(struct lttng_session *session,
+ struct lttng_event *event)
+{
+ _lttng_event_unregister(event);
+}
+
+static
+void _event_enum_destroy(struct lttng_session *session,
+ struct lttng_event *event)
+{
+ unsigned int i;
+
+ /* Destroy enums of the current event. */
+ for (i = 0; i < event->desc->nr_fields; i++) {
+ const struct lttng_enum_desc *enum_desc;
+ const struct lttng_event_field *field;
+ struct lttng_enum *curr_enum;
+
+ field = &(event->desc->fields[i]);
+ switch (field->type.atype) {
+ case atype_enum:
+ enum_desc = field->type.u.legacy.basic.enumeration.desc;
+ break;
+ case atype_enum_nestable:
+ enum_desc = field->type.u.enum_nestable.desc;
+ break;
+ default:
+ continue;
+ }
+
+ curr_enum = lttng_ust_enum_get_from_desc(session, enum_desc);
+ if (curr_enum) {
+ _lttng_enum_destroy(curr_enum);
+ }
+ }
+
+ /* Destroy event. */
+ _lttng_event_destroy(event);
+}
+
+/*
+ * Iterate over all the UST sessions to unregister and destroy all probes from
+ * the probe provider descriptor received as argument. Must be called with the
+ * ust_lock held.
+ */
+void lttng_probe_provider_unregister_events(
+ struct lttng_probe_desc *provider_desc)
+{
+ /*
+ * Iterate over all events in the probe provider descriptions and sessions
+ * to queue the unregistration of the events.
+ */
+ probe_provider_event_for_each(provider_desc, _unregister_event,
+ _lttng_trigger_unregister);
/* Wait for grace period. */
synchronize_trace();
* It is now safe to destroy the events and remove them from the event list
* and hashtables.
*/
- for (i = 0; i < provider_desc->nr_events; i++) {
- const struct lttng_event_desc *event_desc;
- const char *event_name;
- size_t name_len;
- uint32_t hash;
-
- event_desc = provider_desc->event_desc[i];
- event_name = event_desc->name;
- name_len = strlen(event_name);
- hash = jhash(event_name, name_len, 0);
-
- /* Iterate over all sessions to find the current event description. */
- cds_list_for_each_entry(session, sessionsp, node) {
- /*
- * Get the list of events in the hashtable bucket and iterate to
- * find the event matching this descriptor.
- */
- head = &session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];
- cds_hlist_for_each_entry_safe(event, node, tmp_node, head, hlist) {
- if (event_desc == event->desc) {
- /* Destroy enums of the current event. */
- for (j = 0; j < event->desc->nr_fields; j++) {
- const struct lttng_enum_desc *enum_desc;
- const struct lttng_event_field *field;
- struct lttng_enum *curr_enum;
-
- field = &(event->desc->fields[j]);
- switch (field->type.atype) {
- case atype_enum:
- enum_desc = field->type.u.legacy.basic.enumeration.desc;
- break;
- case atype_enum_nestable:
- enum_desc = field->type.u.enum_nestable.desc;
- break;
- default:
- continue;
- }
- curr_enum = lttng_ust_enum_get_from_desc(session, enum_desc);
- if (curr_enum) {
- _lttng_enum_destroy(curr_enum);
- }
- }
-
- /* Destroy event. */
- _lttng_event_destroy(event);
- break;
- }
- }
- }
- }
+ probe_provider_event_for_each(provider_desc, _event_enum_destroy,
+ _lttng_trigger_destroy);
}
/*
- * Create events associated with an enabler (if not already present),
+ * Create events associated with an event enabler (if not already present),
* and add backward reference from the event to the enabler.
*/
static
-int lttng_enabler_ref_events(struct lttng_enabler *enabler)
+int lttng_event_enabler_ref_events(struct lttng_event_enabler *event_enabler)
{
- struct lttng_session *session = enabler->chan->session;
+ struct lttng_session *session = event_enabler->chan->session;
struct lttng_event *event;
- if (!enabler->enabled)
+ if (!lttng_event_enabler_as_enabler(event_enabler)->enabled)
goto end;
/* First ensure that probe events are created for this enabler. */
- lttng_create_event_if_missing(enabler);
+ lttng_create_event_if_missing(event_enabler);
/* For each event matching enabler in session event list. */
cds_list_for_each_entry(event, &session->events_head, node) {
struct lttng_enabler_ref *enabler_ref;
- if (!lttng_event_match_enabler(event, enabler))
+ if (!lttng_event_enabler_match_event(event_enabler, event))
continue;
- enabler_ref = lttng_event_enabler_ref(event, enabler);
+ enabler_ref = lttng_enabler_ref(&event->enablers_ref_head,
+ lttng_event_enabler_as_enabler(event_enabler));
if (!enabler_ref) {
/*
* If no backward ref, create it.
enabler_ref = zmalloc(sizeof(*enabler_ref));
if (!enabler_ref)
return -ENOMEM;
- enabler_ref->ref = enabler;
+ enabler_ref->ref = lttng_event_enabler_as_enabler(
+ event_enabler);
cds_list_add(&enabler_ref->node,
&event->enablers_ref_head);
}
/*
* Link filter bytecodes if not linked yet.
*/
- lttng_enabler_event_link_bytecode(event, enabler);
+ lttng_enabler_link_bytecode(event->desc,
+ &session->ctx,
+ &event->filter_bytecode_runtime_head,
+ <tng_event_enabler_as_enabler(event_enabler)->filter_bytecode_head);
/* TODO: merge event context. */
}
struct lttng_session *session;
cds_list_for_each_entry(session, &sessions, node) {
- lttng_session_lazy_sync_enablers(session);
+ lttng_session_lazy_sync_event_enablers(session);
+ }
+ return 0;
+}
+
+int lttng_fix_pending_triggers(void)
+{
+ struct lttng_trigger_group *trigger_group;
+
+ cds_list_for_each_entry(trigger_group, &trigger_groups, node) {
+ lttng_trigger_group_sync_enablers(trigger_group);
}
return 0;
}
/*
* Enabler management.
*/
-struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
+struct lttng_event_enabler *lttng_event_enabler_create(
+ enum lttng_enabler_format_type format_type,
struct lttng_ust_event *event_param,
struct lttng_channel *chan)
{
- struct lttng_enabler *enabler;
+ struct lttng_event_enabler *event_enabler;
- enabler = zmalloc(sizeof(*enabler));
- if (!enabler)
+ event_enabler = zmalloc(sizeof(*event_enabler));
+ if (!event_enabler)
return NULL;
- enabler->type = type;
- CDS_INIT_LIST_HEAD(&enabler->filter_bytecode_head);
- CDS_INIT_LIST_HEAD(&enabler->excluder_head);
- memcpy(&enabler->event_param, event_param,
- sizeof(enabler->event_param));
- enabler->chan = chan;
+ event_enabler->base.format_type = format_type;
+ CDS_INIT_LIST_HEAD(&event_enabler->base.filter_bytecode_head);
+ CDS_INIT_LIST_HEAD(&event_enabler->base.excluder_head);
+ memcpy(&event_enabler->base.event_param, event_param,
+ sizeof(event_enabler->base.event_param));
+ event_enabler->chan = chan;
/* ctx left NULL */
- enabler->enabled = 0;
- cds_list_add(&enabler->node, &enabler->chan->session->enablers_head);
- lttng_session_lazy_sync_enablers(enabler->chan->session);
- return enabler;
+ event_enabler->base.enabled = 0;
+ cds_list_add(&event_enabler->node, &event_enabler->chan->session->enablers_head);
+ lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
+
+ return event_enabler;
+}
+
+struct lttng_trigger_enabler *lttng_trigger_enabler_create(
+ struct lttng_trigger_group *trigger_group,
+ enum lttng_enabler_format_type format_type,
+ struct lttng_ust_trigger *trigger_param)
+{
+ struct lttng_trigger_enabler *trigger_enabler;
+
+ trigger_enabler = zmalloc(sizeof(*trigger_enabler));
+ if (!trigger_enabler)
+ return NULL;
+ trigger_enabler->base.format_type = format_type;
+ CDS_INIT_LIST_HEAD(&trigger_enabler->base.filter_bytecode_head);
+ CDS_INIT_LIST_HEAD(&trigger_enabler->capture_bytecode_head);
+ CDS_INIT_LIST_HEAD(&trigger_enabler->base.excluder_head);
+
+ trigger_enabler->id = trigger_param->id;
+ trigger_enabler->num_captures = 0;
+
+ memcpy(&trigger_enabler->base.event_param.name, trigger_param->name,
+ sizeof(trigger_enabler->base.event_param.name));
+ trigger_enabler->base.event_param.instrumentation = trigger_param->instrumentation;
+ trigger_enabler->base.event_param.loglevel = trigger_param->loglevel;
+ trigger_enabler->base.event_param.loglevel_type = trigger_param->loglevel_type;
+
+ trigger_enabler->base.enabled = 0;
+ trigger_enabler->group = trigger_group;
+
+ cds_list_add(&trigger_enabler->node, &trigger_group->enablers_head);
+
+ lttng_trigger_group_sync_enablers(trigger_group);
+
+ return trigger_enabler;
}
-int lttng_enabler_enable(struct lttng_enabler *enabler)
+int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler)
{
- enabler->enabled = 1;
- lttng_session_lazy_sync_enablers(enabler->chan->session);
+ lttng_event_enabler_as_enabler(event_enabler)->enabled = 1;
+ lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
+
return 0;
}
-int lttng_enabler_disable(struct lttng_enabler *enabler)
+int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler)
{
- enabler->enabled = 0;
- lttng_session_lazy_sync_enablers(enabler->chan->session);
+ lttng_event_enabler_as_enabler(event_enabler)->enabled = 0;
+ lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
+
return 0;
}
-int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
- struct lttng_ust_filter_bytecode_node *bytecode)
+static
+void _lttng_enabler_attach_filter_bytecode(struct lttng_enabler *enabler,
+ struct lttng_ust_bytecode_node *bytecode)
{
bytecode->enabler = enabler;
cds_list_add_tail(&bytecode->node, &enabler->filter_bytecode_head);
- lttng_session_lazy_sync_enablers(enabler->chan->session);
+}
+
+int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler *event_enabler,
+ struct lttng_ust_bytecode_node *bytecode)
+{
+ _lttng_enabler_attach_filter_bytecode(
+ lttng_event_enabler_as_enabler(event_enabler), bytecode);
+
+ lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
return 0;
}
-int lttng_enabler_attach_exclusion(struct lttng_enabler *enabler,
+static
+void _lttng_enabler_attach_exclusion(struct lttng_enabler *enabler,
struct lttng_ust_excluder_node *excluder)
{
excluder->enabler = enabler;
cds_list_add_tail(&excluder->node, &enabler->excluder_head);
- lttng_session_lazy_sync_enablers(enabler->chan->session);
+}
+
+int lttng_event_enabler_attach_exclusion(struct lttng_event_enabler *event_enabler,
+ struct lttng_ust_excluder_node *excluder)
+{
+ _lttng_enabler_attach_exclusion(
+ lttng_event_enabler_as_enabler(event_enabler), excluder);
+
+ lttng_session_lazy_sync_event_enablers(event_enabler->chan->session);
+ return 0;
+}
+
+int lttng_trigger_enabler_enable(struct lttng_trigger_enabler *trigger_enabler)
+{
+ lttng_trigger_enabler_as_enabler(trigger_enabler)->enabled = 1;
+ lttng_trigger_group_sync_enablers(trigger_enabler->group);
+
+ return 0;
+}
+
+int lttng_trigger_enabler_disable(struct lttng_trigger_enabler *trigger_enabler)
+{
+ lttng_trigger_enabler_as_enabler(trigger_enabler)->enabled = 0;
+ lttng_trigger_group_sync_enablers(trigger_enabler->group);
+
+ return 0;
+}
+
+int lttng_trigger_enabler_attach_filter_bytecode(
+ struct lttng_trigger_enabler *trigger_enabler,
+ struct lttng_ust_bytecode_node *bytecode)
+{
+ _lttng_enabler_attach_filter_bytecode(
+ lttng_trigger_enabler_as_enabler(trigger_enabler), bytecode);
+
+ lttng_trigger_group_sync_enablers(trigger_enabler->group);
+ return 0;
+}
+
+int lttng_trigger_enabler_attach_capture_bytecode(
+ struct lttng_trigger_enabler *trigger_enabler,
+ struct lttng_ust_bytecode_node *bytecode)
+{
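+	/*
+	 * num_captures is propagated to matching triggers when enablers
+	 * are synced (see lttng_trigger_enabler_ref_triggers).
+	 */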
+ bytecode->enabler = lttng_trigger_enabler_as_enabler(trigger_enabler);
+ cds_list_add_tail(&bytecode->node, &trigger_enabler->capture_bytecode_head);
+ trigger_enabler->num_captures++;
+
+ lttng_trigger_group_sync_enablers(trigger_enabler->group);
+ return 0;
+}
+
+int lttng_trigger_enabler_attach_exclusion(
+ struct lttng_trigger_enabler *trigger_enabler,
+ struct lttng_ust_excluder_node *excluder)
+{
+ _lttng_enabler_attach_exclusion(
+ lttng_trigger_enabler_as_enabler(trigger_enabler), excluder);
+
+ lttng_trigger_group_sync_enablers(trigger_enabler->group);
return 0;
}
}
}
-int lttng_enabler_attach_context(struct lttng_enabler *enabler,
+int lttng_event_enabler_attach_context(struct lttng_event_enabler *enabler,
struct lttng_ust_context *context_param)
{
-#if 0 // disabled for now.
- struct lttng_session *session = enabler->chan->session;
- int ret;
-
- ret = lttng_attach_context(context_param, &enabler->ctx,
- session);
- if (ret)
- return ret;
- lttng_session_lazy_sync_enablers(enabler->chan->session);
-#endif
return -ENOSYS;
}
-static
-void lttng_enabler_destroy(struct lttng_enabler *enabler)
+void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler)
{
- struct lttng_ust_filter_bytecode_node *filter_node, *tmp_filter_node;
- struct lttng_ust_excluder_node *excluder_node, *tmp_excluder_node;
-
- /* Destroy filter bytecode */
- cds_list_for_each_entry_safe(filter_node, tmp_filter_node,
- &enabler->filter_bytecode_head, node) {
- free(filter_node);
- }
-
- /* Destroy excluders */
- cds_list_for_each_entry_safe(excluder_node, tmp_excluder_node,
- &enabler->excluder_head, node) {
- free(excluder_node);
+ if (!event_enabler) {
+ return;
}
+ cds_list_del(&event_enabler->node);
- /* Destroy contexts */
- lttng_destroy_context(enabler->ctx);
+ lttng_enabler_destroy(lttng_event_enabler_as_enabler(event_enabler));
- cds_list_del(&enabler->node);
- free(enabler);
+ lttng_destroy_context(event_enabler->ctx);
+ free(event_enabler);
}
/*
- * lttng_session_sync_enablers should be called just before starting a
+ * lttng_session_sync_event_enablers should be called just before starting a
* session.
*/
static
-void lttng_session_sync_enablers(struct lttng_session *session)
+void lttng_session_sync_event_enablers(struct lttng_session *session)
{
- struct lttng_enabler *enabler;
+ struct lttng_event_enabler *event_enabler;
struct lttng_event *event;
- cds_list_for_each_entry(enabler, &session->enablers_head, node)
- lttng_enabler_ref_events(enabler);
+ cds_list_for_each_entry(event_enabler, &session->enablers_head, node)
+ lttng_event_enabler_ref_events(event_enabler);
/*
* For each event, if at least one of its enablers is enabled,
* and its channel and session transient states are enabled, we
/* Enable filters */
cds_list_for_each_entry(runtime,
- &event->bytecode_runtime_head, node) {
- lttng_filter_sync_state(runtime);
+ &event->filter_bytecode_runtime_head, node) {
+ lttng_bytecode_filter_sync_state(runtime);
+ }
+ }
+ __tracepoint_probe_prune_release_queue();
+}
+
+static
+void lttng_create_trigger_if_missing(struct lttng_trigger_enabler *trigger_enabler)
+{
+ struct lttng_trigger_group *trigger_group = trigger_enabler->group;
+ struct lttng_probe_desc *probe_desc;
+ struct cds_list_head *probe_list;
+ int i;
+
+ probe_list = lttng_get_probe_list_head();
+
+ cds_list_for_each_entry(probe_desc, probe_list, head) {
+ for (i = 0; i < probe_desc->nr_events; i++) {
+ int ret;
+ bool found = false;
+ const struct lttng_event_desc *desc;
+ struct lttng_trigger *trigger;
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
+
+ desc = probe_desc->event_desc[i];
+
+ if (!lttng_desc_match_enabler(desc,
+ lttng_trigger_enabler_as_enabler(trigger_enabler)))
+ continue;
+
+ /*
+			 * Given the current trigger group, get the bucket
+			 * the target trigger would be in if it had already
+			 * been created.
+ */
+ head = borrow_hash_table_bucket(
+ trigger_group->triggers_ht.table,
+ LTTNG_UST_TRIGGER_HT_SIZE, desc);
+
+ cds_hlist_for_each_entry(trigger, node, head, hlist) {
+ /*
+				 * A trigger already exists if it shares the
+				 * same description and id with the enabler.
+ */
+ if (trigger->desc == desc &&
+ trigger->id == trigger_enabler->id) {
+ found = true;
+ break;
+ }
+ }
+
+ if (found)
+ continue;
+
+ /*
+ * We need to create a trigger for this event probe.
+ */
+ ret = lttng_trigger_create(desc, trigger_enabler->id,
+ trigger_enabler->error_counter_index,
+ trigger_group);
+ if (ret) {
+ DBG("Unable to create trigger %s, error %d\n",
+ probe_desc->event_desc[i]->name, ret);
+ }
+ }
+ }
+}
+
+/*
+ * Create triggers associated with a trigger enabler (if not already present).
+ */
+static
+int lttng_trigger_enabler_ref_triggers(struct lttng_trigger_enabler *trigger_enabler)
+{
+ struct lttng_trigger_group *trigger_group = trigger_enabler->group;
+ struct lttng_trigger *trigger;
+
+ /*
+	 * Only try to create triggers for enablers that are enabled; the user
+	 * might still be attaching filters or exclusions to the
+ * trigger_enabler.
+ */
+ if (!lttng_trigger_enabler_as_enabler(trigger_enabler)->enabled)
+ goto end;
+
+ /* First, ensure that probe triggers are created for this enabler. */
+ lttng_create_trigger_if_missing(trigger_enabler);
+
+ /* Link the created trigger with its associated enabler. */
+ cds_list_for_each_entry(trigger, &trigger_group->triggers_head, node) {
+ struct lttng_enabler_ref *enabler_ref;
+
+ if (!lttng_trigger_enabler_match_trigger(trigger_enabler, trigger))
+ continue;
+
+ enabler_ref = lttng_enabler_ref(&trigger->enablers_ref_head,
+ lttng_trigger_enabler_as_enabler(trigger_enabler));
+ if (!enabler_ref) {
+ /*
+ * If no backward ref, create it.
+ * Add backward ref from trigger to enabler.
+ */
+ enabler_ref = zmalloc(sizeof(*enabler_ref));
+ if (!enabler_ref)
+ return -ENOMEM;
+
+ enabler_ref->ref = lttng_trigger_enabler_as_enabler(
+ trigger_enabler);
+ cds_list_add(&enabler_ref->node,
+ &trigger->enablers_ref_head);
+ }
+
+ /*
+ * Link filter bytecodes if not linked yet.
+ */
+ lttng_enabler_link_bytecode(trigger->desc,
+ &trigger_group->ctx, &trigger->filter_bytecode_runtime_head,
+ <tng_trigger_enabler_as_enabler(trigger_enabler)->filter_bytecode_head);
+
+ /*
+ * Link capture bytecodes if not linked yet.
+ */
+ lttng_enabler_link_bytecode(trigger->desc,
+ &trigger_group->ctx, &trigger->capture_bytecode_runtime_head,
+ &trigger_enabler->capture_bytecode_head);
+ trigger->num_captures = trigger_enabler->num_captures;
+ }
+end:
+ return 0;
+}
+
+static
+void lttng_trigger_group_sync_enablers(struct lttng_trigger_group *trigger_group)
+{
+ struct lttng_trigger_enabler *trigger_enabler;
+ struct lttng_trigger *trigger;
+
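+	/* First, create missing triggers and link them to their enablers. */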
+ cds_list_for_each_entry(trigger_enabler, &trigger_group->enablers_head, node)
+ lttng_trigger_enabler_ref_triggers(trigger_enabler);
+
+ /*
+	 * For each trigger, if at least one of its enablers is enabled,
+	 * we enable the trigger; otherwise we disable it.
+ */
+ cds_list_for_each_entry(trigger, &trigger_group->triggers_head, node) {
+ struct lttng_enabler_ref *enabler_ref;
+ struct lttng_bytecode_runtime *runtime;
+ int enabled = 0, has_enablers_without_bytecode = 0;
+
+ /* Enable triggers */
+ cds_list_for_each_entry(enabler_ref,
+ &trigger->enablers_ref_head, node) {
+ if (enabler_ref->ref->enabled) {
+ enabled = 1;
+ break;
+ }
+ }
+
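+		/*
+		 * Publish the computed state with CMM_STORE_SHARED:
+		 * probe callbacks may read trigger->enabled concurrently.
+		 */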
+ CMM_STORE_SHARED(trigger->enabled, enabled);
+ /*
+ * Sync tracepoint registration with trigger enabled
+ * state.
+ */
+ if (enabled) {
+ if (!trigger->registered)
+ register_trigger(trigger);
+ } else {
+ if (trigger->registered)
+ unregister_trigger(trigger);
+ }
+
+		/* Check if the trigger has any enabled enabler without filter bytecode. */
+ cds_list_for_each_entry(enabler_ref,
+ &trigger->enablers_ref_head, node) {
+ if (enabler_ref->ref->enabled
+ && cds_list_empty(&enabler_ref->ref->filter_bytecode_head)) {
+ has_enablers_without_bytecode = 1;
+ break;
+ }
+ }
+ trigger->has_enablers_without_bytecode =
+ has_enablers_without_bytecode;
+
+ /* Enable filters */
+ cds_list_for_each_entry(runtime,
+ &trigger->filter_bytecode_runtime_head, node) {
+ lttng_bytecode_filter_sync_state(runtime);
+ }
+
+ /* Enable captures. */
+ cds_list_for_each_entry(runtime,
+ &trigger->capture_bytecode_runtime_head, node) {
+ lttng_bytecode_capture_sync_state(runtime);
+		}
+	}
+	__tracepoint_probe_prune_release_queue();
+}
* "lazy" sync means we only sync if required.
*/
static
-void lttng_session_lazy_sync_enablers(struct lttng_session *session)
+void lttng_session_lazy_sync_event_enablers(struct lttng_session *session)
{
/* We can skip if session is not active */
if (!session->active)
return;
- lttng_session_sync_enablers(session);
+ lttng_session_sync_event_enablers(session);
}
/*
}
}
}
+
+/*
+ * Update all trigger groups with the given app context.
+ * Called with ust lock held.
+ * This is invoked when an application context is loaded or unloaded. It
+ * ensures the context callbacks stay in sync with the application
+ * context (either the app context callbacks or dummy callbacks).
+ */
+void lttng_ust_context_set_trigger_group_provider(const char *name,
+ size_t (*get_size)(struct lttng_ctx_field *field, size_t offset),
+ void (*record)(struct lttng_ctx_field *field,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_channel *chan),
+ void (*get_value)(struct lttng_ctx_field *field,
+ struct lttng_ctx_value *value))
+{
+ struct lttng_trigger_group *trigger_group;
+
+ cds_list_for_each_entry(trigger_group, &trigger_groups, node) {
+ int ret;
+
+ ret = lttng_ust_context_set_provider_rcu(&trigger_group->ctx,
+ name, get_size, record, get_value);
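+		/*
+		 * A provider update failure leaves the context tables
+		 * inconsistent; there is no sane way to recover, so abort.
+		 */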
+ if (ret)
+ abort();
+ }
+}
+++ /dev/null
-/*
- * lttng-filter-interpreter.c
- *
- * LTTng UST filter interpreter.
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#define _LGPL_SOURCE
-#include <stddef.h>
-#include <stdint.h>
-#include <urcu-pointer.h>
-#include <byteswap.h>
-#include "lttng-filter.h"
-#include "string-utils.h"
-
-/*
- * -1: wildcard found.
- * -2: unknown escape char.
- * 0: normal char.
- */
-
-static
-int parse_char(const char **p)
-{
- switch (**p) {
- case '\\':
- (*p)++;
- switch (**p) {
- case '\\':
- case '*':
- return 0;
- default:
- return -2;
- }
- case '*':
- return -1;
- default:
- return 0;
- }
-}
-
-/*
- * Returns SIZE_MAX if the string is null-terminated, or the number of
- * characters if not.
- */
-static
-size_t get_str_or_seq_len(const struct estack_entry *entry)
-{
- return entry->u.s.seq_len;
-}
-
-static
-int stack_star_glob_match(struct estack *stack, int top, const char *cmp_type)
-{
- const char *pattern;
- const char *candidate;
- size_t pattern_len;
- size_t candidate_len;
-
- /* Find out which side is the pattern vs. the candidate. */
- if (estack_ax(stack, top)->u.s.literal_type == ESTACK_STRING_LITERAL_TYPE_STAR_GLOB) {
- pattern = estack_ax(stack, top)->u.s.str;
- pattern_len = get_str_or_seq_len(estack_ax(stack, top));
- candidate = estack_bx(stack, top)->u.s.str;
- candidate_len = get_str_or_seq_len(estack_bx(stack, top));
- } else {
- pattern = estack_bx(stack, top)->u.s.str;
- pattern_len = get_str_or_seq_len(estack_bx(stack, top));
- candidate = estack_ax(stack, top)->u.s.str;
- candidate_len = get_str_or_seq_len(estack_ax(stack, top));
- }
-
- /* Perform the match. Returns 0 when the result is true. */
- return !strutils_star_glob_match(pattern, pattern_len, candidate,
- candidate_len);
-}
-
-static
-int stack_strcmp(struct estack *stack, int top, const char *cmp_type)
-{
- const char *p = estack_bx(stack, top)->u.s.str, *q = estack_ax(stack, top)->u.s.str;
- int ret;
- int diff;
-
- for (;;) {
- int escaped_r0 = 0;
-
- if (unlikely(p - estack_bx(stack, top)->u.s.str >= estack_bx(stack, top)->u.s.seq_len || *p == '\0')) {
- if (q - estack_ax(stack, top)->u.s.str >= estack_ax(stack, top)->u.s.seq_len || *q == '\0') {
- return 0;
- } else {
- if (estack_ax(stack, top)->u.s.literal_type ==
- ESTACK_STRING_LITERAL_TYPE_PLAIN) {
- ret = parse_char(&q);
- if (ret == -1)
- return 0;
- }
- return -1;
- }
- }
- if (unlikely(q - estack_ax(stack, top)->u.s.str >= estack_ax(stack, top)->u.s.seq_len || *q == '\0')) {
- if (estack_bx(stack, top)->u.s.literal_type ==
- ESTACK_STRING_LITERAL_TYPE_PLAIN) {
- ret = parse_char(&p);
- if (ret == -1)
- return 0;
- }
- return 1;
- }
- if (estack_bx(stack, top)->u.s.literal_type ==
- ESTACK_STRING_LITERAL_TYPE_PLAIN) {
- ret = parse_char(&p);
- if (ret == -1) {
- return 0;
- } else if (ret == -2) {
- escaped_r0 = 1;
- }
- /* else compare both char */
- }
- if (estack_ax(stack, top)->u.s.literal_type ==
- ESTACK_STRING_LITERAL_TYPE_PLAIN) {
- ret = parse_char(&q);
- if (ret == -1) {
- return 0;
- } else if (ret == -2) {
- if (!escaped_r0)
- return -1;
- } else {
- if (escaped_r0)
- return 1;
- }
- } else {
- if (escaped_r0)
- return 1;
- }
- diff = *p - *q;
- if (diff != 0)
- break;
- p++;
- q++;
- }
- return diff;
-}
-
-uint64_t lttng_filter_false(void *filter_data,
- const char *filter_stack_data)
-{
- return LTTNG_FILTER_DISCARD;
-}
-
-#ifdef INTERPRETER_USE_SWITCH
-
-/*
- * Fallback for compilers that do not support taking address of labels.
- */
-
-#define START_OP \
- start_pc = &bytecode->data[0]; \
- for (pc = next_pc = start_pc; pc - start_pc < bytecode->len; \
- pc = next_pc) { \
- dbg_printf("Executing op %s (%u)\n", \
- print_op((unsigned int) *(filter_opcode_t *) pc), \
- (unsigned int) *(filter_opcode_t *) pc); \
- switch (*(filter_opcode_t *) pc) {
-
-#define OP(name) jump_target_##name: __attribute__((unused)); \
- case name
-
-#define PO break
-
-#define END_OP } \
- }
-
-#define JUMP_TO(name) \
- goto jump_target_##name
-
-#else
-
-/*
- * Dispatch-table based interpreter.
- */
-
-#define START_OP \
- start_pc = &bytecode->code[0]; \
- pc = next_pc = start_pc; \
- if (unlikely(pc - start_pc >= bytecode->len)) \
- goto end; \
- goto *dispatch[*(filter_opcode_t *) pc];
-
-#define OP(name) \
-LABEL_##name
-
-#define PO \
- pc = next_pc; \
- goto *dispatch[*(filter_opcode_t *) pc];
-
-#define END_OP
-
-#define JUMP_TO(name) \
- goto LABEL_##name
-
-#endif
-
-static int context_get_index(struct lttng_ctx *ctx,
- struct load_ptr *ptr,
- uint32_t idx)
-{
-
- struct lttng_ctx_field *ctx_field;
- struct lttng_event_field *field;
- struct lttng_ctx_value v;
-
- ctx_field = &ctx->fields[idx];
- field = &ctx_field->event_field;
- ptr->type = LOAD_OBJECT;
- /* field is only used for types nested within variants. */
- ptr->field = NULL;
-
- switch (field->type.atype) {
- case atype_integer:
- ctx_field->get_value(ctx_field, &v);
- if (field->type.u.integer.signedness) {
- ptr->object_type = OBJECT_TYPE_S64;
- ptr->u.s64 = v.u.s64;
- ptr->ptr = &ptr->u.s64;
- } else {
- ptr->object_type = OBJECT_TYPE_U64;
- ptr->u.u64 = v.u.s64; /* Cast. */
- ptr->ptr = &ptr->u.u64;
- }
- break;
- case atype_enum: /* Fall-through */
- case atype_enum_nestable:
- {
- const struct lttng_integer_type *itype;
-
- if (field->type.atype == atype_enum) {
- itype = &field->type.u.legacy.basic.enumeration.container_type;
- } else {
- itype = &field->type.u.enum_nestable.container_type->u.integer;
- }
- ctx_field->get_value(ctx_field, &v);
- if (itype->signedness) {
- ptr->object_type = OBJECT_TYPE_S64;
- ptr->u.s64 = v.u.s64;
- ptr->ptr = &ptr->u.s64;
- } else {
- ptr->object_type = OBJECT_TYPE_U64;
- ptr->u.u64 = v.u.s64; /* Cast. */
- ptr->ptr = &ptr->u.u64;
- }
- break;
- }
- case atype_array:
- if (field->type.u.legacy.array.elem_type.atype != atype_integer) {
- ERR("Array nesting only supports integer types.");
- return -EINVAL;
- }
- if (field->type.u.legacy.array.elem_type.u.basic.integer.encoding == lttng_encode_none) {
- ERR("Only string arrays are supported for contexts.");
- return -EINVAL;
- }
- ptr->object_type = OBJECT_TYPE_STRING;
- ctx_field->get_value(ctx_field, &v);
- ptr->ptr = v.u.str;
- break;
- case atype_array_nestable:
- if (field->type.u.array_nestable.elem_type->atype != atype_integer) {
- ERR("Array nesting only supports integer types.");
- return -EINVAL;
- }
- if (field->type.u.array_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
- ERR("Only string arrays are supported for contexts.");
- return -EINVAL;
- }
- ptr->object_type = OBJECT_TYPE_STRING;
- ctx_field->get_value(ctx_field, &v);
- ptr->ptr = v.u.str;
- break;
- case atype_sequence:
- if (field->type.u.legacy.sequence.elem_type.atype != atype_integer) {
- ERR("Sequence nesting only supports integer types.");
- return -EINVAL;
- }
- if (field->type.u.legacy.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) {
- ERR("Only string sequences are supported for contexts.");
- return -EINVAL;
- }
- ptr->object_type = OBJECT_TYPE_STRING;
- ctx_field->get_value(ctx_field, &v);
- ptr->ptr = v.u.str;
- break;
- case atype_sequence_nestable:
- if (field->type.u.sequence_nestable.elem_type->atype != atype_integer) {
- ERR("Sequence nesting only supports integer types.");
- return -EINVAL;
- }
- if (field->type.u.sequence_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
- ERR("Only string sequences are supported for contexts.");
- return -EINVAL;
- }
- ptr->object_type = OBJECT_TYPE_STRING;
- ctx_field->get_value(ctx_field, &v);
- ptr->ptr = v.u.str;
- break;
- case atype_string:
- ptr->object_type = OBJECT_TYPE_STRING;
- ctx_field->get_value(ctx_field, &v);
- ptr->ptr = v.u.str;
- break;
- case atype_float:
- ptr->object_type = OBJECT_TYPE_DOUBLE;
- ctx_field->get_value(ctx_field, &v);
- ptr->u.d = v.u.d;
- ptr->ptr = &ptr->u.d;
- break;
- case atype_dynamic:
- ctx_field->get_value(ctx_field, &v);
- switch (v.sel) {
- case LTTNG_UST_DYNAMIC_TYPE_NONE:
- return -EINVAL;
- case LTTNG_UST_DYNAMIC_TYPE_S64:
- ptr->object_type = OBJECT_TYPE_S64;
- ptr->u.s64 = v.u.s64;
- ptr->ptr = &ptr->u.s64;
- dbg_printf("context get index dynamic s64 %" PRIi64 "\n", ptr->u.s64);
- break;
- case LTTNG_UST_DYNAMIC_TYPE_DOUBLE:
- ptr->object_type = OBJECT_TYPE_DOUBLE;
- ptr->u.d = v.u.d;
- ptr->ptr = &ptr->u.d;
- dbg_printf("context get index dynamic double %g\n", ptr->u.d);
- break;
- case LTTNG_UST_DYNAMIC_TYPE_STRING:
- ptr->object_type = OBJECT_TYPE_STRING;
- ptr->ptr = v.u.str;
- dbg_printf("context get index dynamic string %s\n", (const char *) ptr->ptr);
- break;
- default:
- dbg_printf("Filter warning: unknown dynamic type (%d).\n", (int) v.sel);
- return -EINVAL;
- }
- break;
- case atype_struct:
- ERR("Structure type cannot be loaded.");
- return -EINVAL;
- default:
- ERR("Unknown type: %d", (int) field->type.atype);
- return -EINVAL;
- }
- return 0;
-}
-
-static int dynamic_get_index(struct lttng_session *session,
- struct bytecode_runtime *runtime,
- uint64_t index, struct estack_entry *stack_top)
-{
- int ret;
- const struct filter_get_index_data *gid;
-
- /*
- * Types nested within variants need to perform dynamic lookup
- * based on the field descriptions. LTTng-UST does not implement
- * variants for now.
- */
- if (stack_top->u.ptr.field)
- return -EINVAL;
- gid = (const struct filter_get_index_data *) &runtime->data[index];
- switch (stack_top->u.ptr.type) {
- case LOAD_OBJECT:
- switch (stack_top->u.ptr.object_type) {
- case OBJECT_TYPE_ARRAY:
- {
- const char *ptr;
-
- assert(gid->offset < gid->array_len);
- /* Skip count (unsigned long) */
- ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
- ptr = ptr + gid->offset;
- stack_top->u.ptr.ptr = ptr;
- stack_top->u.ptr.object_type = gid->elem.type;
- stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
- /* field is only used for types nested within variants. */
- stack_top->u.ptr.field = NULL;
- break;
- }
- case OBJECT_TYPE_SEQUENCE:
- {
- const char *ptr;
- size_t ptr_seq_len;
-
- ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
- ptr_seq_len = *(unsigned long *) stack_top->u.ptr.ptr;
- if (gid->offset >= gid->elem.len * ptr_seq_len) {
- ret = -EINVAL;
- goto end;
- }
- ptr = ptr + gid->offset;
- stack_top->u.ptr.ptr = ptr;
- stack_top->u.ptr.object_type = gid->elem.type;
- stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
- /* field is only used for types nested within variants. */
- stack_top->u.ptr.field = NULL;
- break;
- }
- case OBJECT_TYPE_STRUCT:
- ERR("Nested structures are not supported yet.");
- ret = -EINVAL;
- goto end;
- case OBJECT_TYPE_VARIANT:
- default:
- ERR("Unexpected get index type %d",
- (int) stack_top->u.ptr.object_type);
- ret = -EINVAL;
- goto end;
- }
- break;
- case LOAD_ROOT_CONTEXT:
- case LOAD_ROOT_APP_CONTEXT: /* Fall-through */
- {
- struct lttng_ctx *ctx;
-
- ctx = rcu_dereference(session->ctx);
- ret = context_get_index(ctx,
- &stack_top->u.ptr,
- gid->ctx_index);
- if (ret) {
- goto end;
- }
- break;
- }
- case LOAD_ROOT_PAYLOAD:
- stack_top->u.ptr.ptr += gid->offset;
- if (gid->elem.type == OBJECT_TYPE_STRING)
- stack_top->u.ptr.ptr = *(const char * const *) stack_top->u.ptr.ptr;
- stack_top->u.ptr.object_type = gid->elem.type;
- stack_top->u.ptr.type = LOAD_OBJECT;
- /* field is only used for types nested within variants. */
- stack_top->u.ptr.field = NULL;
- break;
- }
- return 0;
-
-end:
- return ret;
-}
-
-static int dynamic_load_field(struct estack_entry *stack_top)
-{
- int ret;
-
- switch (stack_top->u.ptr.type) {
- case LOAD_OBJECT:
- break;
- case LOAD_ROOT_CONTEXT:
- case LOAD_ROOT_APP_CONTEXT:
- case LOAD_ROOT_PAYLOAD:
- default:
- dbg_printf("Filter warning: cannot load root, missing field name.\n");
- ret = -EINVAL;
- goto end;
- }
- switch (stack_top->u.ptr.object_type) {
- case OBJECT_TYPE_S8:
- dbg_printf("op load field s8\n");
- stack_top->u.v = *(int8_t *) stack_top->u.ptr.ptr;
- stack_top->type = REG_S64;
- break;
- case OBJECT_TYPE_S16:
- {
- int16_t tmp;
-
- dbg_printf("op load field s16\n");
- tmp = *(int16_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- tmp = bswap_16(tmp);
- stack_top->u.v = tmp;
- stack_top->type = REG_S64;
- break;
- }
- case OBJECT_TYPE_S32:
- {
- int32_t tmp;
-
- dbg_printf("op load field s32\n");
- tmp = *(int32_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- tmp = bswap_32(tmp);
- stack_top->u.v = tmp;
- stack_top->type = REG_S64;
- break;
- }
- case OBJECT_TYPE_S64:
- {
- int64_t tmp;
-
- dbg_printf("op load field s64\n");
- tmp = *(int64_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- tmp = bswap_64(tmp);
- stack_top->u.v = tmp;
- stack_top->type = REG_S64;
- break;
- }
- case OBJECT_TYPE_U8:
- dbg_printf("op load field u8\n");
- stack_top->u.v = *(uint8_t *) stack_top->u.ptr.ptr;
- stack_top->type = REG_S64;
- break;
- case OBJECT_TYPE_U16:
- {
- uint16_t tmp;
-
- dbg_printf("op load field u16\n");
- tmp = *(uint16_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- tmp = bswap_16(tmp);
- stack_top->u.v = tmp;
- stack_top->type = REG_S64;
- break;
- }
- case OBJECT_TYPE_U32:
- {
- uint32_t tmp;
-
- dbg_printf("op load field u32\n");
- tmp = *(uint32_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- tmp = bswap_32(tmp);
- stack_top->u.v = tmp;
- stack_top->type = REG_S64;
- break;
- }
- case OBJECT_TYPE_U64:
- {
- uint64_t tmp;
-
- dbg_printf("op load field u64\n");
- tmp = *(uint64_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- tmp = bswap_64(tmp);
- stack_top->u.v = tmp;
- stack_top->type = REG_S64;
- break;
- }
- case OBJECT_TYPE_DOUBLE:
- memcpy(&stack_top->u.d,
- stack_top->u.ptr.ptr,
- sizeof(struct literal_double));
- stack_top->type = REG_DOUBLE;
- break;
- case OBJECT_TYPE_STRING:
- {
- const char *str;
-
- dbg_printf("op load field string\n");
- str = (const char *) stack_top->u.ptr.ptr;
- stack_top->u.s.str = str;
- if (unlikely(!stack_top->u.s.str)) {
- dbg_printf("Filter warning: loading a NULL string.\n");
- ret = -EINVAL;
- goto end;
- }
- stack_top->u.s.seq_len = SIZE_MAX;
- stack_top->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- stack_top->type = REG_STRING;
- break;
- }
- case OBJECT_TYPE_STRING_SEQUENCE:
- {
- const char *ptr;
-
- dbg_printf("op load field string sequence\n");
- ptr = stack_top->u.ptr.ptr;
- stack_top->u.s.seq_len = *(unsigned long *) ptr;
- stack_top->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
- stack_top->type = REG_STRING;
- if (unlikely(!stack_top->u.s.str)) {
- dbg_printf("Filter warning: loading a NULL sequence.\n");
- ret = -EINVAL;
- goto end;
- }
- stack_top->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- break;
- }
- case OBJECT_TYPE_DYNAMIC:
- /*
- * Dynamic types in context are looked up
- * by context get index.
- */
- ret = -EINVAL;
- goto end;
- case OBJECT_TYPE_SEQUENCE:
- case OBJECT_TYPE_ARRAY:
- case OBJECT_TYPE_STRUCT:
- case OBJECT_TYPE_VARIANT:
- ERR("Sequences, arrays, struct and variant cannot be loaded (nested types).");
- ret = -EINVAL;
- goto end;
- }
- return 0;
-
-end:
- return ret;
-}
-
-/*
- * Return 0 (discard), or raise the 0x1 flag (log event).
- * Currently, other flags are kept for future extensions and have no
- * effect.
- */
-uint64_t lttng_filter_interpret_bytecode(void *filter_data,
- const char *filter_stack_data)
-{
- struct bytecode_runtime *bytecode = filter_data;
- struct lttng_session *session = bytecode->p.session;
- void *pc, *next_pc, *start_pc;
- int ret = -EINVAL;
- uint64_t retval = 0;
- struct estack _stack;
- struct estack *stack = &_stack;
- register int64_t ax = 0, bx = 0;
- register enum entry_type ax_t = REG_UNKNOWN, bx_t = REG_UNKNOWN;
- register int top = FILTER_STACK_EMPTY;
-#ifndef INTERPRETER_USE_SWITCH
- static void *dispatch[NR_FILTER_OPS] = {
- [ FILTER_OP_UNKNOWN ] = &&LABEL_FILTER_OP_UNKNOWN,
-
- [ FILTER_OP_RETURN ] = &&LABEL_FILTER_OP_RETURN,
-
- /* binary */
- [ FILTER_OP_MUL ] = &&LABEL_FILTER_OP_MUL,
- [ FILTER_OP_DIV ] = &&LABEL_FILTER_OP_DIV,
- [ FILTER_OP_MOD ] = &&LABEL_FILTER_OP_MOD,
- [ FILTER_OP_PLUS ] = &&LABEL_FILTER_OP_PLUS,
- [ FILTER_OP_MINUS ] = &&LABEL_FILTER_OP_MINUS,
- [ FILTER_OP_BIT_RSHIFT ] = &&LABEL_FILTER_OP_BIT_RSHIFT,
- [ FILTER_OP_BIT_LSHIFT ] = &&LABEL_FILTER_OP_BIT_LSHIFT,
- [ FILTER_OP_BIT_AND ] = &&LABEL_FILTER_OP_BIT_AND,
- [ FILTER_OP_BIT_OR ] = &&LABEL_FILTER_OP_BIT_OR,
- [ FILTER_OP_BIT_XOR ] = &&LABEL_FILTER_OP_BIT_XOR,
-
- /* binary comparators */
- [ FILTER_OP_EQ ] = &&LABEL_FILTER_OP_EQ,
- [ FILTER_OP_NE ] = &&LABEL_FILTER_OP_NE,
- [ FILTER_OP_GT ] = &&LABEL_FILTER_OP_GT,
- [ FILTER_OP_LT ] = &&LABEL_FILTER_OP_LT,
- [ FILTER_OP_GE ] = &&LABEL_FILTER_OP_GE,
- [ FILTER_OP_LE ] = &&LABEL_FILTER_OP_LE,
-
- /* string binary comparator */
- [ FILTER_OP_EQ_STRING ] = &&LABEL_FILTER_OP_EQ_STRING,
- [ FILTER_OP_NE_STRING ] = &&LABEL_FILTER_OP_NE_STRING,
- [ FILTER_OP_GT_STRING ] = &&LABEL_FILTER_OP_GT_STRING,
- [ FILTER_OP_LT_STRING ] = &&LABEL_FILTER_OP_LT_STRING,
- [ FILTER_OP_GE_STRING ] = &&LABEL_FILTER_OP_GE_STRING,
- [ FILTER_OP_LE_STRING ] = &&LABEL_FILTER_OP_LE_STRING,
-
- /* globbing pattern binary comparator */
- [ FILTER_OP_EQ_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_EQ_STAR_GLOB_STRING,
- [ FILTER_OP_NE_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_NE_STAR_GLOB_STRING,
-
- /* s64 binary comparator */
- [ FILTER_OP_EQ_S64 ] = &&LABEL_FILTER_OP_EQ_S64,
- [ FILTER_OP_NE_S64 ] = &&LABEL_FILTER_OP_NE_S64,
- [ FILTER_OP_GT_S64 ] = &&LABEL_FILTER_OP_GT_S64,
- [ FILTER_OP_LT_S64 ] = &&LABEL_FILTER_OP_LT_S64,
- [ FILTER_OP_GE_S64 ] = &&LABEL_FILTER_OP_GE_S64,
- [ FILTER_OP_LE_S64 ] = &&LABEL_FILTER_OP_LE_S64,
-
- /* double binary comparator */
- [ FILTER_OP_EQ_DOUBLE ] = &&LABEL_FILTER_OP_EQ_DOUBLE,
- [ FILTER_OP_NE_DOUBLE ] = &&LABEL_FILTER_OP_NE_DOUBLE,
- [ FILTER_OP_GT_DOUBLE ] = &&LABEL_FILTER_OP_GT_DOUBLE,
- [ FILTER_OP_LT_DOUBLE ] = &&LABEL_FILTER_OP_LT_DOUBLE,
- [ FILTER_OP_GE_DOUBLE ] = &&LABEL_FILTER_OP_GE_DOUBLE,
- [ FILTER_OP_LE_DOUBLE ] = &&LABEL_FILTER_OP_LE_DOUBLE,
-
- /* Mixed S64-double binary comparators */
- [ FILTER_OP_EQ_DOUBLE_S64 ] = &&LABEL_FILTER_OP_EQ_DOUBLE_S64,
- [ FILTER_OP_NE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_NE_DOUBLE_S64,
- [ FILTER_OP_GT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GT_DOUBLE_S64,
- [ FILTER_OP_LT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LT_DOUBLE_S64,
- [ FILTER_OP_GE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GE_DOUBLE_S64,
- [ FILTER_OP_LE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LE_DOUBLE_S64,
-
- [ FILTER_OP_EQ_S64_DOUBLE ] = &&LABEL_FILTER_OP_EQ_S64_DOUBLE,
- [ FILTER_OP_NE_S64_DOUBLE ] = &&LABEL_FILTER_OP_NE_S64_DOUBLE,
- [ FILTER_OP_GT_S64_DOUBLE ] = &&LABEL_FILTER_OP_GT_S64_DOUBLE,
- [ FILTER_OP_LT_S64_DOUBLE ] = &&LABEL_FILTER_OP_LT_S64_DOUBLE,
- [ FILTER_OP_GE_S64_DOUBLE ] = &&LABEL_FILTER_OP_GE_S64_DOUBLE,
- [ FILTER_OP_LE_S64_DOUBLE ] = &&LABEL_FILTER_OP_LE_S64_DOUBLE,
-
- /* unary */
- [ FILTER_OP_UNARY_PLUS ] = &&LABEL_FILTER_OP_UNARY_PLUS,
- [ FILTER_OP_UNARY_MINUS ] = &&LABEL_FILTER_OP_UNARY_MINUS,
- [ FILTER_OP_UNARY_NOT ] = &&LABEL_FILTER_OP_UNARY_NOT,
- [ FILTER_OP_UNARY_PLUS_S64 ] = &&LABEL_FILTER_OP_UNARY_PLUS_S64,
- [ FILTER_OP_UNARY_MINUS_S64 ] = &&LABEL_FILTER_OP_UNARY_MINUS_S64,
- [ FILTER_OP_UNARY_NOT_S64 ] = &&LABEL_FILTER_OP_UNARY_NOT_S64,
- [ FILTER_OP_UNARY_PLUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_PLUS_DOUBLE,
- [ FILTER_OP_UNARY_MINUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_MINUS_DOUBLE,
- [ FILTER_OP_UNARY_NOT_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_NOT_DOUBLE,
-
- /* logical */
- [ FILTER_OP_AND ] = &&LABEL_FILTER_OP_AND,
- [ FILTER_OP_OR ] = &&LABEL_FILTER_OP_OR,
-
- /* load field ref */
- [ FILTER_OP_LOAD_FIELD_REF ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF,
- [ FILTER_OP_LOAD_FIELD_REF_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_STRING,
- [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_SEQUENCE,
- [ FILTER_OP_LOAD_FIELD_REF_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_S64,
- [ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_DOUBLE,
-
- /* load from immediate operand */
- [ FILTER_OP_LOAD_STRING ] = &&LABEL_FILTER_OP_LOAD_STRING,
- [ FILTER_OP_LOAD_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_LOAD_STAR_GLOB_STRING,
- [ FILTER_OP_LOAD_S64 ] = &&LABEL_FILTER_OP_LOAD_S64,
- [ FILTER_OP_LOAD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_DOUBLE,
-
- /* cast */
- [ FILTER_OP_CAST_TO_S64 ] = &&LABEL_FILTER_OP_CAST_TO_S64,
- [ FILTER_OP_CAST_DOUBLE_TO_S64 ] = &&LABEL_FILTER_OP_CAST_DOUBLE_TO_S64,
- [ FILTER_OP_CAST_NOP ] = &&LABEL_FILTER_OP_CAST_NOP,
-
- /* get context ref */
- [ FILTER_OP_GET_CONTEXT_REF ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF,
- [ FILTER_OP_GET_CONTEXT_REF_STRING ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_STRING,
- [ FILTER_OP_GET_CONTEXT_REF_S64 ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_S64,
- [ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_DOUBLE,
-
- /* Instructions for recursive traversal through composed types. */
- [ FILTER_OP_GET_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_CONTEXT_ROOT,
- [ FILTER_OP_GET_APP_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_APP_CONTEXT_ROOT,
- [ FILTER_OP_GET_PAYLOAD_ROOT ] = &&LABEL_FILTER_OP_GET_PAYLOAD_ROOT,
-
- [ FILTER_OP_GET_SYMBOL ] = &&LABEL_FILTER_OP_GET_SYMBOL,
- [ FILTER_OP_GET_SYMBOL_FIELD ] = &&LABEL_FILTER_OP_GET_SYMBOL_FIELD,
- [ FILTER_OP_GET_INDEX_U16 ] = &&LABEL_FILTER_OP_GET_INDEX_U16,
- [ FILTER_OP_GET_INDEX_U64 ] = &&LABEL_FILTER_OP_GET_INDEX_U64,
-
- [ FILTER_OP_LOAD_FIELD ] = &&LABEL_FILTER_OP_LOAD_FIELD,
- [ FILTER_OP_LOAD_FIELD_S8 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S8,
- [ FILTER_OP_LOAD_FIELD_S16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S16,
- [ FILTER_OP_LOAD_FIELD_S32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S32,
- [ FILTER_OP_LOAD_FIELD_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S64,
- [ FILTER_OP_LOAD_FIELD_U8 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U8,
- [ FILTER_OP_LOAD_FIELD_U16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U16,
- [ FILTER_OP_LOAD_FIELD_U32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U32,
- [ FILTER_OP_LOAD_FIELD_U64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U64,
- [ FILTER_OP_LOAD_FIELD_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_STRING,
- [ FILTER_OP_LOAD_FIELD_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_SEQUENCE,
- [ FILTER_OP_LOAD_FIELD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_DOUBLE,
-
- [ FILTER_OP_UNARY_BIT_NOT ] = &&LABEL_FILTER_OP_UNARY_BIT_NOT,
-
- [ FILTER_OP_RETURN_S64 ] = &&LABEL_FILTER_OP_RETURN_S64,
- };
-#endif /* #ifndef INTERPRETER_USE_SWITCH */
-
- START_OP
-
- OP(FILTER_OP_UNKNOWN):
- OP(FILTER_OP_LOAD_FIELD_REF):
-#ifdef INTERPRETER_USE_SWITCH
- default:
-#endif /* INTERPRETER_USE_SWITCH */
- ERR("unknown bytecode op %u",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
-
- OP(FILTER_OP_RETURN):
- /* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */
- /* Handle dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64:
- retval = !!estack_ax_v;
- break;
- case REG_DOUBLE:
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- default:
- ret = -EINVAL;
- goto end;
- }
- ret = 0;
- goto end;
-
- OP(FILTER_OP_RETURN_S64):
- /* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */
- retval = !!estack_ax_v;
- ret = 0;
- goto end;
-
- /* binary */
- OP(FILTER_OP_MUL):
- OP(FILTER_OP_DIV):
- OP(FILTER_OP_MOD):
- OP(FILTER_OP_PLUS):
- OP(FILTER_OP_MINUS):
- ERR("unsupported bytecode op %u",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
-
- OP(FILTER_OP_EQ):
- {
- /* Dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64:
- switch (estack_bx_t) {
- case REG_S64:
- JUMP_TO(FILTER_OP_EQ_S64);
- case REG_DOUBLE:
- JUMP_TO(FILTER_OP_EQ_DOUBLE_S64);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown filter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_DOUBLE:
- switch (estack_bx_t) {
- case REG_S64:
- JUMP_TO(FILTER_OP_EQ_S64_DOUBLE);
- case REG_DOUBLE:
- JUMP_TO(FILTER_OP_EQ_DOUBLE);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown filter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_STRING:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_DOUBLE:
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- JUMP_TO(FILTER_OP_EQ_STRING);
- case REG_STAR_GLOB_STRING:
- JUMP_TO(FILTER_OP_EQ_STAR_GLOB_STRING);
- default:
- ERR("Unknown filter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_STAR_GLOB_STRING:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_DOUBLE:
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- JUMP_TO(FILTER_OP_EQ_STAR_GLOB_STRING);
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown filter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- default:
- ERR("Unknown filter register type (%d)",
- (int) estack_ax_t);
- ret = -EINVAL;
- goto end;
- }
- }
- OP(FILTER_OP_NE):
- {
- /* Dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64:
- switch (estack_bx_t) {
- case REG_S64:
- JUMP_TO(FILTER_OP_NE_S64);
- case REG_DOUBLE:
- JUMP_TO(FILTER_OP_NE_DOUBLE_S64);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown filter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_DOUBLE:
- switch (estack_bx_t) {
- case REG_S64:
- JUMP_TO(FILTER_OP_NE_S64_DOUBLE);
- case REG_DOUBLE:
- JUMP_TO(FILTER_OP_NE_DOUBLE);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown filter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_STRING:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_DOUBLE:
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- JUMP_TO(FILTER_OP_NE_STRING);
- case REG_STAR_GLOB_STRING:
- JUMP_TO(FILTER_OP_NE_STAR_GLOB_STRING);
- default:
- ERR("Unknown filter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_STAR_GLOB_STRING:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_DOUBLE:
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- JUMP_TO(FILTER_OP_NE_STAR_GLOB_STRING);
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown filter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- default:
- ERR("Unknown filter register type (%d)",
- (int) estack_ax_t);
- ret = -EINVAL;
- goto end;
- }
- }
- OP(FILTER_OP_GT):
- {
- /* Dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64:
- switch (estack_bx_t) {
- case REG_S64:
- JUMP_TO(FILTER_OP_GT_S64);
- case REG_DOUBLE:
- JUMP_TO(FILTER_OP_GT_DOUBLE_S64);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown filter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_DOUBLE:
- switch (estack_bx_t) {
- case REG_S64:
- JUMP_TO(FILTER_OP_GT_S64_DOUBLE);
- case REG_DOUBLE:
- JUMP_TO(FILTER_OP_GT_DOUBLE);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown filter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_STRING:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_DOUBLE: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- JUMP_TO(FILTER_OP_GT_STRING);
- default:
- ERR("Unknown filter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- default:
- ERR("Unknown filter register type (%d)",
- (int) estack_ax_t);
- ret = -EINVAL;
- goto end;
- }
- }
- OP(FILTER_OP_LT):
- {
- /* Dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64:
- switch (estack_bx_t) {
- case REG_S64:
- JUMP_TO(FILTER_OP_LT_S64);
- case REG_DOUBLE:
- JUMP_TO(FILTER_OP_LT_DOUBLE_S64);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown filter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_DOUBLE:
- switch (estack_bx_t) {
- case REG_S64:
- JUMP_TO(FILTER_OP_LT_S64_DOUBLE);
- case REG_DOUBLE:
- JUMP_TO(FILTER_OP_LT_DOUBLE);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown filter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_STRING:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_DOUBLE: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- JUMP_TO(FILTER_OP_LT_STRING);
- default:
- ERR("Unknown filter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- default:
- ERR("Unknown filter register type (%d)",
- (int) estack_ax_t);
- ret = -EINVAL;
- goto end;
- }
- }
- OP(FILTER_OP_GE):
- {
- /* Dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64:
- switch (estack_bx_t) {
- case REG_S64:
- JUMP_TO(FILTER_OP_GE_S64);
- case REG_DOUBLE:
- JUMP_TO(FILTER_OP_GE_DOUBLE_S64);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown filter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_DOUBLE:
- switch (estack_bx_t) {
- case REG_S64:
- JUMP_TO(FILTER_OP_GE_S64_DOUBLE);
- case REG_DOUBLE:
- JUMP_TO(FILTER_OP_GE_DOUBLE);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown filter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_STRING:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_DOUBLE: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- JUMP_TO(FILTER_OP_GE_STRING);
- default:
- ERR("Unknown filter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- default:
- ERR("Unknown filter register type (%d)",
- (int) estack_ax_t);
- ret = -EINVAL;
- goto end;
- }
- }
- OP(FILTER_OP_LE):
- {
- /* Dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64:
- switch (estack_bx_t) {
- case REG_S64:
- JUMP_TO(FILTER_OP_LE_S64);
- case REG_DOUBLE:
- JUMP_TO(FILTER_OP_LE_DOUBLE_S64);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown filter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_DOUBLE:
- switch (estack_bx_t) {
- case REG_S64:
- JUMP_TO(FILTER_OP_LE_S64_DOUBLE);
- case REG_DOUBLE:
- JUMP_TO(FILTER_OP_LE_DOUBLE);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown filter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_STRING:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_DOUBLE: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- JUMP_TO(FILTER_OP_LE_STRING);
- default:
- ERR("Unknown filter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- default:
- ERR("Unknown filter register type (%d)",
- (int) estack_ax_t);
- ret = -EINVAL;
- goto end;
- }
- }
-
- OP(FILTER_OP_EQ_STRING):
- {
- int res;
-
- res = (stack_strcmp(stack, top, "==") == 0);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_NE_STRING):
- {
- int res;
-
- res = (stack_strcmp(stack, top, "!=") != 0);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_GT_STRING):
- {
- int res;
-
- res = (stack_strcmp(stack, top, ">") > 0);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_LT_STRING):
- {
- int res;
-
- res = (stack_strcmp(stack, top, "<") < 0);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_GE_STRING):
- {
- int res;
-
- res = (stack_strcmp(stack, top, ">=") >= 0);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_LE_STRING):
- {
- int res;
-
- res = (stack_strcmp(stack, top, "<=") <= 0);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
-
- OP(FILTER_OP_EQ_STAR_GLOB_STRING):
- {
- int res;
-
- res = (stack_star_glob_match(stack, top, "==") == 0);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_NE_STAR_GLOB_STRING):
- {
- int res;
-
- res = (stack_star_glob_match(stack, top, "!=") != 0);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
-
- OP(FILTER_OP_EQ_S64):
- {
- int res;
-
- res = (estack_bx_v == estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_NE_S64):
- {
- int res;
-
- res = (estack_bx_v != estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_GT_S64):
- {
- int res;
-
- res = (estack_bx_v > estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_LT_S64):
- {
- int res;
-
- res = (estack_bx_v < estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_GE_S64):
- {
- int res;
-
- res = (estack_bx_v >= estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_LE_S64):
- {
- int res;
-
- res = (estack_bx_v <= estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
-
- OP(FILTER_OP_EQ_DOUBLE):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d == estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_NE_DOUBLE):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d != estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_GT_DOUBLE):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d > estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_LT_DOUBLE):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d < estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_GE_DOUBLE):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d >= estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_LE_DOUBLE):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d <= estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
-
- /* Mixed S64-double binary comparators */
- OP(FILTER_OP_EQ_DOUBLE_S64):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d == estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_NE_DOUBLE_S64):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d != estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_GT_DOUBLE_S64):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d > estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_LT_DOUBLE_S64):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d < estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_GE_DOUBLE_S64):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d >= estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_LE_DOUBLE_S64):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d <= estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
-
- OP(FILTER_OP_EQ_S64_DOUBLE):
- {
- int res;
-
- res = (estack_bx_v == estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_NE_S64_DOUBLE):
- {
- int res;
-
- res = (estack_bx_v != estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_GT_S64_DOUBLE):
- {
- int res;
-
- res = (estack_bx_v > estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_LT_S64_DOUBLE):
- {
- int res;
-
- res = (estack_bx_v < estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_GE_S64_DOUBLE):
- {
- int res;
-
- res = (estack_bx_v >= estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_LE_S64_DOUBLE):
- {
- int res;
-
- res = (estack_bx_v <= estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_BIT_RSHIFT):
- {
- int64_t res;
-
- /* Dynamic typing. */
- if (estack_ax_t != REG_S64 || estack_bx_t != REG_S64) {
- ret = -EINVAL;
- goto end;
- }
- /* Catch undefined behavior. */
- if (caa_unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
- ret = -EINVAL;
- goto end;
- }
- res = ((uint64_t) estack_bx_v >> (uint32_t) estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_BIT_LSHIFT):
- {
- int64_t res;
-
- /* Dynamic typing. */
- if (estack_ax_t != REG_S64 || estack_bx_t != REG_S64) {
- ret = -EINVAL;
- goto end;
- }
- /* Catch undefined behavior. */
- if (caa_unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
- ret = -EINVAL;
- goto end;
- }
- res = ((uint64_t) estack_bx_v << (uint32_t) estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_BIT_AND):
- {
- int64_t res;
-
- /* Dynamic typing. */
- if (estack_ax_t != REG_S64 || estack_bx_t != REG_S64) {
- ret = -EINVAL;
- goto end;
- }
-
- res = ((uint64_t) estack_bx_v & (uint64_t) estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_BIT_OR):
- {
- int64_t res;
-
- /* Dynamic typing. */
- if (estack_ax_t != REG_S64 || estack_bx_t != REG_S64) {
- ret = -EINVAL;
- goto end;
- }
-
- res = ((uint64_t) estack_bx_v | (uint64_t) estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(FILTER_OP_BIT_XOR):
- {
- int64_t res;
-
- /* Dynamic typing. */
- if (estack_ax_t != REG_S64 || estack_bx_t != REG_S64) {
- ret = -EINVAL;
- goto end;
- }
-
- res = ((uint64_t) estack_bx_v ^ (uint64_t) estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
-
- /* unary */
- OP(FILTER_OP_UNARY_PLUS):
- {
- /* Dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64: /* Fall-through. */
- JUMP_TO(FILTER_OP_UNARY_PLUS_S64);
- case REG_DOUBLE:
- JUMP_TO(FILTER_OP_UNARY_PLUS_DOUBLE);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown filter register type (%d)",
- (int) estack_ax_t);
- ret = -EINVAL;
- goto end;
- }
- }
- OP(FILTER_OP_UNARY_MINUS):
- {
- /* Dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64:
- JUMP_TO(FILTER_OP_UNARY_MINUS_S64);
- case REG_DOUBLE:
- JUMP_TO(FILTER_OP_UNARY_MINUS_DOUBLE);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown filter register type (%d)",
- (int) estack_ax_t);
- ret = -EINVAL;
- goto end;
- }
- }
- OP(FILTER_OP_UNARY_NOT):
- {
- /* Dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64:
- JUMP_TO(FILTER_OP_UNARY_NOT_S64);
- case REG_DOUBLE:
- JUMP_TO(FILTER_OP_UNARY_NOT_DOUBLE);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown filter register type (%d)",
- (int) estack_ax_t);
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct unary_op);
- PO;
- }
-
- OP(FILTER_OP_UNARY_BIT_NOT):
- {
- /* Dynamic typing. */
- if (estack_ax_t != REG_S64) {
- ret = -EINVAL;
- goto end;
- }
-
- estack_ax_v = ~(uint64_t) estack_ax_v;
- next_pc += sizeof(struct unary_op);
- PO;
- }
-
- OP(FILTER_OP_UNARY_PLUS_S64):
- OP(FILTER_OP_UNARY_PLUS_DOUBLE):
- {
- next_pc += sizeof(struct unary_op);
- PO;
- }
- OP(FILTER_OP_UNARY_MINUS_S64):
- {
- estack_ax_v = -estack_ax_v;
- next_pc += sizeof(struct unary_op);
- PO;
- }
- OP(FILTER_OP_UNARY_MINUS_DOUBLE):
- {
- estack_ax(stack, top)->u.d = -estack_ax(stack, top)->u.d;
- next_pc += sizeof(struct unary_op);
- PO;
- }
- OP(FILTER_OP_UNARY_NOT_S64):
- {
- estack_ax_v = !estack_ax_v;
- next_pc += sizeof(struct unary_op);
- PO;
- }
- OP(FILTER_OP_UNARY_NOT_DOUBLE):
- {
- estack_ax_v = !estack_ax(stack, top)->u.d;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct unary_op);
- PO;
- }
-
- /* logical */
- OP(FILTER_OP_AND):
- {
- struct logical_op *insn = (struct logical_op *) pc;
-
- if (estack_ax_t != REG_S64) {
- ret = -EINVAL;
- goto end;
- }
- /* If AX is 0, skip and evaluate to 0 */
- if (unlikely(estack_ax_v == 0)) {
- dbg_printf("Jumping to bytecode offset %u\n",
- (unsigned int) insn->skip_offset);
- next_pc = start_pc + insn->skip_offset;
- } else {
- /* Pop 1 when jump not taken */
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- next_pc += sizeof(struct logical_op);
- }
- PO;
- }
- OP(FILTER_OP_OR):
- {
- struct logical_op *insn = (struct logical_op *) pc;
-
- if (estack_ax_t != REG_S64) {
- ret = -EINVAL;
- goto end;
- }
- /* If AX is nonzero, skip and evaluate to 1 */
- if (unlikely(estack_ax_v != 0)) {
- estack_ax_v = 1;
- dbg_printf("Jumping to bytecode offset %u\n",
- (unsigned int) insn->skip_offset);
- next_pc = start_pc + insn->skip_offset;
- } else {
- /* Pop 1 when jump not taken */
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- next_pc += sizeof(struct logical_op);
- }
- PO;
- }
-
-
- /* load field ref */
- OP(FILTER_OP_LOAD_FIELD_REF_STRING):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("load field ref offset %u type string\n",
- ref->offset);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.s.str =
- *(const char * const *) &filter_stack_data[ref->offset];
- if (unlikely(!estack_ax(stack, top)->u.s.str)) {
- dbg_printf("Filter warning: loading a NULL string.\n");
- ret = -EINVAL;
- goto end;
- }
- estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- estack_ax_t = REG_STRING;
- dbg_printf("ref load string %s\n", estack_ax(stack, top)->u.s.str);
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(FILTER_OP_LOAD_FIELD_REF_SEQUENCE):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("load field ref offset %u type sequence\n",
- ref->offset);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.s.seq_len =
- *(unsigned long *) &filter_stack_data[ref->offset];
- estack_ax(stack, top)->u.s.str =
- *(const char **) (&filter_stack_data[ref->offset
- + sizeof(unsigned long)]);
- estack_ax_t = REG_STRING;
- if (unlikely(!estack_ax(stack, top)->u.s.str)) {
- dbg_printf("Filter warning: loading a NULL sequence.\n");
- ret = -EINVAL;
- goto end;
- }
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(FILTER_OP_LOAD_FIELD_REF_S64):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("load field ref offset %u type s64\n",
- ref->offset);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v =
- ((struct literal_numeric *) &filter_stack_data[ref->offset])->v;
- estack_ax_t = REG_S64;
- dbg_printf("ref load s64 %" PRIi64 "\n", estack_ax_v);
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(FILTER_OP_LOAD_FIELD_REF_DOUBLE):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("load field ref offset %u type double\n",
- ref->offset);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- memcpy(&estack_ax(stack, top)->u.d, &filter_stack_data[ref->offset],
- sizeof(struct literal_double));
- estack_ax_t = REG_DOUBLE;
- dbg_printf("ref load double %g\n", estack_ax(stack, top)->u.d);
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- /* load from immediate operand */
- OP(FILTER_OP_LOAD_STRING):
- {
- struct load_op *insn = (struct load_op *) pc;
-
- dbg_printf("load string %s\n", insn->data);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.s.str = insn->data;
- estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_PLAIN;
- estack_ax_t = REG_STRING;
- next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
- PO;
- }
-
- OP(FILTER_OP_LOAD_STAR_GLOB_STRING):
- {
- struct load_op *insn = (struct load_op *) pc;
-
- dbg_printf("load globbing pattern %s\n", insn->data);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.s.str = insn->data;
- estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_STAR_GLOB;
- estack_ax_t = REG_STAR_GLOB_STRING;
- next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
- PO;
- }
-
- OP(FILTER_OP_LOAD_S64):
- {
- struct load_op *insn = (struct load_op *) pc;
-
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = ((struct literal_numeric *) insn->data)->v;
- estack_ax_t = REG_S64;
- dbg_printf("load s64 %" PRIi64 "\n", estack_ax_v);
- next_pc += sizeof(struct load_op)
- + sizeof(struct literal_numeric);
- PO;
- }
-
- OP(FILTER_OP_LOAD_DOUBLE):
- {
- struct load_op *insn = (struct load_op *) pc;
-
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- memcpy(&estack_ax(stack, top)->u.d, insn->data,
- sizeof(struct literal_double));
- estack_ax_t = REG_DOUBLE;
- dbg_printf("load double %g\n", estack_ax(stack, top)->u.d);
- next_pc += sizeof(struct load_op)
- + sizeof(struct literal_double);
- PO;
- }
-
- /* cast */
- OP(FILTER_OP_CAST_TO_S64):
- {
- /* Dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64:
- JUMP_TO(FILTER_OP_CAST_NOP);
- case REG_DOUBLE:
- JUMP_TO(FILTER_OP_CAST_DOUBLE_TO_S64);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown filter register type (%d)",
- (int) estack_ax_t);
- ret = -EINVAL;
- goto end;
- }
- }
-
- OP(FILTER_OP_CAST_DOUBLE_TO_S64):
- {
- estack_ax_v = (int64_t) estack_ax(stack, top)->u.d;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct cast_op);
- PO;
- }
-
- OP(FILTER_OP_CAST_NOP):
- {
- next_pc += sizeof(struct cast_op);
- PO;
- }
-
- /* get context ref */
- OP(FILTER_OP_GET_CONTEXT_REF):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
- struct lttng_ctx *ctx;
- struct lttng_ctx_field *ctx_field;
- struct lttng_ctx_value v;
-
- dbg_printf("get context ref offset %u type dynamic\n",
- ref->offset);
- ctx = rcu_dereference(session->ctx);
- ctx_field = &ctx->fields[ref->offset];
- ctx_field->get_value(ctx_field, &v);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- switch (v.sel) {
- case LTTNG_UST_DYNAMIC_TYPE_NONE:
- ret = -EINVAL;
- goto end;
- case LTTNG_UST_DYNAMIC_TYPE_S64:
- estack_ax_v = v.u.s64;
- estack_ax_t = REG_S64;
- dbg_printf("ref get context dynamic s64 %" PRIi64 "\n", estack_ax_v);
- break;
- case LTTNG_UST_DYNAMIC_TYPE_DOUBLE:
- estack_ax(stack, top)->u.d = v.u.d;
- estack_ax_t = REG_DOUBLE;
- dbg_printf("ref get context dynamic double %g\n", estack_ax(stack, top)->u.d);
- break;
- case LTTNG_UST_DYNAMIC_TYPE_STRING:
- estack_ax(stack, top)->u.s.str = v.u.str;
- if (unlikely(!estack_ax(stack, top)->u.s.str)) {
- dbg_printf("Filter warning: loading a NULL string.\n");
- ret = -EINVAL;
- goto end;
- }
- estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- dbg_printf("ref get context dynamic string %s\n", estack_ax(stack, top)->u.s.str);
- estack_ax_t = REG_STRING;
- break;
- default:
- dbg_printf("Filter warning: unknown dynamic type (%d).\n", (int) v.sel);
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(FILTER_OP_GET_CONTEXT_REF_STRING):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
- struct lttng_ctx *ctx;
- struct lttng_ctx_field *ctx_field;
- struct lttng_ctx_value v;
-
- dbg_printf("get context ref offset %u type string\n",
- ref->offset);
- ctx = rcu_dereference(session->ctx);
- ctx_field = &ctx->fields[ref->offset];
- ctx_field->get_value(ctx_field, &v);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.s.str = v.u.str;
- if (unlikely(!estack_ax(stack, top)->u.s.str)) {
- dbg_printf("Filter warning: loading a NULL string.\n");
- ret = -EINVAL;
- goto end;
- }
- estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- estack_ax_t = REG_STRING;
- dbg_printf("ref get context string %s\n", estack_ax(stack, top)->u.s.str);
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(FILTER_OP_GET_CONTEXT_REF_S64):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
- struct lttng_ctx *ctx;
- struct lttng_ctx_field *ctx_field;
- struct lttng_ctx_value v;
-
- dbg_printf("get context ref offset %u type s64\n",
- ref->offset);
- ctx = rcu_dereference(session->ctx);
- ctx_field = &ctx->fields[ref->offset];
- ctx_field->get_value(ctx_field, &v);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = v.u.s64;
- estack_ax_t = REG_S64;
- dbg_printf("ref get context s64 %" PRIi64 "\n", estack_ax_v);
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(FILTER_OP_GET_CONTEXT_REF_DOUBLE):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
- struct lttng_ctx *ctx;
- struct lttng_ctx_field *ctx_field;
- struct lttng_ctx_value v;
-
- dbg_printf("get context ref offset %u type double\n",
- ref->offset);
- ctx = rcu_dereference(session->ctx);
- ctx_field = &ctx->fields[ref->offset];
- ctx_field->get_value(ctx_field, &v);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- memcpy(&estack_ax(stack, top)->u.d, &v.u.d, sizeof(struct literal_double));
- estack_ax_t = REG_DOUBLE;
- dbg_printf("ref get context double %g\n", estack_ax(stack, top)->u.d);
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(FILTER_OP_GET_CONTEXT_ROOT):
- {
- dbg_printf("op get context root\n");
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_CONTEXT;
- /* "field" only needed for variants. */
- estack_ax(stack, top)->u.ptr.field = NULL;
- estack_ax_t = REG_PTR;
- next_pc += sizeof(struct load_op);
- PO;
- }
-
- OP(FILTER_OP_GET_APP_CONTEXT_ROOT):
- {
- dbg_printf("op get app context root\n");
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_APP_CONTEXT;
- /* "field" only needed for variants. */
- estack_ax(stack, top)->u.ptr.field = NULL;
- estack_ax_t = REG_PTR;
- next_pc += sizeof(struct load_op);
- PO;
- }
-
- OP(FILTER_OP_GET_PAYLOAD_ROOT):
- {
- dbg_printf("op get app payload root\n");
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_PAYLOAD;
- estack_ax(stack, top)->u.ptr.ptr = filter_stack_data;
- /* "field" only needed for variants. */
- estack_ax(stack, top)->u.ptr.field = NULL;
- estack_ax_t = REG_PTR;
- next_pc += sizeof(struct load_op);
- PO;
- }
-
- OP(FILTER_OP_GET_SYMBOL):
- {
- dbg_printf("op get symbol\n");
- switch (estack_ax(stack, top)->u.ptr.type) {
- case LOAD_OBJECT:
- ERR("Nested fields not implemented yet.");
- ret = -EINVAL;
- goto end;
- case LOAD_ROOT_CONTEXT:
- case LOAD_ROOT_APP_CONTEXT:
- case LOAD_ROOT_PAYLOAD:
- /*
- * symbol lookup is performed by
- * specialization.
- */
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
- PO;
- }
-
- OP(FILTER_OP_GET_SYMBOL_FIELD):
- {
- /*
- * Used for first variant encountered in a
- * traversal. Variants are not implemented yet.
- */
- ret = -EINVAL;
- goto end;
- }
-
- OP(FILTER_OP_GET_INDEX_U16):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
-
- dbg_printf("op get index u16\n");
- ret = dynamic_get_index(session, bytecode, index->index, estack_ax(stack, top));
- if (ret)
- goto end;
- estack_ax_v = estack_ax(stack, top)->u.v;
- estack_ax_t = estack_ax(stack, top)->type;
- next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
- PO;
- }
-
- OP(FILTER_OP_GET_INDEX_U64):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
-
- dbg_printf("op get index u64\n");
- ret = dynamic_get_index(session, bytecode, index->index, estack_ax(stack, top));
- if (ret)
- goto end;
- estack_ax_v = estack_ax(stack, top)->u.v;
- estack_ax_t = estack_ax(stack, top)->type;
- next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
- PO;
- }
-
- OP(FILTER_OP_LOAD_FIELD):
- {
- dbg_printf("op load field\n");
- ret = dynamic_load_field(estack_ax(stack, top));
- if (ret)
- goto end;
- estack_ax_v = estack_ax(stack, top)->u.v;
- estack_ax_t = estack_ax(stack, top)->type;
- next_pc += sizeof(struct load_op);
- PO;
- }
-
- OP(FILTER_OP_LOAD_FIELD_S8):
- {
- dbg_printf("op load field s8\n");
-
- estack_ax_v = *(int8_t *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(FILTER_OP_LOAD_FIELD_S16):
- {
- dbg_printf("op load field s16\n");
-
- estack_ax_v = *(int16_t *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(FILTER_OP_LOAD_FIELD_S32):
- {
- dbg_printf("op load field s32\n");
-
- estack_ax_v = *(int32_t *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(FILTER_OP_LOAD_FIELD_S64):
- {
- dbg_printf("op load field s64\n");
-
- estack_ax_v = *(int64_t *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(FILTER_OP_LOAD_FIELD_U8):
- {
- dbg_printf("op load field u8\n");
-
- estack_ax_v = *(uint8_t *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(FILTER_OP_LOAD_FIELD_U16):
- {
- dbg_printf("op load field u16\n");
-
- estack_ax_v = *(uint16_t *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(FILTER_OP_LOAD_FIELD_U32):
- {
- dbg_printf("op load field u32\n");
-
- estack_ax_v = *(uint32_t *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(FILTER_OP_LOAD_FIELD_U64):
- {
- dbg_printf("op load field u64\n");
-
- estack_ax_v = *(uint64_t *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(FILTER_OP_LOAD_FIELD_DOUBLE):
- {
- dbg_printf("op load field double\n");
-
- memcpy(&estack_ax(stack, top)->u.d,
- estack_ax(stack, top)->u.ptr.ptr,
- sizeof(struct literal_double));
-		estack_ax_t = REG_DOUBLE;
- next_pc += sizeof(struct load_op);
- PO;
- }
-
- OP(FILTER_OP_LOAD_FIELD_STRING):
- {
- const char *str;
-
- dbg_printf("op load field string\n");
- str = (const char *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax(stack, top)->u.s.str = str;
- if (unlikely(!estack_ax(stack, top)->u.s.str)) {
- dbg_printf("Filter warning: loading a NULL string.\n");
- ret = -EINVAL;
- goto end;
- }
- estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
-		estack_ax_t = REG_STRING;
- next_pc += sizeof(struct load_op);
- PO;
- }
-
- OP(FILTER_OP_LOAD_FIELD_SEQUENCE):
- {
- const char *ptr;
-
- dbg_printf("op load field string sequence\n");
- ptr = estack_ax(stack, top)->u.ptr.ptr;
- estack_ax(stack, top)->u.s.seq_len = *(unsigned long *) ptr;
- estack_ax(stack, top)->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
-		estack_ax_t = REG_STRING;
- if (unlikely(!estack_ax(stack, top)->u.s.str)) {
- dbg_printf("Filter warning: loading a NULL sequence.\n");
- ret = -EINVAL;
- goto end;
- }
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- next_pc += sizeof(struct load_op);
- PO;
- }
-
- END_OP
-end:
- /* Return _DISCARD on error. */
- if (ret)
- return LTTNG_FILTER_DISCARD;
- return retval;
-}
-
-#undef START_OP
-#undef OP
-#undef PO
-#undef END_OP
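-
-/*
- * Note on the dispatch macros undefined above: depending on compiler
- * support, the interpreter loop is built either with computed gotos or
- * as a plain switch. A minimal sketch of the switch-based flavour
- * (illustrative only; the actual definitions appear earlier in this
- * file):
- *
- *	#define OP(name)	case name
- *	#define PO		break
- *	#define END_OP		}
- */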
+++ /dev/null
-/*
- * lttng-filter-specialize.c
- *
- * LTTng UST filter code specializer.
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#define _LGPL_SOURCE
-#include <stddef.h>
-#include <stdint.h>
-
-#include "lttng-filter.h"
-#include <lttng/align.h>
-
-static int lttng_fls(int val)
-{
- int r = 32;
- unsigned int x = (unsigned int) val;
-
- if (!x)
- return 0;
- if (!(x & 0xFFFF0000U)) {
- x <<= 16;
- r -= 16;
- }
- if (!(x & 0xFF000000U)) {
- x <<= 8;
- r -= 8;
- }
- if (!(x & 0xF0000000U)) {
- x <<= 4;
- r -= 4;
- }
- if (!(x & 0xC0000000U)) {
- x <<= 2;
- r -= 2;
- }
- if (!(x & 0x80000000U)) {
- r -= 1;
- }
- return r;
-}
-
-static int get_count_order(unsigned int count)
-{
- int order;
-
- order = lttng_fls(count) - 1;
- if (count & (count - 1))
- order++;
- return order;
-}
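-
-/*
- * Worked example (illustrative): lttng_fls(5) returns 3, the position
- * of the highest set bit of 0b101. get_count_order(5) then returns 3 as
- * well, because 5 is not a power of two (5 & 4 != 0), so allocation
- * sizes below are rounded up to 1 << 3 = 8 bytes. For an exact power of
- * two, get_count_order(8) is also 3 and no rounding occurs.
- */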
-
-static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
- size_t align, size_t len)
-{
- ssize_t ret;
- size_t padding = offset_align(runtime->data_len, align);
- size_t new_len = runtime->data_len + padding + len;
- size_t new_alloc_len = new_len;
- size_t old_alloc_len = runtime->data_alloc_len;
-
- if (new_len > FILTER_MAX_DATA_LEN)
- return -EINVAL;
-
- if (new_alloc_len > old_alloc_len) {
- char *newptr;
-
- new_alloc_len =
- max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
- newptr = realloc(runtime->data, new_alloc_len);
- if (!newptr)
- return -ENOMEM;
- runtime->data = newptr;
-		/* Zero only the newly allocated memory, past the previously allocated length. */
- memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
- runtime->data_alloc_len = new_alloc_len;
- }
- runtime->data_len += padding;
- ret = runtime->data_len;
- runtime->data_len += len;
- return ret;
-}
-
-static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
- const void *p, size_t align, size_t len)
-{
- ssize_t offset;
-
- offset = bytecode_reserve_data(runtime, align, len);
- if (offset < 0)
- return -ENOMEM;
- memcpy(&runtime->data[offset], p, len);
- return offset;
-}
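-
-/*
- * Worked example (illustrative): with runtime->data_len == 5, a call
- * bytecode_push_data(runtime, p, 8, 16) computes a padding of
- * offset_align(5, 8) == 3 bytes, copies 16 bytes from p at offset 8,
- * leaves data_len at 24 and returns 8; that returned offset is what
- * later gets patched into the get_index_u16/get_index_u64 operands.
- */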
-
-static int specialize_load_field(struct vstack_entry *stack_top,
- struct load_op *insn)
-{
- int ret;
-
- switch (stack_top->load.type) {
- case LOAD_OBJECT:
- break;
- case LOAD_ROOT_CONTEXT:
- case LOAD_ROOT_APP_CONTEXT:
- case LOAD_ROOT_PAYLOAD:
- default:
- dbg_printf("Filter warning: cannot load root, missing field name.\n");
- ret = -EINVAL;
- goto end;
- }
- switch (stack_top->load.object_type) {
- case OBJECT_TYPE_S8:
- dbg_printf("op load field s8\n");
- stack_top->type = REG_S64;
- if (!stack_top->load.rev_bo)
- insn->op = FILTER_OP_LOAD_FIELD_S8;
- break;
- case OBJECT_TYPE_S16:
- dbg_printf("op load field s16\n");
- stack_top->type = REG_S64;
- if (!stack_top->load.rev_bo)
- insn->op = FILTER_OP_LOAD_FIELD_S16;
- break;
- case OBJECT_TYPE_S32:
- dbg_printf("op load field s32\n");
- stack_top->type = REG_S64;
- if (!stack_top->load.rev_bo)
- insn->op = FILTER_OP_LOAD_FIELD_S32;
- break;
- case OBJECT_TYPE_S64:
- dbg_printf("op load field s64\n");
- stack_top->type = REG_S64;
- if (!stack_top->load.rev_bo)
- insn->op = FILTER_OP_LOAD_FIELD_S64;
- break;
- case OBJECT_TYPE_U8:
- dbg_printf("op load field u8\n");
- stack_top->type = REG_S64;
- insn->op = FILTER_OP_LOAD_FIELD_U8;
- break;
- case OBJECT_TYPE_U16:
- dbg_printf("op load field u16\n");
- stack_top->type = REG_S64;
- if (!stack_top->load.rev_bo)
- insn->op = FILTER_OP_LOAD_FIELD_U16;
- break;
- case OBJECT_TYPE_U32:
- dbg_printf("op load field u32\n");
- stack_top->type = REG_S64;
- if (!stack_top->load.rev_bo)
- insn->op = FILTER_OP_LOAD_FIELD_U32;
- break;
- case OBJECT_TYPE_U64:
- dbg_printf("op load field u64\n");
- stack_top->type = REG_S64;
- if (!stack_top->load.rev_bo)
- insn->op = FILTER_OP_LOAD_FIELD_U64;
- break;
- case OBJECT_TYPE_DOUBLE:
- stack_top->type = REG_DOUBLE;
- insn->op = FILTER_OP_LOAD_FIELD_DOUBLE;
- break;
- case OBJECT_TYPE_STRING:
- dbg_printf("op load field string\n");
- stack_top->type = REG_STRING;
- insn->op = FILTER_OP_LOAD_FIELD_STRING;
- break;
- case OBJECT_TYPE_STRING_SEQUENCE:
- dbg_printf("op load field string sequence\n");
- stack_top->type = REG_STRING;
- insn->op = FILTER_OP_LOAD_FIELD_SEQUENCE;
- break;
- case OBJECT_TYPE_DYNAMIC:
- dbg_printf("op load field dynamic\n");
- stack_top->type = REG_UNKNOWN;
- /* Don't specialize load op. */
- break;
- case OBJECT_TYPE_SEQUENCE:
- case OBJECT_TYPE_ARRAY:
- case OBJECT_TYPE_STRUCT:
- case OBJECT_TYPE_VARIANT:
-		ERR("Sequences, arrays, structs and variants cannot be loaded (nested types).");
- ret = -EINVAL;
- goto end;
- }
- return 0;
-
-end:
- return ret;
-}
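-
-/*
- * Example (illustrative): when the object on the virtual stack is an
- * OBJECT_TYPE_STRING payload field, the generic FILTER_OP_LOAD_FIELD
- * instruction is rewritten in place into FILTER_OP_LOAD_FIELD_STRING,
- * so the interpreter does not have to re-inspect the object type at
- * event evaluation time.
- */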
-
-static int specialize_get_index_object_type(enum object_type *otype,
- int signedness, uint32_t elem_len)
-{
- switch (elem_len) {
- case 8:
- if (signedness)
- *otype = OBJECT_TYPE_S8;
- else
- *otype = OBJECT_TYPE_U8;
- break;
- case 16:
- if (signedness)
- *otype = OBJECT_TYPE_S16;
- else
- *otype = OBJECT_TYPE_U16;
- break;
- case 32:
- if (signedness)
- *otype = OBJECT_TYPE_S32;
- else
- *otype = OBJECT_TYPE_U32;
- break;
- case 64:
- if (signedness)
- *otype = OBJECT_TYPE_S64;
- else
- *otype = OBJECT_TYPE_U64;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
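-
-/*
- * E.g. (illustrative) specialize_get_index_object_type(&otype, 1, 32)
- * selects OBJECT_TYPE_S32, while element sizes other than 8, 16, 32 or
- * 64 bits are rejected with -EINVAL.
- */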
-
-static int specialize_get_index(struct bytecode_runtime *runtime,
- struct load_op *insn, uint64_t index,
- struct vstack_entry *stack_top,
- int idx_len)
-{
- int ret;
- struct filter_get_index_data gid;
- ssize_t data_offset;
-
- memset(&gid, 0, sizeof(gid));
- switch (stack_top->load.type) {
- case LOAD_OBJECT:
- switch (stack_top->load.object_type) {
- case OBJECT_TYPE_ARRAY:
- {
- const struct lttng_integer_type *integer_type;
- const struct lttng_event_field *field;
- uint32_t elem_len, num_elems;
- int signedness;
-
- field = stack_top->load.field;
- switch (field->type.atype) {
- case atype_array:
- integer_type = &field->type.u.legacy.array.elem_type.u.basic.integer;
- num_elems = field->type.u.legacy.array.length;
- break;
- case atype_array_nestable:
- if (field->type.u.array_nestable.elem_type->atype != atype_integer) {
- ret = -EINVAL;
- goto end;
- }
- integer_type = &field->type.u.array_nestable.elem_type->u.integer;
- num_elems = field->type.u.array_nestable.length;
- break;
- default:
- ret = -EINVAL;
- goto end;
- }
- elem_len = integer_type->size;
- signedness = integer_type->signedness;
- if (index >= num_elems) {
- ret = -EINVAL;
- goto end;
- }
- ret = specialize_get_index_object_type(&stack_top->load.object_type,
- signedness, elem_len);
- if (ret)
- goto end;
- gid.offset = index * (elem_len / CHAR_BIT);
- gid.array_len = num_elems * (elem_len / CHAR_BIT);
- gid.elem.type = stack_top->load.object_type;
- gid.elem.len = elem_len;
- if (integer_type->reverse_byte_order)
- gid.elem.rev_bo = true;
- stack_top->load.rev_bo = gid.elem.rev_bo;
- break;
- }
- case OBJECT_TYPE_SEQUENCE:
- {
- const struct lttng_integer_type *integer_type;
- const struct lttng_event_field *field;
- uint32_t elem_len;
- int signedness;
-
- field = stack_top->load.field;
- switch (field->type.atype) {
- case atype_sequence:
- integer_type = &field->type.u.legacy.sequence.elem_type.u.basic.integer;
- break;
- case atype_sequence_nestable:
- if (field->type.u.sequence_nestable.elem_type->atype != atype_integer) {
- ret = -EINVAL;
- goto end;
- }
- integer_type = &field->type.u.sequence_nestable.elem_type->u.integer;
- break;
- default:
- ret = -EINVAL;
- goto end;
- }
- elem_len = integer_type->size;
- signedness = integer_type->signedness;
- ret = specialize_get_index_object_type(&stack_top->load.object_type,
- signedness, elem_len);
- if (ret)
- goto end;
- gid.offset = index * (elem_len / CHAR_BIT);
- gid.elem.type = stack_top->load.object_type;
- gid.elem.len = elem_len;
- if (integer_type->reverse_byte_order)
- gid.elem.rev_bo = true;
- stack_top->load.rev_bo = gid.elem.rev_bo;
- break;
- }
- case OBJECT_TYPE_STRUCT:
- /* Only generated by the specialize phase. */
- case OBJECT_TYPE_VARIANT: /* Fall-through */
- default:
- ERR("Unexpected get index type %d",
- (int) stack_top->load.object_type);
- ret = -EINVAL;
- goto end;
- }
- break;
- case LOAD_ROOT_CONTEXT:
- case LOAD_ROOT_APP_CONTEXT:
- case LOAD_ROOT_PAYLOAD:
- ERR("Index lookup for root field not implemented yet.");
- ret = -EINVAL;
- goto end;
- }
- data_offset = bytecode_push_data(runtime, &gid,
- __alignof__(gid), sizeof(gid));
- if (data_offset < 0) {
- ret = -EINVAL;
- goto end;
- }
- switch (idx_len) {
- case 2:
- ((struct get_index_u16 *) insn->data)->index = data_offset;
- break;
- case 8:
- ((struct get_index_u64 *) insn->data)->index = data_offset;
- break;
- default:
- ret = -EINVAL;
- goto end;
- }
-
- return 0;
-
-end:
- return ret;
-}
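-
-/*
- * Worked example (illustrative): indexing element 2 of an array of
- * unsigned 32-bit integers produces gid.offset = 2 * (32 / CHAR_BIT)
- * = 8 and gid.elem.len = 32, with gid.elem.type = OBJECT_TYPE_U32;
- * the gid record is appended to the runtime data area and its offset
- * written back into the get_index operand.
- */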
-
-static int specialize_context_lookup_name(struct lttng_ctx *ctx,
- struct bytecode_runtime *bytecode,
- struct load_op *insn)
-{
- uint16_t offset;
- const char *name;
-
- offset = ((struct get_symbol *) insn->data)->offset;
- name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
- return lttng_get_context_index(ctx, name);
-}
-
-static int specialize_load_object(const struct lttng_event_field *field,
- struct vstack_load *load, bool is_context)
-{
- load->type = LOAD_OBJECT;
- /*
-	 * LTTng-UST lays out all integer fields as s64 on the stack for the filter.
- */
- switch (field->type.atype) {
- case atype_integer:
- if (field->type.u.integer.signedness)
- load->object_type = OBJECT_TYPE_S64;
- else
- load->object_type = OBJECT_TYPE_U64;
- load->rev_bo = false;
- break;
- case atype_enum:
- case atype_enum_nestable:
- {
- const struct lttng_integer_type *itype;
-
- if (field->type.atype == atype_enum) {
- itype = &field->type.u.legacy.basic.enumeration.container_type;
- } else {
- itype = &field->type.u.enum_nestable.container_type->u.integer;
- }
- if (itype->signedness)
- load->object_type = OBJECT_TYPE_S64;
- else
- load->object_type = OBJECT_TYPE_U64;
- load->rev_bo = false;
- break;
- }
- case atype_array:
- if (field->type.u.legacy.array.elem_type.atype != atype_integer) {
- ERR("Array nesting only supports integer types.");
- return -EINVAL;
- }
- if (is_context) {
- load->object_type = OBJECT_TYPE_STRING;
- } else {
- if (field->type.u.legacy.array.elem_type.u.basic.integer.encoding == lttng_encode_none) {
- load->object_type = OBJECT_TYPE_ARRAY;
- load->field = field;
- } else {
- load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
- }
- }
- break;
- case atype_array_nestable:
- if (field->type.u.array_nestable.elem_type->atype != atype_integer) {
- ERR("Array nesting only supports integer types.");
- return -EINVAL;
- }
- if (is_context) {
- load->object_type = OBJECT_TYPE_STRING;
- } else {
- if (field->type.u.array_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
- load->object_type = OBJECT_TYPE_ARRAY;
- load->field = field;
- } else {
- load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
- }
- }
- break;
- case atype_sequence:
- if (field->type.u.legacy.sequence.elem_type.atype != atype_integer) {
- ERR("Sequence nesting only supports integer types.");
- return -EINVAL;
- }
- if (is_context) {
- load->object_type = OBJECT_TYPE_STRING;
- } else {
- if (field->type.u.legacy.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) {
- load->object_type = OBJECT_TYPE_SEQUENCE;
- load->field = field;
- } else {
- load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
- }
- }
- break;
- case atype_sequence_nestable:
- if (field->type.u.sequence_nestable.elem_type->atype != atype_integer) {
- ERR("Sequence nesting only supports integer types.");
- return -EINVAL;
- }
- if (is_context) {
- load->object_type = OBJECT_TYPE_STRING;
- } else {
- if (field->type.u.sequence_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
- load->object_type = OBJECT_TYPE_SEQUENCE;
- load->field = field;
- } else {
- load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
- }
- }
- break;
-
- case atype_string:
- load->object_type = OBJECT_TYPE_STRING;
- break;
- case atype_float:
- load->object_type = OBJECT_TYPE_DOUBLE;
- break;
- case atype_dynamic:
- load->object_type = OBJECT_TYPE_DYNAMIC;
- break;
- case atype_struct:
- ERR("Structure type cannot be loaded.");
- return -EINVAL;
- default:
- ERR("Unknown type: %d", (int) field->type.atype);
- return -EINVAL;
- }
- return 0;
-}
-
-static int specialize_context_lookup(struct lttng_session *session,
- struct bytecode_runtime *runtime,
- struct load_op *insn,
- struct vstack_load *load)
-{
- int idx, ret;
- struct lttng_ctx_field *ctx_field;
- struct lttng_event_field *field;
- struct filter_get_index_data gid;
- ssize_t data_offset;
-
- idx = specialize_context_lookup_name(session->ctx, runtime, insn);
- if (idx < 0) {
- return -ENOENT;
- }
- ctx_field = &session->ctx->fields[idx];
- field = &ctx_field->event_field;
- ret = specialize_load_object(field, load, true);
- if (ret)
- return ret;
- /* Specialize each get_symbol into a get_index. */
- insn->op = FILTER_OP_GET_INDEX_U16;
- memset(&gid, 0, sizeof(gid));
- gid.ctx_index = idx;
- gid.elem.type = load->object_type;
- data_offset = bytecode_push_data(runtime, &gid,
- __alignof__(gid), sizeof(gid));
- if (data_offset < 0) {
- return -EINVAL;
- }
- ((struct get_index_u16 *) insn->data)->index = data_offset;
- return 0;
-}
-
-static int specialize_app_context_lookup(struct lttng_session *session,
- struct bytecode_runtime *runtime,
- struct load_op *insn,
- struct vstack_load *load)
-{
- uint16_t offset;
- const char *orig_name;
- char *name = NULL;
- int idx, ret;
- struct lttng_ctx_field *ctx_field;
- struct lttng_event_field *field;
- struct filter_get_index_data gid;
- ssize_t data_offset;
-
- offset = ((struct get_symbol *) insn->data)->offset;
- orig_name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
- name = zmalloc(strlen(orig_name) + strlen("$app.") + 1);
- if (!name) {
- ret = -ENOMEM;
- goto end;
- }
- strcpy(name, "$app.");
- strcat(name, orig_name);
- idx = lttng_get_context_index(session->ctx, name);
- if (idx < 0) {
- assert(lttng_context_is_app(name));
- ret = lttng_ust_add_app_context_to_ctx_rcu(name,
- &session->ctx);
-		if (ret)
-			goto end;	/* free(name) on error instead of leaking it */
-		idx = lttng_get_context_index(session->ctx,
-				name);
-		if (idx < 0) {
-			ret = -ENOENT;
-			goto end;
-		}
- }
- ctx_field = &session->ctx->fields[idx];
- field = &ctx_field->event_field;
- ret = specialize_load_object(field, load, true);
- if (ret)
- goto end;
- /* Specialize each get_symbol into a get_index. */
- insn->op = FILTER_OP_GET_INDEX_U16;
- memset(&gid, 0, sizeof(gid));
- gid.ctx_index = idx;
- gid.elem.type = load->object_type;
- data_offset = bytecode_push_data(runtime, &gid,
- __alignof__(gid), sizeof(gid));
- if (data_offset < 0) {
- ret = -EINVAL;
- goto end;
- }
- ((struct get_index_u16 *) insn->data)->index = data_offset;
- ret = 0;
-end:
- free(name);
- return ret;
-}
-
-static int specialize_event_payload_lookup(struct lttng_event *event,
- struct bytecode_runtime *runtime,
- struct load_op *insn,
- struct vstack_load *load)
-{
- const char *name;
- uint16_t offset;
- const struct lttng_event_desc *desc = event->desc;
- unsigned int i, nr_fields;
- bool found = false;
- uint32_t field_offset = 0;
- const struct lttng_event_field *field;
- int ret;
- struct filter_get_index_data gid;
- ssize_t data_offset;
-
- nr_fields = desc->nr_fields;
- offset = ((struct get_symbol *) insn->data)->offset;
- name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
- for (i = 0; i < nr_fields; i++) {
- field = &desc->fields[i];
- if (field->u.ext.nofilter) {
- continue;
- }
- if (!strcmp(field->name, name)) {
- found = true;
- break;
- }
- /* compute field offset on stack */
- switch (field->type.atype) {
- case atype_integer:
- case atype_enum:
- case atype_enum_nestable:
- field_offset += sizeof(int64_t);
- break;
- case atype_array:
- case atype_array_nestable:
- case atype_sequence:
- case atype_sequence_nestable:
- field_offset += sizeof(unsigned long);
- field_offset += sizeof(void *);
- break;
- case atype_string:
- field_offset += sizeof(void *);
- break;
- case atype_float:
- field_offset += sizeof(double);
- break;
- default:
- ret = -EINVAL;
- goto end;
- }
- }
- if (!found) {
- ret = -EINVAL;
- goto end;
- }
-
- ret = specialize_load_object(field, load, false);
- if (ret)
- goto end;
-
- /* Specialize each get_symbol into a get_index. */
- insn->op = FILTER_OP_GET_INDEX_U16;
- memset(&gid, 0, sizeof(gid));
- gid.offset = field_offset;
- gid.elem.type = load->object_type;
- data_offset = bytecode_push_data(runtime, &gid,
- __alignof__(gid), sizeof(gid));
- if (data_offset < 0) {
- ret = -EINVAL;
- goto end;
- }
- ((struct get_index_u16 *) insn->data)->index = data_offset;
- ret = 0;
-end:
- return ret;
-}
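-
-/*
- * Example (illustrative): for an event with fields { int64 count;
- * string msg; }, looking up "msg" skips "count" and accumulates
- * field_offset = sizeof(int64_t) == 8, matching the layout of the
- * interpreter stack data; fields flagged nofilter are absent from that
- * layout and are skipped without advancing the offset.
- */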
-
-int lttng_filter_specialize_bytecode(struct lttng_event *event,
- struct bytecode_runtime *bytecode)
-{
- void *pc, *next_pc, *start_pc;
- int ret = -EINVAL;
- struct vstack _stack;
- struct vstack *stack = &_stack;
- struct lttng_session *session = bytecode->p.session;
-
- vstack_init(stack);
-
- start_pc = &bytecode->code[0];
- for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
- pc = next_pc) {
- switch (*(filter_opcode_t *) pc) {
- case FILTER_OP_UNKNOWN:
- default:
- ERR("unknown bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
-
- case FILTER_OP_RETURN:
- if (vstack_ax(stack)->type == REG_S64)
- *(filter_opcode_t *) pc = FILTER_OP_RETURN_S64;
- ret = 0;
- goto end;
-
- case FILTER_OP_RETURN_S64:
- if (vstack_ax(stack)->type != REG_S64) {
- ERR("Unexpected register type\n");
- ret = -EINVAL;
- goto end;
- }
- ret = 0;
- goto end;
-
- /* binary */
- case FILTER_OP_MUL:
- case FILTER_OP_DIV:
- case FILTER_OP_MOD:
- case FILTER_OP_PLUS:
- case FILTER_OP_MINUS:
- ERR("unsupported bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
-
- case FILTER_OP_EQ:
- {
- struct binary_op *insn = (struct binary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STRING:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
- insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
- else
- insn->op = FILTER_OP_EQ_STRING;
- break;
- case REG_STAR_GLOB_STRING:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
- break;
- case REG_S64:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_EQ_S64;
- else
- insn->op = FILTER_OP_EQ_DOUBLE_S64;
- break;
- case REG_DOUBLE:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_EQ_S64_DOUBLE;
- else
- insn->op = FILTER_OP_EQ_DOUBLE;
- break;
- case REG_UNKNOWN:
- break; /* Dynamic typing. */
- }
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- case FILTER_OP_NE:
- {
- struct binary_op *insn = (struct binary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STRING:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
- insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
- else
- insn->op = FILTER_OP_NE_STRING;
- break;
- case REG_STAR_GLOB_STRING:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
- break;
- case REG_S64:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_NE_S64;
- else
- insn->op = FILTER_OP_NE_DOUBLE_S64;
- break;
- case REG_DOUBLE:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_NE_S64_DOUBLE;
- else
- insn->op = FILTER_OP_NE_DOUBLE;
- break;
- case REG_UNKNOWN:
- break; /* Dynamic typing. */
- }
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- case FILTER_OP_GT:
- {
- struct binary_op *insn = (struct binary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STAR_GLOB_STRING:
- ERR("invalid register type for > binary operator\n");
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- insn->op = FILTER_OP_GT_STRING;
- break;
- case REG_S64:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_GT_S64;
- else
- insn->op = FILTER_OP_GT_DOUBLE_S64;
- break;
- case REG_DOUBLE:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_GT_S64_DOUBLE;
- else
- insn->op = FILTER_OP_GT_DOUBLE;
- break;
- case REG_UNKNOWN:
- break; /* Dynamic typing. */
- }
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- case FILTER_OP_LT:
- {
- struct binary_op *insn = (struct binary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STAR_GLOB_STRING:
- ERR("invalid register type for < binary operator\n");
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- insn->op = FILTER_OP_LT_STRING;
- break;
- case REG_S64:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_LT_S64;
- else
- insn->op = FILTER_OP_LT_DOUBLE_S64;
- break;
- case REG_DOUBLE:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_LT_S64_DOUBLE;
- else
- insn->op = FILTER_OP_LT_DOUBLE;
- break;
- case REG_UNKNOWN:
- break; /* Dynamic typing. */
- }
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- case FILTER_OP_GE:
- {
- struct binary_op *insn = (struct binary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STAR_GLOB_STRING:
- ERR("invalid register type for >= binary operator\n");
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- insn->op = FILTER_OP_GE_STRING;
- break;
- case REG_S64:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_GE_S64;
- else
- insn->op = FILTER_OP_GE_DOUBLE_S64;
- break;
- case REG_DOUBLE:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_GE_S64_DOUBLE;
- else
- insn->op = FILTER_OP_GE_DOUBLE;
- break;
- case REG_UNKNOWN:
- break; /* Dynamic typing. */
- }
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
- case FILTER_OP_LE:
- {
- struct binary_op *insn = (struct binary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STAR_GLOB_STRING:
- ERR("invalid register type for <= binary operator\n");
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- insn->op = FILTER_OP_LE_STRING;
- break;
- case REG_S64:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_LE_S64;
- else
- insn->op = FILTER_OP_LE_DOUBLE_S64;
- break;
- case REG_DOUBLE:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64)
- insn->op = FILTER_OP_LE_S64_DOUBLE;
- else
- insn->op = FILTER_OP_LE_DOUBLE;
- break;
- case REG_UNKNOWN:
- break; /* Dynamic typing. */
- }
-		/* Pop 2, push 1 */
-		if (vstack_pop(stack)) {
-			ret = -EINVAL;
-			goto end;
-		}
-		vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- case FILTER_OP_EQ_STRING:
- case FILTER_OP_NE_STRING:
- case FILTER_OP_GT_STRING:
- case FILTER_OP_LT_STRING:
- case FILTER_OP_GE_STRING:
- case FILTER_OP_LE_STRING:
- case FILTER_OP_EQ_STAR_GLOB_STRING:
- case FILTER_OP_NE_STAR_GLOB_STRING:
- case FILTER_OP_EQ_S64:
- case FILTER_OP_NE_S64:
- case FILTER_OP_GT_S64:
- case FILTER_OP_LT_S64:
- case FILTER_OP_GE_S64:
- case FILTER_OP_LE_S64:
- case FILTER_OP_EQ_DOUBLE:
- case FILTER_OP_NE_DOUBLE:
- case FILTER_OP_GT_DOUBLE:
- case FILTER_OP_LT_DOUBLE:
- case FILTER_OP_GE_DOUBLE:
- case FILTER_OP_LE_DOUBLE:
- case FILTER_OP_EQ_DOUBLE_S64:
- case FILTER_OP_NE_DOUBLE_S64:
- case FILTER_OP_GT_DOUBLE_S64:
- case FILTER_OP_LT_DOUBLE_S64:
- case FILTER_OP_GE_DOUBLE_S64:
- case FILTER_OP_LE_DOUBLE_S64:
- case FILTER_OP_EQ_S64_DOUBLE:
- case FILTER_OP_NE_S64_DOUBLE:
- case FILTER_OP_GT_S64_DOUBLE:
- case FILTER_OP_LT_S64_DOUBLE:
- case FILTER_OP_GE_S64_DOUBLE:
- case FILTER_OP_LE_S64_DOUBLE:
- case FILTER_OP_BIT_RSHIFT:
- case FILTER_OP_BIT_LSHIFT:
- case FILTER_OP_BIT_AND:
- case FILTER_OP_BIT_OR:
- case FILTER_OP_BIT_XOR:
- {
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- /* unary */
- case FILTER_OP_UNARY_PLUS:
- {
- struct unary_op *insn = (struct unary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_S64:
- insn->op = FILTER_OP_UNARY_PLUS_S64;
- break;
- case REG_DOUBLE:
- insn->op = FILTER_OP_UNARY_PLUS_DOUBLE;
- break;
- case REG_UNKNOWN: /* Dynamic typing. */
- break;
- }
- /* Pop 1, push 1 */
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case FILTER_OP_UNARY_MINUS:
- {
- struct unary_op *insn = (struct unary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_S64:
- insn->op = FILTER_OP_UNARY_MINUS_S64;
- break;
- case REG_DOUBLE:
- insn->op = FILTER_OP_UNARY_MINUS_DOUBLE;
- break;
- case REG_UNKNOWN: /* Dynamic typing. */
- break;
- }
- /* Pop 1, push 1 */
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case FILTER_OP_UNARY_NOT:
- {
- struct unary_op *insn = (struct unary_op *) pc;
-
- switch(vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_S64:
- insn->op = FILTER_OP_UNARY_NOT_S64;
- break;
- case REG_DOUBLE:
- insn->op = FILTER_OP_UNARY_NOT_DOUBLE;
- break;
- case REG_UNKNOWN: /* Dynamic typing. */
- break;
- }
- /* Pop 1, push 1 */
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case FILTER_OP_UNARY_BIT_NOT:
- {
- /* Pop 1, push 1 */
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case FILTER_OP_UNARY_PLUS_S64:
- case FILTER_OP_UNARY_MINUS_S64:
- case FILTER_OP_UNARY_NOT_S64:
- case FILTER_OP_UNARY_PLUS_DOUBLE:
- case FILTER_OP_UNARY_MINUS_DOUBLE:
- case FILTER_OP_UNARY_NOT_DOUBLE:
- {
- /* Pop 1, push 1 */
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- /* logical */
- case FILTER_OP_AND:
- case FILTER_OP_OR:
- {
- /* Continue to next instruction */
- /* Pop 1 when jump not taken */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct logical_op);
- break;
- }
-
- /* load field ref */
- case FILTER_OP_LOAD_FIELD_REF:
- {
- ERR("Unknown field ref type\n");
- ret = -EINVAL;
- goto end;
- }
- /* get context ref */
- case FILTER_OP_GET_CONTEXT_REF:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_UNKNOWN;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
- case FILTER_OP_LOAD_FIELD_REF_STRING:
- case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
- case FILTER_OP_GET_CONTEXT_REF_STRING:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STRING;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
- case FILTER_OP_LOAD_FIELD_REF_S64:
- case FILTER_OP_GET_CONTEXT_REF_S64:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
- case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
- case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_DOUBLE;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
-
- /* load from immediate operand */
- case FILTER_OP_LOAD_STRING:
- {
- struct load_op *insn = (struct load_op *) pc;
-
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STRING;
- next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
- break;
- }
-
- case FILTER_OP_LOAD_STAR_GLOB_STRING:
- {
- struct load_op *insn = (struct load_op *) pc;
-
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
- next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
- break;
- }
-
- case FILTER_OP_LOAD_S64:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct load_op)
- + sizeof(struct literal_numeric);
- break;
- }
-
- case FILTER_OP_LOAD_DOUBLE:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_DOUBLE;
- next_pc += sizeof(struct load_op)
- + sizeof(struct literal_double);
- break;
- }
-
- /* cast */
- case FILTER_OP_CAST_TO_S64:
- {
- struct cast_op *insn = (struct cast_op *) pc;
-
- switch (vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- ERR("Cast op can only be applied to numeric or floating point registers\n");
- ret = -EINVAL;
- goto end;
- case REG_S64:
- insn->op = FILTER_OP_CAST_NOP;
- break;
- case REG_DOUBLE:
- insn->op = FILTER_OP_CAST_DOUBLE_TO_S64;
- break;
- case REG_UNKNOWN:
- break;
- }
- /* Pop 1, push 1 */
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct cast_op);
- break;
- }
- case FILTER_OP_CAST_DOUBLE_TO_S64:
- {
- /* Pop 1, push 1 */
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct cast_op);
- break;
- }
- case FILTER_OP_CAST_NOP:
- {
- next_pc += sizeof(struct cast_op);
- break;
- }
-
- /*
- * Instructions for recursive traversal through composed types.
- */
- case FILTER_OP_GET_CONTEXT_ROOT:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_PTR;
- vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
- next_pc += sizeof(struct load_op);
- break;
- }
- case FILTER_OP_GET_APP_CONTEXT_ROOT:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_PTR;
- vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
- next_pc += sizeof(struct load_op);
- break;
- }
- case FILTER_OP_GET_PAYLOAD_ROOT:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_PTR;
- vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case FILTER_OP_LOAD_FIELD:
- {
- struct load_op *insn = (struct load_op *) pc;
-
- assert(vstack_ax(stack)->type == REG_PTR);
- /* Pop 1, push 1 */
- ret = specialize_load_field(vstack_ax(stack), insn);
- if (ret)
- goto end;
-
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case FILTER_OP_LOAD_FIELD_S8:
- case FILTER_OP_LOAD_FIELD_S16:
- case FILTER_OP_LOAD_FIELD_S32:
- case FILTER_OP_LOAD_FIELD_S64:
- case FILTER_OP_LOAD_FIELD_U8:
- case FILTER_OP_LOAD_FIELD_U16:
- case FILTER_OP_LOAD_FIELD_U32:
- case FILTER_OP_LOAD_FIELD_U64:
- {
- /* Pop 1, push 1 */
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case FILTER_OP_LOAD_FIELD_STRING:
- case FILTER_OP_LOAD_FIELD_SEQUENCE:
- {
- /* Pop 1, push 1 */
- vstack_ax(stack)->type = REG_STRING;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case FILTER_OP_LOAD_FIELD_DOUBLE:
- {
- /* Pop 1, push 1 */
- vstack_ax(stack)->type = REG_DOUBLE;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case FILTER_OP_GET_SYMBOL:
- {
- struct load_op *insn = (struct load_op *) pc;
-
- dbg_printf("op get symbol\n");
- switch (vstack_ax(stack)->load.type) {
- case LOAD_OBJECT:
- ERR("Nested fields not implemented yet.");
- ret = -EINVAL;
- goto end;
- case LOAD_ROOT_CONTEXT:
- /* Lookup context field. */
- ret = specialize_context_lookup(session,
- bytecode, insn,
- &vstack_ax(stack)->load);
- if (ret)
- goto end;
- break;
- case LOAD_ROOT_APP_CONTEXT:
- /* Lookup app context field. */
- ret = specialize_app_context_lookup(session,
- bytecode, insn,
- &vstack_ax(stack)->load);
- if (ret)
- goto end;
- break;
- case LOAD_ROOT_PAYLOAD:
- /* Lookup event payload field. */
- ret = specialize_event_payload_lookup(event,
- bytecode, insn,
- &vstack_ax(stack)->load);
- if (ret)
- goto end;
- break;
- }
- next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
- break;
- }
-
- case FILTER_OP_GET_SYMBOL_FIELD:
- {
-		/* Only generated by the specialize phase; must not appear in input bytecode. */
- ret = -EINVAL;
- goto end;
- }
-
- case FILTER_OP_GET_INDEX_U16:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
-
- dbg_printf("op get index u16\n");
- /* Pop 1, push 1 */
- ret = specialize_get_index(bytecode, insn, index->index,
- vstack_ax(stack), sizeof(*index));
- if (ret)
- goto end;
- next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
- break;
- }
-
- case FILTER_OP_GET_INDEX_U64:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
-
- dbg_printf("op get index u64\n");
- /* Pop 1, push 1 */
- ret = specialize_get_index(bytecode, insn, index->index,
- vstack_ax(stack), sizeof(*index));
- if (ret)
- goto end;
- next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
- break;
- }
-
- }
- }
-end:
- return ret;
-}
+++ /dev/null
-/*
- * lttng-filter-validator.c
- *
- * LTTng UST filter bytecode validator.
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#define _LGPL_SOURCE
-#include <stddef.h>
-#include <stdint.h>
-#include <time.h>
-
-#include <urcu-bp.h>
-#include <urcu/rculfhash.h>
-
-#include "lttng-filter.h"
-#include "lttng-hash-helper.h"
-#include "string-utils.h"
-
-/*
- * Number of merge points used to size the hash table. The hash table is
- * initialized to that size and is never resized, because resizing would
- * trigger RCU worker thread execution: if the number of merge points
- * exceeds this value, lookups fall back to linear traversal within the
- * buckets.
- */
-#define DEFAULT_NR_MERGE_POINTS 128
-#define MIN_NR_BUCKETS 128
-#define MAX_NR_BUCKETS 128
-
-/* merge point table node */
-struct lfht_mp_node {
- struct cds_lfht_node node;
-
- /* Context at merge point */
- struct vstack stack;
- unsigned long target_pc;
-};
-
-static unsigned long lttng_hash_seed;
-static unsigned int lttng_hash_seed_ready;
-
-static
-int lttng_hash_match(struct cds_lfht_node *node, const void *key)
-{
- struct lfht_mp_node *mp_node =
- caa_container_of(node, struct lfht_mp_node, node);
- unsigned long key_pc = (unsigned long) key;
-
- if (mp_node->target_pc == key_pc)
- return 1;
- else
- return 0;
-}
-
-static
-int merge_points_compare(const struct vstack *stacka,
- const struct vstack *stackb)
-{
- int i, len;
-
- if (stacka->top != stackb->top)
- return 1;
- len = stacka->top + 1;
- assert(len >= 0);
- for (i = 0; i < len; i++) {
- if (stacka->e[i].type != REG_UNKNOWN
- && stackb->e[i].type != REG_UNKNOWN
- && stacka->e[i].type != stackb->e[i].type)
- return 1;
- }
- return 0;
-}
-
-static
-int merge_point_add_check(struct cds_lfht *ht, unsigned long target_pc,
- const struct vstack *stack)
-{
- struct lfht_mp_node *node;
- unsigned long hash = lttng_hash_mix((const char *) target_pc,
- sizeof(target_pc),
- lttng_hash_seed);
- struct cds_lfht_node *ret;
-
- dbg_printf("Filter: adding merge point at offset %lu, hash %lu\n",
- target_pc, hash);
- node = zmalloc(sizeof(struct lfht_mp_node));
- if (!node)
- return -ENOMEM;
- node->target_pc = target_pc;
- memcpy(&node->stack, stack, sizeof(node->stack));
- ret = cds_lfht_add_unique(ht, hash, lttng_hash_match,
- (const char *) target_pc, &node->node);
- if (ret != &node->node) {
- struct lfht_mp_node *ret_mp =
- caa_container_of(ret, struct lfht_mp_node, node);
-
- /* Key already present */
- dbg_printf("Filter: compare merge points for offset %lu, hash %lu\n",
- target_pc, hash);
- free(node);
- if (merge_points_compare(stack, &ret_mp->stack)) {
- ERR("Merge points differ for offset %lu\n",
- target_pc);
- return -EINVAL;
- }
- }
- return 0;
-}
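-
-/*
- * Example (illustrative): in a filter such as
- * "$ctx.cpu_id == 0 && msg == \"x\"", the short-circuit jump of the
- * logical && targets the instruction following its right-hand operand.
- * That target is recorded here as a merge point: the fall-through path
- * and the jump path must reach it with compatible stack typing,
- * otherwise validation fails with -EINVAL.
- */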
-
-/*
- * Binary comparators use top of stack and top of stack -1.
- * Return 0 if typing is known to match, 1 if typing is dynamic
- * (unknown), negative error value on error.
- */
-static
-int bin_op_compare_check(struct vstack *stack, filter_opcode_t opcode,
- const char *str)
-{
- if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
- goto error_empty;
-
- switch (vstack_ax(stack)->type) {
- default:
- goto error_type;
-
- case REG_UNKNOWN:
- goto unknown;
- case REG_STRING:
- switch (vstack_bx(stack)->type) {
- default:
- goto error_type;
-
- case REG_UNKNOWN:
- goto unknown;
- case REG_STRING:
- break;
- case REG_STAR_GLOB_STRING:
- if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) {
- goto error_mismatch;
- }
- break;
- case REG_S64:
- case REG_DOUBLE:
- goto error_mismatch;
- }
- break;
- case REG_STAR_GLOB_STRING:
- switch (vstack_bx(stack)->type) {
- default:
- goto error_type;
-
- case REG_UNKNOWN:
- goto unknown;
- case REG_STRING:
- if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) {
- goto error_mismatch;
- }
- break;
- case REG_STAR_GLOB_STRING:
- case REG_S64:
- case REG_DOUBLE:
- goto error_mismatch;
- }
- break;
- case REG_S64:
- case REG_DOUBLE:
- switch (vstack_bx(stack)->type) {
- default:
- goto error_type;
-
- case REG_UNKNOWN:
- goto unknown;
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- goto error_mismatch;
- case REG_S64:
- case REG_DOUBLE:
- break;
- }
- break;
- }
- return 0;
-
-unknown:
- return 1;
-
-error_mismatch:
- ERR("type mismatch for '%s' binary operator\n", str);
- return -EINVAL;
-
-error_empty:
- ERR("empty stack for '%s' binary operator\n", str);
- return -EINVAL;
-
-error_type:
- ERR("unknown type for '%s' binary operator\n", str);
- return -EINVAL;
-}
-
-/*
- * Binary bitwise operators use top of stack and top of stack -1.
- * Return 0 if typing is known to match, 1 if typing is dynamic
- * (unknown), negative error value on error.
- */
-static
-int bin_op_bitwise_check(struct vstack *stack, filter_opcode_t opcode,
- const char *str)
-{
- if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
- goto error_empty;
-
- switch (vstack_ax(stack)->type) {
- default:
- goto error_type;
-
- case REG_UNKNOWN:
- goto unknown;
- case REG_S64:
- switch (vstack_bx(stack)->type) {
- default:
- goto error_type;
-
- case REG_UNKNOWN:
- goto unknown;
- case REG_S64:
- break;
- }
- break;
- }
- return 0;
-
-unknown:
- return 1;
-
-error_empty:
- ERR("empty stack for '%s' binary operator\n", str);
- return -EINVAL;
-
-error_type:
- ERR("unknown type for '%s' binary operator\n", str);
- return -EINVAL;
-}
-
-static
-int validate_get_symbol(struct bytecode_runtime *bytecode,
- const struct get_symbol *sym)
-{
- const char *str, *str_limit;
- size_t len_limit;
-
- if (sym->offset >= bytecode->p.bc->bc.len - bytecode->p.bc->bc.reloc_offset)
- return -EINVAL;
-
- str = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + sym->offset;
- str_limit = bytecode->p.bc->bc.data + bytecode->p.bc->bc.len;
- len_limit = str_limit - str;
- if (strnlen(str, len_limit) == len_limit)
- return -EINVAL;
- return 0;
-}
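-
-/*
- * Layout note (illustrative): symbol names live in the bytecode data
- * area starting at reloc_offset, stored as consecutive NUL-terminated
- * strings. validate_get_symbol() checks both that sym->offset lands
- * inside that area and that a terminating NUL exists before the end of
- * the bytecode, so later string accesses cannot overrun.
- */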
-
-/*
- * Validate bytecode range overflow within the validation pass.
- * Called for each instruction encountered.
- */
-static
-int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
- char *start_pc, char *pc)
-{
- int ret = 0;
-
- switch (*(filter_opcode_t *) pc) {
- case FILTER_OP_UNKNOWN:
- default:
- {
- ERR("unknown bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- break;
- }
-
- case FILTER_OP_RETURN:
- case FILTER_OP_RETURN_S64:
- {
- if (unlikely(pc + sizeof(struct return_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- /* binary */
- case FILTER_OP_MUL:
- case FILTER_OP_DIV:
- case FILTER_OP_MOD:
- case FILTER_OP_PLUS:
- case FILTER_OP_MINUS:
- {
- ERR("unsupported bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- break;
- }
-
- case FILTER_OP_EQ:
- case FILTER_OP_NE:
- case FILTER_OP_GT:
- case FILTER_OP_LT:
- case FILTER_OP_GE:
- case FILTER_OP_LE:
- case FILTER_OP_EQ_STRING:
- case FILTER_OP_NE_STRING:
- case FILTER_OP_GT_STRING:
- case FILTER_OP_LT_STRING:
- case FILTER_OP_GE_STRING:
- case FILTER_OP_LE_STRING:
- case FILTER_OP_EQ_STAR_GLOB_STRING:
- case FILTER_OP_NE_STAR_GLOB_STRING:
- case FILTER_OP_EQ_S64:
- case FILTER_OP_NE_S64:
- case FILTER_OP_GT_S64:
- case FILTER_OP_LT_S64:
- case FILTER_OP_GE_S64:
- case FILTER_OP_LE_S64:
- case FILTER_OP_EQ_DOUBLE:
- case FILTER_OP_NE_DOUBLE:
- case FILTER_OP_GT_DOUBLE:
- case FILTER_OP_LT_DOUBLE:
- case FILTER_OP_GE_DOUBLE:
- case FILTER_OP_LE_DOUBLE:
- case FILTER_OP_EQ_DOUBLE_S64:
- case FILTER_OP_NE_DOUBLE_S64:
- case FILTER_OP_GT_DOUBLE_S64:
- case FILTER_OP_LT_DOUBLE_S64:
- case FILTER_OP_GE_DOUBLE_S64:
- case FILTER_OP_LE_DOUBLE_S64:
- case FILTER_OP_EQ_S64_DOUBLE:
- case FILTER_OP_NE_S64_DOUBLE:
- case FILTER_OP_GT_S64_DOUBLE:
- case FILTER_OP_LT_S64_DOUBLE:
- case FILTER_OP_GE_S64_DOUBLE:
- case FILTER_OP_LE_S64_DOUBLE:
- case FILTER_OP_BIT_RSHIFT:
- case FILTER_OP_BIT_LSHIFT:
- case FILTER_OP_BIT_AND:
- case FILTER_OP_BIT_OR:
- case FILTER_OP_BIT_XOR:
- {
- if (unlikely(pc + sizeof(struct binary_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- /* unary */
- case FILTER_OP_UNARY_PLUS:
- case FILTER_OP_UNARY_MINUS:
- case FILTER_OP_UNARY_NOT:
- case FILTER_OP_UNARY_PLUS_S64:
- case FILTER_OP_UNARY_MINUS_S64:
- case FILTER_OP_UNARY_NOT_S64:
- case FILTER_OP_UNARY_PLUS_DOUBLE:
- case FILTER_OP_UNARY_MINUS_DOUBLE:
- case FILTER_OP_UNARY_NOT_DOUBLE:
- case FILTER_OP_UNARY_BIT_NOT:
- {
- if (unlikely(pc + sizeof(struct unary_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- /* logical */
- case FILTER_OP_AND:
- case FILTER_OP_OR:
- {
- if (unlikely(pc + sizeof(struct logical_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- /* load field ref */
- case FILTER_OP_LOAD_FIELD_REF:
- {
- ERR("Unknown field ref type\n");
- ret = -EINVAL;
- break;
- }
-
- /* get context ref */
- case FILTER_OP_GET_CONTEXT_REF:
- case FILTER_OP_LOAD_FIELD_REF_STRING:
- case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
- case FILTER_OP_LOAD_FIELD_REF_S64:
- case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
- case FILTER_OP_GET_CONTEXT_REF_STRING:
- case FILTER_OP_GET_CONTEXT_REF_S64:
- case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
- {
- if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- /* load from immediate operand */
- case FILTER_OP_LOAD_STRING:
- case FILTER_OP_LOAD_STAR_GLOB_STRING:
- {
- struct load_op *insn = (struct load_op *) pc;
- uint32_t str_len, maxlen;
-
- if (unlikely(pc + sizeof(struct load_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- break;
- }
-
- maxlen = start_pc + bytecode->len - pc - sizeof(struct load_op);
- str_len = strnlen(insn->data, maxlen);
- if (unlikely(str_len >= maxlen)) {
- /* Final '\0' not found within range */
- ret = -ERANGE;
- }
- break;
- }
-
- case FILTER_OP_LOAD_S64:
- {
- if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- case FILTER_OP_LOAD_DOUBLE:
- {
- if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_double)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- case FILTER_OP_CAST_TO_S64:
- case FILTER_OP_CAST_DOUBLE_TO_S64:
- case FILTER_OP_CAST_NOP:
- {
- if (unlikely(pc + sizeof(struct cast_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- /*
- * Instructions for recursive traversal through composed types.
- */
- case FILTER_OP_GET_CONTEXT_ROOT:
- case FILTER_OP_GET_APP_CONTEXT_ROOT:
- case FILTER_OP_GET_PAYLOAD_ROOT:
- case FILTER_OP_LOAD_FIELD:
- case FILTER_OP_LOAD_FIELD_S8:
- case FILTER_OP_LOAD_FIELD_S16:
- case FILTER_OP_LOAD_FIELD_S32:
- case FILTER_OP_LOAD_FIELD_S64:
- case FILTER_OP_LOAD_FIELD_U8:
- case FILTER_OP_LOAD_FIELD_U16:
- case FILTER_OP_LOAD_FIELD_U32:
- case FILTER_OP_LOAD_FIELD_U64:
- case FILTER_OP_LOAD_FIELD_STRING:
- case FILTER_OP_LOAD_FIELD_SEQUENCE:
- case FILTER_OP_LOAD_FIELD_DOUBLE:
- if (unlikely(pc + sizeof(struct load_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
-
- case FILTER_OP_GET_SYMBOL:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_symbol *sym = (struct get_symbol *) insn->data;
-
- if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_symbol)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- break;
- }
- ret = validate_get_symbol(bytecode, sym);
- break;
- }
-
- case FILTER_OP_GET_SYMBOL_FIELD:
- ERR("Unexpected get symbol field");
- ret = -EINVAL;
- break;
-
- case FILTER_OP_GET_INDEX_U16:
- if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u16)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
-
- case FILTER_OP_GET_INDEX_U64:
- if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u64)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- return ret;
-}
-
-static
-unsigned long delete_all_nodes(struct cds_lfht *ht)
-{
- struct cds_lfht_iter iter;
- struct lfht_mp_node *node;
- unsigned long nr_nodes = 0;
-
- cds_lfht_for_each_entry(ht, &iter, node, node) {
- int ret;
-
- ret = cds_lfht_del(ht, cds_lfht_iter_get_node(&iter));
- assert(!ret);
- /* note: this hash table is never used concurrently */
- free(node);
- nr_nodes++;
- }
- return nr_nodes;
-}
-
-/*
- * Return value:
- * >=0: success
- * <0: error
- */
-static
-int validate_instruction_context(struct bytecode_runtime *bytecode,
- struct vstack *stack,
- char *start_pc,
- char *pc)
-{
- int ret = 0;
- const filter_opcode_t opcode = *(filter_opcode_t *) pc;
-
- switch (opcode) {
- case FILTER_OP_UNKNOWN:
- default:
- {
- ERR("unknown bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
- }
-
- case FILTER_OP_RETURN:
- case FILTER_OP_RETURN_S64:
- {
- goto end;
- }
-
- /* binary */
- case FILTER_OP_MUL:
- case FILTER_OP_DIV:
- case FILTER_OP_MOD:
- case FILTER_OP_PLUS:
- case FILTER_OP_MINUS:
- {
- ERR("unsupported bytecode op %u\n",
- (unsigned int) opcode);
- ret = -EINVAL;
- goto end;
- }
-
- case FILTER_OP_EQ:
- {
- ret = bin_op_compare_check(stack, opcode, "==");
- if (ret < 0)
- goto end;
- break;
- }
- case FILTER_OP_NE:
- {
- ret = bin_op_compare_check(stack, opcode, "!=");
- if (ret < 0)
- goto end;
- break;
- }
- case FILTER_OP_GT:
- {
- ret = bin_op_compare_check(stack, opcode, ">");
- if (ret < 0)
- goto end;
- break;
- }
- case FILTER_OP_LT:
- {
- ret = bin_op_compare_check(stack, opcode, "<");
- if (ret < 0)
- goto end;
- break;
- }
- case FILTER_OP_GE:
- {
- ret = bin_op_compare_check(stack, opcode, ">=");
- if (ret < 0)
- goto end;
- break;
- }
- case FILTER_OP_LE:
- {
- ret = bin_op_compare_check(stack, opcode, "<=");
- if (ret < 0)
- goto end;
- break;
- }
-
- case FILTER_OP_EQ_STRING:
- case FILTER_OP_NE_STRING:
- case FILTER_OP_GT_STRING:
- case FILTER_OP_LT_STRING:
- case FILTER_OP_GE_STRING:
- case FILTER_OP_LE_STRING:
- {
- if (!vstack_ax(stack) || !vstack_bx(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_STRING
- || vstack_bx(stack)->type != REG_STRING) {
- ERR("Unexpected register type for string comparator\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- case FILTER_OP_EQ_STAR_GLOB_STRING:
- case FILTER_OP_NE_STAR_GLOB_STRING:
- {
- if (!vstack_ax(stack) || !vstack_bx(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_STAR_GLOB_STRING
- && vstack_bx(stack)->type != REG_STAR_GLOB_STRING) {
- ERR("Unexpected register type for globbing pattern comparator\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- case FILTER_OP_EQ_S64:
- case FILTER_OP_NE_S64:
- case FILTER_OP_GT_S64:
- case FILTER_OP_LT_S64:
- case FILTER_OP_GE_S64:
- case FILTER_OP_LE_S64:
- {
- if (!vstack_ax(stack) || !vstack_bx(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_S64
- || vstack_bx(stack)->type != REG_S64) {
- ERR("Unexpected register type for s64 comparator\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- case FILTER_OP_EQ_DOUBLE:
- case FILTER_OP_NE_DOUBLE:
- case FILTER_OP_GT_DOUBLE:
- case FILTER_OP_LT_DOUBLE:
- case FILTER_OP_GE_DOUBLE:
- case FILTER_OP_LE_DOUBLE:
- {
- if (!vstack_ax(stack) || !vstack_bx(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_DOUBLE && vstack_bx(stack)->type != REG_DOUBLE) {
- ERR("Double operator should have two double registers\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- case FILTER_OP_EQ_DOUBLE_S64:
- case FILTER_OP_NE_DOUBLE_S64:
- case FILTER_OP_GT_DOUBLE_S64:
- case FILTER_OP_LT_DOUBLE_S64:
- case FILTER_OP_GE_DOUBLE_S64:
- case FILTER_OP_LE_DOUBLE_S64:
- {
- if (!vstack_ax(stack) || !vstack_bx(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_S64 && vstack_bx(stack)->type != REG_DOUBLE) {
- ERR("Double-S64 operator has unexpected register types\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- case FILTER_OP_EQ_S64_DOUBLE:
- case FILTER_OP_NE_S64_DOUBLE:
- case FILTER_OP_GT_S64_DOUBLE:
- case FILTER_OP_LT_S64_DOUBLE:
- case FILTER_OP_GE_S64_DOUBLE:
- case FILTER_OP_LE_S64_DOUBLE:
- {
- if (!vstack_ax(stack) || !vstack_bx(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_DOUBLE && vstack_bx(stack)->type != REG_S64) {
- ERR("S64-Double operator has unexpected register types\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- case FILTER_OP_BIT_RSHIFT:
- ret = bin_op_bitwise_check(stack, opcode, ">>");
- if (ret < 0)
- goto end;
- break;
- case FILTER_OP_BIT_LSHIFT:
- ret = bin_op_bitwise_check(stack, opcode, "<<");
- if (ret < 0)
- goto end;
- break;
- case FILTER_OP_BIT_AND:
- ret = bin_op_bitwise_check(stack, opcode, "&");
- if (ret < 0)
- goto end;
- break;
- case FILTER_OP_BIT_OR:
- ret = bin_op_bitwise_check(stack, opcode, "|");
- if (ret < 0)
- goto end;
- break;
- case FILTER_OP_BIT_XOR:
- ret = bin_op_bitwise_check(stack, opcode, "^");
- if (ret < 0)
- goto end;
- break;
-
- /* unary */
- case FILTER_OP_UNARY_PLUS:
- case FILTER_OP_UNARY_MINUS:
- case FILTER_OP_UNARY_NOT:
- {
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- ERR("Unary op can only be applied to numeric or floating point registers\n");
- ret = -EINVAL;
- goto end;
- case REG_S64:
- break;
- case REG_DOUBLE:
- break;
- case REG_UNKNOWN:
- break;
- }
- break;
- }
- case FILTER_OP_UNARY_BIT_NOT:
- {
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- case REG_DOUBLE:
- ERR("Unary bitwise op can only be applied to numeric registers\n");
- ret = -EINVAL;
- goto end;
- case REG_S64:
- break;
- case REG_UNKNOWN:
- break;
- }
- break;
- }
-
- case FILTER_OP_UNARY_PLUS_S64:
- case FILTER_OP_UNARY_MINUS_S64:
- case FILTER_OP_UNARY_NOT_S64:
- {
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_S64) {
- ERR("Invalid register type\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- case FILTER_OP_UNARY_PLUS_DOUBLE:
- case FILTER_OP_UNARY_MINUS_DOUBLE:
- case FILTER_OP_UNARY_NOT_DOUBLE:
- {
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_DOUBLE) {
- ERR("Invalid register type\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- /* logical */
- case FILTER_OP_AND:
- case FILTER_OP_OR:
- {
- struct logical_op *insn = (struct logical_op *) pc;
-
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_S64
- && vstack_ax(stack)->type != REG_UNKNOWN) {
- ERR("Logical comparator expects S64 or dynamic register\n");
- ret = -EINVAL;
- goto end;
- }
-
- dbg_printf("Validate jumping to bytecode offset %u\n",
- (unsigned int) insn->skip_offset);
- if (unlikely(start_pc + insn->skip_offset <= pc)) {
- ERR("Loops are not allowed in bytecode\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- /* load field ref */
- case FILTER_OP_LOAD_FIELD_REF:
- {
- ERR("Unknown field ref type\n");
- ret = -EINVAL;
- goto end;
- }
- case FILTER_OP_LOAD_FIELD_REF_STRING:
- case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("Validate load field ref offset %u type string\n",
- ref->offset);
- break;
- }
- case FILTER_OP_LOAD_FIELD_REF_S64:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("Validate load field ref offset %u type s64\n",
- ref->offset);
- break;
- }
- case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("Validate load field ref offset %u type double\n",
- ref->offset);
- break;
- }
-
- /* load from immediate operand */
- case FILTER_OP_LOAD_STRING:
- case FILTER_OP_LOAD_STAR_GLOB_STRING:
- {
- break;
- }
-
- case FILTER_OP_LOAD_S64:
- {
- break;
- }
-
- case FILTER_OP_LOAD_DOUBLE:
- {
- break;
- }
-
- case FILTER_OP_CAST_TO_S64:
- case FILTER_OP_CAST_DOUBLE_TO_S64:
- {
- struct cast_op *insn = (struct cast_op *) pc;
-
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- ERR("Cast op can only be applied to numeric or floating point registers\n");
- ret = -EINVAL;
- goto end;
- case REG_S64:
- break;
- case REG_DOUBLE:
- break;
- case REG_UNKNOWN:
- break;
- }
- if (insn->op == FILTER_OP_CAST_DOUBLE_TO_S64) {
- if (vstack_ax(stack)->type != REG_DOUBLE) {
- ERR("Cast expects double\n");
- ret = -EINVAL;
- goto end;
- }
- }
- break;
- }
- case FILTER_OP_CAST_NOP:
- {
- break;
- }
-
- /* get context ref */
- case FILTER_OP_GET_CONTEXT_REF:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("Validate get context ref offset %u type dynamic\n",
- ref->offset);
- break;
- }
- case FILTER_OP_GET_CONTEXT_REF_STRING:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("Validate get context ref offset %u type string\n",
- ref->offset);
- break;
- }
- case FILTER_OP_GET_CONTEXT_REF_S64:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("Validate get context ref offset %u type s64\n",
- ref->offset);
- break;
- }
- case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("Validate get context ref offset %u type double\n",
- ref->offset);
- break;
- }
-
- /*
- * Instructions for recursive traversal through composed types.
- */
- case FILTER_OP_GET_CONTEXT_ROOT:
- {
- dbg_printf("Validate get context root\n");
- break;
- }
- case FILTER_OP_GET_APP_CONTEXT_ROOT:
- {
- dbg_printf("Validate get app context root\n");
- break;
- }
- case FILTER_OP_GET_PAYLOAD_ROOT:
- {
- dbg_printf("Validate get payload root\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD:
- {
- /*
- * We tolerate that field type is unknown at validation,
- * because we are performing the load specialization in
- * a phase after validation.
- */
- dbg_printf("Validate load field\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_S8:
- {
- dbg_printf("Validate load field s8\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_S16:
- {
- dbg_printf("Validate load field s16\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_S32:
- {
- dbg_printf("Validate load field s32\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_S64:
- {
- dbg_printf("Validate load field s64\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_U8:
- {
- dbg_printf("Validate load field u8\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_U16:
- {
- dbg_printf("Validate load field u16\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_U32:
- {
- dbg_printf("Validate load field u32\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_U64:
- {
- dbg_printf("Validate load field u64\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_STRING:
- {
- dbg_printf("Validate load field string\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_SEQUENCE:
- {
- dbg_printf("Validate load field sequence\n");
- break;
- }
- case FILTER_OP_LOAD_FIELD_DOUBLE:
- {
- dbg_printf("Validate load field double\n");
- break;
- }
-
- case FILTER_OP_GET_SYMBOL:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_symbol *sym = (struct get_symbol *) insn->data;
-
- dbg_printf("Validate get symbol offset %u\n", sym->offset);
- break;
- }
-
- case FILTER_OP_GET_SYMBOL_FIELD:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_symbol *sym = (struct get_symbol *) insn->data;
-
- dbg_printf("Validate get symbol field offset %u\n", sym->offset);
- break;
- }
-
- case FILTER_OP_GET_INDEX_U16:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_index_u16 *get_index = (struct get_index_u16 *) insn->data;
-
- dbg_printf("Validate get index u16 index %u\n", get_index->index);
- break;
- }
-
- case FILTER_OP_GET_INDEX_U64:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_index_u64 *get_index = (struct get_index_u64 *) insn->data;
-
- dbg_printf("Validate get index u64 index %" PRIu64 "\n", get_index->index);
- break;
- }
- }
-end:
- return ret;
-}
-
-/*
- * Return value:
- * 0: success
- * <0: error
- */
-static
-int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
- struct cds_lfht *merge_points,
- struct vstack *stack,
- char *start_pc,
- char *pc)
-{
- int ret;
- unsigned long target_pc = pc - start_pc;
- struct cds_lfht_iter iter;
- struct cds_lfht_node *node;
- struct lfht_mp_node *mp_node;
- unsigned long hash;
-
- /* Validate the context resulting from the previous instruction */
- ret = validate_instruction_context(bytecode, stack, start_pc, pc);
- if (ret < 0)
- return ret;
-
- /* Validate merge points */
- hash = lttng_hash_mix((const char *) target_pc, sizeof(target_pc),
- lttng_hash_seed);
- cds_lfht_lookup(merge_points, hash, lttng_hash_match,
- (const char *) target_pc, &iter);
- node = cds_lfht_iter_get_node(&iter);
- if (node) {
- mp_node = caa_container_of(node, struct lfht_mp_node, node);
-
- dbg_printf("Filter: validate merge point at offset %lu\n",
- target_pc);
- if (merge_points_compare(stack, &mp_node->stack)) {
- ERR("Merge points differ for offset %lu\n",
- target_pc);
- return -EINVAL;
- }
- /* Once validated, we can remove the merge point */
- dbg_printf("Filter: remove merge point at offset %lu\n",
- target_pc);
- ret = cds_lfht_del(merge_points, node);
- assert(!ret);
- }
- return 0;
-}
-
-/*
- * Return value:
- * >0: going to next insn.
- * 0: success, stop iteration.
- * <0: error
- */
-static
-int exec_insn(struct bytecode_runtime *bytecode,
- struct cds_lfht *merge_points,
- struct vstack *stack,
- char **_next_pc,
- char *pc)
-{
- int ret = 1;
- char *next_pc = *_next_pc;
-
- switch (*(filter_opcode_t *) pc) {
- case FILTER_OP_UNKNOWN:
- default:
- {
- ERR("unknown bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
- }
-
- case FILTER_OP_RETURN:
- {
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_UNKNOWN:
- break;
- default:
- ERR("Unexpected register type %d at end of bytecode\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- ret = 0;
- goto end;
- }
- case FILTER_OP_RETURN_S64:
- {
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- break;
- default:
- case REG_UNKNOWN:
- ERR("Unexpected register type %d at end of bytecode\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- ret = 0;
- goto end;
- }
-
- /* binary */
- case FILTER_OP_MUL:
- case FILTER_OP_DIV:
- case FILTER_OP_MOD:
- case FILTER_OP_PLUS:
- case FILTER_OP_MINUS:
- {
- ERR("unsupported bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
- }
-
- case FILTER_OP_EQ:
- case FILTER_OP_NE:
- case FILTER_OP_GT:
- case FILTER_OP_LT:
- case FILTER_OP_GE:
- case FILTER_OP_LE:
- case FILTER_OP_EQ_STRING:
- case FILTER_OP_NE_STRING:
- case FILTER_OP_GT_STRING:
- case FILTER_OP_LT_STRING:
- case FILTER_OP_GE_STRING:
- case FILTER_OP_LE_STRING:
- case FILTER_OP_EQ_STAR_GLOB_STRING:
- case FILTER_OP_NE_STAR_GLOB_STRING:
- case FILTER_OP_EQ_S64:
- case FILTER_OP_NE_S64:
- case FILTER_OP_GT_S64:
- case FILTER_OP_LT_S64:
- case FILTER_OP_GE_S64:
- case FILTER_OP_LE_S64:
- case FILTER_OP_EQ_DOUBLE:
- case FILTER_OP_NE_DOUBLE:
- case FILTER_OP_GT_DOUBLE:
- case FILTER_OP_LT_DOUBLE:
- case FILTER_OP_GE_DOUBLE:
- case FILTER_OP_LE_DOUBLE:
- case FILTER_OP_EQ_DOUBLE_S64:
- case FILTER_OP_NE_DOUBLE_S64:
- case FILTER_OP_GT_DOUBLE_S64:
- case FILTER_OP_LT_DOUBLE_S64:
- case FILTER_OP_GE_DOUBLE_S64:
- case FILTER_OP_LE_DOUBLE_S64:
- case FILTER_OP_EQ_S64_DOUBLE:
- case FILTER_OP_NE_S64_DOUBLE:
- case FILTER_OP_GT_S64_DOUBLE:
- case FILTER_OP_LT_S64_DOUBLE:
- case FILTER_OP_GE_S64_DOUBLE:
- case FILTER_OP_LE_S64_DOUBLE:
- case FILTER_OP_BIT_RSHIFT:
- case FILTER_OP_BIT_LSHIFT:
- case FILTER_OP_BIT_AND:
- case FILTER_OP_BIT_OR:
- case FILTER_OP_BIT_XOR:
- {
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_DOUBLE:
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- case REG_UNKNOWN:
- break;
- default:
- ERR("Unexpected register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- /* unary */
- case FILTER_OP_UNARY_PLUS:
- case FILTER_OP_UNARY_MINUS:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_UNKNOWN:
- case REG_DOUBLE:
- case REG_S64:
- break;
- default:
- ERR("Unexpected register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_UNKNOWN;
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case FILTER_OP_UNARY_PLUS_S64:
- case FILTER_OP_UNARY_MINUS_S64:
- case FILTER_OP_UNARY_NOT_S64:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- break;
- default:
- ERR("Unexpected register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case FILTER_OP_UNARY_NOT:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_UNKNOWN:
- case REG_DOUBLE:
- case REG_S64:
- break;
- default:
- ERR("Unexpected register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case FILTER_OP_UNARY_BIT_NOT:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_UNKNOWN:
- case REG_S64:
- break;
- case REG_DOUBLE:
- default:
- ERR("Unexpected register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case FILTER_OP_UNARY_NOT_DOUBLE:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_DOUBLE:
- break;
- default:
- ERR("Incorrect register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case FILTER_OP_UNARY_PLUS_DOUBLE:
- case FILTER_OP_UNARY_MINUS_DOUBLE:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_DOUBLE:
- break;
- default:
- ERR("Incorrect register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- vstack_ax(stack)->type = REG_DOUBLE;
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- /* logical */
- case FILTER_OP_AND:
- case FILTER_OP_OR:
- {
- struct logical_op *insn = (struct logical_op *) pc;
- int merge_ret;
-
- /* Add merge point to table */
- merge_ret = merge_point_add_check(merge_points,
- insn->skip_offset, stack);
- if (merge_ret) {
- ret = merge_ret;
- goto end;
- }
-
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- /* There is always a cast-to-s64 operation before an and/or op. */
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- break;
- default:
- ERR("Incorrect register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- /* Continue to next instruction */
- /* Pop 1 when jump not taken */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct logical_op);
- break;
- }
-
- /* load field ref */
- case FILTER_OP_LOAD_FIELD_REF:
- {
- ERR("Unknown field ref type\n");
- ret = -EINVAL;
- goto end;
- }
- /* get context ref */
- case FILTER_OP_GET_CONTEXT_REF:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_UNKNOWN;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
- case FILTER_OP_LOAD_FIELD_REF_STRING:
- case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
- case FILTER_OP_GET_CONTEXT_REF_STRING:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STRING;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
- case FILTER_OP_LOAD_FIELD_REF_S64:
- case FILTER_OP_GET_CONTEXT_REF_S64:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
- case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
- case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_DOUBLE;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
-
- /* load from immediate operand */
- case FILTER_OP_LOAD_STRING:
- {
- struct load_op *insn = (struct load_op *) pc;
-
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STRING;
- next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
- break;
- }
-
- case FILTER_OP_LOAD_STAR_GLOB_STRING:
- {
- struct load_op *insn = (struct load_op *) pc;
-
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
- next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
- break;
- }
-
- case FILTER_OP_LOAD_S64:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct load_op)
- + sizeof(struct literal_numeric);
- break;
- }
-
- case FILTER_OP_LOAD_DOUBLE:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_DOUBLE;
- next_pc += sizeof(struct load_op)
- + sizeof(struct literal_double);
- break;
- }
-
- case FILTER_OP_CAST_TO_S64:
- case FILTER_OP_CAST_DOUBLE_TO_S64:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_DOUBLE:
- case REG_UNKNOWN:
- break;
- default:
- ERR("Incorrect register type %d for cast\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct cast_op);
- break;
- }
- case FILTER_OP_CAST_NOP:
- {
- next_pc += sizeof(struct cast_op);
- break;
- }
-
- /*
- * Instructions for recursive traversal through composed types.
- */
- case FILTER_OP_GET_CONTEXT_ROOT:
- case FILTER_OP_GET_APP_CONTEXT_ROOT:
- case FILTER_OP_GET_PAYLOAD_ROOT:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_PTR;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case FILTER_OP_LOAD_FIELD:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- ERR("Expecting pointer on top of stack\n");
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_UNKNOWN;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case FILTER_OP_LOAD_FIELD_S8:
- case FILTER_OP_LOAD_FIELD_S16:
- case FILTER_OP_LOAD_FIELD_S32:
- case FILTER_OP_LOAD_FIELD_S64:
- case FILTER_OP_LOAD_FIELD_U8:
- case FILTER_OP_LOAD_FIELD_U16:
- case FILTER_OP_LOAD_FIELD_U32:
- case FILTER_OP_LOAD_FIELD_U64:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- ERR("Expecting pointer on top of stack\n");
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case FILTER_OP_LOAD_FIELD_STRING:
- case FILTER_OP_LOAD_FIELD_SEQUENCE:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- ERR("Expecting pointer on top of stack\n");
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STRING;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case FILTER_OP_LOAD_FIELD_DOUBLE:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- ERR("Expecting pointer on top of stack\n");
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_DOUBLE;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case FILTER_OP_GET_SYMBOL:
- case FILTER_OP_GET_SYMBOL_FIELD:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- ERR("Expecting pointer on top of stack\n");
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
- break;
- }
-
- case FILTER_OP_GET_INDEX_U16:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- ERR("Expecting pointer on top of stack\n");
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
- break;
- }
-
- case FILTER_OP_GET_INDEX_U64:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- ERR("Expecting pointer on top of stack\n");
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
- break;
- }
-
- }
-end:
- *_next_pc = next_pc;
- return ret;
-}
-
-/*
- * Never called concurrently (hash seed is shared).
- */
-int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode)
-{
- struct cds_lfht *merge_points;
- char *pc, *next_pc, *start_pc;
- int ret = -EINVAL;
- struct vstack stack;
-
- vstack_init(&stack);
-
- if (!lttng_hash_seed_ready) {
- lttng_hash_seed = time(NULL);
- lttng_hash_seed_ready = 1;
- }
- /*
- * Note: merge_points hash table used by single thread, and
- * never concurrently resized. Therefore, we can use it without
- * holding RCU read-side lock and free nodes without using
- * call_rcu.
- */
- merge_points = cds_lfht_new(DEFAULT_NR_MERGE_POINTS,
- MIN_NR_BUCKETS, MAX_NR_BUCKETS,
- 0, NULL);
- if (!merge_points) {
- ERR("Error allocating hash table for bytecode validation\n");
- return -ENOMEM;
- }
- start_pc = &bytecode->code[0];
- for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
- pc = next_pc) {
- ret = bytecode_validate_overflow(bytecode, start_pc, pc);
- if (ret != 0) {
- if (ret == -ERANGE)
- ERR("filter bytecode overflow\n");
- goto end;
- }
- dbg_printf("Validating op %s (%u)\n",
- print_op((unsigned int) *(filter_opcode_t *) pc),
- (unsigned int) *(filter_opcode_t *) pc);
-
- /*
- * For each instruction, validate the current context
- * (traversal of entire execution flow), and validate
- * all merge points targeting this instruction.
- */
- ret = validate_instruction_all_contexts(bytecode, merge_points,
- &stack, start_pc, pc);
- if (ret)
- goto end;
- ret = exec_insn(bytecode, merge_points, &stack, &next_pc, pc);
- if (ret <= 0)
- goto end;
- }
-end:
- if (delete_all_nodes(merge_points)) {
- if (!ret) {
- ERR("Unexpected merge points\n");
- ret = -EINVAL;
- }
- }
- if (cds_lfht_destroy(merge_points, NULL)) {
- ERR("Error destroying hash table\n");
- }
- return ret;
-}
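
The merge-point machinery used by lttng_filter_validate_bytecode() can be summarized as follows: every conditional jump (AND/OR) records the virtual stack state expected at its skip target; whenever validation reaches that target along another path, the recorded state must match, and loops (backward jumps) are rejected outright. A self-contained sketch of that invariant check, assuming a fixed-size table in place of the cds_lfht hash table, and comparing only stack depth where the real validator compares full stack entries:

	#define MAX_MERGE_POINTS	16

	struct mp_sketch {
		unsigned long target;	/* bytecode offset of the jump target */
		int stack_top;		/* expected validation stack depth there */
	};

	static struct mp_sketch mp_table[MAX_MERGE_POINTS];
	static int mp_count;

	/*
	 * Record the stack depth expected at `target`. If the target was
	 * already recorded through another path, all paths must agree,
	 * mirroring merge_points_compare() above.
	 */
	static int mp_add_check(unsigned long target, int stack_top)
	{
		int i;

		for (i = 0; i < mp_count; i++) {
			if (mp_table[i].target == target)
				return mp_table[i].stack_top == stack_top ? 0 : -1;
		}
		if (mp_count == MAX_MERGE_POINTS)
			return -1;
		mp_table[mp_count].target = target;
		mp_table[mp_count].stack_top = stack_top;
		mp_count++;
		return 0;
	}
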
+++ /dev/null
-/*
- * lttng-filter.c
- *
- * LTTng UST filter code.
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#define _LGPL_SOURCE
-#include <stddef.h>
-#include <stdint.h>
-
-#include <urcu/rculist.h>
-
-#include "lttng-filter.h"
-
-static const char *opnames[] = {
- [ FILTER_OP_UNKNOWN ] = "UNKNOWN",
-
- [ FILTER_OP_RETURN ] = "RETURN",
-
- /* binary */
- [ FILTER_OP_MUL ] = "MUL",
- [ FILTER_OP_DIV ] = "DIV",
- [ FILTER_OP_MOD ] = "MOD",
- [ FILTER_OP_PLUS ] = "PLUS",
- [ FILTER_OP_MINUS ] = "MINUS",
- [ FILTER_OP_BIT_RSHIFT ] = "BIT_RSHIFT",
- [ FILTER_OP_BIT_LSHIFT ] = "BIT_LSHIFT",
- [ FILTER_OP_BIT_AND ] = "BIT_AND",
- [ FILTER_OP_BIT_OR ] = "BIT_OR",
- [ FILTER_OP_BIT_XOR ] = "BIT_XOR",
-
- /* binary comparators */
- [ FILTER_OP_EQ ] = "EQ",
- [ FILTER_OP_NE ] = "NE",
- [ FILTER_OP_GT ] = "GT",
- [ FILTER_OP_LT ] = "LT",
- [ FILTER_OP_GE ] = "GE",
- [ FILTER_OP_LE ] = "LE",
-
- /* string binary comparators */
- [ FILTER_OP_EQ_STRING ] = "EQ_STRING",
- [ FILTER_OP_NE_STRING ] = "NE_STRING",
- [ FILTER_OP_GT_STRING ] = "GT_STRING",
- [ FILTER_OP_LT_STRING ] = "LT_STRING",
- [ FILTER_OP_GE_STRING ] = "GE_STRING",
- [ FILTER_OP_LE_STRING ] = "LE_STRING",
-
- /* s64 binary comparators */
- [ FILTER_OP_EQ_S64 ] = "EQ_S64",
- [ FILTER_OP_NE_S64 ] = "NE_S64",
- [ FILTER_OP_GT_S64 ] = "GT_S64",
- [ FILTER_OP_LT_S64 ] = "LT_S64",
- [ FILTER_OP_GE_S64 ] = "GE_S64",
- [ FILTER_OP_LE_S64 ] = "LE_S64",
-
- /* double binary comparators */
- [ FILTER_OP_EQ_DOUBLE ] = "EQ_DOUBLE",
- [ FILTER_OP_NE_DOUBLE ] = "NE_DOUBLE",
- [ FILTER_OP_GT_DOUBLE ] = "GT_DOUBLE",
- [ FILTER_OP_LT_DOUBLE ] = "LT_DOUBLE",
- [ FILTER_OP_GE_DOUBLE ] = "GE_DOUBLE",
- [ FILTER_OP_LE_DOUBLE ] = "LE_DOUBLE",
-
- /* Mixed S64-double binary comparators */
- [ FILTER_OP_EQ_DOUBLE_S64 ] = "EQ_DOUBLE_S64",
- [ FILTER_OP_NE_DOUBLE_S64 ] = "NE_DOUBLE_S64",
- [ FILTER_OP_GT_DOUBLE_S64 ] = "GT_DOUBLE_S64",
- [ FILTER_OP_LT_DOUBLE_S64 ] = "LT_DOUBLE_S64",
- [ FILTER_OP_GE_DOUBLE_S64 ] = "GE_DOUBLE_S64",
- [ FILTER_OP_LE_DOUBLE_S64 ] = "LE_DOUBLE_S64",
-
- [ FILTER_OP_EQ_S64_DOUBLE ] = "EQ_S64_DOUBLE",
- [ FILTER_OP_NE_S64_DOUBLE ] = "NE_S64_DOUBLE",
- [ FILTER_OP_GT_S64_DOUBLE ] = "GT_S64_DOUBLE",
- [ FILTER_OP_LT_S64_DOUBLE ] = "LT_S64_DOUBLE",
- [ FILTER_OP_GE_S64_DOUBLE ] = "GE_S64_DOUBLE",
- [ FILTER_OP_LE_S64_DOUBLE ] = "LE_S64_DOUBLE",
-
- /* unary */
- [ FILTER_OP_UNARY_PLUS ] = "UNARY_PLUS",
- [ FILTER_OP_UNARY_MINUS ] = "UNARY_MINUS",
- [ FILTER_OP_UNARY_NOT ] = "UNARY_NOT",
- [ FILTER_OP_UNARY_PLUS_S64 ] = "UNARY_PLUS_S64",
- [ FILTER_OP_UNARY_MINUS_S64 ] = "UNARY_MINUS_S64",
- [ FILTER_OP_UNARY_NOT_S64 ] = "UNARY_NOT_S64",
- [ FILTER_OP_UNARY_PLUS_DOUBLE ] = "UNARY_PLUS_DOUBLE",
- [ FILTER_OP_UNARY_MINUS_DOUBLE ] = "UNARY_MINUS_DOUBLE",
- [ FILTER_OP_UNARY_NOT_DOUBLE ] = "UNARY_NOT_DOUBLE",
-
- /* logical */
- [ FILTER_OP_AND ] = "AND",
- [ FILTER_OP_OR ] = "OR",
-
- /* load field ref */
- [ FILTER_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF",
- [ FILTER_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING",
- [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE",
- [ FILTER_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64",
- [ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE",
-
- /* load from immediate operand */
- [ FILTER_OP_LOAD_STRING ] = "LOAD_STRING",
- [ FILTER_OP_LOAD_S64 ] = "LOAD_S64",
- [ FILTER_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE",
-
- /* cast */
- [ FILTER_OP_CAST_TO_S64 ] = "CAST_TO_S64",
- [ FILTER_OP_CAST_DOUBLE_TO_S64 ] = "CAST_DOUBLE_TO_S64",
- [ FILTER_OP_CAST_NOP ] = "CAST_NOP",
-
- /* get context ref */
- [ FILTER_OP_GET_CONTEXT_REF ] = "GET_CONTEXT_REF",
- [ FILTER_OP_GET_CONTEXT_REF_STRING ] = "GET_CONTEXT_REF_STRING",
- [ FILTER_OP_GET_CONTEXT_REF_S64 ] = "GET_CONTEXT_REF_S64",
- [ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = "GET_CONTEXT_REF_DOUBLE",
-
- /* load userspace field ref */
- [ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = "LOAD_FIELD_REF_USER_STRING",
- [ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = "LOAD_FIELD_REF_USER_SEQUENCE",
-
- /*
- * load immediate star globbing pattern (literal string)
- * from immediate.
- */
- [ FILTER_OP_LOAD_STAR_GLOB_STRING ] = "LOAD_STAR_GLOB_STRING",
-
- /* globbing pattern binary operator: apply to */
- [ FILTER_OP_EQ_STAR_GLOB_STRING ] = "EQ_STAR_GLOB_STRING",
- [ FILTER_OP_NE_STAR_GLOB_STRING ] = "NE_STAR_GLOB_STRING",
-
- /*
- * Instructions for recursive traversal through composed types.
- */
- [ FILTER_OP_GET_CONTEXT_ROOT ] = "GET_CONTEXT_ROOT",
- [ FILTER_OP_GET_APP_CONTEXT_ROOT ] = "GET_APP_CONTEXT_ROOT",
- [ FILTER_OP_GET_PAYLOAD_ROOT ] = "GET_PAYLOAD_ROOT",
-
- [ FILTER_OP_GET_SYMBOL ] = "GET_SYMBOL",
- [ FILTER_OP_GET_SYMBOL_FIELD ] = "GET_SYMBOL_FIELD",
- [ FILTER_OP_GET_INDEX_U16 ] = "GET_INDEX_U16",
- [ FILTER_OP_GET_INDEX_U64 ] = "GET_INDEX_U64",
-
- [ FILTER_OP_LOAD_FIELD ] = "LOAD_FIELD",
- [ FILTER_OP_LOAD_FIELD_S8 ] = "LOAD_FIELD_S8",
- [ FILTER_OP_LOAD_FIELD_S16 ] = "LOAD_FIELD_S16",
- [ FILTER_OP_LOAD_FIELD_S32 ] = "LOAD_FIELD_S32",
- [ FILTER_OP_LOAD_FIELD_S64 ] = "LOAD_FIELD_S64",
- [ FILTER_OP_LOAD_FIELD_U8 ] = "LOAD_FIELD_U8",
- [ FILTER_OP_LOAD_FIELD_U16 ] = "LOAD_FIELD_U16",
- [ FILTER_OP_LOAD_FIELD_U32 ] = "LOAD_FIELD_U32",
- [ FILTER_OP_LOAD_FIELD_U64 ] = "LOAD_FIELD_U64",
- [ FILTER_OP_LOAD_FIELD_STRING ] = "LOAD_FIELD_STRING",
- [ FILTER_OP_LOAD_FIELD_SEQUENCE ] = "LOAD_FIELD_SEQUENCE",
- [ FILTER_OP_LOAD_FIELD_DOUBLE ] = "LOAD_FIELD_DOUBLE",
-
- [ FILTER_OP_UNARY_BIT_NOT ] = "UNARY_BIT_NOT",
-
- [ FILTER_OP_RETURN_S64 ] = "RETURN_S64",
-};
-
-const char *print_op(enum filter_op op)
-{
- if (op >= NR_FILTER_OPS)
- return "UNKNOWN";
- else
- return opnames[op];
-}
-
-static
-int apply_field_reloc(struct lttng_event *event,
- struct bytecode_runtime *runtime,
- uint32_t runtime_len,
- uint32_t reloc_offset,
- const char *field_name,
- enum filter_op filter_op)
-{
- const struct lttng_event_desc *desc;
- const struct lttng_event_field *fields, *field = NULL;
- unsigned int nr_fields, i;
- struct load_op *op;
- uint32_t field_offset = 0;
-
- dbg_printf("Apply field reloc: %u %s\n", reloc_offset, field_name);
-
- /* Lookup event by name */
- desc = event->desc;
- if (!desc)
- return -EINVAL;
- fields = desc->fields;
- if (!fields)
- return -EINVAL;
- nr_fields = desc->nr_fields;
- for (i = 0; i < nr_fields; i++) {
- if (fields[i].u.ext.nofilter) {
- continue;
- }
- if (!strcmp(fields[i].name, field_name)) {
- field = &fields[i];
- break;
- }
- /* compute field offset */
- switch (fields[i].type.atype) {
- case atype_integer:
- case atype_enum:
- case atype_enum_nestable:
- field_offset += sizeof(int64_t);
- break;
- case atype_array:
- case atype_array_nestable:
- case atype_sequence:
- case atype_sequence_nestable:
- field_offset += sizeof(unsigned long);
- field_offset += sizeof(void *);
- break;
- case atype_string:
- field_offset += sizeof(void *);
- break;
- case atype_float:
- field_offset += sizeof(double);
- break;
- default:
- return -EINVAL;
- }
- }
- if (!field)
- return -EINVAL;
-
- /* Check if field offset is too large for 16-bit offset */
- if (field_offset > FILTER_BYTECODE_MAX_LEN - 1)
- return -EINVAL;
-
- /* set type */
- op = (struct load_op *) &runtime->code[reloc_offset];
-
- switch (filter_op) {
- case FILTER_OP_LOAD_FIELD_REF:
- {
- struct field_ref *field_ref;
-
- field_ref = (struct field_ref *) op->data;
- switch (field->type.atype) {
- case atype_integer:
- case atype_enum:
- case atype_enum_nestable:
- op->op = FILTER_OP_LOAD_FIELD_REF_S64;
- break;
- case atype_array:
- case atype_array_nestable:
- case atype_sequence:
- case atype_sequence_nestable:
- op->op = FILTER_OP_LOAD_FIELD_REF_SEQUENCE;
- break;
- case atype_string:
- op->op = FILTER_OP_LOAD_FIELD_REF_STRING;
- break;
- case atype_float:
- op->op = FILTER_OP_LOAD_FIELD_REF_DOUBLE;
- break;
- default:
- return -EINVAL;
- }
- /* set offset */
- field_ref->offset = (uint16_t) field_offset;
- break;
- }
- default:
- return -EINVAL;
- }
- return 0;
-}
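
The field-offset loop in apply_field_reloc() encodes a fixed layout for the filter stack data: every filterable field preceding the target contributes a type-dependent size. A sketch of that size function, with a simplified enum standing in for the atype_* classes matched above:

	#include <stdint.h>
	#include <stddef.h>

	/* Simplified stand-ins for the atype_* classes. */
	enum sketch_atype { SK_INT, SK_SEQ, SK_STRING, SK_FLOAT };

	/*
	 * Size each filterable field occupies in the filter stack data:
	 * integers/enums as int64_t, sequences/arrays as a length plus
	 * a data pointer, strings as a pointer, floats promoted to double.
	 */
	static size_t sketch_field_size(enum sketch_atype t)
	{
		switch (t) {
		case SK_INT:
			return sizeof(int64_t);
		case SK_SEQ:
			return sizeof(unsigned long) + sizeof(void *);
		case SK_STRING:
			return sizeof(void *);
		case SK_FLOAT:
			return sizeof(double);
		}
		return 0;
	}
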
-
-static
-int apply_context_reloc(struct lttng_event *event,
- struct bytecode_runtime *runtime,
- uint32_t runtime_len,
- uint32_t reloc_offset,
- const char *context_name,
- enum filter_op filter_op)
-{
- struct load_op *op;
- struct lttng_ctx_field *ctx_field;
- int idx;
- struct lttng_session *session = runtime->p.session;
-
- dbg_printf("Apply context reloc: %u %s\n", reloc_offset, context_name);
-
- /* Get context index */
- idx = lttng_get_context_index(session->ctx, context_name);
- if (idx < 0) {
- if (lttng_context_is_app(context_name)) {
- int ret;
-
- ret = lttng_ust_add_app_context_to_ctx_rcu(context_name,
- &session->ctx);
- if (ret)
- return ret;
- idx = lttng_get_context_index(session->ctx,
- context_name);
- if (idx < 0)
- return -ENOENT;
- } else {
- return -ENOENT;
- }
- }
- /* Check if idx is too large for 16-bit offset */
- if (idx > FILTER_BYTECODE_MAX_LEN - 1)
- return -EINVAL;
-
- /* Get context return type */
- ctx_field = &session->ctx->fields[idx];
- op = (struct load_op *) &runtime->code[reloc_offset];
-
- switch (filter_op) {
- case FILTER_OP_GET_CONTEXT_REF:
- {
- struct field_ref *field_ref;
-
- field_ref = (struct field_ref *) op->data;
- switch (ctx_field->event_field.type.atype) {
- case atype_integer:
- case atype_enum:
- case atype_enum_nestable:
- op->op = FILTER_OP_GET_CONTEXT_REF_S64;
- break;
- /* Sequence and array supported as string */
- case atype_string:
- case atype_array:
- case atype_array_nestable:
- case atype_sequence:
- case atype_sequence_nestable:
- op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
- break;
- case atype_float:
- op->op = FILTER_OP_GET_CONTEXT_REF_DOUBLE;
- break;
- case atype_dynamic:
- op->op = FILTER_OP_GET_CONTEXT_REF;
- break;
- default:
- return -EINVAL;
- }
- /* set offset to context index within channel contexts */
- field_ref->offset = (uint16_t) idx;
- break;
- }
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static
-int apply_reloc(struct lttng_event *event,
- struct bytecode_runtime *runtime,
- uint32_t runtime_len,
- uint32_t reloc_offset,
- const char *name)
-{
- struct load_op *op;
-
- dbg_printf("Apply reloc: %u %s\n", reloc_offset, name);
-
- /* Ensure that the reloc is within the code */
- if (runtime_len - reloc_offset < sizeof(uint16_t))
- return -EINVAL;
-
- op = (struct load_op *) &runtime->code[reloc_offset];
- switch (op->op) {
- case FILTER_OP_LOAD_FIELD_REF:
- return apply_field_reloc(event, runtime, runtime_len,
- reloc_offset, name, op->op);
- case FILTER_OP_GET_CONTEXT_REF:
- return apply_context_reloc(event, runtime, runtime_len,
- reloc_offset, name, op->op);
- case FILTER_OP_GET_SYMBOL:
- case FILTER_OP_GET_SYMBOL_FIELD:
- /*
- * Will be handled by load specialize phase or
- * dynamically by interpreter.
- */
- return 0;
- default:
- ERR("Unknown reloc op type %u\n", op->op);
- return -EINVAL;
- }
- return 0;
-}
-
-static
-int bytecode_is_linked(struct lttng_ust_filter_bytecode_node *filter_bytecode,
- struct lttng_event *event)
-{
- struct lttng_bytecode_runtime *bc_runtime;
-
- cds_list_for_each_entry(bc_runtime,
- &event->bytecode_runtime_head, node) {
- if (bc_runtime->bc == filter_bytecode)
- return 1;
- }
- return 0;
-}
-
-/*
- * Take a bytecode with reloc table and link it to an event to create a
- * bytecode runtime.
- */
-static
-int _lttng_filter_event_link_bytecode(struct lttng_event *event,
- struct lttng_ust_filter_bytecode_node *filter_bytecode,
- struct cds_list_head *insert_loc)
-{
- int ret, offset, next_offset;
- struct bytecode_runtime *runtime = NULL;
- size_t runtime_alloc_len;
-
- if (!filter_bytecode)
- return 0;
- /* Bytecode already linked */
- if (bytecode_is_linked(filter_bytecode, event))
- return 0;
-
- dbg_printf("Linking...\n");
-
- /* We don't need the reloc table in the runtime */
- runtime_alloc_len = sizeof(*runtime) + filter_bytecode->bc.reloc_offset;
- runtime = zmalloc(runtime_alloc_len);
- if (!runtime) {
- ret = -ENOMEM;
- goto alloc_error;
- }
- runtime->p.bc = filter_bytecode;
- runtime->p.session = event->chan->session;
- runtime->len = filter_bytecode->bc.reloc_offset;
- /* copy original bytecode */
- memcpy(runtime->code, filter_bytecode->bc.data, runtime->len);
- /*
- * apply relocs. Those are a uint16_t (offset in bytecode)
- * followed by a string (field name).
- */
- for (offset = filter_bytecode->bc.reloc_offset;
- offset < filter_bytecode->bc.len;
- offset = next_offset) {
- uint16_t reloc_offset =
- *(uint16_t *) &filter_bytecode->bc.data[offset];
- const char *name =
- (const char *) &filter_bytecode->bc.data[offset + sizeof(uint16_t)];
-
- ret = apply_reloc(event, runtime, runtime->len, reloc_offset, name);
- if (ret) {
- goto link_error;
- }
- next_offset = offset + sizeof(uint16_t) + strlen(name) + 1;
- }
- /* Validate bytecode */
- ret = lttng_filter_validate_bytecode(runtime);
- if (ret) {
- goto link_error;
- }
- /* Specialize bytecode */
- ret = lttng_filter_specialize_bytecode(event, runtime);
- if (ret) {
- goto link_error;
- }
- runtime->p.filter = lttng_filter_interpret_bytecode;
- runtime->p.link_failed = 0;
- cds_list_add_rcu(&runtime->p.node, insert_loc);
- dbg_printf("Linking successful.\n");
- return 0;
-
-link_error:
- runtime->p.filter = lttng_filter_false;
- runtime->p.link_failed = 1;
- cds_list_add_rcu(&runtime->p.node, insert_loc);
-alloc_error:
- dbg_printf("Linking failed.\n");
- return ret;
-}
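
As the comment in _lttng_filter_event_link_bytecode() notes, the reloc table appended after bc.reloc_offset is a sequence of entries, each a 16-bit offset into the code followed by a NUL-terminated field name. A sketch of walking that layout (the callback is hypothetical; memcpy is used instead of the direct cast above to stay alignment-safe):

	#include <stdint.h>
	#include <string.h>

	/*
	 * Walk the reloc table: entries start at reloc_offset and run to
	 * the end of the bytecode image.
	 */
	static void for_each_reloc(const char *data, uint32_t reloc_offset,
			uint32_t len,
			void (*cb)(uint16_t code_offset, const char *name))
	{
		uint32_t offset = reloc_offset;

		while (offset < len) {
			uint16_t code_offset;
			const char *name;

			memcpy(&code_offset, &data[offset], sizeof(code_offset));
			name = &data[offset + sizeof(uint16_t)];
			cb(code_offset, name);
			offset += sizeof(uint16_t) + strlen(name) + 1;
		}
	}
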
-
-void lttng_filter_sync_state(struct lttng_bytecode_runtime *runtime)
-{
- struct lttng_ust_filter_bytecode_node *bc = runtime->bc;
-
- if (!bc->enabler->enabled || runtime->link_failed)
- runtime->filter = lttng_filter_false;
- else
- runtime->filter = lttng_filter_interpret_bytecode;
-}
-
-/*
- * Link bytecode for all enablers referenced by an event.
- */
-void lttng_enabler_event_link_bytecode(struct lttng_event *event,
- struct lttng_enabler *enabler)
-{
- struct lttng_ust_filter_bytecode_node *bc;
- struct lttng_bytecode_runtime *runtime;
-
- /* Can only be called for events with desc attached */
- assert(event->desc);
-
- /* Link each bytecode. */
- cds_list_for_each_entry(bc, &enabler->filter_bytecode_head, node) {
- int found = 0, ret;
- struct cds_list_head *insert_loc;
-
- cds_list_for_each_entry(runtime,
- &event->bytecode_runtime_head, node) {
- if (runtime->bc == bc) {
- found = 1;
- break;
- }
- }
- /* Skip bytecode already linked */
- if (found)
- continue;
-
- /*
- * Insert at specified priority (seqnum) in increasing
- * order. If there already is a bytecode of the same priority,
- * insert the new bytecode right after it.
- */
- cds_list_for_each_entry_reverse(runtime,
- &event->bytecode_runtime_head, node) {
- if (runtime->bc->bc.seqnum <= bc->bc.seqnum) {
- /* insert here */
- insert_loc = &runtime->node;
- goto add_within;
- }
- }
- /* Add to head of list */
- insert_loc = &event->bytecode_runtime_head;
- add_within:
- dbg_printf("linking bytecode\n");
- ret = _lttng_filter_event_link_bytecode(event, bc,
- insert_loc);
- if (ret) {
- dbg_printf("[lttng filter] warning: cannot link event bytecode\n");
- }
- }
-}
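
The reverse scan in lttng_enabler_event_link_bytecode() keeps runtimes sorted by seqnum, with a bytecode of equal priority landing right after the existing one. The same policy on a plain array, as a sketch:

	#include <stdint.h>
	#include <string.h>

	/*
	 * Scan from the tail for the last element with seqnum <= the new
	 * one and insert right after it, so equal priorities keep their
	 * attachment order; if none is found, insert at the head.
	 */
	static int insert_by_seqnum(uint64_t *seqnums, int *nr, int max,
			uint64_t seqnum)
	{
		int i, pos = 0;

		if (*nr == max)
			return -1;
		for (i = *nr - 1; i >= 0; i--) {
			if (seqnums[i] <= seqnum) {
				pos = i + 1;
				break;
			}
		}
		memmove(&seqnums[pos + 1], &seqnums[pos],
			(*nr - pos) * sizeof(*seqnums));
		seqnums[pos] = seqnum;
		(*nr)++;
		return 0;
	}
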
-
-/*
- * We own the filter_bytecode if we return success.
- */
-int lttng_filter_enabler_attach_bytecode(struct lttng_enabler *enabler,
- struct lttng_ust_filter_bytecode_node *filter_bytecode)
-{
- cds_list_add(&filter_bytecode->node, &enabler->filter_bytecode_head);
- return 0;
-}
-
-void lttng_free_event_filter_runtime(struct lttng_event *event)
-{
- struct bytecode_runtime *runtime, *tmp;
-
- cds_list_for_each_entry_safe(runtime, tmp,
- &event->bytecode_runtime_head, p.node) {
- free(runtime->data);
- free(runtime);
- }
-}
+++ /dev/null
-#ifndef _LTTNG_FILTER_H
-#define _LTTNG_FILTER_H
-
-/*
- * lttng-filter.h
- *
- * LTTng UST filter header.
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <errno.h>
-#include <stdio.h>
-#include <stdbool.h>
-#include <helper.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-context-provider.h>
-#include <stdint.h>
-#include <assert.h>
-#include <errno.h>
-#include <string.h>
-#include <inttypes.h>
-#include <limits.h>
-#include <usterr-signal-safe.h>
-#include "filter-bytecode.h"
-
-/* Filter stack length, in number of entries */
-#define FILTER_STACK_LEN 10 /* includes 2 dummy */
-#define FILTER_STACK_EMPTY 1
-
-#define FILTER_MAX_DATA_LEN 65536
-
-#ifndef min_t
-#define min_t(type, a, b) \
- ((type) (a) < (type) (b) ? (type) (a) : (type) (b))
-#endif
-
-#ifndef likely
-#define likely(x) __builtin_expect(!!(x), 1)
-#endif
-
-#ifndef unlikely
-#define unlikely(x) __builtin_expect(!!(x), 0)
-#endif
-
-#ifdef DEBUG
-#define dbg_printf(fmt, args...) \
- printf("[debug bytecode in %s:%s@%u] " fmt, \
- __FILE__, __func__, __LINE__, ## args)
-#else
-#define dbg_printf(fmt, args...) \
-do { \
- /* do nothing but check printf format */ \
- if (0) \
- printf("[debug bytecode in %s:%s@%u] " fmt, \
- __FILE__, __func__, __LINE__, ## args); \
-} while (0)
-#endif
-
-/* Linked bytecode. Child of struct lttng_bytecode_runtime. */
-struct bytecode_runtime {
- struct lttng_bytecode_runtime p;
- size_t data_len;
- size_t data_alloc_len;
- char *data;
- uint16_t len;
- char code[0];
-};
-
-enum entry_type {
- REG_S64,
- REG_DOUBLE,
- REG_STRING,
- REG_STAR_GLOB_STRING,
- REG_UNKNOWN,
- REG_PTR,
-};
-
-enum load_type {
- LOAD_ROOT_CONTEXT,
- LOAD_ROOT_APP_CONTEXT,
- LOAD_ROOT_PAYLOAD,
- LOAD_OBJECT,
-};
-
-enum object_type {
- OBJECT_TYPE_S8,
- OBJECT_TYPE_S16,
- OBJECT_TYPE_S32,
- OBJECT_TYPE_S64,
- OBJECT_TYPE_U8,
- OBJECT_TYPE_U16,
- OBJECT_TYPE_U32,
- OBJECT_TYPE_U64,
-
- OBJECT_TYPE_DOUBLE,
- OBJECT_TYPE_STRING,
- OBJECT_TYPE_STRING_SEQUENCE,
-
- OBJECT_TYPE_SEQUENCE,
- OBJECT_TYPE_ARRAY,
- OBJECT_TYPE_STRUCT,
- OBJECT_TYPE_VARIANT,
-
- OBJECT_TYPE_DYNAMIC,
-};
-
-struct filter_get_index_data {
- uint64_t offset; /* in bytes */
- size_t ctx_index;
- size_t array_len;
- struct {
- size_t len;
- enum object_type type;
- bool rev_bo; /* reverse byte order */
- } elem;
-};
-
-/* Validation stack */
-struct vstack_load {
- enum load_type type;
- enum object_type object_type;
- const struct lttng_event_field *field;
- bool rev_bo; /* reverse byte order */
-};
-
-struct vstack_entry {
- enum entry_type type;
- struct vstack_load load;
-};
-
-struct vstack {
- int top; /* top of stack */
- struct vstack_entry e[FILTER_STACK_LEN];
-};
-
-static inline
-void vstack_init(struct vstack *stack)
-{
- stack->top = -1;
-}
-
-static inline
-struct vstack_entry *vstack_ax(struct vstack *stack)
-{
- if (unlikely(stack->top < 0))
- return NULL;
- return &stack->e[stack->top];
-}
-
-static inline
-struct vstack_entry *vstack_bx(struct vstack *stack)
-{
- if (unlikely(stack->top < 1))
- return NULL;
- return &stack->e[stack->top - 1];
-}
-
-static inline
-int vstack_push(struct vstack *stack)
-{
- if (stack->top >= FILTER_STACK_LEN - 1) {
- ERR("Stack full\n");
- return -EINVAL;
- }
- ++stack->top;
- return 0;
-}
-
-static inline
-int vstack_pop(struct vstack *stack)
-{
- if (unlikely(stack->top < 0)) {
- ERR("Stack empty\n");
- return -EINVAL;
- }
- stack->top--;
- return 0;
-}
-
-/* Execution stack */
-enum estack_string_literal_type {
- ESTACK_STRING_LITERAL_TYPE_NONE,
- ESTACK_STRING_LITERAL_TYPE_PLAIN,
- ESTACK_STRING_LITERAL_TYPE_STAR_GLOB,
-};
-
-struct load_ptr {
- enum load_type type;
- enum object_type object_type;
- const void *ptr;
- bool rev_bo;
- /* Temporary place-holders for contexts. */
- union {
- int64_t s64;
- uint64_t u64;
- double d;
- } u;
- /*
- * "field" is only needed when nested under a variant, in which
- * case we cannot specialize the nested operations.
- */
- const struct lttng_event_field *field;
-};
-
-struct estack_entry {
- enum entry_type type; /* For dynamic typing. */
- union {
- int64_t v;
- double d;
-
- struct {
- const char *str;
- size_t seq_len;
- enum estack_string_literal_type literal_type;
- } s;
- struct load_ptr ptr;
- } u;
-};
-
-struct estack {
- int top; /* top of stack */
- struct estack_entry e[FILTER_STACK_LEN];
-};
-
-/*
- * Always use aliased type for ax/bx (top of stack).
- * When ax/bx are S64, use aliased value.
- */
-#define estack_ax_v ax
-#define estack_bx_v bx
-#define estack_ax_t ax_t
-#define estack_bx_t bx_t
-
-/*
- * ax and bx registers can hold either integer, double or string.
- */
-#define estack_ax(stack, top) \
- ({ \
- assert((top) > FILTER_STACK_EMPTY); \
- &(stack)->e[top]; \
- })
-
-#define estack_bx(stack, top) \
- ({ \
- assert((top) > FILTER_STACK_EMPTY + 1); \
- &(stack)->e[(top) - 1]; \
- })
-
-/*
- * Currently, only integers (REG_S64) can be pushed into the stack.
- */
-#define estack_push(stack, top, ax, bx, ax_t, bx_t) \
- do { \
- assert((top) < FILTER_STACK_LEN - 1); \
- (stack)->e[(top) - 1].u.v = (bx); \
- (stack)->e[(top) - 1].type = (bx_t); \
- (bx) = (ax); \
- (bx_t) = (ax_t); \
- ++(top); \
- } while (0)
-
-#define estack_pop(stack, top, ax, bx, ax_t, bx_t) \
- do { \
- assert((top) > FILTER_STACK_EMPTY); \
- (ax) = (bx); \
- (ax_t) = (bx_t); \
- (bx) = (stack)->e[(top) - 2].u.v; \
- (bx_t) = (stack)->e[(top) - 2].type; \
- (top)--; \
- } while (0)
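
A usage sketch for the estack_* macros above, assuming the definitions in this header: ax/bx cache the two top-of-stack values, so pushes only spill bx to memory and pops reload it; after estack_push the caller stores the new value through the ax aliases, which mirrors how the interpreter uses them:

	static inline void estack_usage_sketch(void)
	{
		struct estack _stack;
		struct estack *stack = &_stack;
		int top = FILTER_STACK_EMPTY;
		int64_t ax = 0, bx = 0;
		enum entry_type ax_t = REG_UNKNOWN, bx_t = REG_UNKNOWN;

		/* Push 2: make room, then write the new top through ax. */
		estack_push(stack, top, ax, bx, ax_t, bx_t);
		ax = 2; ax_t = REG_S64;

		/* Push 3: the previous top (2) moves into the bx aliases. */
		estack_push(stack, top, ax, bx, ax_t, bx_t);
		ax = 3; ax_t = REG_S64;

		/* Pop: ax becomes 2 again; bx reloads from stack memory. */
		estack_pop(stack, top, ax, bx, ax_t, bx_t);
	}
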
-
-const char *print_op(enum filter_op op);
-
-int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode);
-int lttng_filter_specialize_bytecode(struct lttng_event *event,
- struct bytecode_runtime *bytecode);
-
-uint64_t lttng_filter_false(void *filter_data,
- const char *filter_stack_data);
-uint64_t lttng_filter_interpret_bytecode(void *filter_data,
- const char *filter_stack_data);
-
-#endif /* _LTTNG_FILTER_H */
#include "lttng-tracer-core.h"
#include "jhash.h"
#include "error.h"
+#include "ust-events-internal.h"
/*
* probe list is protected by ust_lock()/ust_unlock().
if (lttng_session_active())
fixup_lazy_probes();
+ lttng_fix_pending_triggers();
+
ust_unlock();
return ret;
}
*/
#define _LGPL_SOURCE
+#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>
#include "../libringbuffer/frontend_types.h"
#include "../libringbuffer/shm.h"
+#include "../libcounter/counter.h"
#include "lttng-tracer.h"
#include "string-utils.h"
+#include "ust-events-internal.h"
#define OBJ_NAME_LEN 16
*/
static const struct lttng_ust_objd_ops lttng_ops;
+static const struct lttng_ust_objd_ops lttng_trigger_group_ops;
static const struct lttng_ust_objd_ops lttng_session_ops;
static const struct lttng_ust_objd_ops lttng_channel_ops;
-static const struct lttng_ust_objd_ops lttng_enabler_ops;
+static const struct lttng_ust_objd_ops lttng_event_enabler_ops;
+static const struct lttng_ust_objd_ops lttng_trigger_enabler_ops;
static const struct lttng_ust_objd_ops lttng_tracepoint_list_ops;
static const struct lttng_ust_objd_ops lttng_tracepoint_field_list_ops;
return 0;
}
+static
+int lttng_abi_trigger_send_fd(void *owner, int trigger_notif_fd)
+{
+ struct lttng_trigger_group *trigger_group;
+ int trigger_group_objd, ret, fd_flag, close_ret;
+
+ trigger_group = lttng_trigger_group_create();
+ if (!trigger_group)
+ return -ENOMEM;
+
+ /*
+ * Set this file descriptor as NON-BLOCKING.
+ */
+ fd_flag = fcntl(trigger_notif_fd, F_GETFL);
+ if (fd_flag < 0) {
+ ret = -errno;
+ goto fd_error;
+ }
+ fd_flag |= O_NONBLOCK;
+
+ ret = fcntl(trigger_notif_fd, F_SETFL, fd_flag);
+ if (ret) {
+ ret = -errno;
+ goto fd_error;
+ }
+
+ trigger_group_objd = objd_alloc(trigger_group,
+ &lttng_trigger_group_ops, owner, "trigger_group");
+ if (trigger_group_objd < 0) {
+ ret = trigger_group_objd;
+ goto objd_error;
+ }
+
+ trigger_group->objd = trigger_group_objd;
+ trigger_group->owner = owner;
+ trigger_group->notification_fd = trigger_notif_fd;
+
+ return trigger_group_objd;
+
+objd_error:
+ lttng_trigger_group_destroy(trigger_group);
+fd_error:
+ close_ret = close(trigger_notif_fd);
+ if (close_ret) {
+ PERROR("close");
+ }
+
+ return ret;
+}
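
The F_GETFL/F_SETFL pair above is the standard way to add O_NONBLOCK without clobbering the other file status flags. A small defensive helper packaging the idiom, as a sketch (the set_nonblocking name is hypothetical):

	#include <fcntl.h>
	#include <errno.h>

	/* Defensive variant of the idiom above: check both fcntl() calls. */
	static int set_nonblocking(int fd)
	{
		int flags;

		flags = fcntl(fd, F_GETFL);
		if (flags < 0)
			return -errno;
		if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) < 0)
			return -errno;
		return 0;
	}
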
+
static
long lttng_abi_add_context(int objd,
struct lttng_ust_context *context_param,
case LTTNG_UST_WAIT_QUIESCENT:
synchronize_trace();
return 0;
+ case LTTNG_UST_TRIGGER_GROUP_CREATE:
+ return lttng_abi_trigger_send_fd(owner,
+ uargs->trigger_handle.trigger_notif_fd);
default:
return -EINVAL;
}
return lttng_session_disable(session);
case LTTNG_UST_SESSION_STATEDUMP:
return lttng_session_statedump(session);
+ case LTTNG_UST_COUNTER:
+ case LTTNG_UST_COUNTER_GLOBAL:
+ case LTTNG_UST_COUNTER_CPU:
+ /* Not implemented yet. */
+ return -EINVAL;
default:
return -EINVAL;
}
.cmd = lttng_session_cmd,
};
+static int lttng_ust_trigger_enabler_create(int trigger_group_obj, void *owner,
+ struct lttng_ust_trigger *trigger_param,
+ enum lttng_enabler_format_type type)
+{
+ struct lttng_trigger_group *trigger_group =
+ objd_private(trigger_group_obj);
+ struct lttng_trigger_enabler *trigger_enabler;
+ int trigger_objd, ret;
+
+ trigger_param->name[LTTNG_UST_SYM_NAME_LEN - 1] = '\0';
+ trigger_objd = objd_alloc(NULL, &lttng_trigger_enabler_ops, owner,
+ "trigger enabler");
+ if (trigger_objd < 0) {
+ ret = trigger_objd;
+ goto objd_error;
+ }
+
+ trigger_enabler = lttng_trigger_enabler_create(trigger_group, type,
+ trigger_param);
+ if (!trigger_enabler) {
+ ret = -ENOMEM;
+ goto trigger_error;
+ }
+
+ objd_set_private(trigger_objd, trigger_enabler);
+ /* The trigger holds a reference on the trigger group. */
+ objd_ref(trigger_enabler->group->objd);
+
+ return trigger_objd;
+
+trigger_error:
+ {
+ int err;
+
+ err = lttng_ust_objd_unref(trigger_objd, 1);
+ assert(!err);
+ }
+objd_error:
+ return ret;
+}
+
+static
+long lttng_trigger_enabler_cmd(int objd, unsigned int cmd, unsigned long arg,
+ union ust_args *uargs, void *owner)
+{
+ struct lttng_trigger_enabler *trigger_enabler = objd_private(objd);
+
+ switch (cmd) {
+ case LTTNG_UST_FILTER:
+ return lttng_trigger_enabler_attach_filter_bytecode(
+ trigger_enabler,
+ (struct lttng_ust_bytecode_node *) arg);
+ case LTTNG_UST_EXCLUSION:
+ return lttng_trigger_enabler_attach_exclusion(trigger_enabler,
+ (struct lttng_ust_excluder_node *) arg);
+ case LTTNG_UST_CAPTURE:
+ return lttng_trigger_enabler_attach_capture_bytecode(
+ trigger_enabler,
+ (struct lttng_ust_bytecode_node *) arg);
+ case LTTNG_UST_ENABLE:
+ return lttng_trigger_enabler_enable(trigger_enabler);
+ case LTTNG_UST_DISABLE:
+ return lttng_trigger_enabler_disable(trigger_enabler);
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * lttng_trigger_group_error_counter_cmd - lttng trigger group error counter object command
+ *
+ * @objd: the object descriptor
+ * @cmd: the command
+ * @arg: command arg
+ * @uargs: UST arguments (internal)
+ * @owner: objd owner
+ *
+ * This descriptor implements lttng commands:
+ * LTTNG_UST_COUNTER_GLOBAL
+ * Return negative error code on error, 0 on success.
+ * LTTNG_UST_COUNTER_CPU
+ * Return negative error code on error, 0 on success.
+ */
+static
+long lttng_trigger_group_error_counter_cmd(int objd, unsigned int cmd, unsigned long arg,
+ union ust_args *uargs, void *owner)
+{
+ struct lttng_counter *counter = objd_private(objd);
+
+ switch (cmd) {
+ case LTTNG_UST_COUNTER_GLOBAL:
+ return -EINVAL; /* Unimplemented. */
+ case LTTNG_UST_COUNTER_CPU:
+ {
+ struct lttng_ust_counter_cpu *counter_cpu =
+ (struct lttng_ust_counter_cpu *)arg;
+ return lttng_counter_set_cpu_shm(counter->counter,
+ counter_cpu->cpu_nr, uargs->counter_shm.shm_fd);
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static
+int lttng_release_trigger_group_error_counter(int objd)
+{
+ struct lttng_counter *counter = objd_private(objd);
+
+ if (counter) {
+ return lttng_ust_objd_unref(counter->trigger_group->objd, 0);
+ } else {
+ return -EINVAL;
+ }
+}
+
+static const struct lttng_ust_objd_ops lttng_trigger_group_error_counter_ops = {
+ .release = lttng_release_trigger_group_error_counter,
+ .cmd = lttng_trigger_group_error_counter_cmd,
+};
+
+static
+int lttng_ust_trigger_group_create_error_counter(int trigger_group_objd, void *owner,
+ struct lttng_ust_counter_conf *error_counter_conf)
+{
+ const char *counter_transport_name;
+ struct lttng_trigger_group *trigger_group =
+ objd_private(trigger_group_objd);
+ struct lttng_counter *counter;
+ int counter_objd, ret;
+ struct lttng_counter_dimension dimensions[1];
+ size_t counter_len;
+
+ if (trigger_group->error_counter)
+ return -EBUSY;
+
+ if (error_counter_conf->arithmetic != LTTNG_UST_COUNTER_ARITHMETIC_MODULAR)
+ return -EINVAL;
+
+ if (error_counter_conf->number_dimensions != 1)
+ return -EINVAL;
+
+ switch (error_counter_conf->bitness) {
+ case LTTNG_UST_COUNTER_BITNESS_64BITS:
+ counter_transport_name = "counter-per-cpu-64-modular";
+ break;
+ case LTTNG_UST_COUNTER_BITNESS_32BITS:
+ counter_transport_name = "counter-per-cpu-32-modular";
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ counter_objd = objd_alloc(NULL, &lttng_trigger_group_error_counter_ops, owner,
+ "trigger group error counter");
+ if (counter_objd < 0) {
+ ret = counter_objd;
+ goto objd_error;
+ }
+
+ counter_len = error_counter_conf->dimensions[0].size;
+ dimensions[0].size = counter_len;
+ dimensions[0].underflow_index = 0;
+ dimensions[0].overflow_index = 0;
+ dimensions[0].has_underflow = 0;
+ dimensions[0].has_overflow = 0;
+
+ counter = lttng_ust_counter_create(counter_transport_name, 1, dimensions);
+ if (!counter) {
+ ret = -EINVAL;
+ goto create_error;
+ }
+
+ trigger_group->error_counter = counter;
+ trigger_group->error_counter_len = counter_len;
+
+ counter->objd = counter_objd;
+ counter->trigger_group = trigger_group; /* owner */
+
+ objd_set_private(counter_objd, counter);
+ /* The error counter holds a reference on the trigger group. */
+ objd_ref(trigger_group->objd);
+
+ return counter_objd;
+
+create_error:
+ {
+ int err;
+
+ err = lttng_ust_objd_unref(counter_objd, 1);
+ assert(!err);
+ }
+objd_error:
+ return ret;
+}
+
+static
+long lttng_trigger_group_cmd(int objd, unsigned int cmd, unsigned long arg,
+ union ust_args *uargs, void *owner)
+{
+ switch (cmd) {
+ case LTTNG_UST_TRIGGER_CREATE:
+ {
+ struct lttng_ust_trigger *trigger_param =
+ (struct lttng_ust_trigger *) arg;
+ if (strutils_is_star_glob_pattern(trigger_param->name)) {
+ /*
+ * If the event name is a star globbing pattern,
+ * we create the special star globbing enabler.
+ */
+ return lttng_ust_trigger_enabler_create(objd, owner,
+ trigger_param, LTTNG_ENABLER_FORMAT_STAR_GLOB);
+ } else {
+ return lttng_ust_trigger_enabler_create(objd, owner,
+ trigger_param, LTTNG_ENABLER_FORMAT_EVENT);
+ }
+ }
+ case LTTNG_UST_COUNTER:
+ {
+ struct lttng_ust_counter_conf *counter_conf =
+ (struct lttng_ust_counter_conf *) uargs->counter.counter_data;
+ return lttng_ust_trigger_group_create_error_counter(objd, owner,
+ counter_conf);
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static
+int lttng_trigger_enabler_release(int objd)
+{
+ struct lttng_trigger_enabler *trigger_enabler = objd_private(objd);
+
+ if (trigger_enabler)
+ return lttng_ust_objd_unref(trigger_enabler->group->objd, 0);
+ return 0;
+}
+
+static const struct lttng_ust_objd_ops lttng_trigger_enabler_ops = {
+ .release = lttng_trigger_enabler_release,
+ .cmd = lttng_trigger_enabler_cmd,
+};
+
+static
+int lttng_release_trigger_group(int objd)
+{
+ struct lttng_trigger_group *trigger_group = objd_private(objd);
+
+ if (trigger_group) {
+ lttng_trigger_group_destroy(trigger_group);
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+}
+
+static const struct lttng_ust_objd_ops lttng_trigger_group_ops = {
+ .release = lttng_release_trigger_group,
+ .cmd = lttng_trigger_group_cmd,
+};
+
static
long lttng_tracepoint_list_cmd(int objd, unsigned int cmd, unsigned long arg,
union ust_args *uargs, void *owner)
}
static
-int lttng_abi_create_enabler(int channel_objd,
+int lttng_abi_create_event_enabler(int channel_objd,
struct lttng_ust_event *event_param,
void *owner,
- enum lttng_enabler_type type)
+ enum lttng_enabler_format_type format_type)
{
struct lttng_channel *channel = objd_private(channel_objd);
- struct lttng_enabler *enabler;
+ struct lttng_event_enabler *enabler;
int event_objd, ret;
event_param->name[LTTNG_UST_SYM_NAME_LEN - 1] = '\0';
- event_objd = objd_alloc(NULL, &lttng_enabler_ops, owner, "enabler");
+ event_objd = objd_alloc(NULL, &lttng_event_enabler_ops, owner,
+ "event enabler");
if (event_objd < 0) {
ret = event_objd;
goto objd_error;
* We tolerate no failure path after event creation. It will stay
* invariant for the rest of the session.
*/
- enabler = lttng_enabler_create(type, event_param, channel);
+ enabler = lttng_event_enabler_create(format_type, event_param, channel);
if (!enabler) {
ret = -ENOMEM;
goto event_error;
* If the event name is a star globbing pattern,
* we create the special star globbing enabler.
*/
- return lttng_abi_create_enabler(objd, event_param,
- owner, LTTNG_ENABLER_STAR_GLOB);
+ return lttng_abi_create_event_enabler(objd, event_param,
+ owner, LTTNG_ENABLER_FORMAT_STAR_GLOB);
} else {
- return lttng_abi_create_enabler(objd, event_param,
- owner, LTTNG_ENABLER_EVENT);
+ return lttng_abi_create_event_enabler(objd, event_param,
+ owner, LTTNG_ENABLER_FORMAT_EVENT);
}
}
case LTTNG_UST_CONTEXT:
* Attach exclusions to an enabler.
*/
static
-long lttng_enabler_cmd(int objd, unsigned int cmd, unsigned long arg,
+long lttng_event_enabler_cmd(int objd, unsigned int cmd, unsigned long arg,
union ust_args *uargs, void *owner)
{
- struct lttng_enabler *enabler = objd_private(objd);
+ struct lttng_event_enabler *enabler = objd_private(objd);
switch (cmd) {
case LTTNG_UST_CONTEXT:
- return lttng_enabler_attach_context(enabler,
+ return lttng_event_enabler_attach_context(enabler,
(struct lttng_ust_context *) arg);
case LTTNG_UST_ENABLE:
- return lttng_enabler_enable(enabler);
+ return lttng_event_enabler_enable(enabler);
case LTTNG_UST_DISABLE:
- return lttng_enabler_disable(enabler);
+ return lttng_event_enabler_disable(enabler);
case LTTNG_UST_FILTER:
{
int ret;
- ret = lttng_enabler_attach_bytecode(enabler,
- (struct lttng_ust_filter_bytecode_node *) arg);
+ ret = lttng_event_enabler_attach_filter_bytecode(enabler,
+ (struct lttng_ust_bytecode_node *) arg);
if (ret)
return ret;
return 0;
}
case LTTNG_UST_EXCLUSION:
{
- return lttng_enabler_attach_exclusion(enabler,
+ return lttng_event_enabler_attach_exclusion(enabler,
(struct lttng_ust_excluder_node *) arg);
}
default:
}
static
-int lttng_enabler_release(int objd)
+int lttng_event_enabler_release(int objd)
{
- struct lttng_enabler *enabler = objd_private(objd);
- if (enabler)
- return lttng_ust_objd_unref(enabler->chan->objd, 0);
+ struct lttng_event_enabler *event_enabler = objd_private(objd);
+
+ if (event_enabler)
+ return lttng_ust_objd_unref(event_enabler->chan->objd, 0);
return 0;
}
-static const struct lttng_ust_objd_ops lttng_enabler_ops = {
- .release = lttng_enabler_release,
- .cmd = lttng_enabler_cmd,
+static const struct lttng_ust_objd_ops lttng_event_enabler_ops = {
+ .release = lttng_event_enabler_release,
+ .cmd = lttng_event_enabler_cmd,
};
void lttng_ust_abi_exit(void)
#include "clock.h"
#include "../libringbuffer/getcpu.h"
#include "getenv.h"
+#include "ust-events-internal.h"
/* Concatenate lttng ust shared library name with its major version number. */
#define LTTNG_UST_LIB_SO_NAME "liblttng-ust.so." __ust_stringify(CONFIG_LTTNG_UST_LIBRARY_VERSION_MAJOR)
[ LTTNG_UST_REGISTER_DONE ] = "Registration Done",
[ LTTNG_UST_TRACEPOINT_FIELD_LIST ] = "Create Tracepoint Field List",
+ [ LTTNG_UST_TRIGGER_GROUP_CREATE ] = "Create trigger group",
+ [ LTTNG_UST_TRIGGER_CREATE ] = "Create trigger",
+
/* Session FD commands */
[ LTTNG_UST_CHANNEL ] = "Create Channel",
[ LTTNG_UST_SESSION_START ] = "Start Session",
/* Event FD commands */
[ LTTNG_UST_FILTER ] = "Create Filter",
[ LTTNG_UST_EXCLUSION ] = "Add exclusions to event",
+
+ /* Session and trigger FD commands */
+ [ LTTNG_UST_COUNTER ] = "Create Counter",
+ [ LTTNG_UST_COUNTER_GLOBAL ] = "Create Counter Global",
+ [ LTTNG_UST_COUNTER_CPU ] = "Create Counter CPU",
};
static const char *str_timeout;
extern void lttng_ring_buffer_client_discard_exit(void);
extern void lttng_ring_buffer_client_discard_rt_exit(void);
extern void lttng_ring_buffer_metadata_client_exit(void);
+extern void lttng_counter_client_percpu_32_overflow_init(void);
+extern void lttng_counter_client_percpu_32_overflow_exit(void);
+extern void lttng_counter_client_percpu_64_overflow_init(void);
+extern void lttng_counter_client_percpu_64_overflow_exit(void);
static char *get_map_shm(struct sock_info *sock_info);
}
}
+static inline
+const char *bytecode_type_str(uint32_t cmd)
+{
+ switch (cmd) {
+ case LTTNG_UST_CAPTURE:
+ return "capture";
+ case LTTNG_UST_FILTER:
+ return "filter";
+ default:
+ abort();
+ }
+}
+
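+/*
+ * Receive a filter or capture bytecode program from the session
+ * daemon. The fixed-size command header (`struct ustcomm_ust_msg`,
+ * already read by the caller) announces the payload size; the
+ * variable-size bytecode is then received on the same socket and
+ * handed to the target object's `cmd` operation.
+ */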
+static
+int handle_bytecode_recv(struct sock_info *sock_info,
+ int sock, struct ustcomm_ust_msg *lum)
+{
+ struct lttng_ust_bytecode_node *bytecode;
+ enum lttng_ust_bytecode_node_type type;
+ const struct lttng_ust_objd_ops *ops;
+ uint32_t data_size, data_size_max, reloc_offset;
+ uint64_t seqnum;
+ ssize_t len;
+ int ret = 0;
+
+ switch (lum->cmd) {
+ case LTTNG_UST_FILTER:
+ type = LTTNG_UST_BYTECODE_NODE_TYPE_FILTER;
+ data_size = lum->u.filter.data_size;
+ data_size_max = FILTER_BYTECODE_MAX_LEN;
+ reloc_offset = lum->u.filter.reloc_offset;
+ seqnum = lum->u.filter.seqnum;
+ break;
+ case LTTNG_UST_CAPTURE:
+ type = LTTNG_UST_BYTECODE_NODE_TYPE_CAPTURE;
+ data_size = lum->u.capture.data_size;
+ data_size_max = CAPTURE_BYTECODE_MAX_LEN;
+ reloc_offset = lum->u.capture.reloc_offset;
+ seqnum = lum->u.capture.seqnum;
+ break;
+ default:
+ abort();
+ }
+
+ if (data_size > data_size_max) {
+ ERR("Bytecode %s data size is too large: %u bytes",
+ bytecode_type_str(lum->cmd), data_size);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ if (reloc_offset > data_size) {
+ ERR("Bytecode %s reloc offset %u is not within data",
+ bytecode_type_str(lum->cmd), reloc_offset);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ /* Allocate the structure AND the `data[]` field. */
+ bytecode = zmalloc(sizeof(*bytecode) + data_size);
+ if (!bytecode) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ bytecode->bc.len = data_size;
+ bytecode->bc.reloc_offset = reloc_offset;
+ bytecode->bc.seqnum = seqnum;
+ bytecode->type = type;
+
+ len = ustcomm_recv_unix_sock(sock, bytecode->bc.data, bytecode->bc.len);
+ switch (len) {
+ case 0: /* orderly shutdown */
+ ret = 0;
+ goto error_free_bytecode;
+ default:
+ if (len == bytecode->bc.len) {
+ DBG("Bytecode %s data received",
+ bytecode_type_str(lum->cmd));
+ break;
+ } else if (len < 0) {
+ DBG("Receive failed from lttng-sessiond with errno %d",
+ (int) -len);
+ if (len == -ECONNRESET) {
+ ERR("%s remote end closed connection",
+ sock_info->name);
+ ret = len;
+ goto error_free_bytecode;
+ }
+ ret = len;
+ goto error_free_bytecode;
+ } else {
+ DBG("Incorrect %s bytecode data message size: %zd",
+ bytecode_type_str(lum->cmd), len);
+ ret = -EINVAL;
+ goto error_free_bytecode;
+ }
+ }
+
+ ops = objd_ops(lum->handle);
+ if (!ops) {
+ ret = -ENOENT;
+ goto error_free_bytecode;
+ }
+
+ if (ops->cmd) {
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) bytecode,
+ NULL, sock_info);
+ if (ret)
+ goto error_free_bytecode;
+ /* don't free bytecode if everything went fine. */
+ } else {
+ ret = -ENOSYS;
+ goto error_free_bytecode;
+ }
+
+ goto end;
+
+error_free_bytecode:
+ free(bytecode);
+end:
+ return ret;
+}
+
static
int handle_message(struct sock_info *sock_info,
int sock, struct ustcomm_ust_msg *lum)
else
ret = lttng_ust_objd_unref(lum->handle, 1);
break;
+ case LTTNG_UST_CAPTURE:
case LTTNG_UST_FILTER:
- {
- /* Receive filter data */
- struct lttng_ust_filter_bytecode_node *bytecode;
-
- if (lum->u.filter.data_size > FILTER_BYTECODE_MAX_LEN) {
- ERR("Filter data size is too large: %u bytes",
- lum->u.filter.data_size);
- ret = -EINVAL;
- goto error;
- }
-
- if (lum->u.filter.reloc_offset > lum->u.filter.data_size) {
- ERR("Filter reloc offset %u is not within data",
- lum->u.filter.reloc_offset);
- ret = -EINVAL;
- goto error;
- }
-
- bytecode = zmalloc(sizeof(*bytecode) + lum->u.filter.data_size);
- if (!bytecode) {
- ret = -ENOMEM;
- goto error;
- }
- len = ustcomm_recv_unix_sock(sock, bytecode->bc.data,
- lum->u.filter.data_size);
- switch (len) {
- case 0: /* orderly shutdown */
- ret = 0;
- free(bytecode);
+ ret = handle_bytecode_recv(sock_info, sock, lum);
+ if (ret)
goto error;
- default:
- if (len == lum->u.filter.data_size) {
- DBG("filter data received");
- break;
- } else if (len < 0) {
- DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
- if (len == -ECONNRESET) {
- ERR("%s remote end closed connection", sock_info->name);
- ret = len;
- free(bytecode);
- goto error;
- }
- ret = len;
- free(bytecode);
- goto error;
- } else {
- DBG("incorrect filter data message size: %zd", len);
- ret = -EINVAL;
- free(bytecode);
- goto error;
- }
- }
- bytecode->bc.len = lum->u.filter.data_size;
- bytecode->bc.reloc_offset = lum->u.filter.reloc_offset;
- bytecode->bc.seqnum = lum->u.filter.seqnum;
- if (ops->cmd) {
- ret = ops->cmd(lum->handle, lum->cmd,
- (unsigned long) bytecode,
- &args, sock_info);
- if (ret) {
- free(bytecode);
- }
- /* don't free bytecode if everything went fine. */
- } else {
- ret = -ENOSYS;
- free(bytecode);
- }
break;
- }
case LTTNG_UST_EXCLUSION:
{
/* Receive exclusion names */
}
break;
}
+ case LTTNG_UST_TRIGGER_GROUP_CREATE:
+ {
+ int trigger_notif_fd;
+
+ len = ustcomm_recv_trigger_notif_fd_from_sessiond(sock,
+ &trigger_notif_fd);
+ switch (len) {
+ case 0: /* orderly shutdown */
+ ret = 0;
+ goto error;
+ case 1:
+ break;
+ default:
+ if (len < 0) {
+ DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
+ if (len == -ECONNRESET) {
+ ERR("%s remote end closed connection", sock_info->name);
+ ret = len;
+ goto error;
+ }
+ ret = len;
+ goto error;
+ } else {
+ DBG("incorrect trigger fd message size: %zd", len);
+ ret = -EINVAL;
+ goto error;
+ }
+ }
+ args.trigger_handle.trigger_notif_fd = trigger_notif_fd;
+ if (ops->cmd)
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) &lum->u,
+ &args, sock_info);
+ else
+ ret = -ENOSYS;
+ break;
+ }
case LTTNG_UST_CHANNEL:
{
void *chan_data;
ret = -ENOSYS;
}
break;
+ case LTTNG_UST_COUNTER:
+ {
+ void *counter_data;
+
+ len = ustcomm_recv_counter_from_sessiond(sock,
+ &counter_data, lum->u.counter.len);
+ switch (len) {
+ case 0: /* orderly shutdown */
+ ret = 0;
+ goto error;
+ default:
+ if (len == lum->u.counter.len) {
+ DBG("counter data received");
+ break;
+ } else if (len < 0) {
+ DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
+ if (len == -ECONNRESET) {
+ ERR("%s remote end closed connection", sock_info->name);
+ ret = len;
+ goto error;
+ }
+ ret = len;
+ goto error;
+ } else {
+ DBG("incorrect counter data message size: %zd", len);
+ ret = -EINVAL;
+ goto error;
+ }
+ }
+ args.counter.counter_data = counter_data;
+ if (ops->cmd)
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) &lum->u,
+ &args, sock_info);
+ else
+ ret = -ENOSYS;
+ break;
+ }
+ case LTTNG_UST_COUNTER_GLOBAL:
+ {
+ /* Receive shm_fd */
+ ret = ustcomm_recv_counter_shm_from_sessiond(sock,
+ &args.counter_shm.shm_fd);
+ if (ret) {
+ goto error;
+ }
+
+ if (ops->cmd)
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) &lum->u,
+ &args, sock_info);
+ else
+ ret = -ENOSYS;
+ break;
+ }
+ case LTTNG_UST_COUNTER_CPU:
+ {
+ /* Receive shm_fd */
+ ret = ustcomm_recv_counter_shm_from_sessiond(sock,
+ &args.counter_shm.shm_fd);
+ if (ret) {
+ goto error;
+ }
+
+ if (ops->cmd)
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) &lum->u,
+ &args, sock_info);
+ else
+ ret = -ENOSYS;
+ break;
+ }
+
default:
if (ops->cmd)
ret = ops->cmd(lum->handle, lum->cmd,
lttng_ring_buffer_client_overwrite_rt_init();
lttng_ring_buffer_client_discard_init();
lttng_ring_buffer_client_discard_rt_init();
+ lttng_counter_client_percpu_32_overflow_init();
+ lttng_counter_client_percpu_64_overflow_init();
lttng_perf_counter_init();
/*
* Invoke ust malloc wrapper init before starting other threads.
lttng_ring_buffer_client_overwrite_rt_exit();
lttng_ring_buffer_client_overwrite_exit();
lttng_ring_buffer_metadata_client_exit();
+ lttng_counter_client_percpu_32_overflow_exit();
+ lttng_counter_client_percpu_64_overflow_exit();
lttng_ust_statedump_destroy();
exit_tracepoint();
if (!exiting) {
--- /dev/null
+/*
+ * trigger-notification.c
+ *
+ * Copyright (C) 2020 Francis Deslauriers <francis.deslauriers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define _GNU_SOURCE
+#define _LGPL_SOURCE
+
+#include <assert.h>
+#include <byteswap.h>
+#include <errno.h>
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/uio.h>
+#include <lttng/ust-events.h>
+#include <usterr-signal-safe.h>
+
+#include "../libmsgpack/msgpack.h"
+#include "lttng-bytecode.h"
+#include "share.h"
+
+/*
+ * We want this write to be atomic AND non-blocking, meaning that we
+ * want to write either everything OR nothing.
+ * According to `pipe(7)`, writes of at most `PIPE_BUF` bytes are
+ * atomic, so we bound the capture buffer size to `PIPE_BUF` minus the
+ * size of the notification struct we are sending alongside the capture
+ * buffer.
+ */
+#define CAPTURE_BUFFER_SIZE \
+ (PIPE_BUF - sizeof(struct lttng_ust_trigger_notification) - 1)
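+/*
+ * Illustrative sizing (platform-dependent assumption): on Linux,
+ * PIPE_BUF is 4096 bytes, so with a 16-byte notification struct the
+ * capture buffer above would be bounded to 4079 bytes.
+ */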
+
+struct lttng_trigger_notification {
+ int notification_fd;
+ uint64_t trigger_id;
+ uint8_t capture_buf[CAPTURE_BUFFER_SIZE];
+ struct lttng_msgpack_writer writer;
+ bool has_captures;
+};
+
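+/*
+ * For example, an enum capture of value 117 is serialized as the
+ * msgpack map {"type": "enum", "value": 117}; see MAP_EXPECTED in
+ * tests/libmsgpack/test_msgpack.c for the exact byte sequence.
+ */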
+static
+void capture_enum(struct lttng_msgpack_writer *writer,
+ struct lttng_interpreter_output *output)
+{
+ lttng_msgpack_begin_map(writer, 2);
+ lttng_msgpack_write_str(writer, "type");
+ lttng_msgpack_write_str(writer, "enum");
+
+ lttng_msgpack_write_str(writer, "value");
+
+ switch (output->type) {
+ case LTTNG_INTERPRETER_TYPE_SIGNED_ENUM:
+ lttng_msgpack_write_signed_integer(writer, output->u.s);
+ break;
+ case LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM:
+ lttng_msgpack_write_unsigned_integer(writer, output->u.u);
+ break;
+ default:
+ abort();
+ }
+
+ lttng_msgpack_end_map(writer);
+}
+
+static
+int64_t capture_sequence_element_signed(uint8_t *ptr,
+ const struct lttng_integer_type *type)
+{
+ int64_t value;
+ unsigned int size = type->size;
+ bool byte_order_reversed = type->reverse_byte_order;
+
+ switch (size) {
+ case 8:
+ value = (int8_t) *ptr;
+ break;
+ case 16:
+ {
+ int16_t tmp;
+ tmp = *(int16_t *) ptr;
+ if (byte_order_reversed)
+ tmp = bswap_16(tmp);
+
+ value = tmp;
+ break;
+ }
+ case 32:
+ {
+ int32_t tmp;
+ tmp = *(int32_t *) ptr;
+ if (byte_order_reversed)
+ tmp = bswap_32(tmp);
+
+ value = tmp;
+ break;
+ }
+ case 64:
+ {
+ int64_t tmp;
+ tmp = *(int64_t *) ptr;
+ if (byte_order_reversed)
+ tmp = bswap_64(tmp);
+
+ value = tmp;
+ break;
+ }
+ default:
+ abort();
+ }
+
+ return value;
+}
+
+static
+uint64_t capture_sequence_element_unsigned(uint8_t *ptr,
+ const struct lttng_integer_type *type)
+{
+ uint64_t value;
+ unsigned int size = type->size;
+ bool byte_order_reversed = type->reverse_byte_order;
+
+ switch (size) {
+ case 8:
+ value = *ptr;
+ break;
+ case 16:
+ {
+ uint16_t tmp;
+ tmp = *(uint16_t *) ptr;
+ if (byte_order_reversed)
+ tmp = bswap_16(tmp);
+
+ value = tmp;
+ break;
+ }
+ case 32:
+ {
+ uint32_t tmp;
+ tmp = *(uint32_t *) ptr;
+ if (byte_order_reversed)
+ tmp = bswap_32(tmp);
+
+ value = tmp;
+ break;
+ }
+ case 64:
+ {
+ uint64_t tmp;
+ tmp = *(uint64_t *) ptr;
+ if (byte_order_reversed)
+ tmp = bswap_64(tmp);
+
+ value = tmp;
+ break;
+ }
+ default:
+ abort();
+ }
+
+ return value;
+}
+
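+/*
+ * For example, a captured sequence of three 16-bit unsigned integers
+ * {1, 2, 3} is emitted as a fixarray header (0x93) followed by three
+ * msgpack integer objects (an illustration; the encoding of each
+ * element depends on its value).
+ */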
+static
+void capture_sequence(struct lttng_msgpack_writer *writer,
+ struct lttng_interpreter_output *output)
+{
+ const struct lttng_integer_type *integer_type;
+ const struct lttng_type *nested_type;
+ uint8_t *ptr;
+ bool signedness;
+ int i;
+
+ lttng_msgpack_begin_array(writer, output->u.sequence.nr_elem);
+
+ ptr = (uint8_t *) output->u.sequence.ptr;
+ nested_type = output->u.sequence.nested_type;
+ switch (nested_type->atype) {
+ case atype_integer:
+ integer_type = &nested_type->u.integer;
+ break;
+ case atype_enum:
+ /* Treat enumeration as an integer. */
+ integer_type = &nested_type->u.enum_nestable.container_type->u.integer;
+ break;
+ default:
+ /* Capture of array of non-integer are not supported. */
+ abort();
+ }
+ signedness = integer_type->signedness;
+ for (i = 0; i < output->u.sequence.nr_elem; i++) {
+ if (signedness) {
+ lttng_msgpack_write_signed_integer(writer,
+ capture_sequence_element_signed(ptr, integer_type));
+ } else {
+ lttng_msgpack_write_unsigned_integer(writer,
+ capture_sequence_element_unsigned(ptr, integer_type));
+ }
+
+ /*
+ * We assume that alignment is smaller or equal to the size.
+ * This currently holds true but if it changes in the future,
+ * we will want to change the pointer arithmetics below to
+ * take into account that the next element might be further
+ * away.
+ */
+ assert(integer_type->alignment <= integer_type->size);
+
+ /* Size is in number of bits. */
+ ptr += integer_type->size / CHAR_BIT;
+ }
+
+ lttng_msgpack_end_array(writer);
+}
+
+static
+void notification_init(struct lttng_trigger_notification *notif,
+ struct lttng_trigger *trigger)
+{
+ struct lttng_msgpack_writer *writer = &notif->writer;
+
+ notif->trigger_id = trigger->id;
+ notif->notification_fd = trigger->group->notification_fd;
+ notif->has_captures = false;
+
+ if (trigger->num_captures > 0) {
+ lttng_msgpack_writer_init(writer, notif->capture_buf,
+ CAPTURE_BUFFER_SIZE);
+
+ lttng_msgpack_begin_array(writer, trigger->num_captures);
+ notif->has_captures = true;
+ }
+}
+
+static
+void notification_append_capture(
+ struct lttng_trigger_notification *notif,
+ struct lttng_interpreter_output *output)
+{
+ struct lttng_msgpack_writer *writer = &notif->writer;
+
+ switch (output->type) {
+ case LTTNG_INTERPRETER_TYPE_S64:
+ lttng_msgpack_write_signed_integer(writer, output->u.s);
+ break;
+ case LTTNG_INTERPRETER_TYPE_U64:
+ lttng_msgpack_write_unsigned_integer(writer, output->u.u);
+ break;
+ case LTTNG_INTERPRETER_TYPE_DOUBLE:
+ lttng_msgpack_write_double(writer, output->u.d);
+ break;
+ case LTTNG_INTERPRETER_TYPE_STRING:
+ lttng_msgpack_write_str(writer, output->u.str.str);
+ break;
+ case LTTNG_INTERPRETER_TYPE_SEQUENCE:
+ capture_sequence(writer, output);
+ break;
+ case LTTNG_INTERPRETER_TYPE_SIGNED_ENUM:
+ case LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM:
+ capture_enum(writer, output);
+ break;
+ default:
+ abort();
+ }
+}
+
+static
+void notification_append_empty_capture(
+ struct lttng_trigger_notification *notif)
+{
+ lttng_msgpack_write_nil(&notif->writer);
+}
+
+static void record_error(struct lttng_trigger *trigger)
+{
+ struct lttng_trigger_group *trigger_group = trigger->group;
+ size_t dimension_index[1];
+ int ret;
+
+ dimension_index[0] = trigger->error_counter_index;
+ ret = trigger_group->error_counter->ops->counter_add(
+ trigger_group->error_counter->counter,
+ dimension_index, 1);
+ if (ret)
+ WARN_ON_ONCE(1);
+}
+
+static
+void notification_send(struct lttng_trigger_notification *notif,
+ struct lttng_trigger *trigger)
+{
+ ssize_t ret;
+ size_t content_len;
+ int iovec_count = 1;
+ struct lttng_ust_trigger_notification ust_notif;
+ struct iovec iov[2];
+
+ assert(notif);
+
+ ust_notif.id = trigger->id;
+
+ /*
+ * Prepare sending the notification from multiple buffers using an
+ * array of `struct iovec`. The first buffer of the vector is the
+ * notification structure itself and is always present.
+ */
+ iov[0].iov_base = &ust_notif;
+ iov[0].iov_len = sizeof(ust_notif);
+
+ if (notif->has_captures) {
+ /*
+ * If captures were requested, the second buffer of the array
+ * is the capture buffer.
+ */
+ assert(notif->writer.buffer);
+ content_len = notif->writer.write_pos - notif->writer.buffer;
+
+ assert(content_len > 0 && content_len <= CAPTURE_BUFFER_SIZE);
+
+ iov[1].iov_base = notif->capture_buf;
+ iov[1].iov_len = content_len;
+
+ iovec_count++;
+ } else {
+ content_len = 0;
+ }
+
+ /*
+ * Update the capture buffer size so that receiver of the buffer will
+ * know how much to expect.
+ */
+ ust_notif.capture_buf_size = content_len;
+
+ /* Send all the buffers. */
+ ret = patient_writev(notif->notification_fd, iov, iovec_count);
+ if (ret == -1) {
+ if (errno == EAGAIN) {
+ record_error(trigger);
+ DBG("Cannot send trigger notification without blocking: %s",
+ strerror(errno));
+ } else {
+ DBG("Error to sending trigger notification: %s",
+ strerror(errno));
+ abort();
+ }
+ }
+}
+
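+/*
+ * Wire format summary: each notification written to the notification
+ * file descriptor is a `struct lttng_ust_trigger_notification` header
+ * (trigger id and capture buffer size) immediately followed by
+ * `capture_buf_size` bytes of msgpack-encoded captures, sent with a
+ * single writev() so the pipe write stays atomic.
+ */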
+void lttng_trigger_notification_send(struct lttng_trigger *trigger,
+ const char *stack_data)
+{
+ /*
+ * This function is called from the probe, so we must not do
+ * dynamic allocation in this context.
+ */
+ struct lttng_trigger_notification notif = {0};
+
+ notification_init(&notif, trigger);
+
+ if (caa_unlikely(!cds_list_empty(&trigger->capture_bytecode_runtime_head))) {
+ struct lttng_bytecode_runtime *capture_bc_runtime;
+
+ /*
+ * Iterate over all the capture bytecodes. If the interpreter
+ * functions returns successfully, append the value of the
+ * `output` parameter to the capture buffer. If the interpreter
+ * fails, append an empty capture to the buffer.
+ */
+ cds_list_for_each_entry(capture_bc_runtime,
+ &trigger->capture_bytecode_runtime_head, node) {
+ struct lttng_interpreter_output output;
+
+ if (capture_bc_runtime->interpreter_funcs.capture(capture_bc_runtime,
+ stack_data, &output) & LTTNG_INTERPRETER_RECORD_FLAG)
+ notification_append_capture(&notif, &output);
+ else
+ notification_append_empty_capture(&notif);
+ }
+ }
+
+ /*
+ * Send the notification (including the capture buffer) to the
+ * sessiond.
+ */
+ notification_send(&notif, trigger);
+}
#include "jhash.h"
static CDS_LIST_HEAD(lttng_transport_list);
+static CDS_LIST_HEAD(lttng_counter_transport_list);
struct lttng_transport *lttng_transport_find(const char *name)
{
return NULL;
}
+struct lttng_counter_transport *lttng_counter_transport_find(const char *name)
+{
+ struct lttng_counter_transport *transport;
+
+ cds_list_for_each_entry(transport, &lttng_counter_transport_list, node) {
+ if (!strcmp(transport->name, name))
+ return transport;
+ }
+ return NULL;
+}
+
/**
* lttng_transport_register - LTT transport registration
* @transport: transport structure
cds_list_del(&transport->node);
}
+/**
+ * lttng_counter_transport_register - LTTng counter transport registration
+ * @transport: transport structure
+ *
+ * Registers a counter transport which can be used as output to extract
+ * the data out of LTTng. Called with ust_lock held.
+ */
+void lttng_counter_transport_register(struct lttng_counter_transport *transport)
+{
+ cds_list_add_tail(&transport->node, &lttng_counter_transport_list);
+}
+
+/**
+ * lttng_counter_transport_unregister - LTTng counter transport unregistration
+ * @transport: transport structure
+ * Called with ust_lock held.
+ */
+void lttng_counter_transport_unregister(struct lttng_counter_transport *transport)
+{
+ cds_list_del(&transport->node);
+}
+
/*
* Needed by comm layer.
*/
--- /dev/null
+#ifndef _LTTNG_UST_EVENTS_INTERNAL_H
+#define _LTTNG_UST_EVENTS_INTERNAL_H
+
+/*
+ * ust-events-internal.h
+ *
+ * Copyright 2019 (c) - Francis Deslauriers <francis.deslauriers@efficios.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <stdint.h>
+
+#include <urcu/list.h>
+#include <urcu/hlist.h>
+
+#include <helper.h>
+#include <lttng/ust-events.h>
+
+struct lttng_event_enabler {
+ struct lttng_enabler base;
+ struct cds_list_head node; /* per-session list of enablers */
+ struct lttng_channel *chan;
+ /*
+ * Unused, but kept around to make it explicit that the tracer can do
+ * it.
+ */
+ struct lttng_ctx *ctx;
+};
+
+struct lttng_trigger_enabler {
+ struct lttng_enabler base;
+ uint64_t id;
+ uint64_t error_counter_index;
+ struct cds_list_head node; /* per-app list of trigger enablers */
+ struct cds_list_head capture_bytecode_head;
+ struct lttng_trigger_group *group; /* weak ref */
+ uint64_t num_captures;
+};
+
+enum lttng_ust_bytecode_node_type {
+ LTTNG_UST_BYTECODE_NODE_TYPE_FILTER,
+ LTTNG_UST_BYTECODE_NODE_TYPE_CAPTURE,
+};
+
+struct lttng_ust_bytecode_node {
+ enum lttng_ust_bytecode_node_type type;
+ struct cds_list_head node;
+ struct lttng_enabler *enabler;
+ struct {
+ uint32_t len;
+ uint32_t reloc_offset;
+ uint64_t seqnum;
+ char data[];
+ } bc;
+};
+
+struct lttng_ust_excluder_node {
+ struct cds_list_head node;
+ struct lttng_enabler *enabler;
+ /*
+ * struct lttng_ust_event_exclusion had variable sized array,
+ * must be last field.
+ */
+ struct lttng_ust_event_exclusion excluder;
+};
+
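+/*
+ * Helpers returning the common `struct lttng_enabler` base of the two
+ * concrete enabler types.
+ */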
+static inline
+struct lttng_enabler *lttng_event_enabler_as_enabler(
+ struct lttng_event_enabler *event_enabler)
+{
+ return &event_enabler->base;
+}
+
+static inline
+struct lttng_enabler *lttng_trigger_enabler_as_enabler(
+ struct lttng_trigger_enabler *trigger_enabler)
+{
+ return &trigger_enabler->base;
+}
+
+/*
+ * Allocate and initialize a `struct lttng_event_enabler` object.
+ *
+ * On success, returns a `struct lttng_event_enabler`,
+ * On memory error, returns NULL.
+ */
+LTTNG_HIDDEN
+struct lttng_event_enabler *lttng_event_enabler_create(
+ enum lttng_enabler_format_type format_type,
+ struct lttng_ust_event *event_param,
+ struct lttng_channel *chan);
+
+/*
+ * Destroy a `struct lttng_event_enabler` object.
+ */
+LTTNG_HIDDEN
+void lttng_event_enabler_destroy(struct lttng_event_enabler *enabler);
+
+/*
+ * Enable a `struct lttng_event_enabler` object and all events related to this
+ * enabler.
+ */
+LTTNG_HIDDEN
+int lttng_event_enabler_enable(struct lttng_event_enabler *enabler);
+
+/*
+ * Disable a `struct lttng_event_enabler` object and all events related to this
+ * enabler.
+ */
+LTTNG_HIDDEN
+int lttng_event_enabler_disable(struct lttng_event_enabler *enabler);
+
+/*
+ * Attach filter bytecode program to `struct lttng_event_enabler` and all
+ * events related to this enabler.
+ */
+LTTNG_HIDDEN
+int lttng_event_enabler_attach_filter_bytecode(
+ struct lttng_event_enabler *enabler,
+ struct lttng_ust_bytecode_node *bytecode);
+
+/*
+ * Attach an application context to an event enabler.
+ *
+ * Not implemented.
+ */
+LTTNG_HIDDEN
+int lttng_event_enabler_attach_context(struct lttng_event_enabler *enabler,
+ struct lttng_ust_context *ctx);
+
+/*
+ * Attach exclusion list to `struct lttng_event_enabler` and all
+ * events related to this enabler.
+ */
+LTTNG_HIDDEN
+int lttng_event_enabler_attach_exclusion(struct lttng_event_enabler *enabler,
+ struct lttng_ust_excluder_node *excluder);
+
+/*
+ * Synchronize bytecodes for the enabler and the instance (event or trigger).
+ *
+ * This function goes over all bytecode programs of the enabler (event or
+ * trigger enabler) to ensure each is linked to the provided instance.
+ */
+LTTNG_HIDDEN
+void lttng_enabler_link_bytecode(const struct lttng_event_desc *event_desc,
+ struct lttng_ctx **ctx,
+ struct cds_list_head *instance_bytecode_runtime_head,
+ struct cds_list_head *enabler_bytecode_runtime_head);
+
+/*
+ * Allocate and initialize a `struct lttng_trigger_group` object.
+ *
+ * On success, returns a `struct lttng_trigger_group`,
+ * on memory error, returns NULL.
+ */
+LTTNG_HIDDEN
+struct lttng_trigger_group *lttng_trigger_group_create(void);
+
+/*
+ * Destroy a `struct lttng_trigger_group` object.
+ */
+LTTNG_HIDDEN
+void lttng_trigger_group_destroy(
+ struct lttng_trigger_group *trigger_group);
+
+/*
+ * Allocate and initialize a `struct lttng_trigger_enabler` object.
+ *
+ * On success, returns a `struct lttng_trigger_enabler`,
+ * On memory error, returns NULL.
+ */
+LTTNG_HIDDEN
+struct lttng_trigger_enabler *lttng_trigger_enabler_create(
+ struct lttng_trigger_group *trigger_group,
+ enum lttng_enabler_format_type format_type,
+ struct lttng_ust_trigger *trigger_param);
+
+/*
+ * Destroy a `struct lttng_trigger_enabler` object.
+ */
+LTTNG_HIDDEN
+void lttng_trigger_enabler_destroy(struct lttng_trigger_enabler *trigger_enabler);
+
+/*
+ * Enable a `struct lttng_trigger_enabler` object and all triggers related to
+ * this enabler.
+ */
+LTTNG_HIDDEN
+int lttng_trigger_enabler_enable(struct lttng_trigger_enabler *trigger_enabler);
+
+/*
+ * Disable a `struct lttng_trigger_enabler` object and all triggers related to
+ * this enabler.
+ */
+LTTNG_HIDDEN
+int lttng_trigger_enabler_disable(struct lttng_trigger_enabler *trigger_enabler);
+
+/*
+ * Attach filter bytecode program to `struct lttng_trigger_enabler` and all
+ * triggers related to this enabler.
+ */
+LTTNG_HIDDEN
+int lttng_trigger_enabler_attach_filter_bytecode(
+ struct lttng_trigger_enabler *trigger_enabler,
+ struct lttng_ust_bytecode_node *bytecode);
+
+/*
+ * Attach capture bytecode program to `struct lttng_trigger_enabler` and all
+ * triggers related to this enabler.
+ */
+LTTNG_HIDDEN
+int lttng_trigger_enabler_attach_capture_bytecode(
+ struct lttng_trigger_enabler *trigger_enabler,
+ struct lttng_ust_bytecode_node *bytecode);
+
+/*
+ * Attach exclusion list to `struct lttng_trigger_enabler` and all
+ * triggers related to this enabler.
+ */
+LTTNG_HIDDEN
+int lttng_trigger_enabler_attach_exclusion(
+ struct lttng_trigger_enabler *trigger_enabler,
+ struct lttng_ust_excluder_node *excluder);
+
+LTTNG_HIDDEN
+void lttng_free_trigger_filter_runtime(struct lttng_trigger *trigger);
+
+/*
+ * Connect the probe on all enablers matching this event description.
+ * Called on library load.
+ */
+LTTNG_HIDDEN
+int lttng_fix_pending_triggers(void);
+
+#endif /* _LTTNG_UST_EVENTS_INTERNAL_H */
--- /dev/null
+AM_CPPFLAGS = -I$(top_srcdir)/include -I$(top_builddir)/include
+AM_CFLAGS += -fno-strict-aliasing
+
+noinst_LTLIBRARIES = libmsgpack.la
+
+libmsgpack_la_SOURCES = \
+ msgpack.c msgpack.h
+
+libmsgpack_la_CFLAGS = -DUST_COMPONENT="libmsgpack" $(AM_CFLAGS)
--- /dev/null
+/*
+ * msgpack.c
+ *
+ * Copyright (C) 2020 Francis Deslauriers <francis.deslauriers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define _GNU_SOURCE
+#define _LGPL_SOURCE
+#include <stddef.h>
+
+#define MSGPACK_FIXSTR_ID_MASK 0xA0
+#define MSGPACK_FIXMAP_ID_MASK 0x80
+#define MSGPACK_FIXARRAY_ID_MASK 0x90
+
+#define MSGPACK_NIL_ID 0xC0
+#define MSGPACK_FALSE_ID 0xC2
+#define MSGPACK_TRUE_ID 0xC3
+#define MSGPACK_MAP16_ID 0xDE
+#define MSGPACK_ARRAY16_ID 0xDC
+
+#define MSGPACK_UINT8_ID 0xCC
+#define MSGPACK_UINT16_ID 0xCD
+#define MSGPACK_UINT32_ID 0xCE
+#define MSGPACK_UINT64_ID 0xCF
+
+#define MSGPACK_INT8_ID 0xD0
+#define MSGPACK_INT16_ID 0xD1
+#define MSGPACK_INT32_ID 0xD2
+#define MSGPACK_INT64_ID 0xD3
+
+#define MSGPACK_FLOAT64_ID 0xCB
+#define MSGPACK_STR16_ID 0xDA
+
+#define MSGPACK_FIXINT_MAX ((1 << 7) - 1)
+#define MSGPACK_FIXINT_MIN (-(1 << 5))
+#define MSGPACK_FIXMAP_MAX_COUNT 15
+#define MSGPACK_FIXARRAY_MAX_COUNT 15
+#define MSGPACK_FIXSTR_MAX_LENGTH 31
+
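+/*
+ * Encoding examples (cross-checked against the expected byte sequences
+ * in tests/libmsgpack/test_msgpack.c): 127 fits in a positive fixint
+ * and encodes as the single byte 0x7f; 128 needs the uint8 marker
+ * (0xcc 0x80); -33 falls outside the negative fixint range and encodes
+ * as int8 (0xd0 0xdf).
+ */
+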
+#ifdef __KERNEL__
+#include <linux/bug.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <lttng/msgpack.h>
+
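+/*
+ * The kernel build provides no <stdint.h>; the integer limit macros
+ * used below are therefore defined locally (assuming the included
+ * kernel headers do not already define them).
+ */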
+#define INT8_MIN (-128)
+#define INT16_MIN (-32767-1)
+#define INT32_MIN (-2147483647-1)
+#define INT8_MAX (127)
+#define INT16_MAX (32767)
+#define INT32_MAX (2147483647)
+#define UINT8_MAX (255)
+#define UINT16_MAX (65535)
+#define UINT32_MAX (4294967295U)
+
+#define byteswap_host_to_be16(_tmp) cpu_to_be16(_tmp)
+#define byteswap_host_to_be32(_tmp) cpu_to_be32(_tmp)
+#define byteswap_host_to_be64(_tmp) cpu_to_be64(_tmp)
+
+#define lttng_msgpack_assert(cond) WARN_ON(!(cond))
+
+#else /* __KERNEL__ */
+
+#include <endian.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "msgpack.h"
+
+#define byteswap_host_to_be16(_tmp) htobe16(_tmp)
+#define byteswap_host_to_be32(_tmp) htobe32(_tmp)
+#define byteswap_host_to_be64(_tmp) htobe64(_tmp)
+
+#define lttng_msgpack_assert(cond) ({ \
+ if (!(cond)) \
+ fprintf(stderr, "Assertion failed. %s:%d\n", __FILE__, __LINE__); \
+ })
+#endif /* __KERNEL__ */
+
+static inline int lttng_msgpack_append_buffer(
+ struct lttng_msgpack_writer *writer,
+ const uint8_t *buf,
+ size_t length)
+{
+ int ret = 0;
+
+ lttng_msgpack_assert(buf);
+
+ /* Ensure we are not trying to write after the end of the buffer. */
+ if (writer->write_pos + length > writer->end_write_pos) {
+ ret = -1;
+ goto end;
+ }
+
+ memcpy(writer->write_pos, buf, length);
+ writer->write_pos += length;
+end:
+ return ret;
+}
+
+static inline int lttng_msgpack_append_u8(
+ struct lttng_msgpack_writer *writer, uint8_t value)
+{
+ return lttng_msgpack_append_buffer(writer, &value, sizeof(value));
+}
+
+static inline int lttng_msgpack_append_u16(
+ struct lttng_msgpack_writer *writer, uint16_t value)
+{
+ value = byteswap_host_to_be16(value);
+
+ return lttng_msgpack_append_buffer(writer, (uint8_t *) &value, sizeof(value));
+}
+
+static inline int lttng_msgpack_append_u32(
+ struct lttng_msgpack_writer *writer, uint32_t value)
+{
+ value = byteswap_host_to_be32(value);
+
+ return lttng_msgpack_append_buffer(writer, (uint8_t *) &value, sizeof(value));
+}
+
+static inline int lttng_msgpack_append_u64(
+ struct lttng_msgpack_writer *writer, uint64_t value)
+{
+ value = byteswap_host_to_be64(value);
+
+ return lttng_msgpack_append_buffer(writer, (uint8_t *) &value, sizeof(value));
+}
+
+static inline int lttng_msgpack_append_f64(
+ struct lttng_msgpack_writer *writer, double value)
+{
+ union {
+ double d;
+ uint64_t u;
+ } u;
+
+ u.d = value;
+
+ return lttng_msgpack_append_u64(writer, u.u);
+}
+
+static inline int lttng_msgpack_append_i8(
+ struct lttng_msgpack_writer *writer, int8_t value)
+{
+ return lttng_msgpack_append_u8(writer, (uint8_t) value);
+}
+
+static inline int lttng_msgpack_append_i16(
+ struct lttng_msgpack_writer *writer, int16_t value)
+{
+ return lttng_msgpack_append_u16(writer, (uint16_t) value);
+}
+
+static inline int lttng_msgpack_append_i32(
+ struct lttng_msgpack_writer *writer, int32_t value)
+{
+ return lttng_msgpack_append_u32(writer, (uint32_t) value);
+}
+
+static inline int lttng_msgpack_append_i64(
+ struct lttng_msgpack_writer *writer, int64_t value)
+{
+ return lttng_msgpack_append_u64(writer, (uint64_t) value);
+}
+
+static inline int lttng_msgpack_encode_f64(
+ struct lttng_msgpack_writer *writer, double value)
+{
+ int ret;
+
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_FLOAT64_ID);
+ if (ret)
+ goto end;
+
+ ret = lttng_msgpack_append_f64(writer, value);
+ if (ret)
+ goto end;
+
+end:
+ return ret;
+}
+
+static inline int lttng_msgpack_encode_fixmap(
+ struct lttng_msgpack_writer *writer, uint8_t count)
+{
+ int ret = 0;
+
+ lttng_msgpack_assert(count <= MSGPACK_FIXMAP_MAX_COUNT);
+
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_FIXMAP_ID_MASK | count);
+ if (ret)
+ goto end;
+
+end:
+ return ret;
+}
+
+static inline int lttng_msgpack_encode_map16(
+ struct lttng_msgpack_writer *writer, uint16_t count)
+{
+ int ret;
+
+ lttng_msgpack_assert(count > MSGPACK_FIXMAP_MAX_COUNT);
+
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_MAP16_ID);
+ if (ret)
+ goto end;
+
+ ret = lttng_msgpack_append_u16(writer, count);
+ if (ret)
+ goto end;
+
+end:
+ return ret;
+}
+
+static inline int lttng_msgpack_encode_fixarray(
+ struct lttng_msgpack_writer *writer, uint8_t count)
+{
+ int ret = 0;
+
+ lttng_msgpack_assert(count <= MSGPACK_FIXARRAY_MAX_COUNT);
+
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_FIXARRAY_ID_MASK | count);
+ if (ret)
+ goto end;
+
+end:
+ return ret;
+}
+
+static inline int lttng_msgpack_encode_array16(
+ struct lttng_msgpack_writer *writer, uint16_t count)
+{
+ int ret;
+
+ lttng_msgpack_assert(count > MSGPACK_FIXARRAY_MAX_COUNT);
+
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_ARRAY16_ID);
+ if (ret)
+ goto end;
+
+ ret = lttng_msgpack_append_u16(writer, count);
+ if (ret)
+ goto end;
+
+end:
+ return ret;
+}
+
+static inline int lttng_msgpack_encode_fixstr(
+ struct lttng_msgpack_writer *writer,
+ const char *str,
+ uint8_t len)
+{
+ int ret;
+
+ lttng_msgpack_assert(len <= MSGPACK_FIXSTR_MAX_LENGTH);
+
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_FIXSTR_ID_MASK | len);
+ if (ret)
+ goto end;
+
+ ret = lttng_msgpack_append_buffer(writer, (uint8_t *) str, len);
+ if (ret)
+ goto end;
+
+end:
+ return ret;
+}
+
+static inline int lttng_msgpack_encode_str16(
+ struct lttng_msgpack_writer *writer,
+ const char *str,
+ uint16_t len)
+{
+ int ret;
+
+ lttng_msgpack_assert(len > MSGPACK_FIXSTR_MAX_LENGTH);
+
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_STR16_ID);
+ if (ret)
+ goto end;
+
+ ret = lttng_msgpack_append_u16(writer, len);
+ if (ret)
+ goto end;
+
+ ret = lttng_msgpack_append_buffer(writer, (uint8_t *) str, len);
+ if (ret)
+ goto end;
+
+end:
+ return ret;
+}
+
+int lttng_msgpack_begin_map(struct lttng_msgpack_writer *writer, size_t count)
+{
+ int ret;
+
+ if (count >= (1 << 16)) {
+ ret = -1;
+ goto end;
+ }
+
+ if (count <= MSGPACK_FIXMAP_MAX_COUNT)
+ ret = lttng_msgpack_encode_fixmap(writer, count);
+ else
+ ret = lttng_msgpack_encode_map16(writer, count);
+
+ writer->map_nesting++;
+end:
+ return ret;
+}
+
+int lttng_msgpack_end_map(struct lttng_msgpack_writer *writer)
+{
+ lttng_msgpack_assert(writer->map_nesting > 0);
+ writer->map_nesting--;
+ return 0;
+}
+
+int lttng_msgpack_begin_array(
+ struct lttng_msgpack_writer *writer, size_t count)
+{
+ int ret;
+
+ if (count >= (1 << 16)) {
+ ret = -1;
+ goto end;
+ }
+
+ if (count <= MSGPACK_FIXARRAY_MAX_COUNT)
+ ret = lttng_msgpack_encode_fixarray(writer, count);
+ else
+ ret = lttng_msgpack_encode_array16(writer, count);
+
+ writer->array_nesting++;
+end:
+ return ret;
+}
+
+int lttng_msgpack_end_array(struct lttng_msgpack_writer *writer)
+{
+ lttng_msgpack_assert(writer->array_nesting > 0);
+ writer->array_nesting--;
+ return 0;
+}
+
+int lttng_msgpack_write_str(struct lttng_msgpack_writer *writer,
+ const char *str)
+{
+ int ret;
+ size_t length = strlen(str);
+
+ if (length >= (1 << 16)) {
+ ret = -1;
+ goto end;
+ }
+
+ if (length <= MSGPACK_FIXSTR_MAX_LENGTH)
+ ret = lttng_msgpack_encode_fixstr(writer, str, length);
+ else
+ ret = lttng_msgpack_encode_str16(writer, str, length);
+
+end:
+ return ret;
+}
+
+int lttng_msgpack_write_nil(struct lttng_msgpack_writer *writer)
+{
+ return lttng_msgpack_append_u8(writer, MSGPACK_NIL_ID);
+}
+
+int lttng_msgpack_write_true(struct lttng_msgpack_writer *writer)
+{
+ return lttng_msgpack_append_u8(writer, MSGPACK_TRUE_ID);
+}
+
+int lttng_msgpack_write_false(struct lttng_msgpack_writer *writer)
+{
+ return lttng_msgpack_append_u8(writer, MSGPACK_FALSE_ID);
+}
+
+int lttng_msgpack_write_unsigned_integer(
+ struct lttng_msgpack_writer *writer, uint64_t value)
+{
+ int ret = 0;
+
+ if (value <= MSGPACK_FIXINT_MAX) {
+ ret = lttng_msgpack_append_u8(writer, (uint8_t) value);
+ if (ret)
+ goto end;
+ } else if (value <= UINT8_MAX) {
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_UINT8_ID);
+ if (ret)
+ goto end;
+
+ ret = lttng_msgpack_append_u8(writer, (uint8_t) value);
+ if (ret)
+ goto end;
+ } else if (value <= UINT16_MAX) {
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_UINT16_ID);
+ if (ret)
+ goto end;
+
+ ret = lttng_msgpack_append_u16(writer, (uint16_t) value);
+ if (ret)
+ goto end;
+ } else if (value <= UINT32_MAX) {
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_UINT32_ID);
+ if (ret)
+ goto end;
+
+ ret = lttng_msgpack_append_u32(writer, (uint32_t) value);
+ if (ret)
+ goto end;
+ } else {
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_UINT64_ID);
+ if (ret)
+ goto end;
+
+ ret = lttng_msgpack_append_u64(writer, value);
+ if (ret)
+ goto end;
+ }
+
+end:
+ return ret;
+}
+
+int lttng_msgpack_write_signed_integer(struct lttng_msgpack_writer *writer, int64_t value)
+{
+ int ret;
+
+ if (value >= MSGPACK_FIXINT_MIN && value <= MSGPACK_FIXINT_MAX) {
+ ret = lttng_msgpack_append_i8(writer, (int8_t) value);
+ if (ret)
+ goto end;
+ } else if (value >= INT8_MIN && value <= INT8_MAX) {
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_INT8_ID);
+ if (ret)
+ goto end;
+
+ ret = lttng_msgpack_append_i8(writer, (int8_t) value);
+ if (ret)
+ goto end;
+ } else if (value >= INT16_MIN && value <= INT16_MAX) {
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_INT16_ID);
+ if (ret)
+ goto end;
+
+ ret = lttng_msgpack_append_i16(writer, (int16_t) value);
+ if (ret)
+ goto end;
+ } else if (value >= INT32_MIN && value <= INT32_MAX) {
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_INT32_ID);
+ if (ret)
+ goto end;
+
+ ret = lttng_msgpack_append_i32(writer, (int32_t) value);
+ if (ret)
+ goto end;
+ } else {
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_INT64_ID);
+ if (ret)
+ goto end;
+
+ ret = lttng_msgpack_append_i64(writer, value);
+ if (ret)
+ goto end;
+ }
+
+end:
+ return ret;
+}
+
+int lttng_msgpack_write_double(struct lttng_msgpack_writer *writer, double value)
+{
+ return lttng_msgpack_encode_f64(writer, value);
+}
+
+void lttng_msgpack_writer_init(struct lttng_msgpack_writer *writer,
+ uint8_t *buffer, size_t size)
+{
+ lttng_msgpack_assert(buffer);
+
+ writer->buffer = buffer;
+ writer->write_pos = buffer;
+ writer->end_write_pos = buffer + size;
+
+ writer->array_nesting = 0;
+ writer->map_nesting = 0;
+}
+
+void lttng_msgpack_writer_fini(struct lttng_msgpack_writer *writer)
+{
+ memset(writer, 0, sizeof(*writer));
+}
--- /dev/null
+#ifndef _LTTNG_UST_MSGPACK_H
+#define _LTTNG_UST_MSGPACK_H
+
+/*
+ * msgpack.h
+ *
+ * Copyright (C) 2020 Francis Deslauriers <francis.deslauriers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stddef.h>
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else /* __KERNEL__ */
+#include <stdint.h>
+#endif /* __KERNEL__ */
+
+struct lttng_msgpack_writer {
+ uint8_t *buffer;
+ uint8_t *write_pos;
+ const uint8_t *end_write_pos;
+ uint8_t array_nesting;
+ uint8_t map_nesting;
+};
+
+void lttng_msgpack_writer_init(
+ struct lttng_msgpack_writer *writer,
+ uint8_t *buffer, size_t size);
+
+void lttng_msgpack_writer_fini(struct lttng_msgpack_writer *writer);
+
+int lttng_msgpack_write_nil(struct lttng_msgpack_writer *writer);
+int lttng_msgpack_write_true(struct lttng_msgpack_writer *writer);
+int lttng_msgpack_write_false(struct lttng_msgpack_writer *writer);
+int lttng_msgpack_write_unsigned_integer(
+ struct lttng_msgpack_writer *writer, uint64_t value);
+int lttng_msgpack_write_signed_integer(
+ struct lttng_msgpack_writer *writer, int64_t value);
+int lttng_msgpack_write_double(struct lttng_msgpack_writer *writer, double value);
+int lttng_msgpack_write_str(struct lttng_msgpack_writer *writer,
+ const char *value);
+int lttng_msgpack_begin_map(struct lttng_msgpack_writer *writer, size_t count);
+int lttng_msgpack_end_map(struct lttng_msgpack_writer *writer);
+int lttng_msgpack_begin_array(
+ struct lttng_msgpack_writer *writer, size_t count);
+int lttng_msgpack_end_array(struct lttng_msgpack_writer *writer);
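+
+/*
+ * Minimal usage sketch (hypothetical buffer size): serialize the map
+ * {"pid": 1234} into a stack buffer.
+ *
+ * uint8_t buf[64];
+ * struct lttng_msgpack_writer writer;
+ *
+ * lttng_msgpack_writer_init(&writer, buf, sizeof(buf));
+ * lttng_msgpack_begin_map(&writer, 1);
+ * lttng_msgpack_write_str(&writer, "pid");
+ * lttng_msgpack_write_unsigned_integer(&writer, 1234);
+ * lttng_msgpack_end_map(&writer);
+ * lttng_msgpack_writer_fini(&writer);
+ */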
+
+#endif /* _LTTNG_UST_MSGPACK_H */
/* write() */
#include <unistd.h>
+/* writev() */
+#include <sys/uio.h>
+
/* send() */
#include <sys/types.h>
#include <sys/socket.h>
return bufc-(const char *)buf;
}
+/*
+ * The `struct iovec *iov` is not `const` because we modify it to support
+ * partial writes.
+ */
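+/*
+ * Worked example (hypothetical sizes): with iov[0].iov_len == 16 and
+ * iov[1].iov_len == 4000, a writev() returning 1000 fully covers
+ * iov[0]; the loop below then advances curr_element_idx to 1, bumps
+ * iov[1].iov_base by 984 bytes, shrinks iov[1].iov_len to 3016 and
+ * retries until everything is written.
+ */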
+ssize_t patient_writev(int fd, struct iovec *iov, int iovcnt)
+{
+ ssize_t written, total_written = 0;
+ int curr_element_idx = 0;
+
+ for (;;) {
+ written = writev(fd, iov + curr_element_idx,
+ iovcnt - curr_element_idx);
+ if (written == -1 && errno == EINTR) {
+ continue;
+ }
+ if (written <= 0) {
+ return written;
+ }
+
+ total_written += written;
+
+ /*
+ * Skip over every element that was fully written, subtracting
+ * its size from `written`, until we reach the element that was
+ * only partially written (if any).
+ */
+ while (curr_element_idx < iovcnt &&
+ (size_t) written >= iov[curr_element_idx].iov_len) {
+ written -= iov[curr_element_idx].iov_len;
+ curr_element_idx++;
+ }
+
+ /* Maybe we are done. */
+ if (curr_element_idx >= iovcnt) {
+ break;
+ }
+
+ /* Update the current element base and size. */
+ iov[curr_element_idx].iov_base += written;
+ iov[curr_element_idx].iov_len -= written;
+ }
+
+ return total_written;
+}
+
ssize_t patient_send(int fd, const void *buf, size_t count, int flags)
{
const char *bufc = (const char *) buf;
SUBDIRS = utils hello same_line_tracepoint snprintf benchmark ust-elf \
- ctf-types test-app-ctx gcc-weak-hidden hello-many
+ ctf-types test-app-ctx gcc-weak-hidden hello-many \
+ libmsgpack
if CXX_WORKS
SUBDIRS += hello.cxx
TESTS = snprintf/test_snprintf \
ust-elf/test_ust_elf \
- gcc-weak-hidden/test_gcc_weak_hidden
+ gcc-weak-hidden/test_gcc_weak_hidden \
+ libmsgpack/test_msgpack
EXTRA_DIST = README
"Weak-hidden behavior is the same for 4 bytes integer and pointer objects within main program");
ok(match_matrix[MATCH_LIB_INT] == match_matrix[MATCH_LIB_PTR],
"Weak-hidden behavior is the same for 4 bytes integer and pointer objects within shared library");
- return 0;
+
+ return exit_status();
}
--- /dev/null
+AM_CPPFLAGS = -I$(top_srcdir)/include -I$(top_srcdir)/tests/utils
+
+noinst_PROGRAMS = test_msgpack
+test_msgpack_SOURCES = test_msgpack.c
+test_msgpack_LDADD = \
+ $(top_builddir)/libmsgpack/libmsgpack.la \
+ $(top_builddir)/tests/utils/libtap.a
+
+test_msgpack_CFLAGS = $(AM_CFLAGS)
--- /dev/null
+/*
+ * test_msgpack.c
+ *
+ * Copyright (C) 2020 Francis Deslauriers <francis.deslauriers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <assert.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "tap.h"
+
+#include "../libmsgpack/msgpack.h"
+
+#define BUFFER_SIZE 4096
+#define NUM_TESTS 23
+
+
+/*
+ * echo 'null' | json2msgpack | xxd -i
+ */
+static const uint8_t NIL_EXPECTED[] = { 0xc0 };
+
+/*
+ * echo '"bye"' | json2msgpack | xxd -i
+ */
+static const uint8_t STRING_BYE_EXPECTED[] = { 0xa3, 0x62, 0x79, 0x65 };
+
+/*
+ * echo '1337' | json2msgpack | xxd -i
+ */
+static const uint8_t UINT_1337_EXPECTED[] = { 0xcd, 0x05, 0x39 };
+
+/*
+ * echo '127' | json2msgpack | xxd -i
+ */
+static const uint8_t UINT_127_EXPECTED[] = { 0x7f };
+
+/*
+ * echo '128' | json2msgpack | xxd -i
+ */
+static const uint8_t UINT_128_EXPECTED[] = { 0xcc, 0x80 };
+
+/*
+ * echo '256' | json2msgpack | xxd -i
+ */
+static const uint8_t UINT_256_EXPECTED[] = { 0xcd, 0x01, 0x00 };
+
+/*
+ * echo '65535' | json2msgpack | xxd -i
+ */
+static const uint8_t UINT_65535_EXPECTED[] = { 0xcd, 0xff, 0xff };
+
+/*
+ * echo '65536' | json2msgpack | xxd -i
+ */
+static const uint8_t UINT_65536_EXPECTED[] = { 0xce, 0x00, 0x01, 0x00, 0x00 };
+
+/*
+ * echo '4294967295' | json2msgpack | xxd -i
+ */
+static const uint8_t UINT_4294967295_EXPECTED[] = { 0xce, 0xff, 0xff, 0xff, 0xff };
+
+/*
+ * echo '4294967296' | json2msgpack | xxd -i
+ */
+static const uint8_t UINT_4294967296_EXPECTED[] = { 0xcf, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00 };
+
+/*
+ * echo '-32' | json2msgpack | xxd -i
+ */
+static const uint8_t INT_NEG_32_EXPECTED[] = { 0xe0 };
+
+/*
+ * echo '-33' | json2msgpack | xxd -i
+ */
+static const uint8_t INT_NEG_33_EXPECTED[] = { 0xd0, 0xdf };
+
+/*
+ * echo '-129' | json2msgpack | xxd -i
+ */
+static const uint8_t INT_NEG_129_EXPECTED[] = { 0xd1, 0xff, 0x7f};
+
+/*
+ * echo '-32768' | json2msgpack | xxd -i
+ */
+static const uint8_t INT_NEG_32768_EXPECTED[] = { 0xd1, 0x80, 0x00 };
+
+/*
+ * echo '-32769' | json2msgpack | xxd -i
+ */
+static const uint8_t INT_NEG_32769_EXPECTED[] = { 0xd2, 0xff, 0xff, 0x7f,
+ 0xff };
+
+/*
+ * echo '-2147483648' | json2msgpack | xxd -i
+ */
+static const uint8_t INT_NEG_2147483648_EXPECTED[] = { 0xd2, 0x80, 0x00, 0x00,
+ 0x00 };
+
+/*
+ * echo '-2147483649' | json2msgpack | xxd -i
+ */
+static const uint8_t INT_NEG_2147483649_EXPECTED[] = { 0xd3, 0xff, 0xff, 0xff,
+ 0xff, 0x7f, 0xff, 0xff, 0xff };
+/*
+ * echo '0.0' | json2msgpack | xxd -i
+ */
+static const uint8_t DOUBLE_ZERO_EXPECTED[] = { 0xcb, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00 };
+
+/*
+ * echo '3.14159265' | json2msgpack | xxd -i
+ */
+static const uint8_t DOUBLE_PI_EXPECTED[] = { 0xcb, 0x40, 0x09, 0x21, 0xfb, 0x53,
+ 0xc8, 0xd4, 0xf1 };
+
+/*
+ * echo '-3.14159265' | json2msgpack | xxd -i
+ */
+static const uint8_t DOUBLE_NEG_PI_EXPECTED[] = { 0xcb, 0xc0, 0x09, 0x21, 0xfb,
+ 0x53, 0xc8, 0xd4, 0xf1 };
+
+/*
+ * echo '[1.1, 2.3, -12345.2]' | json2msgpack | xxd -i
+ */
+static const uint8_t ARRAY_DOUBLE_EXPECTED[] = { 0x93, 0xcb, 0x3f, 0xf1, 0x99,
+ 0x99, 0x99, 0x99, 0x99, 0x9a, 0xcb, 0x40, 0x02, 0x66, 0x66,
+ 0x66, 0x66, 0x66, 0x66, 0xcb, 0xc0, 0xc8, 0x1c, 0x99, 0x99,
+ 0x99, 0x99, 0x9a };
+
+/*
+ * echo '{"type":"enum","value":117}' | json2msgpack | xxd -i
+ */
+static const uint8_t MAP_EXPECTED[] = {
+ 0x82, 0xa4, 0x74, 0x79, 0x70, 0x65, 0xa4, 0x65, 0x6e, 0x75, 0x6d, 0xa5,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x75 };
+
+/*
+ * echo '["meow mix", 18, null, 14.197, [1980, 1995]]' | json2msgpack | xxd -i
+ */
+static const uint8_t COMPLETE_CAPTURE_EXPECTED[] = { 0x95, 0xa8, 0x6d, 0x65,
+ 0x6f, 0x77, 0x20, 0x6d, 0x69, 0x78, 0x12, 0xc0, 0xcb, 0x40,
+ 0x2c, 0x64, 0xdd, 0x2f, 0x1a, 0x9f, 0xbe, 0x92, 0xcd, 0x07,
+ 0xbc, 0xcd, 0x07, 0xcb };
+
+static void string_test(uint8_t *buf, const char *value)
+{
+ struct lttng_msgpack_writer writer;
+
+ lttng_msgpack_writer_init(&writer, buf, BUFFER_SIZE);
+ lttng_msgpack_write_str(&writer, value);
+ lttng_msgpack_writer_fini(&writer);
+}
+
+static void int_test(uint8_t *buf, int64_t value)
+{
+ struct lttng_msgpack_writer writer;
+
+ lttng_msgpack_writer_init(&writer, buf, BUFFER_SIZE);
+ lttng_msgpack_write_signed_integer(&writer, value);
+
+ lttng_msgpack_writer_fini(&writer);
+}
+
+static void uint_test(uint8_t *buf, uint64_t value)
+{
+ struct lttng_msgpack_writer writer;
+
+ lttng_msgpack_writer_init(&writer, buf, BUFFER_SIZE);
+ lttng_msgpack_write_unsigned_integer(&writer, value);
+ lttng_msgpack_writer_fini(&writer);
+}
+
+static void double_test(uint8_t *buf, double value)
+{
+ struct lttng_msgpack_writer writer;
+
+ lttng_msgpack_writer_init(&writer, buf, BUFFER_SIZE);
+ lttng_msgpack_write_double(&writer, value);
+ lttng_msgpack_writer_fini(&writer);
+}
+
+static void array_double_test(uint8_t *buf, double *values, size_t nb_values)
+{
+	size_t i;
+ struct lttng_msgpack_writer writer;
+
+ lttng_msgpack_writer_init(&writer, buf, BUFFER_SIZE);
+ lttng_msgpack_begin_array(&writer, nb_values);
+
+ for (i = 0; i < nb_values; i++) {
+ lttng_msgpack_write_double(&writer, values[i]);
+ }
+
+ lttng_msgpack_end_array(&writer);
+ lttng_msgpack_writer_fini(&writer);
+}
+
+static void map_test(uint8_t *buf)
+{
+ struct lttng_msgpack_writer writer;
+
+ lttng_msgpack_writer_init(&writer, buf, BUFFER_SIZE);
+
+ lttng_msgpack_begin_map(&writer, 2);
+
+ lttng_msgpack_write_str(&writer, "type");
+ lttng_msgpack_write_str(&writer, "enum");
+
+ lttng_msgpack_write_str(&writer, "value");
+ lttng_msgpack_write_unsigned_integer(&writer, 117);
+
+ lttng_msgpack_end_map(&writer);
+ lttng_msgpack_writer_fini(&writer);
+}
+
+static void complete_capture_test(uint8_t *buf)
+{
+	/*
+	 * This test case serializes the following JSON document:
+	 * ["meow mix", 18, null, 14.197, [1980, 1995]]
+	 */
+ struct lttng_msgpack_writer writer;
+
+ lttng_msgpack_writer_init(&writer, buf, BUFFER_SIZE);
+
+ lttng_msgpack_begin_array(&writer, 5);
+
+ lttng_msgpack_write_str(&writer, "meow mix");
+ lttng_msgpack_write_signed_integer(&writer, 18);
+ lttng_msgpack_write_nil(&writer);
+ lttng_msgpack_write_double(&writer, 14.197);
+
+ lttng_msgpack_begin_array(&writer, 2);
+
+ lttng_msgpack_write_unsigned_integer(&writer, 1980);
+ lttng_msgpack_write_unsigned_integer(&writer, 1995);
+
+ lttng_msgpack_end_array(&writer);
+
+ lttng_msgpack_end_array(&writer);
+
+ lttng_msgpack_writer_fini(&writer);
+}
+
+static void nil_test(uint8_t *buf)
+{
+ struct lttng_msgpack_writer writer;
+
+ lttng_msgpack_writer_init(&writer, buf, BUFFER_SIZE);
+ lttng_msgpack_write_nil(&writer);
+ lttng_msgpack_writer_fini(&writer);
+}
+
+int main(int argc, char *argv[])
+{
+ uint8_t buf[BUFFER_SIZE] = {0};
+ double arr_double[] = {1.1, 2.3, -12345.2};
+
+ plan_tests(NUM_TESTS);
+
+ diag("Testing msgpack implementation");
+
+	/*
+	 * Expected outputs were produced with the `json2msgpack` tool from the
+	 * msgpack-tools project:
+	 * https://github.com/ludocode/msgpack-tools
+	 * For example, here is the command used to produce the nil test
+	 * expected output:
+	 * echo 'null' | json2msgpack | xxd -i
+	 *
+	 * The writer emits the smallest msgpack encoding that fits each value,
+	 * which is also what json2msgpack produces, so the expected buffers
+	 * are used verbatim.
+	 */
+ nil_test(buf);
+ ok(memcmp(buf, NIL_EXPECTED, sizeof(NIL_EXPECTED)) == 0,
+ "NIL object");
+
+ string_test(buf, "bye");
+ ok(memcmp(buf, STRING_BYE_EXPECTED, sizeof(STRING_BYE_EXPECTED)) == 0,
+ "String \"bye\" object");
+
+ uint_test(buf, 1337);
+ ok(memcmp(buf, UINT_1337_EXPECTED, sizeof(UINT_1337_EXPECTED)) == 0,
+ "Unsigned integer \"1337\" object");
+
+ uint_test(buf, 127);
+ ok(memcmp(buf, UINT_127_EXPECTED, sizeof(UINT_127_EXPECTED)) == 0,
+ "Unsigned integer \"127\" object");
+
+ uint_test(buf, 128);
+ ok(memcmp(buf, UINT_128_EXPECTED, sizeof(UINT_128_EXPECTED)) == 0,
+ "Unsigned integer \"128\" object");
+
+ uint_test(buf, 256);
+ ok(memcmp(buf, UINT_256_EXPECTED, sizeof(UINT_256_EXPECTED)) == 0,
+ "Unsigned integer \"256\" object");
+
+	uint_test(buf, 65535);
+	ok(memcmp(buf, UINT_65535_EXPECTED, sizeof(UINT_65535_EXPECTED)) == 0,
+			"Unsigned integer \"65535\" object");
+
+	uint_test(buf, 65536);
+	ok(memcmp(buf, UINT_65536_EXPECTED, sizeof(UINT_65536_EXPECTED)) == 0,
+			"Unsigned integer \"65536\" object");
+
+ uint_test(buf, 4294967295);
+ ok(memcmp(buf, UINT_4294967295_EXPECTED, sizeof(UINT_4294967295_EXPECTED)) == 0,
+ "Unsigned integer \"4294967295\" object");
+
+ uint_test(buf, 4294967296);
+ ok(memcmp(buf, UINT_4294967296_EXPECTED, sizeof(UINT_4294967296_EXPECTED)) == 0,
+ "Unsigned integer \"4294967296\" object");
+
+ int_test(buf, -32);
+ ok(memcmp(buf, INT_NEG_32_EXPECTED, sizeof(INT_NEG_32_EXPECTED)) == 0,
+ "Signed integer \"-32\" object");
+
+ int_test(buf, -33);
+ ok(memcmp(buf, INT_NEG_33_EXPECTED, sizeof(INT_NEG_33_EXPECTED)) == 0,
+ "Signed integer \"-33\" object");
+
+ int_test(buf, -129);
+ ok(memcmp(buf, INT_NEG_129_EXPECTED, sizeof(INT_NEG_129_EXPECTED)) == 0,
+ "Signed integer \"-129\" object");
+
+ int_test(buf, -32768);
+ ok(memcmp(buf, INT_NEG_32768_EXPECTED, sizeof(INT_NEG_32768_EXPECTED)) == 0,
+ "Signed integer \"-32768\" object");
+
+ int_test(buf, -32769);
+ ok(memcmp(buf, INT_NEG_32769_EXPECTED, sizeof(INT_NEG_32769_EXPECTED)) == 0,
+ "Signed integer \"-32769\" object");
+
+ int_test(buf, -2147483648);
+ ok(memcmp(buf, INT_NEG_2147483648_EXPECTED, sizeof(INT_NEG_2147483648_EXPECTED)) == 0,
+ "Signed integer \"-2147483648\" object");
+
+ int_test(buf, -2147483649);
+ ok(memcmp(buf, INT_NEG_2147483649_EXPECTED, sizeof(INT_NEG_2147483649_EXPECTED)) == 0,
+ "Signed integer \"-2147483649\" object");
+
+ double_test(buf, 0.0);
+ ok(memcmp(buf, DOUBLE_ZERO_EXPECTED, sizeof(DOUBLE_ZERO_EXPECTED)) == 0,
+ "double \"0.0\" object");
+
+ double_test(buf, 3.14159265);
+ ok(memcmp(buf, DOUBLE_PI_EXPECTED, sizeof(DOUBLE_PI_EXPECTED)) == 0,
+ "double \"PI\" object");
+
+ double_test(buf, -3.14159265);
+ ok(memcmp(buf, DOUBLE_NEG_PI_EXPECTED, sizeof(DOUBLE_NEG_PI_EXPECTED)) == 0,
+ "double \"-PI\" object");
+
+ array_double_test(buf, arr_double, 3);
+ ok(memcmp(buf, ARRAY_DOUBLE_EXPECTED, sizeof(ARRAY_DOUBLE_EXPECTED)) == 0,
+ "Array of double object");
+
+ map_test(buf);
+ ok(memcmp(buf, MAP_EXPECTED, sizeof(MAP_EXPECTED)) == 0,
+ "Map object");
+
+ complete_capture_test(buf);
+ ok(memcmp(buf, COMPLETE_CAPTURE_EXPECTED, sizeof(COMPLETE_CAPTURE_EXPECTED)) == 0,
+ "Complete capture object");
+
+	return exit_status();
+}
sprintf(test_desc, test_desc_fmt_str, escaped_test_fmt_str);
ok(strcmp(buf, expected) == 0, test_desc);
- return 0;
+ return exit_status();
}
value->u.s64 = -64;
break;
case LTTNG_UST_DYNAMIC_TYPE_U8:
- value->u.s64 = 8;
+ value->u.u64 = 8;
break;
case LTTNG_UST_DYNAMIC_TYPE_U16:
- value->u.s64 = 16;
+ value->u.u64 = 16;
break;
case LTTNG_UST_DYNAMIC_TYPE_U32:
- value->u.s64 = 32;
+ value->u.u64 = 32;
break;
case LTTNG_UST_DYNAMIC_TYPE_U64:
- value->u.s64 = 64;
+ value->u.u64 = 64;
break;
case LTTNG_UST_DYNAMIC_TYPE_FLOAT:
value->u.d = 22322.0;
AARCH64_BE_CRC);
test_pic(test_dir);
- return EXIT_SUCCESS;
+ return exit_status();
}