From 4e3b3630c981421cbafea211ef2b826f399ac7eb Mon Sep 17 00:00:00 2001 From: Jonathan Rajotte Date: Thu, 15 Oct 2020 20:14:58 -0400 Subject: [PATCH] SoW-2020-0002: Trace Hit Counters: Implement key-addressed counters in shared memory as a new LTTng-UST map for counter aggregation Revision 1 Change-Id: I76122fd195c0f321bfbc5b99624dfcee16776590 Signed-off-by: Jonathan Rajotte --- Makefile.am | 2 + configure.ac | 11 +- do-not-commit.md | 1 + include/Makefile.am | 4 +- include/lttng/bitmap.h | 70 ++ include/lttng/counter-config.h | 57 + include/lttng/ust-abi.h | 127 ++ include/lttng/ust-ctl.h | 99 ++ include/lttng/ust-events.h | 199 +++- include/lttng/ust-tracepoint-event.h | 78 +- include/share.h | 2 + include/ust-comm.h | 16 + libcounter/Makefile.am | 18 + libcounter/counter-api.h | 296 +++++ libcounter/counter-internal.h | 65 ++ libcounter/counter-types.h | 93 ++ libcounter/counter.c | 564 +++++++++ libcounter/counter.h | 65 ++ libcounter/shm.c | 387 ++++++ libcounter/shm.h | 142 +++ libcounter/shm_internal.h | 35 + libcounter/shm_types.h | 54 + libcounter/smp.c | 111 ++ libcounter/smp.h | 43 + liblttng-ust-comm/lttng-ust-comm.c | 109 ++ liblttng-ust-ctl/ustctl.c | 587 ++++++++++ liblttng-ust/Makefile.am | 22 +- liblttng-ust/bytecode.h | 251 ++++ liblttng-ust/context-internal.h | 32 + liblttng-ust/context-provider-internal.h | 37 + liblttng-ust/filter-bytecode.h | 249 ---- ...rpreter.c => lttng-bytecode-interpreter.c} | 972 +++++++++------- ...ecialize.c => lttng-bytecode-specialize.c} | 471 ++++---- ...validator.c => lttng-bytecode-validator.c} | 763 ++++++------ liblttng-ust/lttng-bytecode.c | 638 ++++++++++ .../{lttng-filter.h => lttng-bytecode.h} | 108 +- liblttng-ust/lttng-context-provider.c | 12 + liblttng-ust/lttng-context.c | 9 +- .../lttng-counter-client-percpu-32-modular.c | 95 ++ .../lttng-counter-client-percpu-64-modular.c | 95 ++ liblttng-ust/lttng-events.c | 1033 +++++++++++++---- liblttng-ust/lttng-filter.c | 581 --------- liblttng-ust/lttng-probes.c 
| 3 + liblttng-ust/lttng-ust-abi.c | 372 +++++- liblttng-ust/lttng-ust-comm.c | 322 +++-- liblttng-ust/trigger-notification.c | 396 +++++++ liblttng-ust/ust-core.c | 34 + liblttng-ust/ust-events-internal.h | 254 ++++ libmsgpack/Makefile.am | 9 + libmsgpack/msgpack.c | 517 +++++++++ libmsgpack/msgpack.h | 61 + snprintf/patient_write.c | 49 + tests/Makefile.am | 6 +- tests/gcc-weak-hidden/main.c | 3 +- tests/libmsgpack/Makefile.am | 9 + tests/libmsgpack/test_msgpack.c | 386 ++++++ tests/snprintf/snprintf.c | 2 +- tests/test-app-ctx/hello.c | 8 +- tests/ust-elf/ust-elf.c | 2 +- 59 files changed, 8876 insertions(+), 2160 deletions(-) create mode 100644 do-not-commit.md create mode 100644 include/lttng/bitmap.h create mode 100644 include/lttng/counter-config.h create mode 100644 libcounter/Makefile.am create mode 100644 libcounter/counter-api.h create mode 100644 libcounter/counter-internal.h create mode 100644 libcounter/counter-types.h create mode 100644 libcounter/counter.c create mode 100644 libcounter/counter.h create mode 100644 libcounter/shm.c create mode 100644 libcounter/shm.h create mode 100644 libcounter/shm_internal.h create mode 100644 libcounter/shm_types.h create mode 100644 libcounter/smp.c create mode 100644 libcounter/smp.h create mode 100644 liblttng-ust/bytecode.h create mode 100644 liblttng-ust/context-internal.h create mode 100644 liblttng-ust/context-provider-internal.h delete mode 100644 liblttng-ust/filter-bytecode.h rename liblttng-ust/{lttng-filter-interpreter.c => lttng-bytecode-interpreter.c} (67%) rename liblttng-ust/{lttng-filter-specialize.c => lttng-bytecode-specialize.c} (75%) rename liblttng-ust/{lttng-filter-validator.c => lttng-bytecode-validator.c} (70%) create mode 100644 liblttng-ust/lttng-bytecode.c rename liblttng-ust/{lttng-filter.h => lttng-bytecode.h} (69%) create mode 100644 liblttng-ust/lttng-counter-client-percpu-32-modular.c create mode 100644 liblttng-ust/lttng-counter-client-percpu-64-modular.c delete mode 100644 
liblttng-ust/lttng-filter.c create mode 100644 liblttng-ust/trigger-notification.c create mode 100644 liblttng-ust/ust-events-internal.h create mode 100644 libmsgpack/Makefile.am create mode 100644 libmsgpack/msgpack.c create mode 100644 libmsgpack/msgpack.h create mode 100644 tests/libmsgpack/Makefile.am create mode 100644 tests/libmsgpack/test_msgpack.c diff --git a/Makefile.am b/Makefile.am index cc923c1d..b6b7fb6d 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,6 +1,8 @@ ACLOCAL_AMFLAGS = -I m4 SUBDIRS = . include snprintf libringbuffer liblttng-ust-comm \ + libcounter \ + libmsgpack \ liblttng-ust \ liblttng-ust-ctl \ liblttng-ust-fd \ diff --git a/configure.ac b/configure.ac index 243762ce..607b3c78 100644 --- a/configure.ac +++ b/configure.ac @@ -1,10 +1,10 @@ dnl Version infos m4_define([V_MAJOR], [2]) -m4_define([V_MINOR], [12]) +m4_define([V_MINOR], [13]) m4_define([V_PATCH], [0]) -m4_define([V_EXTRA], [rc1]) -m4_define([V_NAME], [[(Ta) Meilleure]]) -m4_define([V_DESC], [[Ta Meilleure is a Northeast IPA beer brewed by Lagabière. Translating to "Your best one", this beer gives out strong aromas of passion fruit, lemon, and peaches. 
Tastewise, expect a lot of fruit, a creamy texture, and a smooth lingering hop bitterness.]]) +m4_define([V_EXTRA], [pre]) +m4_define([V_NAME], [[Codename TBD]]) +m4_define([V_DESC], [[Description TBD]]) m4_define([V_STRING], [V_MAJOR.V_MINOR.V_PATCH]) m4_ifdef([V_EXTRA], [m4_append([V_STRING], [-V_EXTRA])]) @@ -531,6 +531,8 @@ AC_CONFIG_FILES([ include/Makefile include/lttng/ust-version.h snprintf/Makefile + libcounter/Makefile + libmsgpack/Makefile libringbuffer/Makefile liblttng-ust-comm/Makefile liblttng-ust/Makefile @@ -565,6 +567,7 @@ AC_CONFIG_FILES([ tests/snprintf/Makefile tests/ust-elf/Makefile tests/benchmark/Makefile + tests/libmsgpack/Makefile tests/utils/Makefile tests/test-app-ctx/Makefile tests/gcc-weak-hidden/Makefile diff --git a/do-not-commit.md b/do-not-commit.md new file mode 100644 index 00000000..c727a35c --- /dev/null +++ b/do-not-commit.md @@ -0,0 +1 @@ +capture diff --git a/include/Makefile.am b/include/Makefile.am index 277e4e69..23a165ae 100644 --- a/include/Makefile.am +++ b/include/Makefile.am @@ -25,7 +25,9 @@ nobase_include_HEADERS = \ lttng/lttng-ust-tracelog.h \ lttng/ust-clock.h \ lttng/ust-getcpu.h \ - lttng/ust-elf.h + lttng/ust-elf.h \ + lttng/counter-config.h \ + lttng/bitmap.h # note: usterr-signal-safe.h, core.h and share.h need namespace cleanup. 
diff --git a/include/lttng/bitmap.h b/include/lttng/bitmap.h new file mode 100644 index 00000000..fb57ff41 --- /dev/null +++ b/include/lttng/bitmap.h @@ -0,0 +1,70 @@ +/* + * lttng/bitmap.h + * + * LTTng Bitmap API + * + * Copyright (C) 2020 Mathieu Desnoyers + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef _LTTNG_BITMAP_H +#define _LTTNG_BITMAP_H + +#include +#include +#include +#include + +static inline void lttng_bitmap_index(unsigned int index, unsigned int *word, + unsigned int *bit) +{ + *word = index / CAA_BITS_PER_LONG; + *bit = index % CAA_BITS_PER_LONG; +} + +static inline void lttng_bitmap_set_bit(unsigned int index, unsigned long *p) +{ + unsigned int word, bit; + unsigned long val; + + lttng_bitmap_index(index, &word, &bit); + val = 1U << bit; + uatomic_or(p + word, val); +} + +static inline void lttng_bitmap_clear_bit(unsigned int index, unsigned long *p) +{ + unsigned int word, bit; + unsigned long val; + + lttng_bitmap_index(index, &word, &bit); + val = ~(1U << bit); + uatomic_and(p + word, val); +} + +static inline bool lttng_bitmap_test_bit(unsigned int index, unsigned long *p) +{ + unsigned int word, bit; + + lttng_bitmap_index(index, &word, &bit); + return (CMM_LOAD_SHARED(p[word]) >> bit) & 0x1; +} + +#endif /* _LTTNG_BITMAP_H */ diff --git a/include/lttng/counter-config.h b/include/lttng/counter-config.h new file mode 100644 index 00000000..eaa4cb88 --- /dev/null +++ b/include/lttng/counter-config.h @@ -0,0 +1,57 @@ +/* + * lttng/counter-config.h + * + * LTTng Counters Configuration + * + * Copyright (C) 2020 Mathieu Desnoyers + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _LTTNG_COUNTER_CONFIG_H +#define _LTTNG_COUNTER_CONFIG_H + +#include + +enum lib_counter_config_alloc { + COUNTER_ALLOC_PER_CPU = (1 << 0), + COUNTER_ALLOC_GLOBAL = (1 << 1), +}; + +enum lib_counter_config_sync { + COUNTER_SYNC_PER_CPU, + COUNTER_SYNC_GLOBAL, +}; + +struct lib_counter_config { + uint32_t alloc; /* enum lib_counter_config_alloc flags */ + enum lib_counter_config_sync sync; + enum { + COUNTER_ARITHMETIC_OVERFLOW, + COUNTER_ARITHMETIC_SATURATE, /* TODO */ + } arithmetic; + enum { + COUNTER_SIZE_8_BIT = 1, + COUNTER_SIZE_16_BIT = 2, + COUNTER_SIZE_32_BIT = 4, + COUNTER_SIZE_64_BIT = 8, + } counter_size; +}; + +#endif /* _LTTNG_COUNTER_CONFIG_H */ diff --git a/include/lttng/ust-abi.h b/include/lttng/ust-abi.h index 8bf77f64..8ec51baf 100644 --- a/include/lttng/ust-abi.h +++ b/include/lttng/ust-abi.h @@ -103,6 +103,64 @@ struct lttng_ust_stream { */ } LTTNG_PACKED; +#define LTTNG_UST_TRIGGER_PADDING1 16 +#define LTTNG_UST_TRIGGER_PADDING2 (LTTNG_UST_SYM_NAME_LEN + 32) +struct lttng_ust_trigger { + uint64_t id; + uint64_t error_counter_index; + enum lttng_ust_instrumentation instrumentation; + char name[LTTNG_UST_SYM_NAME_LEN]; /* event name */ + + enum lttng_ust_loglevel_type loglevel_type; + int loglevel; /* value, -1: all */ + char padding[LTTNG_UST_TRIGGER_PADDING1]; + + /* Per instrumentation type configuration */ + union { + char padding[LTTNG_UST_TRIGGER_PADDING2]; + } u; +} LTTNG_PACKED; + +enum lttng_ust_counter_arithmetic { + 
LTTNG_UST_COUNTER_ARITHMETIC_MODULAR = 0, + LTTNG_UST_COUNTER_ARITHMETIC_SATURATION = 1, +}; + +enum lttng_ust_counter_bitness { + LTTNG_UST_COUNTER_BITNESS_32BITS = 4, + LTTNG_UST_COUNTER_BITNESS_64BITS = 8, +}; + +struct lttng_ust_counter_dimension { + uint64_t size; + uint64_t underflow_index; + uint64_t overflow_index; + uint8_t has_underflow; + uint8_t has_overflow; +} LTTNG_PACKED; + +#define LTTNG_UST_COUNTER_DIMENSION_MAX 8 +struct lttng_ust_counter_conf { + uint32_t arithmetic; /* enum lttng_ust_counter_arithmetic */ + uint32_t bitness; /* enum lttng_ust_counter_bitness */ + uint32_t number_dimensions; + int64_t global_sum_step; + struct lttng_ust_counter_dimension dimensions[LTTNG_UST_COUNTER_DIMENSION_MAX]; +} LTTNG_PACKED; + +struct lttng_ust_counter_value { + uint32_t number_dimensions; + uint64_t dimension_indexes[LTTNG_UST_COUNTER_DIMENSION_MAX]; + int64_t value; +} LTTNG_PACKED; + +#define LTTNG_TRIGGER_NOTIFICATION_PADDING 32 +struct lttng_ust_trigger_notification { + uint64_t id; + uint16_t capture_buf_size; + char padding[LTTNG_TRIGGER_NOTIFICATION_PADDING]; +} LTTNG_PACKED; + #define LTTNG_UST_EVENT_PADDING1 16 #define LTTNG_UST_EVENT_PADDING2 (LTTNG_UST_SYM_NAME_LEN + 32) struct lttng_ust_event { @@ -119,6 +177,27 @@ struct lttng_ust_event { } u; } LTTNG_PACKED; +#define LTTNG_UST_COUNTER_PADDING1 (LTTNG_UST_SYM_NAME_LEN + 32) +#define LTTNG_UST_COUNTER_DATA_MAX_LEN 4096U +struct lttng_ust_counter { + uint64_t len; + char padding[LTTNG_UST_COUNTER_PADDING1]; + char data[]; /* variable sized data */ +} LTTNG_PACKED; + +#define LTTNG_UST_COUNTER_GLOBAL_PADDING1 (LTTNG_UST_SYM_NAME_LEN + 32) +struct lttng_ust_counter_global { + uint64_t len; /* shm len */ + char padding[LTTNG_UST_COUNTER_GLOBAL_PADDING1]; +} LTTNG_PACKED; + +#define LTTNG_UST_COUNTER_CPU_PADDING1 (LTTNG_UST_SYM_NAME_LEN + 32) +struct lttng_ust_counter_cpu { + uint64_t len; /* shm len */ + uint32_t cpu_nr; + char padding[LTTNG_UST_COUNTER_CPU_PADDING1]; +} LTTNG_PACKED; + enum 
lttng_ust_field_type { LTTNG_UST_FIELD_OTHER = 0, LTTNG_UST_FIELD_INTEGER = 1, @@ -217,6 +296,11 @@ enum lttng_ust_object_type { LTTNG_UST_OBJECT_TYPE_STREAM = 1, LTTNG_UST_OBJECT_TYPE_EVENT = 2, LTTNG_UST_OBJECT_TYPE_CONTEXT = 3, + LTTNG_UST_OBJECT_TYPE_TRIGGER_GROUP = 4, + LTTNG_UST_OBJECT_TYPE_TRIGGER = 5, + LTTNG_UST_OBJECT_TYPE_COUNTER = 6, + LTTNG_UST_OBJECT_TYPE_COUNTER_GLOBAL = 7, + LTTNG_UST_OBJECT_TYPE_COUNTER_CPU = 8, }; #define LTTNG_UST_OBJECT_DATA_PADDING1 32 @@ -238,6 +322,16 @@ struct lttng_ust_object_data { int wakeup_fd; uint32_t stream_nr; } stream; + struct { + void *data; + } counter; + struct { + int shm_fd; + } counter_global; + struct { + int shm_fd; + uint32_t cpu_nr; + } counter_cpu; char padding2[LTTNG_UST_OBJECT_DATA_PADDING2]; } u; } LTTNG_PACKED; @@ -267,6 +361,16 @@ struct lttng_ust_filter_bytecode { char data[0]; } LTTNG_PACKED; +#define CAPTURE_BYTECODE_MAX_LEN 65536 +#define LTTNG_UST_CAPTURE_PADDING 32 +struct lttng_ust_capture_bytecode { + uint32_t len; + uint32_t reloc_offset; + uint64_t seqnum; + char padding[LTTNG_UST_CAPTURE_PADDING]; + char data[0]; +} LTTNG_PACKED; + #define LTTNG_UST_EXCLUSION_PADDING 32 struct lttng_ust_event_exclusion { uint32_t count; @@ -322,6 +426,20 @@ struct lttng_ust_event_exclusion { #define LTTNG_UST_FILTER _UST_CMD(0xA0) #define LTTNG_UST_EXCLUSION _UST_CMD(0xA1) +/* Trigger commands */ +#define LTTNG_UST_TRIGGER_GROUP_CREATE _UST_CMD(0xB0) +#define LTTNG_UST_TRIGGER_CREATE \ + _UST_CMDW(0xB1, struct lttng_ust_trigger) +#define LTTNG_UST_CAPTURE _UST_CMD(0xB2) + +/* Session and Trigger group FD commands */ +#define LTTNG_UST_COUNTER \ + _UST_CMDW(0xB3, struct lttng_ust_counter) +#define LTTNG_UST_COUNTER_GLOBAL \ + _UST_CMDW(0xB4, struct lttng_ust_counter_global) +#define LTTNG_UST_COUNTER_CPU \ + _UST_CMDW(0xB5, struct lttng_ust_counter_cpu) + #define LTTNG_UST_ROOT_HANDLE 0 struct lttng_ust_obj; @@ -341,6 +459,15 @@ union ust_args { struct { char *ctxname; } app_context; + struct { + int 
trigger_notif_fd; + } trigger_handle; + struct { + void *counter_data; + } counter; + struct { + int shm_fd; + } counter_shm; }; struct lttng_ust_objd_ops { diff --git a/include/lttng/ust-ctl.h b/include/lttng/ust-ctl.h index 19fba726..840befe3 100644 --- a/include/lttng/ust-ctl.h +++ b/include/lttng/ust-ctl.h @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -93,6 +94,8 @@ int ustctl_add_context(int sock, struct lttng_ust_context_attr *ctx, struct lttng_ust_object_data **context_data); int ustctl_set_filter(int sock, struct lttng_ust_filter_bytecode *bytecode, struct lttng_ust_object_data *obj_data); +int ustctl_set_capture(int sock, struct lttng_ust_capture_bytecode *bytecode, + struct lttng_ust_object_data *obj_data); int ustctl_set_exclusion(int sock, struct lttng_ust_event_exclusion *exclusion, struct lttng_ust_object_data *obj_data); @@ -101,6 +104,25 @@ int ustctl_disable(int sock, struct lttng_ust_object_data *object); int ustctl_start_session(int sock, int handle); int ustctl_stop_session(int sock, int handle); +/* + * ustctl_create_trigger_group creates a trigger group. It establishes the + * connection with the application by providing a file descriptor of the pipe + * to be used by the application when a trigger of that group is fired. It + * returns a handle to be used when creating trigger in that group. + */ +int ustctl_create_trigger_group(int sock, int pipe_fd, + struct lttng_ust_object_data **trigger_group); + +/* + * ustctl_create_trigger creates a trigger in a trigger group giving a trigger + * description and a trigger group handle. It returns a trigger handle to be + * used when enabling the trigger, attaching filter, attaching exclusion, and + * disabling the trigger. 
+ */ +int ustctl_create_trigger(int sock, struct lttng_ust_trigger *trigger, + struct lttng_ust_object_data *trigger_group, + struct lttng_ust_object_data **trigger_data); + /* * ustctl_tracepoint_list returns a tracepoint list handle, or negative * error value. @@ -567,4 +589,81 @@ int ustctl_reply_register_channel(int sock, enum ustctl_channel_header header_type, int ret_code); /* return code. 0 ok, negative error */ +/* + * Counter API. + */ + +enum ustctl_counter_bitness { + USTCTL_COUNTER_BITNESS_32 = 4, + USTCTL_COUNTER_BITNESS_64 = 8, +}; + +enum ustctl_counter_arithmetic { + USTCTL_COUNTER_ARITHMETIC_MODULAR = 0, + USTCTL_COUNTER_ARITHMETIC_SATURATION = 1, +}; + +/* Used as alloc flags. */ +enum ustctl_counter_alloc { + USTCTL_COUNTER_ALLOC_PER_CPU = (1 << 0), + USTCTL_COUNTER_ALLOC_GLOBAL = (1 << 1), +}; + +struct ustctl_daemon_counter; + +int ustctl_get_nr_cpu_per_counter(void); + +struct ustctl_counter_dimension { + uint64_t size; + uint64_t underflow_index; + uint64_t overflow_index; + uint8_t has_underflow; + uint8_t has_overflow; +}; + +struct ustctl_daemon_counter * + ustctl_create_counter(size_t nr_dimensions, + const struct ustctl_counter_dimension *dimensions, + int64_t global_sum_step, + int global_counter_fd, + int nr_counter_cpu_fds, + const int *counter_cpu_fds, + enum ustctl_counter_bitness bitness, + enum ustctl_counter_arithmetic arithmetic, + uint32_t alloc_flags); + +int ustctl_create_counter_data(struct ustctl_daemon_counter *counter, + struct lttng_ust_object_data **counter_data); + +int ustctl_create_counter_global_data(struct ustctl_daemon_counter *counter, + struct lttng_ust_object_data **counter_global_data); +int ustctl_create_counter_cpu_data(struct ustctl_daemon_counter *counter, int cpu, + struct lttng_ust_object_data **counter_cpu_data); + +/* + * Each counter data and counter cpu data created need to be destroyed + * before calling ustctl_destroy_counter(). 
+ */ +void ustctl_destroy_counter(struct ustctl_daemon_counter *counter); + +int ustctl_send_counter_data_to_ust(int sock, int parent_handle, + struct lttng_ust_object_data *counter_data); +int ustctl_send_counter_global_data_to_ust(int sock, + struct lttng_ust_object_data *counter_data, + struct lttng_ust_object_data *counter_global_data); +int ustctl_send_counter_cpu_data_to_ust(int sock, + struct lttng_ust_object_data *counter_data, + struct lttng_ust_object_data *counter_cpu_data); + +int ustctl_counter_read(struct ustctl_daemon_counter *counter, + const size_t *dimension_indexes, + int cpu, int64_t *value, + bool *overflow, bool *underflow); +int ustctl_counter_aggregate(struct ustctl_daemon_counter *counter, + const size_t *dimension_indexes, + int64_t *value, + bool *overflow, bool *underflow); +int ustctl_counter_clear(struct ustctl_daemon_counter *counter, + const size_t *dimension_indexes); + #endif /* _LTTNG_UST_CTL_H */ diff --git a/include/lttng/ust-events.h b/include/lttng/ust-events.h index 3fee93c4..c5ff0231 100644 --- a/include/lttng/ust-events.h +++ b/include/lttng/ust-events.h @@ -64,6 +64,7 @@ struct lttng_session; struct lttng_ust_lib_ring_buffer_ctx; struct lttng_ust_context_app; struct lttng_event_field; +struct lttng_trigger_group; /* * Data structures used by tracepoint event declarations, and by the @@ -315,6 +316,7 @@ struct lttng_ctx_value { enum lttng_ust_dynamic_type sel; union { int64_t s64; + uint64_t u64; const char *str; double d; } u; @@ -360,6 +362,7 @@ struct lttng_event_desc { union { struct { const char **model_emf_uri; + void (*trigger_callback)(void); } ext; char padding[LTTNG_UST_EVENT_DESC_PADDING]; } u; @@ -380,9 +383,9 @@ struct lttng_probe_desc { /* Data structures used by the tracer. 
*/ -enum lttng_enabler_type { - LTTNG_ENABLER_STAR_GLOB, - LTTNG_ENABLER_EVENT, +enum lttng_enabler_format_type { + LTTNG_ENABLER_FORMAT_STAR_GLOB, + LTTNG_ENABLER_FORMAT_EVENT, }; /* @@ -390,21 +393,14 @@ enum lttng_enabler_type { * backward reference. */ struct lttng_enabler { - enum lttng_enabler_type type; + enum lttng_enabler_format_type format_type; /* head list of struct lttng_ust_filter_bytecode_node */ struct cds_list_head filter_bytecode_head; /* head list of struct lttng_ust_excluder_node */ struct cds_list_head excluder_head; - struct cds_list_head node; /* per-session list of enablers */ struct lttng_ust_event event_param; - struct lttng_channel *chan; - /* - * Unused, but kept around to make it explicit that the tracer can do - * it. - */ - struct lttng_ctx *ctx; unsigned int enabled:1; }; @@ -431,34 +427,17 @@ struct lttng_ust_field_list { struct ust_pending_probe; struct lttng_event; -struct lttng_ust_filter_bytecode_node { - struct cds_list_head node; - struct lttng_enabler *enabler; - /* - * struct lttng_ust_filter_bytecode has var. sized array, must - * be last field. - */ - struct lttng_ust_filter_bytecode bc; -}; - -struct lttng_ust_excluder_node { - struct cds_list_head node; - struct lttng_enabler *enabler; - /* - * struct lttng_ust_event_exclusion had variable sized array, - * must be last field. - */ - struct lttng_ust_event_exclusion excluder; -}; /* - * Filter return value masks. + * Bytecode interpreter return value masks. */ -enum lttng_filter_ret { - LTTNG_FILTER_DISCARD = 0, - LTTNG_FILTER_RECORD_FLAG = (1ULL << 0), +enum lttng_bytecode_interpreter_ret { + LTTNG_INTERPRETER_DISCARD = 0, + LTTNG_INTERPRETER_RECORD_FLAG = (1ULL << 0), /* Other bits are kept for future use. */ }; +struct lttng_interpreter_output; + /* * This structure is used in the probes. More specifically, the `filter` and * `node` fields are explicity used in the probes. 
When modifying this @@ -467,15 +446,30 @@ enum lttng_filter_ret { */ struct lttng_bytecode_runtime { /* Associated bytecode */ - struct lttng_ust_filter_bytecode_node *bc; - uint64_t (*filter)(void *filter_data, const char *filter_stack_data); + struct lttng_ust_bytecode_node *bc; + union { + uint64_t (*filter)(void *interpreter_data, + const char *interpreter_stack_data); + uint64_t (*capture)(void *interpreter_data, + const char *interpreter_stack_data, + struct lttng_interpreter_output *interpreter_output); + } interpreter_funcs; int link_failed; struct cds_list_head node; /* list of bytecode runtime in event */ - struct lttng_session *session; + /* + * Pointer to a URCU-protected pointer owned by an `struct + * lttng_session`or `struct lttng_trigger_group`. + */ + struct lttng_ctx **pctx; }; /* - * Objects in a linked-list of enablers, owned by an event. + * Objects in a linked-list of enablers, owned by an event or trigger. + * This is used because an event (or a trigger) can be enabled by more than one + * enabler and we want a quick way to iterate over all enablers of an object. + * + * For example, event rules "my_app:a*" and "my_app:ab*" will both match the + * event with the name "my_app:abc". */ struct lttng_enabler_ref { struct cds_list_head node; /* enabler ref list */ @@ -506,7 +500,7 @@ struct lttng_event { /* LTTng-UST 2.1 starts here */ /* list of struct lttng_bytecode_runtime, sorted by seqnum */ - struct cds_list_head bytecode_runtime_head; + struct cds_list_head filter_bytecode_runtime_head; int has_enablers_without_bytecode; /* Backward references: list of lttng_enabler_ref (ref to enablers) */ struct cds_list_head enablers_ref_head; @@ -514,6 +508,22 @@ struct lttng_event { int registered; /* has reg'd tracepoint probe */ }; +struct lttng_trigger { + uint64_t id; + uint64_t error_counter_index; + int enabled; + int registered; /* has reg'd tracepoint probe */ + size_t num_captures; /* Needed to allocate the msgpack array. 
*/ + struct cds_list_head filter_bytecode_runtime_head; + struct cds_list_head capture_bytecode_runtime_head; + int has_enablers_without_bytecode; + struct cds_list_head enablers_ref_head; + const struct lttng_event_desc *desc; + struct cds_hlist_node hlist; /* hashtable of triggers */ + struct cds_list_head node; /* Trigger list in session */ + struct lttng_trigger_group *group; /* weak ref */ +}; + struct lttng_enum { const struct lttng_enum_desc *desc; struct lttng_session *session; @@ -607,6 +617,36 @@ struct lttng_channel { int tstate:1; /* Transient enable state */ }; +#define LTTNG_COUNTER_DIMENSION_MAX 8 + +struct lttng_counter_dimension { + uint64_t size; + uint64_t underflow_index; + uint64_t overflow_index; + uint8_t has_underflow; + uint8_t has_overflow; +}; + +struct lttng_counter_ops { + struct lib_counter *(*counter_create)(size_t nr_dimensions, + const struct lttng_counter_dimension *dimensions, + int64_t global_sum_step, + int global_counter_fd, + int nr_counter_cpu_fds, + const int *counter_cpu_fds, + bool is_daemon); + void (*counter_destroy)(struct lib_counter *counter); + int (*counter_add)(struct lib_counter *counter, + const size_t *dimension_indexes, int64_t v); + int (*counter_read)(struct lib_counter *counter, + const size_t *dimension_indexes, int cpu, + int64_t *value, bool *overflow, bool *underflow); + int (*counter_aggregate)(struct lib_counter *counter, + const size_t *dimension_indexes, int64_t *value, + bool *overflow, bool *underflow); + int (*counter_clear)(struct lib_counter *counter, const size_t *dimension_indexes); +}; + #define LTTNG_UST_STACK_CTX_PADDING 32 struct lttng_stack_ctx { struct lttng_event *event; @@ -622,6 +662,12 @@ struct lttng_ust_event_ht { struct cds_hlist_head table[LTTNG_UST_EVENT_HT_SIZE]; }; +#define LTTNG_UST_TRIGGER_HT_BITS 12 +#define LTTNG_UST_TRIGGER_HT_SIZE (1U << LTTNG_UST_TRIGGER_HT_BITS) +struct lttng_ust_trigger_ht { + struct cds_hlist_head table[LTTNG_UST_TRIGGER_HT_SIZE]; +}; + #define 
LTTNG_UST_ENUM_HT_BITS 12 #define LTTNG_UST_ENUM_HT_SIZE (1U << LTTNG_UST_ENUM_HT_BITS) @@ -662,6 +708,28 @@ struct lttng_session { struct lttng_ctx *ctx; /* contexts for filters. */ }; +struct lttng_counter { + int objd; + struct lttng_trigger_group *trigger_group; /* owner */ + struct lttng_counter_transport *transport; + struct lib_counter *counter; + struct lttng_counter_ops *ops; +}; + +struct lttng_trigger_group { + int objd; + void *owner; + int notification_fd; + struct cds_list_head node; /* Trigger group handle list */ + struct cds_list_head enablers_head; + struct cds_list_head triggers_head; /* list of triggers */ + struct lttng_ust_trigger_ht triggers_ht; /* hashtable of triggers */ + struct lttng_ctx *ctx; /* contexts for filters. */ + + struct lttng_counter *error_counter; + size_t error_counter_len; +}; + struct lttng_transport { char *name; struct cds_list_head node; @@ -669,12 +737,21 @@ struct lttng_transport { const struct lttng_ust_lib_ring_buffer_config *client_config; }; +struct lttng_counter_transport { + char *name; + struct cds_list_head node; + struct lttng_counter_ops ops; + const struct lib_counter_config *client_config; +}; + struct lttng_session *lttng_session_create(void); int lttng_session_enable(struct lttng_session *session); int lttng_session_disable(struct lttng_session *session); int lttng_session_statedump(struct lttng_session *session); void lttng_session_destroy(struct lttng_session *session); +void lttng_trigger_notification_send(struct lttng_trigger *trigger, const char *stack_data); + struct lttng_channel *lttng_channel_create(struct lttng_session *session, const char *transport_name, void *buf_addr, @@ -688,26 +765,19 @@ struct lttng_channel *lttng_channel_create(struct lttng_session *session, int lttng_channel_enable(struct lttng_channel *channel); int lttng_channel_disable(struct lttng_channel *channel); -struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type, - struct lttng_ust_event *event_param, - 
struct lttng_channel *chan); -int lttng_enabler_enable(struct lttng_enabler *enabler); -int lttng_enabler_disable(struct lttng_enabler *enabler); -int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler, - struct lttng_ust_filter_bytecode_node *bytecode); -int lttng_enabler_attach_context(struct lttng_enabler *enabler, - struct lttng_ust_context *ctx); -int lttng_enabler_attach_exclusion(struct lttng_enabler *enabler, - struct lttng_ust_excluder_node *excluder); - int lttng_attach_context(struct lttng_ust_context *context_param, union ust_args *uargs, struct lttng_ctx **ctx, struct lttng_session *session); -int lttng_session_context_init(struct lttng_ctx **ctx); - void lttng_transport_register(struct lttng_transport *transport); void lttng_transport_unregister(struct lttng_transport *transport); +void lttng_counter_transport_register(struct lttng_counter_transport *transport); +void lttng_counter_transport_unregister(struct lttng_counter_transport *transport); + +struct lttng_counter *lttng_ust_counter_create( + const char *counter_transport_name, + size_t number_dimensions, const struct lttng_counter_dimension *dimensions); + void synchronize_trace(void); int lttng_probe_register(struct lttng_probe_desc *desc); @@ -794,6 +864,7 @@ extern const struct lttng_ust_client_lib_ring_buffer_client_cb *lttng_client_cal extern const struct lttng_ust_client_lib_ring_buffer_client_cb *lttng_client_callbacks_overwrite; struct lttng_transport *lttng_transport_find(const char *name); +struct lttng_counter_transport *lttng_counter_transport_find(const char *name); int lttng_probes_get_event_list(struct lttng_ust_tracepoint_list *list); void lttng_probes_prune_event_list(struct lttng_ust_tracepoint_list *list); @@ -804,10 +875,7 @@ void lttng_probes_prune_field_list(struct lttng_ust_field_list *list); struct lttng_ust_field_iter * lttng_ust_field_list_get_iter_next(struct lttng_ust_field_list *list); -void lttng_enabler_event_link_bytecode(struct lttng_event *event, - 
struct lttng_enabler *enabler); void lttng_free_event_filter_runtime(struct lttng_event *event); -void lttng_filter_sync_state(struct lttng_bytecode_runtime *runtime); struct cds_list_head *lttng_get_probe_list_head(void); int lttng_session_active(void); @@ -824,9 +892,28 @@ void lttng_ust_fixup_fd_tracker_tls(void); /* For backward compatibility. Leave those exported symbols in place. */ extern struct lttng_ctx *lttng_static_ctx; +struct lttng_ust_filter_bytecode_node; +struct lttng_ust_excluder_node; void lttng_context_init(void); void lttng_context_exit(void); void lttng_filter_event_link_bytecode(struct lttng_event *event); +struct lttng_enabler *lttng_enabler_create( + enum lttng_enabler_format_type format_type, + struct lttng_ust_event *event_param, + struct lttng_channel *chan); +int lttng_enabler_enable(struct lttng_enabler *enabler); +int lttng_enabler_disable(struct lttng_enabler *enabler); +int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler, + struct lttng_ust_filter_bytecode_node *bytecode); +int lttng_enabler_attach_context(struct lttng_enabler *enabler, + struct lttng_ust_context *ctx); +int lttng_enabler_attach_exclusion(struct lttng_enabler *enabler, + struct lttng_ust_excluder_node *excluder); +void lttng_enabler_event_link_bytecode(struct lttng_event *event, + struct lttng_enabler *enabler); +void lttng_filter_sync_state(struct lttng_bytecode_runtime *runtime); +int lttng_session_context_init(struct lttng_ctx **ctx); + #ifdef __cplusplus } diff --git a/include/lttng/ust-tracepoint-event.h b/include/lttng/ust-tracepoint-event.h index ec0e3144..3ff6afa5 100644 --- a/include/lttng/ust-tracepoint-event.h +++ b/include/lttng/ust-tracepoint-event.h @@ -433,6 +433,24 @@ static void __event_probe__##_provider##___##_name(_TP_ARGS_DATA_PROTO(_args)); #include TRACEPOINT_INCLUDE +/* + * Stage 2.1 of tracepoint event generation. + * + * Create probe trigger callback prototypes. 
+ */ + +/* Reset all macros within TRACEPOINT_EVENT */ +#include + +#undef TP_ARGS +#define TP_ARGS(...) __VA_ARGS__ + +#undef TRACEPOINT_EVENT_CLASS +#define TRACEPOINT_EVENT_CLASS(_provider, _name, _args, _fields) \ +static void __trigger_probe__##_provider##___##_name(_TP_ARGS_DATA_PROTO(_args)); + +#include TRACEPOINT_INCLUDE + /* * Stage 3.0 of tracepoint event generation. * @@ -647,7 +665,7 @@ size_t __event_get_size__##_provider##___##_name(size_t *__dynamic_len, _TP_ARGS #undef TRACEPOINT_EVENT_CLASS #define TRACEPOINT_EVENT_CLASS(_provider, _name, _args, _fields) \ static inline \ -void __event_prepare_filter_stack__##_provider##___##_name(char *__stack_data,\ +void __event_prepare_interpreter_stack__##_provider##___##_name(char *__stack_data,\ _TP_ARGS_DATA_PROTO(_args)) \ { \ _fields \ @@ -853,15 +871,15 @@ void __event_probe__##_provider##___##_name(_TP_ARGS_DATA_PROTO(_args)) \ return; \ if (caa_unlikely(!TP_RCU_LINK_TEST())) \ return; \ - if (caa_unlikely(!cds_list_empty(&__event->bytecode_runtime_head))) { \ - struct lttng_bytecode_runtime *bc_runtime; \ + if (caa_unlikely(!cds_list_empty(&__event->filter_bytecode_runtime_head))) { \ + struct lttng_bytecode_runtime *__filter_bc_runtime; \ int __filter_record = __event->has_enablers_without_bytecode; \ \ - __event_prepare_filter_stack__##_provider##___##_name(__stackvar.__filter_stack_data, \ + __event_prepare_interpreter_stack__##_provider##___##_name(__stackvar.__filter_stack_data, \ _TP_ARGS_DATA_VAR(_args)); \ - tp_list_for_each_entry_rcu(bc_runtime, &__event->bytecode_runtime_head, node) { \ - if (caa_unlikely(bc_runtime->filter(bc_runtime, \ - __stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) { \ + tp_list_for_each_entry_rcu(__filter_bc_runtime, &__event->filter_bytecode_runtime_head, node) { \ + if (caa_unlikely(__filter_bc_runtime->interpreter_funcs.filter(__filter_bc_runtime, \ + __stackvar.__filter_stack_data) & LTTNG_INTERPRETER_RECORD_FLAG)) { \ __filter_record = 1; \ break; \ } 
\ @@ -913,6 +931,51 @@ static const char __tp_event_signature___##_provider##___##_name[] = \ #undef _TP_EXTRACT_STRING2 +/* + * Stage 5.2 of tracepoint event generation. + * + * Create the trigger probe function. + */ +#undef TRACEPOINT_EVENT_CLASS +#define TRACEPOINT_EVENT_CLASS(_provider, _name, _args, _fields) \ +static lttng_ust_notrace \ +void __trigger_probe__##_provider##___##_name(_TP_ARGS_DATA_PROTO(_args)); \ +static \ +void __trigger_probe__##_provider##___##_name(_TP_ARGS_DATA_PROTO(_args)) \ +{ \ + struct lttng_trigger *__trigger = (struct lttng_trigger *) __tp_data; \ + const size_t __num_fields = _TP_ARRAY_SIZE(__event_fields___##_provider##___##_name) - 1;\ + union { \ + size_t __dynamic_len[__num_fields]; \ + char __interpreter_stack_data[2 * sizeof(unsigned long) * __num_fields]; \ + } __stackvar; \ + if (caa_unlikely(!CMM_ACCESS_ONCE(__trigger->enabled))) \ + return; \ + if (caa_unlikely(!TP_RCU_LINK_TEST())) \ + return; \ + if (caa_unlikely(!cds_list_empty(&__trigger->filter_bytecode_runtime_head))) { \ + struct lttng_bytecode_runtime *__filter_bc_runtime; \ + int __filter_record = __trigger->has_enablers_without_bytecode; \ + \ + __event_prepare_interpreter_stack__##_provider##___##_name(__stackvar.__interpreter_stack_data, \ + _TP_ARGS_DATA_VAR(_args)); \ + tp_list_for_each_entry_rcu(__filter_bc_runtime, &__trigger->filter_bytecode_runtime_head, node) { \ + if (caa_unlikely(__filter_bc_runtime->interpreter_funcs.filter(__filter_bc_runtime, \ + __stackvar.__interpreter_stack_data) & LTTNG_INTERPRETER_RECORD_FLAG)) \ + __filter_record = 1; \ + } \ + if (caa_likely(!__filter_record)) \ + return; \ + } \ + if (caa_unlikely(!cds_list_empty(&__trigger->capture_bytecode_runtime_head))) \ + __event_prepare_interpreter_stack__##_provider##___##_name(__stackvar.__interpreter_stack_data, \ + _TP_ARGS_DATA_VAR(_args)); \ + \ + lttng_trigger_notification_send(__trigger, __stackvar.__interpreter_stack_data); \ +} + +#include TRACEPOINT_INCLUDE + /* * Stage 
6 of tracepoint event generation. * @@ -1008,6 +1071,7 @@ static const struct lttng_event_desc __event_desc___##_provider##_##_name = { .u = { \ .ext = { \ .model_emf_uri = &__ref_model_emf_uri___##_provider##___##_name, \ + .trigger_callback = (void (*)(void)) &__trigger_probe__##_provider##___##_template,\ }, \ }, \ }; diff --git a/include/share.h b/include/share.h index 20315b26..9df61772 100644 --- a/include/share.h +++ b/include/share.h @@ -24,8 +24,10 @@ */ #include +#include ssize_t patient_write(int fd, const void *buf, size_t count); +ssize_t patient_writev(int fd, struct iovec *iov, int iovcnt); ssize_t patient_send(int fd, const void *buf, size_t count, int flags); #endif /* _LTTNG_SHARE_H */ diff --git a/include/ust-comm.h b/include/ust-comm.h index a5e09572..440b1c4d 100644 --- a/include/ust-comm.h +++ b/include/ust-comm.h @@ -89,6 +89,7 @@ struct ustcomm_ust_msg { uint32_t cmd; char padding[USTCOMM_MSG_PADDING1]; union { + struct lttng_ust_trigger trigger; struct lttng_ust_channel channel; struct lttng_ust_stream stream; struct lttng_ust_event event; @@ -103,6 +104,14 @@ struct ustcomm_ust_msg { struct { uint32_t count; /* how many names follow */ } LTTNG_PACKED exclusion; + struct { + uint32_t data_size; /* following capture data */ + uint32_t reloc_offset; + uint64_t seqnum; + } LTTNG_PACKED capture; + struct lttng_ust_counter counter; + struct lttng_ust_counter_global counter_global; + struct lttng_ust_counter_cpu counter_cpu; char padding[USTCOMM_MSG_PADDING2]; } u; } LTTNG_PACKED; @@ -220,6 +229,13 @@ ssize_t ustcomm_recv_channel_from_sessiond(int sock, int ustcomm_recv_stream_from_sessiond(int sock, uint64_t *memory_map_size, int *shm_fd, int *wakeup_fd); +ssize_t ustcomm_recv_trigger_notif_fd_from_sessiond(int sock, + int *trigger_notif_fd); + +ssize_t ustcomm_recv_counter_from_sessiond(int sock, + void **counter_data, uint64_t len); +int ustcomm_recv_counter_shm_from_sessiond(int sock, + int *shm_fd); /* * Returns 0 on success, negative error 
value on error. diff --git a/libcounter/Makefile.am b/libcounter/Makefile.am new file mode 100644 index 00000000..9d52b74c --- /dev/null +++ b/libcounter/Makefile.am @@ -0,0 +1,18 @@ +AM_CPPFLAGS = -I$(top_srcdir)/include -I$(top_builddir)/include +AM_CFLAGS += -fno-strict-aliasing + +noinst_LTLIBRARIES = libcounter.la + +libcounter_la_SOURCES = \ + counter.c smp.c smp.h shm.c shm.h shm_internal.h shm_types.h \ + counter-api.h counter.h counter-internal.h counter-types.h + +libcounter_la_LIBADD = \ + -lpthread \ + -lrt + +if HAVE_LIBNUMA +libcounter_la_LIBADD += -lnuma +endif + +libcounter_la_CFLAGS = -DUST_COMPONENT="libcounter" $(AM_CFLAGS) diff --git a/libcounter/counter-api.h b/libcounter/counter-api.h new file mode 100644 index 00000000..0a7c0ade --- /dev/null +++ b/libcounter/counter-api.h @@ -0,0 +1,296 @@ +/* + * counter/counter-api.h + * + * LTTng Counters API, requiring counter/config.h + * + * Copyright (C) 2020 Mathieu Desnoyers + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _LTTNG_COUNTER_API_H +#define _LTTNG_COUNTER_API_H + +#include +#include +#include "counter.h" +#include "counter-internal.h" +#include +#include +#include +#include "../libringbuffer/getcpu.h" + +/* + * Using unsigned arithmetic because overflow is defined. + */ +static inline int __lttng_counter_add(const struct lib_counter_config *config, + enum lib_counter_config_alloc alloc, + enum lib_counter_config_sync sync, + struct lib_counter *counter, + const size_t *dimension_indexes, int64_t v, + int64_t *remainder) +{ + size_t index; + bool overflow = false, underflow = false; + struct lib_counter_layout *layout; + int64_t move_sum = 0; + + if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes))) + return -EOVERFLOW; + index = lttng_counter_get_index(config, counter, dimension_indexes); + + switch (alloc) { + case COUNTER_ALLOC_PER_CPU: + layout = &counter->percpu_counters[lttng_ust_get_cpu()]; + break; + case COUNTER_ALLOC_GLOBAL: + layout = &counter->global_counters; + break; + default: + return -EINVAL; + } + if (caa_unlikely(!layout->counters)) + return -ENODEV; + + switch (config->counter_size) { + case COUNTER_SIZE_8_BIT: + { + int8_t *int_p = (int8_t *) layout->counters + index; + int8_t old, n, res; + int8_t global_sum_step = counter->global_sum_step.s8; + + res = *int_p; + switch (sync) { + case COUNTER_SYNC_PER_CPU: + { + do { + move_sum = 0; + old = res; + n = (int8_t) ((uint8_t) old + (uint8_t) v); + if (caa_unlikely(n > (int8_t) global_sum_step)) + move_sum = (int8_t) global_sum_step / 2; + else if (caa_unlikely(n < -(int8_t) global_sum_step)) + move_sum = -((int8_t) global_sum_step / 2); + n -= move_sum; + res = uatomic_cmpxchg(int_p, old, 
n); + } while (old != res); + break; + } + case COUNTER_SYNC_GLOBAL: + { + do { + old = res; + n = (int8_t) ((uint8_t) old + (uint8_t) v); + res = uatomic_cmpxchg(int_p, old, n); + } while (old != res); + break; + } + } + if (v > 0 && (v >= UINT8_MAX || n < old)) + overflow = true; + else if (v < 0 && (v <= -UINT8_MAX || n > old)) + underflow = true; + break; + } + case COUNTER_SIZE_16_BIT: + { + int16_t *int_p = (int16_t *) layout->counters + index; + int16_t old, n, res; + int16_t global_sum_step = counter->global_sum_step.s16; + + res = *int_p; + switch (sync) { + case COUNTER_SYNC_PER_CPU: + { + do { + move_sum = 0; + old = res; + n = (int16_t) ((uint16_t) old + (uint16_t) v); + if (caa_unlikely(n > (int16_t) global_sum_step)) + move_sum = (int16_t) global_sum_step / 2; + else if (caa_unlikely(n < -(int16_t) global_sum_step)) + move_sum = -((int16_t) global_sum_step / 2); + n -= move_sum; + res = uatomic_cmpxchg(int_p, old, n); + } while (old != res); + break; + } + case COUNTER_SYNC_GLOBAL: + { + do { + old = res; + n = (int16_t) ((uint16_t) old + (uint16_t) v); + res = uatomic_cmpxchg(int_p, old, n); + } while (old != res); + break; + } + } + if (v > 0 && (v >= UINT16_MAX || n < old)) + overflow = true; + else if (v < 0 && (v <= -UINT16_MAX || n > old)) + underflow = true; + break; + } + case COUNTER_SIZE_32_BIT: + { + int32_t *int_p = (int32_t *) layout->counters + index; + int32_t old, n, res; + int32_t global_sum_step = counter->global_sum_step.s32; + + res = *int_p; + switch (sync) { + case COUNTER_SYNC_PER_CPU: + { + do { + move_sum = 0; + old = res; + n = (int32_t) ((uint32_t) old + (uint32_t) v); + if (caa_unlikely(n > (int32_t) global_sum_step)) + move_sum = (int32_t) global_sum_step / 2; + else if (caa_unlikely(n < -(int32_t) global_sum_step)) + move_sum = -((int32_t) global_sum_step / 2); + n -= move_sum; + res = uatomic_cmpxchg(int_p, old, n); + } while (old != res); + break; + } + case COUNTER_SYNC_GLOBAL: + { + do { + old = res; + n = (int32_t) 
((uint32_t) old + (uint32_t) v); + res = uatomic_cmpxchg(int_p, old, n); + } while (old != res); + break; + } + } + if (v > 0 && (v >= UINT32_MAX || n < old)) + overflow = true; + else if (v < 0 && (v <= -UINT32_MAX || n > old)) + underflow = true; + break; + } +#if CAA_BITS_PER_LONG == 64 + case COUNTER_SIZE_64_BIT: + { + int64_t *int_p = (int64_t *) layout->counters + index; + int64_t old, n, res; + int64_t global_sum_step = counter->global_sum_step.s64; + + res = *int_p; + switch (sync) { + case COUNTER_SYNC_PER_CPU: + { + do { + move_sum = 0; + old = res; + n = (int64_t) ((uint64_t) old + (uint64_t) v); + if (caa_unlikely(n > (int64_t) global_sum_step)) + move_sum = (int64_t) global_sum_step / 2; + else if (caa_unlikely(n < -(int64_t) global_sum_step)) + move_sum = -((int64_t) global_sum_step / 2); + n -= move_sum; + res = uatomic_cmpxchg(int_p, old, n); + } while (old != res); + break; + } + case COUNTER_SYNC_GLOBAL: + { + do { + old = res; + n = (int64_t) ((uint64_t) old + (uint64_t) v); + res = uatomic_cmpxchg(int_p, old, n); + } while (old != res); + break; + } + } + if (v > 0 && n < old) + overflow = true; + else if (v < 0 && n > old) + underflow = true; + break; + } +#endif + default: + return -EINVAL; + } + if (caa_unlikely(overflow && !lttng_bitmap_test_bit(index, layout->overflow_bitmap))) + lttng_bitmap_set_bit(index, layout->overflow_bitmap); + else if (caa_unlikely(underflow && !lttng_bitmap_test_bit(index, layout->underflow_bitmap))) + lttng_bitmap_set_bit(index, layout->underflow_bitmap); + if (remainder) + *remainder = move_sum; + return 0; +} + +static inline int __lttng_counter_add_percpu(const struct lib_counter_config *config, + struct lib_counter *counter, + const size_t *dimension_indexes, int64_t v) +{ + int64_t move_sum; + int ret; + + ret = __lttng_counter_add(config, COUNTER_ALLOC_PER_CPU, config->sync, + counter, dimension_indexes, v, &move_sum); + if (caa_unlikely(ret)) + return ret; + if (caa_unlikely(move_sum)) + return 
__lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, COUNTER_SYNC_GLOBAL, + counter, dimension_indexes, move_sum, NULL); + return 0; +} + +static inline int __lttng_counter_add_global(const struct lib_counter_config *config, + struct lib_counter *counter, + const size_t *dimension_indexes, int64_t v) +{ + return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, config->sync, counter, + dimension_indexes, v, NULL); +} + +static inline int lttng_counter_add(const struct lib_counter_config *config, + struct lib_counter *counter, + const size_t *dimension_indexes, int64_t v) +{ + switch (config->alloc) { + case COUNTER_ALLOC_PER_CPU: /* Fallthrough */ + case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL: + return __lttng_counter_add_percpu(config, counter, dimension_indexes, v); + case COUNTER_ALLOC_GLOBAL: + return __lttng_counter_add_global(config, counter, dimension_indexes, v); + default: + return -EINVAL; + } +} + +static inline int lttng_counter_inc(const struct lib_counter_config *config, + struct lib_counter *counter, + const size_t *dimension_indexes) +{ + return lttng_counter_add(config, counter, dimension_indexes, 1); +} + +static inline int lttng_counter_dec(const struct lib_counter_config *config, + struct lib_counter *counter, + const size_t *dimension_indexes) +{ + return lttng_counter_add(config, counter, dimension_indexes, -1); +} + +#endif /* _LTTNG_COUNTER_API_H */ diff --git a/libcounter/counter-internal.h b/libcounter/counter-internal.h new file mode 100644 index 00000000..38cb089b --- /dev/null +++ b/libcounter/counter-internal.h @@ -0,0 +1,65 @@ +/* + * counter/counter-internal.h + * + * LTTng Counters Internal Header + * + * Copyright (C) 2020 Mathieu Desnoyers + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, 
distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _LTTNG_COUNTER_INTERNAL_H +#define _LTTNG_COUNTER_INTERNAL_H + +#include +#include +#include +#include "counter-types.h" + +static inline int lttng_counter_validate_indexes(const struct lib_counter_config *config, + struct lib_counter *counter, + const size_t *dimension_indexes) +{ + size_t nr_dimensions = counter->nr_dimensions, i; + + for (i = 0; i < nr_dimensions; i++) { + if (caa_unlikely(dimension_indexes[i] >= counter->dimensions[i].max_nr_elem)) + return -EOVERFLOW; + } + return 0; +} + + +static inline size_t lttng_counter_get_index(const struct lib_counter_config *config, + struct lib_counter *counter, + const size_t *dimension_indexes) +{ + size_t nr_dimensions = counter->nr_dimensions, i; + size_t index = 0; + + for (i = 0; i < nr_dimensions; i++) { + struct lib_counter_dimension *dimension = &counter->dimensions[i]; + const size_t *dimension_index = &dimension_indexes[i]; + + index += *dimension_index * dimension->stride; + } + return index; +} + +#endif /* _LTTNG_COUNTER_INTERNAL_H */ diff --git a/libcounter/counter-types.h b/libcounter/counter-types.h new file mode 100644 index 00000000..b193fb3a --- /dev/null +++ b/libcounter/counter-types.h @@ 
-0,0 +1,93 @@ +/* + * counter/counter-types.h + * + * LTTng Counters Types + * + * Copyright (C) 2020 Mathieu Desnoyers + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _LTTNG_COUNTER_TYPES_H +#define _LTTNG_COUNTER_TYPES_H + +#include +#include +#include +#include +#include +#include +#include "shm_types.h" + +struct lib_counter_dimension { + /* + * Max. number of indexable elements. + */ + size_t max_nr_elem; + /* + * The stride for a dimension is the multiplication factor which + * should be applied to its index to take into account other + * dimensions nested inside. 
+ */ + size_t stride; +}; + +struct lib_counter_layout { + void *counters; + unsigned long *overflow_bitmap; + unsigned long *underflow_bitmap; + int shm_fd; + size_t shm_len; + struct lttng_counter_shm_handle handle; +}; + +enum lib_counter_arithmetic { + LIB_COUNTER_ARITHMETIC_MODULAR, + LIB_COUNTER_ARITHMETIC_SATURATE, +}; + +struct lib_counter { + size_t nr_dimensions; + int64_t allocated_elem; + struct lib_counter_dimension *dimensions; + enum lib_counter_arithmetic arithmetic; + union { + struct { + int32_t max, min; + } limits_32_bit; + struct { + int64_t max, min; + } limits_64_bit; + } saturation; + union { + int8_t s8; + int16_t s16; + int32_t s32; + int64_t s64; + } global_sum_step; /* 0 if unused */ + struct lib_counter_config config; + + struct lib_counter_layout global_counters; + struct lib_counter_layout *percpu_counters; + + bool is_daemon; + struct lttng_counter_shm_object_table *object_table; +}; + +#endif /* _LTTNG_COUNTER_TYPES_H */ diff --git a/libcounter/counter.c b/libcounter/counter.c new file mode 100644 index 00000000..4010d5dd --- /dev/null +++ b/libcounter/counter.c @@ -0,0 +1,564 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only) + * + * counter.c + * + * Copyright (C) 2020 Mathieu Desnoyers + */ + +#define _GNU_SOURCE +#include +#include "counter.h" +#include "counter-internal.h" +#include +#include +#include +#include +#include +#include +#include "smp.h" +#include "shm.h" + +static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension *dimension) +{ + return dimension->max_nr_elem; +} + +static int lttng_counter_init_stride(const struct lib_counter_config *config, + struct lib_counter *counter) +{ + size_t nr_dimensions = counter->nr_dimensions; + size_t stride = 1; + ssize_t i; + + for (i = nr_dimensions - 1; i >= 0; i--) { + struct lib_counter_dimension *dimension = &counter->dimensions[i]; + size_t nr_elem; + + nr_elem = lttng_counter_get_dimension_nr_elements(dimension); + dimension->stride = 
stride; + /* nr_elem should be minimum 1 for each dimension. */ + if (!nr_elem) + return -EINVAL; + stride *= nr_elem; + if (stride > SIZE_MAX / nr_elem) + return -EINVAL; + } + return 0; +} + +static int lttng_counter_layout_init(struct lib_counter *counter, int cpu, int shm_fd) +{ + struct lib_counter_layout *layout; + size_t counter_size; + size_t nr_elem = counter->allocated_elem; + size_t shm_length = 0, counters_offset, overflow_offset, underflow_offset; + struct lttng_counter_shm_object *shm_object; + + if (shm_fd < 0) + return 0; /* Skip, will be populated later. */ + + if (cpu == -1) + layout = &counter->global_counters; + else + layout = &counter->percpu_counters[cpu]; + switch (counter->config.counter_size) { + case COUNTER_SIZE_8_BIT: + case COUNTER_SIZE_16_BIT: + case COUNTER_SIZE_32_BIT: + case COUNTER_SIZE_64_BIT: + counter_size = (size_t) counter->config.counter_size; + break; + default: + return -EINVAL; + } + layout->shm_fd = shm_fd; + counters_offset = shm_length; + shm_length += counter_size * nr_elem; + overflow_offset = shm_length; + shm_length += ALIGN(nr_elem, 8) / 8; + underflow_offset = shm_length; + shm_length += ALIGN(nr_elem, 8) / 8; + layout->shm_len = shm_length; + if (counter->is_daemon) { + /* Allocate and clear shared memory. */ + shm_object = lttng_counter_shm_object_table_alloc(counter->object_table, + shm_length, LTTNG_COUNTER_SHM_OBJECT_SHM, shm_fd, cpu); + if (!shm_object) + return -ENOMEM; + } else { + /* Map pre-existing shared memory. 
*/ + shm_object = lttng_counter_shm_object_table_append_shm(counter->object_table, + shm_fd, shm_length); + if (!shm_object) + return -ENOMEM; + } + layout->counters = shm_object->memory_map + counters_offset; + layout->overflow_bitmap = (unsigned long *)(shm_object->memory_map + overflow_offset); + layout->underflow_bitmap = (unsigned long *)(shm_object->memory_map + underflow_offset); + return 0; +} + +int lttng_counter_set_global_shm(struct lib_counter *counter, int fd) +{ + struct lib_counter_config *config = &counter->config; + struct lib_counter_layout *layout; + + if (!(config->alloc & COUNTER_ALLOC_GLOBAL)) + return -EINVAL; + layout = &counter->global_counters; + if (layout->shm_fd >= 0) + return -EBUSY; + return lttng_counter_layout_init(counter, -1, fd); +} + +int lttng_counter_set_cpu_shm(struct lib_counter *counter, int cpu, int fd) +{ + struct lib_counter_config *config = &counter->config; + struct lib_counter_layout *layout; + + if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus()) + return -EINVAL; + + if (!(config->alloc & COUNTER_ALLOC_PER_CPU)) + return -EINVAL; + layout = &counter->percpu_counters[cpu]; + if (layout->shm_fd >= 0) + return -EBUSY; + return lttng_counter_layout_init(counter, cpu, fd); +} + +static +int lttng_counter_set_global_sum_step(struct lib_counter *counter, + int64_t global_sum_step) +{ + if (global_sum_step < 0) + return -EINVAL; + + switch (counter->config.counter_size) { + case COUNTER_SIZE_8_BIT: + if (global_sum_step > INT8_MAX) + return -EINVAL; + counter->global_sum_step.s8 = (int8_t) global_sum_step; + break; + case COUNTER_SIZE_16_BIT: + if (global_sum_step > INT16_MAX) + return -EINVAL; + counter->global_sum_step.s16 = (int16_t) global_sum_step; + break; + case COUNTER_SIZE_32_BIT: + if (global_sum_step > INT32_MAX) + return -EINVAL; + counter->global_sum_step.s32 = (int32_t) global_sum_step; + break; + case COUNTER_SIZE_64_BIT: + counter->global_sum_step.s64 = global_sum_step; + break; + default: + return 
-EINVAL; + } + + return 0; +} + +static +int validate_args(const struct lib_counter_config *config, + size_t nr_dimensions, + const size_t *max_nr_elem, + int64_t global_sum_step, + int global_counter_fd, + int nr_counter_cpu_fds, + const int *counter_cpu_fds) +{ + int nr_cpus = lttng_counter_num_possible_cpus(); + + if (CAA_BITS_PER_LONG != 64 && config->counter_size == COUNTER_SIZE_64_BIT) { + WARN_ON_ONCE(1); + return -1; + } + if (!max_nr_elem) + return -1; + /* + * global sum step is only useful with allocating both per-cpu + * and global counters. + */ + if (global_sum_step && (!(config->alloc & COUNTER_ALLOC_GLOBAL) || + !(config->alloc & COUNTER_ALLOC_PER_CPU))) + return -1; + if (!(config->alloc & COUNTER_ALLOC_GLOBAL) && global_counter_fd >= 0) + return -1; + if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds) + return -1; + if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && nr_counter_cpu_fds >= 0) + return -1; + if (counter_cpu_fds && nr_cpus != nr_counter_cpu_fds) + return -1; + return 0; +} + +struct lib_counter *lttng_counter_create(const struct lib_counter_config *config, + size_t nr_dimensions, + const size_t *max_nr_elem, + int64_t global_sum_step, + int global_counter_fd, + int nr_counter_cpu_fds, + const int *counter_cpu_fds, + bool is_daemon) +{ + struct lib_counter *counter; + size_t dimension, nr_elem = 1; + int cpu, ret; + int nr_handles = 0; + int nr_cpus = lttng_counter_num_possible_cpus(); + + if (validate_args(config, nr_dimensions, max_nr_elem, + global_sum_step, global_counter_fd, nr_counter_cpu_fds, + counter_cpu_fds)) + return NULL; + counter = zmalloc(sizeof(struct lib_counter)); + if (!counter) + return NULL; + counter->global_counters.shm_fd = -1; + counter->config = *config; + counter->is_daemon = is_daemon; + if (lttng_counter_set_global_sum_step(counter, global_sum_step)) + goto error_sum_step; + counter->nr_dimensions = nr_dimensions; + counter->dimensions = zmalloc(nr_dimensions * sizeof(*counter->dimensions)); + if
(!counter->dimensions) + goto error_dimensions; + for (dimension = 0; dimension < nr_dimensions; dimension++) + counter->dimensions[dimension].max_nr_elem = max_nr_elem[dimension]; + if (config->alloc & COUNTER_ALLOC_PER_CPU) { + counter->percpu_counters = zmalloc(sizeof(struct lib_counter_layout) * nr_cpus); + if (!counter->percpu_counters) + goto error_alloc_percpu; + lttng_counter_for_each_possible_cpu(cpu) + counter->percpu_counters[cpu].shm_fd = -1; + } + + if (lttng_counter_init_stride(config, counter)) + goto error_init_stride; + //TODO saturation values. + for (dimension = 0; dimension < counter->nr_dimensions; dimension++) + nr_elem *= lttng_counter_get_dimension_nr_elements(&counter->dimensions[dimension]); + counter->allocated_elem = nr_elem; + + if (config->alloc & COUNTER_ALLOC_GLOBAL) + nr_handles++; + if (config->alloc & COUNTER_ALLOC_PER_CPU) + nr_handles += nr_cpus; + /* Allocate table for global and per-cpu counters. */ + counter->object_table = lttng_counter_shm_object_table_create(nr_handles); + if (!counter->object_table) + goto error_alloc_object_table; + + if (config->alloc & COUNTER_ALLOC_GLOBAL) { + ret = lttng_counter_layout_init(counter, -1, global_counter_fd); /* global */ + if (ret) + goto layout_init_error; + } + if ((config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds) { + lttng_counter_for_each_possible_cpu(cpu) { + ret = lttng_counter_layout_init(counter, cpu, counter_cpu_fds[cpu]); + if (ret) + goto layout_init_error; + } + } + return counter; + +layout_init_error: + lttng_counter_shm_object_table_destroy(counter->object_table, is_daemon); +error_alloc_object_table: +error_init_stride: + free(counter->percpu_counters); +error_alloc_percpu: + free(counter->dimensions); +error_dimensions: +error_sum_step: + free(counter); + return NULL; +} + +void lttng_counter_destroy(struct lib_counter *counter) +{ + struct lib_counter_config *config = &counter->config; + + if (config->alloc & COUNTER_ALLOC_PER_CPU) + 
		free(counter->percpu_counters);
	lttng_counter_shm_object_table_destroy(counter->object_table, counter->is_daemon);
	free(counter->dimensions);
	free(counter);
}

/*
 * Expose the shm fd and mapping length backing the global counter layout.
 * Returns 0 on success, -1 if the counter has no global shm fd.
 */
int lttng_counter_get_global_shm(struct lib_counter *counter, int *fd, size_t *len)
{
	int shm_fd;

	shm_fd = counter->global_counters.shm_fd;
	if (shm_fd < 0)
		return -1;
	*fd = shm_fd;
	*len = counter->global_counters.shm_len;
	return 0;
}

/*
 * Expose the shm fd and mapping length backing one per-cpu counter layout.
 * Returns 0 on success, -1 if @cpu is out of range or has no shm fd.
 */
int lttng_counter_get_cpu_shm(struct lib_counter *counter, int cpu, int *fd, size_t *len)
{
	struct lib_counter_layout *layout;
	int shm_fd;

	if (cpu >= lttng_counter_num_possible_cpus())
		return -1;
	layout = &counter->percpu_counters[cpu];
	shm_fd = layout->shm_fd;
	if (shm_fd < 0)
		return -1;
	*fd = shm_fd;
	*len = layout->shm_len;
	return 0;
}

/*
 * Read one counter cell addressed by @dimension_indexes.
 *
 * @cpu selects the per-cpu layout (cpu >= 0) or the global layout
 * (cpu < 0); which combinations are legal depends on config->alloc.
 * On success, *value receives the cell value and *overflow/*underflow
 * receive the corresponding bitmap flags for that cell.
 *
 * Returns 0 on success, -EOVERFLOW on out-of-range indexes, -EINVAL on
 * an illegal cpu/alloc combination or unknown counter size, -ENODEV if
 * the selected layout has no mapped counters.
 */
int lttng_counter_read(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int cpu, int64_t *value, bool *overflow,
		bool *underflow)
{
	size_t index;
	struct lib_counter_layout *layout;

	if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	/* Select the layout matching the allocation mode and requested cpu. */
	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
			return -EINVAL;
		layout = &counter->percpu_counters[cpu];
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0) {
			if (cpu >= lttng_counter_num_possible_cpus())
				return -EINVAL;
			layout = &counter->percpu_counters[cpu];
		} else {
			layout = &counter->global_counters;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0)
			return -EINVAL;
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}
	if (caa_unlikely(!layout->counters))
		return -ENODEV;

	/* Width-dispatched volatile load of the counter cell. */
	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		*value = (int64_t) CMM_LOAD_SHARED(*int_p);
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		*value = (int64_t) CMM_LOAD_SHARED(*int_p);
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		*value = (int64_t) CMM_LOAD_SHARED(*int_p);
		break;
	}
#if CAA_BITS_PER_LONG == 64
	/* 64-bit cells only supported on 64-bit userspace. */
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		*value = CMM_LOAD_SHARED(*int_p);
		break;
	}
#endif
	default:
		return -EINVAL;
	}
	*overflow = lttng_bitmap_test_bit(index, layout->overflow_bitmap);
	*underflow = lttng_bitmap_test_bit(index, layout->underflow_bitmap);
	return 0;
}

/*
 * Sum one counter cell across the global layout and every possible cpu
 * (as applicable for config->alloc), with saturation-flag tracking:
 * the sum is performed in unsigned arithmetic and per-step wrap-around
 * is reported through *overflow / *underflow alongside the per-cell
 * bitmap flags. Returns 0 on success or a negative errno from
 * lttng_counter_read().
 */
int lttng_counter_aggregate(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int64_t *value, bool *overflow,
		bool *underflow)
{
	int cpu, ret;
	int64_t v, sum = 0;
	bool of, uf;

	*overflow = false;
	*underflow = false;

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		/* Read global counter. */
		ret = lttng_counter_read(config, counter, dimension_indexes,
					 -1, &v, &of, &uf);
		if (ret < 0)
			return ret;
		sum += v;
		*overflow |= of;
		*underflow |= uf;
		break;
	case COUNTER_ALLOC_PER_CPU:
		break;
	default:
		return -EINVAL;
	}

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU:
		lttng_counter_for_each_possible_cpu(cpu) {
			int64_t old = sum;

			ret = lttng_counter_read(config, counter, dimension_indexes,
						 cpu, &v, &of, &uf);
			if (ret < 0)
				return ret;
			*overflow |= of;
			*underflow |= uf;
			/* Overflow is defined on unsigned types. */
			sum = (int64_t) ((uint64_t) old + (uint64_t) v);
			if (v > 0 && sum < old)
				*overflow = true;
			else if (v < 0 && sum > old)
				*underflow = true;
		}
		break;
	default:
		return -EINVAL;
	}
	*value = sum;
	return 0;
}

/*
 * Zero one counter cell in the layout selected by @cpu (cpu < 0 means
 * the global layout) and clear its overflow/underflow bitmap bits.
 * Same layout-selection and error semantics as lttng_counter_read().
 */
static
int lttng_counter_clear_cpu(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int cpu)
{
	size_t index;
	struct lib_counter_layout *layout;

	if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
			return -EINVAL;
		layout = &counter->percpu_counters[cpu];
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0) {
			if (cpu >= lttng_counter_num_possible_cpus())
				return -EINVAL;
			layout = &counter->percpu_counters[cpu];
		} else {
			layout = &counter->global_counters;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0)
			return -EINVAL;
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}
	if (caa_unlikely(!layout->counters))
		return -ENODEV;

	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
#if CAA_BITS_PER_LONG == 64
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
#endif
	default:
		return -EINVAL;
	}
	lttng_bitmap_clear_bit(index, layout->overflow_bitmap);
	lttng_bitmap_clear_bit(index, layout->underflow_bitmap);
	return 0;
}

/*
 * Clear one counter cell everywhere it is materialized: global layout
 * and/or every possible cpu, depending on config->alloc. Returns 0 on
 * success or the first negative errno from lttng_counter_clear_cpu().
 */
int lttng_counter_clear(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes)
{
	int cpu, ret;

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		break;
	case COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		/* Clear global counter. */
		ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, -1);
		if (ret < 0)
			return ret;
		break;
	default:
		return -EINVAL;
	}

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		lttng_counter_for_each_possible_cpu(cpu) {
			ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, cpu);
			if (ret < 0)
				return ret;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
diff --git a/libcounter/counter.h b/libcounter/counter.h
new file mode 100644
index 00000000..54f83009
--- /dev/null
+++ b/libcounter/counter.h
@@ -0,0 +1,65 @@
+/*
+ * lttng/counter.h
+ *
+ * LTTng Counters API
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _LTTNG_COUNTER_H +#define _LTTNG_COUNTER_H + +#include +#include +#include "counter-types.h" + +/* max_nr_elem is for each dimension. */ +struct lib_counter *lttng_counter_create(const struct lib_counter_config *config, + size_t nr_dimensions, + const size_t *max_nr_elem, + int64_t global_sum_step, + int global_counter_fd, + int nr_counter_cpu_fds, + const int *counter_cpu_fds, + bool is_daemon); +void lttng_counter_destroy(struct lib_counter *counter); + +int lttng_counter_set_global_shm(struct lib_counter *counter, int fd); +int lttng_counter_set_cpu_shm(struct lib_counter *counter, int cpu, int fd); + +int lttng_counter_get_global_shm(struct lib_counter *counter, int *fd, size_t *len); +int lttng_counter_get_cpu_shm(struct lib_counter *counter, int cpu, int *fd, size_t *len); + +int lttng_counter_read(const struct lib_counter_config *config, + struct lib_counter *counter, + const size_t *dimension_indexes, + int cpu, int64_t *value, + bool *overflow, bool *underflow); +int lttng_counter_aggregate(const struct lib_counter_config *config, + struct lib_counter *counter, + const size_t *dimension_indexes, + int64_t *value, + bool *overflow, bool *underflow); +int lttng_counter_clear(const struct lib_counter_config *config, + struct lib_counter *counter, + const size_t *dimension_indexes); + +#endif /* _LTTNG_COUNTER_H */ diff --git a/libcounter/shm.c b/libcounter/shm.c new file mode 100644 index 00000000..a2e1f819 --- /dev/null +++ b/libcounter/shm.c @@ -0,0 +1,387 @@ +/* + * libcounter/shm.c + * + * Copyright (C) 2005-2012 Mathieu Desnoyers + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * 
License as published by the Free Software Foundation; only + * version 2.1 of the License. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#define _LGPL_SOURCE +#include +#include "shm.h" +#include +#include +#include +#include +#include /* For mode constants */ +#include /* For O_* constants */ +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef HAVE_LIBNUMA +#include +#include +#endif +#include +#include +#include "../libringbuffer/mmap.h" + +/* + * Ensure we have the required amount of space available by writing 0 + * into the entire buffer. Not doing so can trigger SIGBUS when going + * beyond the available shm space. 
+ */ +static +int zero_file(int fd, size_t len) +{ + ssize_t retlen; + size_t written = 0; + char *zeropage; + long pagelen; + int ret; + + pagelen = sysconf(_SC_PAGESIZE); + if (pagelen < 0) + return (int) pagelen; + zeropage = calloc(pagelen, 1); + if (!zeropage) + return -ENOMEM; + + while (len > written) { + do { + retlen = write(fd, zeropage, + min_t(size_t, pagelen, len - written)); + } while (retlen == -1UL && errno == EINTR); + if (retlen < 0) { + ret = (int) retlen; + goto error; + } + written += retlen; + } + ret = 0; +error: + free(zeropage); + return ret; +} + +struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj) +{ + struct lttng_counter_shm_object_table *table; + + table = zmalloc(sizeof(struct lttng_counter_shm_object_table) + + max_nb_obj * sizeof(table->objects[0])); + if (!table) + return NULL; + table->size = max_nb_obj; + return table; +} + +static +struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_shm(struct lttng_counter_shm_object_table *table, + size_t memory_map_size, + int cpu_fd) +{ + int shmfd, ret; + struct lttng_counter_shm_object *obj; + char *memory_map; + + if (cpu_fd < 0) + return NULL; + if (table->allocated_len >= table->size) + return NULL; + obj = &table->objects[table->allocated_len]; + + /* create shm */ + + shmfd = cpu_fd; + ret = zero_file(shmfd, memory_map_size); + if (ret) { + PERROR("zero_file"); + goto error_zero_file; + } + ret = ftruncate(shmfd, memory_map_size); + if (ret) { + PERROR("ftruncate"); + goto error_ftruncate; + } + /* + * Also ensure the file metadata is synced with the storage by using + * fsync(2). 
+ */ + ret = fsync(shmfd); + if (ret) { + PERROR("fsync"); + goto error_fsync; + } + obj->shm_fd_ownership = 0; + obj->shm_fd = shmfd; + + /* memory_map: mmap */ + memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE, + MAP_SHARED | LTTNG_MAP_POPULATE, shmfd, 0); + if (memory_map == MAP_FAILED) { + PERROR("mmap"); + goto error_mmap; + } + obj->type = LTTNG_COUNTER_SHM_OBJECT_SHM; + obj->memory_map = memory_map; + obj->memory_map_size = memory_map_size; + obj->allocated_len = 0; + obj->index = table->allocated_len++; + + return obj; + +error_mmap: +error_fsync: +error_ftruncate: +error_zero_file: + return NULL; +} + +static +struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_mem(struct lttng_counter_shm_object_table *table, + size_t memory_map_size) +{ + struct lttng_counter_shm_object *obj; + void *memory_map; + + if (table->allocated_len >= table->size) + return NULL; + obj = &table->objects[table->allocated_len]; + + memory_map = zmalloc(memory_map_size); + if (!memory_map) + goto alloc_error; + + /* no shm_fd */ + obj->shm_fd = -1; + obj->shm_fd_ownership = 0; + + obj->type = LTTNG_COUNTER_SHM_OBJECT_MEM; + obj->memory_map = memory_map; + obj->memory_map_size = memory_map_size; + obj->allocated_len = 0; + obj->index = table->allocated_len++; + + return obj; + +alloc_error: + return NULL; +} + +/* + * libnuma prints errors on the console even for numa_available(). + * Work-around this limitation by using get_mempolicy() directly to + * check whether the kernel supports mempolicy. 
+ */ +#ifdef HAVE_LIBNUMA +static bool lttng_is_numa_available(void) +{ + int ret; + + ret = get_mempolicy(NULL, NULL, 0, NULL, 0); + if (ret && errno == ENOSYS) { + return false; + } + return numa_available() > 0; +} +#endif + +struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table, + size_t memory_map_size, + enum lttng_counter_shm_object_type type, + int cpu_fd, + int cpu) +{ + struct lttng_counter_shm_object *shm_object; +#ifdef HAVE_LIBNUMA + int oldnode = 0, node; + bool numa_avail; + + numa_avail = lttng_is_numa_available(); + if (numa_avail) { + oldnode = numa_preferred(); + if (cpu >= 0) { + node = numa_node_of_cpu(cpu); + if (node >= 0) + numa_set_preferred(node); + } + if (cpu < 0 || node < 0) + numa_set_localalloc(); + } +#endif /* HAVE_LIBNUMA */ + switch (type) { + case LTTNG_COUNTER_SHM_OBJECT_SHM: + shm_object = _lttng_counter_shm_object_table_alloc_shm(table, memory_map_size, + cpu_fd); + break; + case LTTNG_COUNTER_SHM_OBJECT_MEM: + shm_object = _lttng_counter_shm_object_table_alloc_mem(table, memory_map_size); + break; + default: + assert(0); + } +#ifdef HAVE_LIBNUMA + if (numa_avail) + numa_set_preferred(oldnode); +#endif /* HAVE_LIBNUMA */ + return shm_object; +} + +struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table, + int shm_fd, + size_t memory_map_size) +{ + struct lttng_counter_shm_object *obj; + char *memory_map; + + if (table->allocated_len >= table->size) + return NULL; + + obj = &table->objects[table->allocated_len]; + + obj->shm_fd = shm_fd; + obj->shm_fd_ownership = 1; + + /* memory_map: mmap */ + memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE, + MAP_SHARED | LTTNG_MAP_POPULATE, shm_fd, 0); + if (memory_map == MAP_FAILED) { + PERROR("mmap"); + goto error_mmap; + } + obj->type = LTTNG_COUNTER_SHM_OBJECT_SHM; + obj->memory_map = memory_map; + obj->memory_map_size = memory_map_size; + 
obj->allocated_len = memory_map_size; + obj->index = table->allocated_len++; + + return obj; + +error_mmap: + return NULL; +} + +/* + * Passing ownership of mem to object. + */ +struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_mem(struct lttng_counter_shm_object_table *table, + void *mem, size_t memory_map_size) +{ + struct lttng_counter_shm_object *obj; + + if (table->allocated_len >= table->size) + return NULL; + obj = &table->objects[table->allocated_len]; + + obj->shm_fd = -1; + obj->shm_fd_ownership = 0; + + obj->type = LTTNG_COUNTER_SHM_OBJECT_MEM; + obj->memory_map = mem; + obj->memory_map_size = memory_map_size; + obj->allocated_len = memory_map_size; + obj->index = table->allocated_len++; + + return obj; + + return NULL; +} + +static +void lttng_counter_shmp_object_destroy(struct lttng_counter_shm_object *obj, int consumer) +{ + switch (obj->type) { + case LTTNG_COUNTER_SHM_OBJECT_SHM: + { + int ret; + + ret = munmap(obj->memory_map, obj->memory_map_size); + if (ret) { + PERROR("umnmap"); + assert(0); + } + + if (obj->shm_fd_ownership) { + /* Delete FDs only if called from app (not consumer). */ + if (!consumer) { + lttng_ust_lock_fd_tracker(); + ret = close(obj->shm_fd); + if (!ret) { + lttng_ust_delete_fd_from_tracker(obj->shm_fd); + } else { + PERROR("close"); + assert(0); + } + lttng_ust_unlock_fd_tracker(); + } else { + ret = close(obj->shm_fd); + if (ret) { + PERROR("close"); + assert(0); + } + } + } + break; + } + case LTTNG_COUNTER_SHM_OBJECT_MEM: + { + free(obj->memory_map); + break; + } + default: + assert(0); + } +} + +void lttng_counter_shm_object_table_destroy(struct lttng_counter_shm_object_table *table, int consumer) +{ + int i; + + for (i = 0; i < table->allocated_len; i++) + lttng_counter_shmp_object_destroy(&table->objects[i], consumer); + free(table); +} + +/* + * lttng_counter_zalloc_shm - allocate memory within a shm object. + * + * Shared memory is already zeroed by shmget. 
+ * *NOT* multithread-safe (should be protected by mutex). + * Returns a -1, -1 tuple on error. + */ +struct lttng_counter_shm_ref lttng_counter_zalloc_shm(struct lttng_counter_shm_object *obj, size_t len) +{ + struct lttng_counter_shm_ref ref; + struct lttng_counter_shm_ref shm_ref_error = { -1, -1 }; + + if (obj->memory_map_size - obj->allocated_len < len) + return shm_ref_error; + ref.index = obj->index; + ref.offset = obj->allocated_len; + obj->allocated_len += len; + return ref; +} + +void lttng_counter_align_shm(struct lttng_counter_shm_object *obj, size_t align) +{ + size_t offset_len = offset_align(obj->allocated_len, align); + obj->allocated_len += offset_len; +} diff --git a/libcounter/shm.h b/libcounter/shm.h new file mode 100644 index 00000000..2c6e0c7b --- /dev/null +++ b/libcounter/shm.h @@ -0,0 +1,142 @@ +#ifndef _LIBCOUNTER_SHM_H +#define _LIBCOUNTER_SHM_H + +/* + * libcounter/shm.h + * + * Copyright (C) 2011-2012 Mathieu Desnoyers + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; only + * version 2.1 of the License. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include "shm_types.h" + +/* lttng_counter_handle_create - for UST. */ +extern +struct lttng_counter_shm_handle *lttng_counter_handle_create(void *data, + uint64_t memory_map_size, int wakeup_fd); +/* lttng_counter_handle_add_cpu - for UST. 
*/ +extern +int lttng_counter_handle_add_cpu(struct lttng_counter_shm_handle *handle, + int shm_fd, uint32_t cpu_nr, + uint64_t memory_map_size); +unsigned int lttng_counter_handle_get_nr_cpus(struct lttng_counter_shm_handle *handle); + +/* + * Pointer dereferencing. We don't trust the shm_ref, so we validate + * both the index and offset with known boundaries. + * + * "shmp" and "shmp_index" guarantee that it's safe to use the pointer + * target type, even in the occurrence of shm_ref modification by an + * untrusted process having write access to the shm_ref. We return a + * NULL pointer if the ranges are invalid. + */ +static inline +char *_lttng_counter_shmp_offset(struct lttng_counter_shm_object_table *table, + struct lttng_counter_shm_ref *ref, + size_t idx, size_t elem_size) +{ + struct lttng_counter_shm_object *obj; + size_t objindex, ref_offset; + + objindex = (size_t) ref->index; + if (caa_unlikely(objindex >= table->allocated_len)) + return NULL; + obj = &table->objects[objindex]; + ref_offset = (size_t) ref->offset; + ref_offset += idx * elem_size; + /* Check if part of the element returned would exceed the limits. 
*/ + if (caa_unlikely(ref_offset + elem_size > obj->memory_map_size)) + return NULL; + return &obj->memory_map[ref_offset]; +} + +#define lttng_counter_shmp_index(handle, ref, index) \ + ({ \ + __typeof__((ref)._type) ____ptr_ret; \ + ____ptr_ret = (__typeof__(____ptr_ret)) _lttng_counter_shmp_offset((handle)->table, &(ref)._ref, index, sizeof(*____ptr_ret)); \ + ____ptr_ret; \ + }) + +#define lttng_counter_shmp(handle, ref) lttng_counter_shmp_index(handle, ref, 0) + +static inline +void _lttng_counter_set_shmp(struct lttng_counter_shm_ref *ref, struct lttng_counter_shm_ref src) +{ + *ref = src; +} + +#define lttng_counter_set_shmp(ref, src) _lttng_counter_set_shmp(&(ref)._ref, src) + +struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj); +struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table, + size_t memory_map_size, + enum lttng_counter_shm_object_type type, + const int cpu_fd, + int cpu); +struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table, + int shm_fd, size_t memory_map_size); +/* mem ownership is passed to lttng_counter_shm_object_table_append_mem(). */ +struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_mem(struct lttng_counter_shm_object_table *table, + void *mem, size_t memory_map_size); +void lttng_counter_shm_object_table_destroy(struct lttng_counter_shm_object_table *table, int consumer); + +/* + * lttng_counter_zalloc_shm - allocate memory within a shm object. + * + * Shared memory is already zeroed by shmget. + * *NOT* multithread-safe (should be protected by mutex). + * Returns a -1, -1 tuple on error. 
+ */ +struct lttng_counter_shm_ref lttng_counter_zalloc_shm(struct lttng_counter_shm_object *obj, size_t len); +void lttng_counter_align_shm(struct lttng_counter_shm_object *obj, size_t align); + +static inline +int lttng_counter_shm_get_shm_fd(struct lttng_counter_shm_handle *handle, struct lttng_counter_shm_ref *ref) +{ + struct lttng_counter_shm_object_table *table = handle->table; + struct lttng_counter_shm_object *obj; + size_t index; + + index = (size_t) ref->index; + if (caa_unlikely(index >= table->allocated_len)) + return -EPERM; + obj = &table->objects[index]; + return obj->shm_fd; +} + + +static inline +int lttng_counter_shm_get_shm_size(struct lttng_counter_shm_handle *handle, struct lttng_counter_shm_ref *ref, + uint64_t *size) +{ + struct lttng_counter_shm_object_table *table = handle->table; + struct lttng_counter_shm_object *obj; + size_t index; + + index = (size_t) ref->index; + if (caa_unlikely(index >= table->allocated_len)) + return -EPERM; + obj = &table->objects[index]; + *size = obj->memory_map_size; + return 0; +} + +#endif /* _LIBCOUNTER_SHM_H */ diff --git a/libcounter/shm_internal.h b/libcounter/shm_internal.h new file mode 100644 index 00000000..dcc3aab6 --- /dev/null +++ b/libcounter/shm_internal.h @@ -0,0 +1,35 @@ +#ifndef _LIBCOUNTER_SHM_INTERNAL_H +#define _LIBCOUNTER_SHM_INTERNAL_H + +/* + * libcounter/shm_internal.h + * + * Copyright (C) 2011-2012 Mathieu Desnoyers + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; only + * version 2.1 of the License. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +struct lttng_counter_shm_ref { + volatile ssize_t index; /* within the object table */ + volatile ssize_t offset; /* within the object */ +}; + +#define DECLARE_LTTNG_COUNTER_SHMP(type, name) \ + union { \ + struct lttng_counter_shm_ref _ref; \ + type *_type; \ + } name + +#endif /* _LIBCOUNTER_SHM_INTERNAL_H */ diff --git a/libcounter/shm_types.h b/libcounter/shm_types.h new file mode 100644 index 00000000..2086a832 --- /dev/null +++ b/libcounter/shm_types.h @@ -0,0 +1,54 @@ +#ifndef _LIBCOUNTER_SHM_TYPES_H +#define _LIBCOUNTER_SHM_TYPES_H + +/* + * libcounter/shm_types.h + * + * Copyright (C) 2011-2012 Mathieu Desnoyers + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; only + * version 2.1 of the License. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include "shm_internal.h" + +enum lttng_counter_shm_object_type { + LTTNG_COUNTER_SHM_OBJECT_SHM, + LTTNG_COUNTER_SHM_OBJECT_MEM, +}; + +struct lttng_counter_shm_object { + enum lttng_counter_shm_object_type type; + size_t index; /* within the object table */ + int shm_fd; /* shm fd */ + char *memory_map; + size_t memory_map_size; + uint64_t allocated_len; + int shm_fd_ownership; +}; + +struct lttng_counter_shm_object_table { + size_t size; + size_t allocated_len; + struct lttng_counter_shm_object objects[]; +}; + +struct lttng_counter_shm_handle { + struct lttng_counter_shm_object_table *table; +}; + +#endif /* _LIBCOUNTER_SHM_TYPES_H */ diff --git a/libcounter/smp.c b/libcounter/smp.c new file mode 100644 index 00000000..22ad98ab --- /dev/null +++ b/libcounter/smp.c @@ -0,0 +1,111 @@ +/* + * libcounter/smp.c + * + * Copyright (C) 2011-2012 Mathieu Desnoyers + * Copyright (C) 2019 Michael Jeanson + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; only + * version 2.1 of the License. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#define _GNU_SOURCE +#define _LGPL_SOURCE +#include +#include +#include "smp.h" + +int __lttng_counter_num_possible_cpus; + +#if (defined(__GLIBC__) || defined( __UCLIBC__)) +void _lttng_counter_get_num_possible_cpus(void) +{ + int result; + + /* On Linux, when some processors are offline + * _SC_NPROCESSORS_CONF counts the offline + * processors, whereas _SC_NPROCESSORS_ONLN + * does not. If we used _SC_NPROCESSORS_ONLN, + * getcpu() could return a value greater than + * this sysconf, in which case the arrays + * indexed by processor would overflow. + */ + result = sysconf(_SC_NPROCESSORS_CONF); + if (result == -1) + return; + __lttng_counter_num_possible_cpus = result; +} + +#else + +/* + * The MUSL libc implementation of the _SC_NPROCESSORS_CONF sysconf does not + * return the number of configured CPUs in the system but relies on the cpu + * affinity mask of the current task. + * + * So instead we use a strategy similar to GLIBC's, counting the cpu + * directories in "/sys/devices/system/cpu" and fallback on the value from + * sysconf if it fails. + */ + +#include +#include +#include +#include +#include + +#define __max(a,b) ((a)>(b)?(a):(b)) + +void _lttng_counter_get_num_possible_cpus(void) +{ + int result, count = 0; + DIR *cpudir; + struct dirent *entry; + + cpudir = opendir("/sys/devices/system/cpu"); + if (cpudir == NULL) + goto end; + + /* + * Count the number of directories named "cpu" followed by and + * integer. This is the same strategy as glibc uses. 
+ */ + while ((entry = readdir(cpudir))) { + if (entry->d_type == DT_DIR && + strncmp(entry->d_name, "cpu", 3) == 0) { + + char *endptr; + unsigned long cpu_num; + + cpu_num = strtoul(entry->d_name + 3, &endptr, 10); + if ((cpu_num < ULONG_MAX) && (endptr != entry->d_name + 3) + && (*endptr == '\0')) { + count++; + } + } + } + +end: + /* + * Get the sysconf value as a fallback. Keep the highest number. + */ + result = __max(sysconf(_SC_NPROCESSORS_CONF), count); + + /* + * If both methods failed, don't store the value. + */ + if (result < 1) + return; + __lttng_counter_num_possible_cpus = result; +} +#endif diff --git a/libcounter/smp.h b/libcounter/smp.h new file mode 100644 index 00000000..00ca7a03 --- /dev/null +++ b/libcounter/smp.h @@ -0,0 +1,43 @@ +#ifndef _LIBCOUNTER_SMP_H +#define _LIBCOUNTER_SMP_H + +/* + * libcounter/smp.h + * + * Copyright (C) 2011-2012 Mathieu Desnoyers + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; only + * version 2.1 of the License. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/* + * 4kB of per-cpu data available. 
+ */ +#define LTTNG_COUNTER_PER_CPU_MEM_SIZE 4096 + +extern int __lttng_counter_num_possible_cpus; +extern void _lttng_counter_get_num_possible_cpus(void); + +static inline +int lttng_counter_num_possible_cpus(void) +{ + if (!__lttng_counter_num_possible_cpus) + _lttng_counter_get_num_possible_cpus(); + return __lttng_counter_num_possible_cpus; +} + +#define lttng_counter_for_each_possible_cpu(cpu) \ + for ((cpu) = 0; (cpu) < lttng_counter_num_possible_cpus(); (cpu)++) + +#endif /* _LIBCOUNTER_SMP_H */ diff --git a/liblttng-ust-comm/lttng-ust-comm.c b/liblttng-ust-comm/lttng-ust-comm.c index f9398e0d..c2766c3a 100644 --- a/liblttng-ust-comm/lttng-ust-comm.c +++ b/liblttng-ust-comm/lttng-ust-comm.c @@ -669,6 +669,46 @@ error_check: return len; } +ssize_t ustcomm_recv_trigger_notif_fd_from_sessiond(int sock, + int *_trigger_notif_fd) +{ + ssize_t nr_fd; + int trigger_notif_fd, ret; + + /* Receive trigger notification fd */ + lttng_ust_lock_fd_tracker(); + nr_fd = ustcomm_recv_fds_unix_sock(sock, &trigger_notif_fd, 1); + if (nr_fd <= 0) { + lttng_ust_unlock_fd_tracker(); + if (nr_fd < 0) { + ret = nr_fd; + goto error; + } else { + ret = -EIO; + goto error; + } + } + + ret = lttng_ust_add_fd_to_tracker(trigger_notif_fd); + if (ret < 0) { + ret = close(trigger_notif_fd); + if (ret) { + PERROR("close on trigger notif fd"); + } + ret = -EIO; + lttng_ust_unlock_fd_tracker(); + goto error; + } + + *_trigger_notif_fd = ret; + lttng_ust_unlock_fd_tracker(); + + ret = nr_fd; + +error: + return ret; +} + int ustcomm_recv_stream_from_sessiond(int sock, uint64_t *memory_map_size, int *shm_fd, int *wakeup_fd) @@ -726,6 +766,75 @@ error: return ret; } +ssize_t ustcomm_recv_counter_from_sessiond(int sock, + void **_counter_data, uint64_t var_len) +{ + void *counter_data; + ssize_t len; + + if (var_len > LTTNG_UST_COUNTER_DATA_MAX_LEN) { + len = -EINVAL; + goto error_check; + } + /* Receive variable length data */ + counter_data = zmalloc(var_len); + if (!counter_data) { + len = 
-ENOMEM; + goto error_alloc; + } + len = ustcomm_recv_unix_sock(sock, counter_data, var_len); + if (len != var_len) { + goto error_recv; + } + *_counter_data = counter_data; + return len; + +error_recv: + free(counter_data); +error_alloc: +error_check: + return len; +} + +int ustcomm_recv_counter_shm_from_sessiond(int sock, + int *shm_fd) +{ + ssize_t len; + int ret; + int fds[1]; + + /* recv shm fd fd */ + lttng_ust_lock_fd_tracker(); + len = ustcomm_recv_fds_unix_sock(sock, fds, 1); + if (len <= 0) { + lttng_ust_unlock_fd_tracker(); + if (len < 0) { + ret = len; + goto error; + } else { + ret = -EIO; + goto error; + } + } + + ret = lttng_ust_add_fd_to_tracker(fds[0]); + if (ret < 0) { + ret = close(fds[0]); + if (ret) { + PERROR("close on received shm_fd"); + } + ret = -EIO; + lttng_ust_unlock_fd_tracker(); + goto error; + } + *shm_fd = ret; + lttng_ust_unlock_fd_tracker(); + return 0; + +error: + return ret; +} + /* * Returns 0 on success, negative error value on error. */ diff --git a/liblttng-ust-ctl/ustctl.c b/liblttng-ust-ctl/ustctl.c index ea5ab9dc..3c2d53fc 100644 --- a/liblttng-ust-ctl/ustctl.c +++ b/liblttng-ust-ctl/ustctl.c @@ -38,6 +38,10 @@ #include "../liblttng-ust/clock.h" #include "../liblttng-ust/getenv.h" +#include "../libcounter/shm.h" +#include "../libcounter/smp.h" +#include "../libcounter/counter.h" + /* * Number of milliseconds to retry before failing metadata writes on * buffer full condition. (10 seconds) @@ -68,6 +72,24 @@ struct ustctl_consumer_stream { uint64_t memory_map_size; }; +#define USTCTL_COUNTER_ATTR_DIMENSION_MAX 8 +struct ustctl_counter_attr { + enum ustctl_counter_arithmetic arithmetic; + enum ustctl_counter_bitness bitness; + uint32_t nr_dimensions; + int64_t global_sum_step; + struct ustctl_counter_dimension dimensions[USTCTL_COUNTER_ATTR_DIMENSION_MAX]; +}; + +/* + * Counter representation within daemon. 
+ */ +struct ustctl_daemon_counter { + struct lib_counter *counter; + const struct lttng_counter_ops *ops; + struct ustctl_counter_attr *attr; /* initial attributes */ +}; + extern void lttng_ring_buffer_client_overwrite_init(void); extern void lttng_ring_buffer_client_overwrite_rt_init(void); extern void lttng_ring_buffer_client_discard_init(void); @@ -78,6 +100,10 @@ extern void lttng_ring_buffer_client_overwrite_rt_exit(void); extern void lttng_ring_buffer_client_discard_exit(void); extern void lttng_ring_buffer_client_discard_rt_exit(void); extern void lttng_ring_buffer_metadata_client_exit(void); +extern void lttng_counter_client_percpu_32_overflow_init(void); +extern void lttng_counter_client_percpu_32_overflow_exit(void); +extern void lttng_counter_client_percpu_64_overflow_init(void); +extern void lttng_counter_client_percpu_64_overflow_exit(void); int ustctl_release_handle(int sock, int handle) { @@ -111,8 +137,10 @@ int ustctl_release_object(int sock, struct lttng_ust_object_data *data) ret = -errno; return ret; } + data->u.channel.wakeup_fd = -1; } free(data->u.channel.data); + data->u.channel.data = NULL; break; case LTTNG_UST_OBJECT_TYPE_STREAM: if (data->u.stream.shm_fd >= 0) { @@ -121,6 +149,7 @@ int ustctl_release_object(int sock, struct lttng_ust_object_data *data) ret = -errno; return ret; } + data->u.stream.shm_fd = -1; } if (data->u.stream.wakeup_fd >= 0) { ret = close(data->u.stream.wakeup_fd); @@ -128,10 +157,37 @@ int ustctl_release_object(int sock, struct lttng_ust_object_data *data) ret = -errno; return ret; } + data->u.stream.wakeup_fd = -1; } break; case LTTNG_UST_OBJECT_TYPE_EVENT: case LTTNG_UST_OBJECT_TYPE_CONTEXT: + case LTTNG_UST_OBJECT_TYPE_TRIGGER_GROUP: + case LTTNG_UST_OBJECT_TYPE_TRIGGER: + break; + case LTTNG_UST_OBJECT_TYPE_COUNTER: + free(data->u.counter.data); + data->u.counter.data = NULL; + break; + case LTTNG_UST_OBJECT_TYPE_COUNTER_GLOBAL: + if (data->u.counter_global.shm_fd >= 0) { + ret = 
close(data->u.counter_global.shm_fd); + if (ret < 0) { + ret = -errno; + return ret; + } + data->u.counter_global.shm_fd = -1; + } + break; + case LTTNG_UST_OBJECT_TYPE_COUNTER_CPU: + if (data->u.counter_cpu.shm_fd >= 0) { + ret = close(data->u.counter_cpu.shm_fd); + if (ret < 0) { + ret = -errno; + return ret; + } + data->u.counter_cpu.shm_fd = -1; + } break; default: assert(0); @@ -328,6 +384,37 @@ int ustctl_set_filter(int sock, struct lttng_ust_filter_bytecode *bytecode, return ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd); } +int ustctl_set_capture(int sock, struct lttng_ust_capture_bytecode *bytecode, + struct lttng_ust_object_data *obj_data) +{ + struct ustcomm_ust_msg lum; + struct ustcomm_ust_reply lur; + int ret; + + if (!obj_data) + return -EINVAL; + + memset(&lum, 0, sizeof(lum)); + lum.handle = obj_data->handle; + lum.cmd = LTTNG_UST_CAPTURE; + lum.u.capture.data_size = bytecode->len; + lum.u.capture.reloc_offset = bytecode->reloc_offset; + lum.u.capture.seqnum = bytecode->seqnum; + + ret = ustcomm_send_app_msg(sock, &lum); + if (ret) + return ret; + /* send var len bytecode */ + ret = ustcomm_send_unix_sock(sock, bytecode->data, + bytecode->len); + if (ret < 0) { + return ret; + } + if (ret != bytecode->len) + return -EINVAL; + return ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd); +} + int ustctl_set_exclusion(int sock, struct lttng_ust_event_exclusion *exclusion, struct lttng_ust_object_data *obj_data) { @@ -418,6 +505,97 @@ int ustctl_stop_session(int sock, int handle) return ustctl_disable(sock, &obj); } +int ustctl_create_trigger_group(int sock, int pipe_fd, + struct lttng_ust_object_data **_trigger_group_data) +{ + struct lttng_ust_object_data *trigger_group_data; + struct ustcomm_ust_msg lum; + struct ustcomm_ust_reply lur; + ssize_t len; + int ret; + + if (!_trigger_group_data) + return -EINVAL; + + trigger_group_data = zmalloc(sizeof(*trigger_group_data)); + if (!trigger_group_data) + return -ENOMEM; + + 
trigger_group_data->type = LTTNG_UST_OBJECT_TYPE_TRIGGER_GROUP; + + memset(&lum, 0, sizeof(lum)); + lum.handle = LTTNG_UST_ROOT_HANDLE; + lum.cmd = LTTNG_UST_TRIGGER_GROUP_CREATE; + + ret = ustcomm_send_app_msg(sock, &lum); + if (ret) + goto error; + + /* Send trigger notification pipe. */ + len = ustcomm_send_fds_unix_sock(sock, &pipe_fd, 1); + if (len <= 0) { + ret = len; + goto error; + } + + ret = ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd); + if (ret) + goto error; + + trigger_group_data->handle = lur.ret_val; + DBG("received trigger group handle %d", trigger_group_data->handle); + + *_trigger_group_data = trigger_group_data; + + ret = 0; + goto end; +error: + free(trigger_group_data); + +end: + return ret; +} + +int ustctl_create_trigger(int sock, struct lttng_ust_trigger *trigger, + struct lttng_ust_object_data *trigger_group, + struct lttng_ust_object_data **_trigger_data) +{ + struct ustcomm_ust_msg lum; + struct ustcomm_ust_reply lur; + struct lttng_ust_object_data *trigger_data; + int ret; + + if (!trigger_group || !_trigger_data) + return -EINVAL; + + trigger_data = zmalloc(sizeof(*trigger_data)); + if (!trigger_data) + return -ENOMEM; + + trigger_data->type = LTTNG_UST_OBJECT_TYPE_TRIGGER; + + memset(&lum, 0, sizeof(lum)); + lum.handle = trigger_group->handle; + lum.cmd = LTTNG_UST_TRIGGER_CREATE; + + strncpy(lum.u.trigger.name, trigger->name, + LTTNG_UST_SYM_NAME_LEN); + lum.u.trigger.instrumentation = trigger->instrumentation; + lum.u.trigger.loglevel_type = trigger->loglevel_type; + lum.u.trigger.loglevel = trigger->loglevel; + lum.u.trigger.id = trigger->id; + ret = ustcomm_send_app_cmd(sock, &lum, &lur); + if (ret) { + free(trigger_data); + return ret; + } + trigger_data->handle = lur.ret_val; + DBG("received trigger handle %u", trigger_data->handle); + *_trigger_data = trigger_data; + + return ret; +} + int ustctl_tracepoint_list(int sock) { struct ustcomm_ust_msg lum; @@ -980,6 +1158,44 @@ int ustctl_duplicate_ust_object_data(struct 
lttng_ust_object_data **dest, goto error_type; } + case LTTNG_UST_OBJECT_TYPE_COUNTER: + { + obj->u.counter.data = zmalloc(obj->size); + if (!obj->u.counter.data) { + ret = -ENOMEM; + goto error_type; + } + memcpy(obj->u.counter.data, src->u.counter.data, obj->size); + break; + } + + case LTTNG_UST_OBJECT_TYPE_COUNTER_GLOBAL: + { + if (src->u.counter_global.shm_fd >= 0) { + obj->u.counter_global.shm_fd = + dup(src->u.counter_global.shm_fd); + if (obj->u.counter_global.shm_fd < 0) { + ret = errno; + goto error_type; + } + } + break; + } + + case LTTNG_UST_OBJECT_TYPE_COUNTER_CPU: + { + obj->u.counter_cpu.cpu_nr = src->u.counter_cpu.cpu_nr; + if (src->u.counter_cpu.shm_fd >= 0) { + obj->u.counter_cpu.shm_fd = + dup(src->u.counter_cpu.shm_fd); + if (obj->u.counter_cpu.shm_fd < 0) { + ret = errno; + goto error_type; + } + } + break; + } + default: ret = -EINVAL; goto error_type; @@ -2236,6 +2452,373 @@ int ustctl_regenerate_statedump(int sock, int handle) return 0; } +/* counter operations */ + +int ustctl_get_nr_cpu_per_counter(void) +{ + return lttng_counter_num_possible_cpus(); +} + +struct ustctl_daemon_counter * + ustctl_create_counter(size_t nr_dimensions, + const struct ustctl_counter_dimension *dimensions, + int64_t global_sum_step, + int global_counter_fd, + int nr_counter_cpu_fds, + const int *counter_cpu_fds, + enum ustctl_counter_bitness bitness, + enum ustctl_counter_arithmetic arithmetic, + uint32_t alloc_flags) +{ + const char *transport_name; + struct ustctl_daemon_counter *counter; + struct lttng_counter_transport *transport; + struct lttng_counter_dimension ust_dim[LTTNG_COUNTER_DIMENSION_MAX]; + size_t i; + + if (nr_dimensions > LTTNG_COUNTER_DIMENSION_MAX) + return NULL; + /* Currently, only per-cpu allocation is supported. 
*/ + switch (alloc_flags) { + case USTCTL_COUNTER_ALLOC_PER_CPU: + break; + + case USTCTL_COUNTER_ALLOC_PER_CPU | USTCTL_COUNTER_ALLOC_GLOBAL: + case USTCTL_COUNTER_ALLOC_GLOBAL: + default: + return NULL; + } + switch (bitness) { + case USTCTL_COUNTER_BITNESS_32: + switch (arithmetic) { + case USTCTL_COUNTER_ARITHMETIC_MODULAR: + transport_name = "counter-per-cpu-32-modular"; + break; + case USTCTL_COUNTER_ARITHMETIC_SATURATION: + transport_name = "counter-per-cpu-32-saturation"; + break; + default: + return NULL; + } + break; + case USTCTL_COUNTER_BITNESS_64: + switch (arithmetic) { + case USTCTL_COUNTER_ARITHMETIC_MODULAR: + transport_name = "counter-per-cpu-64-modular"; + break; + case USTCTL_COUNTER_ARITHMETIC_SATURATION: + transport_name = "counter-per-cpu-64-saturation"; + break; + default: + return NULL; + } + break; + default: + return NULL; + } + + transport = lttng_counter_transport_find(transport_name); + if (!transport) { + DBG("LTTng transport %s not found\n", + transport_name); + return NULL; + } + + counter = zmalloc(sizeof(*counter)); + if (!counter) + return NULL; + counter->attr = zmalloc(sizeof(*counter->attr)); + if (!counter->attr) + goto free_counter; + counter->attr->bitness = bitness; + counter->attr->arithmetic = arithmetic; + counter->attr->nr_dimensions = nr_dimensions; + counter->attr->global_sum_step = global_sum_step; + for (i = 0; i < nr_dimensions; i++) + counter->attr->dimensions[i] = dimensions[i]; + + for (i = 0; i < nr_dimensions; i++) { + ust_dim[i].size = dimensions[i].size; + ust_dim[i].underflow_index = dimensions[i].underflow_index; + ust_dim[i].overflow_index = dimensions[i].overflow_index; + ust_dim[i].has_underflow = dimensions[i].has_underflow; + ust_dim[i].has_overflow = dimensions[i].has_overflow; + } + counter->counter = transport->ops.counter_create(nr_dimensions, + ust_dim, global_sum_step, global_counter_fd, + nr_counter_cpu_fds, counter_cpu_fds, true); + if (!counter->counter) + goto free_attr; + counter->ops = 
&transport->ops; + return counter; + +free_attr: + free(counter->attr); +free_counter: + free(counter); + return NULL; +} + +int ustctl_create_counter_data(struct ustctl_daemon_counter *counter, + struct lttng_ust_object_data **_counter_data) +{ + struct lttng_ust_object_data *counter_data; + struct lttng_ust_counter_conf counter_conf; + size_t i; + int ret; + + switch (counter->attr->arithmetic) { + case USTCTL_COUNTER_ARITHMETIC_MODULAR: + counter_conf.arithmetic = LTTNG_UST_COUNTER_ARITHMETIC_MODULAR; + break; + case USTCTL_COUNTER_ARITHMETIC_SATURATION: + counter_conf.arithmetic = LTTNG_UST_COUNTER_ARITHMETIC_SATURATION; + break; + default: + return -EINVAL; + } + switch (counter->attr->bitness) { + case USTCTL_COUNTER_BITNESS_32: + counter_conf.bitness = LTTNG_UST_COUNTER_BITNESS_32BITS; + break; + case USTCTL_COUNTER_BITNESS_64: + counter_conf.bitness = LTTNG_UST_COUNTER_BITNESS_64BITS; + break; + default: + return -EINVAL; + } + counter_conf.number_dimensions = counter->attr->nr_dimensions; + counter_conf.global_sum_step = counter->attr->global_sum_step; + for (i = 0; i < counter->attr->nr_dimensions; i++) { + counter_conf.dimensions[i].size = counter->attr->dimensions[i].size; + counter_conf.dimensions[i].underflow_index = counter->attr->dimensions[i].underflow_index; + counter_conf.dimensions[i].overflow_index = counter->attr->dimensions[i].overflow_index; + counter_conf.dimensions[i].has_underflow = counter->attr->dimensions[i].has_underflow; + counter_conf.dimensions[i].has_overflow = counter->attr->dimensions[i].has_overflow; + } + + counter_data = zmalloc(sizeof(*counter_data)); + if (!counter_data) { + ret = -ENOMEM; + goto error_alloc; + } + counter_data->type = LTTNG_UST_OBJECT_TYPE_COUNTER; + counter_data->handle = -1; + + counter_data->size = sizeof(counter_conf); + counter_data->u.counter.data = zmalloc(sizeof(counter_conf)); + if (!counter_data->u.counter.data) { + ret = -ENOMEM; + goto error_alloc_data; + } + + 
memcpy(counter_data->u.counter.data, &counter_conf, sizeof(counter_conf)); + *_counter_data = counter_data; + + return 0; + +error_alloc_data: + free(counter_data); +error_alloc: + return ret; +} + +int ustctl_create_counter_global_data(struct ustctl_daemon_counter *counter, + struct lttng_ust_object_data **_counter_global_data) +{ + struct lttng_ust_object_data *counter_global_data; + int ret, fd; + size_t len; + + if (lttng_counter_get_global_shm(counter->counter, &fd, &len)) + return -EINVAL; + counter_global_data = zmalloc(sizeof(*counter_global_data)); + if (!counter_global_data) { + ret = -ENOMEM; + goto error_alloc; + } + counter_global_data->type = LTTNG_UST_OBJECT_TYPE_COUNTER_GLOBAL; + counter_global_data->handle = -1; + counter_global_data->size = len; + counter_global_data->u.counter_global.shm_fd = fd; + *_counter_global_data = counter_global_data; + return 0; + +error_alloc: + return ret; +} + +int ustctl_create_counter_cpu_data(struct ustctl_daemon_counter *counter, int cpu, + struct lttng_ust_object_data **_counter_cpu_data) +{ + struct lttng_ust_object_data *counter_cpu_data; + int ret, fd; + size_t len; + + if (lttng_counter_get_cpu_shm(counter->counter, cpu, &fd, &len)) + return -EINVAL; + counter_cpu_data = zmalloc(sizeof(*counter_cpu_data)); + if (!counter_cpu_data) { + ret = -ENOMEM; + goto error_alloc; + } + counter_cpu_data->type = LTTNG_UST_OBJECT_TYPE_COUNTER_CPU; + counter_cpu_data->handle = -1; + counter_cpu_data->size = len; + counter_cpu_data->u.counter_cpu.shm_fd = fd; + counter_cpu_data->u.counter_cpu.cpu_nr = cpu; + *_counter_cpu_data = counter_cpu_data; + return 0; + +error_alloc: + return ret; +} + +void ustctl_destroy_counter(struct ustctl_daemon_counter *counter) +{ + counter->ops->counter_destroy(counter->counter); + free(counter->attr); + free(counter); +} + +int ustctl_send_counter_data_to_ust(int sock, int parent_handle, + struct lttng_ust_object_data *counter_data) +{ + struct ustcomm_ust_msg lum; + struct ustcomm_ust_reply 
lur; + int ret; + size_t size; + ssize_t len; + + if (!counter_data) + return -EINVAL; + + size = counter_data->size; + memset(&lum, 0, sizeof(lum)); + lum.handle = parent_handle; + lum.cmd = LTTNG_UST_COUNTER; + lum.u.counter.len = size; + ret = ustcomm_send_app_msg(sock, &lum); + if (ret) + return ret; + + /* Send counter data */ + len = ustcomm_send_unix_sock(sock, counter_data->u.counter.data, size); + if (len != size) { + if (len < 0) + return len; + else + return -EIO; + } + + ret = ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd); + if (!ret) { + counter_data->handle = lur.ret_val; + } + return ret; +} + +int ustctl_send_counter_global_data_to_ust(int sock, + struct lttng_ust_object_data *counter_data, + struct lttng_ust_object_data *counter_global_data) +{ + struct ustcomm_ust_msg lum; + struct ustcomm_ust_reply lur; + int ret, shm_fd[1]; + size_t size; + ssize_t len; + + if (!counter_data || !counter_global_data) + return -EINVAL; + + size = counter_global_data->size; + memset(&lum, 0, sizeof(lum)); + lum.handle = counter_data->handle; /* parent handle */ + lum.cmd = LTTNG_UST_COUNTER_GLOBAL; + lum.u.counter_global.len = size; + ret = ustcomm_send_app_msg(sock, &lum); + if (ret) + return ret; + + shm_fd[0] = counter_global_data->u.counter_global.shm_fd; + len = ustcomm_send_fds_unix_sock(sock, shm_fd, 1); + if (len <= 0) { + if (len < 0) + return len; + else + return -EIO; + } + + ret = ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd); + if (!ret) { + counter_global_data->handle = lur.ret_val; + } + return ret; +} + +int ustctl_send_counter_cpu_data_to_ust(int sock, + struct lttng_ust_object_data *counter_data, + struct lttng_ust_object_data *counter_cpu_data) +{ + struct ustcomm_ust_msg lum; + struct ustcomm_ust_reply lur; + int ret, shm_fd[1]; + size_t size; + ssize_t len; + + if (!counter_data || !counter_cpu_data) + return -EINVAL; + + size = counter_cpu_data->size; + memset(&lum, 0, sizeof(lum)); + lum.handle = counter_data->handle; /* 
parent handle */ + lum.cmd = LTTNG_UST_COUNTER_CPU; + lum.u.counter_cpu.len = size; + lum.u.counter_cpu.cpu_nr = counter_cpu_data->u.counter_cpu.cpu_nr; + ret = ustcomm_send_app_msg(sock, &lum); + if (ret) + return ret; + + shm_fd[0] = counter_cpu_data->u.counter_cpu.shm_fd; + len = ustcomm_send_fds_unix_sock(sock, shm_fd, 1); + if (len <= 0) { + if (len < 0) + return len; + else + return -EIO; + } + + ret = ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd); + if (!ret) { + counter_cpu_data->handle = lur.ret_val; + } + return ret; +} + +int ustctl_counter_read(struct ustctl_daemon_counter *counter, + const size_t *dimension_indexes, + int cpu, int64_t *value, + bool *overflow, bool *underflow) +{ + return counter->ops->counter_read(counter->counter, dimension_indexes, cpu, + value, overflow, underflow); +} + +int ustctl_counter_aggregate(struct ustctl_daemon_counter *counter, + const size_t *dimension_indexes, + int64_t *value, + bool *overflow, bool *underflow) +{ + return counter->ops->counter_aggregate(counter->counter, dimension_indexes, + value, overflow, underflow); +} + +int ustctl_counter_clear(struct ustctl_daemon_counter *counter, + const size_t *dimension_indexes) +{ + return counter->ops->counter_clear(counter->counter, dimension_indexes); +} + static __attribute__((constructor)) void ustctl_init(void) { @@ -2247,6 +2830,8 @@ void ustctl_init(void) lttng_ring_buffer_client_overwrite_rt_init(); lttng_ring_buffer_client_discard_init(); lttng_ring_buffer_client_discard_rt_init(); + lttng_counter_client_percpu_32_overflow_init(); + lttng_counter_client_percpu_64_overflow_init(); lib_ringbuffer_signal_init(); } @@ -2258,4 +2843,6 @@ void ustctl_exit(void) lttng_ring_buffer_client_overwrite_rt_exit(); lttng_ring_buffer_client_overwrite_exit(); lttng_ring_buffer_metadata_client_exit(); + lttng_counter_client_percpu_32_overflow_exit(); + lttng_counter_client_percpu_64_overflow_exit(); } diff --git a/liblttng-ust/Makefile.am b/liblttng-ust/Makefile.am 
index 3f4c023d..2f1fb2a8 100644 --- a/liblttng-ust/Makefile.am +++ b/liblttng-ust/Makefile.am @@ -23,9 +23,15 @@ liblttng_ust_tracepoint_la_LDFLAGS = -no-undefined -version-info $(LTTNG_UST_LIB liblttng_ust_tracepoint_la_CFLAGS = -DUST_COMPONENT="liblttng_ust_tracepoint" $(AM_CFLAGS) liblttng_ust_runtime_la_SOURCES = \ + bytecode.h \ lttng-ust-comm.c \ lttng-ust-abi.c \ lttng-probes.c \ + lttng-bytecode.c \ + lttng-bytecode.h \ + lttng-bytecode-validator.c \ + lttng-bytecode-specialize.c \ + lttng-bytecode-interpreter.c \ lttng-context-provider.c \ lttng-context-vtid.c \ lttng-context-vpid.c \ @@ -49,12 +55,6 @@ liblttng_ust_runtime_la_SOURCES = \ lttng-context-vsgid.c \ lttng-context.c \ lttng-events.c \ - lttng-filter.c \ - lttng-filter.h \ - lttng-filter-validator.c \ - lttng-filter-specialize.c \ - lttng-filter-interpreter.c \ - filter-bytecode.h \ lttng-hash-helper.h \ lttng-ust-elf.c \ lttng-ust-statedump.c \ @@ -62,7 +62,10 @@ liblttng_ust_runtime_la_SOURCES = \ lttng-ust-statedump-provider.h \ ust_lib.c \ ust_lib.h \ + context-internal.h \ + context-provider-internal.h \ tracepoint-internal.h \ + ust-events-internal.h \ clock.h \ compat.h \ wait.h \ @@ -76,6 +79,7 @@ liblttng_ust_runtime_la_SOURCES = \ getenv.h \ string-utils.c \ string-utils.h \ + trigger-notification.c \ ns.h \ creds.h @@ -100,6 +104,8 @@ liblttng_ust_support_la_SOURCES = \ lttng-ring-buffer-client-overwrite-rt.c \ lttng-ring-buffer-metadata-client.h \ lttng-ring-buffer-metadata-client.c \ + lttng-counter-client-percpu-32-modular.c \ + lttng-counter-client-percpu-64-modular.c \ lttng-clock.c lttng-getcpu.c liblttng_ust_la_SOURCES = @@ -107,7 +113,8 @@ liblttng_ust_la_SOURCES = liblttng_ust_la_LDFLAGS = -no-undefined -version-info $(LTTNG_UST_LIBRARY_VERSION) liblttng_ust_support_la_LIBADD = \ - $(top_builddir)/libringbuffer/libringbuffer.la + $(top_builddir)/libringbuffer/libringbuffer.la \ + $(top_builddir)/libcounter/libcounter.la liblttng_ust_la_LIBADD = \ -lpthread \ @@ -117,6 +124,7 
@@ liblttng_ust_la_LIBADD = \ $(top_builddir)/liblttng-ust-comm/liblttng-ust-comm.la \ liblttng-ust-tracepoint.la \ liblttng-ust-runtime.la liblttng-ust-support.la \ + $(top_builddir)/libmsgpack/libmsgpack.la \ $(DL_LIBS) liblttng_ust_la_CFLAGS = -DUST_COMPONENT="liblttng_ust" $(AM_CFLAGS) diff --git a/liblttng-ust/bytecode.h b/liblttng-ust/bytecode.h new file mode 100644 index 00000000..90ea9ad4 --- /dev/null +++ b/liblttng-ust/bytecode.h @@ -0,0 +1,251 @@ +#ifndef _BYTECODE_H +#define _BYTECODE_H + +/* + * bytecode.h + * + * LTTng bytecode + * + * Copyright 2012-2016 - Mathieu Desnoyers + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include + +#ifndef LTTNG_PACKED +#error "LTTNG_PACKED should be defined" +#endif + +/* + * offsets are absolute from start of bytecode. + */ + +struct field_ref { + /* Initially, symbol offset. After link, field offset. 
*/ + uint16_t offset; +} __attribute__((packed)); + +struct get_symbol { + /* Symbol offset. */ + uint16_t offset; +} LTTNG_PACKED; + +struct get_index_u16 { + uint16_t index; +} LTTNG_PACKED; + +struct get_index_u64 { + uint64_t index; +} LTTNG_PACKED; + +struct literal_numeric { + int64_t v; +} __attribute__((packed)); + +struct literal_double { + double v; +} __attribute__((packed)); + +struct literal_string { + char string[0]; +} __attribute__((packed)); + +enum bytecode_op { + BYTECODE_OP_UNKNOWN = 0, + + BYTECODE_OP_RETURN = 1, + + /* binary */ + BYTECODE_OP_MUL = 2, + BYTECODE_OP_DIV = 3, + BYTECODE_OP_MOD = 4, + BYTECODE_OP_PLUS = 5, + BYTECODE_OP_MINUS = 6, + BYTECODE_OP_BIT_RSHIFT = 7, + BYTECODE_OP_BIT_LSHIFT = 8, + BYTECODE_OP_BIT_AND = 9, + BYTECODE_OP_BIT_OR = 10, + BYTECODE_OP_BIT_XOR = 11, + + /* binary comparators */ + BYTECODE_OP_EQ = 12, + BYTECODE_OP_NE = 13, + BYTECODE_OP_GT = 14, + BYTECODE_OP_LT = 15, + BYTECODE_OP_GE = 16, + BYTECODE_OP_LE = 17, + + /* string binary comparator: apply to */ + BYTECODE_OP_EQ_STRING = 18, + BYTECODE_OP_NE_STRING = 19, + BYTECODE_OP_GT_STRING = 20, + BYTECODE_OP_LT_STRING = 21, + BYTECODE_OP_GE_STRING = 22, + BYTECODE_OP_LE_STRING = 23, + + /* s64 binary comparator */ + BYTECODE_OP_EQ_S64 = 24, + BYTECODE_OP_NE_S64 = 25, + BYTECODE_OP_GT_S64 = 26, + BYTECODE_OP_LT_S64 = 27, + BYTECODE_OP_GE_S64 = 28, + BYTECODE_OP_LE_S64 = 29, + + /* double binary comparator */ + BYTECODE_OP_EQ_DOUBLE = 30, + BYTECODE_OP_NE_DOUBLE = 31, + BYTECODE_OP_GT_DOUBLE = 32, + BYTECODE_OP_LT_DOUBLE = 33, + BYTECODE_OP_GE_DOUBLE = 34, + BYTECODE_OP_LE_DOUBLE = 35, + + /* Mixed S64-double binary comparators */ + BYTECODE_OP_EQ_DOUBLE_S64 = 36, + BYTECODE_OP_NE_DOUBLE_S64 = 37, + BYTECODE_OP_GT_DOUBLE_S64 = 38, + BYTECODE_OP_LT_DOUBLE_S64 = 39, + BYTECODE_OP_GE_DOUBLE_S64 = 40, + BYTECODE_OP_LE_DOUBLE_S64 = 41, + + BYTECODE_OP_EQ_S64_DOUBLE = 42, + BYTECODE_OP_NE_S64_DOUBLE = 43, + BYTECODE_OP_GT_S64_DOUBLE = 44, + BYTECODE_OP_LT_S64_DOUBLE 
= 45, + BYTECODE_OP_GE_S64_DOUBLE = 46, + BYTECODE_OP_LE_S64_DOUBLE = 47, + + /* unary */ + BYTECODE_OP_UNARY_PLUS = 48, + BYTECODE_OP_UNARY_MINUS = 49, + BYTECODE_OP_UNARY_NOT = 50, + BYTECODE_OP_UNARY_PLUS_S64 = 51, + BYTECODE_OP_UNARY_MINUS_S64 = 52, + BYTECODE_OP_UNARY_NOT_S64 = 53, + BYTECODE_OP_UNARY_PLUS_DOUBLE = 54, + BYTECODE_OP_UNARY_MINUS_DOUBLE = 55, + BYTECODE_OP_UNARY_NOT_DOUBLE = 56, + + /* logical */ + BYTECODE_OP_AND = 57, + BYTECODE_OP_OR = 58, + + /* load field ref */ + BYTECODE_OP_LOAD_FIELD_REF = 59, + BYTECODE_OP_LOAD_FIELD_REF_STRING = 60, + BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE = 61, + BYTECODE_OP_LOAD_FIELD_REF_S64 = 62, + BYTECODE_OP_LOAD_FIELD_REF_DOUBLE = 63, + + /* load immediate from operand */ + BYTECODE_OP_LOAD_STRING = 64, + BYTECODE_OP_LOAD_S64 = 65, + BYTECODE_OP_LOAD_DOUBLE = 66, + + /* cast */ + BYTECODE_OP_CAST_TO_S64 = 67, + BYTECODE_OP_CAST_DOUBLE_TO_S64 = 68, + BYTECODE_OP_CAST_NOP = 69, + + /* get context ref */ + BYTECODE_OP_GET_CONTEXT_REF = 70, + BYTECODE_OP_GET_CONTEXT_REF_STRING = 71, + BYTECODE_OP_GET_CONTEXT_REF_S64 = 72, + BYTECODE_OP_GET_CONTEXT_REF_DOUBLE = 73, + + /* load userspace field ref */ + BYTECODE_OP_LOAD_FIELD_REF_USER_STRING = 74, + BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE = 75, + + /* + * load immediate star globbing pattern (literal string) + * from immediate + */ + BYTECODE_OP_LOAD_STAR_GLOB_STRING = 76, + + /* globbing pattern binary operator: apply to */ + BYTECODE_OP_EQ_STAR_GLOB_STRING = 77, + BYTECODE_OP_NE_STAR_GLOB_STRING = 78, + + /* + * Instructions for recursive traversal through composed types. 
+ */ + BYTECODE_OP_GET_CONTEXT_ROOT = 79, + BYTECODE_OP_GET_APP_CONTEXT_ROOT = 80, + BYTECODE_OP_GET_PAYLOAD_ROOT = 81, + + BYTECODE_OP_GET_SYMBOL = 82, + BYTECODE_OP_GET_SYMBOL_FIELD = 83, + BYTECODE_OP_GET_INDEX_U16 = 84, + BYTECODE_OP_GET_INDEX_U64 = 85, + + BYTECODE_OP_LOAD_FIELD = 86, + BYTECODE_OP_LOAD_FIELD_S8 = 87, + BYTECODE_OP_LOAD_FIELD_S16 = 88, + BYTECODE_OP_LOAD_FIELD_S32 = 89, + BYTECODE_OP_LOAD_FIELD_S64 = 90, + BYTECODE_OP_LOAD_FIELD_U8 = 91, + BYTECODE_OP_LOAD_FIELD_U16 = 92, + BYTECODE_OP_LOAD_FIELD_U32 = 93, + BYTECODE_OP_LOAD_FIELD_U64 = 94, + BYTECODE_OP_LOAD_FIELD_STRING = 95, + BYTECODE_OP_LOAD_FIELD_SEQUENCE = 96, + BYTECODE_OP_LOAD_FIELD_DOUBLE = 97, + + BYTECODE_OP_UNARY_BIT_NOT = 98, + + BYTECODE_OP_RETURN_S64 = 99, + + NR_BYTECODE_OPS, +}; + +typedef uint8_t bytecode_opcode_t; + +struct load_op { + bytecode_opcode_t op; + /* + * data to load. Size known by enum bytecode_opcode and null-term char. + */ + char data[0]; +} __attribute__((packed)); + +struct binary_op { + bytecode_opcode_t op; +} __attribute__((packed)); + +struct unary_op { + bytecode_opcode_t op; +} __attribute__((packed)); + +/* skip_offset is absolute from start of bytecode */ +struct logical_op { + bytecode_opcode_t op; + uint16_t skip_offset; /* bytecode insn, if skip second test */ +} __attribute__((packed)); + +struct cast_op { + bytecode_opcode_t op; +} __attribute__((packed)); + +struct return_op { + bytecode_opcode_t op; +} __attribute__((packed)); + +#endif /* _BYTECODE_H */ diff --git a/liblttng-ust/context-internal.h b/liblttng-ust/context-internal.h new file mode 100644 index 00000000..79c88644 --- /dev/null +++ b/liblttng-ust/context-internal.h @@ -0,0 +1,32 @@ +#ifndef _LTTNG_UST_CONTEXT_INTERNAL_H +#define _LTTNG_UST_CONTEXT_INTERNAL_H + +/* + * ust-events-internal.h + * + * Copyright 2020 (c) - Francis Deslauriers + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files 
(the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include + +int lttng_context_init_all(struct lttng_ctx **ctx); + +#endif /* _LTTNG_UST_CONTEXT_INTERNAL_H */ diff --git a/liblttng-ust/context-provider-internal.h b/liblttng-ust/context-provider-internal.h new file mode 100644 index 00000000..62487c33 --- /dev/null +++ b/liblttng-ust/context-provider-internal.h @@ -0,0 +1,37 @@ +#ifndef _LTTNG_UST_CONTEXT_PROVIDER_INTERNAL_H +#define _LTTNG_UST_CONTEXT_PROVIDER_INTERNAL_H + +/* + * Copyright 2019 - Francis Deslauriers + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or 
substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include + +void lttng_ust_context_set_trigger_group_provider(const char *name, + size_t (*get_size)(struct lttng_ctx_field *field, size_t offset), + void (*record)(struct lttng_ctx_field *field, + struct lttng_ust_lib_ring_buffer_ctx *ctx, + struct lttng_channel *chan), + void (*get_value)(struct lttng_ctx_field *field, + struct lttng_ctx_value *value)); + +#endif /* _LTTNG_UST_CONTEXT_PROVIDER_INTERNAL_H */ diff --git a/liblttng-ust/filter-bytecode.h b/liblttng-ust/filter-bytecode.h deleted file mode 100644 index 59e84555..00000000 --- a/liblttng-ust/filter-bytecode.h +++ /dev/null @@ -1,249 +0,0 @@ -#ifndef _FILTER_BYTECODE_H -#define _FILTER_BYTECODE_H - -/* - * filter-bytecode.h - * - * LTTng filter bytecode - * - * Copyright 2012-2016 - Mathieu Desnoyers - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include -#include - -#ifndef LTTNG_PACKED -#error "LTTNG_PACKED should be defined" -#endif - -/* - * offsets are absolute from start of bytecode. - */ - -struct field_ref { - /* Initially, symbol offset. After link, field offset. */ - uint16_t offset; -} __attribute__((packed)); - -struct get_symbol { - /* Symbol offset. */ - uint16_t offset; -} LTTNG_PACKED; - -struct get_index_u16 { - uint16_t index; -} LTTNG_PACKED; - -struct get_index_u64 { - uint64_t index; -} LTTNG_PACKED; - -struct literal_numeric { - int64_t v; -} __attribute__((packed)); - -struct literal_double { - double v; -} __attribute__((packed)); - -struct literal_string { - char string[0]; -} __attribute__((packed)); - -enum filter_op { - FILTER_OP_UNKNOWN = 0, - - FILTER_OP_RETURN = 1, - - /* binary */ - FILTER_OP_MUL = 2, - FILTER_OP_DIV = 3, - FILTER_OP_MOD = 4, - FILTER_OP_PLUS = 5, - FILTER_OP_MINUS = 6, - FILTER_OP_BIT_RSHIFT = 7, - FILTER_OP_BIT_LSHIFT = 8, - FILTER_OP_BIT_AND = 9, - FILTER_OP_BIT_OR = 10, - FILTER_OP_BIT_XOR = 11, - - /* binary comparators */ - FILTER_OP_EQ = 12, - FILTER_OP_NE = 13, - FILTER_OP_GT = 14, - FILTER_OP_LT = 15, - FILTER_OP_GE = 16, - FILTER_OP_LE = 17, - - /* string binary comparator: apply to */ - FILTER_OP_EQ_STRING = 18, - FILTER_OP_NE_STRING = 19, - FILTER_OP_GT_STRING = 20, - FILTER_OP_LT_STRING = 21, - FILTER_OP_GE_STRING = 22, - FILTER_OP_LE_STRING = 23, - - /* s64 binary comparator */ - FILTER_OP_EQ_S64 = 24, - FILTER_OP_NE_S64 = 25, - FILTER_OP_GT_S64 = 26, - 
FILTER_OP_LT_S64 = 27, - FILTER_OP_GE_S64 = 28, - FILTER_OP_LE_S64 = 29, - - /* double binary comparator */ - FILTER_OP_EQ_DOUBLE = 30, - FILTER_OP_NE_DOUBLE = 31, - FILTER_OP_GT_DOUBLE = 32, - FILTER_OP_LT_DOUBLE = 33, - FILTER_OP_GE_DOUBLE = 34, - FILTER_OP_LE_DOUBLE = 35, - - /* Mixed S64-double binary comparators */ - FILTER_OP_EQ_DOUBLE_S64 = 36, - FILTER_OP_NE_DOUBLE_S64 = 37, - FILTER_OP_GT_DOUBLE_S64 = 38, - FILTER_OP_LT_DOUBLE_S64 = 39, - FILTER_OP_GE_DOUBLE_S64 = 40, - FILTER_OP_LE_DOUBLE_S64 = 41, - - FILTER_OP_EQ_S64_DOUBLE = 42, - FILTER_OP_NE_S64_DOUBLE = 43, - FILTER_OP_GT_S64_DOUBLE = 44, - FILTER_OP_LT_S64_DOUBLE = 45, - FILTER_OP_GE_S64_DOUBLE = 46, - FILTER_OP_LE_S64_DOUBLE = 47, - - /* unary */ - FILTER_OP_UNARY_PLUS = 48, - FILTER_OP_UNARY_MINUS = 49, - FILTER_OP_UNARY_NOT = 50, - FILTER_OP_UNARY_PLUS_S64 = 51, - FILTER_OP_UNARY_MINUS_S64 = 52, - FILTER_OP_UNARY_NOT_S64 = 53, - FILTER_OP_UNARY_PLUS_DOUBLE = 54, - FILTER_OP_UNARY_MINUS_DOUBLE = 55, - FILTER_OP_UNARY_NOT_DOUBLE = 56, - - /* logical */ - FILTER_OP_AND = 57, - FILTER_OP_OR = 58, - - /* load field ref */ - FILTER_OP_LOAD_FIELD_REF = 59, - FILTER_OP_LOAD_FIELD_REF_STRING = 60, - FILTER_OP_LOAD_FIELD_REF_SEQUENCE = 61, - FILTER_OP_LOAD_FIELD_REF_S64 = 62, - FILTER_OP_LOAD_FIELD_REF_DOUBLE = 63, - - /* load immediate from operand */ - FILTER_OP_LOAD_STRING = 64, - FILTER_OP_LOAD_S64 = 65, - FILTER_OP_LOAD_DOUBLE = 66, - - /* cast */ - FILTER_OP_CAST_TO_S64 = 67, - FILTER_OP_CAST_DOUBLE_TO_S64 = 68, - FILTER_OP_CAST_NOP = 69, - - /* get context ref */ - FILTER_OP_GET_CONTEXT_REF = 70, - FILTER_OP_GET_CONTEXT_REF_STRING = 71, - FILTER_OP_GET_CONTEXT_REF_S64 = 72, - FILTER_OP_GET_CONTEXT_REF_DOUBLE = 73, - - /* load userspace field ref */ - FILTER_OP_LOAD_FIELD_REF_USER_STRING = 74, - FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE = 75, - - /* - * load immediate star globbing pattern (literal string) - * from immediate - */ - FILTER_OP_LOAD_STAR_GLOB_STRING = 76, - - /* globbing pattern binary 
operator: apply to */ - FILTER_OP_EQ_STAR_GLOB_STRING = 77, - FILTER_OP_NE_STAR_GLOB_STRING = 78, - - /* - * Instructions for recursive traversal through composed types. - */ - FILTER_OP_GET_CONTEXT_ROOT = 79, - FILTER_OP_GET_APP_CONTEXT_ROOT = 80, - FILTER_OP_GET_PAYLOAD_ROOT = 81, - - FILTER_OP_GET_SYMBOL = 82, - FILTER_OP_GET_SYMBOL_FIELD = 83, - FILTER_OP_GET_INDEX_U16 = 84, - FILTER_OP_GET_INDEX_U64 = 85, - - FILTER_OP_LOAD_FIELD = 86, - FILTER_OP_LOAD_FIELD_S8 = 87, - FILTER_OP_LOAD_FIELD_S16 = 88, - FILTER_OP_LOAD_FIELD_S32 = 89, - FILTER_OP_LOAD_FIELD_S64 = 90, - FILTER_OP_LOAD_FIELD_U8 = 91, - FILTER_OP_LOAD_FIELD_U16 = 92, - FILTER_OP_LOAD_FIELD_U32 = 93, - FILTER_OP_LOAD_FIELD_U64 = 94, - FILTER_OP_LOAD_FIELD_STRING = 95, - FILTER_OP_LOAD_FIELD_SEQUENCE = 96, - FILTER_OP_LOAD_FIELD_DOUBLE = 97, - - FILTER_OP_UNARY_BIT_NOT = 98, - - FILTER_OP_RETURN_S64 = 99, - - NR_FILTER_OPS, -}; - -typedef uint8_t filter_opcode_t; - -struct load_op { - filter_opcode_t op; - char data[0]; - /* data to load. Size known by enum filter_opcode and null-term char. 
*/ -} __attribute__((packed)); - -struct binary_op { - filter_opcode_t op; -} __attribute__((packed)); - -struct unary_op { - filter_opcode_t op; -} __attribute__((packed)); - -/* skip_offset is absolute from start of bytecode */ -struct logical_op { - filter_opcode_t op; - uint16_t skip_offset; /* bytecode insn, if skip second test */ -} __attribute__((packed)); - -struct cast_op { - filter_opcode_t op; -} __attribute__((packed)); - -struct return_op { - filter_opcode_t op; -} __attribute__((packed)); - -#endif /* _FILTER_BYTECODE_H */ diff --git a/liblttng-ust/lttng-filter-interpreter.c b/liblttng-ust/lttng-bytecode-interpreter.c similarity index 67% rename from liblttng-ust/lttng-filter-interpreter.c rename to liblttng-ust/lttng-bytecode-interpreter.c index 5255bb73..21072972 100644 --- a/liblttng-ust/lttng-filter-interpreter.c +++ b/liblttng-ust/lttng-bytecode-interpreter.c @@ -1,7 +1,7 @@ /* - * lttng-filter-interpreter.c + * lttng-bytecode-interpreter.c * - * LTTng UST filter interpreter. + * LTTng UST bytecode interpreter. * * Copyright (C) 2010-2016 Mathieu Desnoyers * @@ -29,9 +29,13 @@ #include #include #include -#include "lttng-filter.h" + +#include + +#include "lttng-bytecode.h" #include "string-utils.h" + /* * -1: wildcard found. * -2: unknown escape char. 
@@ -161,10 +165,17 @@ int stack_strcmp(struct estack *stack, int top, const char *cmp_type) return diff; } -uint64_t lttng_filter_false(void *filter_data, +uint64_t lttng_bytecode_filter_interpret_false(void *filter_data, const char *filter_stack_data) { - return LTTNG_FILTER_DISCARD; + return LTTNG_INTERPRETER_DISCARD; +} + +uint64_t lttng_bytecode_capture_interpret_false(void *capture_data, + const char *capture_stack_data, + struct lttng_interpreter_output *output) +{ + return LTTNG_INTERPRETER_DISCARD; } #ifdef INTERPRETER_USE_SWITCH @@ -178,9 +189,9 @@ uint64_t lttng_filter_false(void *filter_data, for (pc = next_pc = start_pc; pc - start_pc < bytecode->len; \ pc = next_pc) { \ dbg_printf("Executing op %s (%u)\n", \ - print_op((unsigned int) *(filter_opcode_t *) pc), \ - (unsigned int) *(filter_opcode_t *) pc); \ - switch (*(filter_opcode_t *) pc) { + print_op((unsigned int) *(bytecode_opcode_t *) pc), \ + (unsigned int) *(bytecode_opcode_t *) pc); \ + switch (*(bytecode_opcode_t *) pc) { #define OP(name) jump_target_##name: __attribute__((unused)); \ case name @@ -204,14 +215,14 @@ uint64_t lttng_filter_false(void *filter_data, pc = next_pc = start_pc; \ if (unlikely(pc - start_pc >= bytecode->len)) \ goto end; \ - goto *dispatch[*(filter_opcode_t *) pc]; + goto *dispatch[*(bytecode_opcode_t *) pc]; #define OP(name) \ LABEL_##name #define PO \ pc = next_pc; \ - goto *dispatch[*(filter_opcode_t *) pc]; + goto *dispatch[*(bytecode_opcode_t *) pc]; #define END_OP @@ -220,6 +231,9 @@ LABEL_##name #endif +#define IS_INTEGER_REGISTER(reg_type) \ + (reg_type == REG_U64 || reg_type == REG_S64) + static int context_get_index(struct lttng_ctx *ctx, struct load_ptr *ptr, uint32_t idx) @@ -232,8 +246,7 @@ static int context_get_index(struct lttng_ctx *ctx, ctx_field = &ctx->fields[idx]; field = &ctx_field->event_field; ptr->type = LOAD_OBJECT; - /* field is only used for types nested within variants. 
*/ - ptr->field = NULL; + ptr->field = field; switch (field->type.atype) { case atype_integer: @@ -260,11 +273,11 @@ static int context_get_index(struct lttng_ctx *ctx, } ctx_field->get_value(ctx_field, &v); if (itype->signedness) { - ptr->object_type = OBJECT_TYPE_S64; + ptr->object_type = OBJECT_TYPE_SIGNED_ENUM; ptr->u.s64 = v.u.s64; ptr->ptr = &ptr->u.s64; } else { - ptr->object_type = OBJECT_TYPE_U64; + ptr->object_type = OBJECT_TYPE_UNSIGNED_ENUM; ptr->u.u64 = v.u.s64; /* Cast. */ ptr->ptr = &ptr->u.u64; } @@ -338,12 +351,25 @@ static int context_get_index(struct lttng_ctx *ctx, switch (v.sel) { case LTTNG_UST_DYNAMIC_TYPE_NONE: return -EINVAL; + case LTTNG_UST_DYNAMIC_TYPE_U8: + case LTTNG_UST_DYNAMIC_TYPE_U16: + case LTTNG_UST_DYNAMIC_TYPE_U32: + case LTTNG_UST_DYNAMIC_TYPE_U64: + ptr->object_type = OBJECT_TYPE_U64; + ptr->u.u64 = v.u.u64; + ptr->ptr = &ptr->u.u64; + dbg_printf("context get index dynamic u64 %" PRIi64 "\n", ptr->u.u64); + break; + case LTTNG_UST_DYNAMIC_TYPE_S8: + case LTTNG_UST_DYNAMIC_TYPE_S16: + case LTTNG_UST_DYNAMIC_TYPE_S32: case LTTNG_UST_DYNAMIC_TYPE_S64: ptr->object_type = OBJECT_TYPE_S64; ptr->u.s64 = v.u.s64; ptr->ptr = &ptr->u.s64; dbg_printf("context get index dynamic s64 %" PRIi64 "\n", ptr->u.s64); break; + case LTTNG_UST_DYNAMIC_TYPE_FLOAT: case LTTNG_UST_DYNAMIC_TYPE_DOUBLE: ptr->object_type = OBJECT_TYPE_DOUBLE; ptr->u.d = v.u.d; @@ -356,7 +382,7 @@ static int context_get_index(struct lttng_ctx *ctx, dbg_printf("context get index dynamic string %s\n", (const char *) ptr->ptr); break; default: - dbg_printf("Filter warning: unknown dynamic type (%d).\n", (int) v.sel); + dbg_printf("Interpreter warning: unknown dynamic type (%d).\n", (int) v.sel); return -EINVAL; } break; @@ -370,21 +396,14 @@ static int context_get_index(struct lttng_ctx *ctx, return 0; } -static int dynamic_get_index(struct lttng_session *session, +static int dynamic_get_index(struct lttng_ctx *ctx, struct bytecode_runtime *runtime, uint64_t index, struct 
estack_entry *stack_top) { int ret; - const struct filter_get_index_data *gid; - - /* - * Types nested within variants need to perform dynamic lookup - * based on the field descriptions. LTTng-UST does not implement - * variants for now. - */ - if (stack_top->u.ptr.field) - return -EINVAL; - gid = (const struct filter_get_index_data *) &runtime->data[index]; + const struct bytecode_get_index_data *gid; + + gid = (const struct bytecode_get_index_data *) &runtime->data[index]; switch (stack_top->u.ptr.type) { case LOAD_OBJECT: switch (stack_top->u.ptr.object_type) { @@ -399,7 +418,8 @@ static int dynamic_get_index(struct lttng_session *session, stack_top->u.ptr.ptr = ptr; stack_top->u.ptr.object_type = gid->elem.type; stack_top->u.ptr.rev_bo = gid->elem.rev_bo; - /* field is only used for types nested within variants. */ + assert(stack_top->u.ptr.field->type.atype == atype_array || + stack_top->u.ptr.field->type.atype == atype_array_nestable); stack_top->u.ptr.field = NULL; break; } @@ -418,7 +438,8 @@ static int dynamic_get_index(struct lttng_session *session, stack_top->u.ptr.ptr = ptr; stack_top->u.ptr.object_type = gid->elem.type; stack_top->u.ptr.rev_bo = gid->elem.rev_bo; - /* field is only used for types nested within variants. 
*/ + assert(stack_top->u.ptr.field->type.atype == atype_sequence || + stack_top->u.ptr.field->type.atype == atype_sequence_nestable); stack_top->u.ptr.field = NULL; break; } @@ -437,9 +458,6 @@ static int dynamic_get_index(struct lttng_session *session, case LOAD_ROOT_CONTEXT: case LOAD_ROOT_APP_CONTEXT: /* Fall-through */ { - struct lttng_ctx *ctx; - - ctx = rcu_dereference(session->ctx); ret = context_get_index(ctx, &stack_top->u.ptr, gid->ctx_index); @@ -454,10 +472,13 @@ static int dynamic_get_index(struct lttng_session *session, stack_top->u.ptr.ptr = *(const char * const *) stack_top->u.ptr.ptr; stack_top->u.ptr.object_type = gid->elem.type; stack_top->u.ptr.type = LOAD_OBJECT; - /* field is only used for types nested within variants. */ - stack_top->u.ptr.field = NULL; + stack_top->u.ptr.field = gid->field; + stack_top->u.ptr.rev_bo = gid->elem.rev_bo; break; } + + stack_top->type = REG_PTR; + return 0; end: @@ -475,7 +496,7 @@ static int dynamic_load_field(struct estack_entry *stack_top) case LOAD_ROOT_APP_CONTEXT: case LOAD_ROOT_PAYLOAD: default: - dbg_printf("Filter warning: cannot load root, missing field name.\n"); + dbg_printf("Interpreter warning: cannot load root, missing field name.\n"); ret = -EINVAL; goto end; } @@ -521,10 +542,22 @@ static int dynamic_load_field(struct estack_entry *stack_top) stack_top->type = REG_S64; break; } + case OBJECT_TYPE_SIGNED_ENUM: + { + int64_t tmp; + + dbg_printf("op load field signed enumeration\n"); + tmp = *(int64_t *) stack_top->u.ptr.ptr; + if (stack_top->u.ptr.rev_bo) + tmp = bswap_64(tmp); + stack_top->u.v = tmp; + stack_top->type = REG_S64; + break; + } case OBJECT_TYPE_U8: dbg_printf("op load field u8\n"); stack_top->u.v = *(uint8_t *) stack_top->u.ptr.ptr; - stack_top->type = REG_S64; + stack_top->type = REG_U64; break; case OBJECT_TYPE_U16: { @@ -535,7 +568,7 @@ static int dynamic_load_field(struct estack_entry *stack_top) if (stack_top->u.ptr.rev_bo) tmp = bswap_16(tmp); stack_top->u.v = tmp; - 
stack_top->type = REG_S64; + stack_top->type = REG_U64; break; } case OBJECT_TYPE_U32: @@ -547,7 +580,7 @@ static int dynamic_load_field(struct estack_entry *stack_top) if (stack_top->u.ptr.rev_bo) tmp = bswap_32(tmp); stack_top->u.v = tmp; - stack_top->type = REG_S64; + stack_top->type = REG_U64; break; } case OBJECT_TYPE_U64: @@ -559,7 +592,19 @@ static int dynamic_load_field(struct estack_entry *stack_top) if (stack_top->u.ptr.rev_bo) tmp = bswap_64(tmp); stack_top->u.v = tmp; - stack_top->type = REG_S64; + stack_top->type = REG_U64; + break; + } + case OBJECT_TYPE_UNSIGNED_ENUM: + { + uint64_t tmp; + + dbg_printf("op load field unsigned enumeration\n"); + tmp = *(uint64_t *) stack_top->u.ptr.ptr; + if (stack_top->u.ptr.rev_bo) + tmp = bswap_64(tmp); + stack_top->u.v = tmp; + stack_top->type = REG_U64; break; } case OBJECT_TYPE_DOUBLE: @@ -576,7 +621,7 @@ static int dynamic_load_field(struct estack_entry *stack_top) str = (const char *) stack_top->u.ptr.ptr; stack_top->u.s.str = str; if (unlikely(!stack_top->u.s.str)) { - dbg_printf("Filter warning: loading a NULL string.\n"); + dbg_printf("Interpreter warning: loading a NULL string.\n"); ret = -EINVAL; goto end; } @@ -596,7 +641,7 @@ static int dynamic_load_field(struct estack_entry *stack_top) stack_top->u.s.str = *(const char **) (ptr + sizeof(unsigned long)); stack_top->type = REG_STRING; if (unlikely(!stack_top->u.s.str)) { - dbg_printf("Filter warning: loading a NULL sequence.\n"); + dbg_printf("Interpreter warning: loading a NULL sequence.\n"); ret = -EINVAL; goto end; } @@ -625,16 +670,107 @@ end: return ret; } +static +int lttng_bytecode_interpret_format_output(struct estack_entry *ax, + struct lttng_interpreter_output *output) +{ + int ret; + +again: + switch (ax->type) { + case REG_S64: + output->type = LTTNG_INTERPRETER_TYPE_S64; + output->u.s = ax->u.v; + break; + case REG_U64: + output->type = LTTNG_INTERPRETER_TYPE_U64; + output->u.u = (uint64_t) ax->u.v; + break; + case REG_DOUBLE: + output->type 
= LTTNG_INTERPRETER_TYPE_DOUBLE; + output->u.d = ax->u.d; + break; + case REG_STRING: + output->type = LTTNG_INTERPRETER_TYPE_STRING; + output->u.str.str = ax->u.s.str; + output->u.str.len = ax->u.s.seq_len; + break; + case REG_PTR: + switch (ax->u.ptr.object_type) { + case OBJECT_TYPE_S8: + case OBJECT_TYPE_S16: + case OBJECT_TYPE_S32: + case OBJECT_TYPE_S64: + case OBJECT_TYPE_U8: + case OBJECT_TYPE_U16: + case OBJECT_TYPE_U32: + case OBJECT_TYPE_U64: + case OBJECT_TYPE_DOUBLE: + case OBJECT_TYPE_STRING: + case OBJECT_TYPE_STRING_SEQUENCE: + ret = dynamic_load_field(ax); + if (ret) + return ret; + /* Retry after loading ptr into stack top. */ + goto again; + case OBJECT_TYPE_SEQUENCE: + output->type = LTTNG_INTERPRETER_TYPE_SEQUENCE; + output->u.sequence.ptr = *(const char **) (ax->u.ptr.ptr + sizeof(unsigned long)); + output->u.sequence.nr_elem = *(unsigned long *) ax->u.ptr.ptr; + output->u.sequence.nested_type = ax->u.ptr.field->type.u.sequence_nestable.elem_type; + break; + case OBJECT_TYPE_ARRAY: + /* Skip count (unsigned long) */ + output->type = LTTNG_INTERPRETER_TYPE_SEQUENCE; + output->u.sequence.ptr = *(const char **) (ax->u.ptr.ptr + sizeof(unsigned long)); + output->u.sequence.nr_elem = ax->u.ptr.field->type.u.array_nestable.length; + output->u.sequence.nested_type = ax->u.ptr.field->type.u.array_nestable.elem_type; + break; + case OBJECT_TYPE_SIGNED_ENUM: + ret = dynamic_load_field(ax); + if (ret) + return ret; + output->type = LTTNG_INTERPRETER_TYPE_SIGNED_ENUM; + output->u.s = ax->u.v; + break; + case OBJECT_TYPE_UNSIGNED_ENUM: + ret = dynamic_load_field(ax); + if (ret) + return ret; + output->type = LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM; + output->u.u = ax->u.v; + break; + case OBJECT_TYPE_STRUCT: + case OBJECT_TYPE_VARIANT: + default: + return -EINVAL; + } + + break; + case REG_STAR_GLOB_STRING: + case REG_UNKNOWN: + default: + return -EINVAL; + } + + return LTTNG_INTERPRETER_RECORD_FLAG; +} + /* - * Return 0 (discard), or raise the 0x1 flag (log 
event). - * Currently, other flags are kept for future extensions and have no - * effect. + * For `output` equal to NULL: + * Return 0 (discard), or raise the 0x1 flag (log event). + * Currently, other flags are kept for future extensions and have no + * effect. + * For `output` not equal to NULL: + * Return 0 on success, negative error value on error. */ -uint64_t lttng_filter_interpret_bytecode(void *filter_data, - const char *filter_stack_data) +static +uint64_t bytecode_interpret(void *interpreter_data, + const char *interpreter_stack_data, + struct lttng_interpreter_output *output) { - struct bytecode_runtime *bytecode = filter_data; - struct lttng_session *session = bytecode->p.session; + struct bytecode_runtime *bytecode = interpreter_data; + struct lttng_ctx *ctx = rcu_dereference(*bytecode->p.pctx); void *pc, *next_pc, *start_pc; int ret = -EINVAL; uint64_t retval = 0; @@ -642,166 +778,175 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, struct estack *stack = &_stack; register int64_t ax = 0, bx = 0; register enum entry_type ax_t = REG_UNKNOWN, bx_t = REG_UNKNOWN; - register int top = FILTER_STACK_EMPTY; + register int top = INTERPRETER_STACK_EMPTY; #ifndef INTERPRETER_USE_SWITCH - static void *dispatch[NR_FILTER_OPS] = { - [ FILTER_OP_UNKNOWN ] = &&LABEL_FILTER_OP_UNKNOWN, + static void *dispatch[NR_BYTECODE_OPS] = { + [ BYTECODE_OP_UNKNOWN ] = &&LABEL_BYTECODE_OP_UNKNOWN, - [ FILTER_OP_RETURN ] = &&LABEL_FILTER_OP_RETURN, + [ BYTECODE_OP_RETURN ] = &&LABEL_BYTECODE_OP_RETURN, /* binary */ - [ FILTER_OP_MUL ] = &&LABEL_FILTER_OP_MUL, - [ FILTER_OP_DIV ] = &&LABEL_FILTER_OP_DIV, - [ FILTER_OP_MOD ] = &&LABEL_FILTER_OP_MOD, - [ FILTER_OP_PLUS ] = &&LABEL_FILTER_OP_PLUS, - [ FILTER_OP_MINUS ] = &&LABEL_FILTER_OP_MINUS, - [ FILTER_OP_BIT_RSHIFT ] = &&LABEL_FILTER_OP_BIT_RSHIFT, - [ FILTER_OP_BIT_LSHIFT ] = &&LABEL_FILTER_OP_BIT_LSHIFT, - [ FILTER_OP_BIT_AND ] = &&LABEL_FILTER_OP_BIT_AND, - [ FILTER_OP_BIT_OR ] = &&LABEL_FILTER_OP_BIT_OR, - [ 
FILTER_OP_BIT_XOR ] = &&LABEL_FILTER_OP_BIT_XOR, + [ BYTECODE_OP_MUL ] = &&LABEL_BYTECODE_OP_MUL, + [ BYTECODE_OP_DIV ] = &&LABEL_BYTECODE_OP_DIV, + [ BYTECODE_OP_MOD ] = &&LABEL_BYTECODE_OP_MOD, + [ BYTECODE_OP_PLUS ] = &&LABEL_BYTECODE_OP_PLUS, + [ BYTECODE_OP_MINUS ] = &&LABEL_BYTECODE_OP_MINUS, + [ BYTECODE_OP_BIT_RSHIFT ] = &&LABEL_BYTECODE_OP_BIT_RSHIFT, + [ BYTECODE_OP_BIT_LSHIFT ] = &&LABEL_BYTECODE_OP_BIT_LSHIFT, + [ BYTECODE_OP_BIT_AND ] = &&LABEL_BYTECODE_OP_BIT_AND, + [ BYTECODE_OP_BIT_OR ] = &&LABEL_BYTECODE_OP_BIT_OR, + [ BYTECODE_OP_BIT_XOR ] = &&LABEL_BYTECODE_OP_BIT_XOR, /* binary comparators */ - [ FILTER_OP_EQ ] = &&LABEL_FILTER_OP_EQ, - [ FILTER_OP_NE ] = &&LABEL_FILTER_OP_NE, - [ FILTER_OP_GT ] = &&LABEL_FILTER_OP_GT, - [ FILTER_OP_LT ] = &&LABEL_FILTER_OP_LT, - [ FILTER_OP_GE ] = &&LABEL_FILTER_OP_GE, - [ FILTER_OP_LE ] = &&LABEL_FILTER_OP_LE, + [ BYTECODE_OP_EQ ] = &&LABEL_BYTECODE_OP_EQ, + [ BYTECODE_OP_NE ] = &&LABEL_BYTECODE_OP_NE, + [ BYTECODE_OP_GT ] = &&LABEL_BYTECODE_OP_GT, + [ BYTECODE_OP_LT ] = &&LABEL_BYTECODE_OP_LT, + [ BYTECODE_OP_GE ] = &&LABEL_BYTECODE_OP_GE, + [ BYTECODE_OP_LE ] = &&LABEL_BYTECODE_OP_LE, /* string binary comparator */ - [ FILTER_OP_EQ_STRING ] = &&LABEL_FILTER_OP_EQ_STRING, - [ FILTER_OP_NE_STRING ] = &&LABEL_FILTER_OP_NE_STRING, - [ FILTER_OP_GT_STRING ] = &&LABEL_FILTER_OP_GT_STRING, - [ FILTER_OP_LT_STRING ] = &&LABEL_FILTER_OP_LT_STRING, - [ FILTER_OP_GE_STRING ] = &&LABEL_FILTER_OP_GE_STRING, - [ FILTER_OP_LE_STRING ] = &&LABEL_FILTER_OP_LE_STRING, + [ BYTECODE_OP_EQ_STRING ] = &&LABEL_BYTECODE_OP_EQ_STRING, + [ BYTECODE_OP_NE_STRING ] = &&LABEL_BYTECODE_OP_NE_STRING, + [ BYTECODE_OP_GT_STRING ] = &&LABEL_BYTECODE_OP_GT_STRING, + [ BYTECODE_OP_LT_STRING ] = &&LABEL_BYTECODE_OP_LT_STRING, + [ BYTECODE_OP_GE_STRING ] = &&LABEL_BYTECODE_OP_GE_STRING, + [ BYTECODE_OP_LE_STRING ] = &&LABEL_BYTECODE_OP_LE_STRING, /* globbing pattern binary comparator */ - [ FILTER_OP_EQ_STAR_GLOB_STRING ] = 
&&LABEL_FILTER_OP_EQ_STAR_GLOB_STRING, - [ FILTER_OP_NE_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_NE_STAR_GLOB_STRING, + [ BYTECODE_OP_EQ_STAR_GLOB_STRING ] = &&LABEL_BYTECODE_OP_EQ_STAR_GLOB_STRING, + [ BYTECODE_OP_NE_STAR_GLOB_STRING ] = &&LABEL_BYTECODE_OP_NE_STAR_GLOB_STRING, /* s64 binary comparator */ - [ FILTER_OP_EQ_S64 ] = &&LABEL_FILTER_OP_EQ_S64, - [ FILTER_OP_NE_S64 ] = &&LABEL_FILTER_OP_NE_S64, - [ FILTER_OP_GT_S64 ] = &&LABEL_FILTER_OP_GT_S64, - [ FILTER_OP_LT_S64 ] = &&LABEL_FILTER_OP_LT_S64, - [ FILTER_OP_GE_S64 ] = &&LABEL_FILTER_OP_GE_S64, - [ FILTER_OP_LE_S64 ] = &&LABEL_FILTER_OP_LE_S64, + [ BYTECODE_OP_EQ_S64 ] = &&LABEL_BYTECODE_OP_EQ_S64, + [ BYTECODE_OP_NE_S64 ] = &&LABEL_BYTECODE_OP_NE_S64, + [ BYTECODE_OP_GT_S64 ] = &&LABEL_BYTECODE_OP_GT_S64, + [ BYTECODE_OP_LT_S64 ] = &&LABEL_BYTECODE_OP_LT_S64, + [ BYTECODE_OP_GE_S64 ] = &&LABEL_BYTECODE_OP_GE_S64, + [ BYTECODE_OP_LE_S64 ] = &&LABEL_BYTECODE_OP_LE_S64, /* double binary comparator */ - [ FILTER_OP_EQ_DOUBLE ] = &&LABEL_FILTER_OP_EQ_DOUBLE, - [ FILTER_OP_NE_DOUBLE ] = &&LABEL_FILTER_OP_NE_DOUBLE, - [ FILTER_OP_GT_DOUBLE ] = &&LABEL_FILTER_OP_GT_DOUBLE, - [ FILTER_OP_LT_DOUBLE ] = &&LABEL_FILTER_OP_LT_DOUBLE, - [ FILTER_OP_GE_DOUBLE ] = &&LABEL_FILTER_OP_GE_DOUBLE, - [ FILTER_OP_LE_DOUBLE ] = &&LABEL_FILTER_OP_LE_DOUBLE, + [ BYTECODE_OP_EQ_DOUBLE ] = &&LABEL_BYTECODE_OP_EQ_DOUBLE, + [ BYTECODE_OP_NE_DOUBLE ] = &&LABEL_BYTECODE_OP_NE_DOUBLE, + [ BYTECODE_OP_GT_DOUBLE ] = &&LABEL_BYTECODE_OP_GT_DOUBLE, + [ BYTECODE_OP_LT_DOUBLE ] = &&LABEL_BYTECODE_OP_LT_DOUBLE, + [ BYTECODE_OP_GE_DOUBLE ] = &&LABEL_BYTECODE_OP_GE_DOUBLE, + [ BYTECODE_OP_LE_DOUBLE ] = &&LABEL_BYTECODE_OP_LE_DOUBLE, /* Mixed S64-double binary comparators */ - [ FILTER_OP_EQ_DOUBLE_S64 ] = &&LABEL_FILTER_OP_EQ_DOUBLE_S64, - [ FILTER_OP_NE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_NE_DOUBLE_S64, - [ FILTER_OP_GT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GT_DOUBLE_S64, - [ FILTER_OP_LT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LT_DOUBLE_S64, - [ 
FILTER_OP_GE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GE_DOUBLE_S64, - [ FILTER_OP_LE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LE_DOUBLE_S64, - - [ FILTER_OP_EQ_S64_DOUBLE ] = &&LABEL_FILTER_OP_EQ_S64_DOUBLE, - [ FILTER_OP_NE_S64_DOUBLE ] = &&LABEL_FILTER_OP_NE_S64_DOUBLE, - [ FILTER_OP_GT_S64_DOUBLE ] = &&LABEL_FILTER_OP_GT_S64_DOUBLE, - [ FILTER_OP_LT_S64_DOUBLE ] = &&LABEL_FILTER_OP_LT_S64_DOUBLE, - [ FILTER_OP_GE_S64_DOUBLE ] = &&LABEL_FILTER_OP_GE_S64_DOUBLE, - [ FILTER_OP_LE_S64_DOUBLE ] = &&LABEL_FILTER_OP_LE_S64_DOUBLE, + [ BYTECODE_OP_EQ_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_EQ_DOUBLE_S64, + [ BYTECODE_OP_NE_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_NE_DOUBLE_S64, + [ BYTECODE_OP_GT_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_GT_DOUBLE_S64, + [ BYTECODE_OP_LT_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_LT_DOUBLE_S64, + [ BYTECODE_OP_GE_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_GE_DOUBLE_S64, + [ BYTECODE_OP_LE_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_LE_DOUBLE_S64, + + [ BYTECODE_OP_EQ_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_EQ_S64_DOUBLE, + [ BYTECODE_OP_NE_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_NE_S64_DOUBLE, + [ BYTECODE_OP_GT_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_GT_S64_DOUBLE, + [ BYTECODE_OP_LT_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_LT_S64_DOUBLE, + [ BYTECODE_OP_GE_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_GE_S64_DOUBLE, + [ BYTECODE_OP_LE_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_LE_S64_DOUBLE, /* unary */ - [ FILTER_OP_UNARY_PLUS ] = &&LABEL_FILTER_OP_UNARY_PLUS, - [ FILTER_OP_UNARY_MINUS ] = &&LABEL_FILTER_OP_UNARY_MINUS, - [ FILTER_OP_UNARY_NOT ] = &&LABEL_FILTER_OP_UNARY_NOT, - [ FILTER_OP_UNARY_PLUS_S64 ] = &&LABEL_FILTER_OP_UNARY_PLUS_S64, - [ FILTER_OP_UNARY_MINUS_S64 ] = &&LABEL_FILTER_OP_UNARY_MINUS_S64, - [ FILTER_OP_UNARY_NOT_S64 ] = &&LABEL_FILTER_OP_UNARY_NOT_S64, - [ FILTER_OP_UNARY_PLUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_PLUS_DOUBLE, - [ FILTER_OP_UNARY_MINUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_MINUS_DOUBLE, - [ FILTER_OP_UNARY_NOT_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_NOT_DOUBLE, + [ BYTECODE_OP_UNARY_PLUS ] = 
&&LABEL_BYTECODE_OP_UNARY_PLUS, + [ BYTECODE_OP_UNARY_MINUS ] = &&LABEL_BYTECODE_OP_UNARY_MINUS, + [ BYTECODE_OP_UNARY_NOT ] = &&LABEL_BYTECODE_OP_UNARY_NOT, + [ BYTECODE_OP_UNARY_PLUS_S64 ] = &&LABEL_BYTECODE_OP_UNARY_PLUS_S64, + [ BYTECODE_OP_UNARY_MINUS_S64 ] = &&LABEL_BYTECODE_OP_UNARY_MINUS_S64, + [ BYTECODE_OP_UNARY_NOT_S64 ] = &&LABEL_BYTECODE_OP_UNARY_NOT_S64, + [ BYTECODE_OP_UNARY_PLUS_DOUBLE ] = &&LABEL_BYTECODE_OP_UNARY_PLUS_DOUBLE, + [ BYTECODE_OP_UNARY_MINUS_DOUBLE ] = &&LABEL_BYTECODE_OP_UNARY_MINUS_DOUBLE, + [ BYTECODE_OP_UNARY_NOT_DOUBLE ] = &&LABEL_BYTECODE_OP_UNARY_NOT_DOUBLE, /* logical */ - [ FILTER_OP_AND ] = &&LABEL_FILTER_OP_AND, - [ FILTER_OP_OR ] = &&LABEL_FILTER_OP_OR, + [ BYTECODE_OP_AND ] = &&LABEL_BYTECODE_OP_AND, + [ BYTECODE_OP_OR ] = &&LABEL_BYTECODE_OP_OR, /* load field ref */ - [ FILTER_OP_LOAD_FIELD_REF ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF, - [ FILTER_OP_LOAD_FIELD_REF_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_STRING, - [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_SEQUENCE, - [ FILTER_OP_LOAD_FIELD_REF_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_S64, - [ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_DOUBLE, + [ BYTECODE_OP_LOAD_FIELD_REF ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF, + [ BYTECODE_OP_LOAD_FIELD_REF_STRING ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_STRING, + [ BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE, + [ BYTECODE_OP_LOAD_FIELD_REF_S64 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_S64, + [ BYTECODE_OP_LOAD_FIELD_REF_DOUBLE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_DOUBLE, /* load from immediate operand */ - [ FILTER_OP_LOAD_STRING ] = &&LABEL_FILTER_OP_LOAD_STRING, - [ FILTER_OP_LOAD_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_LOAD_STAR_GLOB_STRING, - [ FILTER_OP_LOAD_S64 ] = &&LABEL_FILTER_OP_LOAD_S64, - [ FILTER_OP_LOAD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_DOUBLE, + [ BYTECODE_OP_LOAD_STRING ] = &&LABEL_BYTECODE_OP_LOAD_STRING, + [ 
BYTECODE_OP_LOAD_STAR_GLOB_STRING ] = &&LABEL_BYTECODE_OP_LOAD_STAR_GLOB_STRING, + [ BYTECODE_OP_LOAD_S64 ] = &&LABEL_BYTECODE_OP_LOAD_S64, + [ BYTECODE_OP_LOAD_DOUBLE ] = &&LABEL_BYTECODE_OP_LOAD_DOUBLE, /* cast */ - [ FILTER_OP_CAST_TO_S64 ] = &&LABEL_FILTER_OP_CAST_TO_S64, - [ FILTER_OP_CAST_DOUBLE_TO_S64 ] = &&LABEL_FILTER_OP_CAST_DOUBLE_TO_S64, - [ FILTER_OP_CAST_NOP ] = &&LABEL_FILTER_OP_CAST_NOP, + [ BYTECODE_OP_CAST_TO_S64 ] = &&LABEL_BYTECODE_OP_CAST_TO_S64, + [ BYTECODE_OP_CAST_DOUBLE_TO_S64 ] = &&LABEL_BYTECODE_OP_CAST_DOUBLE_TO_S64, + [ BYTECODE_OP_CAST_NOP ] = &&LABEL_BYTECODE_OP_CAST_NOP, /* get context ref */ - [ FILTER_OP_GET_CONTEXT_REF ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF, - [ FILTER_OP_GET_CONTEXT_REF_STRING ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_STRING, - [ FILTER_OP_GET_CONTEXT_REF_S64 ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_S64, - [ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_DOUBLE, + [ BYTECODE_OP_GET_CONTEXT_REF ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF, + [ BYTECODE_OP_GET_CONTEXT_REF_STRING ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF_STRING, + [ BYTECODE_OP_GET_CONTEXT_REF_S64 ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF_S64, + [ BYTECODE_OP_GET_CONTEXT_REF_DOUBLE ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF_DOUBLE, /* Instructions for recursive traversal through composed types. 
*/ - [ FILTER_OP_GET_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_CONTEXT_ROOT, - [ FILTER_OP_GET_APP_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_APP_CONTEXT_ROOT, - [ FILTER_OP_GET_PAYLOAD_ROOT ] = &&LABEL_FILTER_OP_GET_PAYLOAD_ROOT, - - [ FILTER_OP_GET_SYMBOL ] = &&LABEL_FILTER_OP_GET_SYMBOL, - [ FILTER_OP_GET_SYMBOL_FIELD ] = &&LABEL_FILTER_OP_GET_SYMBOL_FIELD, - [ FILTER_OP_GET_INDEX_U16 ] = &&LABEL_FILTER_OP_GET_INDEX_U16, - [ FILTER_OP_GET_INDEX_U64 ] = &&LABEL_FILTER_OP_GET_INDEX_U64, - - [ FILTER_OP_LOAD_FIELD ] = &&LABEL_FILTER_OP_LOAD_FIELD, - [ FILTER_OP_LOAD_FIELD_S8 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S8, - [ FILTER_OP_LOAD_FIELD_S16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S16, - [ FILTER_OP_LOAD_FIELD_S32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S32, - [ FILTER_OP_LOAD_FIELD_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S64, - [ FILTER_OP_LOAD_FIELD_U8 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U8, - [ FILTER_OP_LOAD_FIELD_U16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U16, - [ FILTER_OP_LOAD_FIELD_U32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U32, - [ FILTER_OP_LOAD_FIELD_U64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U64, - [ FILTER_OP_LOAD_FIELD_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_STRING, - [ FILTER_OP_LOAD_FIELD_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_SEQUENCE, - [ FILTER_OP_LOAD_FIELD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_DOUBLE, - - [ FILTER_OP_UNARY_BIT_NOT ] = &&LABEL_FILTER_OP_UNARY_BIT_NOT, - - [ FILTER_OP_RETURN_S64 ] = &&LABEL_FILTER_OP_RETURN_S64, + [ BYTECODE_OP_GET_CONTEXT_ROOT ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_ROOT, + [ BYTECODE_OP_GET_APP_CONTEXT_ROOT ] = &&LABEL_BYTECODE_OP_GET_APP_CONTEXT_ROOT, + [ BYTECODE_OP_GET_PAYLOAD_ROOT ] = &&LABEL_BYTECODE_OP_GET_PAYLOAD_ROOT, + + [ BYTECODE_OP_GET_SYMBOL ] = &&LABEL_BYTECODE_OP_GET_SYMBOL, + [ BYTECODE_OP_GET_SYMBOL_FIELD ] = &&LABEL_BYTECODE_OP_GET_SYMBOL_FIELD, + [ BYTECODE_OP_GET_INDEX_U16 ] = &&LABEL_BYTECODE_OP_GET_INDEX_U16, + [ BYTECODE_OP_GET_INDEX_U64 ] = &&LABEL_BYTECODE_OP_GET_INDEX_U64, + + [ BYTECODE_OP_LOAD_FIELD ] = 
&&LABEL_BYTECODE_OP_LOAD_FIELD, + [ BYTECODE_OP_LOAD_FIELD_S8 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S8, + [ BYTECODE_OP_LOAD_FIELD_S16 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S16, + [ BYTECODE_OP_LOAD_FIELD_S32 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S32, + [ BYTECODE_OP_LOAD_FIELD_S64 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S64, + [ BYTECODE_OP_LOAD_FIELD_U8 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U8, + [ BYTECODE_OP_LOAD_FIELD_U16 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U16, + [ BYTECODE_OP_LOAD_FIELD_U32 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U32, + [ BYTECODE_OP_LOAD_FIELD_U64 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U64, + [ BYTECODE_OP_LOAD_FIELD_STRING ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_STRING, + [ BYTECODE_OP_LOAD_FIELD_SEQUENCE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_SEQUENCE, + [ BYTECODE_OP_LOAD_FIELD_DOUBLE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_DOUBLE, + + [ BYTECODE_OP_UNARY_BIT_NOT ] = &&LABEL_BYTECODE_OP_UNARY_BIT_NOT, + + [ BYTECODE_OP_RETURN_S64 ] = &&LABEL_BYTECODE_OP_RETURN_S64, }; #endif /* #ifndef INTERPRETER_USE_SWITCH */ START_OP - OP(FILTER_OP_UNKNOWN): - OP(FILTER_OP_LOAD_FIELD_REF): + OP(BYTECODE_OP_UNKNOWN): + OP(BYTECODE_OP_LOAD_FIELD_REF): #ifdef INTERPRETER_USE_SWITCH default: #endif /* INTERPRETER_USE_SWITCH */ ERR("unknown bytecode op %u", - (unsigned int) *(filter_opcode_t *) pc); + (unsigned int) *(bytecode_opcode_t *) pc); ret = -EINVAL; goto end; - OP(FILTER_OP_RETURN): - /* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */ + OP(BYTECODE_OP_RETURN): + /* LTTNG_INTERPRETER_DISCARD or LTTNG_INTERPRETER_RECORD_FLAG */ /* Handle dynamic typing. 
*/ switch (estack_ax_t) { case REG_S64: + case REG_U64: retval = !!estack_ax_v; break; case REG_DOUBLE: case REG_STRING: + case REG_PTR: + if (!output) { + ret = -EINVAL; + goto end; + } + retval = 0; + break; case REG_STAR_GLOB_STRING: + case REG_UNKNOWN: default: ret = -EINVAL; goto end; @@ -809,39 +954,41 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, ret = 0; goto end; - OP(FILTER_OP_RETURN_S64): - /* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */ + OP(BYTECODE_OP_RETURN_S64): + /* LTTNG_INTERPRETER_DISCARD or LTTNG_INTERPRETER_RECORD_FLAG */ retval = !!estack_ax_v; ret = 0; goto end; /* binary */ - OP(FILTER_OP_MUL): - OP(FILTER_OP_DIV): - OP(FILTER_OP_MOD): - OP(FILTER_OP_PLUS): - OP(FILTER_OP_MINUS): + OP(BYTECODE_OP_MUL): + OP(BYTECODE_OP_DIV): + OP(BYTECODE_OP_MOD): + OP(BYTECODE_OP_PLUS): + OP(BYTECODE_OP_MINUS): ERR("unsupported bytecode op %u", - (unsigned int) *(filter_opcode_t *) pc); + (unsigned int) *(bytecode_opcode_t *) pc); ret = -EINVAL; goto end; - OP(FILTER_OP_EQ): + OP(BYTECODE_OP_EQ): { /* Dynamic typing. 
*/ switch (estack_ax_t) { - case REG_S64: + case REG_S64: /* Fall-through */ + case REG_U64: switch (estack_bx_t) { - case REG_S64: - JUMP_TO(FILTER_OP_EQ_S64); + case REG_S64: /* Fall-through */ + case REG_U64: + JUMP_TO(BYTECODE_OP_EQ_S64); case REG_DOUBLE: - JUMP_TO(FILTER_OP_EQ_DOUBLE_S64); + JUMP_TO(BYTECODE_OP_EQ_DOUBLE_S64); case REG_STRING: /* Fall-through */ case REG_STAR_GLOB_STRING: ret = -EINVAL; goto end; default: - ERR("Unknown filter register type (%d)", + ERR("Unknown interpreter register type (%d)", (int) estack_bx_t); ret = -EINVAL; goto end; @@ -849,16 +996,17 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, break; case REG_DOUBLE: switch (estack_bx_t) { - case REG_S64: - JUMP_TO(FILTER_OP_EQ_S64_DOUBLE); + case REG_S64: /* Fall-through */ + case REG_U64: + JUMP_TO(BYTECODE_OP_EQ_S64_DOUBLE); case REG_DOUBLE: - JUMP_TO(FILTER_OP_EQ_DOUBLE); + JUMP_TO(BYTECODE_OP_EQ_DOUBLE); case REG_STRING: /* Fall-through */ case REG_STAR_GLOB_STRING: ret = -EINVAL; goto end; default: - ERR("Unknown filter register type (%d)", + ERR("Unknown interpreter register type (%d)", (int) estack_bx_t); ret = -EINVAL; goto end; @@ -867,15 +1015,16 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, case REG_STRING: switch (estack_bx_t) { case REG_S64: /* Fall-through */ + case REG_U64: /* Fall-through */ case REG_DOUBLE: ret = -EINVAL; goto end; case REG_STRING: - JUMP_TO(FILTER_OP_EQ_STRING); + JUMP_TO(BYTECODE_OP_EQ_STRING); case REG_STAR_GLOB_STRING: - JUMP_TO(FILTER_OP_EQ_STAR_GLOB_STRING); + JUMP_TO(BYTECODE_OP_EQ_STAR_GLOB_STRING); default: - ERR("Unknown filter register type (%d)", + ERR("Unknown interpreter register type (%d)", (int) estack_bx_t); ret = -EINVAL; goto end; @@ -884,44 +1033,47 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, case REG_STAR_GLOB_STRING: switch (estack_bx_t) { case REG_S64: /* Fall-through */ + case REG_U64: /* Fall-through */ case REG_DOUBLE: ret = -EINVAL; goto end; case REG_STRING: - 
JUMP_TO(FILTER_OP_EQ_STAR_GLOB_STRING); + JUMP_TO(BYTECODE_OP_EQ_STAR_GLOB_STRING); case REG_STAR_GLOB_STRING: ret = -EINVAL; goto end; default: - ERR("Unknown filter register type (%d)", + ERR("Unknown interpreter register type (%d)", (int) estack_bx_t); ret = -EINVAL; goto end; } break; default: - ERR("Unknown filter register type (%d)", + ERR("Unknown interpreter register type (%d)", (int) estack_ax_t); ret = -EINVAL; goto end; } } - OP(FILTER_OP_NE): + OP(BYTECODE_OP_NE): { /* Dynamic typing. */ switch (estack_ax_t) { - case REG_S64: + case REG_S64: /* Fall-through */ + case REG_U64: switch (estack_bx_t) { - case REG_S64: - JUMP_TO(FILTER_OP_NE_S64); + case REG_S64: /* Fall-through */ + case REG_U64: + JUMP_TO(BYTECODE_OP_NE_S64); case REG_DOUBLE: - JUMP_TO(FILTER_OP_NE_DOUBLE_S64); + JUMP_TO(BYTECODE_OP_NE_DOUBLE_S64); case REG_STRING: /* Fall-through */ case REG_STAR_GLOB_STRING: ret = -EINVAL; goto end; default: - ERR("Unknown filter register type (%d)", + ERR("Unknown interpreter register type (%d)", (int) estack_bx_t); ret = -EINVAL; goto end; @@ -929,16 +1081,17 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, break; case REG_DOUBLE: switch (estack_bx_t) { - case REG_S64: - JUMP_TO(FILTER_OP_NE_S64_DOUBLE); + case REG_S64: /* Fall-through */ + case REG_U64: + JUMP_TO(BYTECODE_OP_NE_S64_DOUBLE); case REG_DOUBLE: - JUMP_TO(FILTER_OP_NE_DOUBLE); + JUMP_TO(BYTECODE_OP_NE_DOUBLE); case REG_STRING: /* Fall-through */ case REG_STAR_GLOB_STRING: ret = -EINVAL; goto end; default: - ERR("Unknown filter register type (%d)", + ERR("Unknown interpreter register type (%d)", (int) estack_bx_t); ret = -EINVAL; goto end; @@ -947,15 +1100,16 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, case REG_STRING: switch (estack_bx_t) { case REG_S64: /* Fall-through */ + case REG_U64: case REG_DOUBLE: ret = -EINVAL; goto end; case REG_STRING: - JUMP_TO(FILTER_OP_NE_STRING); + JUMP_TO(BYTECODE_OP_NE_STRING); case REG_STAR_GLOB_STRING: - 
JUMP_TO(FILTER_OP_NE_STAR_GLOB_STRING); + JUMP_TO(BYTECODE_OP_NE_STAR_GLOB_STRING); default: - ERR("Unknown filter register type (%d)", + ERR("Unknown interpreter register type (%d)", (int) estack_bx_t); ret = -EINVAL; goto end; @@ -964,44 +1118,47 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, case REG_STAR_GLOB_STRING: switch (estack_bx_t) { case REG_S64: /* Fall-through */ + case REG_U64: case REG_DOUBLE: ret = -EINVAL; goto end; case REG_STRING: - JUMP_TO(FILTER_OP_NE_STAR_GLOB_STRING); + JUMP_TO(BYTECODE_OP_NE_STAR_GLOB_STRING); case REG_STAR_GLOB_STRING: ret = -EINVAL; goto end; default: - ERR("Unknown filter register type (%d)", + ERR("Unknown interpreter register type (%d)", (int) estack_bx_t); ret = -EINVAL; goto end; } break; default: - ERR("Unknown filter register type (%d)", + ERR("Unknown interpreter register type (%d)", (int) estack_ax_t); ret = -EINVAL; goto end; } } - OP(FILTER_OP_GT): + OP(BYTECODE_OP_GT): { /* Dynamic typing. */ switch (estack_ax_t) { - case REG_S64: + case REG_S64: /* Fall-through */ + case REG_U64: switch (estack_bx_t) { - case REG_S64: - JUMP_TO(FILTER_OP_GT_S64); + case REG_S64: /* Fall-through */ + case REG_U64: + JUMP_TO(BYTECODE_OP_GT_S64); case REG_DOUBLE: - JUMP_TO(FILTER_OP_GT_DOUBLE_S64); + JUMP_TO(BYTECODE_OP_GT_DOUBLE_S64); case REG_STRING: /* Fall-through */ case REG_STAR_GLOB_STRING: ret = -EINVAL; goto end; default: - ERR("Unknown filter register type (%d)", + ERR("Unknown interpreter register type (%d)", (int) estack_bx_t); ret = -EINVAL; goto end; @@ -1009,16 +1166,17 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, break; case REG_DOUBLE: switch (estack_bx_t) { - case REG_S64: - JUMP_TO(FILTER_OP_GT_S64_DOUBLE); + case REG_S64: /* Fall-through */ + case REG_U64: + JUMP_TO(BYTECODE_OP_GT_S64_DOUBLE); case REG_DOUBLE: - JUMP_TO(FILTER_OP_GT_DOUBLE); + JUMP_TO(BYTECODE_OP_GT_DOUBLE); case REG_STRING: /* Fall-through */ case REG_STAR_GLOB_STRING: ret = -EINVAL; goto end; default: - 
ERR("Unknown filter register type (%d)", + ERR("Unknown interpreter register type (%d)", (int) estack_bx_t); ret = -EINVAL; goto end; @@ -1027,42 +1185,45 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, case REG_STRING: switch (estack_bx_t) { case REG_S64: /* Fall-through */ + case REG_U64: /* Fall-through */ case REG_DOUBLE: /* Fall-through */ case REG_STAR_GLOB_STRING: ret = -EINVAL; goto end; case REG_STRING: - JUMP_TO(FILTER_OP_GT_STRING); + JUMP_TO(BYTECODE_OP_GT_STRING); default: - ERR("Unknown filter register type (%d)", + ERR("Unknown interpreter register type (%d)", (int) estack_bx_t); ret = -EINVAL; goto end; } break; default: - ERR("Unknown filter register type (%d)", + ERR("Unknown interpreter register type (%d)", (int) estack_ax_t); ret = -EINVAL; goto end; } } - OP(FILTER_OP_LT): + OP(BYTECODE_OP_LT): { /* Dynamic typing. */ switch (estack_ax_t) { - case REG_S64: + case REG_S64: /* Fall-through */ + case REG_U64: switch (estack_bx_t) { - case REG_S64: - JUMP_TO(FILTER_OP_LT_S64); + case REG_S64: /* Fall-through */ + case REG_U64: + JUMP_TO(BYTECODE_OP_LT_S64); case REG_DOUBLE: - JUMP_TO(FILTER_OP_LT_DOUBLE_S64); + JUMP_TO(BYTECODE_OP_LT_DOUBLE_S64); case REG_STRING: /* Fall-through */ case REG_STAR_GLOB_STRING: ret = -EINVAL; goto end; default: - ERR("Unknown filter register type (%d)", + ERR("Unknown interpreter register type (%d)", (int) estack_bx_t); ret = -EINVAL; goto end; @@ -1070,16 +1231,17 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, break; case REG_DOUBLE: switch (estack_bx_t) { - case REG_S64: - JUMP_TO(FILTER_OP_LT_S64_DOUBLE); + case REG_S64: /* Fall-through */ + case REG_U64: + JUMP_TO(BYTECODE_OP_LT_S64_DOUBLE); case REG_DOUBLE: - JUMP_TO(FILTER_OP_LT_DOUBLE); + JUMP_TO(BYTECODE_OP_LT_DOUBLE); case REG_STRING: /* Fall-through */ case REG_STAR_GLOB_STRING: ret = -EINVAL; goto end; default: - ERR("Unknown filter register type (%d)", + ERR("Unknown interpreter register type (%d)", (int) estack_bx_t); ret = 
-EINVAL; goto end; @@ -1088,42 +1250,45 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, case REG_STRING: switch (estack_bx_t) { case REG_S64: /* Fall-through */ + case REG_U64: /* Fall-through */ case REG_DOUBLE: /* Fall-through */ case REG_STAR_GLOB_STRING: ret = -EINVAL; goto end; case REG_STRING: - JUMP_TO(FILTER_OP_LT_STRING); + JUMP_TO(BYTECODE_OP_LT_STRING); default: - ERR("Unknown filter register type (%d)", + ERR("Unknown interpreter register type (%d)", (int) estack_bx_t); ret = -EINVAL; goto end; } break; default: - ERR("Unknown filter register type (%d)", + ERR("Unknown interpreter register type (%d)", (int) estack_ax_t); ret = -EINVAL; goto end; } } - OP(FILTER_OP_GE): + OP(BYTECODE_OP_GE): { /* Dynamic typing. */ switch (estack_ax_t) { - case REG_S64: + case REG_S64: /* Fall-through */ + case REG_U64: switch (estack_bx_t) { - case REG_S64: - JUMP_TO(FILTER_OP_GE_S64); + case REG_S64: /* Fall-through */ + case REG_U64: + JUMP_TO(BYTECODE_OP_GE_S64); case REG_DOUBLE: - JUMP_TO(FILTER_OP_GE_DOUBLE_S64); + JUMP_TO(BYTECODE_OP_GE_DOUBLE_S64); case REG_STRING: /* Fall-through */ case REG_STAR_GLOB_STRING: ret = -EINVAL; goto end; default: - ERR("Unknown filter register type (%d)", + ERR("Unknown interpreter register type (%d)", (int) estack_bx_t); ret = -EINVAL; goto end; @@ -1131,16 +1296,17 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, break; case REG_DOUBLE: switch (estack_bx_t) { - case REG_S64: - JUMP_TO(FILTER_OP_GE_S64_DOUBLE); + case REG_S64: /* Fall-through */ + case REG_U64: + JUMP_TO(BYTECODE_OP_GE_S64_DOUBLE); case REG_DOUBLE: - JUMP_TO(FILTER_OP_GE_DOUBLE); + JUMP_TO(BYTECODE_OP_GE_DOUBLE); case REG_STRING: /* Fall-through */ case REG_STAR_GLOB_STRING: ret = -EINVAL; goto end; default: - ERR("Unknown filter register type (%d)", + ERR("Unknown interpreter register type (%d)", (int) estack_bx_t); ret = -EINVAL; goto end; @@ -1149,42 +1315,45 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, case 
REG_STRING: switch (estack_bx_t) { case REG_S64: /* Fall-through */ + case REG_U64: /* Fall-through */ case REG_DOUBLE: /* Fall-through */ case REG_STAR_GLOB_STRING: ret = -EINVAL; goto end; case REG_STRING: - JUMP_TO(FILTER_OP_GE_STRING); + JUMP_TO(BYTECODE_OP_GE_STRING); default: - ERR("Unknown filter register type (%d)", + ERR("Unknown interpreter register type (%d)", (int) estack_bx_t); ret = -EINVAL; goto end; } break; default: - ERR("Unknown filter register type (%d)", + ERR("Unknown interpreter register type (%d)", (int) estack_ax_t); ret = -EINVAL; goto end; } } - OP(FILTER_OP_LE): + OP(BYTECODE_OP_LE): { /* Dynamic typing. */ switch (estack_ax_t) { - case REG_S64: + case REG_S64: /* Fall-through */ + case REG_U64: switch (estack_bx_t) { - case REG_S64: - JUMP_TO(FILTER_OP_LE_S64); + case REG_S64: /* Fall-through */ + case REG_U64: + JUMP_TO(BYTECODE_OP_LE_S64); case REG_DOUBLE: - JUMP_TO(FILTER_OP_LE_DOUBLE_S64); + JUMP_TO(BYTECODE_OP_LE_DOUBLE_S64); case REG_STRING: /* Fall-through */ case REG_STAR_GLOB_STRING: ret = -EINVAL; goto end; default: - ERR("Unknown filter register type (%d)", + ERR("Unknown interpreter register type (%d)", (int) estack_bx_t); ret = -EINVAL; goto end; @@ -1192,16 +1361,17 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, break; case REG_DOUBLE: switch (estack_bx_t) { - case REG_S64: - JUMP_TO(FILTER_OP_LE_S64_DOUBLE); + case REG_S64: /* Fall-through */ + case REG_U64: + JUMP_TO(BYTECODE_OP_LE_S64_DOUBLE); case REG_DOUBLE: - JUMP_TO(FILTER_OP_LE_DOUBLE); + JUMP_TO(BYTECODE_OP_LE_DOUBLE); case REG_STRING: /* Fall-through */ case REG_STAR_GLOB_STRING: ret = -EINVAL; goto end; default: - ERR("Unknown filter register type (%d)", + ERR("Unknown interpreter register type (%d)", (int) estack_bx_t); ret = -EINVAL; goto end; @@ -1210,28 +1380,29 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, case REG_STRING: switch (estack_bx_t) { case REG_S64: /* Fall-through */ + case REG_U64: /* Fall-through */ case 
REG_DOUBLE: /* Fall-through */ case REG_STAR_GLOB_STRING: ret = -EINVAL; goto end; case REG_STRING: - JUMP_TO(FILTER_OP_LE_STRING); + JUMP_TO(BYTECODE_OP_LE_STRING); default: - ERR("Unknown filter register type (%d)", + ERR("Unknown interpreter register type (%d)", (int) estack_bx_t); ret = -EINVAL; goto end; } break; default: - ERR("Unknown filter register type (%d)", + ERR("Unknown interpreter register type (%d)", (int) estack_ax_t); ret = -EINVAL; goto end; } } - OP(FILTER_OP_EQ_STRING): + OP(BYTECODE_OP_EQ_STRING): { int res; @@ -1242,7 +1413,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, next_pc += sizeof(struct binary_op); PO; } - OP(FILTER_OP_NE_STRING): + OP(BYTECODE_OP_NE_STRING): { int res; @@ -1253,7 +1424,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, next_pc += sizeof(struct binary_op); PO; } - OP(FILTER_OP_GT_STRING): + OP(BYTECODE_OP_GT_STRING): { int res; @@ -1264,7 +1435,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, next_pc += sizeof(struct binary_op); PO; } - OP(FILTER_OP_LT_STRING): + OP(BYTECODE_OP_LT_STRING): { int res; @@ -1275,7 +1446,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, next_pc += sizeof(struct binary_op); PO; } - OP(FILTER_OP_GE_STRING): + OP(BYTECODE_OP_GE_STRING): { int res; @@ -1286,7 +1457,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, next_pc += sizeof(struct binary_op); PO; } - OP(FILTER_OP_LE_STRING): + OP(BYTECODE_OP_LE_STRING): { int res; @@ -1298,7 +1469,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, PO; } - OP(FILTER_OP_EQ_STAR_GLOB_STRING): + OP(BYTECODE_OP_EQ_STAR_GLOB_STRING): { int res; @@ -1309,7 +1480,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, next_pc += sizeof(struct binary_op); PO; } - OP(FILTER_OP_NE_STAR_GLOB_STRING): + OP(BYTECODE_OP_NE_STAR_GLOB_STRING): { int res; @@ -1321,7 +1492,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, PO; } - OP(FILTER_OP_EQ_S64): + 
OP(BYTECODE_OP_EQ_S64): { int res; @@ -1332,7 +1503,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, next_pc += sizeof(struct binary_op); PO; } - OP(FILTER_OP_NE_S64): + OP(BYTECODE_OP_NE_S64): { int res; @@ -1343,7 +1514,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, next_pc += sizeof(struct binary_op); PO; } - OP(FILTER_OP_GT_S64): + OP(BYTECODE_OP_GT_S64): { int res; @@ -1354,7 +1525,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, next_pc += sizeof(struct binary_op); PO; } - OP(FILTER_OP_LT_S64): + OP(BYTECODE_OP_LT_S64): { int res; @@ -1365,7 +1536,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, next_pc += sizeof(struct binary_op); PO; } - OP(FILTER_OP_GE_S64): + OP(BYTECODE_OP_GE_S64): { int res; @@ -1376,7 +1547,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, next_pc += sizeof(struct binary_op); PO; } - OP(FILTER_OP_LE_S64): + OP(BYTECODE_OP_LE_S64): { int res; @@ -1388,7 +1559,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, PO; } - OP(FILTER_OP_EQ_DOUBLE): + OP(BYTECODE_OP_EQ_DOUBLE): { int res; @@ -1399,7 +1570,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, next_pc += sizeof(struct binary_op); PO; } - OP(FILTER_OP_NE_DOUBLE): + OP(BYTECODE_OP_NE_DOUBLE): { int res; @@ -1410,7 +1581,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, next_pc += sizeof(struct binary_op); PO; } - OP(FILTER_OP_GT_DOUBLE): + OP(BYTECODE_OP_GT_DOUBLE): { int res; @@ -1421,7 +1592,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, next_pc += sizeof(struct binary_op); PO; } - OP(FILTER_OP_LT_DOUBLE): + OP(BYTECODE_OP_LT_DOUBLE): { int res; @@ -1432,7 +1603,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, next_pc += sizeof(struct binary_op); PO; } - OP(FILTER_OP_GE_DOUBLE): + OP(BYTECODE_OP_GE_DOUBLE): { int res; @@ -1443,7 +1614,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, next_pc += sizeof(struct 
binary_op); PO; } - OP(FILTER_OP_LE_DOUBLE): + OP(BYTECODE_OP_LE_DOUBLE): { int res; @@ -1456,7 +1627,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, } /* Mixed S64-double binary comparators */ - OP(FILTER_OP_EQ_DOUBLE_S64): + OP(BYTECODE_OP_EQ_DOUBLE_S64): { int res; @@ -1467,7 +1638,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, next_pc += sizeof(struct binary_op); PO; } - OP(FILTER_OP_NE_DOUBLE_S64): + OP(BYTECODE_OP_NE_DOUBLE_S64): { int res; @@ -1478,7 +1649,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, next_pc += sizeof(struct binary_op); PO; } - OP(FILTER_OP_GT_DOUBLE_S64): + OP(BYTECODE_OP_GT_DOUBLE_S64): { int res; @@ -1489,7 +1660,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, next_pc += sizeof(struct binary_op); PO; } - OP(FILTER_OP_LT_DOUBLE_S64): + OP(BYTECODE_OP_LT_DOUBLE_S64): { int res; @@ -1500,7 +1671,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, next_pc += sizeof(struct binary_op); PO; } - OP(FILTER_OP_GE_DOUBLE_S64): + OP(BYTECODE_OP_GE_DOUBLE_S64): { int res; @@ -1511,7 +1682,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, next_pc += sizeof(struct binary_op); PO; } - OP(FILTER_OP_LE_DOUBLE_S64): + OP(BYTECODE_OP_LE_DOUBLE_S64): { int res; @@ -1523,7 +1694,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, PO; } - OP(FILTER_OP_EQ_S64_DOUBLE): + OP(BYTECODE_OP_EQ_S64_DOUBLE): { int res; @@ -1534,7 +1705,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, next_pc += sizeof(struct binary_op); PO; } - OP(FILTER_OP_NE_S64_DOUBLE): + OP(BYTECODE_OP_NE_S64_DOUBLE): { int res; @@ -1545,7 +1716,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, next_pc += sizeof(struct binary_op); PO; } - OP(FILTER_OP_GT_S64_DOUBLE): + OP(BYTECODE_OP_GT_S64_DOUBLE): { int res; @@ -1556,7 +1727,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, next_pc += sizeof(struct binary_op); PO; } - 
OP(FILTER_OP_LT_S64_DOUBLE): + OP(BYTECODE_OP_LT_S64_DOUBLE): { int res; @@ -1567,7 +1738,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, next_pc += sizeof(struct binary_op); PO; } - OP(FILTER_OP_GE_S64_DOUBLE): + OP(BYTECODE_OP_GE_S64_DOUBLE): { int res; @@ -1578,7 +1749,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, next_pc += sizeof(struct binary_op); PO; } - OP(FILTER_OP_LE_S64_DOUBLE): + OP(BYTECODE_OP_LE_S64_DOUBLE): { int res; @@ -1589,15 +1760,15 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, next_pc += sizeof(struct binary_op); PO; } - OP(FILTER_OP_BIT_RSHIFT): + OP(BYTECODE_OP_BIT_RSHIFT): { int64_t res; - /* Dynamic typing. */ - if (estack_ax_t != REG_S64 || estack_bx_t != REG_S64) { + if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) { ret = -EINVAL; goto end; } + /* Catch undefined behavior. */ if (caa_unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) { ret = -EINVAL; @@ -1606,19 +1777,19 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, res = ((uint64_t) estack_bx_v >> (uint32_t) estack_ax_v); estack_pop(stack, top, ax, bx, ax_t, bx_t); estack_ax_v = res; - estack_ax_t = REG_S64; + estack_ax_t = REG_U64; next_pc += sizeof(struct binary_op); PO; } - OP(FILTER_OP_BIT_LSHIFT): + OP(BYTECODE_OP_BIT_LSHIFT): { int64_t res; - /* Dynamic typing. */ - if (estack_ax_t != REG_S64 || estack_bx_t != REG_S64) { + if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) { ret = -EINVAL; goto end; } + /* Catch undefined behavior. 
*/ if (caa_unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) { ret = -EINVAL; @@ -1627,16 +1798,15 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, res = ((uint64_t) estack_bx_v << (uint32_t) estack_ax_v); estack_pop(stack, top, ax, bx, ax_t, bx_t); estack_ax_v = res; - estack_ax_t = REG_S64; + estack_ax_t = REG_U64; next_pc += sizeof(struct binary_op); PO; } - OP(FILTER_OP_BIT_AND): + OP(BYTECODE_OP_BIT_AND): { int64_t res; - /* Dynamic typing. */ - if (estack_ax_t != REG_S64 || estack_bx_t != REG_S64) { + if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) { ret = -EINVAL; goto end; } @@ -1644,16 +1814,15 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, res = ((uint64_t) estack_bx_v & (uint64_t) estack_ax_v); estack_pop(stack, top, ax, bx, ax_t, bx_t); estack_ax_v = res; - estack_ax_t = REG_S64; + estack_ax_t = REG_U64; next_pc += sizeof(struct binary_op); PO; } - OP(FILTER_OP_BIT_OR): + OP(BYTECODE_OP_BIT_OR): { int64_t res; - /* Dynamic typing. */ - if (estack_ax_t != REG_S64 || estack_bx_t != REG_S64) { + if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) { ret = -EINVAL; goto end; } @@ -1661,16 +1830,15 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, res = ((uint64_t) estack_bx_v | (uint64_t) estack_ax_v); estack_pop(stack, top, ax, bx, ax_t, bx_t); estack_ax_v = res; - estack_ax_t = REG_S64; + estack_ax_t = REG_U64; next_pc += sizeof(struct binary_op); PO; } - OP(FILTER_OP_BIT_XOR): + OP(BYTECODE_OP_BIT_XOR): { int64_t res; - /* Dynamic typing. 
*/ - if (estack_ax_t != REG_S64 || estack_bx_t != REG_S64) { + if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) { ret = -EINVAL; goto end; } @@ -1678,64 +1846,67 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, res = ((uint64_t) estack_bx_v ^ (uint64_t) estack_ax_v); estack_pop(stack, top, ax, bx, ax_t, bx_t); estack_ax_v = res; - estack_ax_t = REG_S64; + estack_ax_t = REG_U64; next_pc += sizeof(struct binary_op); PO; } /* unary */ - OP(FILTER_OP_UNARY_PLUS): + OP(BYTECODE_OP_UNARY_PLUS): { /* Dynamic typing. */ switch (estack_ax_t) { case REG_S64: /* Fall-through. */ - JUMP_TO(FILTER_OP_UNARY_PLUS_S64); + case REG_U64: + JUMP_TO(BYTECODE_OP_UNARY_PLUS_S64); case REG_DOUBLE: - JUMP_TO(FILTER_OP_UNARY_PLUS_DOUBLE); + JUMP_TO(BYTECODE_OP_UNARY_PLUS_DOUBLE); case REG_STRING: /* Fall-through */ case REG_STAR_GLOB_STRING: ret = -EINVAL; goto end; default: - ERR("Unknown filter register type (%d)", + ERR("Unknown interpreter register type (%d)", (int) estack_ax_t); ret = -EINVAL; goto end; } } - OP(FILTER_OP_UNARY_MINUS): + OP(BYTECODE_OP_UNARY_MINUS): { /* Dynamic typing. */ switch (estack_ax_t) { - case REG_S64: - JUMP_TO(FILTER_OP_UNARY_MINUS_S64); + case REG_S64: /* Fall-through. */ + case REG_U64: + JUMP_TO(BYTECODE_OP_UNARY_MINUS_S64); case REG_DOUBLE: - JUMP_TO(FILTER_OP_UNARY_MINUS_DOUBLE); + JUMP_TO(BYTECODE_OP_UNARY_MINUS_DOUBLE); case REG_STRING: /* Fall-through */ case REG_STAR_GLOB_STRING: ret = -EINVAL; goto end; default: - ERR("Unknown filter register type (%d)", + ERR("Unknown interpreter register type (%d)", (int) estack_ax_t); ret = -EINVAL; goto end; } } - OP(FILTER_OP_UNARY_NOT): + OP(BYTECODE_OP_UNARY_NOT): { /* Dynamic typing. */ switch (estack_ax_t) { - case REG_S64: - JUMP_TO(FILTER_OP_UNARY_NOT_S64); + case REG_S64: /* Fall-through. 
*/ + case REG_U64: + JUMP_TO(BYTECODE_OP_UNARY_NOT_S64); case REG_DOUBLE: - JUMP_TO(FILTER_OP_UNARY_NOT_DOUBLE); + JUMP_TO(BYTECODE_OP_UNARY_NOT_DOUBLE); case REG_STRING: /* Fall-through */ case REG_STAR_GLOB_STRING: ret = -EINVAL; goto end; default: - ERR("Unknown filter register type (%d)", + ERR("Unknown interpreter register type (%d)", (int) estack_ax_t); ret = -EINVAL; goto end; @@ -1744,44 +1915,46 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, PO; } - OP(FILTER_OP_UNARY_BIT_NOT): + OP(BYTECODE_OP_UNARY_BIT_NOT): { /* Dynamic typing. */ - if (estack_ax_t != REG_S64) { + if (!IS_INTEGER_REGISTER(estack_ax_t)) { ret = -EINVAL; goto end; } estack_ax_v = ~(uint64_t) estack_ax_v; + estack_ax_t = REG_U64; next_pc += sizeof(struct unary_op); PO; } - OP(FILTER_OP_UNARY_PLUS_S64): - OP(FILTER_OP_UNARY_PLUS_DOUBLE): + OP(BYTECODE_OP_UNARY_PLUS_S64): + OP(BYTECODE_OP_UNARY_PLUS_DOUBLE): { next_pc += sizeof(struct unary_op); PO; } - OP(FILTER_OP_UNARY_MINUS_S64): + OP(BYTECODE_OP_UNARY_MINUS_S64): { estack_ax_v = -estack_ax_v; next_pc += sizeof(struct unary_op); PO; } - OP(FILTER_OP_UNARY_MINUS_DOUBLE): + OP(BYTECODE_OP_UNARY_MINUS_DOUBLE): { estack_ax(stack, top)->u.d = -estack_ax(stack, top)->u.d; next_pc += sizeof(struct unary_op); PO; } - OP(FILTER_OP_UNARY_NOT_S64): + OP(BYTECODE_OP_UNARY_NOT_S64): { estack_ax_v = !estack_ax_v; + estack_ax_t = REG_S64; next_pc += sizeof(struct unary_op); PO; } - OP(FILTER_OP_UNARY_NOT_DOUBLE): + OP(BYTECODE_OP_UNARY_NOT_DOUBLE): { estack_ax_v = !estack_ax(stack, top)->u.d; estack_ax_t = REG_S64; @@ -1790,11 +1963,11 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, } /* logical */ - OP(FILTER_OP_AND): + OP(BYTECODE_OP_AND): { struct logical_op *insn = (struct logical_op *) pc; - if (estack_ax_t != REG_S64) { + if (estack_ax_t != REG_S64 && estack_ax_t != REG_U64) { ret = -EINVAL; goto end; } @@ -1810,11 +1983,11 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, } PO; } - OP(FILTER_OP_OR): + 
OP(BYTECODE_OP_OR): { struct logical_op *insn = (struct logical_op *) pc; - if (estack_ax_t != REG_S64) { + if (estack_ax_t != REG_S64 && estack_ax_t != REG_U64) { ret = -EINVAL; goto end; } @@ -1834,7 +2007,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, /* load field ref */ - OP(FILTER_OP_LOAD_FIELD_REF_STRING): + OP(BYTECODE_OP_LOAD_FIELD_REF_STRING): { struct load_op *insn = (struct load_op *) pc; struct field_ref *ref = (struct field_ref *) insn->data; @@ -1843,9 +2016,9 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, ref->offset); estack_push(stack, top, ax, bx, ax_t, bx_t); estack_ax(stack, top)->u.s.str = - *(const char * const *) &filter_stack_data[ref->offset]; + *(const char * const *) &interpreter_stack_data[ref->offset]; if (unlikely(!estack_ax(stack, top)->u.s.str)) { - dbg_printf("Filter warning: loading a NULL string.\n"); + dbg_printf("Interpreter warning: loading a NULL string.\n"); ret = -EINVAL; goto end; } @@ -1858,7 +2031,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, PO; } - OP(FILTER_OP_LOAD_FIELD_REF_SEQUENCE): + OP(BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE): { struct load_op *insn = (struct load_op *) pc; struct field_ref *ref = (struct field_ref *) insn->data; @@ -1867,13 +2040,13 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, ref->offset); estack_push(stack, top, ax, bx, ax_t, bx_t); estack_ax(stack, top)->u.s.seq_len = - *(unsigned long *) &filter_stack_data[ref->offset]; + *(unsigned long *) &interpreter_stack_data[ref->offset]; estack_ax(stack, top)->u.s.str = - *(const char **) (&filter_stack_data[ref->offset + *(const char **) (&interpreter_stack_data[ref->offset + sizeof(unsigned long)]); estack_ax_t = REG_STRING; if (unlikely(!estack_ax(stack, top)->u.s.str)) { - dbg_printf("Filter warning: loading a NULL sequence.\n"); + dbg_printf("Interpreter warning: loading a NULL sequence.\n"); ret = -EINVAL; goto end; } @@ -1883,7 +2056,7 @@ uint64_t 
lttng_filter_interpret_bytecode(void *filter_data, PO; } - OP(FILTER_OP_LOAD_FIELD_REF_S64): + OP(BYTECODE_OP_LOAD_FIELD_REF_S64): { struct load_op *insn = (struct load_op *) pc; struct field_ref *ref = (struct field_ref *) insn->data; @@ -1892,14 +2065,14 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, ref->offset); estack_push(stack, top, ax, bx, ax_t, bx_t); estack_ax_v = - ((struct literal_numeric *) &filter_stack_data[ref->offset])->v; + ((struct literal_numeric *) &interpreter_stack_data[ref->offset])->v; estack_ax_t = REG_S64; dbg_printf("ref load s64 %" PRIi64 "\n", estack_ax_v); next_pc += sizeof(struct load_op) + sizeof(struct field_ref); PO; } - OP(FILTER_OP_LOAD_FIELD_REF_DOUBLE): + OP(BYTECODE_OP_LOAD_FIELD_REF_DOUBLE): { struct load_op *insn = (struct load_op *) pc; struct field_ref *ref = (struct field_ref *) insn->data; @@ -1907,7 +2080,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, dbg_printf("load field ref offset %u type double\n", ref->offset); estack_push(stack, top, ax, bx, ax_t, bx_t); - memcpy(&estack_ax(stack, top)->u.d, &filter_stack_data[ref->offset], + memcpy(&estack_ax(stack, top)->u.d, &interpreter_stack_data[ref->offset], sizeof(struct literal_double)); estack_ax_t = REG_DOUBLE; dbg_printf("ref load double %g\n", estack_ax(stack, top)->u.d); @@ -1916,7 +2089,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, } /* load from immediate operand */ - OP(FILTER_OP_LOAD_STRING): + OP(BYTECODE_OP_LOAD_STRING): { struct load_op *insn = (struct load_op *) pc; @@ -1931,7 +2104,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, PO; } - OP(FILTER_OP_LOAD_STAR_GLOB_STRING): + OP(BYTECODE_OP_LOAD_STAR_GLOB_STRING): { struct load_op *insn = (struct load_op *) pc; @@ -1946,7 +2119,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, PO; } - OP(FILTER_OP_LOAD_S64): + OP(BYTECODE_OP_LOAD_S64): { struct load_op *insn = (struct load_op *) pc; @@ -1959,7 +2132,7 @@ uint64_t 
lttng_filter_interpret_bytecode(void *filter_data, PO; } - OP(FILTER_OP_LOAD_DOUBLE): + OP(BYTECODE_OP_LOAD_DOUBLE): { struct load_op *insn = (struct load_op *) pc; @@ -1974,27 +2147,30 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, } /* cast */ - OP(FILTER_OP_CAST_TO_S64): + OP(BYTECODE_OP_CAST_TO_S64): { /* Dynamic typing. */ switch (estack_ax_t) { case REG_S64: - JUMP_TO(FILTER_OP_CAST_NOP); + JUMP_TO(BYTECODE_OP_CAST_NOP); case REG_DOUBLE: - JUMP_TO(FILTER_OP_CAST_DOUBLE_TO_S64); + JUMP_TO(BYTECODE_OP_CAST_DOUBLE_TO_S64); + case REG_U64: + estack_ax_t = REG_S64; + next_pc += sizeof(struct cast_op); case REG_STRING: /* Fall-through */ case REG_STAR_GLOB_STRING: ret = -EINVAL; goto end; default: - ERR("Unknown filter register type (%d)", + ERR("Unknown interpreter register type (%d)", (int) estack_ax_t); ret = -EINVAL; goto end; } } - OP(FILTER_OP_CAST_DOUBLE_TO_S64): + OP(BYTECODE_OP_CAST_DOUBLE_TO_S64): { estack_ax_v = (int64_t) estack_ax(stack, top)->u.d; estack_ax_t = REG_S64; @@ -2002,24 +2178,22 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, PO; } - OP(FILTER_OP_CAST_NOP): + OP(BYTECODE_OP_CAST_NOP): { next_pc += sizeof(struct cast_op); PO; } /* get context ref */ - OP(FILTER_OP_GET_CONTEXT_REF): + OP(BYTECODE_OP_GET_CONTEXT_REF): { struct load_op *insn = (struct load_op *) pc; struct field_ref *ref = (struct field_ref *) insn->data; - struct lttng_ctx *ctx; struct lttng_ctx_field *ctx_field; struct lttng_ctx_value v; dbg_printf("get context ref offset %u type dynamic\n", ref->offset); - ctx = rcu_dereference(session->ctx); ctx_field = &ctx->fields[ref->offset]; ctx_field->get_value(ctx_field, &v); estack_push(stack, top, ax, bx, ax_t, bx_t); @@ -2040,7 +2214,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, case LTTNG_UST_DYNAMIC_TYPE_STRING: estack_ax(stack, top)->u.s.str = v.u.str; if (unlikely(!estack_ax(stack, top)->u.s.str)) { - dbg_printf("Filter warning: loading a NULL string.\n"); + 
dbg_printf("Interpreter warning: loading a NULL string.\n"); ret = -EINVAL; goto end; } @@ -2051,7 +2225,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, estack_ax_t = REG_STRING; break; default: - dbg_printf("Filter warning: unknown dynamic type (%d).\n", (int) v.sel); + dbg_printf("Interpreter warning: unknown dynamic type (%d).\n", (int) v.sel); ret = -EINVAL; goto end; } @@ -2059,23 +2233,21 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, PO; } - OP(FILTER_OP_GET_CONTEXT_REF_STRING): + OP(BYTECODE_OP_GET_CONTEXT_REF_STRING): { struct load_op *insn = (struct load_op *) pc; struct field_ref *ref = (struct field_ref *) insn->data; - struct lttng_ctx *ctx; struct lttng_ctx_field *ctx_field; struct lttng_ctx_value v; dbg_printf("get context ref offset %u type string\n", ref->offset); - ctx = rcu_dereference(session->ctx); ctx_field = &ctx->fields[ref->offset]; ctx_field->get_value(ctx_field, &v); estack_push(stack, top, ax, bx, ax_t, bx_t); estack_ax(stack, top)->u.s.str = v.u.str; if (unlikely(!estack_ax(stack, top)->u.s.str)) { - dbg_printf("Filter warning: loading a NULL string.\n"); + dbg_printf("Interpreter warning: loading a NULL string.\n"); ret = -EINVAL; goto end; } @@ -2088,17 +2260,15 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, PO; } - OP(FILTER_OP_GET_CONTEXT_REF_S64): + OP(BYTECODE_OP_GET_CONTEXT_REF_S64): { struct load_op *insn = (struct load_op *) pc; struct field_ref *ref = (struct field_ref *) insn->data; - struct lttng_ctx *ctx; struct lttng_ctx_field *ctx_field; struct lttng_ctx_value v; dbg_printf("get context ref offset %u type s64\n", ref->offset); - ctx = rcu_dereference(session->ctx); ctx_field = &ctx->fields[ref->offset]; ctx_field->get_value(ctx_field, &v); estack_push(stack, top, ax, bx, ax_t, bx_t); @@ -2109,17 +2279,15 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, PO; } - OP(FILTER_OP_GET_CONTEXT_REF_DOUBLE): + OP(BYTECODE_OP_GET_CONTEXT_REF_DOUBLE): { struct load_op 
*insn = (struct load_op *) pc; struct field_ref *ref = (struct field_ref *) insn->data; - struct lttng_ctx *ctx; struct lttng_ctx_field *ctx_field; struct lttng_ctx_value v; dbg_printf("get context ref offset %u type double\n", ref->offset); - ctx = rcu_dereference(session->ctx); ctx_field = &ctx->fields[ref->offset]; ctx_field->get_value(ctx_field, &v); estack_push(stack, top, ax, bx, ax_t, bx_t); @@ -2130,7 +2298,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, PO; } - OP(FILTER_OP_GET_CONTEXT_ROOT): + OP(BYTECODE_OP_GET_CONTEXT_ROOT): { dbg_printf("op get context root\n"); estack_push(stack, top, ax, bx, ax_t, bx_t); @@ -2142,7 +2310,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, PO; } - OP(FILTER_OP_GET_APP_CONTEXT_ROOT): + OP(BYTECODE_OP_GET_APP_CONTEXT_ROOT): { dbg_printf("op get app context root\n"); estack_push(stack, top, ax, bx, ax_t, bx_t); @@ -2154,12 +2322,12 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, PO; } - OP(FILTER_OP_GET_PAYLOAD_ROOT): + OP(BYTECODE_OP_GET_PAYLOAD_ROOT): { dbg_printf("op get app payload root\n"); estack_push(stack, top, ax, bx, ax_t, bx_t); estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_PAYLOAD; - estack_ax(stack, top)->u.ptr.ptr = filter_stack_data; + estack_ax(stack, top)->u.ptr.ptr = interpreter_stack_data; /* "field" only needed for variants. 
*/ estack_ax(stack, top)->u.ptr.field = NULL; estack_ax_t = REG_PTR; @@ -2167,7 +2335,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, PO; } - OP(FILTER_OP_GET_SYMBOL): + OP(BYTECODE_OP_GET_SYMBOL): { dbg_printf("op get symbol\n"); switch (estack_ax(stack, top)->u.ptr.type) { @@ -2189,7 +2357,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, PO; } - OP(FILTER_OP_GET_SYMBOL_FIELD): + OP(BYTECODE_OP_GET_SYMBOL_FIELD): { /* * Used for first variant encountered in a @@ -2199,13 +2367,13 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, goto end; } - OP(FILTER_OP_GET_INDEX_U16): + OP(BYTECODE_OP_GET_INDEX_U16): { struct load_op *insn = (struct load_op *) pc; struct get_index_u16 *index = (struct get_index_u16 *) insn->data; dbg_printf("op get index u16\n"); - ret = dynamic_get_index(session, bytecode, index->index, estack_ax(stack, top)); + ret = dynamic_get_index(ctx, bytecode, index->index, estack_ax(stack, top)); if (ret) goto end; estack_ax_v = estack_ax(stack, top)->u.v; @@ -2214,13 +2382,13 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, PO; } - OP(FILTER_OP_GET_INDEX_U64): + OP(BYTECODE_OP_GET_INDEX_U64): { struct load_op *insn = (struct load_op *) pc; struct get_index_u64 *index = (struct get_index_u64 *) insn->data; dbg_printf("op get index u64\n"); - ret = dynamic_get_index(session, bytecode, index->index, estack_ax(stack, top)); + ret = dynamic_get_index(ctx, bytecode, index->index, estack_ax(stack, top)); if (ret) goto end; estack_ax_v = estack_ax(stack, top)->u.v; @@ -2229,7 +2397,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, PO; } - OP(FILTER_OP_LOAD_FIELD): + OP(BYTECODE_OP_LOAD_FIELD): { dbg_printf("op load field\n"); ret = dynamic_load_field(estack_ax(stack, top)); @@ -2241,7 +2409,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, PO; } - OP(FILTER_OP_LOAD_FIELD_S8): + OP(BYTECODE_OP_LOAD_FIELD_S8): { dbg_printf("op load field s8\n"); @@ -2250,7 +2418,7 @@ 
uint64_t lttng_filter_interpret_bytecode(void *filter_data, next_pc += sizeof(struct load_op); PO; } - OP(FILTER_OP_LOAD_FIELD_S16): + OP(BYTECODE_OP_LOAD_FIELD_S16): { dbg_printf("op load field s16\n"); @@ -2259,7 +2427,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, next_pc += sizeof(struct load_op); PO; } - OP(FILTER_OP_LOAD_FIELD_S32): + OP(BYTECODE_OP_LOAD_FIELD_S32): { dbg_printf("op load field s32\n"); @@ -2268,7 +2436,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, next_pc += sizeof(struct load_op); PO; } - OP(FILTER_OP_LOAD_FIELD_S64): + OP(BYTECODE_OP_LOAD_FIELD_S64): { dbg_printf("op load field s64\n"); @@ -2277,43 +2445,43 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, next_pc += sizeof(struct load_op); PO; } - OP(FILTER_OP_LOAD_FIELD_U8): + OP(BYTECODE_OP_LOAD_FIELD_U8): { dbg_printf("op load field u8\n"); estack_ax_v = *(uint8_t *) estack_ax(stack, top)->u.ptr.ptr; - estack_ax_t = REG_S64; + estack_ax_t = REG_U64; next_pc += sizeof(struct load_op); PO; } - OP(FILTER_OP_LOAD_FIELD_U16): + OP(BYTECODE_OP_LOAD_FIELD_U16): { dbg_printf("op load field u16\n"); estack_ax_v = *(uint16_t *) estack_ax(stack, top)->u.ptr.ptr; - estack_ax_t = REG_S64; + estack_ax_t = REG_U64; next_pc += sizeof(struct load_op); PO; } - OP(FILTER_OP_LOAD_FIELD_U32): + OP(BYTECODE_OP_LOAD_FIELD_U32): { dbg_printf("op load field u32\n"); estack_ax_v = *(uint32_t *) estack_ax(stack, top)->u.ptr.ptr; - estack_ax_t = REG_S64; + estack_ax_t = REG_U64; next_pc += sizeof(struct load_op); PO; } - OP(FILTER_OP_LOAD_FIELD_U64): + OP(BYTECODE_OP_LOAD_FIELD_U64): { dbg_printf("op load field u64\n"); estack_ax_v = *(uint64_t *) estack_ax(stack, top)->u.ptr.ptr; - estack_ax_t = REG_S64; + estack_ax_t = REG_U64; next_pc += sizeof(struct load_op); PO; } - OP(FILTER_OP_LOAD_FIELD_DOUBLE): + OP(BYTECODE_OP_LOAD_FIELD_DOUBLE): { dbg_printf("op load field double\n"); @@ -2325,7 +2493,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, PO; 
} - OP(FILTER_OP_LOAD_FIELD_STRING): + OP(BYTECODE_OP_LOAD_FIELD_STRING): { const char *str; @@ -2333,7 +2501,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, str = (const char *) estack_ax(stack, top)->u.ptr.ptr; estack_ax(stack, top)->u.s.str = str; if (unlikely(!estack_ax(stack, top)->u.s.str)) { - dbg_printf("Filter warning: loading a NULL string.\n"); + dbg_printf("Interpreter warning: loading a NULL string.\n"); ret = -EINVAL; goto end; } @@ -2345,7 +2513,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, PO; } - OP(FILTER_OP_LOAD_FIELD_SEQUENCE): + OP(BYTECODE_OP_LOAD_FIELD_SEQUENCE): { const char *ptr; @@ -2355,7 +2523,7 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, estack_ax(stack, top)->u.s.str = *(const char **) (ptr + sizeof(unsigned long)); estack_ax(stack, top)->type = REG_STRING; if (unlikely(!estack_ax(stack, top)->u.s.str)) { - dbg_printf("Filter warning: loading a NULL sequence.\n"); + dbg_printf("Interpreter warning: loading a NULL sequence.\n"); ret = -EINVAL; goto end; } @@ -2369,10 +2537,30 @@ uint64_t lttng_filter_interpret_bytecode(void *filter_data, end: /* Return _DISCARD on error. 
*/ if (ret) - return LTTNG_FILTER_DISCARD; + return LTTNG_INTERPRETER_DISCARD; + + if (output) { + return lttng_bytecode_interpret_format_output(estack_ax(stack, top), + output); + } + return retval; } +uint64_t lttng_bytecode_filter_interpret(void *filter_data, + const char *filter_stack_data) +{ + return bytecode_interpret(filter_data, filter_stack_data, NULL); +} + +uint64_t lttng_bytecode_capture_interpret(void *capture_data, + const char *capture_stack_data, + struct lttng_interpreter_output *output) +{ + return bytecode_interpret(capture_data, capture_stack_data, + (struct lttng_interpreter_output *) output); +} + #undef START_OP #undef OP #undef PO diff --git a/liblttng-ust/lttng-filter-specialize.c b/liblttng-ust/lttng-bytecode-specialize.c similarity index 75% rename from liblttng-ust/lttng-filter-specialize.c rename to liblttng-ust/lttng-bytecode-specialize.c index ef45904f..12c00723 100644 --- a/liblttng-ust/lttng-filter-specialize.c +++ b/liblttng-ust/lttng-bytecode-specialize.c @@ -1,7 +1,7 @@ /* - * lttng-filter-specialize.c + * lttng-bytecode-specialize.c * - * LTTng UST filter code specializer. + * LTTng UST bytecode specializer. 
* * Copyright (C) 2010-2016 Mathieu Desnoyers * @@ -28,8 +28,9 @@ #include #include -#include "lttng-filter.h" +#include "lttng-bytecode.h" #include +#include "ust-events-internal.h" static int lttng_fls(int val) { @@ -79,7 +80,7 @@ static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime, size_t new_alloc_len = new_len; size_t old_alloc_len = runtime->data_alloc_len; - if (new_len > FILTER_MAX_DATA_LEN) + if (new_len > BYTECODE_MAX_DATA_LEN) return -EINVAL; if (new_alloc_len > old_alloc_len) { @@ -125,7 +126,7 @@ static int specialize_load_field(struct vstack_entry *stack_top, case LOAD_ROOT_APP_CONTEXT: case LOAD_ROOT_PAYLOAD: default: - dbg_printf("Filter warning: cannot load root, missing field name.\n"); + dbg_printf("Bytecode warning: cannot load root, missing field name.\n"); ret = -EINVAL; goto end; } @@ -134,62 +135,70 @@ static int specialize_load_field(struct vstack_entry *stack_top, dbg_printf("op load field s8\n"); stack_top->type = REG_S64; if (!stack_top->load.rev_bo) - insn->op = FILTER_OP_LOAD_FIELD_S8; + insn->op = BYTECODE_OP_LOAD_FIELD_S8; break; case OBJECT_TYPE_S16: dbg_printf("op load field s16\n"); stack_top->type = REG_S64; if (!stack_top->load.rev_bo) - insn->op = FILTER_OP_LOAD_FIELD_S16; + insn->op = BYTECODE_OP_LOAD_FIELD_S16; break; case OBJECT_TYPE_S32: dbg_printf("op load field s32\n"); stack_top->type = REG_S64; if (!stack_top->load.rev_bo) - insn->op = FILTER_OP_LOAD_FIELD_S32; + insn->op = BYTECODE_OP_LOAD_FIELD_S32; break; case OBJECT_TYPE_S64: dbg_printf("op load field s64\n"); stack_top->type = REG_S64; if (!stack_top->load.rev_bo) - insn->op = FILTER_OP_LOAD_FIELD_S64; + insn->op = BYTECODE_OP_LOAD_FIELD_S64; + break; + case OBJECT_TYPE_SIGNED_ENUM: + dbg_printf("op load field signed enumeration\n"); + stack_top->type = REG_PTR; break; case OBJECT_TYPE_U8: dbg_printf("op load field u8\n"); - stack_top->type = REG_S64; - insn->op = FILTER_OP_LOAD_FIELD_U8; + stack_top->type = REG_U64; + insn->op = 
BYTECODE_OP_LOAD_FIELD_U8; break; case OBJECT_TYPE_U16: dbg_printf("op load field u16\n"); - stack_top->type = REG_S64; + stack_top->type = REG_U64; if (!stack_top->load.rev_bo) - insn->op = FILTER_OP_LOAD_FIELD_U16; + insn->op = BYTECODE_OP_LOAD_FIELD_U16; break; case OBJECT_TYPE_U32: dbg_printf("op load field u32\n"); - stack_top->type = REG_S64; + stack_top->type = REG_U64; if (!stack_top->load.rev_bo) - insn->op = FILTER_OP_LOAD_FIELD_U32; + insn->op = BYTECODE_OP_LOAD_FIELD_U32; break; case OBJECT_TYPE_U64: dbg_printf("op load field u64\n"); - stack_top->type = REG_S64; + stack_top->type = REG_U64; if (!stack_top->load.rev_bo) - insn->op = FILTER_OP_LOAD_FIELD_U64; + insn->op = BYTECODE_OP_LOAD_FIELD_U64; + break; + case OBJECT_TYPE_UNSIGNED_ENUM: + dbg_printf("op load field unsigned enumeration\n"); + stack_top->type = REG_PTR; break; case OBJECT_TYPE_DOUBLE: stack_top->type = REG_DOUBLE; - insn->op = FILTER_OP_LOAD_FIELD_DOUBLE; + insn->op = BYTECODE_OP_LOAD_FIELD_DOUBLE; break; case OBJECT_TYPE_STRING: dbg_printf("op load field string\n"); stack_top->type = REG_STRING; - insn->op = FILTER_OP_LOAD_FIELD_STRING; + insn->op = BYTECODE_OP_LOAD_FIELD_STRING; break; case OBJECT_TYPE_STRING_SEQUENCE: dbg_printf("op load field string sequence\n"); stack_top->type = REG_STRING; - insn->op = FILTER_OP_LOAD_FIELD_SEQUENCE; + insn->op = BYTECODE_OP_LOAD_FIELD_SEQUENCE; break; case OBJECT_TYPE_DYNAMIC: dbg_printf("op load field dynamic\n"); @@ -250,7 +259,7 @@ static int specialize_get_index(struct bytecode_runtime *runtime, int idx_len) { int ret; - struct filter_get_index_data gid; + struct bytecode_get_index_data gid; ssize_t data_offset; memset(&gid, 0, sizeof(gid)); @@ -395,9 +404,7 @@ static int specialize_load_object(const struct lttng_event_field *field, struct vstack_load *load, bool is_context) { load->type = LOAD_OBJECT; - /* - * LTTng-UST layout all integer fields as s64 on the stack for the filter. 
- */ + switch (field->type.atype) { case atype_integer: if (field->type.u.integer.signedness) @@ -417,9 +424,9 @@ static int specialize_load_object(const struct lttng_event_field *field, itype = &field->type.u.enum_nestable.container_type->u.integer; } if (itype->signedness) - load->object_type = OBJECT_TYPE_S64; + load->object_type = OBJECT_TYPE_SIGNED_ENUM; else - load->object_type = OBJECT_TYPE_U64; + load->object_type = OBJECT_TYPE_UNSIGNED_ENUM; load->rev_bo = false; break; } @@ -507,7 +514,7 @@ static int specialize_load_object(const struct lttng_event_field *field, return 0; } -static int specialize_context_lookup(struct lttng_session *session, +static int specialize_context_lookup(struct lttng_ctx *ctx, struct bytecode_runtime *runtime, struct load_op *insn, struct vstack_load *load) @@ -515,23 +522,25 @@ static int specialize_context_lookup(struct lttng_session *session, int idx, ret; struct lttng_ctx_field *ctx_field; struct lttng_event_field *field; - struct filter_get_index_data gid; + struct bytecode_get_index_data gid; ssize_t data_offset; - idx = specialize_context_lookup_name(session->ctx, runtime, insn); + idx = specialize_context_lookup_name(ctx, runtime, insn); if (idx < 0) { return -ENOENT; } - ctx_field = &session->ctx->fields[idx]; + ctx_field = &ctx->fields[idx]; field = &ctx_field->event_field; ret = specialize_load_object(field, load, true); if (ret) return ret; /* Specialize each get_symbol into a get_index. 
*/ - insn->op = FILTER_OP_GET_INDEX_U16; + insn->op = BYTECODE_OP_GET_INDEX_U16; memset(&gid, 0, sizeof(gid)); gid.ctx_index = idx; gid.elem.type = load->object_type; + gid.elem.rev_bo = load->rev_bo; + gid.field = field; data_offset = bytecode_push_data(runtime, &gid, __alignof__(gid), sizeof(gid)); if (data_offset < 0) { @@ -541,7 +550,7 @@ static int specialize_context_lookup(struct lttng_session *session, return 0; } -static int specialize_app_context_lookup(struct lttng_session *session, +static int specialize_app_context_lookup(struct lttng_ctx **pctx, struct bytecode_runtime *runtime, struct load_op *insn, struct vstack_load *load) @@ -552,7 +561,7 @@ static int specialize_app_context_lookup(struct lttng_session *session, int idx, ret; struct lttng_ctx_field *ctx_field; struct lttng_event_field *field; - struct filter_get_index_data gid; + struct bytecode_get_index_data gid; ssize_t data_offset; offset = ((struct get_symbol *) insn->data)->offset; @@ -564,28 +573,29 @@ static int specialize_app_context_lookup(struct lttng_session *session, } strcpy(name, "$app."); strcat(name, orig_name); - idx = lttng_get_context_index(session->ctx, name); + idx = lttng_get_context_index(*pctx, name); if (idx < 0) { assert(lttng_context_is_app(name)); ret = lttng_ust_add_app_context_to_ctx_rcu(name, - &session->ctx); + pctx); if (ret) return ret; - idx = lttng_get_context_index(session->ctx, - name); + idx = lttng_get_context_index(*pctx, name); if (idx < 0) return -ENOENT; } - ctx_field = &session->ctx->fields[idx]; + ctx_field = &(*pctx)->fields[idx]; field = &ctx_field->event_field; ret = specialize_load_object(field, load, true); if (ret) goto end; /* Specialize each get_symbol into a get_index. 
*/ - insn->op = FILTER_OP_GET_INDEX_U16; + insn->op = BYTECODE_OP_GET_INDEX_U16; memset(&gid, 0, sizeof(gid)); gid.ctx_index = idx; gid.elem.type = load->object_type; + gid.elem.rev_bo = load->rev_bo; + gid.field = field; data_offset = bytecode_push_data(runtime, &gid, __alignof__(gid), sizeof(gid)); if (data_offset < 0) { @@ -599,27 +609,26 @@ end: return ret; } -static int specialize_event_payload_lookup(struct lttng_event *event, +static int specialize_payload_lookup(const struct lttng_event_desc *event_desc, struct bytecode_runtime *runtime, struct load_op *insn, struct vstack_load *load) { const char *name; uint16_t offset; - const struct lttng_event_desc *desc = event->desc; unsigned int i, nr_fields; bool found = false; uint32_t field_offset = 0; const struct lttng_event_field *field; int ret; - struct filter_get_index_data gid; + struct bytecode_get_index_data gid; ssize_t data_offset; - nr_fields = desc->nr_fields; + nr_fields = event_desc->nr_fields; offset = ((struct get_symbol *) insn->data)->offset; name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset; for (i = 0; i < nr_fields; i++) { - field = &desc->fields[i]; + field = &event_desc->fields[i]; if (field->u.ext.nofilter) { continue; } @@ -662,10 +671,12 @@ static int specialize_event_payload_lookup(struct lttng_event *event, goto end; /* Specialize each get_symbol into a get_index. 
*/ - insn->op = FILTER_OP_GET_INDEX_U16; + insn->op = BYTECODE_OP_GET_INDEX_U16; memset(&gid, 0, sizeof(gid)); gid.offset = field_offset; gid.elem.type = load->object_type; + gid.elem.rev_bo = load->rev_bo; + gid.field = field; data_offset = bytecode_push_data(runtime, &gid, __alignof__(gid), sizeof(gid)); if (data_offset < 0) { @@ -678,36 +689,38 @@ end: return ret; } -int lttng_filter_specialize_bytecode(struct lttng_event *event, +int lttng_bytecode_specialize(const struct lttng_event_desc *event_desc, struct bytecode_runtime *bytecode) { void *pc, *next_pc, *start_pc; int ret = -EINVAL; struct vstack _stack; struct vstack *stack = &_stack; - struct lttng_session *session = bytecode->p.session; + struct lttng_ctx **pctx = bytecode->p.pctx; vstack_init(stack); start_pc = &bytecode->code[0]; for (pc = next_pc = start_pc; pc - start_pc < bytecode->len; pc = next_pc) { - switch (*(filter_opcode_t *) pc) { - case FILTER_OP_UNKNOWN: + switch (*(bytecode_opcode_t *) pc) { + case BYTECODE_OP_UNKNOWN: default: ERR("unknown bytecode op %u\n", - (unsigned int) *(filter_opcode_t *) pc); + (unsigned int) *(bytecode_opcode_t *) pc); ret = -EINVAL; goto end; - case FILTER_OP_RETURN: - if (vstack_ax(stack)->type == REG_S64) - *(filter_opcode_t *) pc = FILTER_OP_RETURN_S64; + case BYTECODE_OP_RETURN: + if (vstack_ax(stack)->type == REG_S64 || + vstack_ax(stack)->type == REG_U64) + *(bytecode_opcode_t *) pc = BYTECODE_OP_RETURN_S64; ret = 0; goto end; - case FILTER_OP_RETURN_S64: - if (vstack_ax(stack)->type != REG_S64) { + case BYTECODE_OP_RETURN_S64: + if (vstack_ax(stack)->type != REG_S64 && + vstack_ax(stack)->type != REG_U64) { ERR("Unexpected register type\n"); ret = -EINVAL; goto end; @@ -716,17 +729,17 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, goto end; /* binary */ - case FILTER_OP_MUL: - case FILTER_OP_DIV: - case FILTER_OP_MOD: - case FILTER_OP_PLUS: - case FILTER_OP_MINUS: + case BYTECODE_OP_MUL: + case BYTECODE_OP_DIV: + case BYTECODE_OP_MOD: 
+ case BYTECODE_OP_PLUS: + case BYTECODE_OP_MINUS: ERR("unsupported bytecode op %u\n", - (unsigned int) *(filter_opcode_t *) pc); + (unsigned int) *(bytecode_opcode_t *) pc); ret = -EINVAL; goto end; - case FILTER_OP_EQ: + case BYTECODE_OP_EQ: { struct binary_op *insn = (struct binary_op *) pc; @@ -740,30 +753,33 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, if (vstack_bx(stack)->type == REG_UNKNOWN) break; if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING) - insn->op = FILTER_OP_EQ_STAR_GLOB_STRING; + insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING; else - insn->op = FILTER_OP_EQ_STRING; + insn->op = BYTECODE_OP_EQ_STRING; break; case REG_STAR_GLOB_STRING: if (vstack_bx(stack)->type == REG_UNKNOWN) break; - insn->op = FILTER_OP_EQ_STAR_GLOB_STRING; + insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING; break; case REG_S64: + case REG_U64: if (vstack_bx(stack)->type == REG_UNKNOWN) break; - if (vstack_bx(stack)->type == REG_S64) - insn->op = FILTER_OP_EQ_S64; + if (vstack_bx(stack)->type == REG_S64 || + vstack_bx(stack)->type == REG_U64) + insn->op = BYTECODE_OP_EQ_S64; else - insn->op = FILTER_OP_EQ_DOUBLE_S64; + insn->op = BYTECODE_OP_EQ_DOUBLE_S64; break; case REG_DOUBLE: if (vstack_bx(stack)->type == REG_UNKNOWN) break; - if (vstack_bx(stack)->type == REG_S64) - insn->op = FILTER_OP_EQ_S64_DOUBLE; + if (vstack_bx(stack)->type == REG_S64 || + vstack_bx(stack)->type == REG_U64) + insn->op = BYTECODE_OP_EQ_S64_DOUBLE; else - insn->op = FILTER_OP_EQ_DOUBLE; + insn->op = BYTECODE_OP_EQ_DOUBLE; break; case REG_UNKNOWN: break; /* Dynamic typing. 
*/ @@ -778,7 +794,7 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, break; } - case FILTER_OP_NE: + case BYTECODE_OP_NE: { struct binary_op *insn = (struct binary_op *) pc; @@ -792,30 +808,33 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, if (vstack_bx(stack)->type == REG_UNKNOWN) break; if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING) - insn->op = FILTER_OP_NE_STAR_GLOB_STRING; + insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING; else - insn->op = FILTER_OP_NE_STRING; + insn->op = BYTECODE_OP_NE_STRING; break; case REG_STAR_GLOB_STRING: if (vstack_bx(stack)->type == REG_UNKNOWN) break; - insn->op = FILTER_OP_NE_STAR_GLOB_STRING; + insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING; break; case REG_S64: + case REG_U64: if (vstack_bx(stack)->type == REG_UNKNOWN) break; - if (vstack_bx(stack)->type == REG_S64) - insn->op = FILTER_OP_NE_S64; + if (vstack_bx(stack)->type == REG_S64 || + vstack_bx(stack)->type == REG_U64) + insn->op = BYTECODE_OP_NE_S64; else - insn->op = FILTER_OP_NE_DOUBLE_S64; + insn->op = BYTECODE_OP_NE_DOUBLE_S64; break; case REG_DOUBLE: if (vstack_bx(stack)->type == REG_UNKNOWN) break; - if (vstack_bx(stack)->type == REG_S64) - insn->op = FILTER_OP_NE_S64_DOUBLE; + if (vstack_bx(stack)->type == REG_S64 || + vstack_bx(stack)->type == REG_U64) + insn->op = BYTECODE_OP_NE_S64_DOUBLE; else - insn->op = FILTER_OP_NE_DOUBLE; + insn->op = BYTECODE_OP_NE_DOUBLE; break; case REG_UNKNOWN: break; /* Dynamic typing. 
*/ @@ -830,7 +849,7 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, break; } - case FILTER_OP_GT: + case BYTECODE_OP_GT: { struct binary_op *insn = (struct binary_op *) pc; @@ -847,23 +866,26 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, case REG_STRING: if (vstack_bx(stack)->type == REG_UNKNOWN) break; - insn->op = FILTER_OP_GT_STRING; + insn->op = BYTECODE_OP_GT_STRING; break; case REG_S64: + case REG_U64: if (vstack_bx(stack)->type == REG_UNKNOWN) break; - if (vstack_bx(stack)->type == REG_S64) - insn->op = FILTER_OP_GT_S64; + if (vstack_bx(stack)->type == REG_S64 || + vstack_bx(stack)->type == REG_U64) + insn->op = BYTECODE_OP_GT_S64; else - insn->op = FILTER_OP_GT_DOUBLE_S64; + insn->op = BYTECODE_OP_GT_DOUBLE_S64; break; case REG_DOUBLE: if (vstack_bx(stack)->type == REG_UNKNOWN) break; - if (vstack_bx(stack)->type == REG_S64) - insn->op = FILTER_OP_GT_S64_DOUBLE; + if (vstack_bx(stack)->type == REG_S64 || + vstack_bx(stack)->type == REG_U64) + insn->op = BYTECODE_OP_GT_S64_DOUBLE; else - insn->op = FILTER_OP_GT_DOUBLE; + insn->op = BYTECODE_OP_GT_DOUBLE; break; case REG_UNKNOWN: break; /* Dynamic typing. 
*/ @@ -878,7 +900,7 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, break; } - case FILTER_OP_LT: + case BYTECODE_OP_LT: { struct binary_op *insn = (struct binary_op *) pc; @@ -895,23 +917,26 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, case REG_STRING: if (vstack_bx(stack)->type == REG_UNKNOWN) break; - insn->op = FILTER_OP_LT_STRING; + insn->op = BYTECODE_OP_LT_STRING; break; case REG_S64: + case REG_U64: if (vstack_bx(stack)->type == REG_UNKNOWN) break; - if (vstack_bx(stack)->type == REG_S64) - insn->op = FILTER_OP_LT_S64; + if (vstack_bx(stack)->type == REG_S64 || + vstack_bx(stack)->type == REG_U64) + insn->op = BYTECODE_OP_LT_S64; else - insn->op = FILTER_OP_LT_DOUBLE_S64; + insn->op = BYTECODE_OP_LT_DOUBLE_S64; break; case REG_DOUBLE: if (vstack_bx(stack)->type == REG_UNKNOWN) break; - if (vstack_bx(stack)->type == REG_S64) - insn->op = FILTER_OP_LT_S64_DOUBLE; + if (vstack_bx(stack)->type == REG_S64 || + vstack_bx(stack)->type == REG_U64) + insn->op = BYTECODE_OP_LT_S64_DOUBLE; else - insn->op = FILTER_OP_LT_DOUBLE; + insn->op = BYTECODE_OP_LT_DOUBLE; break; case REG_UNKNOWN: break; /* Dynamic typing. 
*/ @@ -926,7 +951,7 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, break; } - case FILTER_OP_GE: + case BYTECODE_OP_GE: { struct binary_op *insn = (struct binary_op *) pc; @@ -943,23 +968,26 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, case REG_STRING: if (vstack_bx(stack)->type == REG_UNKNOWN) break; - insn->op = FILTER_OP_GE_STRING; + insn->op = BYTECODE_OP_GE_STRING; break; case REG_S64: + case REG_U64: if (vstack_bx(stack)->type == REG_UNKNOWN) break; - if (vstack_bx(stack)->type == REG_S64) - insn->op = FILTER_OP_GE_S64; + if (vstack_bx(stack)->type == REG_S64 || + vstack_bx(stack)->type == REG_U64) + insn->op = BYTECODE_OP_GE_S64; else - insn->op = FILTER_OP_GE_DOUBLE_S64; + insn->op = BYTECODE_OP_GE_DOUBLE_S64; break; case REG_DOUBLE: if (vstack_bx(stack)->type == REG_UNKNOWN) break; - if (vstack_bx(stack)->type == REG_S64) - insn->op = FILTER_OP_GE_S64_DOUBLE; + if (vstack_bx(stack)->type == REG_S64 || + vstack_bx(stack)->type == REG_U64) + insn->op = BYTECODE_OP_GE_S64_DOUBLE; else - insn->op = FILTER_OP_GE_DOUBLE; + insn->op = BYTECODE_OP_GE_DOUBLE; break; case REG_UNKNOWN: break; /* Dynamic typing. 
*/ @@ -969,11 +997,11 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, ret = -EINVAL; goto end; } - vstack_ax(stack)->type = REG_S64; + vstack_ax(stack)->type = REG_U64; next_pc += sizeof(struct binary_op); break; } - case FILTER_OP_LE: + case BYTECODE_OP_LE: { struct binary_op *insn = (struct binary_op *) pc; @@ -990,23 +1018,26 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, case REG_STRING: if (vstack_bx(stack)->type == REG_UNKNOWN) break; - insn->op = FILTER_OP_LE_STRING; + insn->op = BYTECODE_OP_LE_STRING; break; case REG_S64: + case REG_U64: if (vstack_bx(stack)->type == REG_UNKNOWN) break; - if (vstack_bx(stack)->type == REG_S64) - insn->op = FILTER_OP_LE_S64; + if (vstack_bx(stack)->type == REG_S64 || + vstack_bx(stack)->type == REG_U64) + insn->op = BYTECODE_OP_LE_S64; else - insn->op = FILTER_OP_LE_DOUBLE_S64; + insn->op = BYTECODE_OP_LE_DOUBLE_S64; break; case REG_DOUBLE: if (vstack_bx(stack)->type == REG_UNKNOWN) break; - if (vstack_bx(stack)->type == REG_S64) - insn->op = FILTER_OP_LE_S64_DOUBLE; + if (vstack_bx(stack)->type == REG_S64 || + vstack_bx(stack)->type == REG_U64) + insn->op = BYTECODE_OP_LE_S64_DOUBLE; else - insn->op = FILTER_OP_LE_DOUBLE; + insn->op = BYTECODE_OP_LE_DOUBLE; break; case REG_UNKNOWN: break; /* Dynamic typing. 
*/ @@ -1016,43 +1047,54 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, break; } - case FILTER_OP_EQ_STRING: - case FILTER_OP_NE_STRING: - case FILTER_OP_GT_STRING: - case FILTER_OP_LT_STRING: - case FILTER_OP_GE_STRING: - case FILTER_OP_LE_STRING: - case FILTER_OP_EQ_STAR_GLOB_STRING: - case FILTER_OP_NE_STAR_GLOB_STRING: - case FILTER_OP_EQ_S64: - case FILTER_OP_NE_S64: - case FILTER_OP_GT_S64: - case FILTER_OP_LT_S64: - case FILTER_OP_GE_S64: - case FILTER_OP_LE_S64: - case FILTER_OP_EQ_DOUBLE: - case FILTER_OP_NE_DOUBLE: - case FILTER_OP_GT_DOUBLE: - case FILTER_OP_LT_DOUBLE: - case FILTER_OP_GE_DOUBLE: - case FILTER_OP_LE_DOUBLE: - case FILTER_OP_EQ_DOUBLE_S64: - case FILTER_OP_NE_DOUBLE_S64: - case FILTER_OP_GT_DOUBLE_S64: - case FILTER_OP_LT_DOUBLE_S64: - case FILTER_OP_GE_DOUBLE_S64: - case FILTER_OP_LE_DOUBLE_S64: - case FILTER_OP_EQ_S64_DOUBLE: - case FILTER_OP_NE_S64_DOUBLE: - case FILTER_OP_GT_S64_DOUBLE: - case FILTER_OP_LT_S64_DOUBLE: - case FILTER_OP_GE_S64_DOUBLE: - case FILTER_OP_LE_S64_DOUBLE: - case FILTER_OP_BIT_RSHIFT: - case FILTER_OP_BIT_LSHIFT: - case FILTER_OP_BIT_AND: - case FILTER_OP_BIT_OR: - case FILTER_OP_BIT_XOR: + case BYTECODE_OP_EQ_STRING: + case BYTECODE_OP_NE_STRING: + case BYTECODE_OP_GT_STRING: + case BYTECODE_OP_LT_STRING: + case BYTECODE_OP_GE_STRING: + case BYTECODE_OP_LE_STRING: + case BYTECODE_OP_EQ_STAR_GLOB_STRING: + case BYTECODE_OP_NE_STAR_GLOB_STRING: + case BYTECODE_OP_EQ_S64: + case BYTECODE_OP_NE_S64: + case BYTECODE_OP_GT_S64: + case BYTECODE_OP_LT_S64: + case BYTECODE_OP_GE_S64: + case BYTECODE_OP_LE_S64: + case BYTECODE_OP_EQ_DOUBLE: + case BYTECODE_OP_NE_DOUBLE: + case BYTECODE_OP_GT_DOUBLE: + case BYTECODE_OP_LT_DOUBLE: + case BYTECODE_OP_GE_DOUBLE: + case BYTECODE_OP_LE_DOUBLE: + case BYTECODE_OP_EQ_DOUBLE_S64: + case BYTECODE_OP_NE_DOUBLE_S64: + case BYTECODE_OP_GT_DOUBLE_S64: + case BYTECODE_OP_LT_DOUBLE_S64: + case BYTECODE_OP_GE_DOUBLE_S64: + case BYTECODE_OP_LE_DOUBLE_S64: + case 
BYTECODE_OP_EQ_S64_DOUBLE: + case BYTECODE_OP_NE_S64_DOUBLE: + case BYTECODE_OP_GT_S64_DOUBLE: + case BYTECODE_OP_LT_S64_DOUBLE: + case BYTECODE_OP_GE_S64_DOUBLE: + case BYTECODE_OP_LE_S64_DOUBLE: + { + /* Pop 2, push 1 */ + if (vstack_pop(stack)) { + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_S64; + next_pc += sizeof(struct binary_op); + break; + } + + case BYTECODE_OP_BIT_RSHIFT: + case BYTECODE_OP_BIT_LSHIFT: + case BYTECODE_OP_BIT_AND: + case BYTECODE_OP_BIT_OR: + case BYTECODE_OP_BIT_XOR: { /* Pop 2, push 1 */ if (vstack_pop(stack)) { @@ -1065,7 +1107,7 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, } /* unary */ - case FILTER_OP_UNARY_PLUS: + case BYTECODE_OP_UNARY_PLUS: { struct unary_op *insn = (struct unary_op *) pc; @@ -1076,10 +1118,11 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, goto end; case REG_S64: - insn->op = FILTER_OP_UNARY_PLUS_S64; + case REG_U64: + insn->op = BYTECODE_OP_UNARY_PLUS_S64; break; case REG_DOUBLE: - insn->op = FILTER_OP_UNARY_PLUS_DOUBLE; + insn->op = BYTECODE_OP_UNARY_PLUS_DOUBLE; break; case REG_UNKNOWN: /* Dynamic typing. */ break; @@ -1089,7 +1132,7 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, break; } - case FILTER_OP_UNARY_MINUS: + case BYTECODE_OP_UNARY_MINUS: { struct unary_op *insn = (struct unary_op *) pc; @@ -1100,10 +1143,11 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, goto end; case REG_S64: - insn->op = FILTER_OP_UNARY_MINUS_S64; + case REG_U64: + insn->op = BYTECODE_OP_UNARY_MINUS_S64; break; case REG_DOUBLE: - insn->op = FILTER_OP_UNARY_MINUS_DOUBLE; + insn->op = BYTECODE_OP_UNARY_MINUS_DOUBLE; break; case REG_UNKNOWN: /* Dynamic typing. 
*/ break; @@ -1113,7 +1157,7 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, break; } - case FILTER_OP_UNARY_NOT: + case BYTECODE_OP_UNARY_NOT: { struct unary_op *insn = (struct unary_op *) pc; @@ -1124,10 +1168,11 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, goto end; case REG_S64: - insn->op = FILTER_OP_UNARY_NOT_S64; + case REG_U64: + insn->op = BYTECODE_OP_UNARY_NOT_S64; break; case REG_DOUBLE: - insn->op = FILTER_OP_UNARY_NOT_DOUBLE; + insn->op = BYTECODE_OP_UNARY_NOT_DOUBLE; break; case REG_UNKNOWN: /* Dynamic typing. */ break; @@ -1137,19 +1182,19 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, break; } - case FILTER_OP_UNARY_BIT_NOT: + case BYTECODE_OP_UNARY_BIT_NOT: { /* Pop 1, push 1 */ next_pc += sizeof(struct unary_op); break; } - case FILTER_OP_UNARY_PLUS_S64: - case FILTER_OP_UNARY_MINUS_S64: - case FILTER_OP_UNARY_NOT_S64: - case FILTER_OP_UNARY_PLUS_DOUBLE: - case FILTER_OP_UNARY_MINUS_DOUBLE: - case FILTER_OP_UNARY_NOT_DOUBLE: + case BYTECODE_OP_UNARY_PLUS_S64: + case BYTECODE_OP_UNARY_MINUS_S64: + case BYTECODE_OP_UNARY_NOT_S64: + case BYTECODE_OP_UNARY_PLUS_DOUBLE: + case BYTECODE_OP_UNARY_MINUS_DOUBLE: + case BYTECODE_OP_UNARY_NOT_DOUBLE: { /* Pop 1, push 1 */ next_pc += sizeof(struct unary_op); @@ -1157,8 +1202,8 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, } /* logical */ - case FILTER_OP_AND: - case FILTER_OP_OR: + case BYTECODE_OP_AND: + case BYTECODE_OP_OR: { /* Continue to next instruction */ /* Pop 1 when jump not taken */ @@ -1171,14 +1216,14 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, } /* load field ref */ - case FILTER_OP_LOAD_FIELD_REF: + case BYTECODE_OP_LOAD_FIELD_REF: { ERR("Unknown field ref type\n"); ret = -EINVAL; goto end; } /* get context ref */ - case FILTER_OP_GET_CONTEXT_REF: + case BYTECODE_OP_GET_CONTEXT_REF: { if (vstack_push(stack)) { ret = -EINVAL; @@ -1188,9 +1233,9 @@ int lttng_filter_specialize_bytecode(struct 
lttng_event *event, next_pc += sizeof(struct load_op) + sizeof(struct field_ref); break; } - case FILTER_OP_LOAD_FIELD_REF_STRING: - case FILTER_OP_LOAD_FIELD_REF_SEQUENCE: - case FILTER_OP_GET_CONTEXT_REF_STRING: + case BYTECODE_OP_LOAD_FIELD_REF_STRING: + case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE: + case BYTECODE_OP_GET_CONTEXT_REF_STRING: { if (vstack_push(stack)) { ret = -EINVAL; @@ -1200,8 +1245,8 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, next_pc += sizeof(struct load_op) + sizeof(struct field_ref); break; } - case FILTER_OP_LOAD_FIELD_REF_S64: - case FILTER_OP_GET_CONTEXT_REF_S64: + case BYTECODE_OP_LOAD_FIELD_REF_S64: + case BYTECODE_OP_GET_CONTEXT_REF_S64: { if (vstack_push(stack)) { ret = -EINVAL; @@ -1211,8 +1256,8 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, next_pc += sizeof(struct load_op) + sizeof(struct field_ref); break; } - case FILTER_OP_LOAD_FIELD_REF_DOUBLE: - case FILTER_OP_GET_CONTEXT_REF_DOUBLE: + case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE: + case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE: { if (vstack_push(stack)) { ret = -EINVAL; @@ -1224,7 +1269,7 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, } /* load from immediate operand */ - case FILTER_OP_LOAD_STRING: + case BYTECODE_OP_LOAD_STRING: { struct load_op *insn = (struct load_op *) pc; @@ -1237,7 +1282,7 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, break; } - case FILTER_OP_LOAD_STAR_GLOB_STRING: + case BYTECODE_OP_LOAD_STAR_GLOB_STRING: { struct load_op *insn = (struct load_op *) pc; @@ -1250,7 +1295,7 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, break; } - case FILTER_OP_LOAD_S64: + case BYTECODE_OP_LOAD_S64: { if (vstack_push(stack)) { ret = -EINVAL; @@ -1262,7 +1307,7 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, break; } - case FILTER_OP_LOAD_DOUBLE: + case BYTECODE_OP_LOAD_DOUBLE: { if (vstack_push(stack)) { ret = -EINVAL; @@ -1275,7 +1320,7 @@ int 
lttng_filter_specialize_bytecode(struct lttng_event *event, } /* cast */ - case FILTER_OP_CAST_TO_S64: + case BYTECODE_OP_CAST_TO_S64: { struct cast_op *insn = (struct cast_op *) pc; @@ -1291,12 +1336,13 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, ret = -EINVAL; goto end; case REG_S64: - insn->op = FILTER_OP_CAST_NOP; + insn->op = BYTECODE_OP_CAST_NOP; break; case REG_DOUBLE: - insn->op = FILTER_OP_CAST_DOUBLE_TO_S64; + insn->op = BYTECODE_OP_CAST_DOUBLE_TO_S64; break; case REG_UNKNOWN: + case REG_U64: break; } /* Pop 1, push 1 */ @@ -1304,14 +1350,14 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, next_pc += sizeof(struct cast_op); break; } - case FILTER_OP_CAST_DOUBLE_TO_S64: + case BYTECODE_OP_CAST_DOUBLE_TO_S64: { /* Pop 1, push 1 */ vstack_ax(stack)->type = REG_S64; next_pc += sizeof(struct cast_op); break; } - case FILTER_OP_CAST_NOP: + case BYTECODE_OP_CAST_NOP: { next_pc += sizeof(struct cast_op); break; @@ -1320,7 +1366,7 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, /* * Instructions for recursive traversal through composed types. 
*/ - case FILTER_OP_GET_CONTEXT_ROOT: + case BYTECODE_OP_GET_CONTEXT_ROOT: { if (vstack_push(stack)) { ret = -EINVAL; @@ -1331,7 +1377,7 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, next_pc += sizeof(struct load_op); break; } - case FILTER_OP_GET_APP_CONTEXT_ROOT: + case BYTECODE_OP_GET_APP_CONTEXT_ROOT: { if (vstack_push(stack)) { ret = -EINVAL; @@ -1342,7 +1388,7 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, next_pc += sizeof(struct load_op); break; } - case FILTER_OP_GET_PAYLOAD_ROOT: + case BYTECODE_OP_GET_PAYLOAD_ROOT: { if (vstack_push(stack)) { ret = -EINVAL; @@ -1354,7 +1400,7 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, break; } - case FILTER_OP_LOAD_FIELD: + case BYTECODE_OP_LOAD_FIELD: { struct load_op *insn = (struct load_op *) pc; @@ -1368,14 +1414,10 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, break; } - case FILTER_OP_LOAD_FIELD_S8: - case FILTER_OP_LOAD_FIELD_S16: - case FILTER_OP_LOAD_FIELD_S32: - case FILTER_OP_LOAD_FIELD_S64: - case FILTER_OP_LOAD_FIELD_U8: - case FILTER_OP_LOAD_FIELD_U16: - case FILTER_OP_LOAD_FIELD_U32: - case FILTER_OP_LOAD_FIELD_U64: + case BYTECODE_OP_LOAD_FIELD_S8: + case BYTECODE_OP_LOAD_FIELD_S16: + case BYTECODE_OP_LOAD_FIELD_S32: + case BYTECODE_OP_LOAD_FIELD_S64: { /* Pop 1, push 1 */ vstack_ax(stack)->type = REG_S64; @@ -1383,8 +1425,19 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, break; } - case FILTER_OP_LOAD_FIELD_STRING: - case FILTER_OP_LOAD_FIELD_SEQUENCE: + case BYTECODE_OP_LOAD_FIELD_U8: + case BYTECODE_OP_LOAD_FIELD_U16: + case BYTECODE_OP_LOAD_FIELD_U32: + case BYTECODE_OP_LOAD_FIELD_U64: + { + /* Pop 1, push 1 */ + vstack_ax(stack)->type = REG_U64; + next_pc += sizeof(struct load_op); + break; + } + + case BYTECODE_OP_LOAD_FIELD_STRING: + case BYTECODE_OP_LOAD_FIELD_SEQUENCE: { /* Pop 1, push 1 */ vstack_ax(stack)->type = REG_STRING; @@ -1392,7 +1445,7 @@ int lttng_filter_specialize_bytecode(struct 
lttng_event *event, break; } - case FILTER_OP_LOAD_FIELD_DOUBLE: + case BYTECODE_OP_LOAD_FIELD_DOUBLE: { /* Pop 1, push 1 */ vstack_ax(stack)->type = REG_DOUBLE; @@ -1400,7 +1453,7 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, break; } - case FILTER_OP_GET_SYMBOL: + case BYTECODE_OP_GET_SYMBOL: { struct load_op *insn = (struct load_op *) pc; @@ -1412,7 +1465,7 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, goto end; case LOAD_ROOT_CONTEXT: /* Lookup context field. */ - ret = specialize_context_lookup(session, + ret = specialize_context_lookup(*pctx, bytecode, insn, &vstack_ax(stack)->load); if (ret) @@ -1420,7 +1473,7 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, break; case LOAD_ROOT_APP_CONTEXT: /* Lookup app context field. */ - ret = specialize_app_context_lookup(session, + ret = specialize_app_context_lookup(pctx, bytecode, insn, &vstack_ax(stack)->load); if (ret) @@ -1428,7 +1481,7 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, break; case LOAD_ROOT_PAYLOAD: /* Lookup event payload field. */ - ret = specialize_event_payload_lookup(event, + ret = specialize_payload_lookup(event_desc, bytecode, insn, &vstack_ax(stack)->load); if (ret) @@ -1439,14 +1492,14 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, break; } - case FILTER_OP_GET_SYMBOL_FIELD: + case BYTECODE_OP_GET_SYMBOL_FIELD: { /* Always generated by specialize phase. 
*/ ret = -EINVAL; goto end; } - case FILTER_OP_GET_INDEX_U16: + case BYTECODE_OP_GET_INDEX_U16: { struct load_op *insn = (struct load_op *) pc; struct get_index_u16 *index = (struct get_index_u16 *) insn->data; @@ -1461,7 +1514,7 @@ int lttng_filter_specialize_bytecode(struct lttng_event *event, break; } - case FILTER_OP_GET_INDEX_U64: + case BYTECODE_OP_GET_INDEX_U64: { struct load_op *insn = (struct load_op *) pc; struct get_index_u64 *index = (struct get_index_u64 *) insn->data; diff --git a/liblttng-ust/lttng-filter-validator.c b/liblttng-ust/lttng-bytecode-validator.c similarity index 70% rename from liblttng-ust/lttng-filter-validator.c rename to liblttng-ust/lttng-bytecode-validator.c index 953bbdd7..f60c9367 100644 --- a/liblttng-ust/lttng-filter-validator.c +++ b/liblttng-ust/lttng-bytecode-validator.c @@ -1,7 +1,7 @@ /* - * lttng-filter-validator.c + * lttng-bytecode-validator.c * - * LTTng UST filter bytecode validator. + * LTTng UST bytecode validator. * * Copyright (C) 2010-2016 Mathieu Desnoyers * @@ -32,9 +32,10 @@ #include #include -#include "lttng-filter.h" +#include "lttng-bytecode.h" #include "lttng-hash-helper.h" #include "string-utils.h" +#include "ust-events-internal.h" /* * Number of merge points for hash table size. 
Hash table initialized to @@ -100,7 +101,7 @@ int merge_point_add_check(struct cds_lfht *ht, unsigned long target_pc, lttng_hash_seed); struct cds_lfht_node *ret; - dbg_printf("Filter: adding merge point at offset %lu, hash %lu\n", + dbg_printf("Bytecode: adding merge point at offset %lu, hash %lu\n", target_pc, hash); node = zmalloc(sizeof(struct lfht_mp_node)); if (!node) @@ -114,7 +115,7 @@ int merge_point_add_check(struct cds_lfht *ht, unsigned long target_pc, caa_container_of(ret, struct lfht_mp_node, node); /* Key already present */ - dbg_printf("Filter: compare merge points for offset %lu, hash %lu\n", + dbg_printf("Bytecode: compare merge points for offset %lu, hash %lu\n", target_pc, hash); free(node); if (merge_points_compare(stack, &ret_mp->stack)) { @@ -132,7 +133,7 @@ int merge_point_add_check(struct cds_lfht *ht, unsigned long target_pc, * (unknown), negative error value on error. */ static -int bin_op_compare_check(struct vstack *stack, filter_opcode_t opcode, +int bin_op_compare_check(struct vstack *stack, bytecode_opcode_t opcode, const char *str) { if (unlikely(!vstack_ax(stack) || !vstack_bx(stack))) @@ -154,11 +155,12 @@ int bin_op_compare_check(struct vstack *stack, filter_opcode_t opcode, case REG_STRING: break; case REG_STAR_GLOB_STRING: - if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) { + if (opcode != BYTECODE_OP_EQ && opcode != BYTECODE_OP_NE) { goto error_mismatch; } break; case REG_S64: + case REG_U64: case REG_DOUBLE: goto error_mismatch; } @@ -171,17 +173,19 @@ int bin_op_compare_check(struct vstack *stack, filter_opcode_t opcode, case REG_UNKNOWN: goto unknown; case REG_STRING: - if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) { + if (opcode != BYTECODE_OP_EQ && opcode != BYTECODE_OP_NE) { goto error_mismatch; } break; case REG_STAR_GLOB_STRING: case REG_S64: + case REG_U64: case REG_DOUBLE: goto error_mismatch; } break; case REG_S64: + case REG_U64: case REG_DOUBLE: switch (vstack_bx(stack)->type) { default: @@ -193,6 
+197,7 @@ int bin_op_compare_check(struct vstack *stack, filter_opcode_t opcode, case REG_STAR_GLOB_STRING: goto error_mismatch; case REG_S64: + case REG_U64: case REG_DOUBLE: break; } @@ -222,7 +227,7 @@ error_type: * (unknown), negative error value on error. */ static -int bin_op_bitwise_check(struct vstack *stack, filter_opcode_t opcode, +int bin_op_bitwise_check(struct vstack *stack, bytecode_opcode_t opcode, const char *str) { if (unlikely(!vstack_ax(stack) || !vstack_bx(stack))) @@ -235,6 +240,7 @@ int bin_op_bitwise_check(struct vstack *stack, filter_opcode_t opcode, case REG_UNKNOWN: goto unknown; case REG_S64: + case REG_U64: switch (vstack_bx(stack)->type) { default: goto error_type; @@ -242,6 +248,7 @@ int bin_op_bitwise_check(struct vstack *stack, filter_opcode_t opcode, case REG_UNKNOWN: goto unknown; case REG_S64: + case REG_U64: break; } break; @@ -288,18 +295,18 @@ int bytecode_validate_overflow(struct bytecode_runtime *bytecode, { int ret = 0; - switch (*(filter_opcode_t *) pc) { - case FILTER_OP_UNKNOWN: + switch (*(bytecode_opcode_t *) pc) { + case BYTECODE_OP_UNKNOWN: default: { ERR("unknown bytecode op %u\n", - (unsigned int) *(filter_opcode_t *) pc); + (unsigned int) *(bytecode_opcode_t *) pc); ret = -EINVAL; break; } - case FILTER_OP_RETURN: - case FILTER_OP_RETURN_S64: + case BYTECODE_OP_RETURN: + case BYTECODE_OP_RETURN_S64: { if (unlikely(pc + sizeof(struct return_op) > start_pc + bytecode->len)) { @@ -309,61 +316,61 @@ int bytecode_validate_overflow(struct bytecode_runtime *bytecode, } /* binary */ - case FILTER_OP_MUL: - case FILTER_OP_DIV: - case FILTER_OP_MOD: - case FILTER_OP_PLUS: - case FILTER_OP_MINUS: + case BYTECODE_OP_MUL: + case BYTECODE_OP_DIV: + case BYTECODE_OP_MOD: + case BYTECODE_OP_PLUS: + case BYTECODE_OP_MINUS: { ERR("unsupported bytecode op %u\n", - (unsigned int) *(filter_opcode_t *) pc); + (unsigned int) *(bytecode_opcode_t *) pc); ret = -EINVAL; break; } - case FILTER_OP_EQ: - case FILTER_OP_NE: - case FILTER_OP_GT: 
- case FILTER_OP_LT: - case FILTER_OP_GE: - case FILTER_OP_LE: - case FILTER_OP_EQ_STRING: - case FILTER_OP_NE_STRING: - case FILTER_OP_GT_STRING: - case FILTER_OP_LT_STRING: - case FILTER_OP_GE_STRING: - case FILTER_OP_LE_STRING: - case FILTER_OP_EQ_STAR_GLOB_STRING: - case FILTER_OP_NE_STAR_GLOB_STRING: - case FILTER_OP_EQ_S64: - case FILTER_OP_NE_S64: - case FILTER_OP_GT_S64: - case FILTER_OP_LT_S64: - case FILTER_OP_GE_S64: - case FILTER_OP_LE_S64: - case FILTER_OP_EQ_DOUBLE: - case FILTER_OP_NE_DOUBLE: - case FILTER_OP_GT_DOUBLE: - case FILTER_OP_LT_DOUBLE: - case FILTER_OP_GE_DOUBLE: - case FILTER_OP_LE_DOUBLE: - case FILTER_OP_EQ_DOUBLE_S64: - case FILTER_OP_NE_DOUBLE_S64: - case FILTER_OP_GT_DOUBLE_S64: - case FILTER_OP_LT_DOUBLE_S64: - case FILTER_OP_GE_DOUBLE_S64: - case FILTER_OP_LE_DOUBLE_S64: - case FILTER_OP_EQ_S64_DOUBLE: - case FILTER_OP_NE_S64_DOUBLE: - case FILTER_OP_GT_S64_DOUBLE: - case FILTER_OP_LT_S64_DOUBLE: - case FILTER_OP_GE_S64_DOUBLE: - case FILTER_OP_LE_S64_DOUBLE: - case FILTER_OP_BIT_RSHIFT: - case FILTER_OP_BIT_LSHIFT: - case FILTER_OP_BIT_AND: - case FILTER_OP_BIT_OR: - case FILTER_OP_BIT_XOR: + case BYTECODE_OP_EQ: + case BYTECODE_OP_NE: + case BYTECODE_OP_GT: + case BYTECODE_OP_LT: + case BYTECODE_OP_GE: + case BYTECODE_OP_LE: + case BYTECODE_OP_EQ_STRING: + case BYTECODE_OP_NE_STRING: + case BYTECODE_OP_GT_STRING: + case BYTECODE_OP_LT_STRING: + case BYTECODE_OP_GE_STRING: + case BYTECODE_OP_LE_STRING: + case BYTECODE_OP_EQ_STAR_GLOB_STRING: + case BYTECODE_OP_NE_STAR_GLOB_STRING: + case BYTECODE_OP_EQ_S64: + case BYTECODE_OP_NE_S64: + case BYTECODE_OP_GT_S64: + case BYTECODE_OP_LT_S64: + case BYTECODE_OP_GE_S64: + case BYTECODE_OP_LE_S64: + case BYTECODE_OP_EQ_DOUBLE: + case BYTECODE_OP_NE_DOUBLE: + case BYTECODE_OP_GT_DOUBLE: + case BYTECODE_OP_LT_DOUBLE: + case BYTECODE_OP_GE_DOUBLE: + case BYTECODE_OP_LE_DOUBLE: + case BYTECODE_OP_EQ_DOUBLE_S64: + case BYTECODE_OP_NE_DOUBLE_S64: + case BYTECODE_OP_GT_DOUBLE_S64: + case 
BYTECODE_OP_LT_DOUBLE_S64: + case BYTECODE_OP_GE_DOUBLE_S64: + case BYTECODE_OP_LE_DOUBLE_S64: + case BYTECODE_OP_EQ_S64_DOUBLE: + case BYTECODE_OP_NE_S64_DOUBLE: + case BYTECODE_OP_GT_S64_DOUBLE: + case BYTECODE_OP_LT_S64_DOUBLE: + case BYTECODE_OP_GE_S64_DOUBLE: + case BYTECODE_OP_LE_S64_DOUBLE: + case BYTECODE_OP_BIT_RSHIFT: + case BYTECODE_OP_BIT_LSHIFT: + case BYTECODE_OP_BIT_AND: + case BYTECODE_OP_BIT_OR: + case BYTECODE_OP_BIT_XOR: { if (unlikely(pc + sizeof(struct binary_op) > start_pc + bytecode->len)) { @@ -373,16 +380,16 @@ int bytecode_validate_overflow(struct bytecode_runtime *bytecode, } /* unary */ - case FILTER_OP_UNARY_PLUS: - case FILTER_OP_UNARY_MINUS: - case FILTER_OP_UNARY_NOT: - case FILTER_OP_UNARY_PLUS_S64: - case FILTER_OP_UNARY_MINUS_S64: - case FILTER_OP_UNARY_NOT_S64: - case FILTER_OP_UNARY_PLUS_DOUBLE: - case FILTER_OP_UNARY_MINUS_DOUBLE: - case FILTER_OP_UNARY_NOT_DOUBLE: - case FILTER_OP_UNARY_BIT_NOT: + case BYTECODE_OP_UNARY_PLUS: + case BYTECODE_OP_UNARY_MINUS: + case BYTECODE_OP_UNARY_NOT: + case BYTECODE_OP_UNARY_PLUS_S64: + case BYTECODE_OP_UNARY_MINUS_S64: + case BYTECODE_OP_UNARY_NOT_S64: + case BYTECODE_OP_UNARY_PLUS_DOUBLE: + case BYTECODE_OP_UNARY_MINUS_DOUBLE: + case BYTECODE_OP_UNARY_NOT_DOUBLE: + case BYTECODE_OP_UNARY_BIT_NOT: { if (unlikely(pc + sizeof(struct unary_op) > start_pc + bytecode->len)) { @@ -392,8 +399,8 @@ int bytecode_validate_overflow(struct bytecode_runtime *bytecode, } /* logical */ - case FILTER_OP_AND: - case FILTER_OP_OR: + case BYTECODE_OP_AND: + case BYTECODE_OP_OR: { if (unlikely(pc + sizeof(struct logical_op) > start_pc + bytecode->len)) { @@ -403,7 +410,7 @@ int bytecode_validate_overflow(struct bytecode_runtime *bytecode, } /* load field ref */ - case FILTER_OP_LOAD_FIELD_REF: + case BYTECODE_OP_LOAD_FIELD_REF: { ERR("Unknown field ref type\n"); ret = -EINVAL; @@ -411,14 +418,14 @@ int bytecode_validate_overflow(struct bytecode_runtime *bytecode, } /* get context ref */ - case 
FILTER_OP_GET_CONTEXT_REF: - case FILTER_OP_LOAD_FIELD_REF_STRING: - case FILTER_OP_LOAD_FIELD_REF_SEQUENCE: - case FILTER_OP_LOAD_FIELD_REF_S64: - case FILTER_OP_LOAD_FIELD_REF_DOUBLE: - case FILTER_OP_GET_CONTEXT_REF_STRING: - case FILTER_OP_GET_CONTEXT_REF_S64: - case FILTER_OP_GET_CONTEXT_REF_DOUBLE: + case BYTECODE_OP_GET_CONTEXT_REF: + case BYTECODE_OP_LOAD_FIELD_REF_STRING: + case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE: + case BYTECODE_OP_LOAD_FIELD_REF_S64: + case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE: + case BYTECODE_OP_GET_CONTEXT_REF_STRING: + case BYTECODE_OP_GET_CONTEXT_REF_S64: + case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE: { if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref) > start_pc + bytecode->len)) { @@ -428,8 +435,8 @@ int bytecode_validate_overflow(struct bytecode_runtime *bytecode, } /* load from immediate operand */ - case FILTER_OP_LOAD_STRING: - case FILTER_OP_LOAD_STAR_GLOB_STRING: + case BYTECODE_OP_LOAD_STRING: + case BYTECODE_OP_LOAD_STAR_GLOB_STRING: { struct load_op *insn = (struct load_op *) pc; uint32_t str_len, maxlen; @@ -449,7 +456,7 @@ int bytecode_validate_overflow(struct bytecode_runtime *bytecode, break; } - case FILTER_OP_LOAD_S64: + case BYTECODE_OP_LOAD_S64: { if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric) > start_pc + bytecode->len)) { @@ -458,7 +465,7 @@ int bytecode_validate_overflow(struct bytecode_runtime *bytecode, break; } - case FILTER_OP_LOAD_DOUBLE: + case BYTECODE_OP_LOAD_DOUBLE: { if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_double) > start_pc + bytecode->len)) { @@ -467,9 +474,9 @@ int bytecode_validate_overflow(struct bytecode_runtime *bytecode, break; } - case FILTER_OP_CAST_TO_S64: - case FILTER_OP_CAST_DOUBLE_TO_S64: - case FILTER_OP_CAST_NOP: + case BYTECODE_OP_CAST_TO_S64: + case BYTECODE_OP_CAST_DOUBLE_TO_S64: + case BYTECODE_OP_CAST_NOP: { if (unlikely(pc + sizeof(struct cast_op) > start_pc + bytecode->len)) { @@ -481,28 +488,28 @@ int 
bytecode_validate_overflow(struct bytecode_runtime *bytecode, /* * Instructions for recursive traversal through composed types. */ - case FILTER_OP_GET_CONTEXT_ROOT: - case FILTER_OP_GET_APP_CONTEXT_ROOT: - case FILTER_OP_GET_PAYLOAD_ROOT: - case FILTER_OP_LOAD_FIELD: - case FILTER_OP_LOAD_FIELD_S8: - case FILTER_OP_LOAD_FIELD_S16: - case FILTER_OP_LOAD_FIELD_S32: - case FILTER_OP_LOAD_FIELD_S64: - case FILTER_OP_LOAD_FIELD_U8: - case FILTER_OP_LOAD_FIELD_U16: - case FILTER_OP_LOAD_FIELD_U32: - case FILTER_OP_LOAD_FIELD_U64: - case FILTER_OP_LOAD_FIELD_STRING: - case FILTER_OP_LOAD_FIELD_SEQUENCE: - case FILTER_OP_LOAD_FIELD_DOUBLE: + case BYTECODE_OP_GET_CONTEXT_ROOT: + case BYTECODE_OP_GET_APP_CONTEXT_ROOT: + case BYTECODE_OP_GET_PAYLOAD_ROOT: + case BYTECODE_OP_LOAD_FIELD: + case BYTECODE_OP_LOAD_FIELD_S8: + case BYTECODE_OP_LOAD_FIELD_S16: + case BYTECODE_OP_LOAD_FIELD_S32: + case BYTECODE_OP_LOAD_FIELD_S64: + case BYTECODE_OP_LOAD_FIELD_U8: + case BYTECODE_OP_LOAD_FIELD_U16: + case BYTECODE_OP_LOAD_FIELD_U32: + case BYTECODE_OP_LOAD_FIELD_U64: + case BYTECODE_OP_LOAD_FIELD_STRING: + case BYTECODE_OP_LOAD_FIELD_SEQUENCE: + case BYTECODE_OP_LOAD_FIELD_DOUBLE: if (unlikely(pc + sizeof(struct load_op) > start_pc + bytecode->len)) { ret = -ERANGE; } break; - case FILTER_OP_GET_SYMBOL: + case BYTECODE_OP_GET_SYMBOL: { struct load_op *insn = (struct load_op *) pc; struct get_symbol *sym = (struct get_symbol *) insn->data; @@ -516,19 +523,19 @@ int bytecode_validate_overflow(struct bytecode_runtime *bytecode, break; } - case FILTER_OP_GET_SYMBOL_FIELD: + case BYTECODE_OP_GET_SYMBOL_FIELD: ERR("Unexpected get symbol field"); ret = -EINVAL; break; - case FILTER_OP_GET_INDEX_U16: + case BYTECODE_OP_GET_INDEX_U16: if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u16) > start_pc + bytecode->len)) { ret = -ERANGE; } break; - case FILTER_OP_GET_INDEX_U64: + case BYTECODE_OP_GET_INDEX_U64: if (unlikely(pc + sizeof(struct load_op) + sizeof(struct 
get_index_u64) > start_pc + bytecode->len)) { ret = -ERANGE; @@ -570,30 +577,30 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, char *pc) { int ret = 0; - const filter_opcode_t opcode = *(filter_opcode_t *) pc; + const bytecode_opcode_t opcode = *(bytecode_opcode_t *) pc; switch (opcode) { - case FILTER_OP_UNKNOWN: + case BYTECODE_OP_UNKNOWN: default: { ERR("unknown bytecode op %u\n", - (unsigned int) *(filter_opcode_t *) pc); + (unsigned int) *(bytecode_opcode_t *) pc); ret = -EINVAL; goto end; } - case FILTER_OP_RETURN: - case FILTER_OP_RETURN_S64: + case BYTECODE_OP_RETURN: + case BYTECODE_OP_RETURN_S64: { goto end; } /* binary */ - case FILTER_OP_MUL: - case FILTER_OP_DIV: - case FILTER_OP_MOD: - case FILTER_OP_PLUS: - case FILTER_OP_MINUS: + case BYTECODE_OP_MUL: + case BYTECODE_OP_DIV: + case BYTECODE_OP_MOD: + case BYTECODE_OP_PLUS: + case BYTECODE_OP_MINUS: { ERR("unsupported bytecode op %u\n", (unsigned int) opcode); @@ -601,42 +608,42 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, goto end; } - case FILTER_OP_EQ: + case BYTECODE_OP_EQ: { ret = bin_op_compare_check(stack, opcode, "=="); if (ret < 0) goto end; break; } - case FILTER_OP_NE: + case BYTECODE_OP_NE: { ret = bin_op_compare_check(stack, opcode, "!="); if (ret < 0) goto end; break; } - case FILTER_OP_GT: + case BYTECODE_OP_GT: { ret = bin_op_compare_check(stack, opcode, ">"); if (ret < 0) goto end; break; } - case FILTER_OP_LT: + case BYTECODE_OP_LT: { ret = bin_op_compare_check(stack, opcode, "<"); if (ret < 0) goto end; break; } - case FILTER_OP_GE: + case BYTECODE_OP_GE: { ret = bin_op_compare_check(stack, opcode, ">="); if (ret < 0) goto end; break; } - case FILTER_OP_LE: + case BYTECODE_OP_LE: { ret = bin_op_compare_check(stack, opcode, "<="); if (ret < 0) @@ -644,12 +651,12 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, break; } - case FILTER_OP_EQ_STRING: - case FILTER_OP_NE_STRING: - case FILTER_OP_GT_STRING: - case 
FILTER_OP_LT_STRING: - case FILTER_OP_GE_STRING: - case FILTER_OP_LE_STRING: + case BYTECODE_OP_EQ_STRING: + case BYTECODE_OP_NE_STRING: + case BYTECODE_OP_GT_STRING: + case BYTECODE_OP_LT_STRING: + case BYTECODE_OP_GE_STRING: + case BYTECODE_OP_LE_STRING: { if (!vstack_ax(stack) || !vstack_bx(stack)) { ERR("Empty stack\n"); @@ -665,8 +672,8 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, break; } - case FILTER_OP_EQ_STAR_GLOB_STRING: - case FILTER_OP_NE_STAR_GLOB_STRING: + case BYTECODE_OP_EQ_STAR_GLOB_STRING: + case BYTECODE_OP_NE_STAR_GLOB_STRING: { if (!vstack_ax(stack) || !vstack_bx(stack)) { ERR("Empty stack\n"); @@ -682,20 +689,32 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, break; } - case FILTER_OP_EQ_S64: - case FILTER_OP_NE_S64: - case FILTER_OP_GT_S64: - case FILTER_OP_LT_S64: - case FILTER_OP_GE_S64: - case FILTER_OP_LE_S64: + case BYTECODE_OP_EQ_S64: + case BYTECODE_OP_NE_S64: + case BYTECODE_OP_GT_S64: + case BYTECODE_OP_LT_S64: + case BYTECODE_OP_GE_S64: + case BYTECODE_OP_LE_S64: { if (!vstack_ax(stack) || !vstack_bx(stack)) { ERR("Empty stack\n"); ret = -EINVAL; goto end; } - if (vstack_ax(stack)->type != REG_S64 - || vstack_bx(stack)->type != REG_S64) { + switch (vstack_ax(stack)->type) { + case REG_S64: + case REG_U64: + break; + default: + ERR("Unexpected register type for s64 comparator\n"); + ret = -EINVAL; + goto end; + } + switch (vstack_bx(stack)->type) { + case REG_S64: + case REG_U64: + break; + default: ERR("Unexpected register type for s64 comparator\n"); ret = -EINVAL; goto end; @@ -703,12 +722,12 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, break; } - case FILTER_OP_EQ_DOUBLE: - case FILTER_OP_NE_DOUBLE: - case FILTER_OP_GT_DOUBLE: - case FILTER_OP_LT_DOUBLE: - case FILTER_OP_GE_DOUBLE: - case FILTER_OP_LE_DOUBLE: + case BYTECODE_OP_EQ_DOUBLE: + case BYTECODE_OP_NE_DOUBLE: + case BYTECODE_OP_GT_DOUBLE: + case BYTECODE_OP_LT_DOUBLE: + case 
BYTECODE_OP_GE_DOUBLE: + case BYTECODE_OP_LE_DOUBLE: { if (!vstack_ax(stack) || !vstack_bx(stack)) { ERR("Empty stack\n"); @@ -723,19 +742,31 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, break; } - case FILTER_OP_EQ_DOUBLE_S64: - case FILTER_OP_NE_DOUBLE_S64: - case FILTER_OP_GT_DOUBLE_S64: - case FILTER_OP_LT_DOUBLE_S64: - case FILTER_OP_GE_DOUBLE_S64: - case FILTER_OP_LE_DOUBLE_S64: + case BYTECODE_OP_EQ_DOUBLE_S64: + case BYTECODE_OP_NE_DOUBLE_S64: + case BYTECODE_OP_GT_DOUBLE_S64: + case BYTECODE_OP_LT_DOUBLE_S64: + case BYTECODE_OP_GE_DOUBLE_S64: + case BYTECODE_OP_LE_DOUBLE_S64: { if (!vstack_ax(stack) || !vstack_bx(stack)) { ERR("Empty stack\n"); ret = -EINVAL; goto end; } - if (vstack_ax(stack)->type != REG_S64 && vstack_bx(stack)->type != REG_DOUBLE) { + switch (vstack_ax(stack)->type) { + case REG_S64: + case REG_U64: + break; + default: + ERR("Double-S64 operator has unexpected register types\n"); + ret = -EINVAL; + goto end; + } + switch (vstack_bx(stack)->type) { + case REG_DOUBLE: + break; + default: ERR("Double-S64 operator has unexpected register types\n"); ret = -EINVAL; goto end; @@ -743,19 +774,31 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, break; } - case FILTER_OP_EQ_S64_DOUBLE: - case FILTER_OP_NE_S64_DOUBLE: - case FILTER_OP_GT_S64_DOUBLE: - case FILTER_OP_LT_S64_DOUBLE: - case FILTER_OP_GE_S64_DOUBLE: - case FILTER_OP_LE_S64_DOUBLE: + case BYTECODE_OP_EQ_S64_DOUBLE: + case BYTECODE_OP_NE_S64_DOUBLE: + case BYTECODE_OP_GT_S64_DOUBLE: + case BYTECODE_OP_LT_S64_DOUBLE: + case BYTECODE_OP_GE_S64_DOUBLE: + case BYTECODE_OP_LE_S64_DOUBLE: { if (!vstack_ax(stack) || !vstack_bx(stack)) { ERR("Empty stack\n"); ret = -EINVAL; goto end; } - if (vstack_ax(stack)->type != REG_DOUBLE && vstack_bx(stack)->type != REG_S64) { + switch (vstack_ax(stack)->type) { + case REG_DOUBLE: + break; + default: + ERR("S64-Double operator has unexpected register types\n"); + ret = -EINVAL; + goto end; + } + switch 
(vstack_bx(stack)->type) { + case REG_S64: + case REG_U64: + break; + default: ERR("S64-Double operator has unexpected register types\n"); ret = -EINVAL; goto end; @@ -763,36 +806,36 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, break; } - case FILTER_OP_BIT_RSHIFT: + case BYTECODE_OP_BIT_RSHIFT: ret = bin_op_bitwise_check(stack, opcode, ">>"); if (ret < 0) goto end; break; - case FILTER_OP_BIT_LSHIFT: + case BYTECODE_OP_BIT_LSHIFT: ret = bin_op_bitwise_check(stack, opcode, "<<"); if (ret < 0) goto end; break; - case FILTER_OP_BIT_AND: + case BYTECODE_OP_BIT_AND: ret = bin_op_bitwise_check(stack, opcode, "&"); if (ret < 0) goto end; break; - case FILTER_OP_BIT_OR: + case BYTECODE_OP_BIT_OR: ret = bin_op_bitwise_check(stack, opcode, "|"); if (ret < 0) goto end; break; - case FILTER_OP_BIT_XOR: + case BYTECODE_OP_BIT_XOR: ret = bin_op_bitwise_check(stack, opcode, "^"); if (ret < 0) goto end; break; /* unary */ - case FILTER_OP_UNARY_PLUS: - case FILTER_OP_UNARY_MINUS: - case FILTER_OP_UNARY_NOT: + case BYTECODE_OP_UNARY_PLUS: + case BYTECODE_OP_UNARY_MINUS: + case BYTECODE_OP_UNARY_NOT: { if (!vstack_ax(stack)) { ERR("Empty stack\n"); @@ -812,6 +855,8 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, goto end; case REG_S64: break; + case REG_U64: + break; case REG_DOUBLE: break; case REG_UNKNOWN: @@ -819,7 +864,7 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, } break; } - case FILTER_OP_UNARY_BIT_NOT: + case BYTECODE_OP_UNARY_BIT_NOT: { if (!vstack_ax(stack)) { ERR("Empty stack\n"); @@ -840,22 +885,25 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, goto end; case REG_S64: break; + case REG_U64: + break; case REG_UNKNOWN: break; } break; } - case FILTER_OP_UNARY_PLUS_S64: - case FILTER_OP_UNARY_MINUS_S64: - case FILTER_OP_UNARY_NOT_S64: + case BYTECODE_OP_UNARY_PLUS_S64: + case BYTECODE_OP_UNARY_MINUS_S64: + case BYTECODE_OP_UNARY_NOT_S64: { if (!vstack_ax(stack)) { ERR("Empty 
stack\n"); ret = -EINVAL; goto end; } - if (vstack_ax(stack)->type != REG_S64) { + if (vstack_ax(stack)->type != REG_S64 && + vstack_ax(stack)->type != REG_U64) { ERR("Invalid register type\n"); ret = -EINVAL; goto end; @@ -863,9 +911,9 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, break; } - case FILTER_OP_UNARY_PLUS_DOUBLE: - case FILTER_OP_UNARY_MINUS_DOUBLE: - case FILTER_OP_UNARY_NOT_DOUBLE: + case BYTECODE_OP_UNARY_PLUS_DOUBLE: + case BYTECODE_OP_UNARY_MINUS_DOUBLE: + case BYTECODE_OP_UNARY_NOT_DOUBLE: { if (!vstack_ax(stack)) { ERR("Empty stack\n"); @@ -881,8 +929,8 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, } /* logical */ - case FILTER_OP_AND: - case FILTER_OP_OR: + case BYTECODE_OP_AND: + case BYTECODE_OP_OR: { struct logical_op *insn = (struct logical_op *) pc; @@ -892,8 +940,9 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, goto end; } if (vstack_ax(stack)->type != REG_S64 + && vstack_ax(stack)->type != REG_U64 && vstack_ax(stack)->type != REG_UNKNOWN) { - ERR("Logical comparator expects S64 or dynamic register\n"); + ERR("Logical comparator expects S64, U64 or dynamic register\n"); ret = -EINVAL; goto end; } @@ -909,14 +958,14 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, } /* load field ref */ - case FILTER_OP_LOAD_FIELD_REF: + case BYTECODE_OP_LOAD_FIELD_REF: { ERR("Unknown field ref type\n"); ret = -EINVAL; goto end; } - case FILTER_OP_LOAD_FIELD_REF_STRING: - case FILTER_OP_LOAD_FIELD_REF_SEQUENCE: + case BYTECODE_OP_LOAD_FIELD_REF_STRING: + case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE: { struct load_op *insn = (struct load_op *) pc; struct field_ref *ref = (struct field_ref *) insn->data; @@ -925,7 +974,7 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, ref->offset); break; } - case FILTER_OP_LOAD_FIELD_REF_S64: + case BYTECODE_OP_LOAD_FIELD_REF_S64: { struct load_op *insn = (struct load_op *) pc; struct field_ref *ref = (struct 
field_ref *) insn->data; @@ -934,7 +983,7 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, ref->offset); break; } - case FILTER_OP_LOAD_FIELD_REF_DOUBLE: + case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE: { struct load_op *insn = (struct load_op *) pc; struct field_ref *ref = (struct field_ref *) insn->data; @@ -945,24 +994,24 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, } /* load from immediate operand */ - case FILTER_OP_LOAD_STRING: - case FILTER_OP_LOAD_STAR_GLOB_STRING: + case BYTECODE_OP_LOAD_STRING: + case BYTECODE_OP_LOAD_STAR_GLOB_STRING: { break; } - case FILTER_OP_LOAD_S64: + case BYTECODE_OP_LOAD_S64: { break; } - case FILTER_OP_LOAD_DOUBLE: + case BYTECODE_OP_LOAD_DOUBLE: { break; } - case FILTER_OP_CAST_TO_S64: - case FILTER_OP_CAST_DOUBLE_TO_S64: + case BYTECODE_OP_CAST_TO_S64: + case BYTECODE_OP_CAST_DOUBLE_TO_S64: { struct cast_op *insn = (struct cast_op *) pc; @@ -984,12 +1033,14 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, goto end; case REG_S64: break; + case REG_U64: + break; case REG_DOUBLE: break; case REG_UNKNOWN: break; } - if (insn->op == FILTER_OP_CAST_DOUBLE_TO_S64) { + if (insn->op == BYTECODE_OP_CAST_DOUBLE_TO_S64) { if (vstack_ax(stack)->type != REG_DOUBLE) { ERR("Cast expects double\n"); ret = -EINVAL; @@ -998,13 +1049,13 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, } break; } - case FILTER_OP_CAST_NOP: + case BYTECODE_OP_CAST_NOP: { break; } /* get context ref */ - case FILTER_OP_GET_CONTEXT_REF: + case BYTECODE_OP_GET_CONTEXT_REF: { struct load_op *insn = (struct load_op *) pc; struct field_ref *ref = (struct field_ref *) insn->data; @@ -1013,7 +1064,7 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, ref->offset); break; } - case FILTER_OP_GET_CONTEXT_REF_STRING: + case BYTECODE_OP_GET_CONTEXT_REF_STRING: { struct load_op *insn = (struct load_op *) pc; struct field_ref *ref = (struct field_ref *) insn->data; @@ -1022,7 
+1073,7 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, ref->offset); break; } - case FILTER_OP_GET_CONTEXT_REF_S64: + case BYTECODE_OP_GET_CONTEXT_REF_S64: { struct load_op *insn = (struct load_op *) pc; struct field_ref *ref = (struct field_ref *) insn->data; @@ -1031,7 +1082,7 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, ref->offset); break; } - case FILTER_OP_GET_CONTEXT_REF_DOUBLE: + case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE: { struct load_op *insn = (struct load_op *) pc; struct field_ref *ref = (struct field_ref *) insn->data; @@ -1044,22 +1095,22 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, /* * Instructions for recursive traversal through composed types. */ - case FILTER_OP_GET_CONTEXT_ROOT: + case BYTECODE_OP_GET_CONTEXT_ROOT: { dbg_printf("Validate get context root\n"); break; } - case FILTER_OP_GET_APP_CONTEXT_ROOT: + case BYTECODE_OP_GET_APP_CONTEXT_ROOT: { dbg_printf("Validate get app context root\n"); break; } - case FILTER_OP_GET_PAYLOAD_ROOT: + case BYTECODE_OP_GET_PAYLOAD_ROOT: { dbg_printf("Validate get payload root\n"); break; } - case FILTER_OP_LOAD_FIELD: + case BYTECODE_OP_LOAD_FIELD: { /* * We tolerate that field type is unknown at validation, @@ -1069,63 +1120,63 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, dbg_printf("Validate load field\n"); break; } - case FILTER_OP_LOAD_FIELD_S8: + case BYTECODE_OP_LOAD_FIELD_S8: { dbg_printf("Validate load field s8\n"); break; } - case FILTER_OP_LOAD_FIELD_S16: + case BYTECODE_OP_LOAD_FIELD_S16: { dbg_printf("Validate load field s16\n"); break; } - case FILTER_OP_LOAD_FIELD_S32: + case BYTECODE_OP_LOAD_FIELD_S32: { dbg_printf("Validate load field s32\n"); break; } - case FILTER_OP_LOAD_FIELD_S64: + case BYTECODE_OP_LOAD_FIELD_S64: { dbg_printf("Validate load field s64\n"); break; } - case FILTER_OP_LOAD_FIELD_U8: + case BYTECODE_OP_LOAD_FIELD_U8: { dbg_printf("Validate load field u8\n"); break; } - case 
FILTER_OP_LOAD_FIELD_U16: + case BYTECODE_OP_LOAD_FIELD_U16: { dbg_printf("Validate load field u16\n"); break; } - case FILTER_OP_LOAD_FIELD_U32: + case BYTECODE_OP_LOAD_FIELD_U32: { dbg_printf("Validate load field u32\n"); break; } - case FILTER_OP_LOAD_FIELD_U64: + case BYTECODE_OP_LOAD_FIELD_U64: { dbg_printf("Validate load field u64\n"); break; } - case FILTER_OP_LOAD_FIELD_STRING: + case BYTECODE_OP_LOAD_FIELD_STRING: { dbg_printf("Validate load field string\n"); break; } - case FILTER_OP_LOAD_FIELD_SEQUENCE: + case BYTECODE_OP_LOAD_FIELD_SEQUENCE: { dbg_printf("Validate load field sequence\n"); break; } - case FILTER_OP_LOAD_FIELD_DOUBLE: + case BYTECODE_OP_LOAD_FIELD_DOUBLE: { dbg_printf("Validate load field double\n"); break; } - case FILTER_OP_GET_SYMBOL: + case BYTECODE_OP_GET_SYMBOL: { struct load_op *insn = (struct load_op *) pc; struct get_symbol *sym = (struct get_symbol *) insn->data; @@ -1134,7 +1185,7 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, break; } - case FILTER_OP_GET_SYMBOL_FIELD: + case BYTECODE_OP_GET_SYMBOL_FIELD: { struct load_op *insn = (struct load_op *) pc; struct get_symbol *sym = (struct get_symbol *) insn->data; @@ -1143,7 +1194,7 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, break; } - case FILTER_OP_GET_INDEX_U16: + case BYTECODE_OP_GET_INDEX_U16: { struct load_op *insn = (struct load_op *) pc; struct get_index_u16 *get_index = (struct get_index_u16 *) insn->data; @@ -1152,7 +1203,7 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, break; } - case FILTER_OP_GET_INDEX_U64: + case BYTECODE_OP_GET_INDEX_U64: { struct load_op *insn = (struct load_op *) pc; struct get_index_u64 *get_index = (struct get_index_u64 *) insn->data; @@ -1198,7 +1249,7 @@ int validate_instruction_all_contexts(struct bytecode_runtime *bytecode, if (node) { mp_node = caa_container_of(node, struct lfht_mp_node, node); - dbg_printf("Filter: validate merge point at offset %lu\n", + 
dbg_printf("Bytecode: validate merge point at offset %lu\n", target_pc); if (merge_points_compare(stack, &mp_node->stack)) { ERR("Merge points differ for offset %lu\n", @@ -1206,7 +1257,7 @@ int validate_instruction_all_contexts(struct bytecode_runtime *bytecode, return -EINVAL; } /* Once validated, we can remove the merge point */ - dbg_printf("Filter: remove merge point at offset %lu\n", + dbg_printf("Bytecode: remove merge point at offset %lu\n", target_pc); ret = cds_lfht_del(merge_points, node); assert(!ret); @@ -1230,17 +1281,17 @@ int exec_insn(struct bytecode_runtime *bytecode, int ret = 1; char *next_pc = *_next_pc; - switch (*(filter_opcode_t *) pc) { - case FILTER_OP_UNKNOWN: + switch (*(bytecode_opcode_t *) pc) { + case BYTECODE_OP_UNKNOWN: default: { ERR("unknown bytecode op %u\n", - (unsigned int) *(filter_opcode_t *) pc); + (unsigned int) *(bytecode_opcode_t *) pc); ret = -EINVAL; goto end; } - case FILTER_OP_RETURN: + case BYTECODE_OP_RETURN: { if (!vstack_ax(stack)) { ERR("Empty stack\n"); @@ -1249,6 +1300,10 @@ int exec_insn(struct bytecode_runtime *bytecode, } switch (vstack_ax(stack)->type) { case REG_S64: + case REG_U64: + case REG_DOUBLE: + case REG_STRING: + case REG_PTR: case REG_UNKNOWN: break; default: @@ -1261,7 +1316,7 @@ int exec_insn(struct bytecode_runtime *bytecode, ret = 0; goto end; } - case FILTER_OP_RETURN_S64: + case BYTECODE_OP_RETURN_S64: { if (!vstack_ax(stack)) { ERR("Empty stack\n"); @@ -1270,6 +1325,7 @@ int exec_insn(struct bytecode_runtime *bytecode, } switch (vstack_ax(stack)->type) { case REG_S64: + case REG_U64: break; default: case REG_UNKNOWN: @@ -1284,61 +1340,56 @@ int exec_insn(struct bytecode_runtime *bytecode, } /* binary */ - case FILTER_OP_MUL: - case FILTER_OP_DIV: - case FILTER_OP_MOD: - case FILTER_OP_PLUS: - case FILTER_OP_MINUS: + case BYTECODE_OP_MUL: + case BYTECODE_OP_DIV: + case BYTECODE_OP_MOD: + case BYTECODE_OP_PLUS: + case BYTECODE_OP_MINUS: { ERR("unsupported bytecode op %u\n", - (unsigned int) 
*(filter_opcode_t *) pc); + (unsigned int) *(bytecode_opcode_t *) pc); ret = -EINVAL; goto end; } - case FILTER_OP_EQ: - case FILTER_OP_NE: - case FILTER_OP_GT: - case FILTER_OP_LT: - case FILTER_OP_GE: - case FILTER_OP_LE: - case FILTER_OP_EQ_STRING: - case FILTER_OP_NE_STRING: - case FILTER_OP_GT_STRING: - case FILTER_OP_LT_STRING: - case FILTER_OP_GE_STRING: - case FILTER_OP_LE_STRING: - case FILTER_OP_EQ_STAR_GLOB_STRING: - case FILTER_OP_NE_STAR_GLOB_STRING: - case FILTER_OP_EQ_S64: - case FILTER_OP_NE_S64: - case FILTER_OP_GT_S64: - case FILTER_OP_LT_S64: - case FILTER_OP_GE_S64: - case FILTER_OP_LE_S64: - case FILTER_OP_EQ_DOUBLE: - case FILTER_OP_NE_DOUBLE: - case FILTER_OP_GT_DOUBLE: - case FILTER_OP_LT_DOUBLE: - case FILTER_OP_GE_DOUBLE: - case FILTER_OP_LE_DOUBLE: - case FILTER_OP_EQ_DOUBLE_S64: - case FILTER_OP_NE_DOUBLE_S64: - case FILTER_OP_GT_DOUBLE_S64: - case FILTER_OP_LT_DOUBLE_S64: - case FILTER_OP_GE_DOUBLE_S64: - case FILTER_OP_LE_DOUBLE_S64: - case FILTER_OP_EQ_S64_DOUBLE: - case FILTER_OP_NE_S64_DOUBLE: - case FILTER_OP_GT_S64_DOUBLE: - case FILTER_OP_LT_S64_DOUBLE: - case FILTER_OP_GE_S64_DOUBLE: - case FILTER_OP_LE_S64_DOUBLE: - case FILTER_OP_BIT_RSHIFT: - case FILTER_OP_BIT_LSHIFT: - case FILTER_OP_BIT_AND: - case FILTER_OP_BIT_OR: - case FILTER_OP_BIT_XOR: + case BYTECODE_OP_EQ: + case BYTECODE_OP_NE: + case BYTECODE_OP_GT: + case BYTECODE_OP_LT: + case BYTECODE_OP_GE: + case BYTECODE_OP_LE: + case BYTECODE_OP_EQ_STRING: + case BYTECODE_OP_NE_STRING: + case BYTECODE_OP_GT_STRING: + case BYTECODE_OP_LT_STRING: + case BYTECODE_OP_GE_STRING: + case BYTECODE_OP_LE_STRING: + case BYTECODE_OP_EQ_STAR_GLOB_STRING: + case BYTECODE_OP_NE_STAR_GLOB_STRING: + case BYTECODE_OP_EQ_S64: + case BYTECODE_OP_NE_S64: + case BYTECODE_OP_GT_S64: + case BYTECODE_OP_LT_S64: + case BYTECODE_OP_GE_S64: + case BYTECODE_OP_LE_S64: + case BYTECODE_OP_EQ_DOUBLE: + case BYTECODE_OP_NE_DOUBLE: + case BYTECODE_OP_GT_DOUBLE: + case BYTECODE_OP_LT_DOUBLE: + case 
BYTECODE_OP_GE_DOUBLE: + case BYTECODE_OP_LE_DOUBLE: + case BYTECODE_OP_EQ_DOUBLE_S64: + case BYTECODE_OP_NE_DOUBLE_S64: + case BYTECODE_OP_GT_DOUBLE_S64: + case BYTECODE_OP_LT_DOUBLE_S64: + case BYTECODE_OP_GE_DOUBLE_S64: + case BYTECODE_OP_LE_DOUBLE_S64: + case BYTECODE_OP_EQ_S64_DOUBLE: + case BYTECODE_OP_NE_S64_DOUBLE: + case BYTECODE_OP_GT_S64_DOUBLE: + case BYTECODE_OP_LT_S64_DOUBLE: + case BYTECODE_OP_GE_S64_DOUBLE: + case BYTECODE_OP_LE_S64_DOUBLE: { /* Pop 2, push 1 */ if (vstack_pop(stack)) { @@ -1352,6 +1403,7 @@ int exec_insn(struct bytecode_runtime *bytecode, } switch (vstack_ax(stack)->type) { case REG_S64: + case REG_U64: case REG_DOUBLE: case REG_STRING: case REG_STAR_GLOB_STRING: @@ -1369,9 +1421,45 @@ int exec_insn(struct bytecode_runtime *bytecode, break; } + case BYTECODE_OP_BIT_RSHIFT: + case BYTECODE_OP_BIT_LSHIFT: + case BYTECODE_OP_BIT_AND: + case BYTECODE_OP_BIT_OR: + case BYTECODE_OP_BIT_XOR: + { + /* Pop 2, push 1 */ + if (vstack_pop(stack)) { + ret = -EINVAL; + goto end; + } + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + switch (vstack_ax(stack)->type) { + case REG_S64: + case REG_U64: + case REG_DOUBLE: + case REG_STRING: + case REG_STAR_GLOB_STRING: + case REG_UNKNOWN: + break; + default: + ERR("Unexpected register type %d for operation\n", + (int) vstack_ax(stack)->type); + ret = -EINVAL; + goto end; + } + + vstack_ax(stack)->type = REG_U64; + next_pc += sizeof(struct binary_op); + break; + } + /* unary */ - case FILTER_OP_UNARY_PLUS: - case FILTER_OP_UNARY_MINUS: + case BYTECODE_OP_UNARY_PLUS: + case BYTECODE_OP_UNARY_MINUS: { /* Pop 1, push 1 */ if (!vstack_ax(stack)) { @@ -1383,6 +1471,7 @@ int exec_insn(struct bytecode_runtime *bytecode, case REG_UNKNOWN: case REG_DOUBLE: case REG_S64: + case REG_U64: break; default: ERR("Unexpected register type %d for operation\n", @@ -1395,9 +1484,9 @@ int exec_insn(struct bytecode_runtime *bytecode, break; } - case FILTER_OP_UNARY_PLUS_S64: - case 
FILTER_OP_UNARY_MINUS_S64: - case FILTER_OP_UNARY_NOT_S64: + case BYTECODE_OP_UNARY_PLUS_S64: + case BYTECODE_OP_UNARY_MINUS_S64: + case BYTECODE_OP_UNARY_NOT_S64: { /* Pop 1, push 1 */ if (!vstack_ax(stack)) { @@ -1407,6 +1496,7 @@ int exec_insn(struct bytecode_runtime *bytecode, } switch (vstack_ax(stack)->type) { case REG_S64: + case REG_U64: break; default: ERR("Unexpected register type %d for operation\n", @@ -1415,12 +1505,11 @@ int exec_insn(struct bytecode_runtime *bytecode, goto end; } - vstack_ax(stack)->type = REG_S64; next_pc += sizeof(struct unary_op); break; } - case FILTER_OP_UNARY_NOT: + case BYTECODE_OP_UNARY_NOT: { /* Pop 1, push 1 */ if (!vstack_ax(stack)) { @@ -1432,6 +1521,7 @@ int exec_insn(struct bytecode_runtime *bytecode, case REG_UNKNOWN: case REG_DOUBLE: case REG_S64: + case REG_U64: break; default: ERR("Unexpected register type %d for operation\n", @@ -1440,12 +1530,11 @@ int exec_insn(struct bytecode_runtime *bytecode, goto end; } - vstack_ax(stack)->type = REG_S64; next_pc += sizeof(struct unary_op); break; } - case FILTER_OP_UNARY_BIT_NOT: + case BYTECODE_OP_UNARY_BIT_NOT: { /* Pop 1, push 1 */ if (!vstack_ax(stack)) { @@ -1456,6 +1545,7 @@ int exec_insn(struct bytecode_runtime *bytecode, switch (vstack_ax(stack)->type) { case REG_UNKNOWN: case REG_S64: + case REG_U64: break; case REG_DOUBLE: default: @@ -1465,12 +1555,12 @@ int exec_insn(struct bytecode_runtime *bytecode, goto end; } - vstack_ax(stack)->type = REG_S64; + vstack_ax(stack)->type = REG_U64; next_pc += sizeof(struct unary_op); break; } - case FILTER_OP_UNARY_NOT_DOUBLE: + case BYTECODE_OP_UNARY_NOT_DOUBLE: { /* Pop 1, push 1 */ if (!vstack_ax(stack)) { @@ -1493,8 +1583,8 @@ int exec_insn(struct bytecode_runtime *bytecode, break; } - case FILTER_OP_UNARY_PLUS_DOUBLE: - case FILTER_OP_UNARY_MINUS_DOUBLE: + case BYTECODE_OP_UNARY_PLUS_DOUBLE: + case BYTECODE_OP_UNARY_MINUS_DOUBLE: { /* Pop 1, push 1 */ if (!vstack_ax(stack)) { @@ -1518,8 +1608,8 @@ int exec_insn(struct 
bytecode_runtime *bytecode, } /* logical */ - case FILTER_OP_AND: - case FILTER_OP_OR: + case BYTECODE_OP_AND: + case BYTECODE_OP_OR: { struct logical_op *insn = (struct logical_op *) pc; int merge_ret; @@ -1540,6 +1630,7 @@ int exec_insn(struct bytecode_runtime *bytecode, /* There is always a cast-to-s64 operation before a or/and op. */ switch (vstack_ax(stack)->type) { case REG_S64: + case REG_U64: break; default: ERR("Incorrect register type %d for operation\n", @@ -1559,14 +1650,14 @@ int exec_insn(struct bytecode_runtime *bytecode, } /* load field ref */ - case FILTER_OP_LOAD_FIELD_REF: + case BYTECODE_OP_LOAD_FIELD_REF: { ERR("Unknown field ref type\n"); ret = -EINVAL; goto end; } /* get context ref */ - case FILTER_OP_GET_CONTEXT_REF: + case BYTECODE_OP_GET_CONTEXT_REF: { if (vstack_push(stack)) { ret = -EINVAL; @@ -1576,9 +1667,9 @@ int exec_insn(struct bytecode_runtime *bytecode, next_pc += sizeof(struct load_op) + sizeof(struct field_ref); break; } - case FILTER_OP_LOAD_FIELD_REF_STRING: - case FILTER_OP_LOAD_FIELD_REF_SEQUENCE: - case FILTER_OP_GET_CONTEXT_REF_STRING: + case BYTECODE_OP_LOAD_FIELD_REF_STRING: + case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE: + case BYTECODE_OP_GET_CONTEXT_REF_STRING: { if (vstack_push(stack)) { ret = -EINVAL; @@ -1588,8 +1679,8 @@ int exec_insn(struct bytecode_runtime *bytecode, next_pc += sizeof(struct load_op) + sizeof(struct field_ref); break; } - case FILTER_OP_LOAD_FIELD_REF_S64: - case FILTER_OP_GET_CONTEXT_REF_S64: + case BYTECODE_OP_LOAD_FIELD_REF_S64: + case BYTECODE_OP_GET_CONTEXT_REF_S64: { if (vstack_push(stack)) { ret = -EINVAL; @@ -1599,8 +1690,8 @@ int exec_insn(struct bytecode_runtime *bytecode, next_pc += sizeof(struct load_op) + sizeof(struct field_ref); break; } - case FILTER_OP_LOAD_FIELD_REF_DOUBLE: - case FILTER_OP_GET_CONTEXT_REF_DOUBLE: + case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE: + case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE: { if (vstack_push(stack)) { ret = -EINVAL; @@ -1612,7 +1703,7 @@ int 
exec_insn(struct bytecode_runtime *bytecode, } /* load from immediate operand */ - case FILTER_OP_LOAD_STRING: + case BYTECODE_OP_LOAD_STRING: { struct load_op *insn = (struct load_op *) pc; @@ -1625,7 +1716,7 @@ int exec_insn(struct bytecode_runtime *bytecode, break; } - case FILTER_OP_LOAD_STAR_GLOB_STRING: + case BYTECODE_OP_LOAD_STAR_GLOB_STRING: { struct load_op *insn = (struct load_op *) pc; @@ -1638,7 +1729,7 @@ int exec_insn(struct bytecode_runtime *bytecode, break; } - case FILTER_OP_LOAD_S64: + case BYTECODE_OP_LOAD_S64: { if (vstack_push(stack)) { ret = -EINVAL; @@ -1650,7 +1741,7 @@ int exec_insn(struct bytecode_runtime *bytecode, break; } - case FILTER_OP_LOAD_DOUBLE: + case BYTECODE_OP_LOAD_DOUBLE: { if (vstack_push(stack)) { ret = -EINVAL; @@ -1662,8 +1753,8 @@ int exec_insn(struct bytecode_runtime *bytecode, break; } - case FILTER_OP_CAST_TO_S64: - case FILTER_OP_CAST_DOUBLE_TO_S64: + case BYTECODE_OP_CAST_TO_S64: + case BYTECODE_OP_CAST_DOUBLE_TO_S64: { /* Pop 1, push 1 */ if (!vstack_ax(stack)) { @@ -1673,6 +1764,7 @@ int exec_insn(struct bytecode_runtime *bytecode, } switch (vstack_ax(stack)->type) { case REG_S64: + case REG_U64: case REG_DOUBLE: case REG_UNKNOWN: break; @@ -1686,7 +1778,7 @@ int exec_insn(struct bytecode_runtime *bytecode, next_pc += sizeof(struct cast_op); break; } - case FILTER_OP_CAST_NOP: + case BYTECODE_OP_CAST_NOP: { next_pc += sizeof(struct cast_op); break; @@ -1695,9 +1787,9 @@ int exec_insn(struct bytecode_runtime *bytecode, /* * Instructions for recursive traversal through composed types. 
*/ - case FILTER_OP_GET_CONTEXT_ROOT: - case FILTER_OP_GET_APP_CONTEXT_ROOT: - case FILTER_OP_GET_PAYLOAD_ROOT: + case BYTECODE_OP_GET_CONTEXT_ROOT: + case BYTECODE_OP_GET_APP_CONTEXT_ROOT: + case BYTECODE_OP_GET_PAYLOAD_ROOT: { if (vstack_push(stack)) { ret = -EINVAL; @@ -1708,7 +1800,7 @@ int exec_insn(struct bytecode_runtime *bytecode, break; } - case FILTER_OP_LOAD_FIELD: + case BYTECODE_OP_LOAD_FIELD: { /* Pop 1, push 1 */ if (!vstack_ax(stack)) { @@ -1726,14 +1818,10 @@ int exec_insn(struct bytecode_runtime *bytecode, break; } - case FILTER_OP_LOAD_FIELD_S8: - case FILTER_OP_LOAD_FIELD_S16: - case FILTER_OP_LOAD_FIELD_S32: - case FILTER_OP_LOAD_FIELD_S64: - case FILTER_OP_LOAD_FIELD_U8: - case FILTER_OP_LOAD_FIELD_U16: - case FILTER_OP_LOAD_FIELD_U32: - case FILTER_OP_LOAD_FIELD_U64: + case BYTECODE_OP_LOAD_FIELD_S8: + case BYTECODE_OP_LOAD_FIELD_S16: + case BYTECODE_OP_LOAD_FIELD_S32: + case BYTECODE_OP_LOAD_FIELD_S64: { /* Pop 1, push 1 */ if (!vstack_ax(stack)) { @@ -1751,8 +1839,29 @@ int exec_insn(struct bytecode_runtime *bytecode, break; } - case FILTER_OP_LOAD_FIELD_STRING: - case FILTER_OP_LOAD_FIELD_SEQUENCE: + case BYTECODE_OP_LOAD_FIELD_U8: + case BYTECODE_OP_LOAD_FIELD_U16: + case BYTECODE_OP_LOAD_FIELD_U32: + case BYTECODE_OP_LOAD_FIELD_U64: + { + /* Pop 1, push 1 */ + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + if (vstack_ax(stack)->type != REG_PTR) { + ERR("Expecting pointer on top of stack\n"); + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_U64; + next_pc += sizeof(struct load_op); + break; + } + + case BYTECODE_OP_LOAD_FIELD_STRING: + case BYTECODE_OP_LOAD_FIELD_SEQUENCE: { /* Pop 1, push 1 */ if (!vstack_ax(stack)) { @@ -1770,7 +1879,7 @@ int exec_insn(struct bytecode_runtime *bytecode, break; } - case FILTER_OP_LOAD_FIELD_DOUBLE: + case BYTECODE_OP_LOAD_FIELD_DOUBLE: { /* Pop 1, push 1 */ if (!vstack_ax(stack)) { @@ -1788,8 +1897,8 @@ int exec_insn(struct bytecode_runtime *bytecode, 
break; } - case FILTER_OP_GET_SYMBOL: - case FILTER_OP_GET_SYMBOL_FIELD: + case BYTECODE_OP_GET_SYMBOL: + case BYTECODE_OP_GET_SYMBOL_FIELD: { /* Pop 1, push 1 */ if (!vstack_ax(stack)) { @@ -1806,7 +1915,7 @@ int exec_insn(struct bytecode_runtime *bytecode, break; } - case FILTER_OP_GET_INDEX_U16: + case BYTECODE_OP_GET_INDEX_U16: { /* Pop 1, push 1 */ if (!vstack_ax(stack)) { @@ -1823,7 +1932,7 @@ int exec_insn(struct bytecode_runtime *bytecode, break; } - case FILTER_OP_GET_INDEX_U64: + case BYTECODE_OP_GET_INDEX_U64: { /* Pop 1, push 1 */ if (!vstack_ax(stack)) { @@ -1849,7 +1958,7 @@ end: /* * Never called concurrently (hash seed is shared). */ -int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode) +int lttng_bytecode_validate(struct bytecode_runtime *bytecode) { struct cds_lfht *merge_points; char *pc, *next_pc, *start_pc; @@ -1881,12 +1990,12 @@ int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode) ret = bytecode_validate_overflow(bytecode, start_pc, pc); if (ret != 0) { if (ret == -ERANGE) - ERR("filter bytecode overflow\n"); + ERR("Bytecode overflow\n"); goto end; } dbg_printf("Validating op %s (%u)\n", - print_op((unsigned int) *(filter_opcode_t *) pc), - (unsigned int) *(filter_opcode_t *) pc); + print_op((unsigned int) *(bytecode_opcode_t *) pc), + (unsigned int) *(bytecode_opcode_t *) pc); /* * For each instruction, validate the current context diff --git a/liblttng-ust/lttng-bytecode.c b/liblttng-ust/lttng-bytecode.c new file mode 100644 index 00000000..d01bc393 --- /dev/null +++ b/liblttng-ust/lttng-bytecode.c @@ -0,0 +1,638 @@ +/* + * lttng-bytecode.c + * + * LTTng UST bytecode code. 
+ * + * Copyright (C) 2010-2016 Mathieu Desnoyers + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#define _LGPL_SOURCE +#include +#include + +#include + +#include "lttng-bytecode.h" +#include "ust-events-internal.h" + +static const char *opnames[] = { + [ BYTECODE_OP_UNKNOWN ] = "UNKNOWN", + + [ BYTECODE_OP_RETURN ] = "RETURN", + + /* binary */ + [ BYTECODE_OP_MUL ] = "MUL", + [ BYTECODE_OP_DIV ] = "DIV", + [ BYTECODE_OP_MOD ] = "MOD", + [ BYTECODE_OP_PLUS ] = "PLUS", + [ BYTECODE_OP_MINUS ] = "MINUS", + [ BYTECODE_OP_BIT_RSHIFT ] = "BIT_RSHIFT", + [ BYTECODE_OP_BIT_LSHIFT ] = "BIT_LSHIFT", + [ BYTECODE_OP_BIT_AND ] = "BIT_AND", + [ BYTECODE_OP_BIT_OR ] = "BIT_OR", + [ BYTECODE_OP_BIT_XOR ] = "BIT_XOR", + + /* binary comparators */ + [ BYTECODE_OP_EQ ] = "EQ", + [ BYTECODE_OP_NE ] = "NE", + [ BYTECODE_OP_GT ] = "GT", + [ BYTECODE_OP_LT ] = "LT", + [ BYTECODE_OP_GE ] = "GE", + [ BYTECODE_OP_LE ] = "LE", + + /* string binary comparators */ + [ BYTECODE_OP_EQ_STRING ] = "EQ_STRING", + [ BYTECODE_OP_NE_STRING ] = "NE_STRING", + [ BYTECODE_OP_GT_STRING ] = "GT_STRING", + [ BYTECODE_OP_LT_STRING ] = "LT_STRING", + [ BYTECODE_OP_GE_STRING ] = "GE_STRING", + [ BYTECODE_OP_LE_STRING ] = "LE_STRING", + + /* s64 binary comparators */ + [ BYTECODE_OP_EQ_S64 ] = "EQ_S64", + [ BYTECODE_OP_NE_S64 ] = "NE_S64", + [ BYTECODE_OP_GT_S64 ] = "GT_S64", + [ BYTECODE_OP_LT_S64 ] = "LT_S64", + [ BYTECODE_OP_GE_S64 ] = "GE_S64", + [ BYTECODE_OP_LE_S64 ] = "LE_S64", + + /* double binary comparators */ + [ BYTECODE_OP_EQ_DOUBLE ] = "EQ_DOUBLE", + [ BYTECODE_OP_NE_DOUBLE ] = "NE_DOUBLE", + [ BYTECODE_OP_GT_DOUBLE ] = "GT_DOUBLE", + [ BYTECODE_OP_LT_DOUBLE ] = "LT_DOUBLE", + [ BYTECODE_OP_GE_DOUBLE ] = "GE_DOUBLE", + [ BYTECODE_OP_LE_DOUBLE ] = "LE_DOUBLE", + + /* Mixed S64-double binary comparators */ + [ BYTECODE_OP_EQ_DOUBLE_S64 ] = "EQ_DOUBLE_S64", + [ BYTECODE_OP_NE_DOUBLE_S64 ] = "NE_DOUBLE_S64", + [ BYTECODE_OP_GT_DOUBLE_S64 ] = "GT_DOUBLE_S64", + [ BYTECODE_OP_LT_DOUBLE_S64 ] = "LT_DOUBLE_S64", + [ BYTECODE_OP_GE_DOUBLE_S64 ] = "GE_DOUBLE_S64", + [ BYTECODE_OP_LE_DOUBLE_S64 
] = "LE_DOUBLE_S64", + + [ BYTECODE_OP_EQ_S64_DOUBLE ] = "EQ_S64_DOUBLE", + [ BYTECODE_OP_NE_S64_DOUBLE ] = "NE_S64_DOUBLE", + [ BYTECODE_OP_GT_S64_DOUBLE ] = "GT_S64_DOUBLE", + [ BYTECODE_OP_LT_S64_DOUBLE ] = "LT_S64_DOUBLE", + [ BYTECODE_OP_GE_S64_DOUBLE ] = "GE_S64_DOUBLE", + [ BYTECODE_OP_LE_S64_DOUBLE ] = "LE_S64_DOUBLE", + + /* unary */ + [ BYTECODE_OP_UNARY_PLUS ] = "UNARY_PLUS", + [ BYTECODE_OP_UNARY_MINUS ] = "UNARY_MINUS", + [ BYTECODE_OP_UNARY_NOT ] = "UNARY_NOT", + [ BYTECODE_OP_UNARY_PLUS_S64 ] = "UNARY_PLUS_S64", + [ BYTECODE_OP_UNARY_MINUS_S64 ] = "UNARY_MINUS_S64", + [ BYTECODE_OP_UNARY_NOT_S64 ] = "UNARY_NOT_S64", + [ BYTECODE_OP_UNARY_PLUS_DOUBLE ] = "UNARY_PLUS_DOUBLE", + [ BYTECODE_OP_UNARY_MINUS_DOUBLE ] = "UNARY_MINUS_DOUBLE", + [ BYTECODE_OP_UNARY_NOT_DOUBLE ] = "UNARY_NOT_DOUBLE", + + /* logical */ + [ BYTECODE_OP_AND ] = "AND", + [ BYTECODE_OP_OR ] = "OR", + + /* load field ref */ + [ BYTECODE_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF", + [ BYTECODE_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING", + [ BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE", + [ BYTECODE_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64", + [ BYTECODE_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE", + + /* load from immediate operand */ + [ BYTECODE_OP_LOAD_STRING ] = "LOAD_STRING", + [ BYTECODE_OP_LOAD_S64 ] = "LOAD_S64", + [ BYTECODE_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE", + + /* cast */ + [ BYTECODE_OP_CAST_TO_S64 ] = "CAST_TO_S64", + [ BYTECODE_OP_CAST_DOUBLE_TO_S64 ] = "CAST_DOUBLE_TO_S64", + [ BYTECODE_OP_CAST_NOP ] = "CAST_NOP", + + /* get context ref */ + [ BYTECODE_OP_GET_CONTEXT_REF ] = "GET_CONTEXT_REF", + [ BYTECODE_OP_GET_CONTEXT_REF_STRING ] = "GET_CONTEXT_REF_STRING", + [ BYTECODE_OP_GET_CONTEXT_REF_S64 ] = "GET_CONTEXT_REF_S64", + [ BYTECODE_OP_GET_CONTEXT_REF_DOUBLE ] = "GET_CONTEXT_REF_DOUBLE", + + /* load userspace field ref */ + [ BYTECODE_OP_LOAD_FIELD_REF_USER_STRING ] = "LOAD_FIELD_REF_USER_STRING", + [ 
BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = "LOAD_FIELD_REF_USER_SEQUENCE", + + /* + * load immediate star globbing pattern (literal string) + * from immediate. + */ + [ BYTECODE_OP_LOAD_STAR_GLOB_STRING ] = "LOAD_STAR_GLOB_STRING", + + /* globbing pattern binary operator: apply to */ + [ BYTECODE_OP_EQ_STAR_GLOB_STRING ] = "EQ_STAR_GLOB_STRING", + [ BYTECODE_OP_NE_STAR_GLOB_STRING ] = "NE_STAR_GLOB_STRING", + + /* + * Instructions for recursive traversal through composed types. + */ + [ BYTECODE_OP_GET_CONTEXT_ROOT ] = "GET_CONTEXT_ROOT", + [ BYTECODE_OP_GET_APP_CONTEXT_ROOT ] = "GET_APP_CONTEXT_ROOT", + [ BYTECODE_OP_GET_PAYLOAD_ROOT ] = "GET_PAYLOAD_ROOT", + + [ BYTECODE_OP_GET_SYMBOL ] = "GET_SYMBOL", + [ BYTECODE_OP_GET_SYMBOL_FIELD ] = "GET_SYMBOL_FIELD", + [ BYTECODE_OP_GET_INDEX_U16 ] = "GET_INDEX_U16", + [ BYTECODE_OP_GET_INDEX_U64 ] = "GET_INDEX_U64", + + [ BYTECODE_OP_LOAD_FIELD ] = "LOAD_FIELD", + [ BYTECODE_OP_LOAD_FIELD_S8 ] = "LOAD_FIELD_S8", + [ BYTECODE_OP_LOAD_FIELD_S16 ] = "LOAD_FIELD_S16", + [ BYTECODE_OP_LOAD_FIELD_S32 ] = "LOAD_FIELD_S32", + [ BYTECODE_OP_LOAD_FIELD_S64 ] = "LOAD_FIELD_S64", + [ BYTECODE_OP_LOAD_FIELD_U8 ] = "LOAD_FIELD_U8", + [ BYTECODE_OP_LOAD_FIELD_U16 ] = "LOAD_FIELD_U16", + [ BYTECODE_OP_LOAD_FIELD_U32 ] = "LOAD_FIELD_U32", + [ BYTECODE_OP_LOAD_FIELD_U64 ] = "LOAD_FIELD_U64", + [ BYTECODE_OP_LOAD_FIELD_STRING ] = "LOAD_FIELD_STRING", + [ BYTECODE_OP_LOAD_FIELD_SEQUENCE ] = "LOAD_FIELD_SEQUENCE", + [ BYTECODE_OP_LOAD_FIELD_DOUBLE ] = "LOAD_FIELD_DOUBLE", + + [ BYTECODE_OP_UNARY_BIT_NOT ] = "UNARY_BIT_NOT", + + [ BYTECODE_OP_RETURN_S64 ] = "RETURN_S64", +}; + +const char *print_op(enum bytecode_op op) +{ + if (op >= NR_BYTECODE_OPS) + return "UNKNOWN"; + else + return opnames[op]; +} + +static +int apply_field_reloc(const struct lttng_event_desc *event_desc, + struct bytecode_runtime *runtime, + uint32_t runtime_len, + uint32_t reloc_offset, + const char *field_name, + enum bytecode_op bytecode_op) +{ + const struct 
lttng_event_field *fields, *field = NULL; + unsigned int nr_fields, i; + struct load_op *op; + uint32_t field_offset = 0; + + dbg_printf("Apply field reloc: %u %s\n", reloc_offset, field_name); + + /* Lookup event by name */ + if (!event_desc) + return -EINVAL; + fields = event_desc->fields; + if (!fields) + return -EINVAL; + nr_fields = event_desc->nr_fields; + for (i = 0; i < nr_fields; i++) { + if (fields[i].u.ext.nofilter) { + continue; + } + if (!strcmp(fields[i].name, field_name)) { + field = &fields[i]; + break; + } + /* compute field offset */ + switch (fields[i].type.atype) { + case atype_integer: + case atype_enum: + case atype_enum_nestable: + field_offset += sizeof(int64_t); + break; + case atype_array: + case atype_array_nestable: + case atype_sequence: + case atype_sequence_nestable: + field_offset += sizeof(unsigned long); + field_offset += sizeof(void *); + break; + case atype_string: + field_offset += sizeof(void *); + break; + case atype_float: + field_offset += sizeof(double); + break; + default: + return -EINVAL; + } + } + if (!field) + return -EINVAL; + + /* Check if field offset is too large for 16-bit offset */ + if (field_offset > FILTER_BYTECODE_MAX_LEN - 1) + return -EINVAL; + + /* set type */ + op = (struct load_op *) &runtime->code[reloc_offset]; + + switch (bytecode_op) { + case BYTECODE_OP_LOAD_FIELD_REF: + { + struct field_ref *field_ref; + + field_ref = (struct field_ref *) op->data; + switch (field->type.atype) { + case atype_integer: + case atype_enum: + case atype_enum_nestable: + op->op = BYTECODE_OP_LOAD_FIELD_REF_S64; + break; + case atype_array: + case atype_array_nestable: + case atype_sequence: + case atype_sequence_nestable: + op->op = BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE; + break; + case atype_string: + op->op = BYTECODE_OP_LOAD_FIELD_REF_STRING; + break; + case atype_float: + op->op = BYTECODE_OP_LOAD_FIELD_REF_DOUBLE; + break; + default: + return -EINVAL; + } + /* set offset */ + field_ref->offset = (uint16_t) 
field_offset; + break; + } + default: + return -EINVAL; + } + return 0; +} + +static +int apply_context_reloc(struct bytecode_runtime *runtime, + uint32_t runtime_len, + uint32_t reloc_offset, + const char *context_name, + enum bytecode_op bytecode_op) +{ + struct load_op *op; + struct lttng_ctx_field *ctx_field; + int idx; + struct lttng_ctx *ctx = *runtime->p.pctx; + + dbg_printf("Apply context reloc: %u %s\n", reloc_offset, context_name); + + /* Get context index */ + idx = lttng_get_context_index(ctx, context_name); + if (idx < 0) { + if (lttng_context_is_app(context_name)) { + int ret; + + ret = lttng_ust_add_app_context_to_ctx_rcu(context_name, + &ctx); + if (ret) + return ret; + idx = lttng_get_context_index(ctx, context_name); + if (idx < 0) + return -ENOENT; + } else { + return -ENOENT; + } + } + /* Check if idx is too large for 16-bit offset */ + if (idx > FILTER_BYTECODE_MAX_LEN - 1) + return -EINVAL; + + /* Get context return type */ + ctx_field = &ctx->fields[idx]; + op = (struct load_op *) &runtime->code[reloc_offset]; + + switch (bytecode_op) { + case BYTECODE_OP_GET_CONTEXT_REF: + { + struct field_ref *field_ref; + + field_ref = (struct field_ref *) op->data; + switch (ctx_field->event_field.type.atype) { + case atype_integer: + case atype_enum: + case atype_enum_nestable: + op->op = BYTECODE_OP_GET_CONTEXT_REF_S64; + break; + /* Sequence and array supported as string */ + case atype_string: + case atype_array: + case atype_array_nestable: + case atype_sequence: + case atype_sequence_nestable: + op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING; + break; + case atype_float: + op->op = BYTECODE_OP_GET_CONTEXT_REF_DOUBLE; + break; + case atype_dynamic: + op->op = BYTECODE_OP_GET_CONTEXT_REF; + break; + default: + return -EINVAL; + } + /* set offset to context index within channel contexts */ + field_ref->offset = (uint16_t) idx; + break; + } + default: + return -EINVAL; + } + return 0; +} + +static +int apply_reloc(const struct lttng_event_desc *event_desc, 
+ struct bytecode_runtime *runtime, + uint32_t runtime_len, + uint32_t reloc_offset, + const char *name) +{ + struct load_op *op; + + dbg_printf("Apply reloc: %u %s\n", reloc_offset, name); + + /* Ensure that the reloc is within the code */ + if (runtime_len - reloc_offset < sizeof(uint16_t)) + return -EINVAL; + + op = (struct load_op *) &runtime->code[reloc_offset]; + switch (op->op) { + case BYTECODE_OP_LOAD_FIELD_REF: + return apply_field_reloc(event_desc, runtime, runtime_len, + reloc_offset, name, op->op); + case BYTECODE_OP_GET_CONTEXT_REF: + return apply_context_reloc(runtime, runtime_len, + reloc_offset, name, op->op); + case BYTECODE_OP_GET_SYMBOL: + case BYTECODE_OP_GET_SYMBOL_FIELD: + /* + * Will be handled by load specialize phase or + * dynamically by interpreter. + */ + return 0; + default: + ERR("Unknown reloc op type %u\n", op->op); + return -EINVAL; + } + return 0; +} + +static +int bytecode_is_linked(struct lttng_ust_bytecode_node *bytecode, + struct cds_list_head *bytecode_runtime_head) +{ + struct lttng_bytecode_runtime *bc_runtime; + + cds_list_for_each_entry(bc_runtime, bytecode_runtime_head, node) { + if (bc_runtime->bc == bytecode) + return 1; + } + return 0; +} + +/* + * Take a bytecode with reloc table and link it to an event to create a + * bytecode runtime. 
+ */ +static +int link_bytecode(const struct lttng_event_desc *event_desc, + struct lttng_ctx **ctx, + struct lttng_ust_bytecode_node *bytecode, + struct cds_list_head *insert_loc) +{ + int ret, offset, next_offset; + struct bytecode_runtime *runtime = NULL; + size_t runtime_alloc_len; + + if (!bytecode) + return 0; + /* Bytecode already linked */ + if (bytecode_is_linked(bytecode, insert_loc)) + return 0; + + dbg_printf("Linking...\n"); + + /* We don't need the reloc table in the runtime */ + runtime_alloc_len = sizeof(*runtime) + bytecode->bc.reloc_offset; + runtime = zmalloc(runtime_alloc_len); + if (!runtime) { + ret = -ENOMEM; + goto alloc_error; + } + runtime->p.bc = bytecode; + runtime->p.pctx = ctx; + runtime->len = bytecode->bc.reloc_offset; + /* copy original bytecode */ + memcpy(runtime->code, bytecode->bc.data, runtime->len); + /* + * apply relocs. Those are a uint16_t (offset in bytecode) + * followed by a string (field name). + */ + for (offset = bytecode->bc.reloc_offset; + offset < bytecode->bc.len; + offset = next_offset) { + uint16_t reloc_offset = + *(uint16_t *) &bytecode->bc.data[offset]; + const char *name = + (const char *) &bytecode->bc.data[offset + sizeof(uint16_t)]; + + ret = apply_reloc(event_desc, runtime, runtime->len, reloc_offset, name); + if (ret) { + goto link_error; + } + next_offset = offset + sizeof(uint16_t) + strlen(name) + 1; + } + /* Validate bytecode */ + ret = lttng_bytecode_validate(runtime); + if (ret) { + goto link_error; + } + /* Specialize bytecode */ + ret = lttng_bytecode_specialize(event_desc, runtime); + if (ret) { + goto link_error; + } + + switch (bytecode->type) { + case LTTNG_UST_BYTECODE_NODE_TYPE_FILTER: + runtime->p.interpreter_funcs.filter = lttng_bytecode_filter_interpret; + break; + case LTTNG_UST_BYTECODE_NODE_TYPE_CAPTURE: + runtime->p.interpreter_funcs.capture = lttng_bytecode_capture_interpret; + break; + default: + abort(); + } + + runtime->p.link_failed = 0; + cds_list_add_rcu(&runtime->p.node, 
insert_loc); + dbg_printf("Linking successful.\n"); + return 0; + +link_error: + switch (bytecode->type) { + case LTTNG_UST_BYTECODE_NODE_TYPE_FILTER: + runtime->p.interpreter_funcs.filter = lttng_bytecode_filter_interpret_false; + break; + case LTTNG_UST_BYTECODE_NODE_TYPE_CAPTURE: + runtime->p.interpreter_funcs.capture = lttng_bytecode_capture_interpret_false; + break; + default: + abort(); + } + + runtime->p.link_failed = 1; + cds_list_add_rcu(&runtime->p.node, insert_loc); +alloc_error: + dbg_printf("Linking failed.\n"); + return ret; +} + +void lttng_bytecode_filter_sync_state(struct lttng_bytecode_runtime *runtime) +{ + struct lttng_ust_bytecode_node *bc = runtime->bc; + + if (!bc->enabler->enabled || runtime->link_failed) + runtime->interpreter_funcs.filter = lttng_bytecode_filter_interpret_false; + else + runtime->interpreter_funcs.filter = lttng_bytecode_filter_interpret; +} + +void lttng_bytecode_capture_sync_state(struct lttng_bytecode_runtime *runtime) +{ + struct lttng_ust_bytecode_node *bc = runtime->bc; + + if (!bc->enabler->enabled || runtime->link_failed) + runtime->interpreter_funcs.capture = lttng_bytecode_capture_interpret_false; + else + runtime->interpreter_funcs.capture = lttng_bytecode_capture_interpret; +} + +/* + * Given the lists of bytecode programs of an instance (trigger or event) and + * of a matching enabler, try to link all the enabler's bytecode programs with + * the instance. + * + * This function is called after we confirmed that name enabler and the + * instance are name matching (or glob pattern matching). + */ +void lttng_enabler_link_bytecode(const struct lttng_event_desc *event_desc, + struct lttng_ctx **ctx, + struct cds_list_head *instance_bytecode_head, + struct cds_list_head *enabler_bytecode_head) +{ + struct lttng_ust_bytecode_node *enabler_bc; + struct lttng_bytecode_runtime *runtime; + + assert(event_desc); + + /* Go over all the bytecode programs of the enabler. 
*/ + cds_list_for_each_entry(enabler_bc, enabler_bytecode_head, node) { + int found = 0, ret; + struct cds_list_head *insert_loc; + + /* + * Check if the current enabler bytecode program is already + * linked with the instance. + */ + cds_list_for_each_entry(runtime, instance_bytecode_head, node) { + if (runtime->bc == enabler_bc) { + found = 1; + break; + } + } + + /* + * Skip bytecode already linked, go to the next enabler + * bytecode program. + */ + if (found) + continue; + + /* + * Insert at specified priority (seqnum) in increasing + * order. If there already is a bytecode of the same priority, + * insert the new bytecode right after it. + */ + cds_list_for_each_entry_reverse(runtime, + instance_bytecode_head, node) { + if (runtime->bc->bc.seqnum <= enabler_bc->bc.seqnum) { + /* insert here */ + insert_loc = &runtime->node; + goto add_within; + } + } + + /* Add to head to list */ + insert_loc = instance_bytecode_head; + add_within: + dbg_printf("linking bytecode\n"); + ret = link_bytecode(event_desc, ctx, enabler_bc, insert_loc); + if (ret) { + dbg_printf("[lttng filter] warning: cannot link event bytecode\n"); + } + } +} + +/* + * We own the bytecode if we return success. + */ +int lttng_filter_enabler_attach_bytecode(struct lttng_enabler *enabler, + struct lttng_ust_bytecode_node *bytecode) +{ + cds_list_add(&bytecode->node, &enabler->filter_bytecode_head); + return 0; +} + +static +void free_filter_runtime(struct cds_list_head *bytecode_runtime_head) +{ + struct bytecode_runtime *runtime, *tmp; + + cds_list_for_each_entry_safe(runtime, tmp, bytecode_runtime_head, + p.node) { + free(runtime->data); + free(runtime); + } +} + +void lttng_free_event_filter_runtime(struct lttng_event *event) +{ + free_filter_runtime(&event->filter_bytecode_runtime_head); +} + +void lttng_free_trigger_filter_runtime(struct lttng_trigger *trigger) +{ + free_filter_runtime(&trigger->filter_bytecode_runtime_head); +} + +/* For backward compatibility. 
Leave those exported symbols in place. */ +void lttng_filter_sync_state(struct lttng_bytecode_runtime *runtime) +{ +} diff --git a/liblttng-ust/lttng-filter.h b/liblttng-ust/lttng-bytecode.h similarity index 69% rename from liblttng-ust/lttng-filter.h rename to liblttng-ust/lttng-bytecode.h index cc15c154..7f3e5ce8 100644 --- a/liblttng-ust/lttng-filter.h +++ b/liblttng-ust/lttng-bytecode.h @@ -1,10 +1,10 @@ -#ifndef _LTTNG_FILTER_H -#define _LTTNG_FILTER_H +#ifndef _LTTNG_BYTECODE_H +#define _LTTNG_BYTECODE_H /* - * lttng-filter.h + * lttng-bytecode.h * - * LTTng UST filter header. + * LTTng UST bytecode header. * * Copyright (C) 2010-2016 Mathieu Desnoyers * @@ -40,13 +40,13 @@ #include #include #include -#include "filter-bytecode.h" +#include "bytecode.h" -/* Filter stack length, in number of entries */ -#define FILTER_STACK_LEN 10 /* includes 2 dummy */ -#define FILTER_STACK_EMPTY 1 +/* Interpreter stack length, in number of entries */ +#define INTERPRETER_STACK_LEN 10 /* includes 2 dummy */ +#define INTERPRETER_STACK_EMPTY 1 -#define FILTER_MAX_DATA_LEN 65536 +#define BYTECODE_MAX_DATA_LEN 65536 #ifndef min_t #define min_t(type, a, b) \ @@ -87,6 +87,7 @@ struct bytecode_runtime { enum entry_type { REG_S64, + REG_U64, REG_DOUBLE, REG_STRING, REG_STAR_GLOB_STRING, @@ -111,6 +112,9 @@ enum object_type { OBJECT_TYPE_U32, OBJECT_TYPE_U64, + OBJECT_TYPE_SIGNED_ENUM, + OBJECT_TYPE_UNSIGNED_ENUM, + OBJECT_TYPE_DOUBLE, OBJECT_TYPE_STRING, OBJECT_TYPE_STRING_SEQUENCE, @@ -123,10 +127,17 @@ enum object_type { OBJECT_TYPE_DYNAMIC, }; -struct filter_get_index_data { +struct bytecode_get_index_data { uint64_t offset; /* in bytes */ size_t ctx_index; size_t array_len; + /* + * Field is only populated for LOAD_ROOT_CONTEXT, LOAD_ROOT_APP_CONTEXT + * and LOAD_ROOT_PAYLOAD. Left NULL for LOAD_OBJECT, considering that the + * interpreter needs to find it from the event fields and types to + * support variants. 
+ */ + const struct lttng_event_field *field; struct { size_t len; enum object_type type; @@ -149,7 +160,7 @@ struct vstack_entry { struct vstack { int top; /* top of stack */ - struct vstack_entry e[FILTER_STACK_LEN]; + struct vstack_entry e[INTERPRETER_STACK_LEN]; }; static inline @@ -177,7 +188,7 @@ struct vstack_entry *vstack_bx(struct vstack *stack) static inline int vstack_push(struct vstack *stack) { - if (stack->top >= FILTER_STACK_LEN - 1) { + if (stack->top >= INTERPRETER_STACK_LEN - 1) { ERR("Stack full\n"); return -EINVAL; } @@ -207,6 +218,7 @@ struct load_ptr { enum load_type type; enum object_type object_type; const void *ptr; + size_t nr_elem; bool rev_bo; /* Temporary place-holders for contexts. */ union { @@ -214,10 +226,6 @@ struct load_ptr { uint64_t u64; double d; } u; - /* - * "field" is only needed when nested under a variant, in which - * case we cannot specialize the nested operations. - */ const struct lttng_event_field *field; }; @@ -238,7 +246,7 @@ struct estack_entry { struct estack { int top; /* top of stack */ - struct estack_entry e[FILTER_STACK_LEN]; + struct estack_entry e[INTERPRETER_STACK_LEN]; }; /* @@ -255,13 +263,13 @@ struct estack { */ #define estack_ax(stack, top) \ ({ \ - assert((top) > FILTER_STACK_EMPTY); \ + assert((top) > INTERPRETER_STACK_EMPTY); \ &(stack)->e[top]; \ }) #define estack_bx(stack, top) \ ({ \ - assert((top) > FILTER_STACK_EMPTY + 1); \ + assert((top) > INTERPRETER_STACK_EMPTY + 1); \ &(stack)->e[(top) - 1]; \ }) @@ -270,7 +278,7 @@ struct estack { */ #define estack_push(stack, top, ax, bx, ax_t, bx_t) \ do { \ - assert((top) < FILTER_STACK_LEN - 1); \ + assert((top) < INTERPRETER_STACK_LEN - 1); \ (stack)->e[(top) - 1].u.v = (bx); \ (stack)->e[(top) - 1].type = (bx_t); \ (bx) = (ax); \ @@ -280,7 +288,7 @@ struct estack { #define estack_pop(stack, top, ax, bx, ax_t, bx_t) \ do { \ - assert((top) > FILTER_STACK_EMPTY); \ + assert((top) > INTERPRETER_STACK_EMPTY); \ (ax) = (bx); \ (ax_t) = (bx_t); \ (bx) = 
(stack)->e[(top) - 2].u.v; \ @@ -288,15 +296,61 @@ struct estack { (top)--; \ } while (0) -const char *print_op(enum filter_op op); +enum lttng_interpreter_type { + LTTNG_INTERPRETER_TYPE_S64, + LTTNG_INTERPRETER_TYPE_U64, + LTTNG_INTERPRETER_TYPE_SIGNED_ENUM, + LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM, + LTTNG_INTERPRETER_TYPE_DOUBLE, + LTTNG_INTERPRETER_TYPE_STRING, + LTTNG_INTERPRETER_TYPE_SEQUENCE, +}; + +/* + * Represents the output parameter of the lttng interpreter. + * Currently capturable field classes are integer, double, string and sequence + * of integer. + */ +struct lttng_interpreter_output { + enum lttng_interpreter_type type; + union { + int64_t s; + uint64_t u; + double d; + + struct { + const char *str; + size_t len; + } str; + struct { + const void *ptr; + size_t nr_elem; -int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode); -int lttng_filter_specialize_bytecode(struct lttng_event *event, + /* Inner type. */ + const struct lttng_type *nested_type; + } sequence; + } u; +}; + +const char *print_op(enum bytecode_op op); + +void lttng_bytecode_filter_sync_state(struct lttng_bytecode_runtime *runtime); +void lttng_bytecode_capture_sync_state(struct lttng_bytecode_runtime *runtime); + +int lttng_bytecode_validate(struct bytecode_runtime *bytecode); +int lttng_bytecode_specialize(const struct lttng_event_desc *event_desc, struct bytecode_runtime *bytecode); -uint64_t lttng_filter_false(void *filter_data, +uint64_t lttng_bytecode_filter_interpret_false(void *filter_data, const char *filter_stack_data); -uint64_t lttng_filter_interpret_bytecode(void *filter_data, +uint64_t lttng_bytecode_filter_interpret(void *filter_data, const char *filter_stack_data); -#endif /* _LTTNG_FILTER_H */ +uint64_t lttng_bytecode_capture_interpret_false(void *capture_data, + const char *capture_stack_data, + struct lttng_interpreter_output *output); +uint64_t lttng_bytecode_capture_interpret(void *capture_data, + const char *capture_stack_data, + struct 
lttng_interpreter_output *output); + +#endif /* _LTTNG_BYTECODE_H */ diff --git a/liblttng-ust/lttng-context-provider.c b/liblttng-ust/lttng-context-provider.c index 50f73c62..10c95a5e 100644 --- a/liblttng-ust/lttng-context-provider.c +++ b/liblttng-ust/lttng-context-provider.c @@ -27,8 +27,10 @@ #include #include + #include "lttng-tracer-core.h" #include "jhash.h" +#include "context-provider-internal.h" #include #define CONTEXT_PROVIDER_HT_BITS 12 @@ -90,9 +92,14 @@ int lttng_ust_context_provider_register(struct lttng_ust_context_provider *provi hash = jhash(provider->name, name_len, 0); head = &context_provider_ht.table[hash & (CONTEXT_PROVIDER_HT_SIZE - 1)]; cds_hlist_add_head(&provider->node, head); + lttng_ust_context_set_session_provider(provider->name, provider->get_size, provider->record, provider->get_value); + + lttng_ust_context_set_trigger_group_provider(provider->name, + provider->get_size, provider->record, + provider->get_value); end: ust_unlock(); return ret; @@ -107,6 +114,11 @@ void lttng_ust_context_provider_unregister(struct lttng_ust_context_provider *pr lttng_ust_context_set_session_provider(provider->name, lttng_ust_dummy_get_size, lttng_ust_dummy_record, lttng_ust_dummy_get_value); + + lttng_ust_context_set_trigger_group_provider(provider->name, + lttng_ust_dummy_get_size, lttng_ust_dummy_record, + lttng_ust_dummy_get_value); + cds_hlist_del(&provider->node); end: ust_unlock(); diff --git a/liblttng-ust/lttng-context.c b/liblttng-ust/lttng-context.c index fc564b6a..c457d3e1 100644 --- a/liblttng-ust/lttng-context.c +++ b/liblttng-ust/lttng-context.c @@ -31,6 +31,8 @@ #include #include +#include "context-internal.h" + /* * The filter implementation requires that two consecutive "get" for the * same context performed by the same thread return the same result. 
@@ -400,7 +402,7 @@ field_error: return ret; } -int lttng_session_context_init(struct lttng_ctx **ctx) +int lttng_context_init_all(struct lttng_ctx **ctx) { int ret; @@ -517,3 +519,8 @@ void lttng_context_init(void) void lttng_context_exit(void) { } + +int lttng_session_context_init(struct lttng_ctx **ctx) +{ + return 0; +} diff --git a/liblttng-ust/lttng-counter-client-percpu-32-modular.c b/liblttng-ust/lttng-counter-client-percpu-32-modular.c new file mode 100644 index 00000000..6d2bb5e2 --- /dev/null +++ b/liblttng-ust/lttng-counter-client-percpu-32-modular.c @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only) + * + * lttng-counter-client-percpu-32-modular.c + * + * LTTng lib counter client. Per-cpu 32-bit counters in overflow + * arithmetic. + * + * Copyright (C) 2020 Mathieu Desnoyers + */ + +#define _GNU_SOURCE +#include +#include "../libcounter/counter.h" +#include "../libcounter/counter-api.h" + +static const struct lib_counter_config client_config = { + .alloc = COUNTER_ALLOC_PER_CPU, + .sync = COUNTER_SYNC_PER_CPU, + .arithmetic = COUNTER_ARITHMETIC_OVERFLOW, + .counter_size = COUNTER_SIZE_32_BIT, +}; + +static struct lib_counter *counter_create(size_t nr_dimensions, + const struct lttng_counter_dimension *dimensions, + int64_t global_sum_step, + int global_counter_fd, + int nr_counter_cpu_fds, + const int *counter_cpu_fds, + bool is_daemon) +{ + size_t max_nr_elem[LTTNG_COUNTER_DIMENSION_MAX], i; + + if (nr_dimensions > LTTNG_COUNTER_DIMENSION_MAX) + return NULL; + for (i = 0; i < nr_dimensions; i++) { + if (dimensions[i].has_underflow || dimensions[i].has_overflow) + return NULL; + max_nr_elem[i] = dimensions[i].size; + } + return lttng_counter_create(&client_config, nr_dimensions, max_nr_elem, + global_sum_step, global_counter_fd, nr_counter_cpu_fds, + counter_cpu_fds, is_daemon); +} + +static void counter_destroy(struct lib_counter *counter) +{ + lttng_counter_destroy(counter); +} + +static int counter_add(struct lib_counter 
*counter, const size_t *dimension_indexes, int64_t v) +{ + return lttng_counter_add(&client_config, counter, dimension_indexes, v); +} + +static int counter_read(struct lib_counter *counter, const size_t *dimension_indexes, int cpu, + int64_t *value, bool *overflow, bool *underflow) +{ + return lttng_counter_read(&client_config, counter, dimension_indexes, cpu, value, + overflow, underflow); +} + +static int counter_aggregate(struct lib_counter *counter, const size_t *dimension_indexes, + int64_t *value, bool *overflow, bool *underflow) +{ + return lttng_counter_aggregate(&client_config, counter, dimension_indexes, value, + overflow, underflow); +} + +static int counter_clear(struct lib_counter *counter, const size_t *dimension_indexes) +{ + return lttng_counter_clear(&client_config, counter, dimension_indexes); +} + +static struct lttng_counter_transport lttng_counter_transport = { + .name = "counter-per-cpu-32-modular", + .ops = { + .counter_create = counter_create, + .counter_destroy = counter_destroy, + .counter_add = counter_add, + .counter_read = counter_read, + .counter_aggregate = counter_aggregate, + .counter_clear = counter_clear, + }, + .client_config = &client_config, +}; + +void lttng_counter_client_percpu_32_overflow_init(void) +{ + lttng_counter_transport_register(<tng_counter_transport); +} + +void lttng_counter_client_percpu_32_overflow_exit(void) +{ + lttng_counter_transport_unregister(<tng_counter_transport); +} diff --git a/liblttng-ust/lttng-counter-client-percpu-64-modular.c b/liblttng-ust/lttng-counter-client-percpu-64-modular.c new file mode 100644 index 00000000..fd8760e2 --- /dev/null +++ b/liblttng-ust/lttng-counter-client-percpu-64-modular.c @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only) + * + * lttng-counter-client-percpu-64-modular.c + * + * LTTng lib counter client. Per-cpu 64-bit counters in overflow + * arithmetic. 
+ * + * Copyright (C) 2020 Mathieu Desnoyers + */ + +#define _GNU_SOURCE +#include +#include "../libcounter/counter.h" +#include "../libcounter/counter-api.h" + +static const struct lib_counter_config client_config = { + .alloc = COUNTER_ALLOC_PER_CPU, + .sync = COUNTER_SYNC_PER_CPU, + .arithmetic = COUNTER_ARITHMETIC_OVERFLOW, + .counter_size = COUNTER_SIZE_64_BIT, +}; + +static struct lib_counter *counter_create(size_t nr_dimensions, + const struct lttng_counter_dimension *dimensions, + int64_t global_sum_step, + int global_counter_fd, + int nr_counter_cpu_fds, + const int *counter_cpu_fds, + bool is_daemon) +{ + size_t max_nr_elem[LTTNG_COUNTER_DIMENSION_MAX], i; + + if (nr_dimensions > LTTNG_COUNTER_DIMENSION_MAX) + return NULL; + for (i = 0; i < nr_dimensions; i++) { + if (dimensions[i].has_underflow || dimensions[i].has_overflow) + return NULL; + max_nr_elem[i] = dimensions[i].size; + } + return lttng_counter_create(&client_config, nr_dimensions, max_nr_elem, + global_sum_step, global_counter_fd, nr_counter_cpu_fds, + counter_cpu_fds, is_daemon); +} + +static void counter_destroy(struct lib_counter *counter) +{ + lttng_counter_destroy(counter); +} + +static int counter_add(struct lib_counter *counter, const size_t *dimension_indexes, int64_t v) +{ + return lttng_counter_add(&client_config, counter, dimension_indexes, v); +} + +static int counter_read(struct lib_counter *counter, const size_t *dimension_indexes, int cpu, + int64_t *value, bool *overflow, bool *underflow) +{ + return lttng_counter_read(&client_config, counter, dimension_indexes, cpu, value, + overflow, underflow); +} + +static int counter_aggregate(struct lib_counter *counter, const size_t *dimension_indexes, + int64_t *value, bool *overflow, bool *underflow) +{ + return lttng_counter_aggregate(&client_config, counter, dimension_indexes, value, + overflow, underflow); +} + +static int counter_clear(struct lib_counter *counter, const size_t *dimension_indexes) +{ + return 
lttng_counter_clear(&client_config, counter, dimension_indexes); +} + +static struct lttng_counter_transport lttng_counter_transport = { + .name = "counter-per-cpu-64-modular", + .ops = { + .counter_create = counter_create, + .counter_destroy = counter_destroy, + .counter_add = counter_add, + .counter_read = counter_read, + .counter_aggregate = counter_aggregate, + .counter_clear = counter_clear, + }, + .client_config = &client_config, +}; + +void lttng_counter_client_percpu_64_overflow_init(void) +{ + lttng_counter_transport_register(<tng_counter_transport); +} + +void lttng_counter_client_percpu_64_overflow_exit(void) +{ + lttng_counter_transport_unregister(<tng_counter_transport); +} diff --git a/liblttng-ust/lttng-events.c b/liblttng-ust/lttng-events.c index 627cd4e1..876d2a6d 100644 --- a/liblttng-ust/lttng-events.c +++ b/liblttng-ust/lttng-events.c @@ -23,10 +23,10 @@ #define _GNU_SOURCE #define _LGPL_SOURCE #include -#include -#include -#include +#include #include +#include +#include #include #include #include @@ -34,13 +34,15 @@ #include #include #include +#include #include -#include "clock.h" #include +#include #include +#include +#include #include -#include #include #include @@ -49,6 +51,7 @@ #include #include #include +#include #include #include #include "error.h" @@ -57,12 +60,17 @@ #include "tracepoint-internal.h" #include "string-utils.h" +#include "lttng-bytecode.h" #include "lttng-tracer.h" #include "lttng-tracer-core.h" #include "lttng-ust-statedump.h" +#include "context-internal.h" +#include "ust-events-internal.h" #include "wait.h" #include "../libringbuffer/shm.h" +#include "../libcounter/counter.h" #include "jhash.h" +#include /* * All operations within this file are called by the communication @@ -70,6 +78,7 @@ */ static CDS_LIST_HEAD(sessions); +static CDS_LIST_HEAD(trigger_groups); struct cds_list_head *_lttng_get_sessions(void) { @@ -77,12 +86,15 @@ struct cds_list_head *_lttng_get_sessions(void) } static void _lttng_event_destroy(struct 
lttng_event *event); +static void _lttng_trigger_destroy(struct lttng_trigger *trigger); static void _lttng_enum_destroy(struct lttng_enum *_enum); static -void lttng_session_lazy_sync_enablers(struct lttng_session *session); +void lttng_session_lazy_sync_event_enablers(struct lttng_session *session); +static +void lttng_session_sync_event_enablers(struct lttng_session *session); static -void lttng_session_sync_enablers(struct lttng_session *session); +void lttng_trigger_group_sync_enablers(struct lttng_trigger_group *trigger_group); static void lttng_enabler_destroy(struct lttng_enabler *enabler); @@ -143,7 +155,7 @@ struct lttng_session *lttng_session_create(void) session = zmalloc(sizeof(struct lttng_session)); if (!session) return NULL; - if (lttng_session_context_init(&session->ctx)) { + if (lttng_context_init_all(&session->ctx)) { free(session); return NULL; } @@ -159,6 +171,72 @@ struct lttng_session *lttng_session_create(void) return session; } +struct lttng_counter *lttng_ust_counter_create( + const char *counter_transport_name, + size_t number_dimensions, const struct lttng_counter_dimension *dimensions) +{ + struct lttng_counter_transport *counter_transport = NULL; + struct lttng_counter *counter = NULL; + + counter_transport = lttng_counter_transport_find(counter_transport_name); + if (!counter_transport) + goto notransport; + counter = zmalloc(sizeof(struct lttng_counter)); + if (!counter) + goto nomem; + + /* Create trigger error counter. 
*/ + counter->ops = &counter_transport->ops; + counter->transport = counter_transport; + + counter->counter = counter->ops->counter_create( + number_dimensions, dimensions, 0, + -1, 0, NULL, false); + if (!counter->counter) { + goto create_error; + } + + return counter; + +create_error: + free(counter); +nomem: +notransport: + return NULL; +} + +static +void lttng_ust_counter_destroy(struct lttng_counter *counter) +{ + counter->ops->counter_destroy(counter->counter); + free(counter); +} + +struct lttng_trigger_group *lttng_trigger_group_create(void) +{ + struct lttng_trigger_group *trigger_group; + int i; + + trigger_group = zmalloc(sizeof(struct lttng_trigger_group)); + if (!trigger_group) + return NULL; + + /* Add all contexts. */ + if (lttng_context_init_all(&trigger_group->ctx)) { + free(trigger_group); + return NULL; + } + + CDS_INIT_LIST_HEAD(&trigger_group->enablers_head); + CDS_INIT_LIST_HEAD(&trigger_group->triggers_head); + for (i = 0; i < LTTNG_UST_TRIGGER_HT_SIZE; i++) + CDS_INIT_HLIST_HEAD(&trigger_group->triggers_ht.table[i]); + + cds_list_add(&trigger_group->node, &trigger_groups); + + return trigger_group; +} + /* * Only used internally at session destruction. 
*/ @@ -195,6 +273,21 @@ void register_event(struct lttng_event *event) event->registered = 1; } +static +void register_trigger(struct lttng_trigger *trigger) +{ + int ret; + const struct lttng_event_desc *desc; + + assert(trigger->registered == 0); + desc = trigger->desc; + ret = __tracepoint_probe_register_queue_release(desc->name, + desc->u.ext.trigger_callback, trigger, desc->signature); + WARN_ON_ONCE(ret); + if (!ret) + trigger->registered = 1; +} + static void unregister_event(struct lttng_event *event) { @@ -211,6 +304,21 @@ void unregister_event(struct lttng_event *event) event->registered = 0; } +static +void unregister_trigger(struct lttng_trigger *trigger) +{ + int ret; + const struct lttng_event_desc *desc; + + assert(trigger->registered == 1); + desc = trigger->desc; + ret = __tracepoint_probe_unregister_queue_release(desc->name, + desc->u.ext.trigger_callback, trigger); + WARN_ON_ONCE(ret); + if (!ret) + trigger->registered = 0; +} + /* * Only used internally at session destruction. */ @@ -221,12 +329,22 @@ void _lttng_event_unregister(struct lttng_event *event) unregister_event(event); } +/* + * Only used internally at session destruction. 
+ */ +static +void _lttng_trigger_unregister(struct lttng_trigger *trigger) +{ + if (trigger->registered) + unregister_trigger(trigger); +} + void lttng_session_destroy(struct lttng_session *session) { struct lttng_channel *chan, *tmpchan; struct lttng_event *event, *tmpevent; struct lttng_enum *_enum, *tmp_enum; - struct lttng_enabler *enabler, *tmpenabler; + struct lttng_event_enabler *event_enabler, *event_tmpenabler; CMM_ACCESS_ONCE(session->active) = 0; cds_list_for_each_entry(event, &session->events_head, node) { @@ -234,9 +352,9 @@ void lttng_session_destroy(struct lttng_session *session) } synchronize_trace(); /* Wait for in-flight events to complete */ __tracepoint_probe_prune_release_queue(); - cds_list_for_each_entry_safe(enabler, tmpenabler, + cds_list_for_each_entry_safe(event_enabler, event_tmpenabler, &session->enablers_head, node) - lttng_enabler_destroy(enabler); + lttng_event_enabler_destroy(event_enabler); cds_list_for_each_entry_safe(event, tmpevent, &session->events_head, node) _lttng_event_destroy(event); @@ -250,6 +368,86 @@ void lttng_session_destroy(struct lttng_session *session) free(session); } +void lttng_trigger_group_destroy( + struct lttng_trigger_group *trigger_group) +{ + int close_ret; + struct lttng_trigger_enabler *trigger_enabler, *tmptrigger_enabler; + struct lttng_trigger *trigger, *tmptrigger; + + if (!trigger_group) { + return; + } + + cds_list_for_each_entry(trigger, &trigger_group->triggers_head, node) + _lttng_trigger_unregister(trigger); + + synchronize_trace(); + + cds_list_for_each_entry_safe(trigger_enabler, tmptrigger_enabler, + &trigger_group->enablers_head, node) + lttng_trigger_enabler_destroy(trigger_enabler); + + cds_list_for_each_entry_safe(trigger, tmptrigger, + &trigger_group->triggers_head, node) + _lttng_trigger_destroy(trigger); + + if (trigger_group->error_counter) + lttng_ust_counter_destroy(trigger_group->error_counter); + + /* Close the notification fd to the listener of triggers. 
*/ + + lttng_ust_lock_fd_tracker(); + close_ret = close(trigger_group->notification_fd); + if (!close_ret) { + lttng_ust_delete_fd_from_tracker(trigger_group->notification_fd); + } else { + PERROR("close"); + abort(); + } + lttng_ust_unlock_fd_tracker(); + + cds_list_del(&trigger_group->node); + + free(trigger_group); +} + +static +void lttng_enabler_destroy(struct lttng_enabler *enabler) +{ + struct lttng_ust_bytecode_node *filter_node, *tmp_filter_node; + struct lttng_ust_excluder_node *excluder_node, *tmp_excluder_node; + + if (!enabler) { + return; + } + + /* Destroy filter bytecode */ + cds_list_for_each_entry_safe(filter_node, tmp_filter_node, + &enabler->filter_bytecode_head, node) { + free(filter_node); + } + + /* Destroy excluders */ + cds_list_for_each_entry_safe(excluder_node, tmp_excluder_node, + &enabler->excluder_head, node) { + free(excluder_node); + } +} + + void lttng_trigger_enabler_destroy(struct lttng_trigger_enabler *trigger_enabler) +{ + if (!trigger_enabler) { + return; + } + + cds_list_del(&trigger_enabler->node); + + lttng_enabler_destroy(lttng_trigger_enabler_as_enabler(trigger_enabler)); + + free(trigger_enabler); +} + static int lttng_enum_create(const struct lttng_enum_desc *desc, struct lttng_session *session) @@ -429,7 +627,7 @@ int lttng_session_enable(struct lttng_session *session) session->tstate = 1; /* We need to sync enablers with session before activation. 
*/ - lttng_session_sync_enablers(session); + lttng_session_sync_event_enablers(session); /* * Snapshot the number of events per channel to know the type of header @@ -498,7 +696,7 @@ int lttng_session_disable(struct lttng_session *session) /* Set transient enabler state to "disabled" */ session->tstate = 0; - lttng_session_sync_enablers(session); + lttng_session_sync_event_enablers(session); end: return ret; } @@ -513,7 +711,7 @@ int lttng_channel_enable(struct lttng_channel *channel) } /* Set transient enabler state to "enabled" */ channel->tstate = 1; - lttng_session_sync_enablers(channel->session); + lttng_session_sync_event_enablers(channel->session); /* Set atomically the state to "enabled" */ CMM_ACCESS_ONCE(channel->enabled) = 1; end: @@ -532,11 +730,28 @@ int lttng_channel_disable(struct lttng_channel *channel) CMM_ACCESS_ONCE(channel->enabled) = 0; /* Set transient enabler state to "enabled" */ channel->tstate = 0; - lttng_session_sync_enablers(channel->session); + lttng_session_sync_event_enablers(channel->session); end: return ret; } +static inline +struct cds_hlist_head *borrow_hash_table_bucket( + struct cds_hlist_head *hash_table, + unsigned int hash_table_size, + const struct lttng_event_desc *desc) +{ + const char *event_name; + size_t name_len; + uint32_t hash; + + event_name = desc->name; + name_len = strlen(event_name); + + hash = jhash(event_name, name_len, 0); + return &hash_table[hash & (hash_table_size - 1)]; +} + /* * Supports event creation while tracing session is active. 
*/ @@ -544,18 +759,15 @@ static int lttng_event_create(const struct lttng_event_desc *desc, struct lttng_channel *chan) { - const char *event_name = desc->name; struct lttng_event *event; struct lttng_session *session = chan->session; struct cds_hlist_head *head; int ret = 0; - size_t name_len = strlen(event_name); - uint32_t hash; int notify_socket, loglevel; const char *uri; - hash = jhash(event_name, name_len, 0); - head = &chan->session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)]; + head = borrow_hash_table_bucket(chan->session->events_ht.table, + LTTNG_UST_EVENT_HT_SIZE, desc); notify_socket = lttng_get_notify_socket(session->owner); if (notify_socket < 0) { @@ -583,7 +795,7 @@ int lttng_event_create(const struct lttng_event_desc *desc, /* Event will be enabled by enabler sync. */ event->enabled = 0; event->registered = 0; - CDS_INIT_LIST_HEAD(&event->bytecode_runtime_head); + CDS_INIT_LIST_HEAD(&event->filter_bytecode_runtime_head); CDS_INIT_LIST_HEAD(&event->enablers_ref_head); event->desc = desc; @@ -601,7 +813,7 @@ int lttng_event_create(const struct lttng_event_desc *desc, session, session->objd, chan->objd, - event_name, + desc->name, loglevel, desc->signature, desc->nr_fields, @@ -625,6 +837,69 @@ socket_error: return ret; } +static +int lttng_trigger_create(const struct lttng_event_desc *desc, + uint64_t id, uint64_t error_counter_index, + struct lttng_trigger_group *trigger_group) +{ + struct lttng_trigger *trigger; + struct cds_hlist_head *head; + int ret = 0; + + /* + * Get the hashtable bucket the created lttng_trigger object should be + * inserted. + */ + head = borrow_hash_table_bucket(trigger_group->triggers_ht.table, + LTTNG_UST_TRIGGER_HT_SIZE, desc); + + trigger = zmalloc(sizeof(struct lttng_trigger)); + if (!trigger) { + ret = -ENOMEM; + goto error; + } + + trigger->group = trigger_group; + trigger->id = id; + trigger->error_counter_index = error_counter_index; + + /* Trigger will be enabled by enabler sync. 
*/ + trigger->enabled = 0; + trigger->registered = 0; + + CDS_INIT_LIST_HEAD(&trigger->filter_bytecode_runtime_head); + CDS_INIT_LIST_HEAD(&trigger->capture_bytecode_runtime_head); + CDS_INIT_LIST_HEAD(&trigger->enablers_ref_head); + trigger->desc = desc; + + cds_list_add(&trigger->node, &trigger_group->triggers_head); + cds_hlist_add_head(&trigger->hlist, head); + + return 0; + +error: + return ret; +} + +static +void _lttng_trigger_destroy(struct lttng_trigger *trigger) +{ + struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref; + + /* Remove from trigger list. */ + cds_list_del(&trigger->node); + /* Remove from trigger hash table. */ + cds_hlist_del(&trigger->hlist); + + lttng_free_trigger_filter_runtime(trigger); + + /* Free trigger enabler refs */ + cds_list_for_each_entry_safe(enabler_ref, tmp_enabler_ref, + &trigger->enablers_ref_head, node) + free(enabler_ref); + free(trigger); +} + static int lttng_desc_match_star_glob_enabler(const struct lttng_event_desc *desc, struct lttng_enabler *enabler) @@ -632,7 +907,7 @@ int lttng_desc_match_star_glob_enabler(const struct lttng_event_desc *desc, int loglevel = 0; unsigned int has_loglevel = 0; - assert(enabler->type == LTTNG_ENABLER_STAR_GLOB); + assert(enabler->format_type == LTTNG_ENABLER_FORMAT_STAR_GLOB); if (!strutils_star_glob_match(enabler->event_param.name, SIZE_MAX, desc->name, SIZE_MAX)) return 0; @@ -655,7 +930,7 @@ int lttng_desc_match_event_enabler(const struct lttng_event_desc *desc, int loglevel = 0; unsigned int has_loglevel = 0; - assert(enabler->type == LTTNG_ENABLER_EVENT); + assert(enabler->format_type == LTTNG_ENABLER_FORMAT_EVENT); if (strcmp(desc->name, enabler->event_param.name)) return 0; if (desc->loglevel) { @@ -674,8 +949,8 @@ static int lttng_desc_match_enabler(const struct lttng_event_desc *desc, struct lttng_enabler *enabler) { - switch (enabler->type) { - case LTTNG_ENABLER_STAR_GLOB: + switch (enabler->format_type) { + case LTTNG_ENABLER_FORMAT_STAR_GLOB: { struct 
lttng_ust_excluder_node *excluder; @@ -703,7 +978,7 @@ int lttng_desc_match_enabler(const struct lttng_event_desc *desc, } return 1; } - case LTTNG_ENABLER_EVENT: + case LTTNG_ENABLER_FORMAT_EVENT: return lttng_desc_match_event_enabler(desc, enabler); default: return -EINVAL; @@ -711,24 +986,40 @@ int lttng_desc_match_enabler(const struct lttng_event_desc *desc, } static -int lttng_event_match_enabler(struct lttng_event *event, - struct lttng_enabler *enabler) +int lttng_event_enabler_match_event(struct lttng_event_enabler *event_enabler, + struct lttng_event *event) { - if (lttng_desc_match_enabler(event->desc, enabler) - && event->chan == enabler->chan) + if (lttng_desc_match_enabler(event->desc, + lttng_event_enabler_as_enabler(event_enabler)) + && event->chan == event_enabler->chan) return 1; else return 0; } static -struct lttng_enabler_ref * lttng_event_enabler_ref(struct lttng_event *event, +int lttng_trigger_enabler_match_trigger( + struct lttng_trigger_enabler *trigger_enabler, + struct lttng_trigger *trigger) +{ + int desc_matches = lttng_desc_match_enabler(trigger->desc, + lttng_trigger_enabler_as_enabler(trigger_enabler)); + + if (desc_matches && trigger->group == trigger_enabler->group && + trigger->id == trigger_enabler->id) + return 1; + else + return 0; +} + +static +struct lttng_enabler_ref *lttng_enabler_ref( + struct cds_list_head *enabler_ref_list, struct lttng_enabler *enabler) { struct lttng_enabler_ref *enabler_ref; - cds_list_for_each_entry(enabler_ref, - &event->enablers_ref_head, node) { + cds_list_for_each_entry(enabler_ref, enabler_ref_list, node) { if (enabler_ref->ref == enabler) return enabler_ref; } @@ -740,9 +1031,9 @@ struct lttng_enabler_ref * lttng_event_enabler_ref(struct lttng_event *event, * tracepoint probes. 
*/ static -void lttng_create_event_if_missing(struct lttng_enabler *enabler) +void lttng_create_event_if_missing(struct lttng_event_enabler *event_enabler) { - struct lttng_session *session = enabler->chan->session; + struct lttng_session *session = event_enabler->chan->session; struct lttng_probe_desc *probe_desc; const struct lttng_event_desc *desc; struct lttng_event *event; @@ -761,24 +1052,19 @@ void lttng_create_event_if_missing(struct lttng_enabler *enabler) bool found = false; struct cds_hlist_head *head; struct cds_hlist_node *node; - const char *event_name; - size_t name_len; - uint32_t hash; desc = probe_desc->event_desc[i]; - if (!lttng_desc_match_enabler(desc, enabler)) + if (!lttng_desc_match_enabler(desc, + lttng_event_enabler_as_enabler(event_enabler))) continue; - event_name = desc->name; - name_len = strlen(event_name); - /* - * Check if already created. - */ - hash = jhash(event_name, name_len, 0); - head = &session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)]; + head = borrow_hash_table_bucket( + session->events_ht.table, + LTTNG_UST_EVENT_HT_SIZE, desc); + cds_hlist_for_each_entry(event, node, head, hlist) { if (event->desc == desc - && event->chan == enabler->chan) { + && event->chan == event_enabler->chan) { found = true; break; } @@ -791,7 +1077,7 @@ void lttng_create_event_if_missing(struct lttng_enabler *enabler) * event probe. */ ret = lttng_event_create(probe_desc->event_desc[i], - enabler->chan); + event_enabler->chan); if (ret) { DBG("Unable to create event %s, error %d\n", probe_desc->event_desc[i]->name, ret); @@ -800,54 +1086,133 @@ void lttng_create_event_if_missing(struct lttng_enabler *enabler) } } -/* - * Iterate over all the UST sessions to unregister and destroy all probes from - * the probe provider descriptor received as argument. Must me called with the - * ust_lock held. 
- */ -void lttng_probe_provider_unregister_events(struct lttng_probe_desc *provider_desc) +static +void probe_provider_event_for_each(struct lttng_probe_desc *provider_desc, + void (*event_func)(struct lttng_session *session, + struct lttng_event *event), + void (*trigger_func)(struct lttng_trigger *trigger)) { struct cds_hlist_node *node, *tmp_node; struct cds_list_head *sessionsp; - struct lttng_session *session; - struct cds_hlist_head *head; - struct lttng_event *event; - unsigned int i, j; + unsigned int i; /* Get handle on list of sessions. */ sessionsp = _lttng_get_sessions(); /* - * Iterate over all events in the probe provider descriptions and sessions - * to queue the unregistration of the events. + * Iterate over all events in the probe provider descriptions and + * sessions to queue the unregistration of the events. */ for (i = 0; i < provider_desc->nr_events; i++) { const struct lttng_event_desc *event_desc; - const char *event_name; - size_t name_len; - uint32_t hash; + struct lttng_trigger_group *trigger_group; + struct lttng_trigger *trigger; + struct lttng_session *session; + struct cds_hlist_head *head; + struct lttng_event *event; event_desc = provider_desc->event_desc[i]; - event_name = event_desc->name; - name_len = strlen(event_name); - hash = jhash(event_name, name_len, 0); - /* Iterate over all session to find the current event description. */ + /* + * Iterate over all session to find the current event + * description. + */ cds_list_for_each_entry(session, sessionsp, node) { /* - * Get the list of events in the hashtable bucket and iterate to - * find the event matching this descriptor. + * Get the list of events in the hashtable bucket and + * iterate to find the event matching this descriptor. 
*/ - head = &session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)]; - cds_hlist_for_each_entry(event, node, head, hlist) { + head = borrow_hash_table_bucket( + session->events_ht.table, + LTTNG_UST_EVENT_HT_SIZE, event_desc); + + cds_hlist_for_each_entry_safe(event, node, tmp_node, head, hlist) { if (event_desc == event->desc) { - /* Queue the unregistration of this event. */ - _lttng_event_unregister(event); + event_func(session, event); + break; + } + } + } + + /* + * Iterate over all trigger groups to find the current event + * description. + */ + cds_list_for_each_entry(trigger_group, &trigger_groups, node) { + /* + * Get the list of triggers in the hashtable bucket and + * iterate to find the trigger matching this + * descriptor. + */ + head = borrow_hash_table_bucket( + trigger_group->triggers_ht.table, + LTTNG_UST_TRIGGER_HT_SIZE, event_desc); + + cds_hlist_for_each_entry_safe(trigger, node, tmp_node, head, hlist) { + if (event_desc == trigger->desc) { + trigger_func(trigger); break; } } } } +} + +static +void _unregister_event(struct lttng_session *session, + struct lttng_event *event) +{ + _lttng_event_unregister(event); +} + +static +void _event_enum_destroy(struct lttng_session *session, + struct lttng_event *event) +{ + unsigned int i; + + /* Destroy enums of the current event. */ + for (i = 0; i < event->desc->nr_fields; i++) { + const struct lttng_enum_desc *enum_desc; + const struct lttng_event_field *field; + struct lttng_enum *curr_enum; + + field = &(event->desc->fields[i]); + switch (field->type.atype) { + case atype_enum: + enum_desc = field->type.u.legacy.basic.enumeration.desc; + break; + case atype_enum_nestable: + enum_desc = field->type.u.enum_nestable.desc; + break; + default: + continue; + } + + curr_enum = lttng_ust_enum_get_from_desc(session, enum_desc); + if (curr_enum) { + _lttng_enum_destroy(curr_enum); + } + } + + /* Destroy event. 
*/ + _lttng_event_destroy(event); +} + +/* + * Iterate over all the UST sessions to unregister and destroy all probes from + * the probe provider descriptor received as argument. Must be called with the + * ust_lock held. + */ +void lttng_probe_provider_unregister_events( + struct lttng_probe_desc *provider_desc) +{ + /* + * Iterate over all events in the probe provider descriptions and sessions + * to queue the unregistration of the events. + */ + probe_provider_event_for_each(provider_desc, _unregister_event, + _lttng_trigger_unregister); /* Wait for grace period. */ synchronize_trace(); @@ -858,82 +1223,35 @@ void lttng_probe_provider_unregister_events(struct lttng_probe_desc *provider_de * It is now safe to destroy the events and remove them from the event list * and hashtables. */ - for (i = 0; i < provider_desc->nr_events; i++) { - const struct lttng_event_desc *event_desc; - const char *event_name; - size_t name_len; - uint32_t hash; - - event_desc = provider_desc->event_desc[i]; - event_name = event_desc->name; - name_len = strlen(event_name); - hash = jhash(event_name, name_len, 0); - - /* Iterate over all sessions to find the current event description. */ - cds_list_for_each_entry(session, sessionsp, node) { - /* - * Get the list of events in the hashtable bucket and iterate to - * find the event matching this descriptor. - */ - head = &session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)]; - cds_hlist_for_each_entry_safe(event, node, tmp_node, head, hlist) { - if (event_desc == event->desc) { - /* Destroy enums of the current event. 
*/ - for (j = 0; j < event->desc->nr_fields; j++) { - const struct lttng_enum_desc *enum_desc; - const struct lttng_event_field *field; - struct lttng_enum *curr_enum; - - field = &(event->desc->fields[j]); - switch (field->type.atype) { - case atype_enum: - enum_desc = field->type.u.legacy.basic.enumeration.desc; - break; - case atype_enum_nestable: - enum_desc = field->type.u.enum_nestable.desc; - break; - default: - continue; - } - curr_enum = lttng_ust_enum_get_from_desc(session, enum_desc); - if (curr_enum) { - _lttng_enum_destroy(curr_enum); - } - } - - /* Destroy event. */ - _lttng_event_destroy(event); - break; - } - } - } - } + probe_provider_event_for_each(provider_desc, _event_enum_destroy, + _lttng_trigger_destroy); } /* - * Create events associated with an enabler (if not already present), + * Create events associated with an event enabler (if not already present), * and add backward reference from the event to the enabler. */ static -int lttng_enabler_ref_events(struct lttng_enabler *enabler) +int lttng_event_enabler_ref_events(struct lttng_event_enabler *event_enabler) { - struct lttng_session *session = enabler->chan->session; + struct lttng_session *session = event_enabler->chan->session; struct lttng_event *event; - if (!enabler->enabled) + if (!lttng_event_enabler_as_enabler(event_enabler)->enabled) goto end; /* First ensure that probe events are created for this enabler. */ - lttng_create_event_if_missing(enabler); + lttng_create_event_if_missing(event_enabler); /* For each event matching enabler in session event list. 
*/ cds_list_for_each_entry(event, &session->events_head, node) { struct lttng_enabler_ref *enabler_ref; - if (!lttng_event_match_enabler(event, enabler)) + if (!lttng_event_enabler_match_event(event_enabler, event)) continue; - enabler_ref = lttng_event_enabler_ref(event, enabler); + enabler_ref = lttng_enabler_ref(&event->enablers_ref_head, + lttng_event_enabler_as_enabler(event_enabler)); if (!enabler_ref) { /* * If no backward ref, create it. @@ -942,7 +1260,8 @@ int lttng_enabler_ref_events(struct lttng_enabler *enabler) enabler_ref = zmalloc(sizeof(*enabler_ref)); if (!enabler_ref) return -ENOMEM; - enabler_ref->ref = enabler; + enabler_ref->ref = lttng_event_enabler_as_enabler( + event_enabler); cds_list_add(&enabler_ref->node, &event->enablers_ref_head); } @@ -950,7 +1269,10 @@ int lttng_enabler_ref_events(struct lttng_enabler *enabler) /* * Link filter bytecodes if not linked yet. */ - lttng_enabler_event_link_bytecode(event, enabler); + lttng_enabler_link_bytecode(event->desc, + &session->ctx, + &event->filter_bytecode_runtime_head, + &lttng_event_enabler_as_enabler(event_enabler)->filter_bytecode_head); /* TODO: merge event context. */ } @@ -968,7 +1290,17 @@ int lttng_fix_pending_events(void) struct lttng_session *session; cds_list_for_each_entry(session, &sessions, node) { - lttng_session_lazy_sync_enablers(session); + lttng_session_lazy_sync_event_enablers(session); + } + return 0; +} + +int lttng_fix_pending_triggers(void) +{ + struct lttng_trigger_group *trigger_group; + + cds_list_for_each_entry(trigger_group, &trigger_groups, node) { + lttng_trigger_group_sync_enablers(trigger_group); } return 0; } @@ -1042,57 +1374,163 @@ void lttng_ust_events_exit(void) /* * Enabler management. 
*/ -struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type, +struct lttng_event_enabler *lttng_event_enabler_create( + enum lttng_enabler_format_type format_type, struct lttng_ust_event *event_param, struct lttng_channel *chan) { - struct lttng_enabler *enabler; + struct lttng_event_enabler *event_enabler; - enabler = zmalloc(sizeof(*enabler)); - if (!enabler) + event_enabler = zmalloc(sizeof(*event_enabler)); + if (!event_enabler) return NULL; - enabler->type = type; - CDS_INIT_LIST_HEAD(&enabler->filter_bytecode_head); - CDS_INIT_LIST_HEAD(&enabler->excluder_head); - memcpy(&enabler->event_param, event_param, - sizeof(enabler->event_param)); - enabler->chan = chan; + event_enabler->base.format_type = format_type; + CDS_INIT_LIST_HEAD(&event_enabler->base.filter_bytecode_head); + CDS_INIT_LIST_HEAD(&event_enabler->base.excluder_head); + memcpy(&event_enabler->base.event_param, event_param, + sizeof(event_enabler->base.event_param)); + event_enabler->chan = chan; /* ctx left NULL */ - enabler->enabled = 0; - cds_list_add(&enabler->node, &enabler->chan->session->enablers_head); - lttng_session_lazy_sync_enablers(enabler->chan->session); - return enabler; + event_enabler->base.enabled = 0; + cds_list_add(&event_enabler->node, &event_enabler->chan->session->enablers_head); + lttng_session_lazy_sync_event_enablers(event_enabler->chan->session); + + return event_enabler; +} + +struct lttng_trigger_enabler *lttng_trigger_enabler_create( + struct lttng_trigger_group *trigger_group, + enum lttng_enabler_format_type format_type, + struct lttng_ust_trigger *trigger_param) +{ + struct lttng_trigger_enabler *trigger_enabler; + + trigger_enabler = zmalloc(sizeof(*trigger_enabler)); + if (!trigger_enabler) + return NULL; + trigger_enabler->base.format_type = format_type; + CDS_INIT_LIST_HEAD(&trigger_enabler->base.filter_bytecode_head); + CDS_INIT_LIST_HEAD(&trigger_enabler->capture_bytecode_head); + CDS_INIT_LIST_HEAD(&trigger_enabler->base.excluder_head); + + 
trigger_enabler->id = trigger_param->id; + trigger_enabler->num_captures = 0; + + memcpy(&trigger_enabler->base.event_param.name, trigger_param->name, + sizeof(trigger_enabler->base.event_param.name)); + trigger_enabler->base.event_param.instrumentation = trigger_param->instrumentation; + trigger_enabler->base.event_param.loglevel = trigger_param->loglevel; + trigger_enabler->base.event_param.loglevel_type = trigger_param->loglevel_type; + + trigger_enabler->base.enabled = 0; + trigger_enabler->group = trigger_group; + + cds_list_add(&trigger_enabler->node, &trigger_group->enablers_head); + + lttng_trigger_group_sync_enablers(trigger_group); + + return trigger_enabler; } -int lttng_enabler_enable(struct lttng_enabler *enabler) +int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler) { - enabler->enabled = 1; - lttng_session_lazy_sync_enablers(enabler->chan->session); + lttng_event_enabler_as_enabler(event_enabler)->enabled = 1; + lttng_session_lazy_sync_event_enablers(event_enabler->chan->session); + return 0; } -int lttng_enabler_disable(struct lttng_enabler *enabler) +int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler) { - enabler->enabled = 0; - lttng_session_lazy_sync_enablers(enabler->chan->session); + lttng_event_enabler_as_enabler(event_enabler)->enabled = 0; + lttng_session_lazy_sync_event_enablers(event_enabler->chan->session); + return 0; } -int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler, - struct lttng_ust_filter_bytecode_node *bytecode) +static +void _lttng_enabler_attach_filter_bytecode(struct lttng_enabler *enabler, + struct lttng_ust_bytecode_node *bytecode) { bytecode->enabler = enabler; cds_list_add_tail(&bytecode->node, &enabler->filter_bytecode_head); - lttng_session_lazy_sync_enablers(enabler->chan->session); +} + +int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler *event_enabler, + struct lttng_ust_bytecode_node *bytecode) +{ + 
_lttng_enabler_attach_filter_bytecode( + lttng_event_enabler_as_enabler(event_enabler), bytecode); + + lttng_session_lazy_sync_event_enablers(event_enabler->chan->session); return 0; } -int lttng_enabler_attach_exclusion(struct lttng_enabler *enabler, +static +void _lttng_enabler_attach_exclusion(struct lttng_enabler *enabler, struct lttng_ust_excluder_node *excluder) { excluder->enabler = enabler; cds_list_add_tail(&excluder->node, &enabler->excluder_head); - lttng_session_lazy_sync_enablers(enabler->chan->session); +} + +int lttng_event_enabler_attach_exclusion(struct lttng_event_enabler *event_enabler, + struct lttng_ust_excluder_node *excluder) +{ + _lttng_enabler_attach_exclusion( + lttng_event_enabler_as_enabler(event_enabler), excluder); + + lttng_session_lazy_sync_event_enablers(event_enabler->chan->session); + return 0; +} + +int lttng_trigger_enabler_enable(struct lttng_trigger_enabler *trigger_enabler) +{ + lttng_trigger_enabler_as_enabler(trigger_enabler)->enabled = 1; + lttng_trigger_group_sync_enablers(trigger_enabler->group); + + return 0; +} + +int lttng_trigger_enabler_disable(struct lttng_trigger_enabler *trigger_enabler) +{ + lttng_trigger_enabler_as_enabler(trigger_enabler)->enabled = 0; + lttng_trigger_group_sync_enablers(trigger_enabler->group); + + return 0; +} + +int lttng_trigger_enabler_attach_filter_bytecode( + struct lttng_trigger_enabler *trigger_enabler, + struct lttng_ust_bytecode_node *bytecode) +{ + _lttng_enabler_attach_filter_bytecode( + lttng_trigger_enabler_as_enabler(trigger_enabler), bytecode); + + lttng_trigger_group_sync_enablers(trigger_enabler->group); + return 0; +} + +int lttng_trigger_enabler_attach_capture_bytecode( + struct lttng_trigger_enabler *trigger_enabler, + struct lttng_ust_bytecode_node *bytecode) +{ + bytecode->enabler = lttng_trigger_enabler_as_enabler(trigger_enabler); + cds_list_add_tail(&bytecode->node, &trigger_enabler->capture_bytecode_head); + trigger_enabler->num_captures++; + + 
lttng_trigger_group_sync_enablers(trigger_enabler->group); + return 0; +} + +int lttng_trigger_enabler_attach_exclusion( + struct lttng_trigger_enabler *trigger_enabler, + struct lttng_ust_excluder_node *excluder) +{ + _lttng_enabler_attach_exclusion( + lttng_trigger_enabler_as_enabler(trigger_enabler), excluder); + + lttng_trigger_group_sync_enablers(trigger_enabler->group); return 0; } @@ -1168,59 +1606,37 @@ int lttng_attach_context(struct lttng_ust_context *context_param, } } -int lttng_enabler_attach_context(struct lttng_enabler *enabler, +int lttng_event_enabler_attach_context(struct lttng_event_enabler *enabler, struct lttng_ust_context *context_param) { -#if 0 // disabled for now. - struct lttng_session *session = enabler->chan->session; - int ret; - - ret = lttng_attach_context(context_param, &enabler->ctx, - session); - if (ret) - return ret; - lttng_session_lazy_sync_enablers(enabler->chan->session); -#endif return -ENOSYS; } -static -void lttng_enabler_destroy(struct lttng_enabler *enabler) +void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler) { - struct lttng_ust_filter_bytecode_node *filter_node, *tmp_filter_node; - struct lttng_ust_excluder_node *excluder_node, *tmp_excluder_node; - - /* Destroy filter bytecode */ - cds_list_for_each_entry_safe(filter_node, tmp_filter_node, - &enabler->filter_bytecode_head, node) { - free(filter_node); - } - - /* Destroy excluders */ - cds_list_for_each_entry_safe(excluder_node, tmp_excluder_node, - &enabler->excluder_head, node) { - free(excluder_node); + if (!event_enabler) { + return; } + cds_list_del(&event_enabler->node); - /* Destroy contexts */ - lttng_destroy_context(enabler->ctx); + lttng_enabler_destroy(lttng_event_enabler_as_enabler(event_enabler)); - cds_list_del(&enabler->node); - free(enabler); + lttng_destroy_context(event_enabler->ctx); + free(event_enabler); } /* - * lttng_session_sync_enablers should be called just before starting a + * lttng_session_sync_event_enablers should 
be called just before starting a * session. */ static -void lttng_session_sync_enablers(struct lttng_session *session) +void lttng_session_sync_event_enablers(struct lttng_session *session) { - struct lttng_enabler *enabler; + struct lttng_event_enabler *event_enabler; struct lttng_event *event; - cds_list_for_each_entry(enabler, &session->enablers_head, node) - lttng_enabler_ref_events(enabler); + cds_list_for_each_entry(event_enabler, &session->enablers_head, node) + lttng_event_enabler_ref_events(event_enabler); /* * For each event, if at least one of its enablers is enabled, * and its channel and session transient states are enabled, we @@ -1273,8 +1689,202 @@ void lttng_session_sync_enablers(struct lttng_session *session) /* Enable filters */ cds_list_for_each_entry(runtime, - &event->bytecode_runtime_head, node) { - lttng_filter_sync_state(runtime); + &event->filter_bytecode_runtime_head, node) { + lttng_bytecode_filter_sync_state(runtime); + } + } + __tracepoint_probe_prune_release_queue(); +} + +static +void lttng_create_trigger_if_missing(struct lttng_trigger_enabler *trigger_enabler) +{ + struct lttng_trigger_group *trigger_group = trigger_enabler->group; + struct lttng_probe_desc *probe_desc; + struct cds_list_head *probe_list; + int i; + + probe_list = lttng_get_probe_list_head(); + + cds_list_for_each_entry(probe_desc, probe_list, head) { + for (i = 0; i < probe_desc->nr_events; i++) { + int ret; + bool found = false; + const struct lttng_event_desc *desc; + struct lttng_trigger *trigger; + struct cds_hlist_head *head; + struct cds_hlist_node *node; + + desc = probe_desc->event_desc[i]; + + if (!lttng_desc_match_enabler(desc, + lttng_trigger_enabler_as_enabler(trigger_enabler))) + continue; + + /* + * Given the current trigger group, get the bucket that + * the target trigger would be if it was already + * created. 
+ */ + head = borrow_hash_table_bucket( + trigger_group->triggers_ht.table, + LTTNG_UST_TRIGGER_HT_SIZE, desc); + + cds_hlist_for_each_entry(trigger, node, head, hlist) { + /* + * Check if trigger already exists by checking + * if the trigger and enabler share the same + * description and id. + */ + if (trigger->desc == desc && + trigger->id == trigger_enabler->id) { + found = true; + break; + } + } + + if (found) + continue; + + /* + * We need to create a trigger for this event probe. + */ + ret = lttng_trigger_create(desc, trigger_enabler->id, + trigger_enabler->error_counter_index, + trigger_group); + if (ret) { + DBG("Unable to create trigger %s, error %d\n", + probe_desc->event_desc[i]->name, ret); + } + } + } +} + +/* + * Create triggers associated with a trigger enabler (if not already present). + */ +static +int lttng_trigger_enabler_ref_triggers(struct lttng_trigger_enabler *trigger_enabler) +{ + struct lttng_trigger_group *trigger_group = trigger_enabler->group; + struct lttng_trigger *trigger; + + /* + * Only try to create triggers for enablers that are enabled, the user + * might still be attaching filter or exclusion to the + * trigger_enabler. + */ + if (!lttng_trigger_enabler_as_enabler(trigger_enabler)->enabled) + goto end; + + /* First, ensure that probe triggers are created for this enabler. */ + lttng_create_trigger_if_missing(trigger_enabler); + + /* Link the created trigger with its associated enabler. */ + cds_list_for_each_entry(trigger, &trigger_group->triggers_head, node) { + struct lttng_enabler_ref *enabler_ref; + + if (!lttng_trigger_enabler_match_trigger(trigger_enabler, trigger)) + continue; + + enabler_ref = lttng_enabler_ref(&trigger->enablers_ref_head, + lttng_trigger_enabler_as_enabler(trigger_enabler)); + if (!enabler_ref) { + /* + * If no backward ref, create it. + * Add backward ref from trigger to enabler. 
+ */ + enabler_ref = zmalloc(sizeof(*enabler_ref)); + if (!enabler_ref) + return -ENOMEM; + + enabler_ref->ref = lttng_trigger_enabler_as_enabler( + trigger_enabler); + cds_list_add(&enabler_ref->node, + &trigger->enablers_ref_head); + } + + /* + * Link filter bytecodes if not linked yet. + */ + lttng_enabler_link_bytecode(trigger->desc, + &trigger_group->ctx, &trigger->filter_bytecode_runtime_head, + &lttng_trigger_enabler_as_enabler(trigger_enabler)->filter_bytecode_head); + + /* + * Link capture bytecodes if not linked yet. + */ + lttng_enabler_link_bytecode(trigger->desc, + &trigger_group->ctx, &trigger->capture_bytecode_runtime_head, + &trigger_enabler->capture_bytecode_head); + trigger->num_captures = trigger_enabler->num_captures; + } +end: + return 0; +} + +static +void lttng_trigger_group_sync_enablers(struct lttng_trigger_group *trigger_group) +{ + struct lttng_trigger_enabler *trigger_enabler; + struct lttng_trigger *trigger; + + cds_list_for_each_entry(trigger_enabler, &trigger_group->enablers_head, node) + lttng_trigger_enabler_ref_triggers(trigger_enabler); + + /* + * For each trigger, if at least one of its enablers is enabled, + * we enable the trigger, else we disable it. + */ + cds_list_for_each_entry(trigger, &trigger_group->triggers_head, node) { + struct lttng_enabler_ref *enabler_ref; + struct lttng_bytecode_runtime *runtime; + int enabled = 0, has_enablers_without_bytecode = 0; + + /* Enable triggers */ + cds_list_for_each_entry(enabler_ref, + &trigger->enablers_ref_head, node) { + if (enabler_ref->ref->enabled) { + enabled = 1; + break; + } + } + + CMM_STORE_SHARED(trigger->enabled, enabled); + /* + * Sync tracepoint registration with trigger enabled + * state. 
+ */ + if (enabled) { + if (!trigger->registered) + register_trigger(trigger); + } else { + if (trigger->registered) + unregister_trigger(trigger); + } + + /* Check if has enablers without bytecode enabled */ + cds_list_for_each_entry(enabler_ref, + &trigger->enablers_ref_head, node) { + if (enabler_ref->ref->enabled + && cds_list_empty(&enabler_ref->ref->filter_bytecode_head)) { + has_enablers_without_bytecode = 1; + break; + } + } + trigger->has_enablers_without_bytecode = + has_enablers_without_bytecode; + + /* Enable filters */ + cds_list_for_each_entry(runtime, + &trigger->filter_bytecode_runtime_head, node) { + lttng_bytecode_filter_sync_state(runtime); + } + + /* Enable captures. */ + cds_list_for_each_entry(runtime, + &trigger->capture_bytecode_runtime_head, node) { + lttng_bytecode_capture_sync_state(runtime); } } __tracepoint_probe_prune_release_queue(); @@ -1287,12 +1897,12 @@ void lttng_session_sync_enablers(struct lttng_session *session) * "lazy" sync means we only sync if required. */ static -void lttng_session_lazy_sync_enablers(struct lttng_session *session) +void lttng_session_lazy_sync_event_enablers(struct lttng_session *session) { /* We can skip if session is not active */ if (!session->active) return; - lttng_session_sync_enablers(session); + lttng_session_sync_event_enablers(session); } /* @@ -1335,3 +1945,30 @@ void lttng_ust_context_set_session_provider(const char *name, } } } + +/* + * Update all trigger groups with the given app context. + * Called with ust lock held. + * This is invoked when an application context gets loaded/unloaded. It + * ensures the context callbacks are in sync with the application + * context (either app context callbacks, or dummy callbacks). 
+ */ +void lttng_ust_context_set_trigger_group_provider(const char *name, + size_t (*get_size)(struct lttng_ctx_field *field, size_t offset), + void (*record)(struct lttng_ctx_field *field, + struct lttng_ust_lib_ring_buffer_ctx *ctx, + struct lttng_channel *chan), + void (*get_value)(struct lttng_ctx_field *field, + struct lttng_ctx_value *value)) +{ + struct lttng_trigger_group *trigger_group; + + cds_list_for_each_entry(trigger_group, &trigger_groups, node) { + int ret; + + ret = lttng_ust_context_set_provider_rcu(&trigger_group->ctx, + name, get_size, record, get_value); + if (ret) + abort(); + } +} diff --git a/liblttng-ust/lttng-filter.c b/liblttng-ust/lttng-filter.c deleted file mode 100644 index bbc21289..00000000 --- a/liblttng-ust/lttng-filter.c +++ /dev/null @@ -1,581 +0,0 @@ -/* - * lttng-filter.c - * - * LTTng UST filter code. - * - * Copyright (C) 2010-2016 Mathieu Desnoyers - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#define _LGPL_SOURCE -#include -#include - -#include - -#include "lttng-filter.h" - -static const char *opnames[] = { - [ FILTER_OP_UNKNOWN ] = "UNKNOWN", - - [ FILTER_OP_RETURN ] = "RETURN", - - /* binary */ - [ FILTER_OP_MUL ] = "MUL", - [ FILTER_OP_DIV ] = "DIV", - [ FILTER_OP_MOD ] = "MOD", - [ FILTER_OP_PLUS ] = "PLUS", - [ FILTER_OP_MINUS ] = "MINUS", - [ FILTER_OP_BIT_RSHIFT ] = "BIT_RSHIFT", - [ FILTER_OP_BIT_LSHIFT ] = "BIT_LSHIFT", - [ FILTER_OP_BIT_AND ] = "BIT_AND", - [ FILTER_OP_BIT_OR ] = "BIT_OR", - [ FILTER_OP_BIT_XOR ] = "BIT_XOR", - - /* binary comparators */ - [ FILTER_OP_EQ ] = "EQ", - [ FILTER_OP_NE ] = "NE", - [ FILTER_OP_GT ] = "GT", - [ FILTER_OP_LT ] = "LT", - [ FILTER_OP_GE ] = "GE", - [ FILTER_OP_LE ] = "LE", - - /* string binary comparators */ - [ FILTER_OP_EQ_STRING ] = "EQ_STRING", - [ FILTER_OP_NE_STRING ] = "NE_STRING", - [ FILTER_OP_GT_STRING ] = "GT_STRING", - [ FILTER_OP_LT_STRING ] = "LT_STRING", - [ FILTER_OP_GE_STRING ] = "GE_STRING", - [ FILTER_OP_LE_STRING ] = "LE_STRING", - - /* s64 binary comparators */ - [ FILTER_OP_EQ_S64 ] = "EQ_S64", - [ FILTER_OP_NE_S64 ] = "NE_S64", - [ FILTER_OP_GT_S64 ] = "GT_S64", - [ FILTER_OP_LT_S64 ] = "LT_S64", - [ FILTER_OP_GE_S64 ] = "GE_S64", - [ FILTER_OP_LE_S64 ] = "LE_S64", - - /* double binary comparators */ - [ FILTER_OP_EQ_DOUBLE ] = "EQ_DOUBLE", - [ FILTER_OP_NE_DOUBLE ] = "NE_DOUBLE", - [ FILTER_OP_GT_DOUBLE ] = "GT_DOUBLE", - [ FILTER_OP_LT_DOUBLE ] = "LT_DOUBLE", - [ FILTER_OP_GE_DOUBLE ] = "GE_DOUBLE", - [ FILTER_OP_LE_DOUBLE ] = "LE_DOUBLE", - - /* Mixed S64-double binary comparators */ - [ FILTER_OP_EQ_DOUBLE_S64 ] = "EQ_DOUBLE_S64", - [ FILTER_OP_NE_DOUBLE_S64 ] = "NE_DOUBLE_S64", - [ FILTER_OP_GT_DOUBLE_S64 ] = "GT_DOUBLE_S64", - [ FILTER_OP_LT_DOUBLE_S64 ] = "LT_DOUBLE_S64", - [ FILTER_OP_GE_DOUBLE_S64 ] = "GE_DOUBLE_S64", - [ FILTER_OP_LE_DOUBLE_S64 ] = "LE_DOUBLE_S64", - - [ FILTER_OP_EQ_S64_DOUBLE ] = "EQ_S64_DOUBLE", - [ FILTER_OP_NE_S64_DOUBLE ] = 
"NE_S64_DOUBLE", - [ FILTER_OP_GT_S64_DOUBLE ] = "GT_S64_DOUBLE", - [ FILTER_OP_LT_S64_DOUBLE ] = "LT_S64_DOUBLE", - [ FILTER_OP_GE_S64_DOUBLE ] = "GE_S64_DOUBLE", - [ FILTER_OP_LE_S64_DOUBLE ] = "LE_S64_DOUBLE", - - /* unary */ - [ FILTER_OP_UNARY_PLUS ] = "UNARY_PLUS", - [ FILTER_OP_UNARY_MINUS ] = "UNARY_MINUS", - [ FILTER_OP_UNARY_NOT ] = "UNARY_NOT", - [ FILTER_OP_UNARY_PLUS_S64 ] = "UNARY_PLUS_S64", - [ FILTER_OP_UNARY_MINUS_S64 ] = "UNARY_MINUS_S64", - [ FILTER_OP_UNARY_NOT_S64 ] = "UNARY_NOT_S64", - [ FILTER_OP_UNARY_PLUS_DOUBLE ] = "UNARY_PLUS_DOUBLE", - [ FILTER_OP_UNARY_MINUS_DOUBLE ] = "UNARY_MINUS_DOUBLE", - [ FILTER_OP_UNARY_NOT_DOUBLE ] = "UNARY_NOT_DOUBLE", - - /* logical */ - [ FILTER_OP_AND ] = "AND", - [ FILTER_OP_OR ] = "OR", - - /* load field ref */ - [ FILTER_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF", - [ FILTER_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING", - [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE", - [ FILTER_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64", - [ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE", - - /* load from immediate operand */ - [ FILTER_OP_LOAD_STRING ] = "LOAD_STRING", - [ FILTER_OP_LOAD_S64 ] = "LOAD_S64", - [ FILTER_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE", - - /* cast */ - [ FILTER_OP_CAST_TO_S64 ] = "CAST_TO_S64", - [ FILTER_OP_CAST_DOUBLE_TO_S64 ] = "CAST_DOUBLE_TO_S64", - [ FILTER_OP_CAST_NOP ] = "CAST_NOP", - - /* get context ref */ - [ FILTER_OP_GET_CONTEXT_REF ] = "GET_CONTEXT_REF", - [ FILTER_OP_GET_CONTEXT_REF_STRING ] = "GET_CONTEXT_REF_STRING", - [ FILTER_OP_GET_CONTEXT_REF_S64 ] = "GET_CONTEXT_REF_S64", - [ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = "GET_CONTEXT_REF_DOUBLE", - - /* load userspace field ref */ - [ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = "LOAD_FIELD_REF_USER_STRING", - [ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = "LOAD_FIELD_REF_USER_SEQUENCE", - - /* - * load immediate star globbing pattern (literal string) - * from immediate. 
- */ - [ FILTER_OP_LOAD_STAR_GLOB_STRING ] = "LOAD_STAR_GLOB_STRING", - - /* globbing pattern binary operator: apply to */ - [ FILTER_OP_EQ_STAR_GLOB_STRING ] = "EQ_STAR_GLOB_STRING", - [ FILTER_OP_NE_STAR_GLOB_STRING ] = "NE_STAR_GLOB_STRING", - - /* - * Instructions for recursive traversal through composed types. - */ - [ FILTER_OP_GET_CONTEXT_ROOT ] = "GET_CONTEXT_ROOT", - [ FILTER_OP_GET_APP_CONTEXT_ROOT ] = "GET_APP_CONTEXT_ROOT", - [ FILTER_OP_GET_PAYLOAD_ROOT ] = "GET_PAYLOAD_ROOT", - - [ FILTER_OP_GET_SYMBOL ] = "GET_SYMBOL", - [ FILTER_OP_GET_SYMBOL_FIELD ] = "GET_SYMBOL_FIELD", - [ FILTER_OP_GET_INDEX_U16 ] = "GET_INDEX_U16", - [ FILTER_OP_GET_INDEX_U64 ] = "GET_INDEX_U64", - - [ FILTER_OP_LOAD_FIELD ] = "LOAD_FIELD", - [ FILTER_OP_LOAD_FIELD_S8 ] = "LOAD_FIELD_S8", - [ FILTER_OP_LOAD_FIELD_S16 ] = "LOAD_FIELD_S16", - [ FILTER_OP_LOAD_FIELD_S32 ] = "LOAD_FIELD_S32", - [ FILTER_OP_LOAD_FIELD_S64 ] = "LOAD_FIELD_S64", - [ FILTER_OP_LOAD_FIELD_U8 ] = "LOAD_FIELD_U8", - [ FILTER_OP_LOAD_FIELD_U16 ] = "LOAD_FIELD_U16", - [ FILTER_OP_LOAD_FIELD_U32 ] = "LOAD_FIELD_U32", - [ FILTER_OP_LOAD_FIELD_U64 ] = "LOAD_FIELD_U64", - [ FILTER_OP_LOAD_FIELD_STRING ] = "LOAD_FIELD_STRING", - [ FILTER_OP_LOAD_FIELD_SEQUENCE ] = "LOAD_FIELD_SEQUENCE", - [ FILTER_OP_LOAD_FIELD_DOUBLE ] = "LOAD_FIELD_DOUBLE", - - [ FILTER_OP_UNARY_BIT_NOT ] = "UNARY_BIT_NOT", - - [ FILTER_OP_RETURN_S64 ] = "RETURN_S64", -}; - -const char *print_op(enum filter_op op) -{ - if (op >= NR_FILTER_OPS) - return "UNKNOWN"; - else - return opnames[op]; -} - -static -int apply_field_reloc(struct lttng_event *event, - struct bytecode_runtime *runtime, - uint32_t runtime_len, - uint32_t reloc_offset, - const char *field_name, - enum filter_op filter_op) -{ - const struct lttng_event_desc *desc; - const struct lttng_event_field *fields, *field = NULL; - unsigned int nr_fields, i; - struct load_op *op; - uint32_t field_offset = 0; - - dbg_printf("Apply field reloc: %u %s\n", reloc_offset, field_name); - - /* 
Lookup event by name */ - desc = event->desc; - if (!desc) - return -EINVAL; - fields = desc->fields; - if (!fields) - return -EINVAL; - nr_fields = desc->nr_fields; - for (i = 0; i < nr_fields; i++) { - if (fields[i].u.ext.nofilter) { - continue; - } - if (!strcmp(fields[i].name, field_name)) { - field = &fields[i]; - break; - } - /* compute field offset */ - switch (fields[i].type.atype) { - case atype_integer: - case atype_enum: - case atype_enum_nestable: - field_offset += sizeof(int64_t); - break; - case atype_array: - case atype_array_nestable: - case atype_sequence: - case atype_sequence_nestable: - field_offset += sizeof(unsigned long); - field_offset += sizeof(void *); - break; - case atype_string: - field_offset += sizeof(void *); - break; - case atype_float: - field_offset += sizeof(double); - break; - default: - return -EINVAL; - } - } - if (!field) - return -EINVAL; - - /* Check if field offset is too large for 16-bit offset */ - if (field_offset > FILTER_BYTECODE_MAX_LEN - 1) - return -EINVAL; - - /* set type */ - op = (struct load_op *) &runtime->code[reloc_offset]; - - switch (filter_op) { - case FILTER_OP_LOAD_FIELD_REF: - { - struct field_ref *field_ref; - - field_ref = (struct field_ref *) op->data; - switch (field->type.atype) { - case atype_integer: - case atype_enum: - case atype_enum_nestable: - op->op = FILTER_OP_LOAD_FIELD_REF_S64; - break; - case atype_array: - case atype_array_nestable: - case atype_sequence: - case atype_sequence_nestable: - op->op = FILTER_OP_LOAD_FIELD_REF_SEQUENCE; - break; - case atype_string: - op->op = FILTER_OP_LOAD_FIELD_REF_STRING; - break; - case atype_float: - op->op = FILTER_OP_LOAD_FIELD_REF_DOUBLE; - break; - default: - return -EINVAL; - } - /* set offset */ - field_ref->offset = (uint16_t) field_offset; - break; - } - default: - return -EINVAL; - } - return 0; -} - -static -int apply_context_reloc(struct lttng_event *event, - struct bytecode_runtime *runtime, - uint32_t runtime_len, - uint32_t 
reloc_offset, - const char *context_name, - enum filter_op filter_op) -{ - struct load_op *op; - struct lttng_ctx_field *ctx_field; - int idx; - struct lttng_session *session = runtime->p.session; - - dbg_printf("Apply context reloc: %u %s\n", reloc_offset, context_name); - - /* Get context index */ - idx = lttng_get_context_index(session->ctx, context_name); - if (idx < 0) { - if (lttng_context_is_app(context_name)) { - int ret; - - ret = lttng_ust_add_app_context_to_ctx_rcu(context_name, - &session->ctx); - if (ret) - return ret; - idx = lttng_get_context_index(session->ctx, - context_name); - if (idx < 0) - return -ENOENT; - } else { - return -ENOENT; - } - } - /* Check if idx is too large for 16-bit offset */ - if (idx > FILTER_BYTECODE_MAX_LEN - 1) - return -EINVAL; - - /* Get context return type */ - ctx_field = &session->ctx->fields[idx]; - op = (struct load_op *) &runtime->code[reloc_offset]; - - switch (filter_op) { - case FILTER_OP_GET_CONTEXT_REF: - { - struct field_ref *field_ref; - - field_ref = (struct field_ref *) op->data; - switch (ctx_field->event_field.type.atype) { - case atype_integer: - case atype_enum: - case atype_enum_nestable: - op->op = FILTER_OP_GET_CONTEXT_REF_S64; - break; - /* Sequence and array supported as string */ - case atype_string: - case atype_array: - case atype_array_nestable: - case atype_sequence: - case atype_sequence_nestable: - op->op = FILTER_OP_GET_CONTEXT_REF_STRING; - break; - case atype_float: - op->op = FILTER_OP_GET_CONTEXT_REF_DOUBLE; - break; - case atype_dynamic: - op->op = FILTER_OP_GET_CONTEXT_REF; - break; - default: - return -EINVAL; - } - /* set offset to context index within channel contexts */ - field_ref->offset = (uint16_t) idx; - break; - } - default: - return -EINVAL; - } - return 0; -} - -static -int apply_reloc(struct lttng_event *event, - struct bytecode_runtime *runtime, - uint32_t runtime_len, - uint32_t reloc_offset, - const char *name) -{ - struct load_op *op; - - dbg_printf("Apply reloc: %u 
%s\n", reloc_offset, name); - - /* Ensure that the reloc is within the code */ - if (runtime_len - reloc_offset < sizeof(uint16_t)) - return -EINVAL; - - op = (struct load_op *) &runtime->code[reloc_offset]; - switch (op->op) { - case FILTER_OP_LOAD_FIELD_REF: - return apply_field_reloc(event, runtime, runtime_len, - reloc_offset, name, op->op); - case FILTER_OP_GET_CONTEXT_REF: - return apply_context_reloc(event, runtime, runtime_len, - reloc_offset, name, op->op); - case FILTER_OP_GET_SYMBOL: - case FILTER_OP_GET_SYMBOL_FIELD: - /* - * Will be handled by load specialize phase or - * dynamically by interpreter. - */ - return 0; - default: - ERR("Unknown reloc op type %u\n", op->op); - return -EINVAL; - } - return 0; -} - -static -int bytecode_is_linked(struct lttng_ust_filter_bytecode_node *filter_bytecode, - struct lttng_event *event) -{ - struct lttng_bytecode_runtime *bc_runtime; - - cds_list_for_each_entry(bc_runtime, - &event->bytecode_runtime_head, node) { - if (bc_runtime->bc == filter_bytecode) - return 1; - } - return 0; -} - -/* - * Take a bytecode with reloc table and link it to an event to create a - * bytecode runtime. 
- */ -static -int _lttng_filter_event_link_bytecode(struct lttng_event *event, - struct lttng_ust_filter_bytecode_node *filter_bytecode, - struct cds_list_head *insert_loc) -{ - int ret, offset, next_offset; - struct bytecode_runtime *runtime = NULL; - size_t runtime_alloc_len; - - if (!filter_bytecode) - return 0; - /* Bytecode already linked */ - if (bytecode_is_linked(filter_bytecode, event)) - return 0; - - dbg_printf("Linking...\n"); - - /* We don't need the reloc table in the runtime */ - runtime_alloc_len = sizeof(*runtime) + filter_bytecode->bc.reloc_offset; - runtime = zmalloc(runtime_alloc_len); - if (!runtime) { - ret = -ENOMEM; - goto alloc_error; - } - runtime->p.bc = filter_bytecode; - runtime->p.session = event->chan->session; - runtime->len = filter_bytecode->bc.reloc_offset; - /* copy original bytecode */ - memcpy(runtime->code, filter_bytecode->bc.data, runtime->len); - /* - * apply relocs. Those are a uint16_t (offset in bytecode) - * followed by a string (field name). 
- */ - for (offset = filter_bytecode->bc.reloc_offset; - offset < filter_bytecode->bc.len; - offset = next_offset) { - uint16_t reloc_offset = - *(uint16_t *) &filter_bytecode->bc.data[offset]; - const char *name = - (const char *) &filter_bytecode->bc.data[offset + sizeof(uint16_t)]; - - ret = apply_reloc(event, runtime, runtime->len, reloc_offset, name); - if (ret) { - goto link_error; - } - next_offset = offset + sizeof(uint16_t) + strlen(name) + 1; - } - /* Validate bytecode */ - ret = lttng_filter_validate_bytecode(runtime); - if (ret) { - goto link_error; - } - /* Specialize bytecode */ - ret = lttng_filter_specialize_bytecode(event, runtime); - if (ret) { - goto link_error; - } - runtime->p.filter = lttng_filter_interpret_bytecode; - runtime->p.link_failed = 0; - cds_list_add_rcu(&runtime->p.node, insert_loc); - dbg_printf("Linking successful.\n"); - return 0; - -link_error: - runtime->p.filter = lttng_filter_false; - runtime->p.link_failed = 1; - cds_list_add_rcu(&runtime->p.node, insert_loc); -alloc_error: - dbg_printf("Linking failed.\n"); - return ret; -} - -void lttng_filter_sync_state(struct lttng_bytecode_runtime *runtime) -{ - struct lttng_ust_filter_bytecode_node *bc = runtime->bc; - - if (!bc->enabler->enabled || runtime->link_failed) - runtime->filter = lttng_filter_false; - else - runtime->filter = lttng_filter_interpret_bytecode; -} - -/* - * Link bytecode for all enablers referenced by an event. - */ -void lttng_enabler_event_link_bytecode(struct lttng_event *event, - struct lttng_enabler *enabler) -{ - struct lttng_ust_filter_bytecode_node *bc; - struct lttng_bytecode_runtime *runtime; - - /* Can only be called for events with desc attached */ - assert(event->desc); - - /* Link each bytecode. 
*/ - cds_list_for_each_entry(bc, &enabler->filter_bytecode_head, node) { - int found = 0, ret; - struct cds_list_head *insert_loc; - - cds_list_for_each_entry(runtime, - &event->bytecode_runtime_head, node) { - if (runtime->bc == bc) { - found = 1; - break; - } - } - /* Skip bytecode already linked */ - if (found) - continue; - - /* - * Insert at specified priority (seqnum) in increasing - * order. If there already is a bytecode of the same priority, - * insert the new bytecode right after it. - */ - cds_list_for_each_entry_reverse(runtime, - &event->bytecode_runtime_head, node) { - if (runtime->bc->bc.seqnum <= bc->bc.seqnum) { - /* insert here */ - insert_loc = &runtime->node; - goto add_within; - } - } - /* Add to head to list */ - insert_loc = &event->bytecode_runtime_head; - add_within: - dbg_printf("linking bytecode\n"); - ret = _lttng_filter_event_link_bytecode(event, bc, - insert_loc); - if (ret) { - dbg_printf("[lttng filter] warning: cannot link event bytecode\n"); - } - } -} - -/* - * We own the filter_bytecode if we return success. - */ -int lttng_filter_enabler_attach_bytecode(struct lttng_enabler *enabler, - struct lttng_ust_filter_bytecode_node *filter_bytecode) -{ - cds_list_add(&filter_bytecode->node, &enabler->filter_bytecode_head); - return 0; -} - -void lttng_free_event_filter_runtime(struct lttng_event *event) -{ - struct bytecode_runtime *runtime, *tmp; - - cds_list_for_each_entry_safe(runtime, tmp, - &event->bytecode_runtime_head, p.node) { - free(runtime->data); - free(runtime); - } -} diff --git a/liblttng-ust/lttng-probes.c b/liblttng-ust/lttng-probes.c index 9b3bacc4..9f16cea1 100644 --- a/liblttng-ust/lttng-probes.c +++ b/liblttng-ust/lttng-probes.c @@ -35,6 +35,7 @@ #include "lttng-tracer-core.h" #include "jhash.h" #include "error.h" +#include "ust-events-internal.h" /* * probe list is protected by ust_lock()/ust_unlock(). 
@@ -204,6 +205,8 @@ int lttng_probe_register(struct lttng_probe_desc *desc) if (lttng_session_active()) fixup_lazy_probes(); + lttng_fix_pending_triggers(); + ust_unlock(); return ret; } diff --git a/liblttng-ust/lttng-ust-abi.c b/liblttng-ust/lttng-ust-abi.c index c060e2d9..bc509f36 100644 --- a/liblttng-ust/lttng-ust-abi.c +++ b/liblttng-ust/lttng-ust-abi.c @@ -38,6 +38,7 @@ */ #define _LGPL_SOURCE +#include #include #include @@ -55,8 +56,10 @@ #include "../libringbuffer/frontend_types.h" #include "../libringbuffer/shm.h" +#include "../libcounter/counter.h" #include "lttng-tracer.h" #include "string-utils.h" +#include "ust-events-internal.h" #define OBJ_NAME_LEN 16 @@ -279,9 +282,11 @@ void lttng_ust_objd_table_owner_cleanup(void *owner) */ static const struct lttng_ust_objd_ops lttng_ops; +static const struct lttng_ust_objd_ops lttng_trigger_group_ops; static const struct lttng_ust_objd_ops lttng_session_ops; static const struct lttng_ust_objd_ops lttng_channel_ops; -static const struct lttng_ust_objd_ops lttng_enabler_ops; +static const struct lttng_ust_objd_ops lttng_event_enabler_ops; +static const struct lttng_ust_objd_ops lttng_trigger_enabler_ops; static const struct lttng_ust_objd_ops lttng_tracepoint_list_ops; static const struct lttng_ust_objd_ops lttng_tracepoint_field_list_ops; @@ -339,6 +344,53 @@ long lttng_abi_tracer_version(int objd, return 0; } +static +int lttng_abi_trigger_send_fd(void *owner, int trigger_notif_fd) +{ + struct lttng_trigger_group *trigger_group; + int trigger_group_objd, ret, fd_flag, close_ret; + + trigger_group = lttng_trigger_group_create(); + if (!trigger_group) + return -ENOMEM; + + /* + * Set this file descriptor as NON-BLOCKING. 
+ */ + fd_flag = fcntl(trigger_notif_fd, F_GETFL); + + fd_flag |= O_NONBLOCK; + + ret = fcntl(trigger_notif_fd, F_SETFL, fd_flag); + if (ret) { + ret = -errno; + goto fd_error; + } + + trigger_group_objd = objd_alloc(trigger_group, + <tng_trigger_group_ops, owner, "trigger_group"); + if (trigger_group_objd < 0) { + ret = trigger_group_objd; + goto objd_error; + } + + trigger_group->objd = trigger_group_objd; + trigger_group->owner = owner; + trigger_group->notification_fd = trigger_notif_fd; + + return trigger_group_objd; + +objd_error: + lttng_trigger_group_destroy(trigger_group); +fd_error: + close_ret = close(trigger_notif_fd); + if (close_ret) { + PERROR("close"); + } + + return ret; +} + static long lttng_abi_add_context(int objd, struct lttng_ust_context *context_param, @@ -388,6 +440,9 @@ long lttng_cmd(int objd, unsigned int cmd, unsigned long arg, case LTTNG_UST_WAIT_QUIESCENT: synchronize_trace(); return 0; + case LTTNG_UST_TRIGGER_GROUP_CREATE: + return lttng_abi_trigger_send_fd(owner, + uargs->trigger_handle.trigger_notif_fd); default: return -EINVAL; } @@ -583,6 +638,11 @@ long lttng_session_cmd(int objd, unsigned int cmd, unsigned long arg, return lttng_session_disable(session); case LTTNG_UST_SESSION_STATEDUMP: return lttng_session_statedump(session); + case LTTNG_UST_COUNTER: + case LTTNG_UST_COUNTER_GLOBAL: + case LTTNG_UST_COUNTER_CPU: + /* Not implemented yet. 
*/ + return -EINVAL; default: return -EINVAL; } @@ -614,6 +674,266 @@ static const struct lttng_ust_objd_ops lttng_session_ops = { .cmd = lttng_session_cmd, }; +static int lttng_ust_trigger_enabler_create(int trigger_group_obj, void *owner, + struct lttng_ust_trigger *trigger_param, + enum lttng_enabler_format_type type) +{ + struct lttng_trigger_group *trigger_group = + objd_private(trigger_group_obj); + struct lttng_trigger_enabler *trigger_enabler; + int trigger_objd, ret; + + trigger_param->name[LTTNG_UST_SYM_NAME_LEN - 1] = '\0'; + trigger_objd = objd_alloc(NULL, <tng_trigger_enabler_ops, owner, + "trigger enabler"); + if (trigger_objd < 0) { + ret = trigger_objd; + goto objd_error; + } + + trigger_enabler = lttng_trigger_enabler_create(trigger_group, type, + trigger_param); + if (!trigger_enabler) { + ret = -ENOMEM; + goto trigger_error; + } + + objd_set_private(trigger_objd, trigger_enabler); + /* The trigger holds a reference on the trigger group. */ + objd_ref(trigger_enabler->group->objd); + + return trigger_objd; + +trigger_error: + { + int err; + + err = lttng_ust_objd_unref(trigger_objd, 1); + assert(!err); + } +objd_error: + return ret; +} + +static +long lttng_trigger_enabler_cmd(int objd, unsigned int cmd, unsigned long arg, + union ust_args *uargs, void *owner) +{ + struct lttng_trigger_enabler *trigger_enabler = objd_private(objd); + switch (cmd) { + case LTTNG_UST_FILTER: + return lttng_trigger_enabler_attach_filter_bytecode( + trigger_enabler, + (struct lttng_ust_bytecode_node *) arg); + case LTTNG_UST_EXCLUSION: + return lttng_trigger_enabler_attach_exclusion(trigger_enabler, + (struct lttng_ust_excluder_node *) arg); + case LTTNG_UST_CAPTURE: + return lttng_trigger_enabler_attach_capture_bytecode( + trigger_enabler, + (struct lttng_ust_bytecode_node *) arg); + case LTTNG_UST_ENABLE: + return lttng_trigger_enabler_enable(trigger_enabler); + case LTTNG_UST_DISABLE: + return lttng_trigger_enabler_disable(trigger_enabler); + default: + return 
-EINVAL; + } +} + +/** + * lttng_trigger_group_error_counter_cmd - lttng trigger group error counter object command + * + * @obj: the object + * @cmd: the command + * @arg: command arg + * @uargs: UST arguments (internal) + * @owner: objd owner + * + * This descriptor implements lttng commands: + * LTTNG_UST_COUNTER_GLOBAL + * Return negative error code on error, 0 on success. + * LTTNG_UST_COUNTER_CPU + * Return negative error code on error, 0 on success. + */ +static +long lttng_trigger_group_error_counter_cmd(int objd, unsigned int cmd, unsigned long arg, + union ust_args *uargs, void *owner) +{ + struct lttng_counter *counter = objd_private(objd); + + switch (cmd) { + case LTTNG_UST_COUNTER_GLOBAL: + return -EINVAL; /* Unimplemented. */ + case LTTNG_UST_COUNTER_CPU: + { + struct lttng_ust_counter_cpu *counter_cpu = + (struct lttng_ust_counter_cpu *)arg; + return lttng_counter_set_cpu_shm(counter->counter, + counter_cpu->cpu_nr, uargs->counter_shm.shm_fd); + } + default: + return -EINVAL; + } +} + +int lttng_release_trigger_group_error_counter(int objd) +{ + struct lttng_counter *counter = objd_private(objd); + + if (counter) { + return lttng_ust_objd_unref(counter->trigger_group->objd, 0); + } else { + return -EINVAL; + } +} + +static const struct lttng_ust_objd_ops lttng_trigger_group_error_counter_ops = { + .release = lttng_release_trigger_group_error_counter, + .cmd = lttng_trigger_group_error_counter_cmd, +}; + +static +int lttng_ust_trigger_group_create_error_counter(int trigger_group_objd, void *owner, + struct lttng_ust_counter_conf *error_counter_conf) +{ + const char *counter_transport_name; + struct lttng_trigger_group *trigger_group = + objd_private(trigger_group_objd); + struct lttng_counter *counter; + int counter_objd, ret; + struct lttng_counter_dimension dimensions[1]; + size_t counter_len; + + if (trigger_group->error_counter) + return -EBUSY; + + if (error_counter_conf->arithmetic != LTTNG_UST_COUNTER_ARITHMETIC_MODULAR) + return -EINVAL; + + 
if (error_counter_conf->number_dimensions != 1) + return -EINVAL; + + switch (error_counter_conf->bitness) { + case LTTNG_UST_COUNTER_BITNESS_64BITS: + counter_transport_name = "counter-per-cpu-64-modular"; + break; + case LTTNG_UST_COUNTER_BITNESS_32BITS: + counter_transport_name = "counter-per-cpu-32-modular"; + break; + default: + return -EINVAL; + } + + counter_objd = objd_alloc(NULL, <tng_trigger_group_error_counter_ops, owner, + "trigger group error counter"); + if (counter_objd < 0) { + ret = counter_objd; + goto objd_error; + } + + counter_len = error_counter_conf->dimensions[0].size; + dimensions[0].size = counter_len; + dimensions[0].underflow_index = 0; + dimensions[0].overflow_index = 0; + dimensions[0].has_underflow = 0; + dimensions[0].has_overflow = 0; + + counter = lttng_ust_counter_create(counter_transport_name, 1, dimensions); + if (!counter) { + ret = -EINVAL; + goto create_error; + } + + trigger_group->error_counter = counter; + trigger_group->error_counter_len = counter_len; + + counter->objd = counter_objd; + counter->trigger_group = trigger_group; /* owner */ + + objd_set_private(counter_objd, counter); + /* The error counter holds a reference on the trigger group. */ + objd_ref(trigger_group->objd); + + return counter_objd; + +create_error: + { + int err; + + err = lttng_ust_objd_unref(counter_objd, 1); + assert(!err); + } +objd_error: + return ret; +} + +static +long lttng_trigger_group_cmd(int objd, unsigned int cmd, unsigned long arg, + union ust_args *uargs, void *owner) +{ + switch (cmd) { + case LTTNG_UST_TRIGGER_CREATE: + { + struct lttng_ust_trigger *trigger_param = + (struct lttng_ust_trigger *) arg; + if (strutils_is_star_glob_pattern(trigger_param->name)) { + /* + * If the event name is a star globbing pattern, + * we create the special star globbing enabler. 
+ */ + return lttng_ust_trigger_enabler_create(objd, owner, + trigger_param, LTTNG_ENABLER_FORMAT_STAR_GLOB); + } else { + return lttng_ust_trigger_enabler_create(objd, owner, + trigger_param, LTTNG_ENABLER_FORMAT_EVENT); + } + } + case LTTNG_UST_COUNTER: + { + struct lttng_ust_counter_conf *counter_conf = + (struct lttng_ust_counter_conf *) uargs->counter.counter_data; + return lttng_ust_trigger_group_create_error_counter(objd, owner, + counter_conf); + } + default: + return -EINVAL; + } +} + +static +int lttng_trigger_enabler_release(int objd) +{ + struct lttng_trigger_enabler *trigger_enabler = objd_private(objd); + + if (trigger_enabler) + return lttng_ust_objd_unref(trigger_enabler->group->objd, 0); + return 0; +} + +static const struct lttng_ust_objd_ops lttng_trigger_enabler_ops = { + .release = lttng_trigger_enabler_release, + .cmd = lttng_trigger_enabler_cmd, +}; + +static +int lttng_release_trigger_group(int objd) +{ + struct lttng_trigger_group *trigger_group = objd_private(objd); + + if (trigger_group) { + lttng_trigger_group_destroy(trigger_group); + return 0; + } else { + return -EINVAL; + } +} + +static const struct lttng_ust_objd_ops lttng_trigger_group_ops = { + .release = lttng_release_trigger_group, + .cmd = lttng_trigger_group_cmd, +}; + static long lttng_tracepoint_list_cmd(int objd, unsigned int cmd, unsigned long arg, union ust_args *uargs, void *owner) @@ -794,17 +1114,18 @@ error_add_stream: } static -int lttng_abi_create_enabler(int channel_objd, +int lttng_abi_create_event_enabler(int channel_objd, struct lttng_ust_event *event_param, void *owner, - enum lttng_enabler_type type) + enum lttng_enabler_format_type format_type) { struct lttng_channel *channel = objd_private(channel_objd); - struct lttng_enabler *enabler; + struct lttng_event_enabler *enabler; int event_objd, ret; event_param->name[LTTNG_UST_SYM_NAME_LEN - 1] = '\0'; - event_objd = objd_alloc(NULL, <tng_enabler_ops, owner, "enabler"); + event_objd = objd_alloc(NULL, 
<tng_event_enabler_ops, owner, + "event enabler"); if (event_objd < 0) { ret = event_objd; goto objd_error; @@ -813,7 +1134,7 @@ int lttng_abi_create_enabler(int channel_objd, * We tolerate no failure path after event creation. It will stay * invariant for the rest of the session. */ - enabler = lttng_enabler_create(type, event_param, channel); + enabler = lttng_event_enabler_create(format_type, event_param, channel); if (!enabler) { ret = -ENOMEM; goto event_error; @@ -891,11 +1212,11 @@ long lttng_channel_cmd(int objd, unsigned int cmd, unsigned long arg, * If the event name is a star globbing pattern, * we create the special star globbing enabler. */ - return lttng_abi_create_enabler(objd, event_param, - owner, LTTNG_ENABLER_STAR_GLOB); + return lttng_abi_create_event_enabler(objd, event_param, + owner, LTTNG_ENABLER_FORMAT_STAR_GLOB); } else { - return lttng_abi_create_enabler(objd, event_param, - owner, LTTNG_ENABLER_EVENT); + return lttng_abi_create_event_enabler(objd, event_param, + owner, LTTNG_ENABLER_FORMAT_EVENT); } } case LTTNG_UST_CONTEXT: @@ -951,32 +1272,32 @@ static const struct lttng_ust_objd_ops lttng_channel_ops = { * Attach exclusions to an enabler. 
*/ static -long lttng_enabler_cmd(int objd, unsigned int cmd, unsigned long arg, +long lttng_event_enabler_cmd(int objd, unsigned int cmd, unsigned long arg, union ust_args *uargs, void *owner) { - struct lttng_enabler *enabler = objd_private(objd); + struct lttng_event_enabler *enabler = objd_private(objd); switch (cmd) { case LTTNG_UST_CONTEXT: - return lttng_enabler_attach_context(enabler, + return lttng_event_enabler_attach_context(enabler, (struct lttng_ust_context *) arg); case LTTNG_UST_ENABLE: - return lttng_enabler_enable(enabler); + return lttng_event_enabler_enable(enabler); case LTTNG_UST_DISABLE: - return lttng_enabler_disable(enabler); + return lttng_event_enabler_disable(enabler); case LTTNG_UST_FILTER: { int ret; - ret = lttng_enabler_attach_bytecode(enabler, - (struct lttng_ust_filter_bytecode_node *) arg); + ret = lttng_event_enabler_attach_filter_bytecode(enabler, + (struct lttng_ust_bytecode_node *) arg); if (ret) return ret; return 0; } case LTTNG_UST_EXCLUSION: { - return lttng_enabler_attach_exclusion(enabler, + return lttng_event_enabler_attach_exclusion(enabler, (struct lttng_ust_excluder_node *) arg); } default: @@ -985,18 +1306,19 @@ long lttng_enabler_cmd(int objd, unsigned int cmd, unsigned long arg, } static -int lttng_enabler_release(int objd) +int lttng_event_enabler_release(int objd) { - struct lttng_enabler *enabler = objd_private(objd); + struct lttng_event_enabler *event_enabler = objd_private(objd); + + if (event_enabler) + return lttng_ust_objd_unref(event_enabler->chan->objd, 0); - if (enabler) - return lttng_ust_objd_unref(enabler->chan->objd, 0); return 0; } -static const struct lttng_ust_objd_ops lttng_enabler_ops = { - .release = lttng_enabler_release, - .cmd = lttng_enabler_cmd, +static const struct lttng_ust_objd_ops lttng_event_enabler_ops = { + .release = lttng_event_enabler_release, + .cmd = lttng_event_enabler_cmd, }; void lttng_ust_abi_exit(void) diff --git a/liblttng-ust/lttng-ust-comm.c 
b/liblttng-ust/lttng-ust-comm.c index 3847c976..c06bc391 100644 --- a/liblttng-ust/lttng-ust-comm.c +++ b/liblttng-ust/lttng-ust-comm.c @@ -61,6 +61,7 @@ #include "clock.h" #include "../libringbuffer/getcpu.h" #include "getenv.h" +#include "ust-events-internal.h" /* Concatenate lttng ust shared library name with its major version number. */ #define LTTNG_UST_LIB_SO_NAME "liblttng-ust.so." __ust_stringify(CONFIG_LTTNG_UST_LIBRARY_VERSION_MAJOR) @@ -320,6 +321,9 @@ static const char *cmd_name_mapping[] = { [ LTTNG_UST_REGISTER_DONE ] = "Registration Done", [ LTTNG_UST_TRACEPOINT_FIELD_LIST ] = "Create Tracepoint Field List", + [ LTTNG_UST_TRIGGER_GROUP_CREATE ] = "Create trigger group", + [ LTTNG_UST_TRIGGER_CREATE ] = "Create trigger", + /* Session FD commands */ [ LTTNG_UST_CHANNEL ] = "Create Channel", [ LTTNG_UST_SESSION_START ] = "Start Session", @@ -344,6 +348,11 @@ static const char *cmd_name_mapping[] = { /* Event FD commands */ [ LTTNG_UST_FILTER ] = "Create Filter", [ LTTNG_UST_EXCLUSION ] = "Add exclusions to event", + + /* Session and trigger FD commands */ + [ LTTNG_UST_COUNTER ] = "Create Counter", + [ LTTNG_UST_COUNTER_GLOBAL ] = "Create Counter Global", + [ LTTNG_UST_COUNTER_CPU ] = "Create Counter CPU", }; static const char *str_timeout; @@ -359,6 +368,10 @@ extern void lttng_ring_buffer_client_overwrite_rt_exit(void); extern void lttng_ring_buffer_client_discard_exit(void); extern void lttng_ring_buffer_client_discard_rt_exit(void); extern void lttng_ring_buffer_metadata_client_exit(void); +extern void lttng_counter_client_percpu_32_overflow_init(void); +extern void lttng_counter_client_percpu_32_overflow_exit(void); +extern void lttng_counter_client_percpu_64_overflow_init(void); +extern void lttng_counter_client_percpu_64_overflow_exit(void); static char *get_map_shm(struct sock_info *sock_info); @@ -738,6 +751,131 @@ void handle_pending_statedump(struct sock_info *sock_info) } } +static inline +const char *bytecode_type_str(uint32_t cmd) +{ + 
switch (cmd) { + case LTTNG_UST_CAPTURE: + return "capture"; + case LTTNG_UST_FILTER: + return "filter"; + default: + abort(); + } +} + +static +int handle_bytecode_recv(struct sock_info *sock_info, + int sock, struct ustcomm_ust_msg *lum) +{ + struct lttng_ust_bytecode_node *bytecode; + enum lttng_ust_bytecode_node_type type; + const struct lttng_ust_objd_ops *ops; + uint32_t data_size, data_size_max, reloc_offset; + uint64_t seqnum; + ssize_t len; + int ret = 0; + + switch (lum->cmd) { + case LTTNG_UST_FILTER: + type = LTTNG_UST_BYTECODE_NODE_TYPE_FILTER; + data_size = lum->u.filter.data_size; + data_size_max = FILTER_BYTECODE_MAX_LEN; + reloc_offset = lum->u.filter.reloc_offset; + seqnum = lum->u.filter.seqnum; + break; + case LTTNG_UST_CAPTURE: + type = LTTNG_UST_BYTECODE_NODE_TYPE_CAPTURE; + data_size = lum->u.capture.data_size; + data_size_max = CAPTURE_BYTECODE_MAX_LEN; + reloc_offset = lum->u.capture.reloc_offset; + seqnum = lum->u.capture.seqnum; + break; + default: + abort(); + } + + if (data_size > data_size_max) { + ERR("Bytecode %s data size is too large: %u bytes", + bytecode_type_str(lum->cmd), data_size); + ret = -EINVAL; + goto end; + } + + if (reloc_offset > data_size) { + ERR("Bytecode %s reloc offset %u is not within data", + bytecode_type_str(lum->cmd), reloc_offset); + ret = -EINVAL; + goto end; + } + + /* Allocate the structure AND the `data[]` field. 
*/ + bytecode = zmalloc(sizeof(*bytecode) + data_size); + if (!bytecode) { + ret = -ENOMEM; + goto end; + } + + bytecode->bc.len = data_size; + bytecode->bc.reloc_offset = reloc_offset; + bytecode->bc.seqnum = seqnum; + bytecode->type = type; + + len = ustcomm_recv_unix_sock(sock, bytecode->bc.data, bytecode->bc.len); + switch (len) { + case 0: /* orderly shutdown */ + ret = 0; + goto error_free_bytecode; + default: + if (len == bytecode->bc.len) { + DBG("Bytecode %s data received", + bytecode_type_str(lum->cmd)); + break; + } else if (len < 0) { + DBG("Receive failed from lttng-sessiond with errno %d", + (int) -len); + if (len == -ECONNRESET) { + ERR("%s remote end closed connection", + sock_info->name); + ret = len; + goto error_free_bytecode; + } + ret = len; + goto error_free_bytecode; + } else { + DBG("Incorrect %s bytecode data message size: %zd", + bytecode_type_str(lum->cmd), len); + ret = -EINVAL; + goto error_free_bytecode; + } + } + + ops = objd_ops(lum->handle); + if (!ops) { + ret = -ENOENT; + goto error_free_bytecode; + } + + if (ops->cmd) { + ret = ops->cmd(lum->handle, lum->cmd, + (unsigned long) bytecode, + NULL, sock_info); + if (ret) + goto error_free_bytecode; + /* don't free bytecode if everything went fine. 
*/ + } else { + ret = -ENOSYS; + goto error_free_bytecode; + } + + goto end; + +error_free_bytecode: + free(bytecode); +end: + return ret; +} + static int handle_message(struct sock_info *sock_info, int sock, struct ustcomm_ust_msg *lum) @@ -775,76 +913,12 @@ int handle_message(struct sock_info *sock_info, else ret = lttng_ust_objd_unref(lum->handle, 1); break; + case LTTNG_UST_CAPTURE: case LTTNG_UST_FILTER: - { - /* Receive filter data */ - struct lttng_ust_filter_bytecode_node *bytecode; - - if (lum->u.filter.data_size > FILTER_BYTECODE_MAX_LEN) { - ERR("Filter data size is too large: %u bytes", - lum->u.filter.data_size); - ret = -EINVAL; - goto error; - } - - if (lum->u.filter.reloc_offset > lum->u.filter.data_size) { - ERR("Filter reloc offset %u is not within data", - lum->u.filter.reloc_offset); - ret = -EINVAL; - goto error; - } - - bytecode = zmalloc(sizeof(*bytecode) + lum->u.filter.data_size); - if (!bytecode) { - ret = -ENOMEM; - goto error; - } - len = ustcomm_recv_unix_sock(sock, bytecode->bc.data, - lum->u.filter.data_size); - switch (len) { - case 0: /* orderly shutdown */ - ret = 0; - free(bytecode); + ret = handle_bytecode_recv(sock_info, sock, lum); + if (ret) goto error; - default: - if (len == lum->u.filter.data_size) { - DBG("filter data received"); - break; - } else if (len < 0) { - DBG("Receive failed from lttng-sessiond with errno %d", (int) -len); - if (len == -ECONNRESET) { - ERR("%s remote end closed connection", sock_info->name); - ret = len; - free(bytecode); - goto error; - } - ret = len; - free(bytecode); - goto error; - } else { - DBG("incorrect filter data message size: %zd", len); - ret = -EINVAL; - free(bytecode); - goto error; - } - } - bytecode->bc.len = lum->u.filter.data_size; - bytecode->bc.reloc_offset = lum->u.filter.reloc_offset; - bytecode->bc.seqnum = lum->u.filter.seqnum; - if (ops->cmd) { - ret = ops->cmd(lum->handle, lum->cmd, - (unsigned long) bytecode, - &args, sock_info); - if (ret) { - free(bytecode); - } - /* 
don't free bytecode if everything went fine. */ - } else { - ret = -ENOSYS; - free(bytecode); - } break; - } case LTTNG_UST_EXCLUSION: { /* Receive exclusion names */ @@ -907,6 +981,43 @@ int handle_message(struct sock_info *sock_info, } break; } + case LTTNG_UST_TRIGGER_GROUP_CREATE: + { + int trigger_notif_fd; + + len = ustcomm_recv_trigger_notif_fd_from_sessiond(sock, + &trigger_notif_fd); + switch (len) { + case 0: /* orderly shutdown */ + ret = 0; + goto error; + case 1: + break; + default: + if (len < 0) { + DBG("Receive failed from lttng-sessiond with errno %d", (int) -len); + if (len == -ECONNRESET) { + ERR("%s remote end closed connection", sock_info->name); + ret = len; + goto error; + } + ret = len; + goto error; + } else { + DBG("incorrect trigger fd message size: %zd", len); + ret = -EINVAL; + goto error; + } + } + args.trigger_handle.trigger_notif_fd = trigger_notif_fd; + if (ops->cmd) + ret = ops->cmd(lum->handle, lum->cmd, + (unsigned long) &lum->u, + &args, sock_info); + else + ret = -ENOSYS; + break; + } case LTTNG_UST_CHANNEL: { void *chan_data; @@ -1025,6 +1136,79 @@ int handle_message(struct sock_info *sock_info, ret = -ENOSYS; } break; + case LTTNG_UST_COUNTER: + { + void *counter_data; + + len = ustcomm_recv_counter_from_sessiond(sock, + &counter_data, lum->u.counter.len); + switch (len) { + case 0: /* orderly shutdown */ + ret = 0; + goto error; + default: + if (len == lum->u.counter.len) { + DBG("counter data received"); + break; + } else if (len < 0) { + DBG("Receive failed from lttng-sessiond with errno %d", (int) -len); + if (len == -ECONNRESET) { + ERR("%s remote end closed connection", sock_info->name); + ret = len; + goto error; + } + ret = len; + goto error; + } else { + DBG("incorrect counter data message size: %zd", len); + ret = -EINVAL; + goto error; + } + } + args.counter.counter_data = counter_data; + if (ops->cmd) + ret = ops->cmd(lum->handle, lum->cmd, + (unsigned long) &lum->u, + &args, sock_info); + else + ret = -ENOSYS; + 
break; + } + case LTTNG_UST_COUNTER_GLOBAL: + { + /* Receive shm_fd */ + ret = ustcomm_recv_counter_shm_from_sessiond(sock, + &args.counter_shm.shm_fd); + if (ret) { + goto error; + } + + if (ops->cmd) + ret = ops->cmd(lum->handle, lum->cmd, + (unsigned long) &lum->u, + &args, sock_info); + else + ret = -ENOSYS; + break; + } + case LTTNG_UST_COUNTER_CPU: + { + /* Receive shm_fd */ + ret = ustcomm_recv_counter_shm_from_sessiond(sock, + &args.counter_shm.shm_fd); + if (ret) { + goto error; + } + + if (ops->cmd) + ret = ops->cmd(lum->handle, lum->cmd, + (unsigned long) &lum->u, + &args, sock_info); + else + ret = -ENOSYS; + break; + } + default: if (ops->cmd) ret = ops->cmd(lum->handle, lum->cmd, @@ -1850,6 +2034,8 @@ void __attribute__((constructor)) lttng_ust_init(void) lttng_ring_buffer_client_overwrite_rt_init(); lttng_ring_buffer_client_discard_init(); lttng_ring_buffer_client_discard_rt_init(); + lttng_counter_client_percpu_32_overflow_init(); + lttng_counter_client_percpu_64_overflow_init(); lttng_perf_counter_init(); /* * Invoke ust malloc wrapper init before starting other threads. @@ -1995,6 +2181,8 @@ void lttng_ust_cleanup(int exiting) lttng_ring_buffer_client_overwrite_rt_exit(); lttng_ring_buffer_client_overwrite_exit(); lttng_ring_buffer_metadata_client_exit(); + lttng_counter_client_percpu_32_overflow_exit(); + lttng_counter_client_percpu_64_overflow_exit(); lttng_ust_statedump_destroy(); exit_tracepoint(); if (!exiting) { diff --git a/liblttng-ust/trigger-notification.c b/liblttng-ust/trigger-notification.c new file mode 100644 index 00000000..b9e55ba5 --- /dev/null +++ b/liblttng-ust/trigger-notification.c @@ -0,0 +1,396 @@ +/* + * trigger-notification.c + * + * Copyright (C) 2020 Francis Deslauriers + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; only + * version 2.1 of the License. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#define _GNU_SOURCE +#define _LGPL_SOURCE + +#include +#include +#include +#include +#include + +#include "../libmsgpack/msgpack.h" +#include "lttng-bytecode.h" +#include "share.h" + +/* + * We want this write to be atomic AND non-blocking, meaning that we + * want to write either everything OR nothing. + * According to `pipe(7)`, writes that are less than `PIPE_BUF` bytes must be + * atomic, so we bound the capture buffer size to the `PIPE_BUF` minus the size + * of the notification struct we are sending alongside the capture buffer. 
+ */ +#define CAPTURE_BUFFER_SIZE \ + (PIPE_BUF - sizeof(struct lttng_ust_trigger_notification) - 1) + +struct lttng_trigger_notification { + int notification_fd; + uint64_t trigger_id; + uint8_t capture_buf[CAPTURE_BUFFER_SIZE]; + struct lttng_msgpack_writer writer; + bool has_captures; +}; + +static +void capture_enum(struct lttng_msgpack_writer *writer, + struct lttng_interpreter_output *output) +{ + lttng_msgpack_begin_map(writer, 2); + lttng_msgpack_write_str(writer, "type"); + lttng_msgpack_write_str(writer, "enum"); + + lttng_msgpack_write_str(writer, "value"); + + switch (output->type) { + case LTTNG_INTERPRETER_TYPE_SIGNED_ENUM: + lttng_msgpack_write_signed_integer(writer, output->u.s); + break; + case LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM: + lttng_msgpack_write_signed_integer(writer, output->u.u); + break; + default: + abort(); + } + + lttng_msgpack_end_map(writer); +} + +static +int64_t capture_sequence_element_signed(uint8_t *ptr, + const struct lttng_integer_type *type) +{ + int64_t value; + unsigned int size = type->size; + bool byte_order_reversed = type->reverse_byte_order; + + switch (size) { + case 8: + value = *ptr; + break; + case 16: + { + int16_t tmp; + tmp = *(int16_t *) ptr; + if (byte_order_reversed) + tmp = bswap_16(tmp); + + value = tmp; + break; + } + case 32: + { + int32_t tmp; + tmp = *(int32_t *) ptr; + if (byte_order_reversed) + tmp = bswap_32(tmp); + + value = tmp; + break; + } + case 64: + { + int64_t tmp; + tmp = *(int64_t *) ptr; + if (byte_order_reversed) + tmp = bswap_64(tmp); + + value = tmp; + break; + } + default: + abort(); + } + + return value; +} + +static +uint64_t capture_sequence_element_unsigned(uint8_t *ptr, + const struct lttng_integer_type *type) +{ + uint64_t value; + unsigned int size = type->size; + bool byte_order_reversed = type->reverse_byte_order; + + switch (size) { + case 8: + value = *ptr; + break; + case 16: + { + uint16_t tmp; + tmp = *(uint16_t *) ptr; + if (byte_order_reversed) + tmp = bswap_16(tmp); + 
+ value = tmp; + break; + } + case 32: + { + uint32_t tmp; + tmp = *(uint32_t *) ptr; + if (byte_order_reversed) + tmp = bswap_32(tmp); + + value = tmp; + break; + } + case 64: + { + uint64_t tmp; + tmp = *(uint64_t *) ptr; + if (byte_order_reversed) + tmp = bswap_64(tmp); + + value = tmp; + break; + } + default: + abort(); + } + + return value; +} + +static +void capture_sequence(struct lttng_msgpack_writer *writer, + struct lttng_interpreter_output *output) +{ + const struct lttng_integer_type *integer_type; + const struct lttng_type *nested_type; + uint8_t *ptr; + bool signedness; + int i; + + lttng_msgpack_begin_array(writer, output->u.sequence.nr_elem); + + ptr = (uint8_t *) output->u.sequence.ptr; + nested_type = output->u.sequence.nested_type; + switch (nested_type->atype) { + case atype_integer: + integer_type = &nested_type->u.integer; + break; + case atype_enum: + /* Treat enumeration as an integer. */ + integer_type = &nested_type->u.enum_nestable.container_type->u.integer; + break; + default: + /* Capture of array of non-integer are not supported. */ + abort(); + } + signedness = integer_type->signedness; + for (i = 0; i < output->u.sequence.nr_elem; i++) { + if (signedness) { + lttng_msgpack_write_signed_integer(writer, + capture_sequence_element_signed(ptr, integer_type)); + } else { + lttng_msgpack_write_unsigned_integer(writer, + capture_sequence_element_unsigned(ptr, integer_type)); + } + + /* + * We assume that alignment is smaller or equal to the size. + * This currently holds true but if it changes in the future, + * we will want to change the pointer arithmetics below to + * take into account that the next element might be further + * away. + */ + assert(integer_type->alignment <= integer_type->size); + + /* Size is in number of bits. 
*/ + ptr += (integer_type->size / CHAR_BIT) ; + } + + lttng_msgpack_end_array(writer); +} + +static +void notification_init(struct lttng_trigger_notification *notif, + struct lttng_trigger *trigger) +{ + struct lttng_msgpack_writer *writer = ¬if->writer; + + notif->trigger_id = trigger->id; + notif->notification_fd = trigger->group->notification_fd; + notif->has_captures = false; + + if (trigger->num_captures > 0) { + lttng_msgpack_writer_init(writer, notif->capture_buf, + CAPTURE_BUFFER_SIZE); + + lttng_msgpack_begin_array(writer, trigger->num_captures); + notif->has_captures = true; + } +} + +static +void notification_append_capture( + struct lttng_trigger_notification *notif, + struct lttng_interpreter_output *output) +{ + struct lttng_msgpack_writer *writer = ¬if->writer; + + switch (output->type) { + case LTTNG_INTERPRETER_TYPE_S64: + lttng_msgpack_write_signed_integer(writer, output->u.s); + break; + case LTTNG_INTERPRETER_TYPE_U64: + lttng_msgpack_write_unsigned_integer(writer, output->u.u); + break; + case LTTNG_INTERPRETER_TYPE_DOUBLE: + lttng_msgpack_write_double(writer, output->u.d); + break; + case LTTNG_INTERPRETER_TYPE_STRING: + lttng_msgpack_write_str(writer, output->u.str.str); + break; + case LTTNG_INTERPRETER_TYPE_SEQUENCE: + capture_sequence(writer, output); + break; + case LTTNG_INTERPRETER_TYPE_SIGNED_ENUM: + case LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM: + capture_enum(writer, output); + break; + default: + abort(); + } +} + +static +void notification_append_empty_capture( + struct lttng_trigger_notification *notif) +{ + lttng_msgpack_write_nil(¬if->writer); +} + +static void record_error(struct lttng_trigger *trigger) +{ + struct lttng_trigger_group *trigger_group = trigger->group; + size_t dimension_index[1]; + int ret; + + dimension_index[0] = trigger->error_counter_index; + ret = trigger_group->error_counter->ops->counter_add( + trigger_group->error_counter->counter, + dimension_index, 1); + if (ret) + WARN_ON_ONCE(1); +} + +static +void 
notification_send(struct lttng_trigger_notification *notif, + struct lttng_trigger *trigger) +{ + ssize_t ret; + size_t content_len; + int iovec_count = 1; + struct lttng_ust_trigger_notification ust_notif; + struct iovec iov[2]; + + assert(notif); + + ust_notif.id = trigger->id; + + /* + * Prepare sending the notification from multiple buffers using an + * array of `struct iovec`. The first buffer of the vector is + * notification structure itself and is always present. + */ + iov[0].iov_base = &ust_notif; + iov[0].iov_len = sizeof(ust_notif); + + if (notif->has_captures) { + /* + * If captures were requested, the second buffer of the array + * is the capture buffer. + */ + assert(notif->writer.buffer); + content_len = notif->writer.write_pos - notif->writer.buffer; + + assert(content_len > 0 && content_len <= CAPTURE_BUFFER_SIZE); + + iov[1].iov_base = notif->capture_buf; + iov[1].iov_len = content_len; + + iovec_count++; + } else { + content_len = 0; + } + + /* + * Update the capture buffer size so that receiver of the buffer will + * know how much to expect. + */ + ust_notif.capture_buf_size = content_len; + + /* Send all the buffers. */ + ret = patient_writev(notif->notification_fd, iov, iovec_count); + if (ret == -1) { + if (errno == EAGAIN) { + record_error(trigger); + DBG("Cannot send trigger notification without blocking: %s", + strerror(errno)); + } else { + DBG("Error to sending trigger notification: %s", + strerror(errno)); + abort(); + } + } +} + +void lttng_trigger_notification_send(struct lttng_trigger *trigger, + const char *stack_data) +{ + /* + * This function is called from the probe, we must do dynamic + * allocation in this context. + */ + struct lttng_trigger_notification notif = {0}; + + notification_init(¬if, trigger); + + if (caa_unlikely(!cds_list_empty(&trigger->capture_bytecode_runtime_head))) { + struct lttng_bytecode_runtime *capture_bc_runtime; + + /* + * Iterate over all the capture bytecodes. 
If the interpreter + * functions returns successfully, append the value of the + * `output` parameter to the capture buffer. If the interpreter + * fails, append an empty capture to the buffer. + */ + cds_list_for_each_entry(capture_bc_runtime, + &trigger->capture_bytecode_runtime_head, node) { + struct lttng_interpreter_output output; + + if (capture_bc_runtime->interpreter_funcs.capture(capture_bc_runtime, + stack_data, &output) & LTTNG_INTERPRETER_RECORD_FLAG) + notification_append_capture(¬if, &output); + else + notification_append_empty_capture(¬if); + } + } + + /* + * Send the notification (including the capture buffer) to the + * sessiond. + */ + notification_send(¬if, trigger); +} diff --git a/liblttng-ust/ust-core.c b/liblttng-ust/ust-core.c index abea7bbd..e8dff983 100644 --- a/liblttng-ust/ust-core.c +++ b/liblttng-ust/ust-core.c @@ -28,6 +28,7 @@ #include "jhash.h" static CDS_LIST_HEAD(lttng_transport_list); +static CDS_LIST_HEAD(lttng_counter_transport_list); struct lttng_transport *lttng_transport_find(const char *name) { @@ -40,6 +41,17 @@ struct lttng_transport *lttng_transport_find(const char *name) return NULL; } +struct lttng_counter_transport *lttng_counter_transport_find(const char *name) +{ + struct lttng_counter_transport *transport; + + cds_list_for_each_entry(transport, <tng_counter_transport_list, node) { + if (!strcmp(transport->name, name)) + return transport; + } + return NULL; +} + /** * lttng_transport_register - LTT transport registration * @transport: transport structure @@ -62,6 +74,28 @@ void lttng_transport_unregister(struct lttng_transport *transport) cds_list_del(&transport->node); } +/** + * lttng_counter_transport_register - LTTng counter transport registration + * @transport: transport structure + * + * Registers a counter transport which can be used as output to extract + * the data out of LTTng. Called with ust_lock held. 
+ */ +void lttng_counter_transport_register(struct lttng_counter_transport *transport) +{ + cds_list_add_tail(&transport->node, <tng_counter_transport_list); +} + +/** + * lttng_counter_transport_unregister - LTTng counter transport unregistration + * @transport: transport structure + * Called with ust_lock held. + */ +void lttng_counter_transport_unregister(struct lttng_counter_transport *transport) +{ + cds_list_del(&transport->node); +} + /* * Needed by comm layer. */ diff --git a/liblttng-ust/ust-events-internal.h b/liblttng-ust/ust-events-internal.h new file mode 100644 index 00000000..8a49e321 --- /dev/null +++ b/liblttng-ust/ust-events-internal.h @@ -0,0 +1,254 @@ +#ifndef _LTTNG_UST_EVENTS_INTERNAL_H +#define _LTTNG_UST_EVENTS_INTERNAL_H + +/* + * ust-events-internal.h + * + * Copyright 2019 (c) - Francis Deslauriers + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include + +#include +#include + +#include +#include + +struct lttng_event_enabler { + struct lttng_enabler base; + struct cds_list_head node; /* per-session list of enablers */ + struct lttng_channel *chan; + /* + * Unused, but kept around to make it explicit that the tracer can do + * it. + */ + struct lttng_ctx *ctx; +}; + +struct lttng_trigger_enabler { + struct lttng_enabler base; + uint64_t id; + uint64_t error_counter_index; + struct cds_list_head node; /* per-app list of trigger enablers */ + struct cds_list_head capture_bytecode_head; + struct lttng_trigger_group *group; /* weak ref */ + uint64_t num_captures; +}; + +enum lttng_ust_bytecode_node_type{ + LTTNG_UST_BYTECODE_NODE_TYPE_FILTER, + LTTNG_UST_BYTECODE_NODE_TYPE_CAPTURE, +}; + + +struct lttng_ust_bytecode_node { + enum lttng_ust_bytecode_node_type type; + struct cds_list_head node; + struct lttng_enabler *enabler; + struct { + uint32_t len; + uint32_t reloc_offset; + uint64_t seqnum; + char data[]; + } bc; +}; + +struct lttng_ust_excluder_node { + struct cds_list_head node; + struct lttng_enabler *enabler; + /* + * struct lttng_ust_event_exclusion had variable sized array, + * must be last field. + */ + struct lttng_ust_event_exclusion excluder; +}; + +static inline +struct lttng_enabler *lttng_event_enabler_as_enabler( + struct lttng_event_enabler *event_enabler) +{ + return &event_enabler->base; +} + +static inline +struct lttng_enabler *lttng_trigger_enabler_as_enabler( + struct lttng_trigger_enabler *trigger_enabler) +{ + return &trigger_enabler->base; +} + +/* + * Allocate and initialize a `struct lttng_event_enabler` object. + * + * On success, returns a `struct lttng_event_enabler`, + * On memory error, returns NULL. + */ +LTTNG_HIDDEN +struct lttng_event_enabler *lttng_event_enabler_create( + enum lttng_enabler_format_type format_type, + struct lttng_ust_event *event_param, + struct lttng_channel *chan); + +/* + * Destroy a `struct lttng_event_enabler` object. 
+ */ +LTTNG_HIDDEN +void lttng_event_enabler_destroy(struct lttng_event_enabler *enabler); + +/* + * Enable a `struct lttng_event_enabler` object and all events related to this + * enabler. + */ +LTTNG_HIDDEN +int lttng_event_enabler_enable(struct lttng_event_enabler *enabler); + +/* + * Disable a `struct lttng_event_enabler` object and all events related to this + * enabler. + */ +LTTNG_HIDDEN +int lttng_event_enabler_disable(struct lttng_event_enabler *enabler); + +/* + * Attach filter bytecode program to `struct lttng_event_enabler` and all + * events related to this enabler. + */ +LTTNG_HIDDEN +int lttng_event_enabler_attach_filter_bytecode( + struct lttng_event_enabler *enabler, + struct lttng_ust_bytecode_node *bytecode); + +/* + * Attach an application context to an event enabler. + * + * Not implemented. + */ +LTTNG_HIDDEN +int lttng_event_enabler_attach_context(struct lttng_event_enabler *enabler, + struct lttng_ust_context *ctx); + +/* + * Attach exclusion list to `struct lttng_event_enabler` and all + * events related to this enabler. + */ +LTTNG_HIDDEN +int lttng_event_enabler_attach_exclusion(struct lttng_event_enabler *enabler, + struct lttng_ust_excluder_node *excluder); + +/* + * Synchronize bytecodes for the enabler and the instance (event or trigger). + * + * This function goes over all bytecode programs of the enabler (event or + * trigger enabler) to ensure each is linked to the provided instance. + */ +LTTNG_HIDDEN +void lttng_enabler_link_bytecode(const struct lttng_event_desc *event_desc, + struct lttng_ctx **ctx, + struct cds_list_head *instance_bytecode_runtime_head, + struct cds_list_head *enabler_bytecode_runtime_head); + +/* + * Allocate and initialize a `struct lttng_trigger_group` object. + * + * On success, returns a `struct lttng_triggre_group`, + * on memory error, returns NULL. + */ +LTTNG_HIDDEN +struct lttng_trigger_group *lttng_trigger_group_create(void); + +/* + * Destroy a `struct lttng_trigger_group` object. 
+ */ +LTTNG_HIDDEN +void lttng_trigger_group_destroy( + struct lttng_trigger_group *trigger_group); + +/* + * Allocate and initialize a `struct lttng_trigger_enabler` object. + * + * On success, returns a `struct lttng_trigger_enabler`, + * On memory error, returns NULL. + */ +LTTNG_HIDDEN +struct lttng_trigger_enabler *lttng_trigger_enabler_create( + struct lttng_trigger_group *trigger_group, + enum lttng_enabler_format_type format_type, + struct lttng_ust_trigger *trigger_param); + +/* + * Destroy a `struct lttng_trigger_enabler` object. + */ +LTTNG_HIDDEN +void lttng_trigger_enabler_destroy(struct lttng_trigger_enabler *trigger_enabler); + +/* + * Enable a `struct lttng_trigger_enabler` object and all triggers related to + * this enabler. + */ +LTTNG_HIDDEN +int lttng_trigger_enabler_enable(struct lttng_trigger_enabler *trigger_enabler); + +/* + * Disable a `struct lttng_trigger_enabler` object and all triggers related to + * this enabler. + */ +LTTNG_HIDDEN +int lttng_trigger_enabler_disable(struct lttng_trigger_enabler *trigger_enabler); + +/* + * Attach filter bytecode program to `struct lttng_trigger_enabler` and all + * triggers related to this enabler. + */ +LTTNG_HIDDEN +int lttng_trigger_enabler_attach_filter_bytecode( + struct lttng_trigger_enabler *trigger_enabler, + struct lttng_ust_bytecode_node *bytecode); + +/* + * Attach capture bytecode program to `struct lttng_trigger_enabler` and all + * triggers related to this enabler. + */ +LTTNG_HIDDEN +int lttng_trigger_enabler_attach_capture_bytecode( + struct lttng_trigger_enabler *trigger_enabler, + struct lttng_ust_bytecode_node *bytecode); + +/* + * Attach exclusion list to `struct lttng_trigger_enabler` and all + * triggers related to this enabler. 
+ */ +LTTNG_HIDDEN +int lttng_trigger_enabler_attach_exclusion( + struct lttng_trigger_enabler *trigger_enabler, + struct lttng_ust_excluder_node *excluder); + +LTTNG_HIDDEN +void lttng_free_trigger_filter_runtime(struct lttng_trigger *trigger); + +/* + * Connect the probe on all enablers matching this event description. + * Called on library load. + */ +LTTNG_HIDDEN +int lttng_fix_pending_triggers(void); + +#endif /* _LTTNG_UST_EVENTS_INTERNAL_H */ diff --git a/libmsgpack/Makefile.am b/libmsgpack/Makefile.am new file mode 100644 index 00000000..b157e674 --- /dev/null +++ b/libmsgpack/Makefile.am @@ -0,0 +1,9 @@ +AM_CPPFLAGS = -I$(top_srcdir)/include -I$(top_builddir)/include +AM_CFLAGS += -fno-strict-aliasing + +noinst_LTLIBRARIES = libmsgpack.la + +libmsgpack_la_SOURCES = \ + msgpack.c msgpack.h + +libmsgpack_la_CFLAGS = -DUST_COMPONENT="libmsgpack" $(AM_CFLAGS) diff --git a/libmsgpack/msgpack.c b/libmsgpack/msgpack.c new file mode 100644 index 00000000..0803bc65 --- /dev/null +++ b/libmsgpack/msgpack.c @@ -0,0 +1,517 @@ +/* + * msgpack.c + * + * Copyright (C) 2020 Francis Deslauriers + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; only + * version 2.1 of the License. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#define _GNU_SOURCE +#define _LGPL_SOURCE +#include + +#define MSGPACK_FIXSTR_ID_MASK 0xA0 +#define MSGPACK_FIXMAP_ID_MASK 0x80 +#define MSGPACK_FIXARRAY_ID_MASK 0x90 + +#define MSGPACK_NIL_ID 0xC0 +#define MSGPACK_FALSE_ID 0xC2 +#define MSGPACK_TRUE_ID 0xC3 +#define MSGPACK_MAP16_ID 0xDE +#define MSGPACK_ARRAY16_ID 0xDC + +#define MSGPACK_UINT8_ID 0xCC +#define MSGPACK_UINT16_ID 0xCD +#define MSGPACK_UINT32_ID 0xCE +#define MSGPACK_UINT64_ID 0xCF + +#define MSGPACK_INT8_ID 0xD0 +#define MSGPACK_INT16_ID 0xD1 +#define MSGPACK_INT32_ID 0xD2 +#define MSGPACK_INT64_ID 0xD3 + +#define MSGPACK_FLOAT64_ID 0xCB +#define MSGPACK_STR16_ID 0xDA + +#define MSGPACK_FIXINT_MAX ((1 << 7) - 1) +#define MSGPACK_FIXINT_MIN -(1 << 5) +#define MSGPACK_FIXMAP_MAX_COUNT 15 +#define MSGPACK_FIXARRAY_MAX_COUNT 15 +#define MSGPACK_FIXSTR_MAX_LENGTH 31 + +#ifdef __KERNEL__ +#include +#include +#include + +#include + +#define INT8_MIN (-128) +#define INT16_MIN (-32767-1) +#define INT32_MIN (-2147483647-1) +#define INT8_MAX (127) +#define INT16_MAX (32767) +#define INT32_MAX (2147483647) +#define UINT8_MAX (255) +#define UINT16_MAX (65535) +#define UINT32_MAX (4294967295U) + +#define byteswap_host_to_be16(_tmp) cpu_to_be16(_tmp) +#define byteswap_host_to_be32(_tmp) cpu_to_be32(_tmp) +#define byteswap_host_to_be64(_tmp) cpu_to_be64(_tmp) + +#define lttng_msgpack_assert(cond) WARN_ON(!(cond)) + +#else /* __KERNEL__ */ + +#include +#include +#include + +#include "msgpack.h" + +#define byteswap_host_to_be16(_tmp) htobe16(_tmp) +#define byteswap_host_to_be32(_tmp) htobe32(_tmp) +#define byteswap_host_to_be64(_tmp) htobe64(_tmp) + +#define lttng_msgpack_assert(cond) ({ \ + if (!(cond)) \ + fprintf(stderr, "Assertion failed. 
%s:%d\n", __FILE__, __LINE__); \ + }) +#endif /* __KERNEL__ */ + +static inline int lttng_msgpack_append_buffer( + struct lttng_msgpack_writer *writer, + const uint8_t *buf, + size_t length) +{ + int ret = 0; + + lttng_msgpack_assert(buf); + + /* Ensure we are not trying to write after the end of the buffer. */ + if (writer->write_pos + length > writer->end_write_pos) { + ret = -1; + goto end; + } + + memcpy(writer->write_pos, buf, length); + writer->write_pos += length; +end: + return ret; +} + +static inline int lttng_msgpack_append_u8( + struct lttng_msgpack_writer *writer, uint8_t value) +{ + return lttng_msgpack_append_buffer(writer, &value, sizeof(value)); +} + +static inline int lttng_msgpack_append_u16( + struct lttng_msgpack_writer *writer, uint16_t value) +{ + value = byteswap_host_to_be16(value); + + return lttng_msgpack_append_buffer(writer, (uint8_t *) &value, sizeof(value)); +} + +static inline int lttng_msgpack_append_u32( + struct lttng_msgpack_writer *writer, uint32_t value) +{ + value = byteswap_host_to_be32(value); + + return lttng_msgpack_append_buffer(writer, (uint8_t *) &value, sizeof(value)); +} + +static inline int lttng_msgpack_append_u64( + struct lttng_msgpack_writer *writer, uint64_t value) +{ + value = byteswap_host_to_be64(value); + + return lttng_msgpack_append_buffer(writer, (uint8_t *) &value, sizeof(value)); +} + +static inline int lttng_msgpack_append_f64( + struct lttng_msgpack_writer *writer, double value) +{ + + union { + double d; + uint64_t u; + } u; + + u.d = value; + + return lttng_msgpack_append_u64(writer, u.u); +} + +static inline int lttng_msgpack_append_i8( + struct lttng_msgpack_writer *writer, int8_t value) +{ + return lttng_msgpack_append_u8(writer, (uint8_t) value); +} + +static inline int lttng_msgpack_append_i16( + struct lttng_msgpack_writer *writer, int16_t value) +{ + return lttng_msgpack_append_u16(writer, (uint16_t) value); +} + +static inline int lttng_msgpack_append_i32( + struct lttng_msgpack_writer 
*writer, int32_t value) +{ + return lttng_msgpack_append_u32(writer, (uint32_t) value); +} + +static inline int lttng_msgpack_append_i64( + struct lttng_msgpack_writer *writer, int64_t value) +{ + return lttng_msgpack_append_u64(writer, (uint64_t) value); +} + +static inline int lttng_msgpack_encode_f64( + struct lttng_msgpack_writer *writer, double value) +{ + int ret; + + ret = lttng_msgpack_append_u8(writer, MSGPACK_FLOAT64_ID); + if (ret) + goto end; + + ret = lttng_msgpack_append_f64(writer, value); + if (ret) + goto end; + +end: + return ret; +} + +static inline int lttng_msgpack_encode_fixmap( + struct lttng_msgpack_writer *writer, uint8_t count) +{ + int ret = 0; + + lttng_msgpack_assert(count <= MSGPACK_FIXMAP_MAX_COUNT); + + ret = lttng_msgpack_append_u8(writer, MSGPACK_FIXMAP_ID_MASK | count); + if (ret) + goto end; + +end: + return ret; +} + +static inline int lttng_msgpack_encode_map16( + struct lttng_msgpack_writer *writer, uint16_t count) +{ + int ret; + + lttng_msgpack_assert(count > MSGPACK_FIXMAP_MAX_COUNT); + + ret = lttng_msgpack_append_u8(writer, MSGPACK_MAP16_ID); + if (ret) + goto end; + + ret = lttng_msgpack_append_u16(writer, count); + if (ret) + goto end; + +end: + return ret; +} + +static inline int lttng_msgpack_encode_fixarray( + struct lttng_msgpack_writer *writer, uint8_t count) +{ + int ret = 0; + + lttng_msgpack_assert(count <= MSGPACK_FIXARRAY_MAX_COUNT); + + ret = lttng_msgpack_append_u8(writer, MSGPACK_FIXARRAY_ID_MASK | count); + if (ret) + goto end; + +end: + return ret; +} + +static inline int lttng_msgpack_encode_array16( + struct lttng_msgpack_writer *writer, uint16_t count) +{ + int ret; + + lttng_msgpack_assert(count > MSGPACK_FIXARRAY_MAX_COUNT); + + ret = lttng_msgpack_append_u8(writer, MSGPACK_ARRAY16_ID); + if (ret) + goto end; + + ret = lttng_msgpack_append_u16(writer, count); + if (ret) + goto end; + +end: + return ret; +} + +static inline int lttng_msgpack_encode_fixstr( + struct lttng_msgpack_writer *writer, + 
const char *str, + uint8_t len) +{ + int ret; + + lttng_msgpack_assert(len <= MSGPACK_FIXSTR_MAX_LENGTH); + + ret = lttng_msgpack_append_u8(writer, MSGPACK_FIXSTR_ID_MASK | len); + if (ret) + goto end; + + ret = lttng_msgpack_append_buffer(writer, (uint8_t *) str, len); + if (ret) + goto end; + +end: + return ret; +} + +static inline int lttng_msgpack_encode_str16( + struct lttng_msgpack_writer *writer, + const char *str, + uint16_t len) +{ + int ret; + + lttng_msgpack_assert(len > MSGPACK_FIXSTR_MAX_LENGTH); + + ret = lttng_msgpack_append_u8(writer, MSGPACK_STR16_ID); + if (ret) + goto end; + + ret = lttng_msgpack_append_u16(writer, len); + if (ret) + goto end; + + ret = lttng_msgpack_append_buffer(writer, (uint8_t *) str, len); + if (ret) + goto end; + +end: + return ret; +} + +int lttng_msgpack_begin_map(struct lttng_msgpack_writer *writer, size_t count) +{ + int ret; + + if (count < 0 || count >= (1 << 16)) { + ret = -1; + goto end; + } + + if (count <= MSGPACK_FIXMAP_MAX_COUNT) + ret = lttng_msgpack_encode_fixmap(writer, count); + else + ret = lttng_msgpack_encode_map16(writer, count); + + writer->map_nesting++; +end: + return ret; +} + +int lttng_msgpack_end_map(struct lttng_msgpack_writer *writer) +{ + lttng_msgpack_assert(writer->map_nesting > 0); + writer->map_nesting--; + return 0; +} + +int lttng_msgpack_begin_array( + struct lttng_msgpack_writer *writer, size_t count) +{ + int ret; + + if (count < 0 || count >= (1 << 16)) { + ret = -1; + goto end; + } + + if (count <= MSGPACK_FIXARRAY_MAX_COUNT) + ret = lttng_msgpack_encode_fixarray(writer, count); + else + ret = lttng_msgpack_encode_array16(writer, count); + + writer->array_nesting++; +end: + return ret; +} + +int lttng_msgpack_end_array(struct lttng_msgpack_writer *writer) +{ + lttng_msgpack_assert(writer->array_nesting > 0); + writer->array_nesting--; + return 0; +} + +int lttng_msgpack_write_str(struct lttng_msgpack_writer *writer, + const char *str) +{ + int ret; + size_t length = strlen(str); + if 
(length < 0 || length >= (1 << 16)) { + ret = -1; + goto end; + } + + if (length <= MSGPACK_FIXSTR_MAX_LENGTH) + ret = lttng_msgpack_encode_fixstr(writer, str, length); + else + ret = lttng_msgpack_encode_str16(writer, str, length); + +end: + return ret; +} + +int lttng_msgpack_write_nil(struct lttng_msgpack_writer *writer) +{ + return lttng_msgpack_append_u8(writer, MSGPACK_NIL_ID); +} + +int lttng_msgpack_write_true(struct lttng_msgpack_writer *writer) +{ + return lttng_msgpack_append_u8(writer, MSGPACK_TRUE_ID); +} + +int lttng_msgpack_write_false(struct lttng_msgpack_writer *writer) +{ + return lttng_msgpack_append_u8(writer, MSGPACK_FALSE_ID); +} + +int lttng_msgpack_write_unsigned_integer( + struct lttng_msgpack_writer *writer, uint64_t value) +{ + int ret = 0; + + if (value <= MSGPACK_FIXINT_MAX) { + ret = lttng_msgpack_append_u8(writer, (uint8_t) value); + if (ret) + goto end; + } else if (value <= UINT8_MAX) { + ret = lttng_msgpack_append_u8(writer, MSGPACK_UINT8_ID); + if (ret) + goto end; + + ret = lttng_msgpack_append_u8(writer, (uint8_t) value); + if (ret) + goto end; + } else if (value <= UINT16_MAX) { + ret = lttng_msgpack_append_u8(writer, MSGPACK_UINT16_ID); + if (ret) + goto end; + + ret = lttng_msgpack_append_u16(writer, (uint16_t) value); + if (ret) + goto end; + } else if (value <= UINT32_MAX) { + ret = lttng_msgpack_append_u8(writer, MSGPACK_UINT32_ID); + if (ret) + goto end; + + ret = lttng_msgpack_append_u32(writer, (uint32_t) value); + if (ret) + goto end; + } else { + ret = lttng_msgpack_append_u8(writer, MSGPACK_UINT64_ID); + if (ret) + goto end; + + ret = lttng_msgpack_append_u64(writer, value); + if (ret) + goto end; + } + +end: + return ret; +} + +int lttng_msgpack_write_signed_integer(struct lttng_msgpack_writer *writer, int64_t value) +{ + int ret; + + if (value >= MSGPACK_FIXINT_MIN && value <= MSGPACK_FIXINT_MAX){ + ret = lttng_msgpack_append_i8(writer, (int8_t) value); + if (ret) + goto end; + } else if (value >= INT8_MIN && value 
<= INT8_MAX) { + ret = lttng_msgpack_append_u8(writer, MSGPACK_INT8_ID); + if (ret) + goto end; + + ret = lttng_msgpack_append_i8(writer, (int8_t) value); + if (ret) + goto end; + } else if (value >= INT16_MIN && value <= INT16_MAX) { + ret = lttng_msgpack_append_u8(writer, MSGPACK_INT16_ID); + if (ret) + goto end; + + ret = lttng_msgpack_append_i16(writer, (int16_t) value); + if (ret) + goto end; + } else if (value >= INT32_MIN && value <= INT32_MAX) { + ret = lttng_msgpack_append_u8(writer, MSGPACK_INT32_ID); + if (ret) + goto end; + + ret = lttng_msgpack_append_i32(writer, (int32_t) value); + if (ret) + goto end; + } else { + ret = lttng_msgpack_append_u8(writer, MSGPACK_INT64_ID); + if (ret) + goto end; + + ret = lttng_msgpack_append_i64(writer, value); + if (ret) + goto end; + } + +end: + return ret; +} + +int lttng_msgpack_write_double(struct lttng_msgpack_writer *writer, double value) +{ + return lttng_msgpack_encode_f64(writer, value); +} + +void lttng_msgpack_writer_init(struct lttng_msgpack_writer *writer, + uint8_t *buffer, size_t size) +{ + lttng_msgpack_assert(buffer); + lttng_msgpack_assert(size >= 0); + + writer->buffer = buffer; + writer->write_pos = buffer; + writer->end_write_pos = buffer + size; + + writer->array_nesting = 0; + writer->map_nesting = 0; +} + +void lttng_msgpack_writer_fini(struct lttng_msgpack_writer *writer) +{ + memset(writer, 0, sizeof(*writer)); +} diff --git a/libmsgpack/msgpack.h b/libmsgpack/msgpack.h new file mode 100644 index 00000000..e5c011ea --- /dev/null +++ b/libmsgpack/msgpack.h @@ -0,0 +1,61 @@ +#ifndef _LTTNG_UST_MSGPACK_H +#define _LTTNG_UST_MSGPACK_H + +/* + * msgpack.h + * + * Copyright (C) 2020 Francis Deslauriers + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; only + * version 2.1 of the License. 
/* --- libmsgpack/msgpack.h --- */
#ifndef _LTTNG_UST_MSGPACK_H
#define _LTTNG_UST_MSGPACK_H

/*
 * msgpack.h
 *
 * Copyright (C) 2020 Francis Deslauriers
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * NOTE(review): the include targets were lost in extraction; stddef.h and
 * the kernel/userspace fixed-width-integer headers are assumed — confirm
 * against the original patch.
 */
#include <stddef.h>
#ifdef __KERNEL__
#include <linux/types.h>
#else /* __KERNEL__ */
#include <stdint.h>
#endif /* __KERNEL__ */

/*
 * Bounded, caller-owned serialization buffer.  `write_pos` advances from
 * `buffer` toward `end_write_pos`; writes that would overflow fail with -1
 * and leave the buffer untouched.
 */
struct lttng_msgpack_writer {
	uint8_t *buffer;
	uint8_t *write_pos;
	const uint8_t *end_write_pos;
	uint8_t array_nesting;
	uint8_t map_nesting;
};

void lttng_msgpack_writer_init(
		struct lttng_msgpack_writer *writer,
		uint8_t *buffer, size_t size);

void lttng_msgpack_writer_fini(struct lttng_msgpack_writer *writer);

/* All writers return 0 on success, -1 on buffer overflow/invalid count. */
int lttng_msgpack_write_nil(struct lttng_msgpack_writer *writer);
int lttng_msgpack_write_true(struct lttng_msgpack_writer *writer);
int lttng_msgpack_write_false(struct lttng_msgpack_writer *writer);
int lttng_msgpack_write_unsigned_integer(
		struct lttng_msgpack_writer *writer, uint64_t value);
int lttng_msgpack_write_signed_integer(
		struct lttng_msgpack_writer *writer, int64_t value);
int lttng_msgpack_write_double(struct lttng_msgpack_writer *writer, double value);
int lttng_msgpack_write_str(struct lttng_msgpack_writer *writer,
		const char *value);
int lttng_msgpack_begin_map(struct lttng_msgpack_writer *writer, size_t count);
int lttng_msgpack_end_map(struct lttng_msgpack_writer *writer);
int lttng_msgpack_begin_array(
		struct lttng_msgpack_writer *writer, size_t count);
int lttng_msgpack_end_array(struct lttng_msgpack_writer *writer);

#endif /* _LTTNG_UST_MSGPACK_H */
/* --- snprintf/patient_write.c --- */

/* writev() */
#include <sys/uio.h>

/*
 * writev() the full content of every buffer in `iov`, retrying on EINTR
 * and resuming after partial writes.
 *
 * The `struct iovec *iov` is not `const` because we modify it to support
 * partial writes.
 *
 * Returns the total number of bytes written, or writev()'s own return
 * value (0 or -1, errno set) on failure.
 */
ssize_t patient_writev(int fd, struct iovec *iov, int iovcnt)
{
	ssize_t written, total_written = 0;
	int curr_element_idx = 0;

	for (;;) {
		written = writev(fd, iov + curr_element_idx,
				iovcnt - curr_element_idx);
		if (written == -1 && errno == EINTR) {
			continue;
		}
		if (written <= 0) {
			return written;
		}

		total_written += written;

		/*
		 * If it's not the last element in the vector and we have
		 * written more than the current element size, then increment
		 * the current element index until we reach the element that
		 * was partially written.
		 *
		 * Fix: cast `written` (ssize_t, known >= 0 here) to size_t to
		 * avoid a signed/unsigned comparison with iov_len.
		 */
		while (curr_element_idx < iovcnt &&
				(size_t) written >= iov[curr_element_idx].iov_len) {
			written -= iov[curr_element_idx].iov_len;
			curr_element_idx++;
		}

		/* Maybe we are done. */
		if (curr_element_idx >= iovcnt) {
			break;
		}

		/*
		 * Update the current element base and size.
		 * Fix: arithmetic on `void *` is a GNU extension; go through
		 * `char *` for portable pointer arithmetic.
		 */
		iov[curr_element_idx].iov_base =
			(char *) iov[curr_element_idx].iov_base + written;
		iov[curr_element_idx].iov_len -= written;
	}

	return total_written;
}

/*
 * send() the full content of `buf`, retrying on EINTR.
 *
 * NOTE(review): this function's body is truncated in this chunk;
 * reconstructed to mirror patient_write()'s retry loop — confirm against
 * the original file.
 */
ssize_t patient_send(int fd, const void *buf, size_t count, int flags)
{
	const char *bufc = (const char *) buf;
	ssize_t result;

	for (;;) {
		result = send(fd, bufc, count, flags);
		if (result == -1 && errno == EINTR)
			continue;
		if (result <= 0)
			return result;
		count -= result;
		bufc += result;
		if (!count)
			break;
	}

	return bufc - (const char *) buf;
}

/* --- Remaining build/test fragments in this span, preserved verbatim ---
tests/Makefile.am:
SUBDIRS = utils hello same_line_tracepoint snprintf benchmark ust-elf \
	ctf-types test-app-ctx gcc-weak-hidden hello-many \
	libmsgpack
TESTS = snprintf/test_snprintf \
	ust-elf/test_ust_elf \
	gcc-weak-hidden/test_gcc_weak_hidden \
	libmsgpack/test_msgpack

tests/gcc-weak-hidden/main.c (tail of main): `return 0;` replaced by
`return exit_status();` so the TAP harness result is propagated.

tests/libmsgpack/Makefile.am:
AM_CPPFLAGS = -I$(top_srcdir)/include -I$(top_srcdir)/tests/utils
noinst_PROGRAMS = test_msgpack
test_msgpack_SOURCES = test_msgpack.c
test_msgpack_LDADD = \
	$(top_builddir)/libmsgpack/libmsgpack.la \
	$(top_builddir)/tests/utils/libtap.a
test_msgpack_CFLAGS = $(AM_CFLAGS)
--- end fragments --- */
b/tests/libmsgpack/test_msgpack.c @@ -0,0 +1,386 @@ +/* + * test_msgpack.c + * + * Copyright (C) 2020 Francis Deslauriers + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; only + * version 2.1 of the License. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include + +#include "tap.h" + +#include "../libmsgpack/msgpack.h" + +#define BUFFER_SIZE 4096 +#define NUM_TESTS 23 + + +/* + * echo 'null' | json2msgpack | xxd -i + */ +static const uint8_t NIL_EXPECTED[] = { 0xc0 }; + +/* + * echo '"bye"' | json2msgpack | xxd -i + */ +static const uint8_t STRING_BYE_EXPECTED[] = { 0xa3, 0x62, 0x79, 0x65 }; + +/* + * echo '1337' | json2msgpack | xxd -i + */ +static const uint8_t UINT_1337_EXPECTED[] = { 0xcd, 0x05, 0x39 }; + +/* + * echo '127' | json2msgpack | xxd -i + */ +static const uint8_t UINT_127_EXPECTED[] = { 0x7f }; + +/* + * echo '128' | json2msgpack | xxd -i + */ +static const uint8_t UINT_128_EXPECTED[] = { 0xcc, 0x80 }; + +/* + * echo '256' | json2msgpack | xxd -i + */ +static const uint8_t UINT_256_EXPECTED[] = { 0xcd, 0x01, 0x00 }; + +/* + * echo '65535' | json2msgpack | xxd -i + */ +static const uint8_t UINT_65535_EXPECTED[] = { 0xcd, 0xff, 0xff }; + +/* + * echo '65536' | json2msgpack | xxd -i + */ +static const uint8_t UINT_65536_EXPECTED[] = { 0xce, 0x00, 0x01, 0x00, 0x00 }; + +/* + * echo '4294967295' | json2msgpack | xxd -i + */ +static const 
uint8_t UINT_4294967295_EXPECTED[] = { 0xce, 0xff, 0xff, 0xff, 0xff }; + +/* + * echo '4294967296' | json2msgpack | xxd -i + */ +static const uint8_t UINT_4294967296_EXPECTED[] = { 0xcf, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00 }; + +/* + * echo '-32' | json2msgpack | xxd -i + */ +static const uint8_t INT_NEG_32_EXPECTED[] = { 0xe0 }; + +/* + * echo '-33' | json2msgpack | xxd -i + */ +static const uint8_t INT_NEG_33_EXPECTED[] = { 0xd0, 0xdf }; + +/* + * echo '-129' | json2msgpack | xxd -i + */ +static const uint8_t INT_NEG_129_EXPECTED[] = { 0xd1, 0xff, 0x7f}; + +/* + * echo '-32768' | json2msgpack | xxd -i + */ +static const uint8_t INT_NEG_32768_EXPECTED[] = { 0xd1, 0x80, 0x00 }; + +/* + * echo '-32769' | json2msgpack | xxd -i + */ +static const uint8_t INT_NEG_32769_EXPECTED[] = { 0xd2, 0xff, 0xff, 0x7f, + 0xff }; + +/* + * echo '-2147483648' | json2msgpack | xxd -i + */ +static const uint8_t INT_NEG_2147483648_EXPECTED[] = { 0xd2, 0x80, 0x00, 0x00, + 0x00 }; + +/* + * echo '-2147483649' | json2msgpack | xxd -i + */ +static const uint8_t INT_NEG_2147483649_EXPECTED[] = { 0xd3, 0xff, 0xff, 0xff, + 0xff, 0x7f, 0xff, 0xff, 0xff }; +/* + * echo '0.0' | json2msgpack | xxd -i + */ +static const uint8_t DOUBLE_ZERO_EXPECTED[] = { 0xcb, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00 }; + +/* + * echo '3.14159265' | json2msgpack | xxd -i + */ +static const uint8_t DOUBLE_PI_EXPECTED[] = { 0xcb, 0x40, 0x09, 0x21, 0xfb, 0x53, + 0xc8, 0xd4, 0xf1 }; + +/* + * echo '-3.14159265' | json2msgpack | xxd -i + */ +static const uint8_t DOUBLE_NEG_PI_EXPECTED[] = { 0xcb, 0xc0, 0x09, 0x21, 0xfb, + 0x53, 0xc8, 0xd4, 0xf1 }; + +/* + * echo [1.1, 2.3, -12345.2] | json2msgpack | xxd -i + */ +static const uint8_t ARRAY_DOUBLE_EXPECTED[] = { 0x93, 0xcb, 0x3f, 0xf1, 0x99, + 0x99, 0x99, 0x99, 0x99, 0x9a, 0xcb, 0x40, 0x02, 0x66, 0x66, + 0x66, 0x66, 0x66, 0x66, 0xcb, 0xc0, 0xc8, 0x1c, 0x99, 0x99, + 0x99, 0x99, 0x9a }; + +/* + * echo '{"type":"enum","value":117}' | json2msgpack | xxd -i
+ */ +static const uint8_t MAP_EXPECTED[] = { + 0x82, 0xa4, 0x74, 0x79, 0x70, 0x65, 0xa4, 0x65, 0x6e, 0x75, 0x6d, 0xa5, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x75 }; + +/* + * echo '["meow mix", 18, null, 14.197, [1980, 1995]]' | json2msgpack | xxd -i + */ +static const uint8_t COMPLETE_CAPTURE_EXPECTED[] = { 0x95, 0xa8, 0x6d, 0x65, + 0x6f, 0x77, 0x20, 0x6d, 0x69, 0x78, 0x12, 0xc0, 0xcb, 0x40, + 0x2c, 0x64, 0xdd, 0x2f, 0x1a, 0x9f, 0xbe, 0x92, 0xcd, 0x07, + 0xbc, 0xcd, 0x07, 0xcb }; + +static void string_test(uint8_t *buf, const char *value) +{ + struct lttng_msgpack_writer writer; + + lttng_msgpack_writer_init(&writer, buf, BUFFER_SIZE); + lttng_msgpack_write_str(&writer, value); + lttng_msgpack_writer_fini(&writer); +} + +static void int_test(uint8_t *buf, int64_t value) +{ + struct lttng_msgpack_writer writer; + + lttng_msgpack_writer_init(&writer, buf, BUFFER_SIZE); + lttng_msgpack_write_signed_integer(&writer, value); + + lttng_msgpack_writer_fini(&writer); +} + +static void uint_test(uint8_t *buf, uint64_t value) +{ + struct lttng_msgpack_writer writer; + + lttng_msgpack_writer_init(&writer, buf, BUFFER_SIZE); + lttng_msgpack_write_unsigned_integer(&writer, value); + lttng_msgpack_writer_fini(&writer); +} + +static void double_test(uint8_t *buf, double value) +{ + struct lttng_msgpack_writer writer; + + lttng_msgpack_writer_init(&writer, buf, BUFFER_SIZE); + lttng_msgpack_write_double(&writer, value); + lttng_msgpack_writer_fini(&writer); +} + +static void array_double_test(uint8_t *buf, double *values, size_t nb_values) +{ + int i = 0; + struct lttng_msgpack_writer writer; + + lttng_msgpack_writer_init(&writer, buf, BUFFER_SIZE); + lttng_msgpack_begin_array(&writer, nb_values); + + for (i = 0; i < nb_values; i++) { + lttng_msgpack_write_double(&writer, values[i]); + } + + lttng_msgpack_end_array(&writer); + lttng_msgpack_writer_fini(&writer); +} + +static void map_test(uint8_t *buf) +{ + struct lttng_msgpack_writer writer; + + lttng_msgpack_writer_init(&writer, 
buf, BUFFER_SIZE); + + lttng_msgpack_begin_map(&writer, 2); + + lttng_msgpack_write_str(&writer, "type"); + lttng_msgpack_write_str(&writer, "enum"); + + lttng_msgpack_write_str(&writer, "value"); + lttng_msgpack_write_unsigned_integer(&writer, 117); + + lttng_msgpack_end_map(&writer); + lttng_msgpack_writer_fini(&writer); +} + +static void complete_capture_test(uint8_t *buf) +{ + /* + * This testcase tests the following json representation: + * ["meow mix", 18, null, 14.197, [1980, 1995]] + */ + struct lttng_msgpack_writer writer; + + lttng_msgpack_writer_init(&writer, buf, BUFFER_SIZE); + + lttng_msgpack_begin_array(&writer, 5); + + lttng_msgpack_write_str(&writer, "meow mix"); + lttng_msgpack_write_signed_integer(&writer, 18); + lttng_msgpack_write_nil(&writer); + lttng_msgpack_write_double(&writer, 14.197); + + lttng_msgpack_begin_array(&writer, 2); + + lttng_msgpack_write_unsigned_integer(&writer, 1980); + lttng_msgpack_write_unsigned_integer(&writer, 1995); + + lttng_msgpack_end_array(&writer); + + lttng_msgpack_end_array(&writer); + + lttng_msgpack_writer_fini(&writer); +} + +static void nil_test(uint8_t *buf) +{ + struct lttng_msgpack_writer writer; + + lttng_msgpack_writer_init(&writer, buf, BUFFER_SIZE); + lttng_msgpack_write_nil(&writer); + lttng_msgpack_writer_fini(&writer); +} + +int main(int argc, char *argv[]) +{ + uint8_t buf[BUFFER_SIZE] = {0}; + double arr_double[] = {1.1, 2.3, -12345.2}; + + plan_tests(NUM_TESTS); + + diag("Testing msgpack implementation"); + + /* + * Expected outputs were produced using the `json2msgpack` tool. + * https://github.com/ludocode/msgpack-tools + * For example, here is the command to produce the null test expected + * output: + * echo 'null' | json2msgpack | hexdump -v -e '"\\\x" 1/1 "%02x"' + * + * The only exception is that we always produce 64bits integer to + * represent integers even if they would fit into smaller objects so + * they need to be manually crafted in 64bits two's complement (if + * signed) big endian.
+ */ + nil_test(buf); + ok(memcmp(buf, NIL_EXPECTED, sizeof(NIL_EXPECTED)) == 0, + "NIL object"); + + string_test(buf, "bye"); + ok(memcmp(buf, STRING_BYE_EXPECTED, sizeof(STRING_BYE_EXPECTED)) == 0, + "String \"bye\" object"); + + uint_test(buf, 1337); + ok(memcmp(buf, UINT_1337_EXPECTED, sizeof(UINT_1337_EXPECTED)) == 0, + "Unsigned integer \"1337\" object"); + + uint_test(buf, 127); + ok(memcmp(buf, UINT_127_EXPECTED, sizeof(UINT_127_EXPECTED)) == 0, + "Unsigned integer \"127\" object"); + + uint_test(buf, 128); + ok(memcmp(buf, UINT_128_EXPECTED, sizeof(UINT_128_EXPECTED)) == 0, + "Unsigned integer \"128\" object"); + + uint_test(buf, 256); + ok(memcmp(buf, UINT_256_EXPECTED, sizeof(UINT_256_EXPECTED)) == 0, + "Unsigned integer \"256\" object"); + + uint_test(buf, 65536); + ok(memcmp(buf, UINT_65536_EXPECTED, sizeof(UINT_65536_EXPECTED)) == 0, + "Unsigned integer \"65536\" object"); + + uint_test(buf, 65535); + ok(memcmp(buf, UINT_65535_EXPECTED, sizeof(UINT_65535_EXPECTED)) == 0, + "Unsigned integer \"65535\" object"); + + uint_test(buf, 4294967295); + ok(memcmp(buf, UINT_4294967295_EXPECTED, sizeof(UINT_4294967295_EXPECTED)) == 0, + "Unsigned integer \"4294967295\" object"); + + uint_test(buf, 4294967296); + ok(memcmp(buf, UINT_4294967296_EXPECTED, sizeof(UINT_4294967296_EXPECTED)) == 0, + "Unsigned integer \"4294967296\" object"); + + int_test(buf, -32); + ok(memcmp(buf, INT_NEG_32_EXPECTED, sizeof(INT_NEG_32_EXPECTED)) == 0, + "Signed integer \"-32\" object"); + + int_test(buf, -33); + ok(memcmp(buf, INT_NEG_33_EXPECTED, sizeof(INT_NEG_33_EXPECTED)) == 0, + "Signed integer \"-33\" object"); + + int_test(buf, -129); + ok(memcmp(buf, INT_NEG_129_EXPECTED, sizeof(INT_NEG_129_EXPECTED)) == 0, + "Signed integer \"-129\" object"); + + int_test(buf, -32768); + ok(memcmp(buf, INT_NEG_32768_EXPECTED, sizeof(INT_NEG_32768_EXPECTED)) == 0, + "Signed integer \"-32768\" object"); + + int_test(buf, -32769); + ok(memcmp(buf, INT_NEG_32769_EXPECTED, 
sizeof(INT_NEG_32769_EXPECTED)) == 0, + "Signed integer \"-32769\" object"); + + int_test(buf, -2147483648); + ok(memcmp(buf, INT_NEG_2147483648_EXPECTED, sizeof(INT_NEG_2147483648_EXPECTED)) == 0, + "Signed integer \"-2147483648\" object"); + + int_test(buf, -2147483649); + ok(memcmp(buf, INT_NEG_2147483649_EXPECTED, sizeof(INT_NEG_2147483649_EXPECTED)) == 0, + "Signed integer \"-2147483649\" object"); + + double_test(buf, 0.0); + ok(memcmp(buf, DOUBLE_ZERO_EXPECTED, sizeof(DOUBLE_ZERO_EXPECTED)) == 0, + "double \"0.0\" object"); + + double_test(buf, 3.14159265); + ok(memcmp(buf, DOUBLE_PI_EXPECTED, sizeof(DOUBLE_PI_EXPECTED)) == 0, + "double \"PI\" object"); + + double_test(buf, -3.14159265); + ok(memcmp(buf, DOUBLE_NEG_PI_EXPECTED, sizeof(DOUBLE_NEG_PI_EXPECTED)) == 0, + "double \"-PI\" object"); + + array_double_test(buf, arr_double, 3); + ok(memcmp(buf, ARRAY_DOUBLE_EXPECTED, sizeof(ARRAY_DOUBLE_EXPECTED)) == 0, + "Array of double object"); + + map_test(buf); + ok(memcmp(buf, MAP_EXPECTED, sizeof(MAP_EXPECTED)) == 0, + "Map object"); + + complete_capture_test(buf); + ok(memcmp(buf, COMPLETE_CAPTURE_EXPECTED, sizeof(COMPLETE_CAPTURE_EXPECTED)) == 0, + "Complete capture object"); + + return EXIT_SUCCESS; +} diff --git a/tests/snprintf/snprintf.c b/tests/snprintf/snprintf.c index 8d3fd5c0..c1087402 100644 --- a/tests/snprintf/snprintf.c +++ b/tests/snprintf/snprintf.c @@ -38,5 +38,5 @@ int main() sprintf(test_desc, test_desc_fmt_str, escaped_test_fmt_str); ok(strcmp(buf, expected) == 0, test_desc); - return 0; + return exit_status(); } diff --git a/tests/test-app-ctx/hello.c b/tests/test-app-ctx/hello.c index e9e45ec7..ec512635 100644 --- a/tests/test-app-ctx/hello.c +++ b/tests/test-app-ctx/hello.c @@ -240,16 +240,16 @@ void test_get_value(struct lttng_ctx_field *field, value->u.s64 = -64; break; case LTTNG_UST_DYNAMIC_TYPE_U8: - value->u.s64 = 8; + value->u.u64 = 8; break; case LTTNG_UST_DYNAMIC_TYPE_U16: - value->u.s64 = 16; + value->u.u64 = 16; break; case 
LTTNG_UST_DYNAMIC_TYPE_U32: - value->u.s64 = 32; + value->u.u64 = 32; break; case LTTNG_UST_DYNAMIC_TYPE_U64: - value->u.s64 = 64; + value->u.u64 = 64; break; case LTTNG_UST_DYNAMIC_TYPE_FLOAT: value->u.d = 22322.0; diff --git a/tests/ust-elf/ust-elf.c b/tests/ust-elf/ust-elf.c index af9b1239..24102819 100644 --- a/tests/ust-elf/ust-elf.c +++ b/tests/ust-elf/ust-elf.c @@ -166,5 +166,5 @@ int main(int argc, char **argv) AARCH64_BE_CRC); test_pic(test_dir); - return EXIT_SUCCESS; + return exit_status(); } -- 2.34.1