/*
 * lttng-filter-validator.c
 *
 * LTTng UST filter bytecode validator.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <time.h>
#include <assert.h>
#include <stdlib.h>
#include <inttypes.h>

#include <urcu/rculfhash.h>

#include "lttng-filter.h"
#include "lttng-hash-helper.h"
#include "string-utils.h"
/*
 * Number of merge points for hash table size. Hash table initialized to
 * that size, and we do not resize, because we do not want to trigger
 * RCU worker thread execution: fall-back on linear traversal if number
 * of merge points exceeds this value.
 */
#define DEFAULT_NR_MERGE_POINTS		128
#define MIN_NR_BUCKETS			128
#define MAX_NR_BUCKETS			128
46 /* merge point table node */
48 struct cds_lfht_node node
;
50 /* Context at merge point */
52 unsigned long target_pc
;
55 static unsigned long lttng_hash_seed
;
56 static unsigned int lttng_hash_seed_ready
;
59 int lttng_hash_match(struct cds_lfht_node
*node
, const void *key
)
61 struct lfht_mp_node
*mp_node
=
62 caa_container_of(node
, struct lfht_mp_node
, node
);
63 unsigned long key_pc
= (unsigned long) key
;
65 if (mp_node
->target_pc
== key_pc
)
72 int merge_points_compare(const struct vstack
*stacka
,
73 const struct vstack
*stackb
)
77 if (stacka
->top
!= stackb
->top
)
79 len
= stacka
->top
+ 1;
81 for (i
= 0; i
< len
; i
++) {
82 if (stacka
->e
[i
].type
!= REG_UNKNOWN
83 && stackb
->e
[i
].type
!= REG_UNKNOWN
84 && stacka
->e
[i
].type
!= stackb
->e
[i
].type
)
91 int merge_point_add_check(struct cds_lfht
*ht
, unsigned long target_pc
,
92 const struct vstack
*stack
)
94 struct lfht_mp_node
*node
;
95 unsigned long hash
= lttng_hash_mix((const char *) target_pc
,
98 struct cds_lfht_node
*ret
;
100 dbg_printf("Filter: adding merge point at offset %lu, hash %lu\n",
102 node
= zmalloc(sizeof(struct lfht_mp_node
));
105 node
->target_pc
= target_pc
;
106 memcpy(&node
->stack
, stack
, sizeof(node
->stack
));
107 ret
= cds_lfht_add_unique(ht
, hash
, lttng_hash_match
,
108 (const char *) target_pc
, &node
->node
);
109 if (ret
!= &node
->node
) {
110 struct lfht_mp_node
*ret_mp
=
111 caa_container_of(ret
, struct lfht_mp_node
, node
);
113 /* Key already present */
114 dbg_printf("Filter: compare merge points for offset %lu, hash %lu\n",
117 if (merge_points_compare(stack
, &ret_mp
->stack
)) {
118 ERR("Merge points differ for offset %lu\n",
127 * Binary comparators use top of stack and top of stack -1.
128 * Return 0 if typing is known to match, 1 if typing is dynamic
129 * (unknown), negative error value on error.
132 int bin_op_compare_check(struct vstack
*stack
, filter_opcode_t opcode
,
135 if (unlikely(!vstack_ax(stack
) || !vstack_bx(stack
)))
138 switch (vstack_ax(stack
)->type
) {
145 switch (vstack_bx(stack
)->type
) {
153 case REG_STAR_GLOB_STRING
:
154 if (opcode
!= FILTER_OP_EQ
&& opcode
!= FILTER_OP_NE
) {
163 case REG_STAR_GLOB_STRING
:
164 switch (vstack_bx(stack
)->type
) {
171 if (opcode
!= FILTER_OP_EQ
&& opcode
!= FILTER_OP_NE
) {
175 case REG_STAR_GLOB_STRING
:
183 switch (vstack_bx(stack
)->type
) {
190 case REG_STAR_GLOB_STRING
:
204 ERR("type mismatch for '%s' binary operator\n", str
);
208 ERR("empty stack for '%s' binary operator\n", str
);
212 ERR("unknown type for '%s' binary operator\n", str
);
217 * Binary bitwise operators use top of stack and top of stack -1.
218 * Return 0 if typing is known to match, 1 if typing is dynamic
219 * (unknown), negative error value on error.
222 int bin_op_bitwise_check(struct vstack
*stack
, filter_opcode_t opcode
,
225 if (unlikely(!vstack_ax(stack
) || !vstack_bx(stack
)))
228 switch (vstack_ax(stack
)->type
) {
235 switch (vstack_bx(stack
)->type
) {
252 ERR("empty stack for '%s' binary operator\n", str
);
256 ERR("unknown type for '%s' binary operator\n", str
);
261 int validate_get_symbol(struct bytecode_runtime
*bytecode
,
262 const struct get_symbol
*sym
)
264 const char *str
, *str_limit
;
267 if (sym
->offset
>= bytecode
->p
.bc
->bc
.len
- bytecode
->p
.bc
->bc
.reloc_offset
)
270 str
= bytecode
->p
.bc
->bc
.data
+ bytecode
->p
.bc
->bc
.reloc_offset
+ sym
->offset
;
271 str_limit
= bytecode
->p
.bc
->bc
.data
+ bytecode
->p
.bc
->bc
.len
;
272 len_limit
= str_limit
- str
;
273 if (strnlen(str
, len_limit
) == len_limit
)
279 * Validate bytecode range overflow within the validation pass.
280 * Called for each instruction encountered.
283 int bytecode_validate_overflow(struct bytecode_runtime
*bytecode
,
284 char *start_pc
, char *pc
)
288 switch (*(filter_opcode_t
*) pc
) {
289 case FILTER_OP_UNKNOWN
:
292 ERR("unknown bytecode op %u\n",
293 (unsigned int) *(filter_opcode_t
*) pc
);
298 case FILTER_OP_RETURN
:
299 case FILTER_OP_RETURN_S64
:
301 if (unlikely(pc
+ sizeof(struct return_op
)
302 > start_pc
+ bytecode
->len
)) {
313 case FILTER_OP_MINUS
:
315 ERR("unsupported bytecode op %u\n",
316 (unsigned int) *(filter_opcode_t
*) pc
);
327 case FILTER_OP_EQ_STRING
:
328 case FILTER_OP_NE_STRING
:
329 case FILTER_OP_GT_STRING
:
330 case FILTER_OP_LT_STRING
:
331 case FILTER_OP_GE_STRING
:
332 case FILTER_OP_LE_STRING
:
333 case FILTER_OP_EQ_STAR_GLOB_STRING
:
334 case FILTER_OP_NE_STAR_GLOB_STRING
:
335 case FILTER_OP_EQ_S64
:
336 case FILTER_OP_NE_S64
:
337 case FILTER_OP_GT_S64
:
338 case FILTER_OP_LT_S64
:
339 case FILTER_OP_GE_S64
:
340 case FILTER_OP_LE_S64
:
341 case FILTER_OP_EQ_DOUBLE
:
342 case FILTER_OP_NE_DOUBLE
:
343 case FILTER_OP_GT_DOUBLE
:
344 case FILTER_OP_LT_DOUBLE
:
345 case FILTER_OP_GE_DOUBLE
:
346 case FILTER_OP_LE_DOUBLE
:
347 case FILTER_OP_EQ_DOUBLE_S64
:
348 case FILTER_OP_NE_DOUBLE_S64
:
349 case FILTER_OP_GT_DOUBLE_S64
:
350 case FILTER_OP_LT_DOUBLE_S64
:
351 case FILTER_OP_GE_DOUBLE_S64
:
352 case FILTER_OP_LE_DOUBLE_S64
:
353 case FILTER_OP_EQ_S64_DOUBLE
:
354 case FILTER_OP_NE_S64_DOUBLE
:
355 case FILTER_OP_GT_S64_DOUBLE
:
356 case FILTER_OP_LT_S64_DOUBLE
:
357 case FILTER_OP_GE_S64_DOUBLE
:
358 case FILTER_OP_LE_S64_DOUBLE
:
359 case FILTER_OP_BIT_RSHIFT
:
360 case FILTER_OP_BIT_LSHIFT
:
361 case FILTER_OP_BIT_AND
:
362 case FILTER_OP_BIT_OR
:
363 case FILTER_OP_BIT_XOR
:
365 if (unlikely(pc
+ sizeof(struct binary_op
)
366 > start_pc
+ bytecode
->len
)) {
373 case FILTER_OP_UNARY_PLUS
:
374 case FILTER_OP_UNARY_MINUS
:
375 case FILTER_OP_UNARY_NOT
:
376 case FILTER_OP_UNARY_PLUS_S64
:
377 case FILTER_OP_UNARY_MINUS_S64
:
378 case FILTER_OP_UNARY_NOT_S64
:
379 case FILTER_OP_UNARY_PLUS_DOUBLE
:
380 case FILTER_OP_UNARY_MINUS_DOUBLE
:
381 case FILTER_OP_UNARY_NOT_DOUBLE
:
382 case FILTER_OP_UNARY_BIT_NOT
:
384 if (unlikely(pc
+ sizeof(struct unary_op
)
385 > start_pc
+ bytecode
->len
)) {
395 if (unlikely(pc
+ sizeof(struct logical_op
)
396 > start_pc
+ bytecode
->len
)) {
403 case FILTER_OP_LOAD_FIELD_REF
:
405 ERR("Unknown field ref type\n");
410 /* get context ref */
411 case FILTER_OP_GET_CONTEXT_REF
:
412 case FILTER_OP_LOAD_FIELD_REF_STRING
:
413 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
414 case FILTER_OP_LOAD_FIELD_REF_S64
:
415 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
416 case FILTER_OP_GET_CONTEXT_REF_STRING
:
417 case FILTER_OP_GET_CONTEXT_REF_S64
:
418 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
420 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct field_ref
)
421 > start_pc
+ bytecode
->len
)) {
427 /* load from immediate operand */
428 case FILTER_OP_LOAD_STRING
:
429 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
431 struct load_op
*insn
= (struct load_op
*) pc
;
432 uint32_t str_len
, maxlen
;
434 if (unlikely(pc
+ sizeof(struct load_op
)
435 > start_pc
+ bytecode
->len
)) {
440 maxlen
= start_pc
+ bytecode
->len
- pc
- sizeof(struct load_op
);
441 str_len
= strnlen(insn
->data
, maxlen
);
442 if (unlikely(str_len
>= maxlen
)) {
443 /* Final '\0' not found within range */
449 case FILTER_OP_LOAD_S64
:
451 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct literal_numeric
)
452 > start_pc
+ bytecode
->len
)) {
458 case FILTER_OP_LOAD_DOUBLE
:
460 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct literal_double
)
461 > start_pc
+ bytecode
->len
)) {
467 case FILTER_OP_CAST_TO_S64
:
468 case FILTER_OP_CAST_DOUBLE_TO_S64
:
469 case FILTER_OP_CAST_NOP
:
471 if (unlikely(pc
+ sizeof(struct cast_op
)
472 > start_pc
+ bytecode
->len
)) {
479 * Instructions for recursive traversal through composed types.
481 case FILTER_OP_GET_CONTEXT_ROOT
:
482 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
483 case FILTER_OP_GET_PAYLOAD_ROOT
:
484 case FILTER_OP_LOAD_FIELD
:
485 case FILTER_OP_LOAD_FIELD_S8
:
486 case FILTER_OP_LOAD_FIELD_S16
:
487 case FILTER_OP_LOAD_FIELD_S32
:
488 case FILTER_OP_LOAD_FIELD_S64
:
489 case FILTER_OP_LOAD_FIELD_U8
:
490 case FILTER_OP_LOAD_FIELD_U16
:
491 case FILTER_OP_LOAD_FIELD_U32
:
492 case FILTER_OP_LOAD_FIELD_U64
:
493 case FILTER_OP_LOAD_FIELD_STRING
:
494 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
495 case FILTER_OP_LOAD_FIELD_DOUBLE
:
496 if (unlikely(pc
+ sizeof(struct load_op
)
497 > start_pc
+ bytecode
->len
)) {
502 case FILTER_OP_GET_SYMBOL
:
504 struct load_op
*insn
= (struct load_op
*) pc
;
505 struct get_symbol
*sym
= (struct get_symbol
*) insn
->data
;
507 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct get_symbol
)
508 > start_pc
+ bytecode
->len
)) {
512 ret
= validate_get_symbol(bytecode
, sym
);
516 case FILTER_OP_GET_SYMBOL_FIELD
:
517 ERR("Unexpected get symbol field");
521 case FILTER_OP_GET_INDEX_U16
:
522 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct get_index_u16
)
523 > start_pc
+ bytecode
->len
)) {
528 case FILTER_OP_GET_INDEX_U64
:
529 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct get_index_u64
)
530 > start_pc
+ bytecode
->len
)) {
540 unsigned long delete_all_nodes(struct cds_lfht
*ht
)
542 struct cds_lfht_iter iter
;
543 struct lfht_mp_node
*node
;
544 unsigned long nr_nodes
= 0;
546 cds_lfht_for_each_entry(ht
, &iter
, node
, node
) {
549 ret
= cds_lfht_del(ht
, cds_lfht_iter_get_node(&iter
));
551 /* note: this hash table is never used concurrently */
564 int validate_instruction_context(struct bytecode_runtime
*bytecode
,
565 struct vstack
*stack
,
570 const filter_opcode_t opcode
= *(filter_opcode_t
*) pc
;
573 case FILTER_OP_UNKNOWN
:
576 ERR("unknown bytecode op %u\n",
577 (unsigned int) *(filter_opcode_t
*) pc
);
582 case FILTER_OP_RETURN
:
583 case FILTER_OP_RETURN_S64
:
593 case FILTER_OP_MINUS
:
595 ERR("unsupported bytecode op %u\n",
596 (unsigned int) opcode
);
603 ret
= bin_op_compare_check(stack
, opcode
, "==");
610 ret
= bin_op_compare_check(stack
, opcode
, "!=");
617 ret
= bin_op_compare_check(stack
, opcode
, ">");
624 ret
= bin_op_compare_check(stack
, opcode
, "<");
631 ret
= bin_op_compare_check(stack
, opcode
, ">=");
638 ret
= bin_op_compare_check(stack
, opcode
, "<=");
644 case FILTER_OP_EQ_STRING
:
645 case FILTER_OP_NE_STRING
:
646 case FILTER_OP_GT_STRING
:
647 case FILTER_OP_LT_STRING
:
648 case FILTER_OP_GE_STRING
:
649 case FILTER_OP_LE_STRING
:
651 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
652 ERR("Empty stack\n");
656 if (vstack_ax(stack
)->type
!= REG_STRING
657 || vstack_bx(stack
)->type
!= REG_STRING
) {
658 ERR("Unexpected register type for string comparator\n");
665 case FILTER_OP_EQ_STAR_GLOB_STRING
:
666 case FILTER_OP_NE_STAR_GLOB_STRING
:
668 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
669 ERR("Empty stack\n");
673 if (vstack_ax(stack
)->type
!= REG_STAR_GLOB_STRING
674 && vstack_bx(stack
)->type
!= REG_STAR_GLOB_STRING
) {
675 ERR("Unexpected register type for globbing pattern comparator\n");
682 case FILTER_OP_EQ_S64
:
683 case FILTER_OP_NE_S64
:
684 case FILTER_OP_GT_S64
:
685 case FILTER_OP_LT_S64
:
686 case FILTER_OP_GE_S64
:
687 case FILTER_OP_LE_S64
:
689 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
690 ERR("Empty stack\n");
694 if (vstack_ax(stack
)->type
!= REG_S64
695 || vstack_bx(stack
)->type
!= REG_S64
) {
696 ERR("Unexpected register type for s64 comparator\n");
703 case FILTER_OP_EQ_DOUBLE
:
704 case FILTER_OP_NE_DOUBLE
:
705 case FILTER_OP_GT_DOUBLE
:
706 case FILTER_OP_LT_DOUBLE
:
707 case FILTER_OP_GE_DOUBLE
:
708 case FILTER_OP_LE_DOUBLE
:
710 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
711 ERR("Empty stack\n");
715 if (vstack_ax(stack
)->type
!= REG_DOUBLE
&& vstack_bx(stack
)->type
!= REG_DOUBLE
) {
716 ERR("Double operator should have two double registers\n");
723 case FILTER_OP_EQ_DOUBLE_S64
:
724 case FILTER_OP_NE_DOUBLE_S64
:
725 case FILTER_OP_GT_DOUBLE_S64
:
726 case FILTER_OP_LT_DOUBLE_S64
:
727 case FILTER_OP_GE_DOUBLE_S64
:
728 case FILTER_OP_LE_DOUBLE_S64
:
730 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
731 ERR("Empty stack\n");
735 if (vstack_ax(stack
)->type
!= REG_S64
&& vstack_bx(stack
)->type
!= REG_DOUBLE
) {
736 ERR("Double-S64 operator has unexpected register types\n");
743 case FILTER_OP_EQ_S64_DOUBLE
:
744 case FILTER_OP_NE_S64_DOUBLE
:
745 case FILTER_OP_GT_S64_DOUBLE
:
746 case FILTER_OP_LT_S64_DOUBLE
:
747 case FILTER_OP_GE_S64_DOUBLE
:
748 case FILTER_OP_LE_S64_DOUBLE
:
750 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
751 ERR("Empty stack\n");
755 if (vstack_ax(stack
)->type
!= REG_DOUBLE
&& vstack_bx(stack
)->type
!= REG_S64
) {
756 ERR("S64-Double operator has unexpected register types\n");
763 case FILTER_OP_BIT_RSHIFT
:
764 ret
= bin_op_bitwise_check(stack
, opcode
, ">>");
768 case FILTER_OP_BIT_LSHIFT
:
769 ret
= bin_op_bitwise_check(stack
, opcode
, "<<");
773 case FILTER_OP_BIT_AND
:
774 ret
= bin_op_bitwise_check(stack
, opcode
, "&");
778 case FILTER_OP_BIT_OR
:
779 ret
= bin_op_bitwise_check(stack
, opcode
, "|");
783 case FILTER_OP_BIT_XOR
:
784 ret
= bin_op_bitwise_check(stack
, opcode
, "^");
790 case FILTER_OP_UNARY_PLUS
:
791 case FILTER_OP_UNARY_MINUS
:
792 case FILTER_OP_UNARY_NOT
:
794 if (!vstack_ax(stack
)) {
795 ERR("Empty stack\n");
799 switch (vstack_ax(stack
)->type
) {
801 ERR("unknown register type\n");
806 case REG_STAR_GLOB_STRING
:
807 ERR("Unary op can only be applied to numeric or floating point registers\n");
819 case FILTER_OP_UNARY_BIT_NOT
:
821 if (!vstack_ax(stack
)) {
822 ERR("Empty stack\n");
826 switch (vstack_ax(stack
)->type
) {
828 ERR("unknown register type\n");
833 case REG_STAR_GLOB_STRING
:
835 ERR("Unary bitwise op can only be applied to numeric registers\n");
846 case FILTER_OP_UNARY_PLUS_S64
:
847 case FILTER_OP_UNARY_MINUS_S64
:
848 case FILTER_OP_UNARY_NOT_S64
:
850 if (!vstack_ax(stack
)) {
851 ERR("Empty stack\n");
855 if (vstack_ax(stack
)->type
!= REG_S64
) {
856 ERR("Invalid register type\n");
863 case FILTER_OP_UNARY_PLUS_DOUBLE
:
864 case FILTER_OP_UNARY_MINUS_DOUBLE
:
865 case FILTER_OP_UNARY_NOT_DOUBLE
:
867 if (!vstack_ax(stack
)) {
868 ERR("Empty stack\n");
872 if (vstack_ax(stack
)->type
!= REG_DOUBLE
) {
873 ERR("Invalid register type\n");
884 struct logical_op
*insn
= (struct logical_op
*) pc
;
886 if (!vstack_ax(stack
)) {
887 ERR("Empty stack\n");
891 if (vstack_ax(stack
)->type
!= REG_S64
892 && vstack_ax(stack
)->type
!= REG_UNKNOWN
) {
893 ERR("Logical comparator expects S64 or dynamic register\n");
898 dbg_printf("Validate jumping to bytecode offset %u\n",
899 (unsigned int) insn
->skip_offset
);
900 if (unlikely(start_pc
+ insn
->skip_offset
<= pc
)) {
901 ERR("Loops are not allowed in bytecode\n");
909 case FILTER_OP_LOAD_FIELD_REF
:
911 ERR("Unknown field ref type\n");
915 case FILTER_OP_LOAD_FIELD_REF_STRING
:
916 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
918 struct load_op
*insn
= (struct load_op
*) pc
;
919 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
921 dbg_printf("Validate load field ref offset %u type string\n",
925 case FILTER_OP_LOAD_FIELD_REF_S64
:
927 struct load_op
*insn
= (struct load_op
*) pc
;
928 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
930 dbg_printf("Validate load field ref offset %u type s64\n",
934 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
936 struct load_op
*insn
= (struct load_op
*) pc
;
937 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
939 dbg_printf("Validate load field ref offset %u type double\n",
944 /* load from immediate operand */
945 case FILTER_OP_LOAD_STRING
:
946 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
951 case FILTER_OP_LOAD_S64
:
956 case FILTER_OP_LOAD_DOUBLE
:
961 case FILTER_OP_CAST_TO_S64
:
962 case FILTER_OP_CAST_DOUBLE_TO_S64
:
964 struct cast_op
*insn
= (struct cast_op
*) pc
;
966 if (!vstack_ax(stack
)) {
967 ERR("Empty stack\n");
971 switch (vstack_ax(stack
)->type
) {
973 ERR("unknown register type\n");
978 case REG_STAR_GLOB_STRING
:
979 ERR("Cast op can only be applied to numeric or floating point registers\n");
989 if (insn
->op
== FILTER_OP_CAST_DOUBLE_TO_S64
) {
990 if (vstack_ax(stack
)->type
!= REG_DOUBLE
) {
991 ERR("Cast expects double\n");
998 case FILTER_OP_CAST_NOP
:
1003 /* get context ref */
1004 case FILTER_OP_GET_CONTEXT_REF
:
1006 struct load_op
*insn
= (struct load_op
*) pc
;
1007 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1009 dbg_printf("Validate get context ref offset %u type dynamic\n",
1013 case FILTER_OP_GET_CONTEXT_REF_STRING
:
1015 struct load_op
*insn
= (struct load_op
*) pc
;
1016 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1018 dbg_printf("Validate get context ref offset %u type string\n",
1022 case FILTER_OP_GET_CONTEXT_REF_S64
:
1024 struct load_op
*insn
= (struct load_op
*) pc
;
1025 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1027 dbg_printf("Validate get context ref offset %u type s64\n",
1031 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
1033 struct load_op
*insn
= (struct load_op
*) pc
;
1034 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1036 dbg_printf("Validate get context ref offset %u type double\n",
1042 * Instructions for recursive traversal through composed types.
1044 case FILTER_OP_GET_CONTEXT_ROOT
:
1046 dbg_printf("Validate get context root\n");
1049 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
1051 dbg_printf("Validate get app context root\n");
1054 case FILTER_OP_GET_PAYLOAD_ROOT
:
1056 dbg_printf("Validate get payload root\n");
1059 case FILTER_OP_LOAD_FIELD
:
1062 * We tolerate that field type is unknown at validation,
1063 * because we are performing the load specialization in
1064 * a phase after validation.
1066 dbg_printf("Validate load field\n");
1069 case FILTER_OP_LOAD_FIELD_S8
:
1071 dbg_printf("Validate load field s8\n");
1074 case FILTER_OP_LOAD_FIELD_S16
:
1076 dbg_printf("Validate load field s16\n");
1079 case FILTER_OP_LOAD_FIELD_S32
:
1081 dbg_printf("Validate load field s32\n");
1084 case FILTER_OP_LOAD_FIELD_S64
:
1086 dbg_printf("Validate load field s64\n");
1089 case FILTER_OP_LOAD_FIELD_U8
:
1091 dbg_printf("Validate load field u8\n");
1094 case FILTER_OP_LOAD_FIELD_U16
:
1096 dbg_printf("Validate load field u16\n");
1099 case FILTER_OP_LOAD_FIELD_U32
:
1101 dbg_printf("Validate load field u32\n");
1104 case FILTER_OP_LOAD_FIELD_U64
:
1106 dbg_printf("Validate load field u64\n");
1109 case FILTER_OP_LOAD_FIELD_STRING
:
1111 dbg_printf("Validate load field string\n");
1114 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
1116 dbg_printf("Validate load field sequence\n");
1119 case FILTER_OP_LOAD_FIELD_DOUBLE
:
1121 dbg_printf("Validate load field double\n");
1125 case FILTER_OP_GET_SYMBOL
:
1127 struct load_op
*insn
= (struct load_op
*) pc
;
1128 struct get_symbol
*sym
= (struct get_symbol
*) insn
->data
;
1130 dbg_printf("Validate get symbol offset %u\n", sym
->offset
);
1134 case FILTER_OP_GET_SYMBOL_FIELD
:
1136 struct load_op
*insn
= (struct load_op
*) pc
;
1137 struct get_symbol
*sym
= (struct get_symbol
*) insn
->data
;
1139 dbg_printf("Validate get symbol field offset %u\n", sym
->offset
);
1143 case FILTER_OP_GET_INDEX_U16
:
1145 struct load_op
*insn
= (struct load_op
*) pc
;
1146 struct get_index_u16
*get_index
= (struct get_index_u16
*) insn
->data
;
1148 dbg_printf("Validate get index u16 index %u\n", get_index
->index
);
1152 case FILTER_OP_GET_INDEX_U64
:
1154 struct load_op
*insn
= (struct load_op
*) pc
;
1155 struct get_index_u64
*get_index
= (struct get_index_u64
*) insn
->data
;
1157 dbg_printf("Validate get index u64 index %" PRIu64
"\n", get_index
->index
);
1171 int validate_instruction_all_contexts(struct bytecode_runtime
*bytecode
,
1172 struct cds_lfht
*merge_points
,
1173 struct vstack
*stack
,
1178 unsigned long target_pc
= pc
- start_pc
;
1179 struct cds_lfht_iter iter
;
1180 struct cds_lfht_node
*node
;
1181 struct lfht_mp_node
*mp_node
;
1184 /* Validate the context resulting from the previous instruction */
1185 ret
= validate_instruction_context(bytecode
, stack
, start_pc
, pc
);
1189 /* Validate merge points */
1190 hash
= lttng_hash_mix((const char *) target_pc
, sizeof(target_pc
),
1192 cds_lfht_lookup(merge_points
, hash
, lttng_hash_match
,
1193 (const char *) target_pc
, &iter
);
1194 node
= cds_lfht_iter_get_node(&iter
);
1196 mp_node
= caa_container_of(node
, struct lfht_mp_node
, node
);
1198 dbg_printf("Filter: validate merge point at offset %lu\n",
1200 if (merge_points_compare(stack
, &mp_node
->stack
)) {
1201 ERR("Merge points differ for offset %lu\n",
1205 /* Once validated, we can remove the merge point */
1206 dbg_printf("Filter: remove merge point at offset %lu\n",
1208 ret
= cds_lfht_del(merge_points
, node
);
1216 * >0: going to next insn.
1217 * 0: success, stop iteration.
1221 int exec_insn(struct bytecode_runtime
*bytecode
,
1222 struct cds_lfht
*merge_points
,
1223 struct vstack
*stack
,
1228 char *next_pc
= *_next_pc
;
1230 switch (*(filter_opcode_t
*) pc
) {
1231 case FILTER_OP_UNKNOWN
:
1234 ERR("unknown bytecode op %u\n",
1235 (unsigned int) *(filter_opcode_t
*) pc
);
1240 case FILTER_OP_RETURN
:
1242 if (!vstack_ax(stack
)) {
1243 ERR("Empty stack\n");
1247 switch (vstack_ax(stack
)->type
) {
1252 ERR("Unexpected register type %d at end of bytecode\n",
1253 (int) vstack_ax(stack
)->type
);
1261 case FILTER_OP_RETURN_S64
:
1263 if (!vstack_ax(stack
)) {
1264 ERR("Empty stack\n");
1268 switch (vstack_ax(stack
)->type
) {
1273 ERR("Unexpected register type %d at end of bytecode\n",
1274 (int) vstack_ax(stack
)->type
);
1287 case FILTER_OP_PLUS
:
1288 case FILTER_OP_MINUS
:
1290 ERR("unsupported bytecode op %u\n",
1291 (unsigned int) *(filter_opcode_t
*) pc
);
1302 case FILTER_OP_EQ_STRING
:
1303 case FILTER_OP_NE_STRING
:
1304 case FILTER_OP_GT_STRING
:
1305 case FILTER_OP_LT_STRING
:
1306 case FILTER_OP_GE_STRING
:
1307 case FILTER_OP_LE_STRING
:
1308 case FILTER_OP_EQ_STAR_GLOB_STRING
:
1309 case FILTER_OP_NE_STAR_GLOB_STRING
:
1310 case FILTER_OP_EQ_S64
:
1311 case FILTER_OP_NE_S64
:
1312 case FILTER_OP_GT_S64
:
1313 case FILTER_OP_LT_S64
:
1314 case FILTER_OP_GE_S64
:
1315 case FILTER_OP_LE_S64
:
1316 case FILTER_OP_EQ_DOUBLE
:
1317 case FILTER_OP_NE_DOUBLE
:
1318 case FILTER_OP_GT_DOUBLE
:
1319 case FILTER_OP_LT_DOUBLE
:
1320 case FILTER_OP_GE_DOUBLE
:
1321 case FILTER_OP_LE_DOUBLE
:
1322 case FILTER_OP_EQ_DOUBLE_S64
:
1323 case FILTER_OP_NE_DOUBLE_S64
:
1324 case FILTER_OP_GT_DOUBLE_S64
:
1325 case FILTER_OP_LT_DOUBLE_S64
:
1326 case FILTER_OP_GE_DOUBLE_S64
:
1327 case FILTER_OP_LE_DOUBLE_S64
:
1328 case FILTER_OP_EQ_S64_DOUBLE
:
1329 case FILTER_OP_NE_S64_DOUBLE
:
1330 case FILTER_OP_GT_S64_DOUBLE
:
1331 case FILTER_OP_LT_S64_DOUBLE
:
1332 case FILTER_OP_GE_S64_DOUBLE
:
1333 case FILTER_OP_LE_S64_DOUBLE
:
1334 case FILTER_OP_BIT_RSHIFT
:
1335 case FILTER_OP_BIT_LSHIFT
:
1336 case FILTER_OP_BIT_AND
:
1337 case FILTER_OP_BIT_OR
:
1338 case FILTER_OP_BIT_XOR
:
1341 if (vstack_pop(stack
)) {
1345 if (!vstack_ax(stack
)) {
1346 ERR("Empty stack\n");
1350 switch (vstack_ax(stack
)->type
) {
1354 case REG_STAR_GLOB_STRING
:
1358 ERR("Unexpected register type %d for operation\n",
1359 (int) vstack_ax(stack
)->type
);
1364 vstack_ax(stack
)->type
= REG_S64
;
1365 next_pc
+= sizeof(struct binary_op
);
1370 case FILTER_OP_UNARY_PLUS
:
1371 case FILTER_OP_UNARY_MINUS
:
1374 if (!vstack_ax(stack
)) {
1375 ERR("Empty stack\n");
1379 switch (vstack_ax(stack
)->type
) {
1385 ERR("Unexpected register type %d for operation\n",
1386 (int) vstack_ax(stack
)->type
);
1390 vstack_ax(stack
)->type
= REG_UNKNOWN
;
1391 next_pc
+= sizeof(struct unary_op
);
1395 case FILTER_OP_UNARY_PLUS_S64
:
1396 case FILTER_OP_UNARY_MINUS_S64
:
1397 case FILTER_OP_UNARY_NOT_S64
:
1400 if (!vstack_ax(stack
)) {
1401 ERR("Empty stack\n");
1405 switch (vstack_ax(stack
)->type
) {
1409 ERR("Unexpected register type %d for operation\n",
1410 (int) vstack_ax(stack
)->type
);
1415 vstack_ax(stack
)->type
= REG_S64
;
1416 next_pc
+= sizeof(struct unary_op
);
1420 case FILTER_OP_UNARY_NOT
:
1423 if (!vstack_ax(stack
)) {
1424 ERR("Empty stack\n");
1428 switch (vstack_ax(stack
)->type
) {
1434 ERR("Unexpected register type %d for operation\n",
1435 (int) vstack_ax(stack
)->type
);
1440 vstack_ax(stack
)->type
= REG_S64
;
1441 next_pc
+= sizeof(struct unary_op
);
1445 case FILTER_OP_UNARY_BIT_NOT
:
1448 if (!vstack_ax(stack
)) {
1449 ERR("Empty stack\n");
1453 switch (vstack_ax(stack
)->type
) {
1459 ERR("Unexpected register type %d for operation\n",
1460 (int) vstack_ax(stack
)->type
);
1465 vstack_ax(stack
)->type
= REG_S64
;
1466 next_pc
+= sizeof(struct unary_op
);
1470 case FILTER_OP_UNARY_NOT_DOUBLE
:
1473 if (!vstack_ax(stack
)) {
1474 ERR("Empty stack\n");
1478 switch (vstack_ax(stack
)->type
) {
1482 ERR("Incorrect register type %d for operation\n",
1483 (int) vstack_ax(stack
)->type
);
1488 vstack_ax(stack
)->type
= REG_S64
;
1489 next_pc
+= sizeof(struct unary_op
);
1493 case FILTER_OP_UNARY_PLUS_DOUBLE
:
1494 case FILTER_OP_UNARY_MINUS_DOUBLE
:
1497 if (!vstack_ax(stack
)) {
1498 ERR("Empty stack\n");
1502 switch (vstack_ax(stack
)->type
) {
1506 ERR("Incorrect register type %d for operation\n",
1507 (int) vstack_ax(stack
)->type
);
1512 vstack_ax(stack
)->type
= REG_DOUBLE
;
1513 next_pc
+= sizeof(struct unary_op
);
1521 struct logical_op
*insn
= (struct logical_op
*) pc
;
1524 /* Add merge point to table */
1525 merge_ret
= merge_point_add_check(merge_points
,
1526 insn
->skip_offset
, stack
);
1532 if (!vstack_ax(stack
)) {
1533 ERR("Empty stack\n");
1537 /* There is always a cast-to-s64 operation before a or/and op. */
1538 switch (vstack_ax(stack
)->type
) {
1542 ERR("Incorrect register type %d for operation\n",
1543 (int) vstack_ax(stack
)->type
);
1548 /* Continue to next instruction */
1549 /* Pop 1 when jump not taken */
1550 if (vstack_pop(stack
)) {
1554 next_pc
+= sizeof(struct logical_op
);
1558 /* load field ref */
1559 case FILTER_OP_LOAD_FIELD_REF
:
1561 ERR("Unknown field ref type\n");
1565 /* get context ref */
1566 case FILTER_OP_GET_CONTEXT_REF
:
1568 if (vstack_push(stack
)) {
1572 vstack_ax(stack
)->type
= REG_UNKNOWN
;
1573 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1576 case FILTER_OP_LOAD_FIELD_REF_STRING
:
1577 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
1578 case FILTER_OP_GET_CONTEXT_REF_STRING
:
1580 if (vstack_push(stack
)) {
1584 vstack_ax(stack
)->type
= REG_STRING
;
1585 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1588 case FILTER_OP_LOAD_FIELD_REF_S64
:
1589 case FILTER_OP_GET_CONTEXT_REF_S64
:
1591 if (vstack_push(stack
)) {
1595 vstack_ax(stack
)->type
= REG_S64
;
1596 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1599 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
1600 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
1602 if (vstack_push(stack
)) {
1606 vstack_ax(stack
)->type
= REG_DOUBLE
;
1607 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1611 /* load from immediate operand */
1612 case FILTER_OP_LOAD_STRING
:
1614 struct load_op
*insn
= (struct load_op
*) pc
;
1616 if (vstack_push(stack
)) {
1620 vstack_ax(stack
)->type
= REG_STRING
;
1621 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1625 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
1627 struct load_op
*insn
= (struct load_op
*) pc
;
1629 if (vstack_push(stack
)) {
1633 vstack_ax(stack
)->type
= REG_STAR_GLOB_STRING
;
1634 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1638 case FILTER_OP_LOAD_S64
:
1640 if (vstack_push(stack
)) {
1644 vstack_ax(stack
)->type
= REG_S64
;
1645 next_pc
+= sizeof(struct load_op
)
1646 + sizeof(struct literal_numeric
);
1650 case FILTER_OP_LOAD_DOUBLE
:
1652 if (vstack_push(stack
)) {
1656 vstack_ax(stack
)->type
= REG_DOUBLE
;
1657 next_pc
+= sizeof(struct load_op
)
1658 + sizeof(struct literal_double
);
1662 case FILTER_OP_CAST_TO_S64
:
1663 case FILTER_OP_CAST_DOUBLE_TO_S64
:
1666 if (!vstack_ax(stack
)) {
1667 ERR("Empty stack\n");
1671 switch (vstack_ax(stack
)->type
) {
1677 ERR("Incorrect register type %d for cast\n",
1678 (int) vstack_ax(stack
)->type
);
1682 vstack_ax(stack
)->type
= REG_S64
;
1683 next_pc
+= sizeof(struct cast_op
);
1686 case FILTER_OP_CAST_NOP
:
1688 next_pc
+= sizeof(struct cast_op
);
1693 * Instructions for recursive traversal through composed types.
1695 case FILTER_OP_GET_CONTEXT_ROOT
:
1696 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
1697 case FILTER_OP_GET_PAYLOAD_ROOT
:
1699 if (vstack_push(stack
)) {
1703 vstack_ax(stack
)->type
= REG_PTR
;
1704 next_pc
+= sizeof(struct load_op
);
1708 case FILTER_OP_LOAD_FIELD
:
1711 if (!vstack_ax(stack
)) {
1712 ERR("Empty stack\n");
1716 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1717 ERR("Expecting pointer on top of stack\n");
1721 vstack_ax(stack
)->type
= REG_UNKNOWN
;
1722 next_pc
+= sizeof(struct load_op
);
1726 case FILTER_OP_LOAD_FIELD_S8
:
1727 case FILTER_OP_LOAD_FIELD_S16
:
1728 case FILTER_OP_LOAD_FIELD_S32
:
1729 case FILTER_OP_LOAD_FIELD_S64
:
1730 case FILTER_OP_LOAD_FIELD_U8
:
1731 case FILTER_OP_LOAD_FIELD_U16
:
1732 case FILTER_OP_LOAD_FIELD_U32
:
1733 case FILTER_OP_LOAD_FIELD_U64
:
1736 if (!vstack_ax(stack
)) {
1737 ERR("Empty stack\n");
1741 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1742 ERR("Expecting pointer on top of stack\n");
1746 vstack_ax(stack
)->type
= REG_S64
;
1747 next_pc
+= sizeof(struct load_op
);
1751 case FILTER_OP_LOAD_FIELD_STRING
:
1752 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
1755 if (!vstack_ax(stack
)) {
1756 ERR("Empty stack\n");
1760 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1761 ERR("Expecting pointer on top of stack\n");
1765 vstack_ax(stack
)->type
= REG_STRING
;
1766 next_pc
+= sizeof(struct load_op
);
1770 case FILTER_OP_LOAD_FIELD_DOUBLE
:
1773 if (!vstack_ax(stack
)) {
1774 ERR("Empty stack\n");
1778 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1779 ERR("Expecting pointer on top of stack\n");
1783 vstack_ax(stack
)->type
= REG_DOUBLE
;
1784 next_pc
+= sizeof(struct load_op
);
1788 case FILTER_OP_GET_SYMBOL
:
1789 case FILTER_OP_GET_SYMBOL_FIELD
:
1792 if (!vstack_ax(stack
)) {
1793 ERR("Empty stack\n");
1797 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1798 ERR("Expecting pointer on top of stack\n");
1802 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_symbol
);
1806 case FILTER_OP_GET_INDEX_U16
:
1809 if (!vstack_ax(stack
)) {
1810 ERR("Empty stack\n");
1814 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1815 ERR("Expecting pointer on top of stack\n");
1819 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u16
);
1823 case FILTER_OP_GET_INDEX_U64
:
1826 if (!vstack_ax(stack
)) {
1827 ERR("Empty stack\n");
1831 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1832 ERR("Expecting pointer on top of stack\n");
1836 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u64
);
1842 *_next_pc
= next_pc
;
1847 * Never called concurrently (hash seed is shared).
1849 int lttng_filter_validate_bytecode(struct bytecode_runtime
*bytecode
)
1851 struct cds_lfht
*merge_points
;
1852 char *pc
, *next_pc
, *start_pc
;
1854 struct vstack stack
;
1856 vstack_init(&stack
);
1858 if (!lttng_hash_seed_ready
) {
1859 lttng_hash_seed
= time(NULL
);
1860 lttng_hash_seed_ready
= 1;
1863 * Note: merge_points hash table used by single thread, and
1864 * never concurrently resized. Therefore, we can use it without
1865 * holding RCU read-side lock and free nodes without using
1868 merge_points
= cds_lfht_new(DEFAULT_NR_MERGE_POINTS
,
1869 MIN_NR_BUCKETS
, MAX_NR_BUCKETS
,
1871 if (!merge_points
) {
1872 ERR("Error allocating hash table for bytecode validation\n");
1875 start_pc
= &bytecode
->code
[0];
1876 for (pc
= next_pc
= start_pc
; pc
- start_pc
< bytecode
->len
;
1878 ret
= bytecode_validate_overflow(bytecode
, start_pc
, pc
);
1881 ERR("filter bytecode overflow\n");
1884 dbg_printf("Validating op %s (%u)\n",
1885 print_op((unsigned int) *(filter_opcode_t
*) pc
),
1886 (unsigned int) *(filter_opcode_t
*) pc
);
1889 * For each instruction, validate the current context
1890 * (traversal of entire execution flow), and validate
1891 * all merge points targeting this instruction.
1893 ret
= validate_instruction_all_contexts(bytecode
, merge_points
,
1894 &stack
, start_pc
, pc
);
1897 ret
= exec_insn(bytecode
, merge_points
, &stack
, &next_pc
, pc
);
1902 if (delete_all_nodes(merge_points
)) {
1904 ERR("Unexpected merge points\n");
1908 if (cds_lfht_destroy(merge_points
, NULL
)) {
1909 ERR("Error destroying hash table\n");