2 * lttng-filter-validator.c
4 * LTTng UST filter bytecode validator.
6 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <urcu/rculfhash.h>
35 #include "lttng-filter.h"
36 #include "lttng-hash-helper.h"
37 #include "string-utils.h"
38 #include "ust-events-internal.h"
41 * Number of merge points for hash table size. Hash table initialized to
42 * that size, and we do not resize, because we do not want to trigger
43 * RCU worker thread execution: fall-back on linear traversal if number
44 * of merge points exceeds this value.
46 #define DEFAULT_NR_MERGE_POINTS 128
47 #define MIN_NR_BUCKETS 128
48 #define MAX_NR_BUCKETS 128
50 /* merge point table node */
52 struct cds_lfht_node node
;
54 /* Context at merge point */
56 unsigned long target_pc
;
/*
 * Hash seed for the merge-point hash table. Initialized lazily (from
 * time(NULL)) by lttng_filter_validate_bytecode(), which is documented
 * below as never being called concurrently, so no synchronization is
 * needed on these two variables.
 */
static unsigned long lttng_hash_seed;
static unsigned int lttng_hash_seed_ready;
63 int lttng_hash_match(struct cds_lfht_node
*node
, const void *key
)
65 struct lfht_mp_node
*mp_node
=
66 caa_container_of(node
, struct lfht_mp_node
, node
);
67 unsigned long key_pc
= (unsigned long) key
;
69 if (mp_node
->target_pc
== key_pc
)
76 int merge_points_compare(const struct vstack
*stacka
,
77 const struct vstack
*stackb
)
81 if (stacka
->top
!= stackb
->top
)
83 len
= stacka
->top
+ 1;
85 for (i
= 0; i
< len
; i
++) {
86 if (stacka
->e
[i
].type
!= REG_UNKNOWN
87 && stackb
->e
[i
].type
!= REG_UNKNOWN
88 && stacka
->e
[i
].type
!= stackb
->e
[i
].type
)
95 int merge_point_add_check(struct cds_lfht
*ht
, unsigned long target_pc
,
96 const struct vstack
*stack
)
98 struct lfht_mp_node
*node
;
99 unsigned long hash
= lttng_hash_mix((const char *) target_pc
,
102 struct cds_lfht_node
*ret
;
104 dbg_printf("Filter: adding merge point at offset %lu, hash %lu\n",
106 node
= zmalloc(sizeof(struct lfht_mp_node
));
109 node
->target_pc
= target_pc
;
110 memcpy(&node
->stack
, stack
, sizeof(node
->stack
));
111 ret
= cds_lfht_add_unique(ht
, hash
, lttng_hash_match
,
112 (const char *) target_pc
, &node
->node
);
113 if (ret
!= &node
->node
) {
114 struct lfht_mp_node
*ret_mp
=
115 caa_container_of(ret
, struct lfht_mp_node
, node
);
117 /* Key already present */
118 dbg_printf("Filter: compare merge points for offset %lu, hash %lu\n",
121 if (merge_points_compare(stack
, &ret_mp
->stack
)) {
122 ERR("Merge points differ for offset %lu\n",
131 * Binary comparators use top of stack and top of stack -1.
132 * Return 0 if typing is known to match, 1 if typing is dynamic
133 * (unknown), negative error value on error.
136 int bin_op_compare_check(struct vstack
*stack
, filter_opcode_t opcode
,
139 if (unlikely(!vstack_ax(stack
) || !vstack_bx(stack
)))
142 switch (vstack_ax(stack
)->type
) {
149 switch (vstack_bx(stack
)->type
) {
157 case REG_STAR_GLOB_STRING
:
158 if (opcode
!= FILTER_OP_EQ
&& opcode
!= FILTER_OP_NE
) {
167 case REG_STAR_GLOB_STRING
:
168 switch (vstack_bx(stack
)->type
) {
175 if (opcode
!= FILTER_OP_EQ
&& opcode
!= FILTER_OP_NE
) {
179 case REG_STAR_GLOB_STRING
:
187 switch (vstack_bx(stack
)->type
) {
194 case REG_STAR_GLOB_STRING
:
208 ERR("type mismatch for '%s' binary operator\n", str
);
212 ERR("empty stack for '%s' binary operator\n", str
);
216 ERR("unknown type for '%s' binary operator\n", str
);
221 * Binary bitwise operators use top of stack and top of stack -1.
222 * Return 0 if typing is known to match, 1 if typing is dynamic
223 * (unknown), negative error value on error.
226 int bin_op_bitwise_check(struct vstack
*stack
, filter_opcode_t opcode
,
229 if (unlikely(!vstack_ax(stack
) || !vstack_bx(stack
)))
232 switch (vstack_ax(stack
)->type
) {
239 switch (vstack_bx(stack
)->type
) {
256 ERR("empty stack for '%s' binary operator\n", str
);
260 ERR("unknown type for '%s' binary operator\n", str
);
265 int validate_get_symbol(struct bytecode_runtime
*bytecode
,
266 const struct get_symbol
*sym
)
268 const char *str
, *str_limit
;
271 if (sym
->offset
>= bytecode
->p
.bc
->bc
.len
- bytecode
->p
.bc
->bc
.reloc_offset
)
274 str
= bytecode
->p
.bc
->bc
.data
+ bytecode
->p
.bc
->bc
.reloc_offset
+ sym
->offset
;
275 str_limit
= bytecode
->p
.bc
->bc
.data
+ bytecode
->p
.bc
->bc
.len
;
276 len_limit
= str_limit
- str
;
277 if (strnlen(str
, len_limit
) == len_limit
)
283 * Validate bytecode range overflow within the validation pass.
284 * Called for each instruction encountered.
287 int bytecode_validate_overflow(struct bytecode_runtime
*bytecode
,
288 char *start_pc
, char *pc
)
292 switch (*(filter_opcode_t
*) pc
) {
293 case FILTER_OP_UNKNOWN
:
296 ERR("unknown bytecode op %u\n",
297 (unsigned int) *(filter_opcode_t
*) pc
);
302 case FILTER_OP_RETURN
:
303 case FILTER_OP_RETURN_S64
:
305 if (unlikely(pc
+ sizeof(struct return_op
)
306 > start_pc
+ bytecode
->len
)) {
317 case FILTER_OP_MINUS
:
319 ERR("unsupported bytecode op %u\n",
320 (unsigned int) *(filter_opcode_t
*) pc
);
331 case FILTER_OP_EQ_STRING
:
332 case FILTER_OP_NE_STRING
:
333 case FILTER_OP_GT_STRING
:
334 case FILTER_OP_LT_STRING
:
335 case FILTER_OP_GE_STRING
:
336 case FILTER_OP_LE_STRING
:
337 case FILTER_OP_EQ_STAR_GLOB_STRING
:
338 case FILTER_OP_NE_STAR_GLOB_STRING
:
339 case FILTER_OP_EQ_S64
:
340 case FILTER_OP_NE_S64
:
341 case FILTER_OP_GT_S64
:
342 case FILTER_OP_LT_S64
:
343 case FILTER_OP_GE_S64
:
344 case FILTER_OP_LE_S64
:
345 case FILTER_OP_EQ_DOUBLE
:
346 case FILTER_OP_NE_DOUBLE
:
347 case FILTER_OP_GT_DOUBLE
:
348 case FILTER_OP_LT_DOUBLE
:
349 case FILTER_OP_GE_DOUBLE
:
350 case FILTER_OP_LE_DOUBLE
:
351 case FILTER_OP_EQ_DOUBLE_S64
:
352 case FILTER_OP_NE_DOUBLE_S64
:
353 case FILTER_OP_GT_DOUBLE_S64
:
354 case FILTER_OP_LT_DOUBLE_S64
:
355 case FILTER_OP_GE_DOUBLE_S64
:
356 case FILTER_OP_LE_DOUBLE_S64
:
357 case FILTER_OP_EQ_S64_DOUBLE
:
358 case FILTER_OP_NE_S64_DOUBLE
:
359 case FILTER_OP_GT_S64_DOUBLE
:
360 case FILTER_OP_LT_S64_DOUBLE
:
361 case FILTER_OP_GE_S64_DOUBLE
:
362 case FILTER_OP_LE_S64_DOUBLE
:
363 case FILTER_OP_BIT_RSHIFT
:
364 case FILTER_OP_BIT_LSHIFT
:
365 case FILTER_OP_BIT_AND
:
366 case FILTER_OP_BIT_OR
:
367 case FILTER_OP_BIT_XOR
:
369 if (unlikely(pc
+ sizeof(struct binary_op
)
370 > start_pc
+ bytecode
->len
)) {
377 case FILTER_OP_UNARY_PLUS
:
378 case FILTER_OP_UNARY_MINUS
:
379 case FILTER_OP_UNARY_NOT
:
380 case FILTER_OP_UNARY_PLUS_S64
:
381 case FILTER_OP_UNARY_MINUS_S64
:
382 case FILTER_OP_UNARY_NOT_S64
:
383 case FILTER_OP_UNARY_PLUS_DOUBLE
:
384 case FILTER_OP_UNARY_MINUS_DOUBLE
:
385 case FILTER_OP_UNARY_NOT_DOUBLE
:
386 case FILTER_OP_UNARY_BIT_NOT
:
388 if (unlikely(pc
+ sizeof(struct unary_op
)
389 > start_pc
+ bytecode
->len
)) {
399 if (unlikely(pc
+ sizeof(struct logical_op
)
400 > start_pc
+ bytecode
->len
)) {
407 case FILTER_OP_LOAD_FIELD_REF
:
409 ERR("Unknown field ref type\n");
414 /* get context ref */
415 case FILTER_OP_GET_CONTEXT_REF
:
416 case FILTER_OP_LOAD_FIELD_REF_STRING
:
417 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
418 case FILTER_OP_LOAD_FIELD_REF_S64
:
419 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
420 case FILTER_OP_GET_CONTEXT_REF_STRING
:
421 case FILTER_OP_GET_CONTEXT_REF_S64
:
422 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
424 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct field_ref
)
425 > start_pc
+ bytecode
->len
)) {
431 /* load from immediate operand */
432 case FILTER_OP_LOAD_STRING
:
433 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
435 struct load_op
*insn
= (struct load_op
*) pc
;
436 uint32_t str_len
, maxlen
;
438 if (unlikely(pc
+ sizeof(struct load_op
)
439 > start_pc
+ bytecode
->len
)) {
444 maxlen
= start_pc
+ bytecode
->len
- pc
- sizeof(struct load_op
);
445 str_len
= strnlen(insn
->data
, maxlen
);
446 if (unlikely(str_len
>= maxlen
)) {
447 /* Final '\0' not found within range */
453 case FILTER_OP_LOAD_S64
:
455 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct literal_numeric
)
456 > start_pc
+ bytecode
->len
)) {
462 case FILTER_OP_LOAD_DOUBLE
:
464 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct literal_double
)
465 > start_pc
+ bytecode
->len
)) {
471 case FILTER_OP_CAST_TO_S64
:
472 case FILTER_OP_CAST_DOUBLE_TO_S64
:
473 case FILTER_OP_CAST_NOP
:
475 if (unlikely(pc
+ sizeof(struct cast_op
)
476 > start_pc
+ bytecode
->len
)) {
483 * Instructions for recursive traversal through composed types.
485 case FILTER_OP_GET_CONTEXT_ROOT
:
486 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
487 case FILTER_OP_GET_PAYLOAD_ROOT
:
488 case FILTER_OP_LOAD_FIELD
:
489 case FILTER_OP_LOAD_FIELD_S8
:
490 case FILTER_OP_LOAD_FIELD_S16
:
491 case FILTER_OP_LOAD_FIELD_S32
:
492 case FILTER_OP_LOAD_FIELD_S64
:
493 case FILTER_OP_LOAD_FIELD_U8
:
494 case FILTER_OP_LOAD_FIELD_U16
:
495 case FILTER_OP_LOAD_FIELD_U32
:
496 case FILTER_OP_LOAD_FIELD_U64
:
497 case FILTER_OP_LOAD_FIELD_STRING
:
498 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
499 case FILTER_OP_LOAD_FIELD_DOUBLE
:
500 if (unlikely(pc
+ sizeof(struct load_op
)
501 > start_pc
+ bytecode
->len
)) {
506 case FILTER_OP_GET_SYMBOL
:
508 struct load_op
*insn
= (struct load_op
*) pc
;
509 struct get_symbol
*sym
= (struct get_symbol
*) insn
->data
;
511 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct get_symbol
)
512 > start_pc
+ bytecode
->len
)) {
516 ret
= validate_get_symbol(bytecode
, sym
);
520 case FILTER_OP_GET_SYMBOL_FIELD
:
521 ERR("Unexpected get symbol field");
525 case FILTER_OP_GET_INDEX_U16
:
526 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct get_index_u16
)
527 > start_pc
+ bytecode
->len
)) {
532 case FILTER_OP_GET_INDEX_U64
:
533 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct get_index_u64
)
534 > start_pc
+ bytecode
->len
)) {
544 unsigned long delete_all_nodes(struct cds_lfht
*ht
)
546 struct cds_lfht_iter iter
;
547 struct lfht_mp_node
*node
;
548 unsigned long nr_nodes
= 0;
550 cds_lfht_for_each_entry(ht
, &iter
, node
, node
) {
553 ret
= cds_lfht_del(ht
, cds_lfht_iter_get_node(&iter
));
555 /* note: this hash table is never used concurrently */
568 int validate_instruction_context(struct bytecode_runtime
*bytecode
,
569 struct vstack
*stack
,
574 const filter_opcode_t opcode
= *(filter_opcode_t
*) pc
;
577 case FILTER_OP_UNKNOWN
:
580 ERR("unknown bytecode op %u\n",
581 (unsigned int) *(filter_opcode_t
*) pc
);
586 case FILTER_OP_RETURN
:
587 case FILTER_OP_RETURN_S64
:
597 case FILTER_OP_MINUS
:
599 ERR("unsupported bytecode op %u\n",
600 (unsigned int) opcode
);
607 ret
= bin_op_compare_check(stack
, opcode
, "==");
614 ret
= bin_op_compare_check(stack
, opcode
, "!=");
621 ret
= bin_op_compare_check(stack
, opcode
, ">");
628 ret
= bin_op_compare_check(stack
, opcode
, "<");
635 ret
= bin_op_compare_check(stack
, opcode
, ">=");
642 ret
= bin_op_compare_check(stack
, opcode
, "<=");
648 case FILTER_OP_EQ_STRING
:
649 case FILTER_OP_NE_STRING
:
650 case FILTER_OP_GT_STRING
:
651 case FILTER_OP_LT_STRING
:
652 case FILTER_OP_GE_STRING
:
653 case FILTER_OP_LE_STRING
:
655 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
656 ERR("Empty stack\n");
660 if (vstack_ax(stack
)->type
!= REG_STRING
661 || vstack_bx(stack
)->type
!= REG_STRING
) {
662 ERR("Unexpected register type for string comparator\n");
669 case FILTER_OP_EQ_STAR_GLOB_STRING
:
670 case FILTER_OP_NE_STAR_GLOB_STRING
:
672 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
673 ERR("Empty stack\n");
677 if (vstack_ax(stack
)->type
!= REG_STAR_GLOB_STRING
678 && vstack_bx(stack
)->type
!= REG_STAR_GLOB_STRING
) {
679 ERR("Unexpected register type for globbing pattern comparator\n");
686 case FILTER_OP_EQ_S64
:
687 case FILTER_OP_NE_S64
:
688 case FILTER_OP_GT_S64
:
689 case FILTER_OP_LT_S64
:
690 case FILTER_OP_GE_S64
:
691 case FILTER_OP_LE_S64
:
693 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
694 ERR("Empty stack\n");
698 if (vstack_ax(stack
)->type
!= REG_S64
699 || vstack_bx(stack
)->type
!= REG_S64
) {
700 ERR("Unexpected register type for s64 comparator\n");
707 case FILTER_OP_EQ_DOUBLE
:
708 case FILTER_OP_NE_DOUBLE
:
709 case FILTER_OP_GT_DOUBLE
:
710 case FILTER_OP_LT_DOUBLE
:
711 case FILTER_OP_GE_DOUBLE
:
712 case FILTER_OP_LE_DOUBLE
:
714 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
715 ERR("Empty stack\n");
719 if (vstack_ax(stack
)->type
!= REG_DOUBLE
&& vstack_bx(stack
)->type
!= REG_DOUBLE
) {
720 ERR("Double operator should have two double registers\n");
727 case FILTER_OP_EQ_DOUBLE_S64
:
728 case FILTER_OP_NE_DOUBLE_S64
:
729 case FILTER_OP_GT_DOUBLE_S64
:
730 case FILTER_OP_LT_DOUBLE_S64
:
731 case FILTER_OP_GE_DOUBLE_S64
:
732 case FILTER_OP_LE_DOUBLE_S64
:
734 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
735 ERR("Empty stack\n");
739 if (vstack_ax(stack
)->type
!= REG_S64
&& vstack_bx(stack
)->type
!= REG_DOUBLE
) {
740 ERR("Double-S64 operator has unexpected register types\n");
747 case FILTER_OP_EQ_S64_DOUBLE
:
748 case FILTER_OP_NE_S64_DOUBLE
:
749 case FILTER_OP_GT_S64_DOUBLE
:
750 case FILTER_OP_LT_S64_DOUBLE
:
751 case FILTER_OP_GE_S64_DOUBLE
:
752 case FILTER_OP_LE_S64_DOUBLE
:
754 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
755 ERR("Empty stack\n");
759 if (vstack_ax(stack
)->type
!= REG_DOUBLE
&& vstack_bx(stack
)->type
!= REG_S64
) {
760 ERR("S64-Double operator has unexpected register types\n");
767 case FILTER_OP_BIT_RSHIFT
:
768 ret
= bin_op_bitwise_check(stack
, opcode
, ">>");
772 case FILTER_OP_BIT_LSHIFT
:
773 ret
= bin_op_bitwise_check(stack
, opcode
, "<<");
777 case FILTER_OP_BIT_AND
:
778 ret
= bin_op_bitwise_check(stack
, opcode
, "&");
782 case FILTER_OP_BIT_OR
:
783 ret
= bin_op_bitwise_check(stack
, opcode
, "|");
787 case FILTER_OP_BIT_XOR
:
788 ret
= bin_op_bitwise_check(stack
, opcode
, "^");
794 case FILTER_OP_UNARY_PLUS
:
795 case FILTER_OP_UNARY_MINUS
:
796 case FILTER_OP_UNARY_NOT
:
798 if (!vstack_ax(stack
)) {
799 ERR("Empty stack\n");
803 switch (vstack_ax(stack
)->type
) {
805 ERR("unknown register type\n");
810 case REG_STAR_GLOB_STRING
:
811 ERR("Unary op can only be applied to numeric or floating point registers\n");
823 case FILTER_OP_UNARY_BIT_NOT
:
825 if (!vstack_ax(stack
)) {
826 ERR("Empty stack\n");
830 switch (vstack_ax(stack
)->type
) {
832 ERR("unknown register type\n");
837 case REG_STAR_GLOB_STRING
:
839 ERR("Unary bitwise op can only be applied to numeric registers\n");
850 case FILTER_OP_UNARY_PLUS_S64
:
851 case FILTER_OP_UNARY_MINUS_S64
:
852 case FILTER_OP_UNARY_NOT_S64
:
854 if (!vstack_ax(stack
)) {
855 ERR("Empty stack\n");
859 if (vstack_ax(stack
)->type
!= REG_S64
) {
860 ERR("Invalid register type\n");
867 case FILTER_OP_UNARY_PLUS_DOUBLE
:
868 case FILTER_OP_UNARY_MINUS_DOUBLE
:
869 case FILTER_OP_UNARY_NOT_DOUBLE
:
871 if (!vstack_ax(stack
)) {
872 ERR("Empty stack\n");
876 if (vstack_ax(stack
)->type
!= REG_DOUBLE
) {
877 ERR("Invalid register type\n");
888 struct logical_op
*insn
= (struct logical_op
*) pc
;
890 if (!vstack_ax(stack
)) {
891 ERR("Empty stack\n");
895 if (vstack_ax(stack
)->type
!= REG_S64
896 && vstack_ax(stack
)->type
!= REG_UNKNOWN
) {
897 ERR("Logical comparator expects S64 or dynamic register\n");
902 dbg_printf("Validate jumping to bytecode offset %u\n",
903 (unsigned int) insn
->skip_offset
);
904 if (unlikely(start_pc
+ insn
->skip_offset
<= pc
)) {
905 ERR("Loops are not allowed in bytecode\n");
913 case FILTER_OP_LOAD_FIELD_REF
:
915 ERR("Unknown field ref type\n");
919 case FILTER_OP_LOAD_FIELD_REF_STRING
:
920 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
922 struct load_op
*insn
= (struct load_op
*) pc
;
923 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
925 dbg_printf("Validate load field ref offset %u type string\n",
929 case FILTER_OP_LOAD_FIELD_REF_S64
:
931 struct load_op
*insn
= (struct load_op
*) pc
;
932 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
934 dbg_printf("Validate load field ref offset %u type s64\n",
938 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
940 struct load_op
*insn
= (struct load_op
*) pc
;
941 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
943 dbg_printf("Validate load field ref offset %u type double\n",
948 /* load from immediate operand */
949 case FILTER_OP_LOAD_STRING
:
950 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
955 case FILTER_OP_LOAD_S64
:
960 case FILTER_OP_LOAD_DOUBLE
:
965 case FILTER_OP_CAST_TO_S64
:
966 case FILTER_OP_CAST_DOUBLE_TO_S64
:
968 struct cast_op
*insn
= (struct cast_op
*) pc
;
970 if (!vstack_ax(stack
)) {
971 ERR("Empty stack\n");
975 switch (vstack_ax(stack
)->type
) {
977 ERR("unknown register type\n");
982 case REG_STAR_GLOB_STRING
:
983 ERR("Cast op can only be applied to numeric or floating point registers\n");
993 if (insn
->op
== FILTER_OP_CAST_DOUBLE_TO_S64
) {
994 if (vstack_ax(stack
)->type
!= REG_DOUBLE
) {
995 ERR("Cast expects double\n");
1002 case FILTER_OP_CAST_NOP
:
1007 /* get context ref */
1008 case FILTER_OP_GET_CONTEXT_REF
:
1010 struct load_op
*insn
= (struct load_op
*) pc
;
1011 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1013 dbg_printf("Validate get context ref offset %u type dynamic\n",
1017 case FILTER_OP_GET_CONTEXT_REF_STRING
:
1019 struct load_op
*insn
= (struct load_op
*) pc
;
1020 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1022 dbg_printf("Validate get context ref offset %u type string\n",
1026 case FILTER_OP_GET_CONTEXT_REF_S64
:
1028 struct load_op
*insn
= (struct load_op
*) pc
;
1029 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1031 dbg_printf("Validate get context ref offset %u type s64\n",
1035 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
1037 struct load_op
*insn
= (struct load_op
*) pc
;
1038 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1040 dbg_printf("Validate get context ref offset %u type double\n",
1046 * Instructions for recursive traversal through composed types.
1048 case FILTER_OP_GET_CONTEXT_ROOT
:
1050 dbg_printf("Validate get context root\n");
1053 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
1055 dbg_printf("Validate get app context root\n");
1058 case FILTER_OP_GET_PAYLOAD_ROOT
:
1060 dbg_printf("Validate get payload root\n");
1063 case FILTER_OP_LOAD_FIELD
:
1066 * We tolerate that field type is unknown at validation,
1067 * because we are performing the load specialization in
1068 * a phase after validation.
1070 dbg_printf("Validate load field\n");
1073 case FILTER_OP_LOAD_FIELD_S8
:
1075 dbg_printf("Validate load field s8\n");
1078 case FILTER_OP_LOAD_FIELD_S16
:
1080 dbg_printf("Validate load field s16\n");
1083 case FILTER_OP_LOAD_FIELD_S32
:
1085 dbg_printf("Validate load field s32\n");
1088 case FILTER_OP_LOAD_FIELD_S64
:
1090 dbg_printf("Validate load field s64\n");
1093 case FILTER_OP_LOAD_FIELD_U8
:
1095 dbg_printf("Validate load field u8\n");
1098 case FILTER_OP_LOAD_FIELD_U16
:
1100 dbg_printf("Validate load field u16\n");
1103 case FILTER_OP_LOAD_FIELD_U32
:
1105 dbg_printf("Validate load field u32\n");
1108 case FILTER_OP_LOAD_FIELD_U64
:
1110 dbg_printf("Validate load field u64\n");
1113 case FILTER_OP_LOAD_FIELD_STRING
:
1115 dbg_printf("Validate load field string\n");
1118 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
1120 dbg_printf("Validate load field sequence\n");
1123 case FILTER_OP_LOAD_FIELD_DOUBLE
:
1125 dbg_printf("Validate load field double\n");
1129 case FILTER_OP_GET_SYMBOL
:
1131 struct load_op
*insn
= (struct load_op
*) pc
;
1132 struct get_symbol
*sym
= (struct get_symbol
*) insn
->data
;
1134 dbg_printf("Validate get symbol offset %u\n", sym
->offset
);
1138 case FILTER_OP_GET_SYMBOL_FIELD
:
1140 struct load_op
*insn
= (struct load_op
*) pc
;
1141 struct get_symbol
*sym
= (struct get_symbol
*) insn
->data
;
1143 dbg_printf("Validate get symbol field offset %u\n", sym
->offset
);
1147 case FILTER_OP_GET_INDEX_U16
:
1149 struct load_op
*insn
= (struct load_op
*) pc
;
1150 struct get_index_u16
*get_index
= (struct get_index_u16
*) insn
->data
;
1152 dbg_printf("Validate get index u16 index %u\n", get_index
->index
);
1156 case FILTER_OP_GET_INDEX_U64
:
1158 struct load_op
*insn
= (struct load_op
*) pc
;
1159 struct get_index_u64
*get_index
= (struct get_index_u64
*) insn
->data
;
1161 dbg_printf("Validate get index u64 index %" PRIu64
"\n", get_index
->index
);
1175 int validate_instruction_all_contexts(struct bytecode_runtime
*bytecode
,
1176 struct cds_lfht
*merge_points
,
1177 struct vstack
*stack
,
1182 unsigned long target_pc
= pc
- start_pc
;
1183 struct cds_lfht_iter iter
;
1184 struct cds_lfht_node
*node
;
1185 struct lfht_mp_node
*mp_node
;
1188 /* Validate the context resulting from the previous instruction */
1189 ret
= validate_instruction_context(bytecode
, stack
, start_pc
, pc
);
1193 /* Validate merge points */
1194 hash
= lttng_hash_mix((const char *) target_pc
, sizeof(target_pc
),
1196 cds_lfht_lookup(merge_points
, hash
, lttng_hash_match
,
1197 (const char *) target_pc
, &iter
);
1198 node
= cds_lfht_iter_get_node(&iter
);
1200 mp_node
= caa_container_of(node
, struct lfht_mp_node
, node
);
1202 dbg_printf("Filter: validate merge point at offset %lu\n",
1204 if (merge_points_compare(stack
, &mp_node
->stack
)) {
1205 ERR("Merge points differ for offset %lu\n",
1209 /* Once validated, we can remove the merge point */
1210 dbg_printf("Filter: remove merge point at offset %lu\n",
1212 ret
= cds_lfht_del(merge_points
, node
);
1220 * >0: going to next insn.
1221 * 0: success, stop iteration.
1225 int exec_insn(struct bytecode_runtime
*bytecode
,
1226 struct cds_lfht
*merge_points
,
1227 struct vstack
*stack
,
1232 char *next_pc
= *_next_pc
;
1234 switch (*(filter_opcode_t
*) pc
) {
1235 case FILTER_OP_UNKNOWN
:
1238 ERR("unknown bytecode op %u\n",
1239 (unsigned int) *(filter_opcode_t
*) pc
);
1244 case FILTER_OP_RETURN
:
1246 if (!vstack_ax(stack
)) {
1247 ERR("Empty stack\n");
1251 switch (vstack_ax(stack
)->type
) {
1256 ERR("Unexpected register type %d at end of bytecode\n",
1257 (int) vstack_ax(stack
)->type
);
1265 case FILTER_OP_RETURN_S64
:
1267 if (!vstack_ax(stack
)) {
1268 ERR("Empty stack\n");
1272 switch (vstack_ax(stack
)->type
) {
1277 ERR("Unexpected register type %d at end of bytecode\n",
1278 (int) vstack_ax(stack
)->type
);
1291 case FILTER_OP_PLUS
:
1292 case FILTER_OP_MINUS
:
1294 ERR("unsupported bytecode op %u\n",
1295 (unsigned int) *(filter_opcode_t
*) pc
);
1306 case FILTER_OP_EQ_STRING
:
1307 case FILTER_OP_NE_STRING
:
1308 case FILTER_OP_GT_STRING
:
1309 case FILTER_OP_LT_STRING
:
1310 case FILTER_OP_GE_STRING
:
1311 case FILTER_OP_LE_STRING
:
1312 case FILTER_OP_EQ_STAR_GLOB_STRING
:
1313 case FILTER_OP_NE_STAR_GLOB_STRING
:
1314 case FILTER_OP_EQ_S64
:
1315 case FILTER_OP_NE_S64
:
1316 case FILTER_OP_GT_S64
:
1317 case FILTER_OP_LT_S64
:
1318 case FILTER_OP_GE_S64
:
1319 case FILTER_OP_LE_S64
:
1320 case FILTER_OP_EQ_DOUBLE
:
1321 case FILTER_OP_NE_DOUBLE
:
1322 case FILTER_OP_GT_DOUBLE
:
1323 case FILTER_OP_LT_DOUBLE
:
1324 case FILTER_OP_GE_DOUBLE
:
1325 case FILTER_OP_LE_DOUBLE
:
1326 case FILTER_OP_EQ_DOUBLE_S64
:
1327 case FILTER_OP_NE_DOUBLE_S64
:
1328 case FILTER_OP_GT_DOUBLE_S64
:
1329 case FILTER_OP_LT_DOUBLE_S64
:
1330 case FILTER_OP_GE_DOUBLE_S64
:
1331 case FILTER_OP_LE_DOUBLE_S64
:
1332 case FILTER_OP_EQ_S64_DOUBLE
:
1333 case FILTER_OP_NE_S64_DOUBLE
:
1334 case FILTER_OP_GT_S64_DOUBLE
:
1335 case FILTER_OP_LT_S64_DOUBLE
:
1336 case FILTER_OP_GE_S64_DOUBLE
:
1337 case FILTER_OP_LE_S64_DOUBLE
:
1338 case FILTER_OP_BIT_RSHIFT
:
1339 case FILTER_OP_BIT_LSHIFT
:
1340 case FILTER_OP_BIT_AND
:
1341 case FILTER_OP_BIT_OR
:
1342 case FILTER_OP_BIT_XOR
:
1345 if (vstack_pop(stack
)) {
1349 if (!vstack_ax(stack
)) {
1350 ERR("Empty stack\n");
1354 switch (vstack_ax(stack
)->type
) {
1358 case REG_STAR_GLOB_STRING
:
1362 ERR("Unexpected register type %d for operation\n",
1363 (int) vstack_ax(stack
)->type
);
1368 vstack_ax(stack
)->type
= REG_S64
;
1369 next_pc
+= sizeof(struct binary_op
);
1374 case FILTER_OP_UNARY_PLUS
:
1375 case FILTER_OP_UNARY_MINUS
:
1378 if (!vstack_ax(stack
)) {
1379 ERR("Empty stack\n");
1383 switch (vstack_ax(stack
)->type
) {
1389 ERR("Unexpected register type %d for operation\n",
1390 (int) vstack_ax(stack
)->type
);
1394 vstack_ax(stack
)->type
= REG_UNKNOWN
;
1395 next_pc
+= sizeof(struct unary_op
);
1399 case FILTER_OP_UNARY_PLUS_S64
:
1400 case FILTER_OP_UNARY_MINUS_S64
:
1401 case FILTER_OP_UNARY_NOT_S64
:
1404 if (!vstack_ax(stack
)) {
1405 ERR("Empty stack\n");
1409 switch (vstack_ax(stack
)->type
) {
1413 ERR("Unexpected register type %d for operation\n",
1414 (int) vstack_ax(stack
)->type
);
1419 vstack_ax(stack
)->type
= REG_S64
;
1420 next_pc
+= sizeof(struct unary_op
);
1424 case FILTER_OP_UNARY_NOT
:
1427 if (!vstack_ax(stack
)) {
1428 ERR("Empty stack\n");
1432 switch (vstack_ax(stack
)->type
) {
1438 ERR("Unexpected register type %d for operation\n",
1439 (int) vstack_ax(stack
)->type
);
1444 vstack_ax(stack
)->type
= REG_S64
;
1445 next_pc
+= sizeof(struct unary_op
);
1449 case FILTER_OP_UNARY_BIT_NOT
:
1452 if (!vstack_ax(stack
)) {
1453 ERR("Empty stack\n");
1457 switch (vstack_ax(stack
)->type
) {
1463 ERR("Unexpected register type %d for operation\n",
1464 (int) vstack_ax(stack
)->type
);
1469 vstack_ax(stack
)->type
= REG_S64
;
1470 next_pc
+= sizeof(struct unary_op
);
1474 case FILTER_OP_UNARY_NOT_DOUBLE
:
1477 if (!vstack_ax(stack
)) {
1478 ERR("Empty stack\n");
1482 switch (vstack_ax(stack
)->type
) {
1486 ERR("Incorrect register type %d for operation\n",
1487 (int) vstack_ax(stack
)->type
);
1492 vstack_ax(stack
)->type
= REG_S64
;
1493 next_pc
+= sizeof(struct unary_op
);
1497 case FILTER_OP_UNARY_PLUS_DOUBLE
:
1498 case FILTER_OP_UNARY_MINUS_DOUBLE
:
1501 if (!vstack_ax(stack
)) {
1502 ERR("Empty stack\n");
1506 switch (vstack_ax(stack
)->type
) {
1510 ERR("Incorrect register type %d for operation\n",
1511 (int) vstack_ax(stack
)->type
);
1516 vstack_ax(stack
)->type
= REG_DOUBLE
;
1517 next_pc
+= sizeof(struct unary_op
);
1525 struct logical_op
*insn
= (struct logical_op
*) pc
;
1528 /* Add merge point to table */
1529 merge_ret
= merge_point_add_check(merge_points
,
1530 insn
->skip_offset
, stack
);
1536 if (!vstack_ax(stack
)) {
1537 ERR("Empty stack\n");
1541 /* There is always a cast-to-s64 operation before a or/and op. */
1542 switch (vstack_ax(stack
)->type
) {
1546 ERR("Incorrect register type %d for operation\n",
1547 (int) vstack_ax(stack
)->type
);
1552 /* Continue to next instruction */
1553 /* Pop 1 when jump not taken */
1554 if (vstack_pop(stack
)) {
1558 next_pc
+= sizeof(struct logical_op
);
1562 /* load field ref */
1563 case FILTER_OP_LOAD_FIELD_REF
:
1565 ERR("Unknown field ref type\n");
1569 /* get context ref */
1570 case FILTER_OP_GET_CONTEXT_REF
:
1572 if (vstack_push(stack
)) {
1576 vstack_ax(stack
)->type
= REG_UNKNOWN
;
1577 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1580 case FILTER_OP_LOAD_FIELD_REF_STRING
:
1581 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
1582 case FILTER_OP_GET_CONTEXT_REF_STRING
:
1584 if (vstack_push(stack
)) {
1588 vstack_ax(stack
)->type
= REG_STRING
;
1589 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1592 case FILTER_OP_LOAD_FIELD_REF_S64
:
1593 case FILTER_OP_GET_CONTEXT_REF_S64
:
1595 if (vstack_push(stack
)) {
1599 vstack_ax(stack
)->type
= REG_S64
;
1600 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1603 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
1604 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
1606 if (vstack_push(stack
)) {
1610 vstack_ax(stack
)->type
= REG_DOUBLE
;
1611 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1615 /* load from immediate operand */
1616 case FILTER_OP_LOAD_STRING
:
1618 struct load_op
*insn
= (struct load_op
*) pc
;
1620 if (vstack_push(stack
)) {
1624 vstack_ax(stack
)->type
= REG_STRING
;
1625 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1629 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
1631 struct load_op
*insn
= (struct load_op
*) pc
;
1633 if (vstack_push(stack
)) {
1637 vstack_ax(stack
)->type
= REG_STAR_GLOB_STRING
;
1638 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1642 case FILTER_OP_LOAD_S64
:
1644 if (vstack_push(stack
)) {
1648 vstack_ax(stack
)->type
= REG_S64
;
1649 next_pc
+= sizeof(struct load_op
)
1650 + sizeof(struct literal_numeric
);
1654 case FILTER_OP_LOAD_DOUBLE
:
1656 if (vstack_push(stack
)) {
1660 vstack_ax(stack
)->type
= REG_DOUBLE
;
1661 next_pc
+= sizeof(struct load_op
)
1662 + sizeof(struct literal_double
);
1666 case FILTER_OP_CAST_TO_S64
:
1667 case FILTER_OP_CAST_DOUBLE_TO_S64
:
1670 if (!vstack_ax(stack
)) {
1671 ERR("Empty stack\n");
1675 switch (vstack_ax(stack
)->type
) {
1681 ERR("Incorrect register type %d for cast\n",
1682 (int) vstack_ax(stack
)->type
);
1686 vstack_ax(stack
)->type
= REG_S64
;
1687 next_pc
+= sizeof(struct cast_op
);
1690 case FILTER_OP_CAST_NOP
:
1692 next_pc
+= sizeof(struct cast_op
);
1697 * Instructions for recursive traversal through composed types.
1699 case FILTER_OP_GET_CONTEXT_ROOT
:
1700 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
1701 case FILTER_OP_GET_PAYLOAD_ROOT
:
1703 if (vstack_push(stack
)) {
1707 vstack_ax(stack
)->type
= REG_PTR
;
1708 next_pc
+= sizeof(struct load_op
);
1712 case FILTER_OP_LOAD_FIELD
:
1715 if (!vstack_ax(stack
)) {
1716 ERR("Empty stack\n");
1720 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1721 ERR("Expecting pointer on top of stack\n");
1725 vstack_ax(stack
)->type
= REG_UNKNOWN
;
1726 next_pc
+= sizeof(struct load_op
);
1730 case FILTER_OP_LOAD_FIELD_S8
:
1731 case FILTER_OP_LOAD_FIELD_S16
:
1732 case FILTER_OP_LOAD_FIELD_S32
:
1733 case FILTER_OP_LOAD_FIELD_S64
:
1734 case FILTER_OP_LOAD_FIELD_U8
:
1735 case FILTER_OP_LOAD_FIELD_U16
:
1736 case FILTER_OP_LOAD_FIELD_U32
:
1737 case FILTER_OP_LOAD_FIELD_U64
:
1740 if (!vstack_ax(stack
)) {
1741 ERR("Empty stack\n");
1745 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1746 ERR("Expecting pointer on top of stack\n");
1750 vstack_ax(stack
)->type
= REG_S64
;
1751 next_pc
+= sizeof(struct load_op
);
1755 case FILTER_OP_LOAD_FIELD_STRING
:
1756 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
1759 if (!vstack_ax(stack
)) {
1760 ERR("Empty stack\n");
1764 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1765 ERR("Expecting pointer on top of stack\n");
1769 vstack_ax(stack
)->type
= REG_STRING
;
1770 next_pc
+= sizeof(struct load_op
);
1774 case FILTER_OP_LOAD_FIELD_DOUBLE
:
1777 if (!vstack_ax(stack
)) {
1778 ERR("Empty stack\n");
1782 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1783 ERR("Expecting pointer on top of stack\n");
1787 vstack_ax(stack
)->type
= REG_DOUBLE
;
1788 next_pc
+= sizeof(struct load_op
);
1792 case FILTER_OP_GET_SYMBOL
:
1793 case FILTER_OP_GET_SYMBOL_FIELD
:
1796 if (!vstack_ax(stack
)) {
1797 ERR("Empty stack\n");
1801 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1802 ERR("Expecting pointer on top of stack\n");
1806 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_symbol
);
1810 case FILTER_OP_GET_INDEX_U16
:
1813 if (!vstack_ax(stack
)) {
1814 ERR("Empty stack\n");
1818 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1819 ERR("Expecting pointer on top of stack\n");
1823 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u16
);
1827 case FILTER_OP_GET_INDEX_U64
:
1830 if (!vstack_ax(stack
)) {
1831 ERR("Empty stack\n");
1835 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1836 ERR("Expecting pointer on top of stack\n");
1840 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u64
);
1846 *_next_pc
= next_pc
;
1851 * Never called concurrently (hash seed is shared).
1853 int lttng_filter_validate_bytecode(struct bytecode_runtime
*bytecode
)
1855 struct cds_lfht
*merge_points
;
1856 char *pc
, *next_pc
, *start_pc
;
1858 struct vstack stack
;
1860 vstack_init(&stack
);
1862 if (!lttng_hash_seed_ready
) {
1863 lttng_hash_seed
= time(NULL
);
1864 lttng_hash_seed_ready
= 1;
1867 * Note: merge_points hash table used by single thread, and
1868 * never concurrently resized. Therefore, we can use it without
1869 * holding RCU read-side lock and free nodes without using
1872 merge_points
= cds_lfht_new(DEFAULT_NR_MERGE_POINTS
,
1873 MIN_NR_BUCKETS
, MAX_NR_BUCKETS
,
1875 if (!merge_points
) {
1876 ERR("Error allocating hash table for bytecode validation\n");
1879 start_pc
= &bytecode
->code
[0];
1880 for (pc
= next_pc
= start_pc
; pc
- start_pc
< bytecode
->len
;
1882 ret
= bytecode_validate_overflow(bytecode
, start_pc
, pc
);
1885 ERR("filter bytecode overflow\n");
1888 dbg_printf("Validating op %s (%u)\n",
1889 print_op((unsigned int) *(filter_opcode_t
*) pc
),
1890 (unsigned int) *(filter_opcode_t
*) pc
);
1893 * For each instruction, validate the current context
1894 * (traversal of entire execution flow), and validate
1895 * all merge points targeting this instruction.
1897 ret
= validate_instruction_all_contexts(bytecode
, merge_points
,
1898 &stack
, start_pc
, pc
);
1901 ret
= exec_insn(bytecode
, merge_points
, &stack
, &next_pc
, pc
);
1906 if (delete_all_nodes(merge_points
)) {
1908 ERR("Unexpected merge points\n");
1912 if (cds_lfht_destroy(merge_points
, NULL
)) {
1913 ERR("Error destroying hash table\n");