/*
 * lttng-filter-validator.c
 *
 * LTTng modules filter bytecode validator.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
27 #include <linux/types.h>
28 #include <linux/jhash.h>
29 #include <linux/slab.h>
31 #include <wrapper/list.h>
32 #include <lttng-filter.h>
34 #define MERGE_POINT_TABLE_BITS 7
35 #define MERGE_POINT_TABLE_SIZE (1U << MERGE_POINT_TABLE_BITS)
37 /* merge point table node */
39 struct hlist_node node
;
41 /* Context at merge point */
43 unsigned long target_pc
;
47 struct hlist_head mp_head
[MERGE_POINT_TABLE_SIZE
];
51 int lttng_hash_match(struct mp_node
*mp_node
, unsigned long key_pc
)
53 if (mp_node
->target_pc
== key_pc
)
60 int merge_points_compare(const struct vstack
*stacka
,
61 const struct vstack
*stackb
)
65 if (stacka
->top
!= stackb
->top
)
67 len
= stacka
->top
+ 1;
68 WARN_ON_ONCE(len
< 0);
69 for (i
= 0; i
< len
; i
++) {
70 if (stacka
->e
[i
].type
!= stackb
->e
[i
].type
)
77 int merge_point_add_check(struct mp_table
*mp_table
, unsigned long target_pc
,
78 const struct vstack
*stack
)
80 struct mp_node
*mp_node
;
81 unsigned long hash
= jhash_1word(target_pc
, 0);
82 struct hlist_head
*head
;
83 struct mp_node
*lookup_node
;
86 dbg_printk("Filter: adding merge point at offset %lu, hash %lu\n",
88 mp_node
= kzalloc(sizeof(struct mp_node
), GFP_KERNEL
);
91 mp_node
->target_pc
= target_pc
;
92 memcpy(&mp_node
->stack
, stack
, sizeof(mp_node
->stack
));
94 head
= &mp_table
->mp_head
[hash
& (MERGE_POINT_TABLE_SIZE
- 1)];
95 lttng_hlist_for_each_entry(lookup_node
, head
, node
) {
96 if (lttng_hash_match(lookup_node
, target_pc
)) {
102 /* Key already present */
103 dbg_printk("Filter: compare merge points for offset %lu, hash %lu\n",
106 if (merge_points_compare(stack
, &lookup_node
->stack
)) {
107 printk(KERN_WARNING
"Merge points differ for offset %lu\n",
112 hlist_add_head(&mp_node
->node
, head
);
118 * Binary comparators use top of stack and top of stack -1.
/*
 * NOTE(review): this span is a damaged extraction of
 * bin_op_compare_check() — the interior lines (REG_* case labels, error
 * gotos, return statements and closing braces) are missing from this
 * view, and the original file's line numbers are fused into the text.
 * Intent, from the visible fragments only: check that the two operands
 * on top of the virtual stack (vstack_ax / vstack_bx) have register
 * types compatible with the binary comparator named by @str; the nested
 * switches inspect both operand types, and the final printk reports a
 * type mismatch for '%s'. Restore the missing interior from the
 * original source before attempting to build or modify this function.
 */
121 int bin_op_compare_check(struct vstack
*stack
, const char *str
)
123 if (unlikely(!vstack_ax(stack
) || !vstack_bx(stack
)))
126 switch (vstack_ax(stack
)->type
) {
132 switch (vstack_bx(stack
)->type
) {
144 switch (vstack_bx(stack
)->type
) {
163 printk(KERN_WARNING
"type mismatch for '%s' binary operator\n", str
);
168 * Validate bytecode range overflow within the validation pass.
169 * Called for each instruction encountered.
172 int bytecode_validate_overflow(struct bytecode_runtime
*bytecode
,
173 char *start_pc
, char *pc
)
177 switch (*(filter_opcode_t
*) pc
) {
178 case FILTER_OP_UNKNOWN
:
181 printk(KERN_WARNING
"unknown bytecode op %u\n",
182 (unsigned int) *(filter_opcode_t
*) pc
);
187 case FILTER_OP_RETURN
:
189 if (unlikely(pc
+ sizeof(struct return_op
)
190 > start_pc
+ bytecode
->len
)) {
201 case FILTER_OP_MINUS
:
202 case FILTER_OP_RSHIFT
:
203 case FILTER_OP_LSHIFT
:
204 case FILTER_OP_BIN_AND
:
205 case FILTER_OP_BIN_OR
:
206 case FILTER_OP_BIN_XOR
:
207 case FILTER_OP_EQ_DOUBLE
:
208 case FILTER_OP_NE_DOUBLE
:
209 case FILTER_OP_GT_DOUBLE
:
210 case FILTER_OP_LT_DOUBLE
:
211 case FILTER_OP_GE_DOUBLE
:
212 case FILTER_OP_LE_DOUBLE
:
214 case FILTER_OP_EQ_DOUBLE_S64
:
215 case FILTER_OP_NE_DOUBLE_S64
:
216 case FILTER_OP_GT_DOUBLE_S64
:
217 case FILTER_OP_LT_DOUBLE_S64
:
218 case FILTER_OP_GE_DOUBLE_S64
:
219 case FILTER_OP_LE_DOUBLE_S64
:
220 case FILTER_OP_EQ_S64_DOUBLE
:
221 case FILTER_OP_NE_S64_DOUBLE
:
222 case FILTER_OP_GT_S64_DOUBLE
:
223 case FILTER_OP_LT_S64_DOUBLE
:
224 case FILTER_OP_GE_S64_DOUBLE
:
225 case FILTER_OP_LE_S64_DOUBLE
:
226 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
227 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
228 case FILTER_OP_LOAD_DOUBLE
:
229 case FILTER_OP_CAST_DOUBLE_TO_S64
:
230 case FILTER_OP_UNARY_PLUS_DOUBLE
:
231 case FILTER_OP_UNARY_MINUS_DOUBLE
:
232 case FILTER_OP_UNARY_NOT_DOUBLE
:
234 printk(KERN_WARNING
"unsupported bytecode op %u\n",
235 (unsigned int) *(filter_opcode_t
*) pc
);
246 case FILTER_OP_EQ_STRING
:
247 case FILTER_OP_NE_STRING
:
248 case FILTER_OP_GT_STRING
:
249 case FILTER_OP_LT_STRING
:
250 case FILTER_OP_GE_STRING
:
251 case FILTER_OP_LE_STRING
:
252 case FILTER_OP_EQ_S64
:
253 case FILTER_OP_NE_S64
:
254 case FILTER_OP_GT_S64
:
255 case FILTER_OP_LT_S64
:
256 case FILTER_OP_GE_S64
:
257 case FILTER_OP_LE_S64
:
259 if (unlikely(pc
+ sizeof(struct binary_op
)
260 > start_pc
+ bytecode
->len
)) {
267 case FILTER_OP_UNARY_PLUS
:
268 case FILTER_OP_UNARY_MINUS
:
269 case FILTER_OP_UNARY_NOT
:
270 case FILTER_OP_UNARY_PLUS_S64
:
271 case FILTER_OP_UNARY_MINUS_S64
:
272 case FILTER_OP_UNARY_NOT_S64
:
274 if (unlikely(pc
+ sizeof(struct unary_op
)
275 > start_pc
+ bytecode
->len
)) {
285 if (unlikely(pc
+ sizeof(struct logical_op
)
286 > start_pc
+ bytecode
->len
)) {
293 case FILTER_OP_LOAD_FIELD_REF
:
295 printk(KERN_WARNING
"Unknown field ref type\n");
299 /* get context ref */
300 case FILTER_OP_GET_CONTEXT_REF
:
302 printk(KERN_WARNING
"Unknown field ref type\n");
306 case FILTER_OP_LOAD_FIELD_REF_STRING
:
307 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
308 case FILTER_OP_LOAD_FIELD_REF_USER_STRING
:
309 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
:
310 case FILTER_OP_LOAD_FIELD_REF_S64
:
311 case FILTER_OP_GET_CONTEXT_REF_STRING
:
312 case FILTER_OP_GET_CONTEXT_REF_S64
:
314 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct field_ref
)
315 > start_pc
+ bytecode
->len
)) {
321 /* load from immediate operand */
322 case FILTER_OP_LOAD_STRING
:
324 struct load_op
*insn
= (struct load_op
*) pc
;
325 uint32_t str_len
, maxlen
;
327 if (unlikely(pc
+ sizeof(struct load_op
)
328 > start_pc
+ bytecode
->len
)) {
333 maxlen
= start_pc
+ bytecode
->len
- pc
- sizeof(struct load_op
);
334 str_len
= strnlen(insn
->data
, maxlen
);
335 if (unlikely(str_len
>= maxlen
)) {
336 /* Final '\0' not found within range */
342 case FILTER_OP_LOAD_S64
:
344 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct literal_numeric
)
345 > start_pc
+ bytecode
->len
)) {
351 case FILTER_OP_CAST_TO_S64
:
352 case FILTER_OP_CAST_NOP
:
354 if (unlikely(pc
+ sizeof(struct cast_op
)
355 > start_pc
+ bytecode
->len
)) {
367 unsigned long delete_all_nodes(struct mp_table
*mp_table
)
369 struct mp_node
*mp_node
;
370 struct hlist_node
*tmp
;
371 unsigned long nr_nodes
= 0;
374 for (i
= 0; i
< MERGE_POINT_TABLE_SIZE
; i
++) {
375 struct hlist_head
*head
;
377 head
= &mp_table
->mp_head
[i
];
378 lttng_hlist_for_each_entry_safe(mp_node
, tmp
, head
, node
) {
392 int validate_instruction_context(struct bytecode_runtime
*bytecode
,
393 struct vstack
*stack
,
399 switch (*(filter_opcode_t
*) pc
) {
400 case FILTER_OP_UNKNOWN
:
403 printk(KERN_WARNING
"unknown bytecode op %u\n",
404 (unsigned int) *(filter_opcode_t
*) pc
);
409 case FILTER_OP_RETURN
:
419 case FILTER_OP_MINUS
:
420 case FILTER_OP_RSHIFT
:
421 case FILTER_OP_LSHIFT
:
422 case FILTER_OP_BIN_AND
:
423 case FILTER_OP_BIN_OR
:
424 case FILTER_OP_BIN_XOR
:
426 case FILTER_OP_EQ_DOUBLE
:
427 case FILTER_OP_NE_DOUBLE
:
428 case FILTER_OP_GT_DOUBLE
:
429 case FILTER_OP_LT_DOUBLE
:
430 case FILTER_OP_GE_DOUBLE
:
431 case FILTER_OP_LE_DOUBLE
:
432 case FILTER_OP_EQ_DOUBLE_S64
:
433 case FILTER_OP_NE_DOUBLE_S64
:
434 case FILTER_OP_GT_DOUBLE_S64
:
435 case FILTER_OP_LT_DOUBLE_S64
:
436 case FILTER_OP_GE_DOUBLE_S64
:
437 case FILTER_OP_LE_DOUBLE_S64
:
438 case FILTER_OP_EQ_S64_DOUBLE
:
439 case FILTER_OP_NE_S64_DOUBLE
:
440 case FILTER_OP_GT_S64_DOUBLE
:
441 case FILTER_OP_LT_S64_DOUBLE
:
442 case FILTER_OP_GE_S64_DOUBLE
:
443 case FILTER_OP_LE_S64_DOUBLE
:
444 case FILTER_OP_UNARY_PLUS_DOUBLE
:
445 case FILTER_OP_UNARY_MINUS_DOUBLE
:
446 case FILTER_OP_UNARY_NOT_DOUBLE
:
447 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
448 case FILTER_OP_LOAD_DOUBLE
:
449 case FILTER_OP_CAST_DOUBLE_TO_S64
:
450 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
452 printk(KERN_WARNING
"unsupported bytecode op %u\n",
453 (unsigned int) *(filter_opcode_t
*) pc
);
460 ret
= bin_op_compare_check(stack
, "==");
467 ret
= bin_op_compare_check(stack
, "!=");
474 ret
= bin_op_compare_check(stack
, ">");
481 ret
= bin_op_compare_check(stack
, "<");
488 ret
= bin_op_compare_check(stack
, ">=");
495 ret
= bin_op_compare_check(stack
, "<=");
501 case FILTER_OP_EQ_STRING
:
502 case FILTER_OP_NE_STRING
:
503 case FILTER_OP_GT_STRING
:
504 case FILTER_OP_LT_STRING
:
505 case FILTER_OP_GE_STRING
:
506 case FILTER_OP_LE_STRING
:
508 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
509 printk(KERN_WARNING
"Empty stack\n");
513 if (vstack_ax(stack
)->type
!= REG_STRING
514 || vstack_bx(stack
)->type
!= REG_STRING
) {
515 printk(KERN_WARNING
"Unexpected register type for string comparator\n");
522 case FILTER_OP_EQ_S64
:
523 case FILTER_OP_NE_S64
:
524 case FILTER_OP_GT_S64
:
525 case FILTER_OP_LT_S64
:
526 case FILTER_OP_GE_S64
:
527 case FILTER_OP_LE_S64
:
529 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
530 printk(KERN_WARNING
"Empty stack\n");
534 if (vstack_ax(stack
)->type
!= REG_S64
535 || vstack_bx(stack
)->type
!= REG_S64
) {
536 printk(KERN_WARNING
"Unexpected register type for s64 comparator\n");
544 case FILTER_OP_UNARY_PLUS
:
545 case FILTER_OP_UNARY_MINUS
:
546 case FILTER_OP_UNARY_NOT
:
548 if (!vstack_ax(stack
)) {
549 printk(KERN_WARNING
"Empty stack\n");
553 switch (vstack_ax(stack
)->type
) {
556 printk(KERN_WARNING
"unknown register type\n");
561 printk(KERN_WARNING
"Unary op can only be applied to numeric or floating point registers\n");
570 case FILTER_OP_UNARY_PLUS_S64
:
571 case FILTER_OP_UNARY_MINUS_S64
:
572 case FILTER_OP_UNARY_NOT_S64
:
574 if (!vstack_ax(stack
)) {
575 printk(KERN_WARNING
"Empty stack\n");
579 if (vstack_ax(stack
)->type
!= REG_S64
) {
580 printk(KERN_WARNING
"Invalid register type\n");
591 struct logical_op
*insn
= (struct logical_op
*) pc
;
593 if (!vstack_ax(stack
)) {
594 printk(KERN_WARNING
"Empty stack\n");
598 if (vstack_ax(stack
)->type
!= REG_S64
) {
599 printk(KERN_WARNING
"Logical comparator expects S64 register\n");
604 dbg_printk("Validate jumping to bytecode offset %u\n",
605 (unsigned int) insn
->skip_offset
);
606 if (unlikely(start_pc
+ insn
->skip_offset
<= pc
)) {
607 printk(KERN_WARNING
"Loops are not allowed in bytecode\n");
615 case FILTER_OP_LOAD_FIELD_REF
:
617 printk(KERN_WARNING
"Unknown field ref type\n");
621 case FILTER_OP_LOAD_FIELD_REF_STRING
:
622 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
623 case FILTER_OP_LOAD_FIELD_REF_USER_STRING
:
624 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
:
626 struct load_op
*insn
= (struct load_op
*) pc
;
627 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
629 dbg_printk("Validate load field ref offset %u type string\n",
633 case FILTER_OP_LOAD_FIELD_REF_S64
:
635 struct load_op
*insn
= (struct load_op
*) pc
;
636 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
638 dbg_printk("Validate load field ref offset %u type s64\n",
643 /* load from immediate operand */
644 case FILTER_OP_LOAD_STRING
:
649 case FILTER_OP_LOAD_S64
:
654 case FILTER_OP_CAST_TO_S64
:
656 struct cast_op
*insn
= (struct cast_op
*) pc
;
658 if (!vstack_ax(stack
)) {
659 printk(KERN_WARNING
"Empty stack\n");
663 switch (vstack_ax(stack
)->type
) {
666 printk(KERN_WARNING
"unknown register type\n");
671 printk(KERN_WARNING
"Cast op can only be applied to numeric or floating point registers\n");
677 if (insn
->op
== FILTER_OP_CAST_DOUBLE_TO_S64
) {
678 if (vstack_ax(stack
)->type
!= REG_DOUBLE
) {
679 printk(KERN_WARNING
"Cast expects double\n");
686 case FILTER_OP_CAST_NOP
:
691 /* get context ref */
692 case FILTER_OP_GET_CONTEXT_REF
:
694 printk(KERN_WARNING
"Unknown get context ref type\n");
698 case FILTER_OP_GET_CONTEXT_REF_STRING
:
700 struct load_op
*insn
= (struct load_op
*) pc
;
701 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
703 dbg_printk("Validate get context ref offset %u type string\n",
707 case FILTER_OP_GET_CONTEXT_REF_S64
:
709 struct load_op
*insn
= (struct load_op
*) pc
;
710 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
712 dbg_printk("Validate get context ref offset %u type s64\n",
728 int validate_instruction_all_contexts(struct bytecode_runtime
*bytecode
,
729 struct mp_table
*mp_table
,
730 struct vstack
*stack
,
735 unsigned long target_pc
= pc
- start_pc
;
737 struct hlist_head
*head
;
738 struct mp_node
*mp_node
;
740 /* Validate the context resulting from the previous instruction */
741 ret
= validate_instruction_context(bytecode
, stack
, start_pc
, pc
);
745 /* Validate merge points */
746 hash
= jhash_1word(target_pc
, 0);
747 head
= &mp_table
->mp_head
[hash
& (MERGE_POINT_TABLE_SIZE
- 1)];
748 lttng_hlist_for_each_entry(mp_node
, head
, node
) {
749 if (lttng_hash_match(mp_node
, target_pc
)) {
755 dbg_printk("Filter: validate merge point at offset %lu\n",
757 if (merge_points_compare(stack
, &mp_node
->stack
)) {
758 printk(KERN_WARNING
"Merge points differ for offset %lu\n",
762 /* Once validated, we can remove the merge point */
763 dbg_printk("Filter: remove merge point at offset %lu\n",
765 hlist_del(&mp_node
->node
);
772 * >0: going to next insn.
773 * 0: success, stop iteration.
777 int exec_insn(struct bytecode_runtime
*bytecode
,
778 struct mp_table
*mp_table
,
779 struct vstack
*stack
,
784 char *next_pc
= *_next_pc
;
786 switch (*(filter_opcode_t
*) pc
) {
787 case FILTER_OP_UNKNOWN
:
790 printk(KERN_WARNING
"unknown bytecode op %u\n",
791 (unsigned int) *(filter_opcode_t
*) pc
);
796 case FILTER_OP_RETURN
:
798 if (!vstack_ax(stack
)) {
799 printk(KERN_WARNING
"Empty stack\n");
812 case FILTER_OP_MINUS
:
813 case FILTER_OP_RSHIFT
:
814 case FILTER_OP_LSHIFT
:
815 case FILTER_OP_BIN_AND
:
816 case FILTER_OP_BIN_OR
:
817 case FILTER_OP_BIN_XOR
:
819 case FILTER_OP_EQ_DOUBLE
:
820 case FILTER_OP_NE_DOUBLE
:
821 case FILTER_OP_GT_DOUBLE
:
822 case FILTER_OP_LT_DOUBLE
:
823 case FILTER_OP_GE_DOUBLE
:
824 case FILTER_OP_LE_DOUBLE
:
825 case FILTER_OP_EQ_DOUBLE_S64
:
826 case FILTER_OP_NE_DOUBLE_S64
:
827 case FILTER_OP_GT_DOUBLE_S64
:
828 case FILTER_OP_LT_DOUBLE_S64
:
829 case FILTER_OP_GE_DOUBLE_S64
:
830 case FILTER_OP_LE_DOUBLE_S64
:
831 case FILTER_OP_EQ_S64_DOUBLE
:
832 case FILTER_OP_NE_S64_DOUBLE
:
833 case FILTER_OP_GT_S64_DOUBLE
:
834 case FILTER_OP_LT_S64_DOUBLE
:
835 case FILTER_OP_GE_S64_DOUBLE
:
836 case FILTER_OP_LE_S64_DOUBLE
:
837 case FILTER_OP_UNARY_PLUS_DOUBLE
:
838 case FILTER_OP_UNARY_MINUS_DOUBLE
:
839 case FILTER_OP_UNARY_NOT_DOUBLE
:
840 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
841 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
842 case FILTER_OP_LOAD_DOUBLE
:
843 case FILTER_OP_CAST_DOUBLE_TO_S64
:
845 printk(KERN_WARNING
"unsupported bytecode op %u\n",
846 (unsigned int) *(filter_opcode_t
*) pc
);
857 case FILTER_OP_EQ_STRING
:
858 case FILTER_OP_NE_STRING
:
859 case FILTER_OP_GT_STRING
:
860 case FILTER_OP_LT_STRING
:
861 case FILTER_OP_GE_STRING
:
862 case FILTER_OP_LE_STRING
:
863 case FILTER_OP_EQ_S64
:
864 case FILTER_OP_NE_S64
:
865 case FILTER_OP_GT_S64
:
866 case FILTER_OP_LT_S64
:
867 case FILTER_OP_GE_S64
:
868 case FILTER_OP_LE_S64
:
871 if (vstack_pop(stack
)) {
875 if (!vstack_ax(stack
)) {
876 printk(KERN_WARNING
"Empty stack\n");
880 vstack_ax(stack
)->type
= REG_S64
;
881 next_pc
+= sizeof(struct binary_op
);
886 case FILTER_OP_UNARY_PLUS
:
887 case FILTER_OP_UNARY_MINUS
:
888 case FILTER_OP_UNARY_NOT
:
889 case FILTER_OP_UNARY_PLUS_S64
:
890 case FILTER_OP_UNARY_MINUS_S64
:
891 case FILTER_OP_UNARY_NOT_S64
:
894 if (!vstack_ax(stack
)) {
895 printk(KERN_WARNING
"Empty stack\n");
899 vstack_ax(stack
)->type
= REG_S64
;
900 next_pc
+= sizeof(struct unary_op
);
908 struct logical_op
*insn
= (struct logical_op
*) pc
;
911 /* Add merge point to table */
912 merge_ret
= merge_point_add_check(mp_table
,
913 insn
->skip_offset
, stack
);
918 /* Continue to next instruction */
919 /* Pop 1 when jump not taken */
920 if (vstack_pop(stack
)) {
924 next_pc
+= sizeof(struct logical_op
);
929 case FILTER_OP_LOAD_FIELD_REF
:
931 printk(KERN_WARNING
"Unknown field ref type\n");
935 /* get context ref */
936 case FILTER_OP_GET_CONTEXT_REF
:
938 printk(KERN_WARNING
"Unknown get context ref type\n");
942 case FILTER_OP_LOAD_FIELD_REF_STRING
:
943 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
944 case FILTER_OP_GET_CONTEXT_REF_STRING
:
945 case FILTER_OP_LOAD_FIELD_REF_USER_STRING
:
946 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
:
948 if (vstack_push(stack
)) {
952 vstack_ax(stack
)->type
= REG_STRING
;
953 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
956 case FILTER_OP_LOAD_FIELD_REF_S64
:
957 case FILTER_OP_GET_CONTEXT_REF_S64
:
959 if (vstack_push(stack
)) {
963 vstack_ax(stack
)->type
= REG_S64
;
964 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
968 /* load from immediate operand */
969 case FILTER_OP_LOAD_STRING
:
971 struct load_op
*insn
= (struct load_op
*) pc
;
973 if (vstack_push(stack
)) {
977 vstack_ax(stack
)->type
= REG_STRING
;
978 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
982 case FILTER_OP_LOAD_S64
:
984 if (vstack_push(stack
)) {
988 vstack_ax(stack
)->type
= REG_S64
;
989 next_pc
+= sizeof(struct load_op
)
990 + sizeof(struct literal_numeric
);
994 case FILTER_OP_CAST_TO_S64
:
997 if (!vstack_ax(stack
)) {
998 printk(KERN_WARNING
"Empty stack\n");
1002 vstack_ax(stack
)->type
= REG_S64
;
1003 next_pc
+= sizeof(struct cast_op
);
1006 case FILTER_OP_CAST_NOP
:
1008 next_pc
+= sizeof(struct cast_op
);
1014 *_next_pc
= next_pc
;
1019 * Never called concurrently (hash seed is shared).
1021 int lttng_filter_validate_bytecode(struct bytecode_runtime
*bytecode
)
1023 struct mp_table
*mp_table
;
1024 char *pc
, *next_pc
, *start_pc
;
1026 struct vstack stack
;
1028 vstack_init(&stack
);
1030 mp_table
= kzalloc(sizeof(*mp_table
), GFP_KERNEL
);
1032 printk(KERN_WARNING
"Error allocating hash table for bytecode validation\n");
1035 start_pc
= &bytecode
->data
[0];
1036 for (pc
= next_pc
= start_pc
; pc
- start_pc
< bytecode
->len
;
1038 ret
= bytecode_validate_overflow(bytecode
, start_pc
, pc
);
1041 printk(KERN_WARNING
"filter bytecode overflow\n");
1044 dbg_printk("Validating op %s (%u)\n",
1045 lttng_filter_print_op((unsigned int) *(filter_opcode_t
*) pc
),
1046 (unsigned int) *(filter_opcode_t
*) pc
);
1049 * For each instruction, validate the current context
1050 * (traversal of entire execution flow), and validate
1051 * all merge points targeting this instruction.
1053 ret
= validate_instruction_all_contexts(bytecode
, mp_table
,
1054 &stack
, start_pc
, pc
);
1057 ret
= exec_insn(bytecode
, mp_table
, &stack
, &next_pc
, pc
);
1062 if (delete_all_nodes(mp_table
)) {
1064 printk(KERN_WARNING
"Unexpected merge points\n");