/* SPDX-License-Identifier: MIT
 *
 * lttng-filter-specialize.c
 *
 * LTTng modules filter code specializer.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */
10 #include <linux/slab.h>
11 #include <lttng-filter.h>
12 #include "lib/align.h"
14 static ssize_t
bytecode_reserve_data(struct bytecode_runtime
*runtime
,
15 size_t align
, size_t len
)
18 size_t padding
= offset_align(runtime
->data_len
, align
);
19 size_t new_len
= runtime
->data_len
+ padding
+ len
;
20 size_t new_alloc_len
= new_len
;
21 size_t old_alloc_len
= runtime
->data_alloc_len
;
23 if (new_len
> FILTER_MAX_DATA_LEN
)
26 if (new_alloc_len
> old_alloc_len
) {
30 max_t(size_t, 1U << get_count_order(new_alloc_len
), old_alloc_len
<< 1);
31 newptr
= krealloc(runtime
->data
, new_alloc_len
, GFP_KERNEL
);
34 runtime
->data
= newptr
;
35 /* We zero directly the memory from start of allocation. */
36 memset(&runtime
->data
[old_alloc_len
], 0, new_alloc_len
- old_alloc_len
);
37 runtime
->data_alloc_len
= new_alloc_len
;
39 runtime
->data_len
+= padding
;
40 ret
= runtime
->data_len
;
41 runtime
->data_len
+= len
;
45 static ssize_t
bytecode_push_data(struct bytecode_runtime
*runtime
,
46 const void *p
, size_t align
, size_t len
)
50 offset
= bytecode_reserve_data(runtime
, align
, len
);
53 memcpy(&runtime
->data
[offset
], p
, len
);
57 static int specialize_load_field(struct vstack_entry
*stack_top
,
62 switch (stack_top
->load
.type
) {
65 case LOAD_ROOT_CONTEXT
:
66 case LOAD_ROOT_APP_CONTEXT
:
67 case LOAD_ROOT_PAYLOAD
:
69 dbg_printk("Filter warning: cannot load root, missing field name.\n");
73 switch (stack_top
->load
.object_type
) {
75 dbg_printk("op load field s8\n");
76 stack_top
->type
= REG_S64
;
77 if (!stack_top
->load
.rev_bo
)
78 insn
->op
= FILTER_OP_LOAD_FIELD_S8
;
81 dbg_printk("op load field s16\n");
82 stack_top
->type
= REG_S64
;
83 if (!stack_top
->load
.rev_bo
)
84 insn
->op
= FILTER_OP_LOAD_FIELD_S16
;
87 dbg_printk("op load field s32\n");
88 stack_top
->type
= REG_S64
;
89 if (!stack_top
->load
.rev_bo
)
90 insn
->op
= FILTER_OP_LOAD_FIELD_S32
;
93 dbg_printk("op load field s64\n");
94 stack_top
->type
= REG_S64
;
95 if (!stack_top
->load
.rev_bo
)
96 insn
->op
= FILTER_OP_LOAD_FIELD_S64
;
99 dbg_printk("op load field u8\n");
100 stack_top
->type
= REG_S64
;
101 insn
->op
= FILTER_OP_LOAD_FIELD_U8
;
103 case OBJECT_TYPE_U16
:
104 dbg_printk("op load field u16\n");
105 stack_top
->type
= REG_S64
;
106 if (!stack_top
->load
.rev_bo
)
107 insn
->op
= FILTER_OP_LOAD_FIELD_U16
;
109 case OBJECT_TYPE_U32
:
110 dbg_printk("op load field u32\n");
111 stack_top
->type
= REG_S64
;
112 if (!stack_top
->load
.rev_bo
)
113 insn
->op
= FILTER_OP_LOAD_FIELD_U32
;
115 case OBJECT_TYPE_U64
:
116 dbg_printk("op load field u64\n");
117 stack_top
->type
= REG_S64
;
118 if (!stack_top
->load
.rev_bo
)
119 insn
->op
= FILTER_OP_LOAD_FIELD_U64
;
121 case OBJECT_TYPE_DOUBLE
:
122 printk(KERN_WARNING
"Double type unsupported\n\n");
125 case OBJECT_TYPE_STRING
:
126 dbg_printk("op load field string\n");
127 stack_top
->type
= REG_STRING
;
128 insn
->op
= FILTER_OP_LOAD_FIELD_STRING
;
130 case OBJECT_TYPE_STRING_SEQUENCE
:
131 dbg_printk("op load field string sequence\n");
132 stack_top
->type
= REG_STRING
;
133 insn
->op
= FILTER_OP_LOAD_FIELD_SEQUENCE
;
135 case OBJECT_TYPE_DYNAMIC
:
138 case OBJECT_TYPE_SEQUENCE
:
139 case OBJECT_TYPE_ARRAY
:
140 case OBJECT_TYPE_STRUCT
:
141 case OBJECT_TYPE_VARIANT
:
142 printk(KERN_WARNING
"Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
152 static int specialize_get_index_object_type(enum object_type
*otype
,
153 int signedness
, uint32_t elem_len
)
158 *otype
= OBJECT_TYPE_S8
;
160 *otype
= OBJECT_TYPE_U8
;
164 *otype
= OBJECT_TYPE_S16
;
166 *otype
= OBJECT_TYPE_U16
;
170 *otype
= OBJECT_TYPE_S32
;
172 *otype
= OBJECT_TYPE_U32
;
176 *otype
= OBJECT_TYPE_S64
;
178 *otype
= OBJECT_TYPE_U64
;
186 static int specialize_get_index(struct bytecode_runtime
*runtime
,
187 struct load_op
*insn
, uint64_t index
,
188 struct vstack_entry
*stack_top
,
192 struct filter_get_index_data gid
;
195 memset(&gid
, 0, sizeof(gid
));
196 switch (stack_top
->load
.type
) {
198 switch (stack_top
->load
.object_type
) {
199 case OBJECT_TYPE_ARRAY
:
201 const struct lttng_event_field
*field
;
202 uint32_t elem_len
, num_elems
;
205 field
= stack_top
->load
.field
;
206 elem_len
= field
->type
.u
.array
.elem_type
.u
.basic
.integer
.size
;
207 signedness
= field
->type
.u
.array
.elem_type
.u
.basic
.integer
.signedness
;
208 num_elems
= field
->type
.u
.array
.length
;
209 if (index
>= num_elems
) {
213 ret
= specialize_get_index_object_type(&stack_top
->load
.object_type
,
214 signedness
, elem_len
);
217 gid
.offset
= index
* (elem_len
/ CHAR_BIT
);
218 gid
.array_len
= num_elems
* (elem_len
/ CHAR_BIT
);
219 gid
.elem
.type
= stack_top
->load
.object_type
;
220 gid
.elem
.len
= elem_len
;
221 if (field
->type
.u
.array
.elem_type
.u
.basic
.integer
.reverse_byte_order
)
222 gid
.elem
.rev_bo
= true;
223 stack_top
->load
.rev_bo
= gid
.elem
.rev_bo
;
226 case OBJECT_TYPE_SEQUENCE
:
228 const struct lttng_event_field
*field
;
232 field
= stack_top
->load
.field
;
233 elem_len
= field
->type
.u
.sequence
.elem_type
.u
.basic
.integer
.size
;
234 signedness
= field
->type
.u
.sequence
.elem_type
.u
.basic
.integer
.signedness
;
235 ret
= specialize_get_index_object_type(&stack_top
->load
.object_type
,
236 signedness
, elem_len
);
239 gid
.offset
= index
* (elem_len
/ CHAR_BIT
);
240 gid
.elem
.type
= stack_top
->load
.object_type
;
241 gid
.elem
.len
= elem_len
;
242 if (field
->type
.u
.sequence
.elem_type
.u
.basic
.integer
.reverse_byte_order
)
243 gid
.elem
.rev_bo
= true;
244 stack_top
->load
.rev_bo
= gid
.elem
.rev_bo
;
247 case OBJECT_TYPE_STRUCT
:
248 /* Only generated by the specialize phase. */
249 case OBJECT_TYPE_VARIANT
: /* Fall-through */
251 printk(KERN_WARNING
"Unexpected get index type %d",
252 (int) stack_top
->load
.object_type
);
257 case LOAD_ROOT_CONTEXT
:
258 case LOAD_ROOT_APP_CONTEXT
:
259 case LOAD_ROOT_PAYLOAD
:
260 printk(KERN_WARNING
"Index lookup for root field not implemented yet.\n");
264 data_offset
= bytecode_push_data(runtime
, &gid
,
265 __alignof__(gid
), sizeof(gid
));
266 if (data_offset
< 0) {
272 ((struct get_index_u16
*) insn
->data
)->index
= data_offset
;
275 ((struct get_index_u64
*) insn
->data
)->index
= data_offset
;
288 static int specialize_context_lookup_name(struct lttng_ctx
*ctx
,
289 struct bytecode_runtime
*bytecode
,
290 struct load_op
*insn
)
295 offset
= ((struct get_symbol
*) insn
->data
)->offset
;
296 name
= bytecode
->p
.bc
->bc
.data
+ bytecode
->p
.bc
->bc
.reloc_offset
+ offset
;
297 return lttng_get_context_index(ctx
, name
);
300 static int specialize_load_object(const struct lttng_event_field
*field
,
301 struct vstack_load
*load
, bool is_context
)
303 load
->type
= LOAD_OBJECT
;
305 * LTTng-UST layout all integer fields as s64 on the stack for the filter.
307 switch (field
->type
.atype
) {
309 if (field
->type
.u
.basic
.integer
.signedness
)
310 load
->object_type
= OBJECT_TYPE_S64
;
312 load
->object_type
= OBJECT_TYPE_U64
;
313 load
->rev_bo
= false;
317 const struct lttng_integer_type
*itype
=
318 &field
->type
.u
.basic
.enumeration
.container_type
;
320 if (itype
->signedness
)
321 load
->object_type
= OBJECT_TYPE_S64
;
323 load
->object_type
= OBJECT_TYPE_U64
;
324 load
->rev_bo
= false;
328 if (field
->type
.u
.array
.elem_type
.atype
!= atype_integer
) {
329 printk(KERN_WARNING
"Array nesting only supports integer types.\n");
333 load
->object_type
= OBJECT_TYPE_STRING
;
335 if (field
->type
.u
.array
.elem_type
.u
.basic
.integer
.encoding
== lttng_encode_none
) {
336 load
->object_type
= OBJECT_TYPE_ARRAY
;
339 load
->object_type
= OBJECT_TYPE_STRING_SEQUENCE
;
344 if (field
->type
.u
.sequence
.elem_type
.atype
!= atype_integer
) {
345 printk(KERN_WARNING
"Sequence nesting only supports integer types.\n");
349 load
->object_type
= OBJECT_TYPE_STRING
;
351 if (field
->type
.u
.sequence
.elem_type
.u
.basic
.integer
.encoding
== lttng_encode_none
) {
352 load
->object_type
= OBJECT_TYPE_SEQUENCE
;
355 load
->object_type
= OBJECT_TYPE_STRING_SEQUENCE
;
359 case atype_array_bitfield
:
360 printk(KERN_WARNING
"Bitfield array type is not supported.\n");
362 case atype_sequence_bitfield
:
363 printk(KERN_WARNING
"Bitfield sequence type is not supported.\n");
366 load
->object_type
= OBJECT_TYPE_STRING
;
369 printk(KERN_WARNING
"Structure type cannot be loaded.\n");
372 printk(KERN_WARNING
"Unknown type: %d", (int) field
->type
.atype
);
378 static int specialize_context_lookup(struct lttng_ctx
*ctx
,
379 struct bytecode_runtime
*runtime
,
380 struct load_op
*insn
,
381 struct vstack_load
*load
)
384 struct lttng_ctx_field
*ctx_field
;
385 struct lttng_event_field
*field
;
386 struct filter_get_index_data gid
;
389 idx
= specialize_context_lookup_name(ctx
, runtime
, insn
);
393 ctx_field
= <tng_static_ctx
->fields
[idx
];
394 field
= &ctx_field
->event_field
;
395 ret
= specialize_load_object(field
, load
, true);
398 /* Specialize each get_symbol into a get_index. */
399 insn
->op
= FILTER_OP_GET_INDEX_U16
;
400 memset(&gid
, 0, sizeof(gid
));
402 gid
.elem
.type
= load
->object_type
;
403 data_offset
= bytecode_push_data(runtime
, &gid
,
404 __alignof__(gid
), sizeof(gid
));
405 if (data_offset
< 0) {
408 ((struct get_index_u16
*) insn
->data
)->index
= data_offset
;
412 static int specialize_payload_lookup(const struct lttng_event_desc
*event_desc
,
413 struct bytecode_runtime
*runtime
,
414 struct load_op
*insn
,
415 struct vstack_load
*load
)
419 unsigned int i
, nr_fields
;
421 uint32_t field_offset
= 0;
422 const struct lttng_event_field
*field
;
424 struct filter_get_index_data gid
;
427 nr_fields
= event_desc
->nr_fields
;
428 offset
= ((struct get_symbol
*) insn
->data
)->offset
;
429 name
= runtime
->p
.bc
->bc
.data
+ runtime
->p
.bc
->bc
.reloc_offset
+ offset
;
430 for (i
= 0; i
< nr_fields
; i
++) {
431 field
= &event_desc
->fields
[i
];
432 if (!strcmp(field
->name
, name
)) {
436 /* compute field offset on stack */
437 switch (field
->type
.atype
) {
440 field_offset
+= sizeof(int64_t);
444 case atype_array_bitfield
:
445 case atype_sequence_bitfield
:
446 field_offset
+= sizeof(unsigned long);
447 field_offset
+= sizeof(void *);
450 field_offset
+= sizeof(void *);
462 ret
= specialize_load_object(field
, load
, false);
466 /* Specialize each get_symbol into a get_index. */
467 insn
->op
= FILTER_OP_GET_INDEX_U16
;
468 memset(&gid
, 0, sizeof(gid
));
469 gid
.offset
= field_offset
;
470 gid
.elem
.type
= load
->object_type
;
471 data_offset
= bytecode_push_data(runtime
, &gid
,
472 __alignof__(gid
), sizeof(gid
));
473 if (data_offset
< 0) {
477 ((struct get_index_u16
*) insn
->data
)->index
= data_offset
;
483 int lttng_filter_specialize_bytecode(const struct lttng_event_desc
*event_desc
,
484 struct bytecode_runtime
*bytecode
)
486 void *pc
, *next_pc
, *start_pc
;
488 struct vstack _stack
;
489 struct vstack
*stack
= &_stack
;
490 struct lttng_ctx
*ctx
= bytecode
->p
.ctx
;
494 start_pc
= &bytecode
->code
[0];
495 for (pc
= next_pc
= start_pc
; pc
- start_pc
< bytecode
->len
;
497 switch (*(filter_opcode_t
*) pc
) {
498 case FILTER_OP_UNKNOWN
:
500 printk(KERN_WARNING
"unknown bytecode op %u\n",
501 (unsigned int) *(filter_opcode_t
*) pc
);
505 case FILTER_OP_RETURN
:
506 case FILTER_OP_RETURN_S64
:
515 case FILTER_OP_MINUS
:
516 printk(KERN_WARNING
"unsupported bytecode op %u\n",
517 (unsigned int) *(filter_opcode_t
*) pc
);
523 struct binary_op
*insn
= (struct binary_op
*) pc
;
525 switch(vstack_ax(stack
)->type
) {
527 printk(KERN_WARNING
"unknown register type\n");
532 if (vstack_bx(stack
)->type
== REG_STAR_GLOB_STRING
)
533 insn
->op
= FILTER_OP_EQ_STAR_GLOB_STRING
;
535 insn
->op
= FILTER_OP_EQ_STRING
;
537 case REG_STAR_GLOB_STRING
:
538 insn
->op
= FILTER_OP_EQ_STAR_GLOB_STRING
;
541 if (vstack_bx(stack
)->type
== REG_S64
)
542 insn
->op
= FILTER_OP_EQ_S64
;
544 insn
->op
= FILTER_OP_EQ_DOUBLE_S64
;
547 if (vstack_bx(stack
)->type
== REG_S64
)
548 insn
->op
= FILTER_OP_EQ_S64_DOUBLE
;
550 insn
->op
= FILTER_OP_EQ_DOUBLE
;
554 if (vstack_pop(stack
)) {
558 vstack_ax(stack
)->type
= REG_S64
;
559 next_pc
+= sizeof(struct binary_op
);
565 struct binary_op
*insn
= (struct binary_op
*) pc
;
567 switch(vstack_ax(stack
)->type
) {
569 printk(KERN_WARNING
"unknown register type\n");
574 if (vstack_bx(stack
)->type
== REG_STAR_GLOB_STRING
)
575 insn
->op
= FILTER_OP_NE_STAR_GLOB_STRING
;
577 insn
->op
= FILTER_OP_NE_STRING
;
579 case REG_STAR_GLOB_STRING
:
580 insn
->op
= FILTER_OP_NE_STAR_GLOB_STRING
;
583 if (vstack_bx(stack
)->type
== REG_S64
)
584 insn
->op
= FILTER_OP_NE_S64
;
586 insn
->op
= FILTER_OP_NE_DOUBLE_S64
;
589 if (vstack_bx(stack
)->type
== REG_S64
)
590 insn
->op
= FILTER_OP_NE_S64_DOUBLE
;
592 insn
->op
= FILTER_OP_NE_DOUBLE
;
596 if (vstack_pop(stack
)) {
600 vstack_ax(stack
)->type
= REG_S64
;
601 next_pc
+= sizeof(struct binary_op
);
607 struct binary_op
*insn
= (struct binary_op
*) pc
;
609 switch(vstack_ax(stack
)->type
) {
611 printk(KERN_WARNING
"unknown register type\n");
615 case REG_STAR_GLOB_STRING
:
616 printk(KERN_WARNING
"invalid register type for > binary operator\n");
620 insn
->op
= FILTER_OP_GT_STRING
;
623 if (vstack_bx(stack
)->type
== REG_S64
)
624 insn
->op
= FILTER_OP_GT_S64
;
626 insn
->op
= FILTER_OP_GT_DOUBLE_S64
;
629 if (vstack_bx(stack
)->type
== REG_S64
)
630 insn
->op
= FILTER_OP_GT_S64_DOUBLE
;
632 insn
->op
= FILTER_OP_GT_DOUBLE
;
636 if (vstack_pop(stack
)) {
640 vstack_ax(stack
)->type
= REG_S64
;
641 next_pc
+= sizeof(struct binary_op
);
647 struct binary_op
*insn
= (struct binary_op
*) pc
;
649 switch(vstack_ax(stack
)->type
) {
651 printk(KERN_WARNING
"unknown register type\n");
655 case REG_STAR_GLOB_STRING
:
656 printk(KERN_WARNING
"invalid register type for < binary operator\n");
660 insn
->op
= FILTER_OP_LT_STRING
;
663 if (vstack_bx(stack
)->type
== REG_S64
)
664 insn
->op
= FILTER_OP_LT_S64
;
666 insn
->op
= FILTER_OP_LT_DOUBLE_S64
;
669 if (vstack_bx(stack
)->type
== REG_S64
)
670 insn
->op
= FILTER_OP_LT_S64_DOUBLE
;
672 insn
->op
= FILTER_OP_LT_DOUBLE
;
676 if (vstack_pop(stack
)) {
680 vstack_ax(stack
)->type
= REG_S64
;
681 next_pc
+= sizeof(struct binary_op
);
687 struct binary_op
*insn
= (struct binary_op
*) pc
;
689 switch(vstack_ax(stack
)->type
) {
691 printk(KERN_WARNING
"unknown register type\n");
695 case REG_STAR_GLOB_STRING
:
696 printk(KERN_WARNING
"invalid register type for >= binary operator\n");
700 insn
->op
= FILTER_OP_GE_STRING
;
703 if (vstack_bx(stack
)->type
== REG_S64
)
704 insn
->op
= FILTER_OP_GE_S64
;
706 insn
->op
= FILTER_OP_GE_DOUBLE_S64
;
709 if (vstack_bx(stack
)->type
== REG_S64
)
710 insn
->op
= FILTER_OP_GE_S64_DOUBLE
;
712 insn
->op
= FILTER_OP_GE_DOUBLE
;
716 if (vstack_pop(stack
)) {
720 vstack_ax(stack
)->type
= REG_S64
;
721 next_pc
+= sizeof(struct binary_op
);
726 struct binary_op
*insn
= (struct binary_op
*) pc
;
728 switch(vstack_ax(stack
)->type
) {
730 printk(KERN_WARNING
"unknown register type\n");
734 case REG_STAR_GLOB_STRING
:
735 printk(KERN_WARNING
"invalid register type for <= binary operator\n");
739 insn
->op
= FILTER_OP_LE_STRING
;
742 if (vstack_bx(stack
)->type
== REG_S64
)
743 insn
->op
= FILTER_OP_LE_S64
;
745 insn
->op
= FILTER_OP_LE_DOUBLE_S64
;
748 if (vstack_bx(stack
)->type
== REG_S64
)
749 insn
->op
= FILTER_OP_LE_S64_DOUBLE
;
751 insn
->op
= FILTER_OP_LE_DOUBLE
;
754 vstack_ax(stack
)->type
= REG_S64
;
755 next_pc
+= sizeof(struct binary_op
);
759 case FILTER_OP_EQ_STRING
:
760 case FILTER_OP_NE_STRING
:
761 case FILTER_OP_GT_STRING
:
762 case FILTER_OP_LT_STRING
:
763 case FILTER_OP_GE_STRING
:
764 case FILTER_OP_LE_STRING
:
765 case FILTER_OP_EQ_STAR_GLOB_STRING
:
766 case FILTER_OP_NE_STAR_GLOB_STRING
:
767 case FILTER_OP_EQ_S64
:
768 case FILTER_OP_NE_S64
:
769 case FILTER_OP_GT_S64
:
770 case FILTER_OP_LT_S64
:
771 case FILTER_OP_GE_S64
:
772 case FILTER_OP_LE_S64
:
773 case FILTER_OP_EQ_DOUBLE
:
774 case FILTER_OP_NE_DOUBLE
:
775 case FILTER_OP_GT_DOUBLE
:
776 case FILTER_OP_LT_DOUBLE
:
777 case FILTER_OP_GE_DOUBLE
:
778 case FILTER_OP_LE_DOUBLE
:
779 case FILTER_OP_EQ_DOUBLE_S64
:
780 case FILTER_OP_NE_DOUBLE_S64
:
781 case FILTER_OP_GT_DOUBLE_S64
:
782 case FILTER_OP_LT_DOUBLE_S64
:
783 case FILTER_OP_GE_DOUBLE_S64
:
784 case FILTER_OP_LE_DOUBLE_S64
:
785 case FILTER_OP_EQ_S64_DOUBLE
:
786 case FILTER_OP_NE_S64_DOUBLE
:
787 case FILTER_OP_GT_S64_DOUBLE
:
788 case FILTER_OP_LT_S64_DOUBLE
:
789 case FILTER_OP_GE_S64_DOUBLE
:
790 case FILTER_OP_LE_S64_DOUBLE
:
791 case FILTER_OP_BIT_RSHIFT
:
792 case FILTER_OP_BIT_LSHIFT
:
793 case FILTER_OP_BIT_AND
:
794 case FILTER_OP_BIT_OR
:
795 case FILTER_OP_BIT_XOR
:
798 if (vstack_pop(stack
)) {
802 vstack_ax(stack
)->type
= REG_S64
;
803 next_pc
+= sizeof(struct binary_op
);
808 case FILTER_OP_UNARY_PLUS
:
810 struct unary_op
*insn
= (struct unary_op
*) pc
;
812 switch(vstack_ax(stack
)->type
) {
814 printk(KERN_WARNING
"unknown register type\n");
819 insn
->op
= FILTER_OP_UNARY_PLUS_S64
;
822 insn
->op
= FILTER_OP_UNARY_PLUS_DOUBLE
;
826 next_pc
+= sizeof(struct unary_op
);
830 case FILTER_OP_UNARY_MINUS
:
832 struct unary_op
*insn
= (struct unary_op
*) pc
;
834 switch(vstack_ax(stack
)->type
) {
836 printk(KERN_WARNING
"unknown register type\n");
841 insn
->op
= FILTER_OP_UNARY_MINUS_S64
;
844 insn
->op
= FILTER_OP_UNARY_MINUS_DOUBLE
;
848 next_pc
+= sizeof(struct unary_op
);
852 case FILTER_OP_UNARY_NOT
:
854 struct unary_op
*insn
= (struct unary_op
*) pc
;
856 switch(vstack_ax(stack
)->type
) {
858 printk(KERN_WARNING
"unknown register type\n");
863 insn
->op
= FILTER_OP_UNARY_NOT_S64
;
866 insn
->op
= FILTER_OP_UNARY_NOT_DOUBLE
;
870 next_pc
+= sizeof(struct unary_op
);
874 case FILTER_OP_UNARY_BIT_NOT
:
877 next_pc
+= sizeof(struct unary_op
);
881 case FILTER_OP_UNARY_PLUS_S64
:
882 case FILTER_OP_UNARY_MINUS_S64
:
883 case FILTER_OP_UNARY_NOT_S64
:
884 case FILTER_OP_UNARY_PLUS_DOUBLE
:
885 case FILTER_OP_UNARY_MINUS_DOUBLE
:
886 case FILTER_OP_UNARY_NOT_DOUBLE
:
889 next_pc
+= sizeof(struct unary_op
);
897 /* Continue to next instruction */
898 /* Pop 1 when jump not taken */
899 if (vstack_pop(stack
)) {
903 next_pc
+= sizeof(struct logical_op
);
908 case FILTER_OP_LOAD_FIELD_REF
:
910 printk(KERN_WARNING
"Unknown field ref type\n");
914 /* get context ref */
915 case FILTER_OP_GET_CONTEXT_REF
:
917 printk(KERN_WARNING
"Unknown get context ref type\n");
921 case FILTER_OP_LOAD_FIELD_REF_STRING
:
922 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
923 case FILTER_OP_GET_CONTEXT_REF_STRING
:
924 case FILTER_OP_LOAD_FIELD_REF_USER_STRING
:
925 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
:
927 if (vstack_push(stack
)) {
931 vstack_ax(stack
)->type
= REG_STRING
;
932 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
935 case FILTER_OP_LOAD_FIELD_REF_S64
:
936 case FILTER_OP_GET_CONTEXT_REF_S64
:
938 if (vstack_push(stack
)) {
942 vstack_ax(stack
)->type
= REG_S64
;
943 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
946 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
947 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
949 if (vstack_push(stack
)) {
953 vstack_ax(stack
)->type
= REG_DOUBLE
;
954 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
958 /* load from immediate operand */
959 case FILTER_OP_LOAD_STRING
:
961 struct load_op
*insn
= (struct load_op
*) pc
;
963 if (vstack_push(stack
)) {
967 vstack_ax(stack
)->type
= REG_STRING
;
968 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
972 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
974 struct load_op
*insn
= (struct load_op
*) pc
;
976 if (vstack_push(stack
)) {
980 vstack_ax(stack
)->type
= REG_STAR_GLOB_STRING
;
981 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
985 case FILTER_OP_LOAD_S64
:
987 if (vstack_push(stack
)) {
991 vstack_ax(stack
)->type
= REG_S64
;
992 next_pc
+= sizeof(struct load_op
)
993 + sizeof(struct literal_numeric
);
997 case FILTER_OP_LOAD_DOUBLE
:
999 if (vstack_push(stack
)) {
1003 vstack_ax(stack
)->type
= REG_DOUBLE
;
1004 next_pc
+= sizeof(struct load_op
)
1005 + sizeof(struct literal_double
);
1010 case FILTER_OP_CAST_TO_S64
:
1012 struct cast_op
*insn
= (struct cast_op
*) pc
;
1014 switch (vstack_ax(stack
)->type
) {
1016 printk(KERN_WARNING
"unknown register type\n");
1021 case REG_STAR_GLOB_STRING
:
1022 printk(KERN_WARNING
"Cast op can only be applied to numeric or floating point registers\n");
1026 insn
->op
= FILTER_OP_CAST_NOP
;
1029 insn
->op
= FILTER_OP_CAST_DOUBLE_TO_S64
;
1033 vstack_ax(stack
)->type
= REG_S64
;
1034 next_pc
+= sizeof(struct cast_op
);
1037 case FILTER_OP_CAST_DOUBLE_TO_S64
:
1040 vstack_ax(stack
)->type
= REG_S64
;
1041 next_pc
+= sizeof(struct cast_op
);
1044 case FILTER_OP_CAST_NOP
:
1046 next_pc
+= sizeof(struct cast_op
);
1051 * Instructions for recursive traversal through composed types.
1053 case FILTER_OP_GET_CONTEXT_ROOT
:
1055 if (vstack_push(stack
)) {
1059 vstack_ax(stack
)->type
= REG_PTR
;
1060 vstack_ax(stack
)->load
.type
= LOAD_ROOT_CONTEXT
;
1061 next_pc
+= sizeof(struct load_op
);
1064 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
1066 if (vstack_push(stack
)) {
1070 vstack_ax(stack
)->type
= REG_PTR
;
1071 vstack_ax(stack
)->load
.type
= LOAD_ROOT_APP_CONTEXT
;
1072 next_pc
+= sizeof(struct load_op
);
1075 case FILTER_OP_GET_PAYLOAD_ROOT
:
1077 if (vstack_push(stack
)) {
1081 vstack_ax(stack
)->type
= REG_PTR
;
1082 vstack_ax(stack
)->load
.type
= LOAD_ROOT_PAYLOAD
;
1083 next_pc
+= sizeof(struct load_op
);
1087 case FILTER_OP_LOAD_FIELD
:
1089 struct load_op
*insn
= (struct load_op
*) pc
;
1091 WARN_ON_ONCE(vstack_ax(stack
)->type
!= REG_PTR
);
1093 ret
= specialize_load_field(vstack_ax(stack
), insn
);
1097 next_pc
+= sizeof(struct load_op
);
1101 case FILTER_OP_LOAD_FIELD_S8
:
1102 case FILTER_OP_LOAD_FIELD_S16
:
1103 case FILTER_OP_LOAD_FIELD_S32
:
1104 case FILTER_OP_LOAD_FIELD_S64
:
1105 case FILTER_OP_LOAD_FIELD_U8
:
1106 case FILTER_OP_LOAD_FIELD_U16
:
1107 case FILTER_OP_LOAD_FIELD_U32
:
1108 case FILTER_OP_LOAD_FIELD_U64
:
1111 vstack_ax(stack
)->type
= REG_S64
;
1112 next_pc
+= sizeof(struct load_op
);
1116 case FILTER_OP_LOAD_FIELD_STRING
:
1117 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
1120 vstack_ax(stack
)->type
= REG_STRING
;
1121 next_pc
+= sizeof(struct load_op
);
1125 case FILTER_OP_LOAD_FIELD_DOUBLE
:
1128 vstack_ax(stack
)->type
= REG_DOUBLE
;
1129 next_pc
+= sizeof(struct load_op
);
1133 case FILTER_OP_GET_SYMBOL
:
1135 struct load_op
*insn
= (struct load_op
*) pc
;
1137 dbg_printk("op get symbol\n");
1138 switch (vstack_ax(stack
)->load
.type
) {
1140 printk(KERN_WARNING
"Nested fields not implemented yet.\n");
1143 case LOAD_ROOT_CONTEXT
:
1144 /* Lookup context field. */
1145 ret
= specialize_context_lookup(ctx
, bytecode
, insn
,
1146 &vstack_ax(stack
)->load
);
1150 case LOAD_ROOT_APP_CONTEXT
:
1153 case LOAD_ROOT_PAYLOAD
:
1154 /* Lookup event payload field. */
1155 ret
= specialize_payload_lookup(event_desc
,
1157 &vstack_ax(stack
)->load
);
1162 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_symbol
);
1166 case FILTER_OP_GET_SYMBOL_FIELD
:
1168 /* Always generated by specialize phase. */
1173 case FILTER_OP_GET_INDEX_U16
:
1175 struct load_op
*insn
= (struct load_op
*) pc
;
1176 struct get_index_u16
*index
= (struct get_index_u16
*) insn
->data
;
1178 dbg_printk("op get index u16\n");
1180 ret
= specialize_get_index(bytecode
, insn
, index
->index
,
1181 vstack_ax(stack
), sizeof(*index
));
1184 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u16
);
1188 case FILTER_OP_GET_INDEX_U64
:
1190 struct load_op
*insn
= (struct load_op
*) pc
;
1191 struct get_index_u64
*index
= (struct get_index_u64
*) insn
->data
;
1193 dbg_printk("op get index u64\n");
1195 ret
= specialize_get_index(bytecode
, insn
, index
->index
,
1196 vstack_ax(stack
), sizeof(*index
));
1199 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u64
);