/*
 * lttng-filter-specialize.c
 *
 * LTTng UST filter code specializer.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "lttng-filter.h"
#include <lttng/align.h>
/*
 * lttng_fls - find last (most significant) set bit.
 *
 * Returns the 1-based position of the highest set bit in @val
 * (e.g. 1 for 0x1, 32 for 0x80000000), or 0 when @val is 0.
 * The value is inspected as an unsigned 32-bit quantity, so
 * negative inputs report bit 32.
 */
static int lttng_fls(int val)
{
	int r = 32;
	unsigned int x = (unsigned int) val;

	if (!x)
		return 0;
	/* Binary search: shift the value left until bit 31 is set. */
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		r -= 1;
	}
	return r;
}
/*
 * get_count_order - ceiling of log2(count).
 *
 * Returns the smallest order such that (1 << order) >= @count.
 * Exact powers of two map to their own order; any other value is
 * rounded up (e.g. 4 -> 2, 5 -> 3). Used to grow allocations by
 * powers of two.
 */
static int get_count_order(unsigned int count)
{
	int order;

	order = lttng_fls(count) - 1;
	if (count & (count - 1))
		order++;	/* not a power of two: round up */
	return order;
}
70 static ssize_t
bytecode_reserve_data(struct bytecode_runtime
*runtime
,
71 size_t align
, size_t len
)
74 size_t padding
= offset_align(runtime
->data_len
, align
);
75 size_t new_len
= runtime
->data_len
+ padding
+ len
;
76 size_t new_alloc_len
= new_len
;
77 size_t old_alloc_len
= runtime
->data_alloc_len
;
79 if (new_len
> FILTER_MAX_DATA_LEN
)
82 if (new_alloc_len
> old_alloc_len
) {
86 max_t(size_t, 1U << get_count_order(new_alloc_len
), old_alloc_len
<< 1);
87 newptr
= realloc(runtime
->data
, new_alloc_len
);
90 runtime
->data
= newptr
;
91 /* We zero directly the memory from start of allocation. */
92 memset(&runtime
->data
[old_alloc_len
], 0, new_alloc_len
- old_alloc_len
);
93 runtime
->data_alloc_len
= new_alloc_len
;
95 runtime
->data_len
+= padding
;
96 ret
= runtime
->data_len
;
97 runtime
->data_len
+= len
;
101 static ssize_t
bytecode_push_data(struct bytecode_runtime
*runtime
,
102 const void *p
, size_t align
, size_t len
)
106 offset
= bytecode_reserve_data(runtime
, align
, len
);
109 memcpy(&runtime
->data
[offset
], p
, len
);
113 static int specialize_load_field(struct vstack_entry
*stack_top
,
114 struct load_op
*insn
)
118 switch (stack_top
->load
.type
) {
121 case LOAD_ROOT_CONTEXT
:
122 case LOAD_ROOT_APP_CONTEXT
:
123 case LOAD_ROOT_PAYLOAD
:
125 dbg_printf("Filter warning: cannot load root, missing field name.\n");
129 switch (stack_top
->load
.object_type
) {
131 dbg_printf("op load field s8\n");
132 stack_top
->type
= REG_S64
;
133 if (!stack_top
->load
.rev_bo
)
134 insn
->op
= FILTER_OP_LOAD_FIELD_S8
;
136 case OBJECT_TYPE_S16
:
137 dbg_printf("op load field s16\n");
138 stack_top
->type
= REG_S64
;
139 if (!stack_top
->load
.rev_bo
)
140 insn
->op
= FILTER_OP_LOAD_FIELD_S16
;
142 case OBJECT_TYPE_S32
:
143 dbg_printf("op load field s32\n");
144 stack_top
->type
= REG_S64
;
145 if (!stack_top
->load
.rev_bo
)
146 insn
->op
= FILTER_OP_LOAD_FIELD_S32
;
148 case OBJECT_TYPE_S64
:
149 dbg_printf("op load field s64\n");
150 stack_top
->type
= REG_S64
;
151 if (!stack_top
->load
.rev_bo
)
152 insn
->op
= FILTER_OP_LOAD_FIELD_S64
;
155 dbg_printf("op load field u8\n");
156 stack_top
->type
= REG_S64
;
157 insn
->op
= FILTER_OP_LOAD_FIELD_U8
;
159 case OBJECT_TYPE_U16
:
160 dbg_printf("op load field u16\n");
161 stack_top
->type
= REG_S64
;
162 if (!stack_top
->load
.rev_bo
)
163 insn
->op
= FILTER_OP_LOAD_FIELD_U16
;
165 case OBJECT_TYPE_U32
:
166 dbg_printf("op load field u32\n");
167 stack_top
->type
= REG_S64
;
168 if (!stack_top
->load
.rev_bo
)
169 insn
->op
= FILTER_OP_LOAD_FIELD_U32
;
171 case OBJECT_TYPE_U64
:
172 dbg_printf("op load field u64\n");
173 stack_top
->type
= REG_S64
;
174 if (!stack_top
->load
.rev_bo
)
175 insn
->op
= FILTER_OP_LOAD_FIELD_U64
;
177 case OBJECT_TYPE_DOUBLE
:
178 stack_top
->type
= REG_DOUBLE
;
179 insn
->op
= FILTER_OP_LOAD_FIELD_DOUBLE
;
181 case OBJECT_TYPE_STRING
:
182 dbg_printf("op load field string\n");
183 stack_top
->type
= REG_STRING
;
184 insn
->op
= FILTER_OP_LOAD_FIELD_STRING
;
186 case OBJECT_TYPE_STRING_SEQUENCE
:
187 dbg_printf("op load field string sequence\n");
188 stack_top
->type
= REG_STRING
;
189 insn
->op
= FILTER_OP_LOAD_FIELD_SEQUENCE
;
191 case OBJECT_TYPE_DYNAMIC
:
192 dbg_printf("op load field dynamic\n");
193 stack_top
->type
= REG_UNKNOWN
;
194 /* Don't specialize load op. */
196 case OBJECT_TYPE_SEQUENCE
:
197 case OBJECT_TYPE_ARRAY
:
198 case OBJECT_TYPE_STRUCT
:
199 case OBJECT_TYPE_VARIANT
:
200 ERR("Sequences, arrays, struct and variant cannot be loaded (nested types).");
210 static int specialize_get_index_object_type(enum object_type
*otype
,
211 int signedness
, uint32_t elem_len
)
216 *otype
= OBJECT_TYPE_S8
;
218 *otype
= OBJECT_TYPE_U8
;
222 *otype
= OBJECT_TYPE_S16
;
224 *otype
= OBJECT_TYPE_U16
;
228 *otype
= OBJECT_TYPE_S32
;
230 *otype
= OBJECT_TYPE_U32
;
234 *otype
= OBJECT_TYPE_S64
;
236 *otype
= OBJECT_TYPE_U64
;
244 static int specialize_get_index(struct bytecode_runtime
*runtime
,
245 struct load_op
*insn
, uint64_t index
,
246 struct vstack_entry
*stack_top
,
250 struct filter_get_index_data gid
;
253 memset(&gid
, 0, sizeof(gid
));
254 switch (stack_top
->load
.type
) {
256 switch (stack_top
->load
.object_type
) {
257 case OBJECT_TYPE_ARRAY
:
259 const struct lttng_event_field
*field
;
260 uint32_t elem_len
, num_elems
;
263 field
= stack_top
->load
.field
;
264 elem_len
= field
->type
.u
.array
.elem_type
.u
.basic
.integer
.size
;
265 signedness
= field
->type
.u
.array
.elem_type
.u
.basic
.integer
.signedness
;
266 num_elems
= field
->type
.u
.array
.length
;
267 if (index
>= num_elems
) {
271 ret
= specialize_get_index_object_type(&stack_top
->load
.object_type
,
272 signedness
, elem_len
);
275 gid
.offset
= index
* (elem_len
/ CHAR_BIT
);
276 gid
.array_len
= num_elems
* (elem_len
/ CHAR_BIT
);
277 gid
.elem
.type
= stack_top
->load
.object_type
;
278 gid
.elem
.len
= elem_len
;
279 if (field
->type
.u
.array
.elem_type
.u
.basic
.integer
.reverse_byte_order
)
280 gid
.elem
.rev_bo
= true;
281 stack_top
->load
.rev_bo
= gid
.elem
.rev_bo
;
284 case OBJECT_TYPE_SEQUENCE
:
286 const struct lttng_event_field
*field
;
290 field
= stack_top
->load
.field
;
291 elem_len
= field
->type
.u
.sequence
.elem_type
.u
.basic
.integer
.size
;
292 signedness
= field
->type
.u
.sequence
.elem_type
.u
.basic
.integer
.signedness
;
293 ret
= specialize_get_index_object_type(&stack_top
->load
.object_type
,
294 signedness
, elem_len
);
297 gid
.offset
= index
* (elem_len
/ CHAR_BIT
);
298 gid
.elem
.type
= stack_top
->load
.object_type
;
299 gid
.elem
.len
= elem_len
;
300 if (field
->type
.u
.sequence
.elem_type
.u
.basic
.integer
.reverse_byte_order
)
301 gid
.elem
.rev_bo
= true;
302 stack_top
->load
.rev_bo
= gid
.elem
.rev_bo
;
305 case OBJECT_TYPE_STRUCT
:
306 /* Only generated by the specialize phase. */
307 case OBJECT_TYPE_VARIANT
: /* Fall-through */
309 ERR("Unexpected get index type %d",
310 (int) stack_top
->load
.object_type
);
315 case LOAD_ROOT_CONTEXT
:
316 case LOAD_ROOT_APP_CONTEXT
:
317 case LOAD_ROOT_PAYLOAD
:
318 ERR("Index lookup for root field not implemented yet.");
322 data_offset
= bytecode_push_data(runtime
, &gid
,
323 __alignof__(gid
), sizeof(gid
));
324 if (data_offset
< 0) {
330 ((struct get_index_u16
*) insn
->data
)->index
= data_offset
;
333 ((struct get_index_u64
*) insn
->data
)->index
= data_offset
;
346 static int specialize_context_lookup_name(struct lttng_ctx
*ctx
,
347 struct bytecode_runtime
*bytecode
,
348 struct load_op
*insn
)
353 offset
= ((struct get_symbol
*) insn
->data
)->offset
;
354 name
= bytecode
->p
.bc
->bc
.data
+ bytecode
->p
.bc
->bc
.reloc_offset
+ offset
;
355 return lttng_get_context_index(ctx
, name
);
358 static int specialize_load_object(const struct lttng_event_field
*field
,
359 struct vstack_load
*load
, bool is_context
)
361 load
->type
= LOAD_OBJECT
;
363 * LTTng-UST layout all integer fields as s64 on the stack for the filter.
365 switch (field
->type
.atype
) {
367 if (field
->type
.u
.basic
.integer
.signedness
)
368 load
->object_type
= OBJECT_TYPE_S64
;
370 load
->object_type
= OBJECT_TYPE_U64
;
371 load
->rev_bo
= false;
375 const struct lttng_integer_type
*itype
=
376 &field
->type
.u
.basic
.enumeration
.container_type
;
378 if (itype
->signedness
)
379 load
->object_type
= OBJECT_TYPE_S64
;
381 load
->object_type
= OBJECT_TYPE_U64
;
382 load
->rev_bo
= false;
386 if (field
->type
.u
.array
.elem_type
.atype
!= atype_integer
) {
387 ERR("Array nesting only supports integer types.");
391 load
->object_type
= OBJECT_TYPE_STRING
;
393 if (field
->type
.u
.array
.elem_type
.u
.basic
.integer
.encoding
== lttng_encode_none
) {
394 load
->object_type
= OBJECT_TYPE_ARRAY
;
397 load
->object_type
= OBJECT_TYPE_STRING_SEQUENCE
;
402 if (field
->type
.u
.sequence
.elem_type
.atype
!= atype_integer
) {
403 ERR("Sequence nesting only supports integer types.");
407 load
->object_type
= OBJECT_TYPE_STRING
;
409 if (field
->type
.u
.sequence
.elem_type
.u
.basic
.integer
.encoding
== lttng_encode_none
) {
410 load
->object_type
= OBJECT_TYPE_SEQUENCE
;
413 load
->object_type
= OBJECT_TYPE_STRING_SEQUENCE
;
418 load
->object_type
= OBJECT_TYPE_STRING
;
421 load
->object_type
= OBJECT_TYPE_DOUBLE
;
424 load
->object_type
= OBJECT_TYPE_DYNAMIC
;
427 ERR("Structure type cannot be loaded.");
430 ERR("Unknown type: %d", (int) field
->type
.atype
);
436 static int specialize_context_lookup(struct lttng_session
*session
,
437 struct bytecode_runtime
*runtime
,
438 struct load_op
*insn
,
439 struct vstack_load
*load
)
442 struct lttng_ctx_field
*ctx_field
;
443 struct lttng_event_field
*field
;
444 struct filter_get_index_data gid
;
447 idx
= specialize_context_lookup_name(session
->ctx
, runtime
, insn
);
451 ctx_field
= &session
->ctx
->fields
[idx
];
452 field
= &ctx_field
->event_field
;
453 ret
= specialize_load_object(field
, load
, true);
456 /* Specialize each get_symbol into a get_index. */
457 insn
->op
= FILTER_OP_GET_INDEX_U16
;
458 memset(&gid
, 0, sizeof(gid
));
460 gid
.elem
.type
= load
->object_type
;
461 data_offset
= bytecode_push_data(runtime
, &gid
,
462 __alignof__(gid
), sizeof(gid
));
463 if (data_offset
< 0) {
466 ((struct get_index_u16
*) insn
->data
)->index
= data_offset
;
470 static int specialize_app_context_lookup(struct lttng_session
*session
,
471 struct bytecode_runtime
*runtime
,
472 struct load_op
*insn
,
473 struct vstack_load
*load
)
476 const char *orig_name
;
479 struct lttng_ctx_field
*ctx_field
;
480 struct lttng_event_field
*field
;
481 struct filter_get_index_data gid
;
484 offset
= ((struct get_symbol
*) insn
->data
)->offset
;
485 orig_name
= runtime
->p
.bc
->bc
.data
+ runtime
->p
.bc
->bc
.reloc_offset
+ offset
;
486 name
= zmalloc(strlen(orig_name
) + strlen("$app.") + 1);
491 strcpy(name
, "$app.");
492 strcat(name
, orig_name
);
493 idx
= lttng_get_context_index(session
->ctx
, name
);
495 assert(lttng_context_is_app(name
));
496 ret
= lttng_ust_add_app_context_to_ctx_rcu(name
,
500 idx
= lttng_get_context_index(session
->ctx
,
505 ctx_field
= &session
->ctx
->fields
[idx
];
506 field
= &ctx_field
->event_field
;
507 ret
= specialize_load_object(field
, load
, true);
510 /* Specialize each get_symbol into a get_index. */
511 insn
->op
= FILTER_OP_GET_INDEX_U16
;
512 memset(&gid
, 0, sizeof(gid
));
514 gid
.elem
.type
= load
->object_type
;
515 data_offset
= bytecode_push_data(runtime
, &gid
,
516 __alignof__(gid
), sizeof(gid
));
517 if (data_offset
< 0) {
521 ((struct get_index_u16
*) insn
->data
)->index
= data_offset
;
528 static int specialize_event_payload_lookup(struct lttng_event
*event
,
529 struct bytecode_runtime
*runtime
,
530 struct load_op
*insn
,
531 struct vstack_load
*load
)
535 const struct lttng_event_desc
*desc
= event
->desc
;
536 unsigned int i
, nr_fields
;
538 uint32_t field_offset
= 0;
539 const struct lttng_event_field
*field
;
541 struct filter_get_index_data gid
;
544 nr_fields
= desc
->nr_fields
;
545 offset
= ((struct get_symbol
*) insn
->data
)->offset
;
546 name
= runtime
->p
.bc
->bc
.data
+ runtime
->p
.bc
->bc
.reloc_offset
+ offset
;
547 for (i
= 0; i
< nr_fields
; i
++) {
548 field
= &desc
->fields
[i
];
549 if (!strcmp(field
->name
, name
)) {
553 /* compute field offset on stack */
554 switch (field
->type
.atype
) {
557 field_offset
+= sizeof(int64_t);
561 field_offset
+= sizeof(unsigned long);
562 field_offset
+= sizeof(void *);
565 field_offset
+= sizeof(void *);
568 field_offset
+= sizeof(double);
580 ret
= specialize_load_object(field
, load
, false);
584 /* Specialize each get_symbol into a get_index. */
585 insn
->op
= FILTER_OP_GET_INDEX_U16
;
586 memset(&gid
, 0, sizeof(gid
));
587 gid
.offset
= field_offset
;
588 gid
.elem
.type
= load
->object_type
;
589 data_offset
= bytecode_push_data(runtime
, &gid
,
590 __alignof__(gid
), sizeof(gid
));
591 if (data_offset
< 0) {
595 ((struct get_index_u16
*) insn
->data
)->index
= data_offset
;
601 int lttng_filter_specialize_bytecode(struct lttng_event
*event
,
602 struct bytecode_runtime
*bytecode
)
604 void *pc
, *next_pc
, *start_pc
;
606 struct vstack _stack
;
607 struct vstack
*stack
= &_stack
;
608 struct lttng_session
*session
= bytecode
->p
.session
;
612 start_pc
= &bytecode
->code
[0];
613 for (pc
= next_pc
= start_pc
; pc
- start_pc
< bytecode
->len
;
615 switch (*(filter_opcode_t
*) pc
) {
616 case FILTER_OP_UNKNOWN
:
618 ERR("unknown bytecode op %u\n",
619 (unsigned int) *(filter_opcode_t
*) pc
);
623 case FILTER_OP_RETURN
:
624 if (vstack_ax(stack
)->type
== REG_S64
)
625 *(filter_opcode_t
*) pc
= FILTER_OP_RETURN_S64
;
629 case FILTER_OP_RETURN_S64
:
630 if (vstack_ax(stack
)->type
!= REG_S64
) {
631 ERR("Unexpected register type\n");
643 case FILTER_OP_MINUS
:
644 ERR("unsupported bytecode op %u\n",
645 (unsigned int) *(filter_opcode_t
*) pc
);
651 struct binary_op
*insn
= (struct binary_op
*) pc
;
653 switch(vstack_ax(stack
)->type
) {
655 ERR("unknown register type\n");
660 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
662 if (vstack_bx(stack
)->type
== REG_STAR_GLOB_STRING
)
663 insn
->op
= FILTER_OP_EQ_STAR_GLOB_STRING
;
665 insn
->op
= FILTER_OP_EQ_STRING
;
667 case REG_STAR_GLOB_STRING
:
668 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
670 insn
->op
= FILTER_OP_EQ_STAR_GLOB_STRING
;
673 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
675 if (vstack_bx(stack
)->type
== REG_S64
)
676 insn
->op
= FILTER_OP_EQ_S64
;
678 insn
->op
= FILTER_OP_EQ_DOUBLE_S64
;
681 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
683 if (vstack_bx(stack
)->type
== REG_S64
)
684 insn
->op
= FILTER_OP_EQ_S64_DOUBLE
;
686 insn
->op
= FILTER_OP_EQ_DOUBLE
;
689 break; /* Dynamic typing. */
692 if (vstack_pop(stack
)) {
696 vstack_ax(stack
)->type
= REG_S64
;
697 next_pc
+= sizeof(struct binary_op
);
703 struct binary_op
*insn
= (struct binary_op
*) pc
;
705 switch(vstack_ax(stack
)->type
) {
707 ERR("unknown register type\n");
712 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
714 if (vstack_bx(stack
)->type
== REG_STAR_GLOB_STRING
)
715 insn
->op
= FILTER_OP_NE_STAR_GLOB_STRING
;
717 insn
->op
= FILTER_OP_NE_STRING
;
719 case REG_STAR_GLOB_STRING
:
720 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
722 insn
->op
= FILTER_OP_NE_STAR_GLOB_STRING
;
725 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
727 if (vstack_bx(stack
)->type
== REG_S64
)
728 insn
->op
= FILTER_OP_NE_S64
;
730 insn
->op
= FILTER_OP_NE_DOUBLE_S64
;
733 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
735 if (vstack_bx(stack
)->type
== REG_S64
)
736 insn
->op
= FILTER_OP_NE_S64_DOUBLE
;
738 insn
->op
= FILTER_OP_NE_DOUBLE
;
741 break; /* Dynamic typing. */
744 if (vstack_pop(stack
)) {
748 vstack_ax(stack
)->type
= REG_S64
;
749 next_pc
+= sizeof(struct binary_op
);
755 struct binary_op
*insn
= (struct binary_op
*) pc
;
757 switch(vstack_ax(stack
)->type
) {
759 ERR("unknown register type\n");
763 case REG_STAR_GLOB_STRING
:
764 ERR("invalid register type for > binary operator\n");
768 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
770 insn
->op
= FILTER_OP_GT_STRING
;
773 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
775 if (vstack_bx(stack
)->type
== REG_S64
)
776 insn
->op
= FILTER_OP_GT_S64
;
778 insn
->op
= FILTER_OP_GT_DOUBLE_S64
;
781 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
783 if (vstack_bx(stack
)->type
== REG_S64
)
784 insn
->op
= FILTER_OP_GT_S64_DOUBLE
;
786 insn
->op
= FILTER_OP_GT_DOUBLE
;
789 break; /* Dynamic typing. */
792 if (vstack_pop(stack
)) {
796 vstack_ax(stack
)->type
= REG_S64
;
797 next_pc
+= sizeof(struct binary_op
);
803 struct binary_op
*insn
= (struct binary_op
*) pc
;
805 switch(vstack_ax(stack
)->type
) {
807 ERR("unknown register type\n");
811 case REG_STAR_GLOB_STRING
:
812 ERR("invalid register type for < binary operator\n");
816 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
818 insn
->op
= FILTER_OP_LT_STRING
;
821 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
823 if (vstack_bx(stack
)->type
== REG_S64
)
824 insn
->op
= FILTER_OP_LT_S64
;
826 insn
->op
= FILTER_OP_LT_DOUBLE_S64
;
829 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
831 if (vstack_bx(stack
)->type
== REG_S64
)
832 insn
->op
= FILTER_OP_LT_S64_DOUBLE
;
834 insn
->op
= FILTER_OP_LT_DOUBLE
;
837 break; /* Dynamic typing. */
840 if (vstack_pop(stack
)) {
844 vstack_ax(stack
)->type
= REG_S64
;
845 next_pc
+= sizeof(struct binary_op
);
851 struct binary_op
*insn
= (struct binary_op
*) pc
;
853 switch(vstack_ax(stack
)->type
) {
855 ERR("unknown register type\n");
859 case REG_STAR_GLOB_STRING
:
860 ERR("invalid register type for >= binary operator\n");
864 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
866 insn
->op
= FILTER_OP_GE_STRING
;
869 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
871 if (vstack_bx(stack
)->type
== REG_S64
)
872 insn
->op
= FILTER_OP_GE_S64
;
874 insn
->op
= FILTER_OP_GE_DOUBLE_S64
;
877 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
879 if (vstack_bx(stack
)->type
== REG_S64
)
880 insn
->op
= FILTER_OP_GE_S64_DOUBLE
;
882 insn
->op
= FILTER_OP_GE_DOUBLE
;
885 break; /* Dynamic typing. */
888 if (vstack_pop(stack
)) {
892 vstack_ax(stack
)->type
= REG_S64
;
893 next_pc
+= sizeof(struct binary_op
);
898 struct binary_op
*insn
= (struct binary_op
*) pc
;
900 switch(vstack_ax(stack
)->type
) {
902 ERR("unknown register type\n");
906 case REG_STAR_GLOB_STRING
:
907 ERR("invalid register type for <= binary operator\n");
911 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
913 insn
->op
= FILTER_OP_LE_STRING
;
916 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
918 if (vstack_bx(stack
)->type
== REG_S64
)
919 insn
->op
= FILTER_OP_LE_S64
;
921 insn
->op
= FILTER_OP_LE_DOUBLE_S64
;
924 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
926 if (vstack_bx(stack
)->type
== REG_S64
)
927 insn
->op
= FILTER_OP_LE_S64_DOUBLE
;
929 insn
->op
= FILTER_OP_LE_DOUBLE
;
932 break; /* Dynamic typing. */
934 vstack_ax(stack
)->type
= REG_S64
;
935 next_pc
+= sizeof(struct binary_op
);
939 case FILTER_OP_EQ_STRING
:
940 case FILTER_OP_NE_STRING
:
941 case FILTER_OP_GT_STRING
:
942 case FILTER_OP_LT_STRING
:
943 case FILTER_OP_GE_STRING
:
944 case FILTER_OP_LE_STRING
:
945 case FILTER_OP_EQ_STAR_GLOB_STRING
:
946 case FILTER_OP_NE_STAR_GLOB_STRING
:
947 case FILTER_OP_EQ_S64
:
948 case FILTER_OP_NE_S64
:
949 case FILTER_OP_GT_S64
:
950 case FILTER_OP_LT_S64
:
951 case FILTER_OP_GE_S64
:
952 case FILTER_OP_LE_S64
:
953 case FILTER_OP_EQ_DOUBLE
:
954 case FILTER_OP_NE_DOUBLE
:
955 case FILTER_OP_GT_DOUBLE
:
956 case FILTER_OP_LT_DOUBLE
:
957 case FILTER_OP_GE_DOUBLE
:
958 case FILTER_OP_LE_DOUBLE
:
959 case FILTER_OP_EQ_DOUBLE_S64
:
960 case FILTER_OP_NE_DOUBLE_S64
:
961 case FILTER_OP_GT_DOUBLE_S64
:
962 case FILTER_OP_LT_DOUBLE_S64
:
963 case FILTER_OP_GE_DOUBLE_S64
:
964 case FILTER_OP_LE_DOUBLE_S64
:
965 case FILTER_OP_EQ_S64_DOUBLE
:
966 case FILTER_OP_NE_S64_DOUBLE
:
967 case FILTER_OP_GT_S64_DOUBLE
:
968 case FILTER_OP_LT_S64_DOUBLE
:
969 case FILTER_OP_GE_S64_DOUBLE
:
970 case FILTER_OP_LE_S64_DOUBLE
:
971 case FILTER_OP_BIT_RSHIFT
:
972 case FILTER_OP_BIT_LSHIFT
:
973 case FILTER_OP_BIT_AND
:
974 case FILTER_OP_BIT_OR
:
975 case FILTER_OP_BIT_XOR
:
978 if (vstack_pop(stack
)) {
982 vstack_ax(stack
)->type
= REG_S64
;
983 next_pc
+= sizeof(struct binary_op
);
988 case FILTER_OP_UNARY_PLUS
:
990 struct unary_op
*insn
= (struct unary_op
*) pc
;
992 switch(vstack_ax(stack
)->type
) {
994 ERR("unknown register type\n");
999 insn
->op
= FILTER_OP_UNARY_PLUS_S64
;
1002 insn
->op
= FILTER_OP_UNARY_PLUS_DOUBLE
;
1004 case REG_UNKNOWN
: /* Dynamic typing. */
1008 next_pc
+= sizeof(struct unary_op
);
1012 case FILTER_OP_UNARY_MINUS
:
1014 struct unary_op
*insn
= (struct unary_op
*) pc
;
1016 switch(vstack_ax(stack
)->type
) {
1018 ERR("unknown register type\n");
1023 insn
->op
= FILTER_OP_UNARY_MINUS_S64
;
1026 insn
->op
= FILTER_OP_UNARY_MINUS_DOUBLE
;
1028 case REG_UNKNOWN
: /* Dynamic typing. */
1032 next_pc
+= sizeof(struct unary_op
);
1036 case FILTER_OP_UNARY_NOT
:
1038 struct unary_op
*insn
= (struct unary_op
*) pc
;
1040 switch(vstack_ax(stack
)->type
) {
1042 ERR("unknown register type\n");
1047 insn
->op
= FILTER_OP_UNARY_NOT_S64
;
1050 insn
->op
= FILTER_OP_UNARY_NOT_DOUBLE
;
1052 case REG_UNKNOWN
: /* Dynamic typing. */
1056 next_pc
+= sizeof(struct unary_op
);
1060 case FILTER_OP_UNARY_BIT_NOT
:
1063 next_pc
+= sizeof(struct unary_op
);
1067 case FILTER_OP_UNARY_PLUS_S64
:
1068 case FILTER_OP_UNARY_MINUS_S64
:
1069 case FILTER_OP_UNARY_NOT_S64
:
1070 case FILTER_OP_UNARY_PLUS_DOUBLE
:
1071 case FILTER_OP_UNARY_MINUS_DOUBLE
:
1072 case FILTER_OP_UNARY_NOT_DOUBLE
:
1075 next_pc
+= sizeof(struct unary_op
);
1083 /* Continue to next instruction */
1084 /* Pop 1 when jump not taken */
1085 if (vstack_pop(stack
)) {
1089 next_pc
+= sizeof(struct logical_op
);
1093 /* load field ref */
1094 case FILTER_OP_LOAD_FIELD_REF
:
1096 ERR("Unknown field ref type\n");
1100 /* get context ref */
1101 case FILTER_OP_GET_CONTEXT_REF
:
1103 if (vstack_push(stack
)) {
1107 vstack_ax(stack
)->type
= REG_UNKNOWN
;
1108 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1111 case FILTER_OP_LOAD_FIELD_REF_STRING
:
1112 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
1113 case FILTER_OP_GET_CONTEXT_REF_STRING
:
1115 if (vstack_push(stack
)) {
1119 vstack_ax(stack
)->type
= REG_STRING
;
1120 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1123 case FILTER_OP_LOAD_FIELD_REF_S64
:
1124 case FILTER_OP_GET_CONTEXT_REF_S64
:
1126 if (vstack_push(stack
)) {
1130 vstack_ax(stack
)->type
= REG_S64
;
1131 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1134 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
1135 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
1137 if (vstack_push(stack
)) {
1141 vstack_ax(stack
)->type
= REG_DOUBLE
;
1142 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1146 /* load from immediate operand */
1147 case FILTER_OP_LOAD_STRING
:
1149 struct load_op
*insn
= (struct load_op
*) pc
;
1151 if (vstack_push(stack
)) {
1155 vstack_ax(stack
)->type
= REG_STRING
;
1156 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1160 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
1162 struct load_op
*insn
= (struct load_op
*) pc
;
1164 if (vstack_push(stack
)) {
1168 vstack_ax(stack
)->type
= REG_STAR_GLOB_STRING
;
1169 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1173 case FILTER_OP_LOAD_S64
:
1175 if (vstack_push(stack
)) {
1179 vstack_ax(stack
)->type
= REG_S64
;
1180 next_pc
+= sizeof(struct load_op
)
1181 + sizeof(struct literal_numeric
);
1185 case FILTER_OP_LOAD_DOUBLE
:
1187 if (vstack_push(stack
)) {
1191 vstack_ax(stack
)->type
= REG_DOUBLE
;
1192 next_pc
+= sizeof(struct load_op
)
1193 + sizeof(struct literal_double
);
1198 case FILTER_OP_CAST_TO_S64
:
1200 struct cast_op
*insn
= (struct cast_op
*) pc
;
1202 switch (vstack_ax(stack
)->type
) {
1204 ERR("unknown register type\n");
1209 case REG_STAR_GLOB_STRING
:
1210 ERR("Cast op can only be applied to numeric or floating point registers\n");
1214 insn
->op
= FILTER_OP_CAST_NOP
;
1217 insn
->op
= FILTER_OP_CAST_DOUBLE_TO_S64
;
1223 vstack_ax(stack
)->type
= REG_S64
;
1224 next_pc
+= sizeof(struct cast_op
);
1227 case FILTER_OP_CAST_DOUBLE_TO_S64
:
1230 vstack_ax(stack
)->type
= REG_S64
;
1231 next_pc
+= sizeof(struct cast_op
);
1234 case FILTER_OP_CAST_NOP
:
1236 next_pc
+= sizeof(struct cast_op
);
1241 * Instructions for recursive traversal through composed types.
1243 case FILTER_OP_GET_CONTEXT_ROOT
:
1245 if (vstack_push(stack
)) {
1249 vstack_ax(stack
)->type
= REG_PTR
;
1250 vstack_ax(stack
)->load
.type
= LOAD_ROOT_CONTEXT
;
1251 next_pc
+= sizeof(struct load_op
);
1254 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
1256 if (vstack_push(stack
)) {
1260 vstack_ax(stack
)->type
= REG_PTR
;
1261 vstack_ax(stack
)->load
.type
= LOAD_ROOT_APP_CONTEXT
;
1262 next_pc
+= sizeof(struct load_op
);
1265 case FILTER_OP_GET_PAYLOAD_ROOT
:
1267 if (vstack_push(stack
)) {
1271 vstack_ax(stack
)->type
= REG_PTR
;
1272 vstack_ax(stack
)->load
.type
= LOAD_ROOT_PAYLOAD
;
1273 next_pc
+= sizeof(struct load_op
);
1277 case FILTER_OP_LOAD_FIELD
:
1279 struct load_op
*insn
= (struct load_op
*) pc
;
1281 assert(vstack_ax(stack
)->type
== REG_PTR
);
1283 ret
= specialize_load_field(vstack_ax(stack
), insn
);
1287 next_pc
+= sizeof(struct load_op
);
1291 case FILTER_OP_LOAD_FIELD_S8
:
1292 case FILTER_OP_LOAD_FIELD_S16
:
1293 case FILTER_OP_LOAD_FIELD_S32
:
1294 case FILTER_OP_LOAD_FIELD_S64
:
1295 case FILTER_OP_LOAD_FIELD_U8
:
1296 case FILTER_OP_LOAD_FIELD_U16
:
1297 case FILTER_OP_LOAD_FIELD_U32
:
1298 case FILTER_OP_LOAD_FIELD_U64
:
1301 vstack_ax(stack
)->type
= REG_S64
;
1302 next_pc
+= sizeof(struct load_op
);
1306 case FILTER_OP_LOAD_FIELD_STRING
:
1307 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
1310 vstack_ax(stack
)->type
= REG_STRING
;
1311 next_pc
+= sizeof(struct load_op
);
1315 case FILTER_OP_LOAD_FIELD_DOUBLE
:
1318 vstack_ax(stack
)->type
= REG_DOUBLE
;
1319 next_pc
+= sizeof(struct load_op
);
1323 case FILTER_OP_GET_SYMBOL
:
1325 struct load_op
*insn
= (struct load_op
*) pc
;
1327 dbg_printf("op get symbol\n");
1328 switch (vstack_ax(stack
)->load
.type
) {
1330 ERR("Nested fields not implemented yet.");
1333 case LOAD_ROOT_CONTEXT
:
1334 /* Lookup context field. */
1335 ret
= specialize_context_lookup(session
,
1337 &vstack_ax(stack
)->load
);
1341 case LOAD_ROOT_APP_CONTEXT
:
1342 /* Lookup app context field. */
1343 ret
= specialize_app_context_lookup(session
,
1345 &vstack_ax(stack
)->load
);
1349 case LOAD_ROOT_PAYLOAD
:
1350 /* Lookup event payload field. */
1351 ret
= specialize_event_payload_lookup(event
,
1353 &vstack_ax(stack
)->load
);
1358 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_symbol
);
1362 case FILTER_OP_GET_SYMBOL_FIELD
:
1364 /* Always generated by specialize phase. */
1369 case FILTER_OP_GET_INDEX_U16
:
1371 struct load_op
*insn
= (struct load_op
*) pc
;
1372 struct get_index_u16
*index
= (struct get_index_u16
*) insn
->data
;
1374 dbg_printf("op get index u16\n");
1376 ret
= specialize_get_index(bytecode
, insn
, index
->index
,
1377 vstack_ax(stack
), sizeof(*index
));
1380 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u16
);
1384 case FILTER_OP_GET_INDEX_U64
:
1386 struct load_op
*insn
= (struct load_op
*) pc
;
1387 struct get_index_u64
*index
= (struct get_index_u64
*) insn
->data
;
1389 dbg_printf("op get index u64\n");
1391 ret
= specialize_get_index(bytecode
, insn
, index
->index
,
1392 vstack_ax(stack
), sizeof(*index
));
1395 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u64
);