/*
 * lttng-filter-specialize.c
 *
 * LTTng UST filter code specializer.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
31 #include "lttng-filter.h"
32 #include <lttng/align.h>
/*
 * lttng_fls - find last (most significant) set bit.
 *
 * Returns the 1-based position of the highest set bit of @val
 * (interpreted as unsigned), or 0 if no bit is set.
 * E.g. lttng_fls(1) == 1, lttng_fls(0x80000000) == 32.
 *
 * Binary-search narrowing: each test halves the remaining bit range by
 * shifting the value up and decreasing the result accordingly.
 */
static int lttng_fls(int val)
{
	int r = 32;
	unsigned int x = (unsigned int) val;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
/*
 * get_count_order - order of the smallest power of two >= @count.
 *
 * Returns ceil(log2(count)). Uses lttng_fls() for the position of the
 * most significant set bit, then bumps the order by one when @count is
 * not already a power of two (i.e. count & (count - 1) != 0).
 * Used to round allocation sizes up to a power of two.
 */
static int get_count_order(unsigned int count)
{
	int order;

	order = lttng_fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}
73 static ssize_t
bytecode_reserve_data(struct bytecode_runtime
*runtime
,
74 size_t align
, size_t len
)
77 size_t padding
= offset_align(runtime
->data_len
, align
);
78 size_t new_len
= runtime
->data_len
+ padding
+ len
;
79 size_t new_alloc_len
= new_len
;
80 size_t old_alloc_len
= runtime
->data_alloc_len
;
82 if (new_len
> FILTER_MAX_DATA_LEN
)
85 if (new_alloc_len
> old_alloc_len
) {
89 max_t(size_t, 1U << get_count_order(new_alloc_len
), old_alloc_len
<< 1);
90 newptr
= realloc(runtime
->data
, new_alloc_len
);
93 runtime
->data
= newptr
;
94 /* We zero directly the memory from start of allocation. */
95 memset(&runtime
->data
[old_alloc_len
], 0, new_alloc_len
- old_alloc_len
);
96 runtime
->data_alloc_len
= new_alloc_len
;
98 runtime
->data_len
+= padding
;
99 ret
= runtime
->data_len
;
100 runtime
->data_len
+= len
;
104 static ssize_t
bytecode_push_data(struct bytecode_runtime
*runtime
,
105 const void *p
, size_t align
, size_t len
)
109 offset
= bytecode_reserve_data(runtime
, align
, len
);
112 memcpy(&runtime
->data
[offset
], p
, len
);
116 static int specialize_load_field(struct vstack_entry
*stack_top
,
117 struct load_op
*insn
)
121 switch (stack_top
->load
.type
) {
124 case LOAD_ROOT_CONTEXT
:
125 case LOAD_ROOT_APP_CONTEXT
:
126 case LOAD_ROOT_PAYLOAD
:
128 dbg_printf("Filter warning: cannot load root, missing field name.\n");
132 switch (stack_top
->load
.object_type
) {
134 dbg_printf("op load field s8\n");
135 stack_top
->type
= REG_S64
;
136 if (!stack_top
->load
.rev_bo
)
137 insn
->op
= FILTER_OP_LOAD_FIELD_S8
;
139 case OBJECT_TYPE_S16
:
140 dbg_printf("op load field s16\n");
141 stack_top
->type
= REG_S64
;
142 if (!stack_top
->load
.rev_bo
)
143 insn
->op
= FILTER_OP_LOAD_FIELD_S16
;
145 case OBJECT_TYPE_S32
:
146 dbg_printf("op load field s32\n");
147 stack_top
->type
= REG_S64
;
148 if (!stack_top
->load
.rev_bo
)
149 insn
->op
= FILTER_OP_LOAD_FIELD_S32
;
151 case OBJECT_TYPE_S64
:
152 dbg_printf("op load field s64\n");
153 stack_top
->type
= REG_S64
;
154 if (!stack_top
->load
.rev_bo
)
155 insn
->op
= FILTER_OP_LOAD_FIELD_S64
;
158 dbg_printf("op load field u8\n");
159 stack_top
->type
= REG_S64
;
160 insn
->op
= FILTER_OP_LOAD_FIELD_U8
;
162 case OBJECT_TYPE_U16
:
163 dbg_printf("op load field u16\n");
164 stack_top
->type
= REG_S64
;
165 if (!stack_top
->load
.rev_bo
)
166 insn
->op
= FILTER_OP_LOAD_FIELD_U16
;
168 case OBJECT_TYPE_U32
:
169 dbg_printf("op load field u32\n");
170 stack_top
->type
= REG_S64
;
171 if (!stack_top
->load
.rev_bo
)
172 insn
->op
= FILTER_OP_LOAD_FIELD_U32
;
174 case OBJECT_TYPE_U64
:
175 dbg_printf("op load field u64\n");
176 stack_top
->type
= REG_S64
;
177 if (!stack_top
->load
.rev_bo
)
178 insn
->op
= FILTER_OP_LOAD_FIELD_U64
;
180 case OBJECT_TYPE_DOUBLE
:
181 stack_top
->type
= REG_DOUBLE
;
182 insn
->op
= FILTER_OP_LOAD_FIELD_DOUBLE
;
184 case OBJECT_TYPE_STRING
:
185 dbg_printf("op load field string\n");
186 stack_top
->type
= REG_STRING
;
187 insn
->op
= FILTER_OP_LOAD_FIELD_STRING
;
189 case OBJECT_TYPE_STRING_SEQUENCE
:
190 dbg_printf("op load field string sequence\n");
191 stack_top
->type
= REG_STRING
;
192 insn
->op
= FILTER_OP_LOAD_FIELD_SEQUENCE
;
194 case OBJECT_TYPE_DYNAMIC
:
195 dbg_printf("op load field dynamic\n");
196 stack_top
->type
= REG_UNKNOWN
;
197 /* Don't specialize load op. */
199 case OBJECT_TYPE_SEQUENCE
:
200 case OBJECT_TYPE_ARRAY
:
201 case OBJECT_TYPE_STRUCT
:
202 case OBJECT_TYPE_VARIANT
:
203 ERR("Sequences, arrays, struct and variant cannot be loaded (nested types).");
213 static int specialize_get_index_object_type(enum object_type
*otype
,
214 int signedness
, uint32_t elem_len
)
219 *otype
= OBJECT_TYPE_S8
;
221 *otype
= OBJECT_TYPE_U8
;
225 *otype
= OBJECT_TYPE_S16
;
227 *otype
= OBJECT_TYPE_U16
;
231 *otype
= OBJECT_TYPE_S32
;
233 *otype
= OBJECT_TYPE_U32
;
237 *otype
= OBJECT_TYPE_S64
;
239 *otype
= OBJECT_TYPE_U64
;
247 static int specialize_get_index(struct bytecode_runtime
*runtime
,
248 struct load_op
*insn
, uint64_t index
,
249 struct vstack_entry
*stack_top
,
253 struct filter_get_index_data gid
;
256 memset(&gid
, 0, sizeof(gid
));
257 switch (stack_top
->load
.type
) {
259 switch (stack_top
->load
.object_type
) {
260 case OBJECT_TYPE_ARRAY
:
262 const struct lttng_event_field
*field
;
263 uint32_t elem_len
, num_elems
;
266 field
= stack_top
->load
.field
;
267 elem_len
= field
->type
.u
.array
.elem_type
.u
.basic
.integer
.size
;
268 signedness
= field
->type
.u
.array
.elem_type
.u
.basic
.integer
.signedness
;
269 num_elems
= field
->type
.u
.array
.length
;
270 if (index
>= num_elems
) {
274 ret
= specialize_get_index_object_type(&stack_top
->load
.object_type
,
275 signedness
, elem_len
);
278 gid
.offset
= index
* (elem_len
/ CHAR_BIT
);
279 gid
.array_len
= num_elems
* (elem_len
/ CHAR_BIT
);
280 gid
.elem
.type
= stack_top
->load
.object_type
;
281 gid
.elem
.len
= elem_len
;
282 if (field
->type
.u
.array
.elem_type
.u
.basic
.integer
.reverse_byte_order
)
283 gid
.elem
.rev_bo
= true;
284 stack_top
->load
.rev_bo
= gid
.elem
.rev_bo
;
287 case OBJECT_TYPE_SEQUENCE
:
289 const struct lttng_event_field
*field
;
293 field
= stack_top
->load
.field
;
294 elem_len
= field
->type
.u
.sequence
.elem_type
.u
.basic
.integer
.size
;
295 signedness
= field
->type
.u
.sequence
.elem_type
.u
.basic
.integer
.signedness
;
296 ret
= specialize_get_index_object_type(&stack_top
->load
.object_type
,
297 signedness
, elem_len
);
300 gid
.offset
= index
* (elem_len
/ CHAR_BIT
);
301 gid
.elem
.type
= stack_top
->load
.object_type
;
302 gid
.elem
.len
= elem_len
;
303 if (field
->type
.u
.sequence
.elem_type
.u
.basic
.integer
.reverse_byte_order
)
304 gid
.elem
.rev_bo
= true;
305 stack_top
->load
.rev_bo
= gid
.elem
.rev_bo
;
308 case OBJECT_TYPE_STRUCT
:
309 /* Only generated by the specialize phase. */
310 case OBJECT_TYPE_VARIANT
: /* Fall-through */
312 ERR("Unexpected get index type %d",
313 (int) stack_top
->load
.object_type
);
318 case LOAD_ROOT_CONTEXT
:
319 case LOAD_ROOT_APP_CONTEXT
:
320 case LOAD_ROOT_PAYLOAD
:
321 ERR("Index lookup for root field not implemented yet.");
325 data_offset
= bytecode_push_data(runtime
, &gid
,
326 __alignof__(gid
), sizeof(gid
));
327 if (data_offset
< 0) {
333 ((struct get_index_u16
*) insn
->data
)->index
= data_offset
;
336 ((struct get_index_u64
*) insn
->data
)->index
= data_offset
;
349 static int specialize_context_lookup_name(struct lttng_ctx
*ctx
,
350 struct bytecode_runtime
*bytecode
,
351 struct load_op
*insn
)
356 offset
= ((struct get_symbol
*) insn
->data
)->offset
;
357 name
= bytecode
->p
.bc
->bc
.data
+ bytecode
->p
.bc
->bc
.reloc_offset
+ offset
;
358 return lttng_get_context_index(ctx
, name
);
361 static int specialize_load_object(const struct lttng_event_field
*field
,
362 struct vstack_load
*load
, bool is_context
)
364 load
->type
= LOAD_OBJECT
;
366 * LTTng-UST layout all integer fields as s64 on the stack for the filter.
368 switch (field
->type
.atype
) {
370 if (field
->type
.u
.basic
.integer
.signedness
)
371 load
->object_type
= OBJECT_TYPE_S64
;
373 load
->object_type
= OBJECT_TYPE_U64
;
374 load
->rev_bo
= false;
378 const struct lttng_integer_type
*itype
=
379 &field
->type
.u
.basic
.enumeration
.container_type
;
381 if (itype
->signedness
)
382 load
->object_type
= OBJECT_TYPE_S64
;
384 load
->object_type
= OBJECT_TYPE_U64
;
385 load
->rev_bo
= false;
389 if (field
->type
.u
.array
.elem_type
.atype
!= atype_integer
) {
390 ERR("Array nesting only supports integer types.");
394 load
->object_type
= OBJECT_TYPE_STRING
;
396 if (field
->type
.u
.array
.elem_type
.u
.basic
.integer
.encoding
== lttng_encode_none
) {
397 load
->object_type
= OBJECT_TYPE_ARRAY
;
400 load
->object_type
= OBJECT_TYPE_STRING_SEQUENCE
;
405 if (field
->type
.u
.sequence
.elem_type
.atype
!= atype_integer
) {
406 ERR("Sequence nesting only supports integer types.");
410 load
->object_type
= OBJECT_TYPE_STRING
;
412 if (field
->type
.u
.sequence
.elem_type
.u
.basic
.integer
.encoding
== lttng_encode_none
) {
413 load
->object_type
= OBJECT_TYPE_SEQUENCE
;
416 load
->object_type
= OBJECT_TYPE_STRING_SEQUENCE
;
421 load
->object_type
= OBJECT_TYPE_STRING
;
424 load
->object_type
= OBJECT_TYPE_DOUBLE
;
427 load
->object_type
= OBJECT_TYPE_DYNAMIC
;
430 ERR("Structure type cannot be loaded.");
433 ERR("Unknown type: %d", (int) field
->type
.atype
);
439 static int specialize_context_lookup(struct lttng_session
*session
,
440 struct bytecode_runtime
*runtime
,
441 struct load_op
*insn
,
442 struct vstack_load
*load
)
445 struct lttng_ctx_field
*ctx_field
;
446 struct lttng_event_field
*field
;
447 struct filter_get_index_data gid
;
450 idx
= specialize_context_lookup_name(session
->ctx
, runtime
, insn
);
454 ctx_field
= &session
->ctx
->fields
[idx
];
455 field
= &ctx_field
->event_field
;
456 ret
= specialize_load_object(field
, load
, true);
459 /* Specialize each get_symbol into a get_index. */
460 insn
->op
= FILTER_OP_GET_INDEX_U16
;
461 memset(&gid
, 0, sizeof(gid
));
463 gid
.elem
.type
= load
->object_type
;
464 data_offset
= bytecode_push_data(runtime
, &gid
,
465 __alignof__(gid
), sizeof(gid
));
466 if (data_offset
< 0) {
469 ((struct get_index_u16
*) insn
->data
)->index
= data_offset
;
473 static int specialize_app_context_lookup(struct lttng_session
*session
,
474 struct bytecode_runtime
*runtime
,
475 struct load_op
*insn
,
476 struct vstack_load
*load
)
479 const char *orig_name
;
482 struct lttng_ctx_field
*ctx_field
;
483 struct lttng_event_field
*field
;
484 struct filter_get_index_data gid
;
487 offset
= ((struct get_symbol
*) insn
->data
)->offset
;
488 orig_name
= runtime
->p
.bc
->bc
.data
+ runtime
->p
.bc
->bc
.reloc_offset
+ offset
;
489 name
= zmalloc(strlen(orig_name
) + strlen("$app.") + 1);
494 strcpy(name
, "$app.");
495 strcat(name
, orig_name
);
496 idx
= lttng_get_context_index(session
->ctx
, name
);
498 assert(lttng_context_is_app(name
));
499 ret
= lttng_ust_add_app_context_to_ctx_rcu(name
,
503 idx
= lttng_get_context_index(session
->ctx
,
508 ctx_field
= &session
->ctx
->fields
[idx
];
509 field
= &ctx_field
->event_field
;
510 ret
= specialize_load_object(field
, load
, true);
513 /* Specialize each get_symbol into a get_index. */
514 insn
->op
= FILTER_OP_GET_INDEX_U16
;
515 memset(&gid
, 0, sizeof(gid
));
517 gid
.elem
.type
= load
->object_type
;
518 data_offset
= bytecode_push_data(runtime
, &gid
,
519 __alignof__(gid
), sizeof(gid
));
520 if (data_offset
< 0) {
524 ((struct get_index_u16
*) insn
->data
)->index
= data_offset
;
531 static int specialize_event_payload_lookup(struct lttng_event
*event
,
532 struct bytecode_runtime
*runtime
,
533 struct load_op
*insn
,
534 struct vstack_load
*load
)
538 const struct lttng_event_desc
*desc
= event
->desc
;
539 unsigned int i
, nr_fields
;
541 uint32_t field_offset
= 0;
542 const struct lttng_event_field
*field
;
544 struct filter_get_index_data gid
;
547 nr_fields
= desc
->nr_fields
;
548 offset
= ((struct get_symbol
*) insn
->data
)->offset
;
549 name
= runtime
->p
.bc
->bc
.data
+ runtime
->p
.bc
->bc
.reloc_offset
+ offset
;
550 for (i
= 0; i
< nr_fields
; i
++) {
551 field
= &desc
->fields
[i
];
552 if (!strcmp(field
->name
, name
)) {
556 /* compute field offset on stack */
557 switch (field
->type
.atype
) {
560 field_offset
+= sizeof(int64_t);
564 field_offset
+= sizeof(unsigned long);
565 field_offset
+= sizeof(void *);
568 field_offset
+= sizeof(void *);
571 field_offset
+= sizeof(double);
583 ret
= specialize_load_object(field
, load
, false);
587 /* Specialize each get_symbol into a get_index. */
588 insn
->op
= FILTER_OP_GET_INDEX_U16
;
589 memset(&gid
, 0, sizeof(gid
));
590 gid
.offset
= field_offset
;
591 gid
.elem
.type
= load
->object_type
;
592 data_offset
= bytecode_push_data(runtime
, &gid
,
593 __alignof__(gid
), sizeof(gid
));
594 if (data_offset
< 0) {
598 ((struct get_index_u16
*) insn
->data
)->index
= data_offset
;
604 int lttng_filter_specialize_bytecode(struct lttng_event
*event
,
605 struct bytecode_runtime
*bytecode
)
607 void *pc
, *next_pc
, *start_pc
;
609 struct vstack _stack
;
610 struct vstack
*stack
= &_stack
;
611 struct lttng_session
*session
= bytecode
->p
.session
;
615 start_pc
= &bytecode
->code
[0];
616 for (pc
= next_pc
= start_pc
; pc
- start_pc
< bytecode
->len
;
618 switch (*(filter_opcode_t
*) pc
) {
619 case FILTER_OP_UNKNOWN
:
621 ERR("unknown bytecode op %u\n",
622 (unsigned int) *(filter_opcode_t
*) pc
);
626 case FILTER_OP_RETURN
:
627 if (vstack_ax(stack
)->type
== REG_S64
)
628 *(filter_opcode_t
*) pc
= FILTER_OP_RETURN_S64
;
632 case FILTER_OP_RETURN_S64
:
633 if (vstack_ax(stack
)->type
!= REG_S64
) {
634 ERR("Unexpected register type\n");
646 case FILTER_OP_MINUS
:
647 ERR("unsupported bytecode op %u\n",
648 (unsigned int) *(filter_opcode_t
*) pc
);
654 struct binary_op
*insn
= (struct binary_op
*) pc
;
656 switch(vstack_ax(stack
)->type
) {
658 ERR("unknown register type\n");
663 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
665 if (vstack_bx(stack
)->type
== REG_STAR_GLOB_STRING
)
666 insn
->op
= FILTER_OP_EQ_STAR_GLOB_STRING
;
668 insn
->op
= FILTER_OP_EQ_STRING
;
670 case REG_STAR_GLOB_STRING
:
671 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
673 insn
->op
= FILTER_OP_EQ_STAR_GLOB_STRING
;
676 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
678 if (vstack_bx(stack
)->type
== REG_S64
)
679 insn
->op
= FILTER_OP_EQ_S64
;
681 insn
->op
= FILTER_OP_EQ_DOUBLE_S64
;
684 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
686 if (vstack_bx(stack
)->type
== REG_S64
)
687 insn
->op
= FILTER_OP_EQ_S64_DOUBLE
;
689 insn
->op
= FILTER_OP_EQ_DOUBLE
;
692 break; /* Dynamic typing. */
695 if (vstack_pop(stack
)) {
699 vstack_ax(stack
)->type
= REG_S64
;
700 next_pc
+= sizeof(struct binary_op
);
706 struct binary_op
*insn
= (struct binary_op
*) pc
;
708 switch(vstack_ax(stack
)->type
) {
710 ERR("unknown register type\n");
715 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
717 if (vstack_bx(stack
)->type
== REG_STAR_GLOB_STRING
)
718 insn
->op
= FILTER_OP_NE_STAR_GLOB_STRING
;
720 insn
->op
= FILTER_OP_NE_STRING
;
722 case REG_STAR_GLOB_STRING
:
723 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
725 insn
->op
= FILTER_OP_NE_STAR_GLOB_STRING
;
728 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
730 if (vstack_bx(stack
)->type
== REG_S64
)
731 insn
->op
= FILTER_OP_NE_S64
;
733 insn
->op
= FILTER_OP_NE_DOUBLE_S64
;
736 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
738 if (vstack_bx(stack
)->type
== REG_S64
)
739 insn
->op
= FILTER_OP_NE_S64_DOUBLE
;
741 insn
->op
= FILTER_OP_NE_DOUBLE
;
744 break; /* Dynamic typing. */
747 if (vstack_pop(stack
)) {
751 vstack_ax(stack
)->type
= REG_S64
;
752 next_pc
+= sizeof(struct binary_op
);
758 struct binary_op
*insn
= (struct binary_op
*) pc
;
760 switch(vstack_ax(stack
)->type
) {
762 ERR("unknown register type\n");
766 case REG_STAR_GLOB_STRING
:
767 ERR("invalid register type for > binary operator\n");
771 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
773 insn
->op
= FILTER_OP_GT_STRING
;
776 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
778 if (vstack_bx(stack
)->type
== REG_S64
)
779 insn
->op
= FILTER_OP_GT_S64
;
781 insn
->op
= FILTER_OP_GT_DOUBLE_S64
;
784 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
786 if (vstack_bx(stack
)->type
== REG_S64
)
787 insn
->op
= FILTER_OP_GT_S64_DOUBLE
;
789 insn
->op
= FILTER_OP_GT_DOUBLE
;
792 break; /* Dynamic typing. */
795 if (vstack_pop(stack
)) {
799 vstack_ax(stack
)->type
= REG_S64
;
800 next_pc
+= sizeof(struct binary_op
);
806 struct binary_op
*insn
= (struct binary_op
*) pc
;
808 switch(vstack_ax(stack
)->type
) {
810 ERR("unknown register type\n");
814 case REG_STAR_GLOB_STRING
:
815 ERR("invalid register type for < binary operator\n");
819 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
821 insn
->op
= FILTER_OP_LT_STRING
;
824 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
826 if (vstack_bx(stack
)->type
== REG_S64
)
827 insn
->op
= FILTER_OP_LT_S64
;
829 insn
->op
= FILTER_OP_LT_DOUBLE_S64
;
832 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
834 if (vstack_bx(stack
)->type
== REG_S64
)
835 insn
->op
= FILTER_OP_LT_S64_DOUBLE
;
837 insn
->op
= FILTER_OP_LT_DOUBLE
;
840 break; /* Dynamic typing. */
843 if (vstack_pop(stack
)) {
847 vstack_ax(stack
)->type
= REG_S64
;
848 next_pc
+= sizeof(struct binary_op
);
854 struct binary_op
*insn
= (struct binary_op
*) pc
;
856 switch(vstack_ax(stack
)->type
) {
858 ERR("unknown register type\n");
862 case REG_STAR_GLOB_STRING
:
863 ERR("invalid register type for >= binary operator\n");
867 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
869 insn
->op
= FILTER_OP_GE_STRING
;
872 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
874 if (vstack_bx(stack
)->type
== REG_S64
)
875 insn
->op
= FILTER_OP_GE_S64
;
877 insn
->op
= FILTER_OP_GE_DOUBLE_S64
;
880 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
882 if (vstack_bx(stack
)->type
== REG_S64
)
883 insn
->op
= FILTER_OP_GE_S64_DOUBLE
;
885 insn
->op
= FILTER_OP_GE_DOUBLE
;
888 break; /* Dynamic typing. */
891 if (vstack_pop(stack
)) {
895 vstack_ax(stack
)->type
= REG_S64
;
896 next_pc
+= sizeof(struct binary_op
);
901 struct binary_op
*insn
= (struct binary_op
*) pc
;
903 switch(vstack_ax(stack
)->type
) {
905 ERR("unknown register type\n");
909 case REG_STAR_GLOB_STRING
:
910 ERR("invalid register type for <= binary operator\n");
914 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
916 insn
->op
= FILTER_OP_LE_STRING
;
919 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
921 if (vstack_bx(stack
)->type
== REG_S64
)
922 insn
->op
= FILTER_OP_LE_S64
;
924 insn
->op
= FILTER_OP_LE_DOUBLE_S64
;
927 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
929 if (vstack_bx(stack
)->type
== REG_S64
)
930 insn
->op
= FILTER_OP_LE_S64_DOUBLE
;
932 insn
->op
= FILTER_OP_LE_DOUBLE
;
935 break; /* Dynamic typing. */
937 vstack_ax(stack
)->type
= REG_S64
;
938 next_pc
+= sizeof(struct binary_op
);
942 case FILTER_OP_EQ_STRING
:
943 case FILTER_OP_NE_STRING
:
944 case FILTER_OP_GT_STRING
:
945 case FILTER_OP_LT_STRING
:
946 case FILTER_OP_GE_STRING
:
947 case FILTER_OP_LE_STRING
:
948 case FILTER_OP_EQ_STAR_GLOB_STRING
:
949 case FILTER_OP_NE_STAR_GLOB_STRING
:
950 case FILTER_OP_EQ_S64
:
951 case FILTER_OP_NE_S64
:
952 case FILTER_OP_GT_S64
:
953 case FILTER_OP_LT_S64
:
954 case FILTER_OP_GE_S64
:
955 case FILTER_OP_LE_S64
:
956 case FILTER_OP_EQ_DOUBLE
:
957 case FILTER_OP_NE_DOUBLE
:
958 case FILTER_OP_GT_DOUBLE
:
959 case FILTER_OP_LT_DOUBLE
:
960 case FILTER_OP_GE_DOUBLE
:
961 case FILTER_OP_LE_DOUBLE
:
962 case FILTER_OP_EQ_DOUBLE_S64
:
963 case FILTER_OP_NE_DOUBLE_S64
:
964 case FILTER_OP_GT_DOUBLE_S64
:
965 case FILTER_OP_LT_DOUBLE_S64
:
966 case FILTER_OP_GE_DOUBLE_S64
:
967 case FILTER_OP_LE_DOUBLE_S64
:
968 case FILTER_OP_EQ_S64_DOUBLE
:
969 case FILTER_OP_NE_S64_DOUBLE
:
970 case FILTER_OP_GT_S64_DOUBLE
:
971 case FILTER_OP_LT_S64_DOUBLE
:
972 case FILTER_OP_GE_S64_DOUBLE
:
973 case FILTER_OP_LE_S64_DOUBLE
:
974 case FILTER_OP_BIT_RSHIFT
:
975 case FILTER_OP_BIT_LSHIFT
:
976 case FILTER_OP_BIT_AND
:
977 case FILTER_OP_BIT_OR
:
978 case FILTER_OP_BIT_XOR
:
981 if (vstack_pop(stack
)) {
985 vstack_ax(stack
)->type
= REG_S64
;
986 next_pc
+= sizeof(struct binary_op
);
991 case FILTER_OP_UNARY_PLUS
:
993 struct unary_op
*insn
= (struct unary_op
*) pc
;
995 switch(vstack_ax(stack
)->type
) {
997 ERR("unknown register type\n");
1002 insn
->op
= FILTER_OP_UNARY_PLUS_S64
;
1005 insn
->op
= FILTER_OP_UNARY_PLUS_DOUBLE
;
1007 case REG_UNKNOWN
: /* Dynamic typing. */
1011 next_pc
+= sizeof(struct unary_op
);
1015 case FILTER_OP_UNARY_MINUS
:
1017 struct unary_op
*insn
= (struct unary_op
*) pc
;
1019 switch(vstack_ax(stack
)->type
) {
1021 ERR("unknown register type\n");
1026 insn
->op
= FILTER_OP_UNARY_MINUS_S64
;
1029 insn
->op
= FILTER_OP_UNARY_MINUS_DOUBLE
;
1031 case REG_UNKNOWN
: /* Dynamic typing. */
1035 next_pc
+= sizeof(struct unary_op
);
1039 case FILTER_OP_UNARY_NOT
:
1041 struct unary_op
*insn
= (struct unary_op
*) pc
;
1043 switch(vstack_ax(stack
)->type
) {
1045 ERR("unknown register type\n");
1050 insn
->op
= FILTER_OP_UNARY_NOT_S64
;
1053 insn
->op
= FILTER_OP_UNARY_NOT_DOUBLE
;
1055 case REG_UNKNOWN
: /* Dynamic typing. */
1059 next_pc
+= sizeof(struct unary_op
);
1063 case FILTER_OP_UNARY_BIT_NOT
:
1066 next_pc
+= sizeof(struct unary_op
);
1070 case FILTER_OP_UNARY_PLUS_S64
:
1071 case FILTER_OP_UNARY_MINUS_S64
:
1072 case FILTER_OP_UNARY_NOT_S64
:
1073 case FILTER_OP_UNARY_PLUS_DOUBLE
:
1074 case FILTER_OP_UNARY_MINUS_DOUBLE
:
1075 case FILTER_OP_UNARY_NOT_DOUBLE
:
1078 next_pc
+= sizeof(struct unary_op
);
1086 /* Continue to next instruction */
1087 /* Pop 1 when jump not taken */
1088 if (vstack_pop(stack
)) {
1092 next_pc
+= sizeof(struct logical_op
);
1096 /* load field ref */
1097 case FILTER_OP_LOAD_FIELD_REF
:
1099 ERR("Unknown field ref type\n");
1103 /* get context ref */
1104 case FILTER_OP_GET_CONTEXT_REF
:
1106 if (vstack_push(stack
)) {
1110 vstack_ax(stack
)->type
= REG_UNKNOWN
;
1111 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1114 case FILTER_OP_LOAD_FIELD_REF_STRING
:
1115 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
1116 case FILTER_OP_GET_CONTEXT_REF_STRING
:
1118 if (vstack_push(stack
)) {
1122 vstack_ax(stack
)->type
= REG_STRING
;
1123 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1126 case FILTER_OP_LOAD_FIELD_REF_S64
:
1127 case FILTER_OP_GET_CONTEXT_REF_S64
:
1129 if (vstack_push(stack
)) {
1133 vstack_ax(stack
)->type
= REG_S64
;
1134 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1137 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
1138 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
1140 if (vstack_push(stack
)) {
1144 vstack_ax(stack
)->type
= REG_DOUBLE
;
1145 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1149 /* load from immediate operand */
1150 case FILTER_OP_LOAD_STRING
:
1152 struct load_op
*insn
= (struct load_op
*) pc
;
1154 if (vstack_push(stack
)) {
1158 vstack_ax(stack
)->type
= REG_STRING
;
1159 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1163 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
1165 struct load_op
*insn
= (struct load_op
*) pc
;
1167 if (vstack_push(stack
)) {
1171 vstack_ax(stack
)->type
= REG_STAR_GLOB_STRING
;
1172 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1176 case FILTER_OP_LOAD_S64
:
1178 if (vstack_push(stack
)) {
1182 vstack_ax(stack
)->type
= REG_S64
;
1183 next_pc
+= sizeof(struct load_op
)
1184 + sizeof(struct literal_numeric
);
1188 case FILTER_OP_LOAD_DOUBLE
:
1190 if (vstack_push(stack
)) {
1194 vstack_ax(stack
)->type
= REG_DOUBLE
;
1195 next_pc
+= sizeof(struct load_op
)
1196 + sizeof(struct literal_double
);
1201 case FILTER_OP_CAST_TO_S64
:
1203 struct cast_op
*insn
= (struct cast_op
*) pc
;
1205 switch (vstack_ax(stack
)->type
) {
1207 ERR("unknown register type\n");
1212 case REG_STAR_GLOB_STRING
:
1213 ERR("Cast op can only be applied to numeric or floating point registers\n");
1217 insn
->op
= FILTER_OP_CAST_NOP
;
1220 insn
->op
= FILTER_OP_CAST_DOUBLE_TO_S64
;
1226 vstack_ax(stack
)->type
= REG_S64
;
1227 next_pc
+= sizeof(struct cast_op
);
1230 case FILTER_OP_CAST_DOUBLE_TO_S64
:
1233 vstack_ax(stack
)->type
= REG_S64
;
1234 next_pc
+= sizeof(struct cast_op
);
1237 case FILTER_OP_CAST_NOP
:
1239 next_pc
+= sizeof(struct cast_op
);
1244 * Instructions for recursive traversal through composed types.
1246 case FILTER_OP_GET_CONTEXT_ROOT
:
1248 if (vstack_push(stack
)) {
1252 vstack_ax(stack
)->type
= REG_PTR
;
1253 vstack_ax(stack
)->load
.type
= LOAD_ROOT_CONTEXT
;
1254 next_pc
+= sizeof(struct load_op
);
1257 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
1259 if (vstack_push(stack
)) {
1263 vstack_ax(stack
)->type
= REG_PTR
;
1264 vstack_ax(stack
)->load
.type
= LOAD_ROOT_APP_CONTEXT
;
1265 next_pc
+= sizeof(struct load_op
);
1268 case FILTER_OP_GET_PAYLOAD_ROOT
:
1270 if (vstack_push(stack
)) {
1274 vstack_ax(stack
)->type
= REG_PTR
;
1275 vstack_ax(stack
)->load
.type
= LOAD_ROOT_PAYLOAD
;
1276 next_pc
+= sizeof(struct load_op
);
1280 case FILTER_OP_LOAD_FIELD
:
1282 struct load_op
*insn
= (struct load_op
*) pc
;
1284 assert(vstack_ax(stack
)->type
== REG_PTR
);
1286 ret
= specialize_load_field(vstack_ax(stack
), insn
);
1290 next_pc
+= sizeof(struct load_op
);
1294 case FILTER_OP_LOAD_FIELD_S8
:
1295 case FILTER_OP_LOAD_FIELD_S16
:
1296 case FILTER_OP_LOAD_FIELD_S32
:
1297 case FILTER_OP_LOAD_FIELD_S64
:
1298 case FILTER_OP_LOAD_FIELD_U8
:
1299 case FILTER_OP_LOAD_FIELD_U16
:
1300 case FILTER_OP_LOAD_FIELD_U32
:
1301 case FILTER_OP_LOAD_FIELD_U64
:
1304 vstack_ax(stack
)->type
= REG_S64
;
1305 next_pc
+= sizeof(struct load_op
);
1309 case FILTER_OP_LOAD_FIELD_STRING
:
1310 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
1313 vstack_ax(stack
)->type
= REG_STRING
;
1314 next_pc
+= sizeof(struct load_op
);
1318 case FILTER_OP_LOAD_FIELD_DOUBLE
:
1321 vstack_ax(stack
)->type
= REG_DOUBLE
;
1322 next_pc
+= sizeof(struct load_op
);
1326 case FILTER_OP_GET_SYMBOL
:
1328 struct load_op
*insn
= (struct load_op
*) pc
;
1330 dbg_printf("op get symbol\n");
1331 switch (vstack_ax(stack
)->load
.type
) {
1333 ERR("Nested fields not implemented yet.");
1336 case LOAD_ROOT_CONTEXT
:
1337 /* Lookup context field. */
1338 ret
= specialize_context_lookup(session
,
1340 &vstack_ax(stack
)->load
);
1344 case LOAD_ROOT_APP_CONTEXT
:
1345 /* Lookup app context field. */
1346 ret
= specialize_app_context_lookup(session
,
1348 &vstack_ax(stack
)->load
);
1352 case LOAD_ROOT_PAYLOAD
:
1353 /* Lookup event payload field. */
1354 ret
= specialize_event_payload_lookup(event
,
1356 &vstack_ax(stack
)->load
);
1361 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_symbol
);
1365 case FILTER_OP_GET_SYMBOL_FIELD
:
1367 /* Always generated by specialize phase. */
1372 case FILTER_OP_GET_INDEX_U16
:
1374 struct load_op
*insn
= (struct load_op
*) pc
;
1375 struct get_index_u16
*index
= (struct get_index_u16
*) insn
->data
;
1377 dbg_printf("op get index u16\n");
1379 ret
= specialize_get_index(bytecode
, insn
, index
->index
,
1380 vstack_ax(stack
), sizeof(*index
));
1383 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u16
);
1387 case FILTER_OP_GET_INDEX_U64
:
1389 struct load_op
*insn
= (struct load_op
*) pc
;
1390 struct get_index_u64
*index
= (struct get_index_u64
*) insn
->data
;
1392 dbg_printf("op get index u64\n");
1394 ret
= specialize_get_index(bytecode
, insn
, index
->index
,
1395 vstack_ax(stack
), sizeof(*index
));
1398 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u64
);