Add `lttng_bytecode_interpret_format_output()` for top of stack extraction
/*
 * lttng-filter-specialize.c
 *
 * LTTng UST filter code specializer.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define _LGPL_SOURCE
#include <stddef.h>
#include <stdint.h>

#include "lttng-filter.h"
#include <lttng/align.h>
#include "ust-events-internal.h"

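/*
 * Fallback "find last set bit": returns the 1-based position of the most
 * significant bit set in val, or 0 if no bit is set.
 */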
static int lttng_fls(int val)
{
	int r = 32;
	unsigned int x = (unsigned int) val;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		r -= 1;
	}
	return r;
}

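/*
 * Return the order of the next power of two greater than or equal to
 * count, i.e. ceil(log2(count)).
 */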
static int get_count_order(unsigned int count)
{
	int order;

	order = lttng_fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}

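/*
 * Reserve len bytes (aligned on align) in the runtime data area, growing
 * the allocation geometrically as needed. Returns the offset of the
 * reserved region, or a negative error code.
 */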
static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
		size_t align, size_t len)
{
	ssize_t ret;
	size_t padding = lttng_ust_offset_align(runtime->data_len, align);
	size_t new_len = runtime->data_len + padding + len;
	size_t new_alloc_len = new_len;
	size_t old_alloc_len = runtime->data_alloc_len;

	if (new_len > FILTER_MAX_DATA_LEN)
		return -EINVAL;

	if (new_alloc_len > old_alloc_len) {
		char *newptr;

		new_alloc_len =
			max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
		newptr = realloc(runtime->data, new_alloc_len);
		if (!newptr)
			return -ENOMEM;
		runtime->data = newptr;
		/* Zero the newly allocated memory range. */
		memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
		runtime->data_alloc_len = new_alloc_len;
	}
	runtime->data_len += padding;
	ret = runtime->data_len;
	runtime->data_len += len;
	return ret;
}

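/*
 * Copy len bytes from p into newly reserved space in the runtime data
 * area. Returns the offset at which the data was stored, or a negative
 * error code.
 */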
static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
		const void *p, size_t align, size_t len)
{
	ssize_t offset;

	offset = bytecode_reserve_data(runtime, align, len);
	if (offset < 0)
		return -ENOMEM;
	memcpy(&runtime->data[offset], p, len);
	return offset;
}

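/*
 * Replace a generic FILTER_OP_LOAD_FIELD instruction with its typed
 * variant, based on the object type currently tracked on top of the
 * virtual stack. Dynamic fields are left unspecialized.
 */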
static int specialize_load_field(struct vstack_entry *stack_top,
		struct load_op *insn)
{
	int ret;

	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
	default:
		dbg_printf("Filter warning: cannot load root, missing field name.\n");
		ret = -EINVAL;
		goto end;
	}
	switch (stack_top->load.object_type) {
	case OBJECT_TYPE_S8:
		dbg_printf("op load field s8\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S8;
		break;
	case OBJECT_TYPE_S16:
		dbg_printf("op load field s16\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S16;
		break;
	case OBJECT_TYPE_S32:
		dbg_printf("op load field s32\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S32;
		break;
	case OBJECT_TYPE_S64:
		dbg_printf("op load field s64\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S64;
		break;
	case OBJECT_TYPE_U8:
		dbg_printf("op load field u8\n");
		stack_top->type = REG_U64;
		insn->op = FILTER_OP_LOAD_FIELD_U8;
		break;
	case OBJECT_TYPE_U16:
		dbg_printf("op load field u16\n");
		stack_top->type = REG_U64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U16;
		break;
	case OBJECT_TYPE_U32:
		dbg_printf("op load field u32\n");
		stack_top->type = REG_U64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U32;
		break;
	case OBJECT_TYPE_U64:
		dbg_printf("op load field u64\n");
		stack_top->type = REG_U64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U64;
		break;
	case OBJECT_TYPE_DOUBLE:
		stack_top->type = REG_DOUBLE;
		insn->op = FILTER_OP_LOAD_FIELD_DOUBLE;
		break;
	case OBJECT_TYPE_STRING:
		dbg_printf("op load field string\n");
		stack_top->type = REG_STRING;
		insn->op = FILTER_OP_LOAD_FIELD_STRING;
		break;
	case OBJECT_TYPE_STRING_SEQUENCE:
		dbg_printf("op load field string sequence\n");
		stack_top->type = REG_STRING;
		insn->op = FILTER_OP_LOAD_FIELD_SEQUENCE;
		break;
	case OBJECT_TYPE_DYNAMIC:
		dbg_printf("op load field dynamic\n");
		stack_top->type = REG_UNKNOWN;
		/* Don't specialize load op. */
		break;
	case OBJECT_TYPE_SEQUENCE:
	case OBJECT_TYPE_ARRAY:
	case OBJECT_TYPE_STRUCT:
	case OBJECT_TYPE_VARIANT:
		ERR("Sequences, arrays, structs and variants cannot be loaded (nested types).");
		ret = -EINVAL;
		goto end;
	}
	return 0;

end:
	return ret;
}

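/*
 * Map an element bit-width and signedness to the corresponding integer
 * object type.
 */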
static int specialize_get_index_object_type(enum object_type *otype,
		int signedness, uint32_t elem_len)
{
	switch (elem_len) {
	case 8:
		if (signedness)
			*otype = OBJECT_TYPE_S8;
		else
			*otype = OBJECT_TYPE_U8;
		break;
	case 16:
		if (signedness)
			*otype = OBJECT_TYPE_S16;
		else
			*otype = OBJECT_TYPE_U16;
		break;
	case 32:
		if (signedness)
			*otype = OBJECT_TYPE_S32;
		else
			*otype = OBJECT_TYPE_U32;
		break;
	case 64:
		if (signedness)
			*otype = OBJECT_TYPE_S64;
		else
			*otype = OBJECT_TYPE_U64;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

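/*
 * Specialize an array/sequence index access: validate the element type,
 * precompute the element offset and byte order, and store the resulting
 * filter_get_index_data descriptor in the runtime data area.
 */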
static int specialize_get_index(struct bytecode_runtime *runtime,
		struct load_op *insn, uint64_t index,
		struct vstack_entry *stack_top,
		int idx_len)
{
	int ret;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	memset(&gid, 0, sizeof(gid));
	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		switch (stack_top->load.object_type) {
		case OBJECT_TYPE_ARRAY:
		{
			const struct lttng_integer_type *integer_type;
			const struct lttng_event_field *field;
			uint32_t elem_len, num_elems;
			int signedness;

			field = stack_top->load.field;
			switch (field->type.atype) {
			case atype_array:
				integer_type = &field->type.u.legacy.array.elem_type.u.basic.integer;
				num_elems = field->type.u.legacy.array.length;
				break;
			case atype_array_nestable:
				if (field->type.u.array_nestable.elem_type->atype != atype_integer) {
					ret = -EINVAL;
					goto end;
				}
				integer_type = &field->type.u.array_nestable.elem_type->u.integer;
				num_elems = field->type.u.array_nestable.length;
				break;
			default:
				ret = -EINVAL;
				goto end;
			}
			elem_len = integer_type->size;
			signedness = integer_type->signedness;
			if (index >= num_elems) {
				ret = -EINVAL;
				goto end;
			}
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.array_len = num_elems * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (integer_type->reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_SEQUENCE:
		{
			const struct lttng_integer_type *integer_type;
			const struct lttng_event_field *field;
			uint32_t elem_len;
			int signedness;

			field = stack_top->load.field;
			switch (field->type.atype) {
			case atype_sequence:
				integer_type = &field->type.u.legacy.sequence.elem_type.u.basic.integer;
				break;
			case atype_sequence_nestable:
				if (field->type.u.sequence_nestable.elem_type->atype != atype_integer) {
					ret = -EINVAL;
					goto end;
				}
				integer_type = &field->type.u.sequence_nestable.elem_type->u.integer;
				break;
			default:
				ret = -EINVAL;
				goto end;
			}
			elem_len = integer_type->size;
			signedness = integer_type->signedness;
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (integer_type->reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_STRUCT:
			/* Only generated by the specialize phase. */
		case OBJECT_TYPE_VARIANT:	/* Fall-through */
		default:
			ERR("Unexpected get index type %d",
				(int) stack_top->load.object_type);
			ret = -EINVAL;
			goto end;
		}
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
		ERR("Index lookup for root field not implemented yet.");
		ret = -EINVAL;
		goto end;
	}
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	switch (idx_len) {
	case 2:
		((struct get_index_u16 *) insn->data)->index = data_offset;
		break;
	case 8:
		((struct get_index_u64 *) insn->data)->index = data_offset;
		break;
	default:
		ret = -EINVAL;
		goto end;
	}

	return 0;

end:
	return ret;
}

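/*
 * Resolve the context field name referenced by a get_symbol instruction
 * (stored in the bytecode relocation table) to its index in the context
 * array.
 */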
static int specialize_context_lookup_name(struct lttng_ctx *ctx,
		struct bytecode_runtime *bytecode,
		struct load_op *insn)
{
	uint16_t offset;
	const char *name;

	offset = ((struct get_symbol *) insn->data)->offset;
	name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
	return lttng_get_context_index(ctx, name);
}

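/*
 * Fill the vstack_load descriptor according to the field type. Integer
 * and enumeration fields are widened to 64-bit objects; arrays and
 * sequences are only supported with integer elements.
 */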
static int specialize_load_object(const struct lttng_event_field *field,
		struct vstack_load *load, bool is_context)
{
	load->type = LOAD_OBJECT;

	switch (field->type.atype) {
	case atype_integer:
		if (field->type.u.integer.signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = false;
		break;
	case atype_enum:
	case atype_enum_nestable:
	{
		const struct lttng_integer_type *itype;

		if (field->type.atype == atype_enum) {
			itype = &field->type.u.legacy.basic.enumeration.container_type;
		} else {
			itype = &field->type.u.enum_nestable.container_type->u.integer;
		}
		if (itype->signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = false;
		break;
	}
	case atype_array:
		if (field->type.u.legacy.array.elem_type.atype != atype_integer) {
			ERR("Array nesting only supports integer types.");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (field->type.u.legacy.array.elem_type.u.basic.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_ARRAY;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case atype_array_nestable:
		if (field->type.u.array_nestable.elem_type->atype != atype_integer) {
			ERR("Array nesting only supports integer types.");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (field->type.u.array_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_ARRAY;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case atype_sequence:
		if (field->type.u.legacy.sequence.elem_type.atype != atype_integer) {
			ERR("Sequence nesting only supports integer types.");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (field->type.u.legacy.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_SEQUENCE;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case atype_sequence_nestable:
		if (field->type.u.sequence_nestable.elem_type->atype != atype_integer) {
			ERR("Sequence nesting only supports integer types.");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (field->type.u.sequence_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_SEQUENCE;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;

	case atype_string:
		load->object_type = OBJECT_TYPE_STRING;
		break;
	case atype_float:
		load->object_type = OBJECT_TYPE_DOUBLE;
		break;
	case atype_dynamic:
		load->object_type = OBJECT_TYPE_DYNAMIC;
		break;
	case atype_struct:
		ERR("Structure type cannot be loaded.");
		return -EINVAL;
	default:
		ERR("Unknown type: %d", (int) field->type.atype);
		return -EINVAL;
	}
	return 0;
}

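/*
 * Specialize a get_symbol lookup on the static context root into a
 * get_index instruction referencing the context field by index.
 */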
static int specialize_context_lookup(struct lttng_ctx *ctx,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	int idx, ret;
	struct lttng_ctx_field *ctx_field;
	struct lttng_event_field *field;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	idx = specialize_context_lookup_name(ctx, runtime, insn);
	if (idx < 0) {
		return -ENOENT;
	}
	ctx_field = &ctx->fields[idx];
	field = &ctx_field->event_field;
	ret = specialize_load_object(field, load, true);
	if (ret)
		return ret;
	/* Specialize each get_symbol into a get_index. */
	insn->op = FILTER_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.ctx_index = idx;
	gid.elem.type = load->object_type;
	gid.field = field;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		return -EINVAL;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	return 0;
}

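/*
 * Specialize a get_symbol lookup on the application context root. The
 * "$app." prefix is prepended to the symbol name; if the context is not
 * registered yet, it is added to the RCU context array first.
 */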
static int specialize_app_context_lookup(struct lttng_ctx **pctx,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	uint16_t offset;
	const char *orig_name;
	char *name = NULL;
	int idx, ret;
	struct lttng_ctx_field *ctx_field;
	struct lttng_event_field *field;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	offset = ((struct get_symbol *) insn->data)->offset;
	orig_name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
	name = zmalloc(strlen(orig_name) + strlen("$app.") + 1);
	if (!name) {
		ret = -ENOMEM;
		goto end;
	}
	strcpy(name, "$app.");
	strcat(name, orig_name);
	idx = lttng_get_context_index(*pctx, name);
	if (idx < 0) {
		assert(lttng_context_is_app(name));
		ret = lttng_ust_add_app_context_to_ctx_rcu(name,
				pctx);
		if (ret)
			goto end;	/* Don't leak name on error. */
		idx = lttng_get_context_index(*pctx, name);
		if (idx < 0) {
			ret = -ENOENT;
			goto end;
		}
	}
	ctx_field = &(*pctx)->fields[idx];
	field = &ctx_field->event_field;
	ret = specialize_load_object(field, load, true);
	if (ret)
		goto end;
	/* Specialize each get_symbol into a get_index. */
	insn->op = FILTER_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.ctx_index = idx;
	gid.elem.type = load->object_type;
	gid.field = field;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	ret = 0;
end:
	free(name);
	return ret;
}

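/*
 * Specialize a get_symbol lookup on the event payload root: walk the
 * event fields to compute the field offset on the interpreter stack,
 * then emit a get_index instruction referencing that offset.
 */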
static int specialize_payload_lookup(const struct lttng_event_desc *event_desc,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	const char *name;
	uint16_t offset;
	unsigned int i, nr_fields;
	bool found = false;
	uint32_t field_offset = 0;
	const struct lttng_event_field *field;
	int ret;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	nr_fields = event_desc->nr_fields;
	offset = ((struct get_symbol *) insn->data)->offset;
	name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
	for (i = 0; i < nr_fields; i++) {
		field = &event_desc->fields[i];
		if (field->u.ext.nofilter) {
			continue;
		}
		if (!strcmp(field->name, name)) {
			found = true;
			break;
		}
		/* compute field offset on stack */
		switch (field->type.atype) {
		case atype_integer:
		case atype_enum:
		case atype_enum_nestable:
			field_offset += sizeof(int64_t);
			break;
		case atype_array:
		case atype_array_nestable:
		case atype_sequence:
		case atype_sequence_nestable:
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case atype_string:
			field_offset += sizeof(void *);
			break;
		case atype_float:
			field_offset += sizeof(double);
			break;
		default:
			ret = -EINVAL;
			goto end;
		}
	}
	if (!found) {
		ret = -EINVAL;
		goto end;
	}

	ret = specialize_load_object(field, load, false);
	if (ret)
		goto end;

	/* Specialize each get_symbol into a get_index. */
	insn->op = FILTER_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.offset = field_offset;
	gid.elem.type = load->object_type;
	gid.field = field;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	ret = 0;
end:
	return ret;
}

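/*
 * Specialization pass: walk the bytecode once, tracking operand types on
 * a virtual stack, and replace generic, dynamically-typed instructions
 * with specialized variants wherever the operand types are known at
 * specialization time. Operands of unknown (dynamic) type are left for
 * the interpreter to dispatch at runtime.
 *
 * Typical call site (sketch; "runtime" names a bytecode_runtime being
 * linked against the event described by event_desc):
 *
 *	ret = lttng_filter_specialize_bytecode(event_desc, runtime);
 *	if (ret)
 *		goto link_error;	// hypothetical error label
 */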
int lttng_filter_specialize_bytecode(const struct lttng_event_desc *event_desc,
		struct bytecode_runtime *bytecode)
{
	void *pc, *next_pc, *start_pc;
	int ret = -EINVAL;
	struct vstack _stack;
	struct vstack *stack = &_stack;
	struct lttng_ctx **pctx = bytecode->p.pctx;

	vstack_init(stack);

	start_pc = &bytecode->code[0];
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
			pc = next_pc) {
		switch (*(filter_opcode_t *) pc) {
		case FILTER_OP_UNKNOWN:
		default:
			ERR("unknown bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		case FILTER_OP_RETURN:
			if (vstack_ax(stack)->type == REG_S64 ||
					vstack_ax(stack)->type == REG_U64)
				*(filter_opcode_t *) pc = FILTER_OP_RETURN_S64;
			ret = 0;
			goto end;

		case FILTER_OP_RETURN_S64:
			if (vstack_ax(stack)->type != REG_S64 &&
					vstack_ax(stack)->type != REG_U64) {
				ERR("Unexpected register type\n");
				ret = -EINVAL;
				goto end;
			}
			ret = 0;
			goto end;

		/* binary */
		case FILTER_OP_MUL:
		case FILTER_OP_DIV:
		case FILTER_OP_MOD:
		case FILTER_OP_PLUS:
		case FILTER_OP_MINUS:
			ERR("unsupported bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		case FILTER_OP_EQ:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
					insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
				else
					insn->op = FILTER_OP_EQ_STRING;
				break;
			case REG_STAR_GLOB_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = FILTER_OP_EQ_S64;
				else
					insn->op = FILTER_OP_EQ_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = FILTER_OP_EQ_S64_DOUBLE;
				else
					insn->op = FILTER_OP_EQ_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_NE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
					insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
				else
					insn->op = FILTER_OP_NE_STRING;
				break;
			case REG_STAR_GLOB_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = FILTER_OP_NE_S64;
				else
					insn->op = FILTER_OP_NE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = FILTER_OP_NE_S64_DOUBLE;
				else
					insn->op = FILTER_OP_NE_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_GT:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				ERR("invalid register type for > binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = FILTER_OP_GT_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = FILTER_OP_GT_S64;
				else
					insn->op = FILTER_OP_GT_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = FILTER_OP_GT_S64_DOUBLE;
				else
					insn->op = FILTER_OP_GT_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_LT:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				ERR("invalid register type for < binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = FILTER_OP_LT_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = FILTER_OP_LT_S64;
				else
					insn->op = FILTER_OP_LT_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = FILTER_OP_LT_S64_DOUBLE;
				else
					insn->op = FILTER_OP_LT_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_GE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				ERR("invalid register type for >= binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = FILTER_OP_GE_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = FILTER_OP_GE_S64;
				else
					insn->op = FILTER_OP_GE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = FILTER_OP_GE_S64_DOUBLE;
				else
					insn->op = FILTER_OP_GE_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_LE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				ERR("invalid register type for <= binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = FILTER_OP_LE_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = FILTER_OP_LE_S64;
				else
					insn->op = FILTER_OP_LE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = FILTER_OP_LE_S64_DOUBLE;
				else
					insn->op = FILTER_OP_LE_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_EQ_STRING:
		case FILTER_OP_NE_STRING:
		case FILTER_OP_GT_STRING:
		case FILTER_OP_LT_STRING:
		case FILTER_OP_GE_STRING:
		case FILTER_OP_LE_STRING:
		case FILTER_OP_EQ_STAR_GLOB_STRING:
		case FILTER_OP_NE_STAR_GLOB_STRING:
		case FILTER_OP_EQ_S64:
		case FILTER_OP_NE_S64:
		case FILTER_OP_GT_S64:
		case FILTER_OP_LT_S64:
		case FILTER_OP_GE_S64:
		case FILTER_OP_LE_S64:
		case FILTER_OP_EQ_DOUBLE:
		case FILTER_OP_NE_DOUBLE:
		case FILTER_OP_GT_DOUBLE:
		case FILTER_OP_LT_DOUBLE:
		case FILTER_OP_GE_DOUBLE:
		case FILTER_OP_LE_DOUBLE:
		case FILTER_OP_EQ_DOUBLE_S64:
		case FILTER_OP_NE_DOUBLE_S64:
		case FILTER_OP_GT_DOUBLE_S64:
		case FILTER_OP_LT_DOUBLE_S64:
		case FILTER_OP_GE_DOUBLE_S64:
		case FILTER_OP_LE_DOUBLE_S64:
		case FILTER_OP_EQ_S64_DOUBLE:
		case FILTER_OP_NE_S64_DOUBLE:
		case FILTER_OP_GT_S64_DOUBLE:
		case FILTER_OP_LT_S64_DOUBLE:
		case FILTER_OP_GE_S64_DOUBLE:
		case FILTER_OP_LE_S64_DOUBLE:
		{
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_BIT_RSHIFT:
		case FILTER_OP_BIT_LSHIFT:
		case FILTER_OP_BIT_AND:
		case FILTER_OP_BIT_OR:
		case FILTER_OP_BIT_XOR:
		{
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_U64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		/* unary */
		case FILTER_OP_UNARY_PLUS:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
			case REG_U64:
				insn->op = FILTER_OP_UNARY_PLUS_S64;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_UNARY_PLUS_DOUBLE;
				break;
			case REG_UNKNOWN:	/* Dynamic typing. */
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_MINUS:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
			case REG_U64:
				insn->op = FILTER_OP_UNARY_MINUS_S64;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_UNARY_MINUS_DOUBLE;
				break;
			case REG_UNKNOWN:	/* Dynamic typing. */
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_NOT:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
			case REG_U64:
				insn->op = FILTER_OP_UNARY_NOT_S64;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_UNARY_NOT_DOUBLE;
				break;
			case REG_UNKNOWN:	/* Dynamic typing. */
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_BIT_NOT:
		{
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_PLUS_S64:
		case FILTER_OP_UNARY_MINUS_S64:
		case FILTER_OP_UNARY_NOT_S64:
		case FILTER_OP_UNARY_PLUS_DOUBLE:
		case FILTER_OP_UNARY_MINUS_DOUBLE:
		case FILTER_OP_UNARY_NOT_DOUBLE:
		{
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		/* logical */
		case FILTER_OP_AND:
		case FILTER_OP_OR:
		{
			/* Continue to next instruction */
			/* Pop 1 when jump not taken */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			next_pc += sizeof(struct logical_op);
			break;
		}

		/* load field ref */
		case FILTER_OP_LOAD_FIELD_REF:
		{
			ERR("Unknown field ref type\n");
			ret = -EINVAL;
			goto end;
		}
		/* get context ref */
		case FILTER_OP_GET_CONTEXT_REF:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_UNKNOWN;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case FILTER_OP_LOAD_FIELD_REF_STRING:
		case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
		case FILTER_OP_GET_CONTEXT_REF_STRING:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case FILTER_OP_LOAD_FIELD_REF_S64:
		case FILTER_OP_GET_CONTEXT_REF_S64:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
		case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}

		/* load from immediate operand */
		case FILTER_OP_LOAD_STRING:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			break;
		}

		case FILTER_OP_LOAD_STAR_GLOB_STRING:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			break;
		}

		case FILTER_OP_LOAD_S64:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_numeric);
			break;
		}

		case FILTER_OP_LOAD_DOUBLE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_double);
			break;
		}

		/* cast */
		case FILTER_OP_CAST_TO_S64:
		{
			struct cast_op *insn = (struct cast_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
			case REG_STAR_GLOB_STRING:
				ERR("Cast op can only be applied to numeric or floating point registers\n");
				ret = -EINVAL;
				goto end;
			case REG_S64:
				insn->op = FILTER_OP_CAST_NOP;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_CAST_DOUBLE_TO_S64;
				break;
			case REG_UNKNOWN:
			case REG_U64:
				break;
			}
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct cast_op);
			break;
		}
		case FILTER_OP_CAST_DOUBLE_TO_S64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct cast_op);
			break;
		}
		case FILTER_OP_CAST_NOP:
		{
			next_pc += sizeof(struct cast_op);
			break;
		}

		/*
		 * Instructions for recursive traversal through composed types.
		 */
		case FILTER_OP_GET_CONTEXT_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
			next_pc += sizeof(struct load_op);
			break;
		}
		case FILTER_OP_GET_APP_CONTEXT_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
			next_pc += sizeof(struct load_op);
			break;
		}
		case FILTER_OP_GET_PAYLOAD_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD:
		{
			struct load_op *insn = (struct load_op *) pc;

			assert(vstack_ax(stack)->type == REG_PTR);
			/* Pop 1, push 1 */
			ret = specialize_load_field(vstack_ax(stack), insn);
			if (ret)
				goto end;

			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD_S8:
		case FILTER_OP_LOAD_FIELD_S16:
		case FILTER_OP_LOAD_FIELD_S32:
		case FILTER_OP_LOAD_FIELD_S64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD_U8:
		case FILTER_OP_LOAD_FIELD_U16:
		case FILTER_OP_LOAD_FIELD_U32:
		case FILTER_OP_LOAD_FIELD_U64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_U64;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD_STRING:
		case FILTER_OP_LOAD_FIELD_SEQUENCE:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD_DOUBLE:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_GET_SYMBOL:
		{
			struct load_op *insn = (struct load_op *) pc;

			dbg_printf("op get symbol\n");
			switch (vstack_ax(stack)->load.type) {
			case LOAD_OBJECT:
				ERR("Nested fields not implemented yet.");
				ret = -EINVAL;
				goto end;
			case LOAD_ROOT_CONTEXT:
				/* Lookup context field. */
				ret = specialize_context_lookup(*pctx,
						bytecode, insn,
						&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			case LOAD_ROOT_APP_CONTEXT:
				/* Lookup app context field. */
				ret = specialize_app_context_lookup(pctx,
						bytecode, insn,
						&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			case LOAD_ROOT_PAYLOAD:
				/* Lookup event payload field. */
				ret = specialize_payload_lookup(event_desc,
						bytecode, insn,
						&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			}
			next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
			break;
		}

		case FILTER_OP_GET_SYMBOL_FIELD:
		{
			/* Always generated by specialize phase. */
			ret = -EINVAL;
			goto end;
		}

		case FILTER_OP_GET_INDEX_U16:
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u16 *index = (struct get_index_u16 *) insn->data;

			dbg_printf("op get index u16\n");
			/* Pop 1, push 1 */
			ret = specialize_get_index(bytecode, insn, index->index,
					vstack_ax(stack), sizeof(*index));
			if (ret)
				goto end;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
			break;
		}

		case FILTER_OP_GET_INDEX_U64:
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u64 *index = (struct get_index_u64 *) insn->data;

			dbg_printf("op get index u64\n");
			/* Pop 1, push 1 */
			ret = specialize_get_index(bytecode, insn, index->index,
					vstack_ax(stack), sizeof(*index));
			if (ret)
				goto end;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
			break;
		}

		}
	}
end:
	return ret;
}