Filter: document ust app ctx limitation
src/lib/lttng-ctl/filter/filter-visitor-generate-bytecode.c (lttng-tools.git)
/*
 * filter-visitor-generate-bytecode.c
 *
 * LTTng filter bytecode generation
 *
 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License, version 2.1 only,
 * as published by the Free Software Foundation.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <common/align.h>
#include <common/compat/string.h>

#include "filter-bytecode.h"
#include "filter-ir.h"
#include "filter-ast.h"

#include <common/macros.h>

#ifndef max_t
#define max_t(type, a, b) ((type) ((a) > (b) ? (a) : (b)))
#endif

#define INIT_ALLOC_SIZE 4

static
int recursive_visit_gen_bytecode(struct filter_parser_ctx *ctx,
                struct ir_op *node);

static inline int get_count_order(unsigned int count)
{
        int order;

        order = lttng_fls(count) - 1;
        if (count & (count - 1))
                order++;
        return order;
}
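
/*
 * Added commentary (not in the original source): get_count_order()
 * computes ceil(log2(count)), i.e. the order of the smallest power of
 * two that is >= count. Illustrative values:
 *
 *   get_count_order(1) == 0
 *   get_count_order(4) == 2
 *   get_count_order(5) == 3   (rounded up to 8 == 1 << 3)
 */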

static
int bytecode_init(struct lttng_filter_bytecode_alloc **fb)
{
        uint32_t alloc_len;

        alloc_len = sizeof(struct lttng_filter_bytecode_alloc) + INIT_ALLOC_SIZE;
        *fb = calloc(alloc_len, 1);
        if (!*fb) {
                return -ENOMEM;
        } else {
                (*fb)->alloc_len = alloc_len;
                return 0;
        }
}

static
int32_t bytecode_reserve(struct lttng_filter_bytecode_alloc **fb, uint32_t align, uint32_t len)
{
        int32_t ret;
        uint32_t padding = offset_align((*fb)->b.len, align);
        uint32_t new_len = (*fb)->b.len + padding + len;
        uint32_t new_alloc_len = sizeof(struct lttng_filter_bytecode_alloc) + new_len;
        uint32_t old_alloc_len = (*fb)->alloc_len;

        if (new_len > LTTNG_FILTER_MAX_LEN)
                return -EINVAL;

        if (new_alloc_len > old_alloc_len) {
                struct lttng_filter_bytecode_alloc *newptr;

                new_alloc_len =
                        max_t(uint32_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
                newptr = realloc(*fb, new_alloc_len);
                if (!newptr)
                        return -ENOMEM;
                *fb = newptr;
                /* Zero the newly allocated memory, past the end of the old allocation. */
                memset(&((char *) *fb)[old_alloc_len], 0, new_alloc_len - old_alloc_len);
                (*fb)->alloc_len = new_alloc_len;
        }
        (*fb)->b.len += padding;
        ret = (*fb)->b.len;
        (*fb)->b.len += len;
        return ret;
}
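
/*
 * Added commentary (illustrative numbers, not from the original
 * source): on overflow, the buffer grows to the larger of the next
 * power of two covering new_alloc_len and twice the old allocation,
 * giving amortized O(1) appends. For example, with old_alloc_len == 64
 * and new_alloc_len == 70:
 *
 *   max_t(uint32_t, 1U << get_count_order(70), 64 << 1) == 128
 *
 * and bytes [64, 128) of the reallocated buffer are zeroed.
 */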

static
int bytecode_push(struct lttng_filter_bytecode_alloc **fb, const void *data,
                uint32_t align, uint32_t len)
{
        int32_t offset;

        offset = bytecode_reserve(fb, align, len);
        if (offset < 0)
                return offset;
        memcpy(&(*fb)->b.data[offset], data, len);
        return 0;
}

static
int bytecode_push_logical(struct lttng_filter_bytecode_alloc **fb,
                struct logical_op *data,
                uint32_t align, uint32_t len,
                uint16_t *skip_offset)
{
        int32_t offset;

        offset = bytecode_reserve(fb, align, len);
        if (offset < 0)
                return offset;
        memcpy(&(*fb)->b.data[offset], data, len);
        *skip_offset =
                (void *) &((struct logical_op *) &(*fb)->b.data[offset])->skip_offset
                        - (void *) &(*fb)->b.data[0];
        return 0;
}
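
/*
 * Added commentary: unlike bytecode_push(), this variant also reports,
 * through *skip_offset, where the instruction's skip_offset field
 * landed within the bytecode buffer, so that visit_node_logical() can
 * later patch the short-circuit jump target once the right-hand
 * operand has been generated.
 */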

static
int bytecode_patch(struct lttng_filter_bytecode_alloc **fb,
                const void *data,
                uint16_t offset,
                uint32_t len)
{
        if (offset >= (*fb)->b.len) {
                return -EINVAL;
        }
        memcpy(&(*fb)->b.data[offset], data, len);
        return 0;
}

static
int visit_node_root(struct filter_parser_ctx *ctx, struct ir_op *node)
{
        int ret;
        struct return_op insn;

        /* Visit child */
        ret = recursive_visit_gen_bytecode(ctx, node->u.root.child);
        if (ret)
                return ret;

        /* Generate end of bytecode instruction */
        insn.op = FILTER_OP_RETURN;
        return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
}

static
int append_str(char **s, const char *append)
{
        char *old = *s;
        char *new;
        size_t oldlen = (old == NULL) ? 0 : strlen(old);
        size_t appendlen = strlen(append);

        new = calloc(oldlen + appendlen + 1, 1);
        if (!new) {
                return -ENOMEM;
        }
        if (oldlen) {
                strcpy(new, old);
        }
        strcat(new, append);
        *s = new;
        free(old);
        return 0;
}
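
/*
 * Usage sketch (hypothetical caller, not in the original source):
 * append_str() reallocates the destination on every call, so a symbol
 * can be built incrementally:
 *
 *   char *sym = NULL;
 *
 *   if (append_str(&sym, "$ctx.") || append_str(&sym, "vpid")) {
 *           free(sym);
 *           return -ENOMEM;
 *   }
 *   // sym now points to "$ctx.vpid"; the caller must free() it.
 */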

/*
 * 1: match
 * 0: no match
 * < 0: error
 */
static
int load_expression_legacy_match(const struct ir_load_expression *exp,
                enum filter_op *op_type,
                char **symbol)
{
        const struct ir_load_expression_op *op;
        bool need_dot = false;

        op = exp->child;
        switch (op->type) {
        case IR_LOAD_EXPRESSION_GET_CONTEXT_ROOT:
                *op_type = FILTER_OP_GET_CONTEXT_REF;
                if (append_str(symbol, "$ctx.")) {
                        return -ENOMEM;
                }
                need_dot = false;
                break;
        case IR_LOAD_EXPRESSION_GET_APP_CONTEXT_ROOT:
                *op_type = FILTER_OP_GET_CONTEXT_REF;
                if (append_str(symbol, "$app.")) {
                        return -ENOMEM;
                }
                need_dot = false;
                break;
        case IR_LOAD_EXPRESSION_GET_PAYLOAD_ROOT:
                *op_type = FILTER_OP_LOAD_FIELD_REF;
                need_dot = false;
                break;

        case IR_LOAD_EXPRESSION_GET_SYMBOL:
        case IR_LOAD_EXPRESSION_GET_INDEX:
        case IR_LOAD_EXPRESSION_LOAD_FIELD:
        default:
                return 0;       /* no match */
        }

        for (;;) {
                op = op->next;
                if (!op) {
                        return 0;       /* no match */
                }
                switch (op->type) {
                case IR_LOAD_EXPRESSION_LOAD_FIELD:
                        goto end;
                case IR_LOAD_EXPRESSION_GET_SYMBOL:
                        if (need_dot && append_str(symbol, ".")) {
                                return -ENOMEM;
                        }
                        if (append_str(symbol, op->u.symbol)) {
                                return -ENOMEM;
                        }
                        break;
                default:
                        return 0;       /* no match */
                }
                need_dot = true;
        }
end:
        return 1;       /* Legacy match */
}
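
/*
 * Example (illustrative, assuming a filter such as "$ctx.vpid == 42"):
 * an expression rooted at IR_LOAD_EXPRESSION_GET_CONTEXT_ROOT,
 * followed by GET_SYMBOL "vpid" and a trailing LOAD_FIELD, matches the
 * legacy form and yields op_type FILTER_OP_GET_CONTEXT_REF with the
 * symbol string "$ctx.vpid". An expression containing GET_INDEX (e.g.
 * an array access) does not match and falls through to the newer
 * instruction set emitted by visit_node_load_expression().
 */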

/*
 * 1: legacy match
 * 0: no legacy match
 * < 0: error
 */
static
int visit_node_load_expression_legacy(struct filter_parser_ctx *ctx,
                const struct ir_load_expression *exp,
                const struct ir_load_expression_op *op)
{
        struct load_op *insn = NULL;
        uint32_t insn_len = sizeof(struct load_op)
                + sizeof(struct field_ref);
        struct field_ref ref_offset;
        uint32_t reloc_offset_u32;
        uint16_t reloc_offset;
        enum filter_op op_type;
        char *symbol = NULL;
        int ret;

        ret = load_expression_legacy_match(exp, &op_type, &symbol);
        if (ret <= 0) {
                goto end;
        }
        insn = calloc(insn_len, 1);
        if (!insn) {
                ret = -ENOMEM;
                goto end;
        }
        insn->op = op_type;
        ref_offset.offset = (uint16_t) -1U;
        memcpy(insn->data, &ref_offset, sizeof(ref_offset));
        /* reloc_offset points to struct load_op */
        reloc_offset_u32 = bytecode_get_len(&ctx->bytecode->b);
        if (reloc_offset_u32 > LTTNG_FILTER_MAX_LEN - 1) {
                ret = -EINVAL;
                goto end;
        }
        reloc_offset = (uint16_t) reloc_offset_u32;
        ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
        if (ret) {
                goto end;
        }
        /* append reloc */
        ret = bytecode_push(&ctx->bytecode_reloc, &reloc_offset,
                        1, sizeof(reloc_offset));
        if (ret) {
                goto end;
        }
        ret = bytecode_push(&ctx->bytecode_reloc, symbol,
                        1, strlen(symbol) + 1);
        if (ret) {
                goto end;
        }
        ret = 1;        /* legacy */
end:
        free(insn);
        free(symbol);
        return ret;
}

static
int visit_node_load_expression(struct filter_parser_ctx *ctx,
                const struct ir_op *node)
{
        struct ir_load_expression *exp;
        struct ir_load_expression_op *op;
        int ret;

        exp = node->u.load.u.expression;
        if (!exp) {
                return -EINVAL;
        }
        op = exp->child;
        if (!op) {
                return -EINVAL;
        }

        /*
         * TODO: if we remove legacy load for application contexts, we
         * need to update session bytecode parser as well.
         */
        ret = visit_node_load_expression_legacy(ctx, exp, op);
        if (ret < 0) {
                return ret;
        }
        if (ret > 0) {
                return 0;       /* legacy */
        }

        for (; op != NULL; op = op->next) {
                switch (op->type) {
                case IR_LOAD_EXPRESSION_GET_CONTEXT_ROOT:
                {
                        struct load_op *insn;
                        uint32_t insn_len = sizeof(struct load_op);
                        int ret;

                        insn = calloc(insn_len, 1);
                        if (!insn)
                                return -ENOMEM;
                        insn->op = FILTER_OP_GET_CONTEXT_ROOT;
                        ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
                        free(insn);
                        if (ret) {
                                return ret;
                        }
                        break;
                }
                case IR_LOAD_EXPRESSION_GET_APP_CONTEXT_ROOT:
                {
                        struct load_op *insn;
                        uint32_t insn_len = sizeof(struct load_op);
                        int ret;

                        insn = calloc(insn_len, 1);
                        if (!insn)
                                return -ENOMEM;
                        insn->op = FILTER_OP_GET_APP_CONTEXT_ROOT;
                        ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
                        free(insn);
                        if (ret) {
                                return ret;
                        }
                        break;
                }
                case IR_LOAD_EXPRESSION_GET_PAYLOAD_ROOT:
                {
                        struct load_op *insn;
                        uint32_t insn_len = sizeof(struct load_op);
                        int ret;

                        insn = calloc(insn_len, 1);
                        if (!insn)
                                return -ENOMEM;
                        insn->op = FILTER_OP_GET_PAYLOAD_ROOT;
                        ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
                        free(insn);
                        if (ret) {
                                return ret;
                        }
                        break;
                }
                case IR_LOAD_EXPRESSION_GET_SYMBOL:
                {
                        struct load_op *insn;
                        uint32_t insn_len = sizeof(struct load_op)
                                + sizeof(struct get_symbol);
                        struct get_symbol symbol_offset;
                        uint32_t reloc_offset_u32;
                        uint16_t reloc_offset;
                        uint32_t bytecode_reloc_offset_u32;
                        int ret;

                        insn = calloc(insn_len, 1);
                        if (!insn)
                                return -ENOMEM;
                        insn->op = FILTER_OP_GET_SYMBOL;
                        bytecode_reloc_offset_u32 =
                                        bytecode_get_len(&ctx->bytecode_reloc->b)
                                        + sizeof(reloc_offset);
                        symbol_offset.offset =
                                        (uint16_t) bytecode_reloc_offset_u32;
                        memcpy(insn->data, &symbol_offset,
                                        sizeof(symbol_offset));
                        /* reloc_offset points to struct load_op */
                        reloc_offset_u32 = bytecode_get_len(&ctx->bytecode->b);
                        if (reloc_offset_u32 > LTTNG_FILTER_MAX_LEN - 1) {
                                free(insn);
                                return -EINVAL;
                        }
                        reloc_offset = (uint16_t) reloc_offset_u32;
                        ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
                        if (ret) {
                                free(insn);
                                return ret;
                        }
                        /* append reloc */
                        ret = bytecode_push(&ctx->bytecode_reloc, &reloc_offset,
                                        1, sizeof(reloc_offset));
                        if (ret) {
                                free(insn);
                                return ret;
                        }
                        ret = bytecode_push(&ctx->bytecode_reloc,
                                        op->u.symbol,
                                        1, strlen(op->u.symbol) + 1);
                        free(insn);
                        if (ret) {
                                return ret;
                        }
                        break;
                }
                case IR_LOAD_EXPRESSION_GET_INDEX:
                {
                        struct load_op *insn;
                        uint32_t insn_len = sizeof(struct load_op)
                                + sizeof(struct get_index_u64);
                        struct get_index_u64 index;
                        int ret;

                        insn = calloc(insn_len, 1);
                        if (!insn)
                                return -ENOMEM;
                        insn->op = FILTER_OP_GET_INDEX_U64;
                        index.index = op->u.index;
                        memcpy(insn->data, &index, sizeof(index));
                        ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
                        free(insn);
                        if (ret) {
                                return ret;
                        }
                        break;
                }
                case IR_LOAD_EXPRESSION_LOAD_FIELD:
                {
                        struct load_op *insn;
                        uint32_t insn_len = sizeof(struct load_op);
                        int ret;

                        insn = calloc(insn_len, 1);
                        if (!insn)
                                return -ENOMEM;
                        insn->op = FILTER_OP_LOAD_FIELD;
                        ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
                        free(insn);
                        if (ret) {
                                return ret;
                        }
                        break;
                }
                }
        }
        return 0;
}
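
/*
 * Illustrative emission (hypothetical filter "myfield[1] == 7", not
 * from the original source): the non-legacy path above would emit, in
 * order,
 *
 *   FILTER_OP_GET_PAYLOAD_ROOT
 *   FILTER_OP_GET_SYMBOL      (reloc-table offset of "myfield")
 *   FILTER_OP_GET_INDEX_U64   (index == 1)
 *   FILTER_OP_LOAD_FIELD
 *
 * with the instruction offset and the "myfield\0" string appended to
 * ctx->bytecode_reloc along the way.
 */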

static
int visit_node_load(struct filter_parser_ctx *ctx, struct ir_op *node)
{
        int ret;

        switch (node->data_type) {
        case IR_DATA_UNKNOWN:
        default:
                fprintf(stderr, "[error] Unknown data type in %s\n",
                        __func__);
                return -EINVAL;

        case IR_DATA_STRING:
        {
                struct load_op *insn;
                uint32_t insn_len = sizeof(struct load_op)
                        + strlen(node->u.load.u.string.value) + 1;

                insn = calloc(insn_len, 1);
                if (!insn)
                        return -ENOMEM;

                switch (node->u.load.u.string.type) {
                case IR_LOAD_STRING_TYPE_GLOB_STAR:
                        /*
                         * We explicitly tell the interpreter here that
                         * this load is a full star globbing pattern so
                         * that the appropriate matching function can be
                         * called. Also, see comment below.
                         */
                        insn->op = FILTER_OP_LOAD_STAR_GLOB_STRING;
                        break;
                default:
                        /*
                         * This is the "legacy" string, which includes
                         * star globbing patterns with a star only at
                         * the end. Both "plain" and "star at the end"
                         * literal strings are handled at the same place
                         * by the tracer's filter bytecode interpreter,
                         * whereas full star globbing patterns (stars
                         * can appear anywhere in the string) are a
                         * special case.
                         */
                        insn->op = FILTER_OP_LOAD_STRING;
                        break;
                }

                strcpy(insn->data, node->u.load.u.string.value);
                ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
                free(insn);
                return ret;
        }
        case IR_DATA_NUMERIC:
        {
                struct load_op *insn;
                uint32_t insn_len = sizeof(struct load_op)
                        + sizeof(struct literal_numeric);

                insn = calloc(insn_len, 1);
                if (!insn)
                        return -ENOMEM;
                insn->op = FILTER_OP_LOAD_S64;
                memcpy(insn->data, &node->u.load.u.num, sizeof(int64_t));
                ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
                free(insn);
                return ret;
        }
        case IR_DATA_FLOAT:
        {
                struct load_op *insn;
                uint32_t insn_len = sizeof(struct load_op)
                        + sizeof(struct literal_double);

                insn = calloc(insn_len, 1);
                if (!insn)
                        return -ENOMEM;
                insn->op = FILTER_OP_LOAD_DOUBLE;
                memcpy(insn->data, &node->u.load.u.flt, sizeof(double));
                ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
                free(insn);
                return ret;
        }
        case IR_DATA_EXPRESSION:
                return visit_node_load_expression(ctx, node);
        }
}

static
int visit_node_unary(struct filter_parser_ctx *ctx, struct ir_op *node)
{
        int ret;
        struct unary_op insn;

        /* Visit child */
        ret = recursive_visit_gen_bytecode(ctx, node->u.unary.child);
        if (ret)
                return ret;

        /* Generate the unary bytecode instruction */
        switch (node->u.unary.type) {
        case AST_UNARY_UNKNOWN:
        default:
                fprintf(stderr, "[error] Unknown unary node type in %s\n",
                        __func__);
                return -EINVAL;
        case AST_UNARY_PLUS:
                /* Nothing to do. */
                return 0;
        case AST_UNARY_MINUS:
                insn.op = FILTER_OP_UNARY_MINUS;
                return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
        case AST_UNARY_NOT:
                insn.op = FILTER_OP_UNARY_NOT;
                return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
        case AST_UNARY_BIT_NOT:
                insn.op = FILTER_OP_UNARY_BIT_NOT;
                return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
        }
}

/*
 * Binary comparator nesting is disallowed. This allows the expression
 * to fit within only 2 registers.
 */
static
int visit_node_binary(struct filter_parser_ctx *ctx, struct ir_op *node)
{
        int ret;
        struct binary_op insn;

        /* Visit children */
        ret = recursive_visit_gen_bytecode(ctx, node->u.binary.left);
        if (ret)
                return ret;
        ret = recursive_visit_gen_bytecode(ctx, node->u.binary.right);
        if (ret)
                return ret;

        switch (node->u.binary.type) {
        case AST_OP_UNKNOWN:
        default:
                fprintf(stderr, "[error] Unknown binary node type in %s\n",
                        __func__);
                return -EINVAL;

        case AST_OP_AND:
        case AST_OP_OR:
                fprintf(stderr, "[error] Unexpected logical node type in %s\n",
                        __func__);
                return -EINVAL;

        case AST_OP_MUL:
                insn.op = FILTER_OP_MUL;
                break;
        case AST_OP_DIV:
                insn.op = FILTER_OP_DIV;
                break;
        case AST_OP_MOD:
                insn.op = FILTER_OP_MOD;
                break;
        case AST_OP_PLUS:
                insn.op = FILTER_OP_PLUS;
                break;
        case AST_OP_MINUS:
                insn.op = FILTER_OP_MINUS;
                break;
        case AST_OP_BIT_RSHIFT:
                insn.op = FILTER_OP_BIT_RSHIFT;
                break;
        case AST_OP_BIT_LSHIFT:
                insn.op = FILTER_OP_BIT_LSHIFT;
                break;
        case AST_OP_BIT_AND:
                insn.op = FILTER_OP_BIT_AND;
                break;
        case AST_OP_BIT_OR:
                insn.op = FILTER_OP_BIT_OR;
                break;
        case AST_OP_BIT_XOR:
                insn.op = FILTER_OP_BIT_XOR;
                break;

        case AST_OP_EQ:
                insn.op = FILTER_OP_EQ;
                break;
        case AST_OP_NE:
                insn.op = FILTER_OP_NE;
                break;
        case AST_OP_GT:
                insn.op = FILTER_OP_GT;
                break;
        case AST_OP_LT:
                insn.op = FILTER_OP_LT;
                break;
        case AST_OP_GE:
                insn.op = FILTER_OP_GE;
                break;
        case AST_OP_LE:
                insn.op = FILTER_OP_LE;
                break;
        }
        return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
}

/*
 * A logical op always returns a s64 (1 or 0).
 */
static
int visit_node_logical(struct filter_parser_ctx *ctx, struct ir_op *node)
{
        int ret;
        struct logical_op insn;
        uint16_t skip_offset_loc;
        uint16_t target_loc;

        /* Visit left child */
        ret = recursive_visit_gen_bytecode(ctx, node->u.binary.left);
        if (ret)
                return ret;
        /* Cast to s64 if float or field ref */
        if ((node->u.binary.left->data_type == IR_DATA_FIELD_REF
                        || node->u.binary.left->data_type == IR_DATA_GET_CONTEXT_REF
                        || node->u.binary.left->data_type == IR_DATA_EXPRESSION)
                        || node->u.binary.left->data_type == IR_DATA_FLOAT) {
                struct cast_op cast_insn;

                if (node->u.binary.left->data_type == IR_DATA_FIELD_REF
                                || node->u.binary.left->data_type == IR_DATA_GET_CONTEXT_REF
                                || node->u.binary.left->data_type == IR_DATA_EXPRESSION) {
                        cast_insn.op = FILTER_OP_CAST_TO_S64;
                } else {
                        cast_insn.op = FILTER_OP_CAST_DOUBLE_TO_S64;
                }
                ret = bytecode_push(&ctx->bytecode, &cast_insn,
                                1, sizeof(cast_insn));
                if (ret)
                        return ret;
        }
        switch (node->u.logical.type) {
        default:
                fprintf(stderr, "[error] Unknown node type in %s\n",
                        __func__);
                return -EINVAL;

        case AST_OP_AND:
                insn.op = FILTER_OP_AND;
                break;
        case AST_OP_OR:
                insn.op = FILTER_OP_OR;
                break;
        }
        insn.skip_offset = (uint16_t) -1UL;     /* Temporary */
        ret = bytecode_push_logical(&ctx->bytecode, &insn, 1, sizeof(insn),
                        &skip_offset_loc);
        if (ret)
                return ret;
        /* Visit right child */
        ret = recursive_visit_gen_bytecode(ctx, node->u.binary.right);
        if (ret)
                return ret;
        /* Cast to s64 if float or field ref */
        if ((node->u.binary.right->data_type == IR_DATA_FIELD_REF
                        || node->u.binary.right->data_type == IR_DATA_GET_CONTEXT_REF
                        || node->u.binary.right->data_type == IR_DATA_EXPRESSION)
                        || node->u.binary.right->data_type == IR_DATA_FLOAT) {
                struct cast_op cast_insn;

                if (node->u.binary.right->data_type == IR_DATA_FIELD_REF
                                || node->u.binary.right->data_type == IR_DATA_GET_CONTEXT_REF
                                || node->u.binary.right->data_type == IR_DATA_EXPRESSION) {
                        cast_insn.op = FILTER_OP_CAST_TO_S64;
                } else {
                        cast_insn.op = FILTER_OP_CAST_DOUBLE_TO_S64;
                }
                ret = bytecode_push(&ctx->bytecode, &cast_insn,
                                1, sizeof(cast_insn));
                if (ret)
                        return ret;
        }
        /* We now know where the logical op can skip. */
        target_loc = (uint16_t) bytecode_get_len(&ctx->bytecode->b);
        ret = bytecode_patch(&ctx->bytecode,
                        &target_loc,            /* Offset to jump to */
                        skip_offset_loc,        /* Where to patch */
                        sizeof(uint16_t));
        return ret;
}
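
/*
 * Layout sketch (illustrative): for "a && b", the generated stream is
 *
 *   [ bytecode evaluating a ]
 *   [ optional FILTER_OP_CAST_*_TO_S64 ]
 *   FILTER_OP_AND { skip_offset }     <-- patched above
 *   [ bytecode evaluating b ]
 *   [ optional FILTER_OP_CAST_*_TO_S64 ]
 *   <-- skip_offset ends up pointing here
 *
 * so a false left-hand operand lets the interpreter jump straight past
 * the evaluation of the right-hand operand.
 */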

/*
 * Postorder traversal of the tree. We need the children's results
 * before we can evaluate the parent.
 */
static
int recursive_visit_gen_bytecode(struct filter_parser_ctx *ctx,
                struct ir_op *node)
{
        switch (node->op) {
        case IR_OP_UNKNOWN:
        default:
                fprintf(stderr, "[error] Unknown node type in %s\n",
                        __func__);
                return -EINVAL;

        case IR_OP_ROOT:
                return visit_node_root(ctx, node);
        case IR_OP_LOAD:
                return visit_node_load(ctx, node);
        case IR_OP_UNARY:
                return visit_node_unary(ctx, node);
        case IR_OP_BINARY:
                return visit_node_binary(ctx, node);
        case IR_OP_LOGICAL:
                return visit_node_logical(ctx, node);
        }
}

LTTNG_HIDDEN
void filter_bytecode_free(struct filter_parser_ctx *ctx)
{
        if (!ctx) {
                return;
        }

        if (ctx->bytecode) {
                free(ctx->bytecode);
                ctx->bytecode = NULL;
        }

        if (ctx->bytecode_reloc) {
                free(ctx->bytecode_reloc);
                ctx->bytecode_reloc = NULL;
        }
}

LTTNG_HIDDEN
int filter_visitor_bytecode_generate(struct filter_parser_ctx *ctx)
{
        int ret;

        ret = bytecode_init(&ctx->bytecode);
        if (ret)
                return ret;
        ret = bytecode_init(&ctx->bytecode_reloc);
        if (ret)
                goto error;
        ret = recursive_visit_gen_bytecode(ctx, ctx->ir_root);
        if (ret)
                goto error;

        /* Finally, append the symbol table to the bytecode. */
        ctx->bytecode->b.reloc_table_offset = bytecode_get_len(&ctx->bytecode->b);
        return bytecode_push(&ctx->bytecode, ctx->bytecode_reloc->b.data,
                        1, bytecode_get_len(&ctx->bytecode_reloc->b));

error:
        filter_bytecode_free(ctx);
        return ret;
}
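
/*
 * Resulting buffer layout (added commentary): the relocation table, a
 * sequence of (uint16_t instruction offset, NUL-terminated symbol)
 * pairs, is appended right after the instructions, and
 * b.reloc_table_offset records where it starts:
 *
 *   +------------------+----------------------------------+
 *   |   instructions   |  reloc table (u16 + "symbol\0")*  |
 *   +------------------+----------------------------------+
 *   ^ 0                ^ b.reloc_table_offset
 */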