Filters: generate backward compatible "get field" and "get context" instructions
[lttng-tools.git] / src / lib / lttng-ctl / filter / filter-visitor-generate-bytecode.c
CommitLineData
953192ba
MD
1/*
2 * filter-visitor-generate-bytecode.c
3 *
4 * LTTng filter bytecode generation
5 *
6 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU Lesser General Public License, version 2.1 only,
10 * as published by the Free Software Foundation.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public License
18 * along with this library; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
#include <stdlib.h>
#include <string.h>
#include <stddef.h>
#include <errno.h>

#include <common/align.h>
#include <common/compat/string.h>

#include "filter-bytecode.h"
#include "filter-ir.h"
#include "filter-ast.h"

#include <common/macros.h>

953192ba
MD
34#ifndef max_t
35#define max_t(type, a, b) ((type) ((a) > (b) ? (a) : (b)))
36#endif
37
953192ba
MD
38#define INIT_ALLOC_SIZE 4
39
40static
41int recursive_visit_gen_bytecode(struct filter_parser_ctx *ctx,
42 struct ir_op *node);
43
01a204f0
CB
/*
 * Smallest power-of-two order that can hold `count` bytes
 * (i.e. ceil(log2(count))).
 */
static inline int get_count_order(unsigned int count)
{
	int order = lttng_fls(count) - 1;

	/* Not a power of two: round up to the next order. */
	return (count & (count - 1)) ? order + 1 : order;
}
53
953192ba 54static
53a80697 55int bytecode_init(struct lttng_filter_bytecode_alloc **fb)
953192ba 56{
1029587a
MD
57 uint32_t alloc_len;
58
59 alloc_len = sizeof(struct lttng_filter_bytecode_alloc) + INIT_ALLOC_SIZE;
60 *fb = calloc(alloc_len, 1);
953192ba
MD
61 if (!*fb) {
62 return -ENOMEM;
63 } else {
1029587a 64 (*fb)->alloc_len = alloc_len;
953192ba
MD
65 return 0;
66 }
67}
68
/*
 * Reserve `len` bytes in the bytecode buffer, first padding the current
 * length up to an `align` boundary, growing the allocation as needed.
 *
 * Returns the offset (within b.data) where the reserved bytes begin,
 * -EINVAL if the bytecode would exceed LTTNG_FILTER_MAX_LEN, or
 * -ENOMEM on allocation failure.
 */
static
int32_t bytecode_reserve(struct lttng_filter_bytecode_alloc **fb, uint32_t align, uint32_t len)
{
	int32_t ret;
	/* Padding so the new instruction starts on an `align` boundary. */
	uint32_t padding = offset_align((*fb)->b.len, align);
	uint32_t new_len = (*fb)->b.len + padding + len;
	uint32_t new_alloc_len = sizeof(struct lttng_filter_bytecode_alloc) + new_len;
	uint32_t old_alloc_len = (*fb)->alloc_len;

	if (new_len > LTTNG_FILTER_MAX_LEN)
		return -EINVAL;

	if (new_alloc_len > old_alloc_len) {
		struct lttng_filter_bytecode_alloc *newptr;

		/* Grow geometrically: next power of two, at least doubling. */
		new_alloc_len =
			max_t(uint32_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
		newptr = realloc(*fb, new_alloc_len);
		if (!newptr)
			return -ENOMEM;
		*fb = newptr;
		/* We zero directly the memory from start of allocation. */
		memset(&((char *) *fb)[old_alloc_len], 0, new_alloc_len - old_alloc_len);
		(*fb)->alloc_len = new_alloc_len;
	}
	/* Commit padding first so `ret` is the aligned write position. */
	(*fb)->b.len += padding;
	ret = (*fb)->b.len;
	(*fb)->b.len += len;
	return ret;
}
99
100static
53a80697 101int bytecode_push(struct lttng_filter_bytecode_alloc **fb, const void *data,
953192ba
MD
102 uint32_t align, uint32_t len)
103{
104 int32_t offset;
105
106 offset = bytecode_reserve(fb, align, len);
107 if (offset < 0)
108 return offset;
109 memcpy(&(*fb)->b.data[offset], data, len);
110 return 0;
111}
112
113static
53a80697 114int bytecode_push_logical(struct lttng_filter_bytecode_alloc **fb,
953192ba
MD
115 struct logical_op *data,
116 uint32_t align, uint32_t len,
117 uint16_t *skip_offset)
118{
119 int32_t offset;
120
121 offset = bytecode_reserve(fb, align, len);
122 if (offset < 0)
123 return offset;
124 memcpy(&(*fb)->b.data[offset], data, len);
125 *skip_offset =
126 (void *) &((struct logical_op *) &(*fb)->b.data[offset])->skip_offset
127 - (void *) &(*fb)->b.data[0];
128 return 0;
129}
130
131static
53a80697 132int bytecode_patch(struct lttng_filter_bytecode_alloc **fb,
953192ba
MD
133 const void *data,
134 uint16_t offset,
135 uint32_t len)
136{
137 if (offset >= (*fb)->b.len) {
138 return -EINVAL;
139 }
140 memcpy(&(*fb)->b.data[offset], data, len);
141 return 0;
142}
143
144static
145int visit_node_root(struct filter_parser_ctx *ctx, struct ir_op *node)
146{
147 int ret;
148 struct return_op insn;
149
150 /* Visit child */
151 ret = recursive_visit_gen_bytecode(ctx, node->u.root.child);
152 if (ret)
153 return ret;
154
155 /* Generate end of bytecode instruction */
156 insn.op = FILTER_OP_RETURN;
157 return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
158}
159
016dbbb4
MD
/*
 * Append `append` to the heap-allocated string `*s`, reallocating as
 * needed. `*s` may be NULL, in which case the result is a copy of
 * `append`. On success, returns 0 and `*s` points to the new string
 * (caller frees). On failure, returns -ENOMEM and `*s` is unchanged
 * (still valid, still owned by the caller).
 */
static
int append_str(char **s, const char *append)
{
	char *new;
	size_t oldlen = (*s == NULL) ? 0 : strlen(*s);
	size_t appendlen = strlen(append);

	/*
	 * realloc preserves the existing contents (and leaves *s intact
	 * on failure), so only the appended part needs copying — the
	 * previous calloc/strcpy/strcat version re-copied the whole
	 * string on every call.
	 */
	new = realloc(*s, oldlen + appendlen + 1);
	if (!new) {
		return -ENOMEM;
	}
	memcpy(new + oldlen, append, appendlen + 1);	/* include NUL */
	*s = new;
	return 0;
}
180
/*
 * Check whether a load expression can be encoded with the legacy
 * (backward compatible) field-ref/context-ref instructions: a root
 * selector followed only by symbol lookups, terminated by a field load.
 * On match, *op_type is set to the legacy opcode and the legacy
 * dot-separated symbol name is built into *symbol (heap-allocated;
 * caller frees).
 *
 * Return values:
 * 1: match
 * 0: no match
 * < 0: error
 */
static
int load_expression_legacy_match(const struct ir_load_expression *exp,
		enum filter_op *op_type,
		char **symbol)
{
	const struct ir_load_expression_op *op;
	bool need_dot = false;

	/* The first op selects the root, which determines the opcode. */
	op = exp->child;
	switch (op->type) {
	case IR_LOAD_EXPRESSION_GET_CONTEXT_ROOT:
		*op_type = FILTER_OP_GET_CONTEXT_REF;
		if (append_str(symbol, "$ctx.")) {
			return -ENOMEM;
		}
		need_dot = false;
		break;
	case IR_LOAD_EXPRESSION_GET_APP_CONTEXT_ROOT:
		*op_type = FILTER_OP_GET_CONTEXT_REF;
		if (append_str(symbol, "$app.")) {
			return -ENOMEM;
		}
		need_dot = false;
		break;
	case IR_LOAD_EXPRESSION_GET_PAYLOAD_ROOT:
		*op_type = FILTER_OP_LOAD_FIELD_REF;
		need_dot = false;
		break;

	case IR_LOAD_EXPRESSION_GET_SYMBOL:
	case IR_LOAD_EXPRESSION_GET_INDEX:
	case IR_LOAD_EXPRESSION_LOAD_FIELD:
	default:
		return 0; /* no match */
	}

	/*
	 * All remaining ops must be symbol lookups (joined with dots),
	 * ending with a LOAD_FIELD. Anything else (e.g. GET_INDEX) has
	 * no legacy encoding.
	 */
	for (;;) {
		op = op->next;
		if (!op) {
			return 0; /* no match */
		}
		switch (op->type) {
		case IR_LOAD_EXPRESSION_LOAD_FIELD:
			goto end;
		case IR_LOAD_EXPRESSION_GET_SYMBOL:
			if (need_dot && append_str(symbol, ".")) {
				return -ENOMEM;
			}
			if (append_str(symbol, op->u.symbol)) {
				return -ENOMEM;
			}
			break;
		default:
			return 0; /* no match */
		}
		need_dot = true;
	}
end:
	return 1; /* Legacy match */
}
246
247/*
248 * 1: legacy match
249 * 0: no legacy match
250 * < 0: error
251 */
252static
253int visit_node_load_expression_legacy(struct filter_parser_ctx *ctx,
254 const struct ir_load_expression *exp,
255 const struct ir_load_expression_op *op)
256{
257 struct load_op *insn = NULL;
258 uint32_t insn_len = sizeof(struct load_op)
259 + sizeof(struct field_ref);
260 struct field_ref ref_offset;
261 uint32_t reloc_offset_u32;
262 uint16_t reloc_offset;
263 enum filter_op op_type;
264 char *symbol = NULL;
265 int ret;
266
267 ret = load_expression_legacy_match(exp, &op_type, &symbol);
268 if (ret <= 0) {
269 goto end;
270 }
271 insn = calloc(insn_len, 1);
272 if (!insn) {
273 ret = -ENOMEM;
274 goto end;
275 }
276 insn->op = op_type;
277 ref_offset.offset = (uint16_t) -1U;
278 memcpy(insn->data, &ref_offset, sizeof(ref_offset));
279 /* reloc_offset points to struct load_op */
280 reloc_offset_u32 = bytecode_get_len(&ctx->bytecode->b);
281 if (reloc_offset_u32 > LTTNG_FILTER_MAX_LEN - 1) {
282 ret = -EINVAL;
283 goto end;
284 }
285 reloc_offset = (uint16_t) reloc_offset_u32;
286 ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
287 if (ret) {
288 goto end;
289 }
290 /* append reloc */
291 ret = bytecode_push(&ctx->bytecode_reloc, &reloc_offset,
292 1, sizeof(reloc_offset));
293 if (ret) {
294 goto end;
295 }
296 ret = bytecode_push(&ctx->bytecode_reloc, symbol,
297 1, strlen(symbol) + 1);
298 ret = 1; /* legacy */
299end:
300 free(insn);
301 free(symbol);
302 return ret;
303}
304
bff988fa
MD
305static
306int visit_node_load_expression(struct filter_parser_ctx *ctx,
307 const struct ir_op *node)
308{
309 struct ir_load_expression *exp;
310 struct ir_load_expression_op *op;
016dbbb4 311 int ret;
bff988fa
MD
312
313 exp = node->u.load.u.expression;
314 if (!exp) {
315 return -EINVAL;
316 }
317 op = exp->child;
318 if (!op) {
319 return -EINVAL;
320 }
016dbbb4
MD
321
322 ret = visit_node_load_expression_legacy(ctx, exp, op);
323 if (ret < 0) {
324 return ret;
325 }
326 if (ret > 0) {
327 return 0; /* legacy */
328 }
329
bff988fa
MD
330 for (; op != NULL; op = op->next) {
331 switch (op->type) {
332 case IR_LOAD_EXPRESSION_GET_CONTEXT_ROOT:
333 {
334 struct load_op *insn;
335 uint32_t insn_len = sizeof(struct load_op);
336 int ret;
337
338 insn = calloc(insn_len, 1);
339 if (!insn)
340 return -ENOMEM;
341 insn->op = FILTER_OP_GET_CONTEXT_ROOT;
342 ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
343 free(insn);
344 if (ret) {
345 return ret;
346 }
347 break;
348 }
349 case IR_LOAD_EXPRESSION_GET_APP_CONTEXT_ROOT:
350 {
351 struct load_op *insn;
352 uint32_t insn_len = sizeof(struct load_op);
353 int ret;
354
355 insn = calloc(insn_len, 1);
356 if (!insn)
357 return -ENOMEM;
358 insn->op = FILTER_OP_GET_APP_CONTEXT_ROOT;
359 ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
360 free(insn);
361 if (ret) {
362 return ret;
363 }
364 break;
365 }
366 case IR_LOAD_EXPRESSION_GET_PAYLOAD_ROOT:
367 {
368 struct load_op *insn;
369 uint32_t insn_len = sizeof(struct load_op);
370 int ret;
371
372 insn = calloc(insn_len, 1);
373 if (!insn)
374 return -ENOMEM;
375 insn->op = FILTER_OP_GET_PAYLOAD_ROOT;
376 ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
377 free(insn);
378 if (ret) {
379 return ret;
380 }
381 break;
382 }
383 case IR_LOAD_EXPRESSION_GET_SYMBOL:
384 {
385 struct load_op *insn;
386 uint32_t insn_len = sizeof(struct load_op)
387 + sizeof(struct get_symbol);
388 struct get_symbol symbol_offset;
389 uint32_t reloc_offset_u32;
390 uint16_t reloc_offset;
391 uint32_t bytecode_reloc_offset_u32;
392 int ret;
393
394 insn = calloc(insn_len, 1);
395 if (!insn)
396 return -ENOMEM;
397 insn->op = FILTER_OP_GET_SYMBOL;
398 bytecode_reloc_offset_u32 =
399 bytecode_get_len(&ctx->bytecode_reloc->b)
400 + sizeof(reloc_offset);
401 symbol_offset.offset =
402 (uint16_t) bytecode_reloc_offset_u32;
403 memcpy(insn->data, &symbol_offset,
404 sizeof(symbol_offset));
405 /* reloc_offset points to struct load_op */
406 reloc_offset_u32 = bytecode_get_len(&ctx->bytecode->b);
407 if (reloc_offset_u32 > LTTNG_FILTER_MAX_LEN - 1) {
408 free(insn);
409 return -EINVAL;
410 }
411 reloc_offset = (uint16_t) reloc_offset_u32;
412 ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
413 if (ret) {
414 free(insn);
415 return ret;
416 }
417 /* append reloc */
418 ret = bytecode_push(&ctx->bytecode_reloc, &reloc_offset,
419 1, sizeof(reloc_offset));
420 if (ret) {
421 free(insn);
422 return ret;
423 }
424 ret = bytecode_push(&ctx->bytecode_reloc,
425 op->u.symbol,
426 1, strlen(op->u.symbol) + 1);
427 free(insn);
428 if (ret) {
429 return ret;
430 }
431 break;
432 }
433 case IR_LOAD_EXPRESSION_GET_INDEX:
434 {
435 struct load_op *insn;
436 uint32_t insn_len = sizeof(struct load_op)
437 + sizeof(struct get_index_u64);
438 struct get_index_u64 index;
439 int ret;
440
441 insn = calloc(insn_len, 1);
442 if (!insn)
443 return -ENOMEM;
444 insn->op = FILTER_OP_GET_INDEX_U64;
445 index.index = op->u.index;
446 memcpy(insn->data, &index, sizeof(index));
447 ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
448 free(insn);
449 if (ret) {
450 return ret;
451 }
452 break;
453 }
454 case IR_LOAD_EXPRESSION_LOAD_FIELD:
455 {
456 struct load_op *insn;
457 uint32_t insn_len = sizeof(struct load_op);
458 int ret;
459
460 insn = calloc(insn_len, 1);
461 if (!insn)
462 return -ENOMEM;
463 insn->op = FILTER_OP_LOAD_FIELD;
464 ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
465 free(insn);
466 if (ret) {
467 return ret;
468 }
469 break;
470 }
471 }
472 }
473 return 0;
474}
475
953192ba
MD
/*
 * Generate a load instruction for a literal (string, numeric, float)
 * or dispatch to the load-expression generator for field/context
 * lookups.
 *
 * Returns 0 on success, a negative errno value on error.
 */
static
int visit_node_load(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;

	switch (node->data_type) {
	case IR_DATA_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown data type in %s\n",
			__func__);
		return -EINVAL;

	case IR_DATA_STRING:
	{
		struct load_op *insn;
		/* The NUL-terminated string is carried inline in the insn. */
		uint32_t insn_len = sizeof(struct load_op)
			+ strlen(node->u.load.u.string.value) + 1;

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;

		switch (node->u.load.u.string.type) {
		case IR_LOAD_STRING_TYPE_GLOB_STAR:
			/*
			 * We explicitly tell the interpreter here that
			 * this load is a full star globbing pattern so
			 * that the appropriate matching function can be
			 * called. Also, see comment below.
			 */
			insn->op = FILTER_OP_LOAD_STAR_GLOB_STRING;
			break;
		default:
			/*
			 * This is the "legacy" string, which includes
			 * star globbing patterns with a star only at
			 * the end. Both "plain" and "star at the end"
			 * literal strings are handled at the same place
			 * by the tracer's filter bytecode interpreter,
			 * whereas full star globbing patterns (stars
			 * can be anywhere in the string) is a special
			 * case.
			 */
			insn->op = FILTER_OP_LOAD_STRING;
			break;
		}

		strcpy(insn->data, node->u.load.u.string.value);
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		free(insn);
		return ret;
	}
	case IR_DATA_NUMERIC:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ sizeof(struct literal_numeric);

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_S64;
		/* Numeric literals are always emitted as signed 64-bit. */
		memcpy(insn->data, &node->u.load.u.num, sizeof(int64_t));
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		free(insn);
		return ret;
	}
	case IR_DATA_FLOAT:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ sizeof(struct literal_double);

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_DOUBLE;
		memcpy(insn->data, &node->u.load.u.flt, sizeof(double));
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		free(insn);
		return ret;
	}
	case IR_DATA_EXPRESSION:
		return visit_node_load_expression(ctx, node);
	}
}
562
563static
564int visit_node_unary(struct filter_parser_ctx *ctx, struct ir_op *node)
565{
566 int ret;
567 struct unary_op insn;
568
569 /* Visit child */
570 ret = recursive_visit_gen_bytecode(ctx, node->u.unary.child);
571 if (ret)
572 return ret;
573
574 /* Generate end of bytecode instruction */
575 switch (node->u.unary.type) {
576 case AST_UNARY_UNKNOWN:
577 default:
578 fprintf(stderr, "[error] Unknown unary node type in %s\n",
579 __func__);
580 return -EINVAL;
581 case AST_UNARY_PLUS:
582 /* Nothing to do. */
583 return 0;
584 case AST_UNARY_MINUS:
585 insn.op = FILTER_OP_UNARY_MINUS;
953192ba
MD
586 return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
587 case AST_UNARY_NOT:
588 insn.op = FILTER_OP_UNARY_NOT;
953192ba
MD
589 return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
590 }
591}
592
593/*
594 * Binary comparator nesting is disallowed. This allows fitting into
595 * only 2 registers.
596 */
597static
598int visit_node_binary(struct filter_parser_ctx *ctx, struct ir_op *node)
599{
600 int ret;
601 struct binary_op insn;
602
603 /* Visit child */
604 ret = recursive_visit_gen_bytecode(ctx, node->u.binary.left);
605 if (ret)
606 return ret;
607 ret = recursive_visit_gen_bytecode(ctx, node->u.binary.right);
608 if (ret)
609 return ret;
610
611 switch (node->u.binary.type) {
612 case AST_OP_UNKNOWN:
613 default:
614 fprintf(stderr, "[error] Unknown unary node type in %s\n",
615 __func__);
616 return -EINVAL;
617
618 case AST_OP_AND:
619 case AST_OP_OR:
620 fprintf(stderr, "[error] Unexpected logical node type in %s\n",
621 __func__);
622 return -EINVAL;
623
624 case AST_OP_MUL:
625 insn.op = FILTER_OP_MUL;
626 break;
627 case AST_OP_DIV:
628 insn.op = FILTER_OP_DIV;
629 break;
630 case AST_OP_MOD:
631 insn.op = FILTER_OP_MOD;
632 break;
633 case AST_OP_PLUS:
634 insn.op = FILTER_OP_PLUS;
635 break;
636 case AST_OP_MINUS:
637 insn.op = FILTER_OP_MINUS;
638 break;
639 case AST_OP_RSHIFT:
640 insn.op = FILTER_OP_RSHIFT;
641 break;
642 case AST_OP_LSHIFT:
643 insn.op = FILTER_OP_LSHIFT;
644 break;
bff988fa
MD
645 case AST_OP_BIT_AND:
646 insn.op = FILTER_OP_BIT_AND;
953192ba 647 break;
bff988fa
MD
648 case AST_OP_BIT_OR:
649 insn.op = FILTER_OP_BIT_OR;
953192ba 650 break;
bff988fa
MD
651 case AST_OP_BIT_XOR:
652 insn.op = FILTER_OP_BIT_XOR;
953192ba
MD
653 break;
654
655 case AST_OP_EQ:
656 insn.op = FILTER_OP_EQ;
657 break;
658 case AST_OP_NE:
659 insn.op = FILTER_OP_NE;
660 break;
661 case AST_OP_GT:
662 insn.op = FILTER_OP_GT;
663 break;
664 case AST_OP_LT:
665 insn.op = FILTER_OP_LT;
666 break;
667 case AST_OP_GE:
668 insn.op = FILTER_OP_GE;
669 break;
670 case AST_OP_LE:
671 insn.op = FILTER_OP_LE;
672 break;
673 }
674 return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
675}
676
8cf9540a
MD
677/*
678 * A logical op always return a s64 (1 or 0).
679 */
953192ba
MD
680static
681int visit_node_logical(struct filter_parser_ctx *ctx, struct ir_op *node)
682{
683 int ret;
684 struct logical_op insn;
685 uint16_t skip_offset_loc;
686 uint16_t target_loc;
687
688 /* Visit left child */
689 ret = recursive_visit_gen_bytecode(ctx, node->u.binary.left);
690 if (ret)
691 return ret;
8cf9540a 692 /* Cast to s64 if float or field ref */
586dc72f 693 if ((node->u.binary.left->data_type == IR_DATA_FIELD_REF
661dfdd1 694 || node->u.binary.left->data_type == IR_DATA_GET_CONTEXT_REF
bff988fa 695 || node->u.binary.left->data_type == IR_DATA_EXPRESSION)
8cf9540a
MD
696 || node->u.binary.left->data_type == IR_DATA_FLOAT) {
697 struct cast_op cast_insn;
698
586dc72f 699 if (node->u.binary.left->data_type == IR_DATA_FIELD_REF
661dfdd1 700 || node->u.binary.left->data_type == IR_DATA_GET_CONTEXT_REF
bff988fa 701 || node->u.binary.left->data_type == IR_DATA_EXPRESSION) {
29fefef8
MD
702 cast_insn.op = FILTER_OP_CAST_TO_S64;
703 } else {
704 cast_insn.op = FILTER_OP_CAST_DOUBLE_TO_S64;
705 }
8cf9540a
MD
706 ret = bytecode_push(&ctx->bytecode, &cast_insn,
707 1, sizeof(cast_insn));
708 if (ret)
709 return ret;
710 }
953192ba
MD
711 switch (node->u.logical.type) {
712 default:
713 fprintf(stderr, "[error] Unknown node type in %s\n",
714 __func__);
715 return -EINVAL;
716
717 case AST_OP_AND:
718 insn.op = FILTER_OP_AND;
719 break;
720 case AST_OP_OR:
721 insn.op = FILTER_OP_OR;
722 break;
723 }
724 insn.skip_offset = (uint16_t) -1UL; /* Temporary */
725 ret = bytecode_push_logical(&ctx->bytecode, &insn, 1, sizeof(insn),
726 &skip_offset_loc);
727 if (ret)
728 return ret;
729 /* Visit right child */
730 ret = recursive_visit_gen_bytecode(ctx, node->u.binary.right);
731 if (ret)
732 return ret;
8cf9540a 733 /* Cast to s64 if float or field ref */
586dc72f 734 if ((node->u.binary.right->data_type == IR_DATA_FIELD_REF
661dfdd1 735 || node->u.binary.right->data_type == IR_DATA_GET_CONTEXT_REF
bff988fa 736 || node->u.binary.right->data_type == IR_DATA_EXPRESSION)
8cf9540a
MD
737 || node->u.binary.right->data_type == IR_DATA_FLOAT) {
738 struct cast_op cast_insn;
739
586dc72f 740 if (node->u.binary.right->data_type == IR_DATA_FIELD_REF
661dfdd1 741 || node->u.binary.right->data_type == IR_DATA_GET_CONTEXT_REF
bff988fa 742 || node->u.binary.right->data_type == IR_DATA_EXPRESSION) {
29fefef8
MD
743 cast_insn.op = FILTER_OP_CAST_TO_S64;
744 } else {
745 cast_insn.op = FILTER_OP_CAST_DOUBLE_TO_S64;
746 }
8cf9540a
MD
747 ret = bytecode_push(&ctx->bytecode, &cast_insn,
748 1, sizeof(cast_insn));
749 if (ret)
750 return ret;
751 }
953192ba
MD
752 /* We now know where the logical op can skip. */
753 target_loc = (uint16_t) bytecode_get_len(&ctx->bytecode->b);
754 ret = bytecode_patch(&ctx->bytecode,
755 &target_loc, /* Offset to jump to */
756 skip_offset_loc, /* Where to patch */
757 sizeof(uint16_t));
758 return ret;
759}
760
761/*
762 * Postorder traversal of the tree. We need the children result before
763 * we can evaluate the parent.
764 */
765static
766int recursive_visit_gen_bytecode(struct filter_parser_ctx *ctx,
767 struct ir_op *node)
768{
769 switch (node->op) {
770 case IR_OP_UNKNOWN:
771 default:
772 fprintf(stderr, "[error] Unknown node type in %s\n",
773 __func__);
774 return -EINVAL;
775
776 case IR_OP_ROOT:
777 return visit_node_root(ctx, node);
778 case IR_OP_LOAD:
779 return visit_node_load(ctx, node);
780 case IR_OP_UNARY:
781 return visit_node_unary(ctx, node);
782 case IR_OP_BINARY:
783 return visit_node_binary(ctx, node);
784 case IR_OP_LOGICAL:
785 return visit_node_logical(ctx, node);
786 }
787}
788
a187da1a 789LTTNG_HIDDEN
953192ba
MD
790void filter_bytecode_free(struct filter_parser_ctx *ctx)
791{
7ca1dc6f
DG
792 if (!ctx) {
793 return;
794 }
795
3f0c8837
DG
796 if (ctx->bytecode) {
797 free(ctx->bytecode);
798 ctx->bytecode = NULL;
799 }
800
801 if (ctx->bytecode_reloc) {
802 free(ctx->bytecode_reloc);
803 ctx->bytecode_reloc = NULL;
804 }
953192ba
MD
805}
806
a187da1a 807LTTNG_HIDDEN
953192ba
MD
808int filter_visitor_bytecode_generate(struct filter_parser_ctx *ctx)
809{
810 int ret;
811
812 ret = bytecode_init(&ctx->bytecode);
813 if (ret)
814 return ret;
815 ret = bytecode_init(&ctx->bytecode_reloc);
816 if (ret)
817 goto error;
818 ret = recursive_visit_gen_bytecode(ctx, ctx->ir_root);
819 if (ret)
820 goto error;
821
822 /* Finally, append symbol table to bytecode */
823 ctx->bytecode->b.reloc_table_offset = bytecode_get_len(&ctx->bytecode->b);
824 return bytecode_push(&ctx->bytecode, ctx->bytecode_reloc->b.data,
825 1, bytecode_get_len(&ctx->bytecode_reloc->b));
826
827error:
828 filter_bytecode_free(ctx);
829 return ret;
830}
This page took 0.087373 seconds and 5 git commands to generate.