Filter: index array, sequences, implement bitwise binary operators
[lttng-tools.git] / src / lib / lttng-ctl / filter / filter-visitor-generate-bytecode.c
CommitLineData
953192ba
MD
1/*
2 * filter-visitor-generate-bytecode.c
3 *
4 * LTTng filter bytecode generation
5 *
6 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU Lesser General Public License, version 2.1 only,
10 * as published by the Free Software Foundation.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public License
18 * along with this library; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <errno.h>

#include <common/align.h>
#include <common/compat/string.h>
#include <common/macros.h>

#include "filter-bytecode.h"
#include "filter-ir.h"
#include "filter-ast.h"
953192ba
MD
34#ifndef max_t
35#define max_t(type, a, b) ((type) ((a) > (b) ? (a) : (b)))
36#endif
37
953192ba
MD
38#define INIT_ALLOC_SIZE 4
39
40static
41int recursive_visit_gen_bytecode(struct filter_parser_ctx *ctx,
42 struct ir_op *node);
43
01a204f0
CB
44static inline int get_count_order(unsigned int count)
45{
46 int order;
47
afc5df03 48 order = lttng_fls(count) - 1;
01a204f0
CB
49 if (count & (count - 1))
50 order++;
51 return order;
52}
53
953192ba 54static
53a80697 55int bytecode_init(struct lttng_filter_bytecode_alloc **fb)
953192ba 56{
1029587a
MD
57 uint32_t alloc_len;
58
59 alloc_len = sizeof(struct lttng_filter_bytecode_alloc) + INIT_ALLOC_SIZE;
60 *fb = calloc(alloc_len, 1);
953192ba
MD
61 if (!*fb) {
62 return -ENOMEM;
63 } else {
1029587a 64 (*fb)->alloc_len = alloc_len;
953192ba
MD
65 return 0;
66 }
67}
68
69static
53a80697 70int32_t bytecode_reserve(struct lttng_filter_bytecode_alloc **fb, uint32_t align, uint32_t len)
953192ba
MD
71{
72 int32_t ret;
73 uint32_t padding = offset_align((*fb)->b.len, align);
ec96a8f6 74 uint32_t new_len = (*fb)->b.len + padding + len;
1029587a 75 uint32_t new_alloc_len = sizeof(struct lttng_filter_bytecode_alloc) + new_len;
ec96a8f6 76 uint32_t old_alloc_len = (*fb)->alloc_len;
953192ba 77
ec96a8f6 78 if (new_len > LTTNG_FILTER_MAX_LEN)
5ddb0a08
CB
79 return -EINVAL;
80
ec96a8f6 81 if (new_alloc_len > old_alloc_len) {
d0b96690
DG
82 struct lttng_filter_bytecode_alloc *newptr;
83
ec96a8f6
MD
84 new_alloc_len =
85 max_t(uint32_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
d0b96690
DG
86 newptr = realloc(*fb, new_alloc_len);
87 if (!newptr)
953192ba 88 return -ENOMEM;
d0b96690 89 *fb = newptr;
1029587a 90 /* We zero directly the memory from start of allocation. */
ec96a8f6
MD
91 memset(&((char *) *fb)[old_alloc_len], 0, new_alloc_len - old_alloc_len);
92 (*fb)->alloc_len = new_alloc_len;
953192ba
MD
93 }
94 (*fb)->b.len += padding;
95 ret = (*fb)->b.len;
96 (*fb)->b.len += len;
97 return ret;
98}
99
100static
53a80697 101int bytecode_push(struct lttng_filter_bytecode_alloc **fb, const void *data,
953192ba
MD
102 uint32_t align, uint32_t len)
103{
104 int32_t offset;
105
106 offset = bytecode_reserve(fb, align, len);
107 if (offset < 0)
108 return offset;
109 memcpy(&(*fb)->b.data[offset], data, len);
110 return 0;
111}
112
113static
53a80697 114int bytecode_push_logical(struct lttng_filter_bytecode_alloc **fb,
953192ba
MD
115 struct logical_op *data,
116 uint32_t align, uint32_t len,
117 uint16_t *skip_offset)
118{
119 int32_t offset;
120
121 offset = bytecode_reserve(fb, align, len);
122 if (offset < 0)
123 return offset;
124 memcpy(&(*fb)->b.data[offset], data, len);
125 *skip_offset =
126 (void *) &((struct logical_op *) &(*fb)->b.data[offset])->skip_offset
127 - (void *) &(*fb)->b.data[0];
128 return 0;
129}
130
131static
53a80697 132int bytecode_patch(struct lttng_filter_bytecode_alloc **fb,
953192ba
MD
133 const void *data,
134 uint16_t offset,
135 uint32_t len)
136{
137 if (offset >= (*fb)->b.len) {
138 return -EINVAL;
139 }
140 memcpy(&(*fb)->b.data[offset], data, len);
141 return 0;
142}
143
144static
145int visit_node_root(struct filter_parser_ctx *ctx, struct ir_op *node)
146{
147 int ret;
148 struct return_op insn;
149
150 /* Visit child */
151 ret = recursive_visit_gen_bytecode(ctx, node->u.root.child);
152 if (ret)
153 return ret;
154
155 /* Generate end of bytecode instruction */
156 insn.op = FILTER_OP_RETURN;
157 return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
158}
159
87942d06
MD
160static
161int visit_node_load_expression(struct filter_parser_ctx *ctx,
162 struct ir_op *node)
163{
164 struct ir_load_expression *exp;
165 struct ir_load_expression_op *op;
166
167 exp = node->u.load.u.expression;
168 if (!exp) {
169 return -EINVAL;
170 }
171 op = exp->child;
172 if (!op) {
173 return -EINVAL;
174 }
175 for (; op != NULL; op = op->next) {
176 switch (op->type) {
177 case IR_LOAD_EXPRESSION_GET_CONTEXT_ROOT:
178 {
179 struct load_op *insn;
180 uint32_t insn_len = sizeof(struct load_op);
181 int ret;
182
183 insn = calloc(insn_len, 1);
184 if (!insn)
185 return -ENOMEM;
186 insn->op = FILTER_OP_GET_CONTEXT_ROOT;
187 ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
188 free(insn);
189 if (ret) {
190 return ret;
191 }
192 break;
193 }
194 case IR_LOAD_EXPRESSION_GET_APP_CONTEXT_ROOT:
195 {
196 struct load_op *insn;
197 uint32_t insn_len = sizeof(struct load_op);
198 int ret;
199
200 insn = calloc(insn_len, 1);
201 if (!insn)
202 return -ENOMEM;
203 insn->op = FILTER_OP_GET_APP_CONTEXT_ROOT;
204 ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
205 free(insn);
206 if (ret) {
207 return ret;
208 }
209 break;
210 }
211 case IR_LOAD_EXPRESSION_GET_PAYLOAD_ROOT:
212 {
213 struct load_op *insn;
214 uint32_t insn_len = sizeof(struct load_op);
215 int ret;
216
217 insn = calloc(insn_len, 1);
218 if (!insn)
219 return -ENOMEM;
220 insn->op = FILTER_OP_GET_PAYLOAD_ROOT;
221 ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
222 free(insn);
223 if (ret) {
224 return ret;
225 }
226 break;
227 }
228 case IR_LOAD_EXPRESSION_GET_SYMBOL:
229 {
230 struct load_op *insn;
231 uint32_t insn_len = sizeof(struct load_op)
232 + sizeof(struct get_symbol);
233 struct get_symbol symbol_offset;
234 uint32_t reloc_offset_u32;
235 uint16_t reloc_offset;
236 uint32_t bytecode_reloc_offset_u32;
237 int ret;
238
239 insn = calloc(insn_len, 1);
240 if (!insn)
241 return -ENOMEM;
242 insn->op = FILTER_OP_GET_SYMBOL;
243 bytecode_reloc_offset_u32 =
244 bytecode_get_len(&ctx->bytecode_reloc->b)
245 + sizeof(reloc_offset);
246 symbol_offset.offset =
247 (uint16_t) bytecode_reloc_offset_u32;
248 memcpy(insn->data, &symbol_offset,
249 sizeof(symbol_offset));
250 /* reloc_offset points to struct load_op */
251 reloc_offset_u32 = bytecode_get_len(&ctx->bytecode->b);
252 if (reloc_offset_u32 > LTTNG_FILTER_MAX_LEN - 1) {
253 free(insn);
254 return -EINVAL;
255 }
256 reloc_offset = (uint16_t) reloc_offset_u32;
257 ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
258 if (ret) {
259 free(insn);
260 return ret;
261 }
262 /* append reloc */
263 ret = bytecode_push(&ctx->bytecode_reloc, &reloc_offset,
264 1, sizeof(reloc_offset));
265 if (ret) {
266 free(insn);
267 return ret;
268 }
269 ret = bytecode_push(&ctx->bytecode_reloc,
270 op->u.symbol,
271 1, strlen(op->u.symbol) + 1);
272 free(insn);
273 if (ret) {
274 return ret;
275 }
276 break;
277 }
278 case IR_LOAD_EXPRESSION_GET_INDEX:
279 {
280 struct load_op *insn;
281 uint32_t insn_len = sizeof(struct load_op)
282 + sizeof(struct get_index_u64);
283 struct get_index_u64 index;
284 int ret;
285
286 insn = calloc(insn_len, 1);
287 if (!insn)
288 return -ENOMEM;
289 insn->op = FILTER_OP_GET_INDEX_U64;
290 index.index = op->u.index;
291 memcpy(insn->data, &index, sizeof(index));
292 ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
293 free(insn);
294 if (ret) {
295 return ret;
296 }
297 break;
298 }
299 case IR_LOAD_EXPRESSION_LOAD_FIELD:
300 {
301 struct load_op *insn;
302 uint32_t insn_len = sizeof(struct load_op);
303 int ret;
304
305 insn = calloc(insn_len, 1);
306 if (!insn)
307 return -ENOMEM;
308 insn->op = FILTER_OP_LOAD_FIELD;
309 ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
310 free(insn);
311 if (ret) {
312 return ret;
313 }
314 break;
315 }
316 }
317 }
318 return 0;
319}
320
953192ba
MD
321static
322int visit_node_load(struct filter_parser_ctx *ctx, struct ir_op *node)
323{
324 int ret;
325
326 switch (node->data_type) {
327 case IR_DATA_UNKNOWN:
328 default:
329 fprintf(stderr, "[error] Unknown data type in %s\n",
330 __func__);
331 return -EINVAL;
332
333 case IR_DATA_STRING:
334 {
335 struct load_op *insn;
336 uint32_t insn_len = sizeof(struct load_op)
9f449915 337 + strlen(node->u.load.u.string.value) + 1;
953192ba
MD
338
339 insn = calloc(insn_len, 1);
340 if (!insn)
341 return -ENOMEM;
9f449915
PP
342
343 switch (node->u.load.u.string.type) {
344 case IR_LOAD_STRING_TYPE_GLOB_STAR:
345 /*
346 * We explicitly tell the interpreter here that
347 * this load is a full star globbing pattern so
348 * that the appropriate matching function can be
349 * called. Also, see comment below.
350 */
351 insn->op = FILTER_OP_LOAD_STAR_GLOB_STRING;
352 break;
353 default:
354 /*
355 * This is the "legacy" string, which includes
356 * star globbing patterns with a star only at
357 * the end. Both "plain" and "star at the end"
358 * literal strings are handled at the same place
359 * by the tracer's filter bytecode interpreter,
360 * whereas full star globbing patterns (stars
361 * can be anywhere in the string) is a special
362 * case.
363 */
364 insn->op = FILTER_OP_LOAD_STRING;
365 break;
366 }
367
368 strcpy(insn->data, node->u.load.u.string.value);
953192ba
MD
369 ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
370 free(insn);
371 return ret;
372 }
373 case IR_DATA_NUMERIC:
374 {
375 struct load_op *insn;
376 uint32_t insn_len = sizeof(struct load_op)
377 + sizeof(struct literal_numeric);
378
379 insn = calloc(insn_len, 1);
380 if (!insn)
381 return -ENOMEM;
382 insn->op = FILTER_OP_LOAD_S64;
58d494e4 383 memcpy(insn->data, &node->u.load.u.num, sizeof(int64_t));
953192ba
MD
384 ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
385 free(insn);
386 return ret;
387 }
e90d8561
MD
388 case IR_DATA_FLOAT:
389 {
390 struct load_op *insn;
391 uint32_t insn_len = sizeof(struct load_op)
392 + sizeof(struct literal_double);
393
394 insn = calloc(insn_len, 1);
395 if (!insn)
396 return -ENOMEM;
397 insn->op = FILTER_OP_LOAD_DOUBLE;
58d494e4 398 memcpy(insn->data, &node->u.load.u.flt, sizeof(double));
e90d8561
MD
399 ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
400 free(insn);
401 return ret;
402 }
87942d06 403#if 0
586dc72f
MD
404 case IR_DATA_FIELD_REF: /* fall-through */
405 case IR_DATA_GET_CONTEXT_REF:
953192ba
MD
406 {
407 struct load_op *insn;
408 uint32_t insn_len = sizeof(struct load_op)
409 + sizeof(struct field_ref);
410 struct field_ref ref_offset;
ec96a8f6
MD
411 uint32_t reloc_offset_u32;
412 uint16_t reloc_offset;
953192ba
MD
413
414 insn = calloc(insn_len, 1);
415 if (!insn)
416 return -ENOMEM;
5590fc2c 417 switch (node->data_type) {
586dc72f
MD
418 case IR_DATA_FIELD_REF:
419 insn->op = FILTER_OP_LOAD_FIELD_REF;
420 break;
421 case IR_DATA_GET_CONTEXT_REF:
422 insn->op = FILTER_OP_GET_CONTEXT_REF;
423 break;
424 default:
3a68137c 425 free(insn);
586dc72f
MD
426 return -EINVAL;
427 }
953192ba
MD
428 ref_offset.offset = (uint16_t) -1U;
429 memcpy(insn->data, &ref_offset, sizeof(ref_offset));
65775683 430 /* reloc_offset points to struct load_op */
ec96a8f6
MD
431 reloc_offset_u32 = bytecode_get_len(&ctx->bytecode->b);
432 if (reloc_offset_u32 > LTTNG_FILTER_MAX_LEN - 1) {
433 free(insn);
434 return -EINVAL;
435 }
436 reloc_offset = (uint16_t) reloc_offset_u32;
953192ba
MD
437 ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
438 if (ret) {
439 free(insn);
440 return ret;
441 }
442 /* append reloc */
443 ret = bytecode_push(&ctx->bytecode_reloc, &reloc_offset,
444 1, sizeof(reloc_offset));
445 if (ret) {
446 free(insn);
447 return ret;
448 }
449 ret = bytecode_push(&ctx->bytecode_reloc, node->u.load.u.ref,
450 1, strlen(node->u.load.u.ref) + 1);
451 free(insn);
452 return ret;
453 }
87942d06
MD
454#endif
455 case IR_DATA_EXPRESSION:
456 return visit_node_load_expression(ctx, node);
953192ba
MD
457 }
458}
459
460static
461int visit_node_unary(struct filter_parser_ctx *ctx, struct ir_op *node)
462{
463 int ret;
464 struct unary_op insn;
465
466 /* Visit child */
467 ret = recursive_visit_gen_bytecode(ctx, node->u.unary.child);
468 if (ret)
469 return ret;
470
471 /* Generate end of bytecode instruction */
472 switch (node->u.unary.type) {
473 case AST_UNARY_UNKNOWN:
474 default:
475 fprintf(stderr, "[error] Unknown unary node type in %s\n",
476 __func__);
477 return -EINVAL;
478 case AST_UNARY_PLUS:
479 /* Nothing to do. */
480 return 0;
481 case AST_UNARY_MINUS:
482 insn.op = FILTER_OP_UNARY_MINUS;
953192ba
MD
483 return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
484 case AST_UNARY_NOT:
485 insn.op = FILTER_OP_UNARY_NOT;
953192ba
MD
486 return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
487 }
488}
489
490/*
491 * Binary comparator nesting is disallowed. This allows fitting into
492 * only 2 registers.
493 */
494static
495int visit_node_binary(struct filter_parser_ctx *ctx, struct ir_op *node)
496{
497 int ret;
498 struct binary_op insn;
499
500 /* Visit child */
501 ret = recursive_visit_gen_bytecode(ctx, node->u.binary.left);
502 if (ret)
503 return ret;
504 ret = recursive_visit_gen_bytecode(ctx, node->u.binary.right);
505 if (ret)
506 return ret;
507
508 switch (node->u.binary.type) {
509 case AST_OP_UNKNOWN:
510 default:
511 fprintf(stderr, "[error] Unknown unary node type in %s\n",
512 __func__);
513 return -EINVAL;
514
515 case AST_OP_AND:
516 case AST_OP_OR:
517 fprintf(stderr, "[error] Unexpected logical node type in %s\n",
518 __func__);
519 return -EINVAL;
520
521 case AST_OP_MUL:
522 insn.op = FILTER_OP_MUL;
523 break;
524 case AST_OP_DIV:
525 insn.op = FILTER_OP_DIV;
526 break;
527 case AST_OP_MOD:
528 insn.op = FILTER_OP_MOD;
529 break;
530 case AST_OP_PLUS:
531 insn.op = FILTER_OP_PLUS;
532 break;
533 case AST_OP_MINUS:
534 insn.op = FILTER_OP_MINUS;
535 break;
536 case AST_OP_RSHIFT:
537 insn.op = FILTER_OP_RSHIFT;
538 break;
539 case AST_OP_LSHIFT:
540 insn.op = FILTER_OP_LSHIFT;
541 break;
87942d06
MD
542 case AST_OP_BIT_AND:
543 insn.op = FILTER_OP_BIT_AND;
953192ba 544 break;
87942d06
MD
545 case AST_OP_BIT_OR:
546 insn.op = FILTER_OP_BIT_OR;
953192ba 547 break;
87942d06
MD
548 case AST_OP_BIT_XOR:
549 insn.op = FILTER_OP_BIT_XOR;
953192ba
MD
550 break;
551
552 case AST_OP_EQ:
553 insn.op = FILTER_OP_EQ;
554 break;
555 case AST_OP_NE:
556 insn.op = FILTER_OP_NE;
557 break;
558 case AST_OP_GT:
559 insn.op = FILTER_OP_GT;
560 break;
561 case AST_OP_LT:
562 insn.op = FILTER_OP_LT;
563 break;
564 case AST_OP_GE:
565 insn.op = FILTER_OP_GE;
566 break;
567 case AST_OP_LE:
568 insn.op = FILTER_OP_LE;
569 break;
570 }
571 return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
572}
573
8cf9540a
MD
574/*
575 * A logical op always return a s64 (1 or 0).
576 */
953192ba
MD
577static
578int visit_node_logical(struct filter_parser_ctx *ctx, struct ir_op *node)
579{
580 int ret;
581 struct logical_op insn;
582 uint16_t skip_offset_loc;
583 uint16_t target_loc;
584
585 /* Visit left child */
586 ret = recursive_visit_gen_bytecode(ctx, node->u.binary.left);
587 if (ret)
588 return ret;
8cf9540a 589 /* Cast to s64 if float or field ref */
586dc72f 590 if ((node->u.binary.left->data_type == IR_DATA_FIELD_REF
5590fc2c 591 || node->u.binary.left->data_type == IR_DATA_GET_CONTEXT_REF
87942d06 592 || node->u.binary.left->data_type == IR_DATA_EXPRESSION)
8cf9540a
MD
593 || node->u.binary.left->data_type == IR_DATA_FLOAT) {
594 struct cast_op cast_insn;
595
586dc72f 596 if (node->u.binary.left->data_type == IR_DATA_FIELD_REF
5590fc2c 597 || node->u.binary.left->data_type == IR_DATA_GET_CONTEXT_REF
87942d06 598 || node->u.binary.left->data_type == IR_DATA_EXPRESSION) {
29fefef8
MD
599 cast_insn.op = FILTER_OP_CAST_TO_S64;
600 } else {
601 cast_insn.op = FILTER_OP_CAST_DOUBLE_TO_S64;
602 }
8cf9540a
MD
603 ret = bytecode_push(&ctx->bytecode, &cast_insn,
604 1, sizeof(cast_insn));
605 if (ret)
606 return ret;
607 }
953192ba
MD
608 switch (node->u.logical.type) {
609 default:
610 fprintf(stderr, "[error] Unknown node type in %s\n",
611 __func__);
612 return -EINVAL;
613
614 case AST_OP_AND:
615 insn.op = FILTER_OP_AND;
616 break;
617 case AST_OP_OR:
618 insn.op = FILTER_OP_OR;
619 break;
620 }
621 insn.skip_offset = (uint16_t) -1UL; /* Temporary */
622 ret = bytecode_push_logical(&ctx->bytecode, &insn, 1, sizeof(insn),
623 &skip_offset_loc);
624 if (ret)
625 return ret;
626 /* Visit right child */
627 ret = recursive_visit_gen_bytecode(ctx, node->u.binary.right);
628 if (ret)
629 return ret;
8cf9540a 630 /* Cast to s64 if float or field ref */
586dc72f 631 if ((node->u.binary.right->data_type == IR_DATA_FIELD_REF
5590fc2c 632 || node->u.binary.right->data_type == IR_DATA_GET_CONTEXT_REF
87942d06 633 || node->u.binary.right->data_type == IR_DATA_EXPRESSION)
8cf9540a
MD
634 || node->u.binary.right->data_type == IR_DATA_FLOAT) {
635 struct cast_op cast_insn;
636
586dc72f 637 if (node->u.binary.right->data_type == IR_DATA_FIELD_REF
5590fc2c 638 || node->u.binary.right->data_type == IR_DATA_GET_CONTEXT_REF
87942d06 639 || node->u.binary.right->data_type == IR_DATA_EXPRESSION) {
29fefef8
MD
640 cast_insn.op = FILTER_OP_CAST_TO_S64;
641 } else {
642 cast_insn.op = FILTER_OP_CAST_DOUBLE_TO_S64;
643 }
8cf9540a
MD
644 ret = bytecode_push(&ctx->bytecode, &cast_insn,
645 1, sizeof(cast_insn));
646 if (ret)
647 return ret;
648 }
953192ba
MD
649 /* We now know where the logical op can skip. */
650 target_loc = (uint16_t) bytecode_get_len(&ctx->bytecode->b);
651 ret = bytecode_patch(&ctx->bytecode,
652 &target_loc, /* Offset to jump to */
653 skip_offset_loc, /* Where to patch */
654 sizeof(uint16_t));
655 return ret;
656}
657
658/*
659 * Postorder traversal of the tree. We need the children result before
660 * we can evaluate the parent.
661 */
662static
663int recursive_visit_gen_bytecode(struct filter_parser_ctx *ctx,
664 struct ir_op *node)
665{
666 switch (node->op) {
667 case IR_OP_UNKNOWN:
668 default:
669 fprintf(stderr, "[error] Unknown node type in %s\n",
670 __func__);
671 return -EINVAL;
672
673 case IR_OP_ROOT:
674 return visit_node_root(ctx, node);
675 case IR_OP_LOAD:
676 return visit_node_load(ctx, node);
677 case IR_OP_UNARY:
678 return visit_node_unary(ctx, node);
679 case IR_OP_BINARY:
680 return visit_node_binary(ctx, node);
681 case IR_OP_LOGICAL:
682 return visit_node_logical(ctx, node);
683 }
684}
685
a187da1a 686LTTNG_HIDDEN
953192ba
MD
687void filter_bytecode_free(struct filter_parser_ctx *ctx)
688{
7ca1dc6f
DG
689 if (!ctx) {
690 return;
691 }
692
3f0c8837
DG
693 if (ctx->bytecode) {
694 free(ctx->bytecode);
695 ctx->bytecode = NULL;
696 }
697
698 if (ctx->bytecode_reloc) {
699 free(ctx->bytecode_reloc);
700 ctx->bytecode_reloc = NULL;
701 }
953192ba
MD
702}
703
a187da1a 704LTTNG_HIDDEN
953192ba
MD
705int filter_visitor_bytecode_generate(struct filter_parser_ctx *ctx)
706{
707 int ret;
708
709 ret = bytecode_init(&ctx->bytecode);
710 if (ret)
711 return ret;
712 ret = bytecode_init(&ctx->bytecode_reloc);
713 if (ret)
714 goto error;
715 ret = recursive_visit_gen_bytecode(ctx, ctx->ir_root);
716 if (ret)
717 goto error;
718
719 /* Finally, append symbol table to bytecode */
720 ctx->bytecode->b.reloc_table_offset = bytecode_get_len(&ctx->bytecode->b);
721 return bytecode_push(&ctx->bytecode, ctx->bytecode_reloc->b.data,
722 1, bytecode_get_len(&ctx->bytecode_reloc->b));
723
724error:
725 filter_bytecode_free(ctx);
726 return ret;
727}
This page took 0.081694 seconds and 5 git commands to generate.