/*
 * filter-visitor-generate-bytecode.c
 *
 * LTTng filter bytecode generation
 *
 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License, version 2.1 only,
 * as published by the Free Software Foundation.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "filter-bytecode.h"
#include "filter-ir.h"
#include "filter-ast.h"
31 #define max_t(type, a, b) ((type) ((a) > (b) ? (a) : (b)))
34 //#define INIT_ALLOC_SIZE PAGE_SIZE
35 #define INIT_ALLOC_SIZE 4
/* Forward declaration: mutually recursive with the visit_node_* helpers below. */
int recursive_visit_gen_bytecode(struct filter_parser_ctx *ctx,
		struct ir_op *node);
42 int bytecode_init(struct lttng_filter_bytecode_alloc
**fb
)
44 *fb
= calloc(sizeof(struct lttng_filter_bytecode_alloc
) + INIT_ALLOC_SIZE
, 1);
48 (*fb
)->alloc_len
= INIT_ALLOC_SIZE
;
54 int32_t bytecode_reserve(struct lttng_filter_bytecode_alloc
**fb
, uint32_t align
, uint32_t len
)
57 uint32_t padding
= offset_align((*fb
)->b
.len
, align
);
59 if ((*fb
)->b
.len
+ padding
+ len
> (*fb
)->alloc_len
) {
61 max_t(uint32_t, (*fb
)->b
.len
+ padding
+ len
,
62 (*fb
)->alloc_len
<< 1);
63 uint32_t old_len
= (*fb
)->alloc_len
;
67 *fb
= realloc(*fb
, sizeof(struct lttng_filter_bytecode_alloc
) + new_len
);
70 memset(&(*fb
)->b
.data
[old_len
], 0, new_len
- old_len
);
71 (*fb
)->alloc_len
= new_len
;
73 (*fb
)->b
.len
+= padding
;
80 int bytecode_push(struct lttng_filter_bytecode_alloc
**fb
, const void *data
,
81 uint32_t align
, uint32_t len
)
85 offset
= bytecode_reserve(fb
, align
, len
);
88 memcpy(&(*fb
)->b
.data
[offset
], data
, len
);
93 int bytecode_push_logical(struct lttng_filter_bytecode_alloc
**fb
,
94 struct logical_op
*data
,
95 uint32_t align
, uint32_t len
,
96 uint16_t *skip_offset
)
100 offset
= bytecode_reserve(fb
, align
, len
);
103 memcpy(&(*fb
)->b
.data
[offset
], data
, len
);
105 (void *) &((struct logical_op
*) &(*fb
)->b
.data
[offset
])->skip_offset
106 - (void *) &(*fb
)->b
.data
[0];
111 int bytecode_patch(struct lttng_filter_bytecode_alloc
**fb
,
116 if (offset
>= (*fb
)->b
.len
) {
119 memcpy(&(*fb
)->b
.data
[offset
], data
, len
);
124 int visit_node_root(struct filter_parser_ctx
*ctx
, struct ir_op
*node
)
127 struct return_op insn
;
130 ret
= recursive_visit_gen_bytecode(ctx
, node
->u
.root
.child
);
134 /* Generate end of bytecode instruction */
135 insn
.op
= FILTER_OP_RETURN
;
136 return bytecode_push(&ctx
->bytecode
, &insn
, 1, sizeof(insn
));
140 int visit_node_load(struct filter_parser_ctx
*ctx
, struct ir_op
*node
)
144 switch (node
->data_type
) {
145 case IR_DATA_UNKNOWN
:
147 fprintf(stderr
, "[error] Unknown data type in %s\n",
153 struct load_op
*insn
;
154 uint32_t insn_len
= sizeof(struct load_op
)
155 + strlen(node
->u
.load
.u
.string
) + 1;
157 insn
= calloc(insn_len
, 1);
160 insn
->op
= FILTER_OP_LOAD_STRING
;
161 strcpy(insn
->data
, node
->u
.load
.u
.string
);
162 ret
= bytecode_push(&ctx
->bytecode
, insn
, 1, insn_len
);
166 case IR_DATA_NUMERIC
:
168 struct load_op
*insn
;
169 uint32_t insn_len
= sizeof(struct load_op
)
170 + sizeof(struct literal_numeric
);
172 insn
= calloc(insn_len
, 1);
175 insn
->op
= FILTER_OP_LOAD_S64
;
176 *(int64_t *) insn
->data
= node
->u
.load
.u
.num
;
177 ret
= bytecode_push(&ctx
->bytecode
, insn
, 1, insn_len
);
183 struct load_op
*insn
;
184 uint32_t insn_len
= sizeof(struct load_op
)
185 + sizeof(struct literal_double
);
187 insn
= calloc(insn_len
, 1);
190 insn
->op
= FILTER_OP_LOAD_DOUBLE
;
191 *(double *) insn
->data
= node
->u
.load
.u
.flt
;
192 ret
= bytecode_push(&ctx
->bytecode
, insn
, 1, insn_len
);
196 case IR_DATA_FIELD_REF
:
198 struct load_op
*insn
;
199 uint32_t insn_len
= sizeof(struct load_op
)
200 + sizeof(struct field_ref
);
201 struct field_ref ref_offset
;
202 uint16_t reloc_offset
;
204 insn
= calloc(insn_len
, 1);
207 insn
->op
= FILTER_OP_LOAD_FIELD_REF
;
208 ref_offset
.offset
= (uint16_t) -1U;
209 memcpy(insn
->data
, &ref_offset
, sizeof(ref_offset
));
210 /* reloc_offset points to struct load_op */
211 reloc_offset
= bytecode_get_len(&ctx
->bytecode
->b
);
212 ret
= bytecode_push(&ctx
->bytecode
, insn
, 1, insn_len
);
218 ret
= bytecode_push(&ctx
->bytecode_reloc
, &reloc_offset
,
219 1, sizeof(reloc_offset
));
224 ret
= bytecode_push(&ctx
->bytecode_reloc
, node
->u
.load
.u
.ref
,
225 1, strlen(node
->u
.load
.u
.ref
) + 1);
233 int visit_node_unary(struct filter_parser_ctx
*ctx
, struct ir_op
*node
)
236 struct unary_op insn
;
239 ret
= recursive_visit_gen_bytecode(ctx
, node
->u
.unary
.child
);
243 /* Generate end of bytecode instruction */
244 switch (node
->u
.unary
.type
) {
245 case AST_UNARY_UNKNOWN
:
247 fprintf(stderr
, "[error] Unknown unary node type in %s\n",
253 case AST_UNARY_MINUS
:
254 insn
.op
= FILTER_OP_UNARY_MINUS
;
255 return bytecode_push(&ctx
->bytecode
, &insn
, 1, sizeof(insn
));
257 insn
.op
= FILTER_OP_UNARY_NOT
;
258 return bytecode_push(&ctx
->bytecode
, &insn
, 1, sizeof(insn
));
263 * Binary comparator nesting is disallowed. This allows fitting into
267 int visit_node_binary(struct filter_parser_ctx
*ctx
, struct ir_op
*node
)
270 struct binary_op insn
;
273 ret
= recursive_visit_gen_bytecode(ctx
, node
->u
.binary
.left
);
276 ret
= recursive_visit_gen_bytecode(ctx
, node
->u
.binary
.right
);
280 switch (node
->u
.binary
.type
) {
283 fprintf(stderr
, "[error] Unknown unary node type in %s\n",
289 fprintf(stderr
, "[error] Unexpected logical node type in %s\n",
294 insn
.op
= FILTER_OP_MUL
;
297 insn
.op
= FILTER_OP_DIV
;
300 insn
.op
= FILTER_OP_MOD
;
303 insn
.op
= FILTER_OP_PLUS
;
306 insn
.op
= FILTER_OP_MINUS
;
309 insn
.op
= FILTER_OP_RSHIFT
;
312 insn
.op
= FILTER_OP_LSHIFT
;
315 insn
.op
= FILTER_OP_BIN_AND
;
318 insn
.op
= FILTER_OP_BIN_OR
;
321 insn
.op
= FILTER_OP_BIN_XOR
;
325 insn
.op
= FILTER_OP_EQ
;
328 insn
.op
= FILTER_OP_NE
;
331 insn
.op
= FILTER_OP_GT
;
334 insn
.op
= FILTER_OP_LT
;
337 insn
.op
= FILTER_OP_GE
;
340 insn
.op
= FILTER_OP_LE
;
343 return bytecode_push(&ctx
->bytecode
, &insn
, 1, sizeof(insn
));
347 * A logical op always return a s64 (1 or 0).
350 int visit_node_logical(struct filter_parser_ctx
*ctx
, struct ir_op
*node
)
353 struct logical_op insn
;
354 uint16_t skip_offset_loc
;
357 /* Visit left child */
358 ret
= recursive_visit_gen_bytecode(ctx
, node
->u
.binary
.left
);
361 /* Cast to s64 if float or field ref */
362 if (node
->u
.binary
.left
->data_type
== IR_DATA_FIELD_REF
363 || node
->u
.binary
.left
->data_type
== IR_DATA_FLOAT
) {
364 struct cast_op cast_insn
;
366 if (node
->u
.binary
.left
->data_type
== IR_DATA_FIELD_REF
) {
367 cast_insn
.op
= FILTER_OP_CAST_TO_S64
;
369 cast_insn
.op
= FILTER_OP_CAST_DOUBLE_TO_S64
;
371 ret
= bytecode_push(&ctx
->bytecode
, &cast_insn
,
372 1, sizeof(cast_insn
));
376 switch (node
->u
.logical
.type
) {
378 fprintf(stderr
, "[error] Unknown node type in %s\n",
383 insn
.op
= FILTER_OP_AND
;
386 insn
.op
= FILTER_OP_OR
;
389 insn
.skip_offset
= (uint16_t) -1UL; /* Temporary */
390 ret
= bytecode_push_logical(&ctx
->bytecode
, &insn
, 1, sizeof(insn
),
394 /* Visit right child */
395 ret
= recursive_visit_gen_bytecode(ctx
, node
->u
.binary
.right
);
398 /* Cast to s64 if float or field ref */
399 if (node
->u
.binary
.right
->data_type
== IR_DATA_FIELD_REF
400 || node
->u
.binary
.right
->data_type
== IR_DATA_FLOAT
) {
401 struct cast_op cast_insn
;
403 if (node
->u
.binary
.right
->data_type
== IR_DATA_FIELD_REF
) {
404 cast_insn
.op
= FILTER_OP_CAST_TO_S64
;
406 cast_insn
.op
= FILTER_OP_CAST_DOUBLE_TO_S64
;
408 ret
= bytecode_push(&ctx
->bytecode
, &cast_insn
,
409 1, sizeof(cast_insn
));
413 /* We now know where the logical op can skip. */
414 target_loc
= (uint16_t) bytecode_get_len(&ctx
->bytecode
->b
);
415 ret
= bytecode_patch(&ctx
->bytecode
,
416 &target_loc
, /* Offset to jump to */
417 skip_offset_loc
, /* Where to patch */
423 * Postorder traversal of the tree. We need the children result before
424 * we can evaluate the parent.
427 int recursive_visit_gen_bytecode(struct filter_parser_ctx
*ctx
,
433 fprintf(stderr
, "[error] Unknown node type in %s\n",
438 return visit_node_root(ctx
, node
);
440 return visit_node_load(ctx
, node
);
442 return visit_node_unary(ctx
, node
);
444 return visit_node_binary(ctx
, node
);
446 return visit_node_logical(ctx
, node
);
450 __attribute__((visibility("hidden")))
451 void filter_bytecode_free(struct filter_parser_ctx
*ctx
)
454 ctx
->bytecode
= NULL
;
455 free(ctx
->bytecode_reloc
);
456 ctx
->bytecode_reloc
= NULL
;
459 __attribute__((visibility("hidden")))
460 int filter_visitor_bytecode_generate(struct filter_parser_ctx
*ctx
)
464 ret
= bytecode_init(&ctx
->bytecode
);
467 ret
= bytecode_init(&ctx
->bytecode_reloc
);
470 ret
= recursive_visit_gen_bytecode(ctx
, ctx
->ir_root
);
474 /* Finally, append symbol table to bytecode */
475 ctx
->bytecode
->b
.reloc_table_offset
= bytecode_get_len(&ctx
->bytecode
->b
);
476 return bytecode_push(&ctx
->bytecode
, ctx
->bytecode_reloc
->b
.data
,
477 1, bytecode_get_len(&ctx
->bytecode_reloc
->b
));
480 filter_bytecode_free(ctx
);