/*
 * filter-visitor-generate-bytecode.c
 *
 * LTTng filter bytecode generation
 *
 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License, version 2.1 only,
 * as published by the Free Software Foundation.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <common/align.h>
#include <common/compat/string.h>

#include "filter-bytecode.h"
#include "filter-ir.h"
#include "filter-ast.h"

#include <common/macros.h>

#ifndef max_t
#define max_t(type, a, b)	((type) ((a) > (b) ? (a) : (b)))
#endif

#define INIT_ALLOC_SIZE		4

static
int recursive_visit_gen_bytecode(struct filter_parser_ctx *ctx,
		struct ir_op *node);

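/*
 * Return the smallest order (power-of-two exponent) such that
 * (1U << order) >= count; in other words, ceil(log2(count)).
 */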
static inline int get_count_order(unsigned int count)
{
	int order;

	order = lttng_fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}

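/*
 * Allocate an initial bytecode buffer with INIT_ALLOC_SIZE bytes of
 * payload. Returns 0 on success, -ENOMEM on allocation failure.
 */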
static
int bytecode_init(struct lttng_filter_bytecode_alloc **fb)
{
	uint32_t alloc_len;

	alloc_len = sizeof(struct lttng_filter_bytecode_alloc) + INIT_ALLOC_SIZE;
	*fb = calloc(alloc_len, 1);
	if (!*fb) {
		return -ENOMEM;
	} else {
		(*fb)->alloc_len = alloc_len;
		return 0;
	}
}

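/*
 * Reserve "len" bytes in the bytecode buffer, first padding the current
 * length up to "align". The allocation grows geometrically (at least
 * doubling) when needed. Returns the offset of the reserved space, or a
 * negative errno value on error.
 */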
static
int32_t bytecode_reserve(struct lttng_filter_bytecode_alloc **fb, uint32_t align, uint32_t len)
{
	int32_t ret;
	uint32_t padding = offset_align((*fb)->b.len, align);
	uint32_t new_len = (*fb)->b.len + padding + len;
	uint32_t new_alloc_len = sizeof(struct lttng_filter_bytecode_alloc) + new_len;
	uint32_t old_alloc_len = (*fb)->alloc_len;

	if (new_len > LTTNG_FILTER_MAX_LEN)
		return -EINVAL;

	if (new_alloc_len > old_alloc_len) {
		struct lttng_filter_bytecode_alloc *newptr;

		new_alloc_len =
			max_t(uint32_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
		newptr = realloc(*fb, new_alloc_len);
		if (!newptr)
			return -ENOMEM;
		*fb = newptr;
		/* Zero the newly allocated memory past the old allocation. */
		memset(&((char *) *fb)[old_alloc_len], 0, new_alloc_len - old_alloc_len);
		(*fb)->alloc_len = new_alloc_len;
	}
	(*fb)->b.len += padding;
	ret = (*fb)->b.len;
	(*fb)->b.len += len;
	return ret;
}

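/*
 * Append "len" bytes of "data" to the bytecode, aligned on "align".
 * Returns 0 on success, a negative errno value on error.
 */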
static
int bytecode_push(struct lttng_filter_bytecode_alloc **fb, const void *data,
		uint32_t align, uint32_t len)
{
	int32_t offset;

	offset = bytecode_reserve(fb, align, len);
	if (offset < 0)
		return offset;
	memcpy(&(*fb)->b.data[offset], data, len);
	return 0;
}

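/*
 * Same as bytecode_push(), for a logical operator instruction.
 * Additionally returns, through "skip_offset", the offset within the
 * bytecode of the instruction's skip_offset field, so it can be patched
 * once the jump target is known.
 */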
static
int bytecode_push_logical(struct lttng_filter_bytecode_alloc **fb,
		struct logical_op *data,
		uint32_t align, uint32_t len,
		uint16_t *skip_offset)
{
	int32_t offset;

	offset = bytecode_reserve(fb, align, len);
	if (offset < 0)
		return offset;
	memcpy(&(*fb)->b.data[offset], data, len);
	*skip_offset =
		(void *) &((struct logical_op *) &(*fb)->b.data[offset])->skip_offset
			- (void *) &(*fb)->b.data[0];
	return 0;
}

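/*
 * Overwrite "len" bytes at "offset" with "data". The offset must fall
 * within the already-generated bytecode.
 */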
static
int bytecode_patch(struct lttng_filter_bytecode_alloc **fb,
		const void *data,
		uint16_t offset,
		uint32_t len)
{
	if (offset >= (*fb)->b.len) {
		return -EINVAL;
	}
	memcpy(&(*fb)->b.data[offset], data, len);
	return 0;
}

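/*
 * Generate bytecode for the root node: visit the single child, then
 * emit the terminating RETURN instruction.
 */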
static
int visit_node_root(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct return_op insn;

	/* Visit child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.root.child);
	if (ret)
		return ret;

	/* Generate end of bytecode instruction */
	insn.op = FILTER_OP_RETURN;
	return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
}

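/*
 * Generate a load instruction for a leaf node: an immediate string,
 * numeric or floating-point literal, or a reference to an event field
 * or context field. Field references are emitted with a placeholder
 * offset and recorded in the relocation table, to be resolved when the
 * bytecode is later linked against a specific event layout.
 */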
static
int visit_node_load(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;

	switch (node->data_type) {
	case IR_DATA_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown data type in %s\n",
			__func__);
		return -EINVAL;

	case IR_DATA_STRING:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ strlen(node->u.load.u.string) + 1;

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_STRING;
		strcpy(insn->data, node->u.load.u.string);
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		free(insn);
		return ret;
	}
	case IR_DATA_NUMERIC:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ sizeof(struct literal_numeric);

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_S64;
		memcpy(insn->data, &node->u.load.u.num, sizeof(int64_t));
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		free(insn);
		return ret;
	}
	case IR_DATA_FLOAT:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ sizeof(struct literal_double);

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_DOUBLE;
		memcpy(insn->data, &node->u.load.u.flt, sizeof(double));
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		free(insn);
		return ret;
	}
	case IR_DATA_FIELD_REF:	/* fall-through */
	case IR_DATA_GET_CONTEXT_REF:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ sizeof(struct field_ref);
		struct field_ref ref_offset;
		uint32_t reloc_offset_u32;
		uint16_t reloc_offset;

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		switch (node->data_type) {
		case IR_DATA_FIELD_REF:
			insn->op = FILTER_OP_LOAD_FIELD_REF;
			break;
		case IR_DATA_GET_CONTEXT_REF:
			insn->op = FILTER_OP_GET_CONTEXT_REF;
			break;
		default:
			free(insn);
			return -EINVAL;
		}
		/* Placeholder offset, resolved at link time. */
		ref_offset.offset = (uint16_t) -1U;
		memcpy(insn->data, &ref_offset, sizeof(ref_offset));
		/* reloc_offset points to struct load_op */
		reloc_offset_u32 = bytecode_get_len(&ctx->bytecode->b);
		if (reloc_offset_u32 > LTTNG_FILTER_MAX_LEN - 1) {
			free(insn);
			return -EINVAL;
		}
		reloc_offset = (uint16_t) reloc_offset_u32;
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		if (ret) {
			free(insn);
			return ret;
		}
		/* Append reloc: instruction offset, then field name. */
		ret = bytecode_push(&ctx->bytecode_reloc, &reloc_offset,
				1, sizeof(reloc_offset));
		if (ret) {
			free(insn);
			return ret;
		}
		ret = bytecode_push(&ctx->bytecode_reloc, node->u.load.u.ref,
				1, strlen(node->u.load.u.ref) + 1);
		free(insn);
		return ret;
	}
	}
}

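/*
 * Generate bytecode for a unary operator: visit the child, then emit
 * the operator instruction. Unary plus is a no-op.
 */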
static
int visit_node_unary(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct unary_op insn;

	/* Visit child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.unary.child);
	if (ret)
		return ret;

	/* Generate unary instruction */
	switch (node->u.unary.type) {
	case AST_UNARY_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown unary node type in %s\n",
			__func__);
		return -EINVAL;
	case AST_UNARY_PLUS:
		/* Nothing to do. */
		return 0;
	case AST_UNARY_MINUS:
		insn.op = FILTER_OP_UNARY_MINUS;
		return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
	case AST_UNARY_NOT:
		insn.op = FILTER_OP_UNARY_NOT;
		return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
	}
}

/*
 * Binary comparator nesting is disallowed. This allows the expression
 * to fit within only 2 registers.
 */
static
int visit_node_binary(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct binary_op insn;

	/* Visit children */
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.left);
	if (ret)
		return ret;
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.right);
	if (ret)
		return ret;

	switch (node->u.binary.type) {
	case AST_OP_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown binary node type in %s\n",
			__func__);
		return -EINVAL;

	case AST_OP_AND:
	case AST_OP_OR:
		fprintf(stderr, "[error] Unexpected logical node type in %s\n",
			__func__);
		return -EINVAL;

	case AST_OP_MUL:
		insn.op = FILTER_OP_MUL;
		break;
	case AST_OP_DIV:
		insn.op = FILTER_OP_DIV;
		break;
	case AST_OP_MOD:
		insn.op = FILTER_OP_MOD;
		break;
	case AST_OP_PLUS:
		insn.op = FILTER_OP_PLUS;
		break;
	case AST_OP_MINUS:
		insn.op = FILTER_OP_MINUS;
		break;
	case AST_OP_RSHIFT:
		insn.op = FILTER_OP_RSHIFT;
		break;
	case AST_OP_LSHIFT:
		insn.op = FILTER_OP_LSHIFT;
		break;
	case AST_OP_BIN_AND:
		insn.op = FILTER_OP_BIN_AND;
		break;
	case AST_OP_BIN_OR:
		insn.op = FILTER_OP_BIN_OR;
		break;
	case AST_OP_BIN_XOR:
		insn.op = FILTER_OP_BIN_XOR;
		break;

	case AST_OP_EQ:
		insn.op = FILTER_OP_EQ;
		break;
	case AST_OP_NE:
		insn.op = FILTER_OP_NE;
		break;
	case AST_OP_GT:
		insn.op = FILTER_OP_GT;
		break;
	case AST_OP_LT:
		insn.op = FILTER_OP_LT;
		break;
	case AST_OP_GE:
		insn.op = FILTER_OP_GE;
		break;
	case AST_OP_LE:
		insn.op = FILTER_OP_LE;
		break;
	}
	return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
}

/*
 * A logical op always returns a s64 (1 or 0). The AND/OR instruction is
 * emitted with a temporary skip offset, which is patched once the
 * right-hand operand has been generated, so the interpreter can
 * short-circuit over it.
 */
static
int visit_node_logical(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct logical_op insn;
	uint16_t skip_offset_loc;
	uint16_t target_loc;

	/* Visit left child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.left);
	if (ret)
		return ret;
	/* Cast to s64 if float or field ref */
	if ((node->u.binary.left->data_type == IR_DATA_FIELD_REF
			|| node->u.binary.left->data_type == IR_DATA_GET_CONTEXT_REF)
			|| node->u.binary.left->data_type == IR_DATA_FLOAT) {
		struct cast_op cast_insn;

		if (node->u.binary.left->data_type == IR_DATA_FIELD_REF
				|| node->u.binary.left->data_type == IR_DATA_GET_CONTEXT_REF) {
			cast_insn.op = FILTER_OP_CAST_TO_S64;
		} else {
			cast_insn.op = FILTER_OP_CAST_DOUBLE_TO_S64;
		}
		ret = bytecode_push(&ctx->bytecode, &cast_insn,
				1, sizeof(cast_insn));
		if (ret)
			return ret;
	}
	switch (node->u.logical.type) {
	default:
		fprintf(stderr, "[error] Unknown node type in %s\n",
			__func__);
		return -EINVAL;

	case AST_OP_AND:
		insn.op = FILTER_OP_AND;
		break;
	case AST_OP_OR:
		insn.op = FILTER_OP_OR;
		break;
	}
	insn.skip_offset = (uint16_t) -1UL;	/* Temporary */
	ret = bytecode_push_logical(&ctx->bytecode, &insn, 1, sizeof(insn),
			&skip_offset_loc);
	if (ret)
		return ret;
	/* Visit right child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.right);
	if (ret)
		return ret;
	/* Cast to s64 if float or field ref */
	if ((node->u.binary.right->data_type == IR_DATA_FIELD_REF
			|| node->u.binary.right->data_type == IR_DATA_GET_CONTEXT_REF)
			|| node->u.binary.right->data_type == IR_DATA_FLOAT) {
		struct cast_op cast_insn;

		if (node->u.binary.right->data_type == IR_DATA_FIELD_REF
				|| node->u.binary.right->data_type == IR_DATA_GET_CONTEXT_REF) {
			cast_insn.op = FILTER_OP_CAST_TO_S64;
		} else {
			cast_insn.op = FILTER_OP_CAST_DOUBLE_TO_S64;
		}
		ret = bytecode_push(&ctx->bytecode, &cast_insn,
				1, sizeof(cast_insn));
		if (ret)
			return ret;
	}
	/* We now know where the logical op can skip. */
	target_loc = (uint16_t) bytecode_get_len(&ctx->bytecode->b);
	ret = bytecode_patch(&ctx->bytecode,
			&target_loc,		/* Offset to jump to */
			skip_offset_loc,	/* Where to patch */
			sizeof(uint16_t));
	return ret;
}

/*
 * Postorder traversal of the tree. We need the children's results
 * before we can evaluate the parent.
 */
static
int recursive_visit_gen_bytecode(struct filter_parser_ctx *ctx,
		struct ir_op *node)
{
	switch (node->op) {
	case IR_OP_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown node type in %s\n",
			__func__);
		return -EINVAL;

	case IR_OP_ROOT:
		return visit_node_root(ctx, node);
	case IR_OP_LOAD:
		return visit_node_load(ctx, node);
	case IR_OP_UNARY:
		return visit_node_unary(ctx, node);
	case IR_OP_BINARY:
		return visit_node_binary(ctx, node);
	case IR_OP_LOGICAL:
		return visit_node_logical(ctx, node);
	}
}

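/*
 * Free the bytecode and relocation buffers owned by the parser context.
 * Safe to call with a NULL context or with buffers already freed.
 */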
LTTNG_HIDDEN
void filter_bytecode_free(struct filter_parser_ctx *ctx)
{
	if (!ctx) {
		return;
	}

	if (ctx->bytecode) {
		free(ctx->bytecode);
		ctx->bytecode = NULL;
	}

	if (ctx->bytecode_reloc) {
		free(ctx->bytecode_reloc);
		ctx->bytecode_reloc = NULL;
	}
}

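/*
 * Entry point: generate filter bytecode from the IR tree rooted at
 * ctx->ir_root, then append the relocation (symbol) table to the end of
 * the bytecode. On error, all buffers are freed.
 */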
LTTNG_HIDDEN
int filter_visitor_bytecode_generate(struct filter_parser_ctx *ctx)
{
	int ret;

	ret = bytecode_init(&ctx->bytecode);
	if (ret)
		return ret;
	ret = bytecode_init(&ctx->bytecode_reloc);
	if (ret)
		goto error;
	ret = recursive_visit_gen_bytecode(ctx, ctx->ir_root);
	if (ret)
		goto error;

	/* Finally, append symbol table to bytecode */
	ctx->bytecode->b.reloc_table_offset = bytecode_get_len(&ctx->bytecode->b);
	return bytecode_push(&ctx->bytecode, ctx->bytecode_reloc->b.data,
			1, bytecode_get_len(&ctx->bytecode_reloc->b));

error:
	filter_bytecode_free(ctx);
	return ret;
}