Commit | Line | Data |
---|---|---|
953192ba MD |
1 | /* |
2 | * filter-visitor-generate-bytecode.c | |
3 | * | |
4 | * LTTng filter bytecode generation | |
5 | * | |
6 | * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com> | |
7 | * | |
8 | * This library is free software; you can redistribute it and/or modify it | |
9 | * under the terms of the GNU Lesser General Public License, version 2.1 only, | |
10 | * as published by the Free Software Foundation. | |
11 | * | |
12 | * This library is distributed in the hope that it will be useful, | |
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
15 | * Lesser General Public License for more details. | |
16 | * | |
17 | * You should have received a copy of the GNU Lesser General Public License | |
18 | * along with this library; if not, write to the Free Software Foundation, | |
19 | * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | |
20 | */ | |
21 | ||
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "align.h"
#include "filter-bytecode.h"
#include "filter-ir.h"
#include "filter-ast.h"
29 | ||
30 | #ifndef max_t | |
31 | #define max_t(type, a, b) ((type) ((a) > (b) ? (a) : (b))) | |
32 | #endif | |
33 | ||
34 | //#define INIT_ALLOC_SIZE PAGE_SIZE | |
35 | #define INIT_ALLOC_SIZE 4 | |
36 | ||
37 | static | |
38 | int recursive_visit_gen_bytecode(struct filter_parser_ctx *ctx, | |
39 | struct ir_op *node); | |
40 | ||
41 | static | |
53a80697 | 42 | int bytecode_init(struct lttng_filter_bytecode_alloc **fb) |
953192ba | 43 | { |
53a80697 | 44 | *fb = calloc(sizeof(struct lttng_filter_bytecode_alloc) + INIT_ALLOC_SIZE, 1); |
953192ba MD |
45 | if (!*fb) { |
46 | return -ENOMEM; | |
47 | } else { | |
48 | (*fb)->alloc_len = INIT_ALLOC_SIZE; | |
49 | return 0; | |
50 | } | |
51 | } | |
52 | ||
53 | static | |
53a80697 | 54 | int32_t bytecode_reserve(struct lttng_filter_bytecode_alloc **fb, uint32_t align, uint32_t len) |
953192ba MD |
55 | { |
56 | int32_t ret; | |
57 | uint32_t padding = offset_align((*fb)->b.len, align); | |
58 | ||
59 | if ((*fb)->b.len + padding + len > (*fb)->alloc_len) { | |
60 | uint32_t new_len = | |
61 | max_t(uint32_t, (*fb)->b.len + padding + len, | |
62 | (*fb)->alloc_len << 1); | |
63 | uint32_t old_len = (*fb)->alloc_len; | |
64 | ||
65 | if (new_len > 0xFFFF) | |
66 | return -EINVAL; | |
53a80697 | 67 | *fb = realloc(*fb, sizeof(struct lttng_filter_bytecode_alloc) + new_len); |
953192ba MD |
68 | if (!*fb) |
69 | return -ENOMEM; | |
70 | memset(&(*fb)->b.data[old_len], 0, new_len - old_len); | |
71 | (*fb)->alloc_len = new_len; | |
72 | } | |
73 | (*fb)->b.len += padding; | |
74 | ret = (*fb)->b.len; | |
75 | (*fb)->b.len += len; | |
76 | return ret; | |
77 | } | |
78 | ||
79 | static | |
53a80697 | 80 | int bytecode_push(struct lttng_filter_bytecode_alloc **fb, const void *data, |
953192ba MD |
81 | uint32_t align, uint32_t len) |
82 | { | |
83 | int32_t offset; | |
84 | ||
85 | offset = bytecode_reserve(fb, align, len); | |
86 | if (offset < 0) | |
87 | return offset; | |
88 | memcpy(&(*fb)->b.data[offset], data, len); | |
89 | return 0; | |
90 | } | |
91 | ||
92 | static | |
53a80697 | 93 | int bytecode_push_logical(struct lttng_filter_bytecode_alloc **fb, |
953192ba MD |
94 | struct logical_op *data, |
95 | uint32_t align, uint32_t len, | |
96 | uint16_t *skip_offset) | |
97 | { | |
98 | int32_t offset; | |
99 | ||
100 | offset = bytecode_reserve(fb, align, len); | |
101 | if (offset < 0) | |
102 | return offset; | |
103 | memcpy(&(*fb)->b.data[offset], data, len); | |
104 | *skip_offset = | |
105 | (void *) &((struct logical_op *) &(*fb)->b.data[offset])->skip_offset | |
106 | - (void *) &(*fb)->b.data[0]; | |
107 | return 0; | |
108 | } | |
109 | ||
110 | static | |
53a80697 | 111 | int bytecode_patch(struct lttng_filter_bytecode_alloc **fb, |
953192ba MD |
112 | const void *data, |
113 | uint16_t offset, | |
114 | uint32_t len) | |
115 | { | |
116 | if (offset >= (*fb)->b.len) { | |
117 | return -EINVAL; | |
118 | } | |
119 | memcpy(&(*fb)->b.data[offset], data, len); | |
120 | return 0; | |
121 | } | |
122 | ||
123 | static | |
124 | int visit_node_root(struct filter_parser_ctx *ctx, struct ir_op *node) | |
125 | { | |
126 | int ret; | |
127 | struct return_op insn; | |
128 | ||
129 | /* Visit child */ | |
130 | ret = recursive_visit_gen_bytecode(ctx, node->u.root.child); | |
131 | if (ret) | |
132 | return ret; | |
133 | ||
134 | /* Generate end of bytecode instruction */ | |
135 | insn.op = FILTER_OP_RETURN; | |
136 | return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn)); | |
137 | } | |
138 | ||
139 | static | |
140 | enum filter_register reg_sel(struct ir_op *node) | |
141 | { | |
142 | switch (node->side) { | |
143 | case IR_SIDE_UNKNOWN: | |
144 | default: | |
145 | fprintf(stderr, "[error] Unknown node side in %s\n", | |
146 | __func__); | |
147 | return REG_ERROR; | |
148 | case IR_LEFT: | |
149 | return REG_R0; | |
150 | case IR_RIGHT: | |
151 | return REG_R1; | |
152 | } | |
153 | } | |
154 | ||
155 | static | |
156 | int visit_node_load(struct filter_parser_ctx *ctx, struct ir_op *node) | |
157 | { | |
158 | int ret; | |
159 | ||
160 | switch (node->data_type) { | |
161 | case IR_DATA_UNKNOWN: | |
162 | default: | |
163 | fprintf(stderr, "[error] Unknown data type in %s\n", | |
164 | __func__); | |
165 | return -EINVAL; | |
166 | ||
167 | case IR_DATA_STRING: | |
168 | { | |
169 | struct load_op *insn; | |
170 | uint32_t insn_len = sizeof(struct load_op) | |
171 | + strlen(node->u.load.u.string) + 1; | |
172 | ||
173 | insn = calloc(insn_len, 1); | |
174 | if (!insn) | |
175 | return -ENOMEM; | |
176 | insn->op = FILTER_OP_LOAD_STRING; | |
177 | insn->reg = reg_sel(node); | |
178 | if (insn->reg == REG_ERROR) | |
179 | return -EINVAL; | |
180 | strcpy(insn->data, node->u.load.u.string); | |
181 | ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len); | |
182 | free(insn); | |
183 | return ret; | |
184 | } | |
185 | case IR_DATA_NUMERIC: | |
186 | { | |
187 | struct load_op *insn; | |
188 | uint32_t insn_len = sizeof(struct load_op) | |
189 | + sizeof(struct literal_numeric); | |
190 | ||
191 | insn = calloc(insn_len, 1); | |
192 | if (!insn) | |
193 | return -ENOMEM; | |
194 | insn->op = FILTER_OP_LOAD_S64; | |
195 | insn->reg = reg_sel(node); | |
196 | if (insn->reg == REG_ERROR) | |
197 | return -EINVAL; | |
198 | *(int64_t *) insn->data = node->u.load.u.num; | |
199 | ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len); | |
200 | free(insn); | |
201 | return ret; | |
202 | } | |
e90d8561 MD |
203 | case IR_DATA_FLOAT: |
204 | { | |
205 | struct load_op *insn; | |
206 | uint32_t insn_len = sizeof(struct load_op) | |
207 | + sizeof(struct literal_double); | |
208 | ||
209 | insn = calloc(insn_len, 1); | |
210 | if (!insn) | |
211 | return -ENOMEM; | |
212 | insn->op = FILTER_OP_LOAD_DOUBLE; | |
213 | insn->reg = reg_sel(node); | |
214 | if (insn->reg == REG_ERROR) | |
215 | return -EINVAL; | |
216 | *(double *) insn->data = node->u.load.u.flt; | |
217 | ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len); | |
218 | free(insn); | |
219 | return ret; | |
220 | } | |
953192ba MD |
221 | case IR_DATA_FIELD_REF: |
222 | { | |
223 | struct load_op *insn; | |
224 | uint32_t insn_len = sizeof(struct load_op) | |
225 | + sizeof(struct field_ref); | |
226 | struct field_ref ref_offset; | |
227 | uint16_t reloc_offset; | |
228 | ||
229 | insn = calloc(insn_len, 1); | |
230 | if (!insn) | |
231 | return -ENOMEM; | |
232 | insn->op = FILTER_OP_LOAD_FIELD_REF; | |
233 | insn->reg = reg_sel(node); | |
234 | ref_offset.offset = (uint16_t) -1U; | |
235 | memcpy(insn->data, &ref_offset, sizeof(ref_offset)); | |
236 | if (insn->reg == REG_ERROR) | |
237 | return -EINVAL; | |
238 | /* reloc_offset points to struct field_ref */ | |
239 | reloc_offset = bytecode_get_len(&ctx->bytecode->b); | |
240 | reloc_offset += sizeof(struct load_op); | |
241 | ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len); | |
242 | if (ret) { | |
243 | free(insn); | |
244 | return ret; | |
245 | } | |
246 | /* append reloc */ | |
247 | ret = bytecode_push(&ctx->bytecode_reloc, &reloc_offset, | |
248 | 1, sizeof(reloc_offset)); | |
249 | if (ret) { | |
250 | free(insn); | |
251 | return ret; | |
252 | } | |
253 | ret = bytecode_push(&ctx->bytecode_reloc, node->u.load.u.ref, | |
254 | 1, strlen(node->u.load.u.ref) + 1); | |
255 | free(insn); | |
256 | return ret; | |
257 | } | |
258 | } | |
259 | } | |
260 | ||
261 | static | |
262 | int visit_node_unary(struct filter_parser_ctx *ctx, struct ir_op *node) | |
263 | { | |
264 | int ret; | |
265 | struct unary_op insn; | |
266 | ||
267 | /* Visit child */ | |
268 | ret = recursive_visit_gen_bytecode(ctx, node->u.unary.child); | |
269 | if (ret) | |
270 | return ret; | |
271 | ||
272 | /* Generate end of bytecode instruction */ | |
273 | switch (node->u.unary.type) { | |
274 | case AST_UNARY_UNKNOWN: | |
275 | default: | |
276 | fprintf(stderr, "[error] Unknown unary node type in %s\n", | |
277 | __func__); | |
278 | return -EINVAL; | |
279 | case AST_UNARY_PLUS: | |
280 | /* Nothing to do. */ | |
281 | return 0; | |
282 | case AST_UNARY_MINUS: | |
283 | insn.op = FILTER_OP_UNARY_MINUS; | |
284 | insn.reg = reg_sel(node); | |
285 | if (insn.reg == REG_ERROR) | |
286 | return -EINVAL; | |
287 | return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn)); | |
288 | case AST_UNARY_NOT: | |
289 | insn.op = FILTER_OP_UNARY_NOT; | |
290 | insn.reg = reg_sel(node); | |
291 | if (insn.reg == REG_ERROR) | |
292 | return -EINVAL; | |
293 | return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn)); | |
294 | } | |
295 | } | |
296 | ||
297 | /* | |
298 | * Binary comparator nesting is disallowed. This allows fitting into | |
299 | * only 2 registers. | |
300 | */ | |
301 | static | |
302 | int visit_node_binary(struct filter_parser_ctx *ctx, struct ir_op *node) | |
303 | { | |
304 | int ret; | |
305 | struct binary_op insn; | |
306 | ||
307 | /* Visit child */ | |
308 | ret = recursive_visit_gen_bytecode(ctx, node->u.binary.left); | |
309 | if (ret) | |
310 | return ret; | |
311 | ret = recursive_visit_gen_bytecode(ctx, node->u.binary.right); | |
312 | if (ret) | |
313 | return ret; | |
314 | ||
315 | switch (node->u.binary.type) { | |
316 | case AST_OP_UNKNOWN: | |
317 | default: | |
318 | fprintf(stderr, "[error] Unknown unary node type in %s\n", | |
319 | __func__); | |
320 | return -EINVAL; | |
321 | ||
322 | case AST_OP_AND: | |
323 | case AST_OP_OR: | |
324 | fprintf(stderr, "[error] Unexpected logical node type in %s\n", | |
325 | __func__); | |
326 | return -EINVAL; | |
327 | ||
328 | case AST_OP_MUL: | |
329 | insn.op = FILTER_OP_MUL; | |
330 | break; | |
331 | case AST_OP_DIV: | |
332 | insn.op = FILTER_OP_DIV; | |
333 | break; | |
334 | case AST_OP_MOD: | |
335 | insn.op = FILTER_OP_MOD; | |
336 | break; | |
337 | case AST_OP_PLUS: | |
338 | insn.op = FILTER_OP_PLUS; | |
339 | break; | |
340 | case AST_OP_MINUS: | |
341 | insn.op = FILTER_OP_MINUS; | |
342 | break; | |
343 | case AST_OP_RSHIFT: | |
344 | insn.op = FILTER_OP_RSHIFT; | |
345 | break; | |
346 | case AST_OP_LSHIFT: | |
347 | insn.op = FILTER_OP_LSHIFT; | |
348 | break; | |
349 | case AST_OP_BIN_AND: | |
350 | insn.op = FILTER_OP_BIN_AND; | |
351 | break; | |
352 | case AST_OP_BIN_OR: | |
353 | insn.op = FILTER_OP_BIN_OR; | |
354 | break; | |
355 | case AST_OP_BIN_XOR: | |
356 | insn.op = FILTER_OP_BIN_XOR; | |
357 | break; | |
358 | ||
359 | case AST_OP_EQ: | |
360 | insn.op = FILTER_OP_EQ; | |
361 | break; | |
362 | case AST_OP_NE: | |
363 | insn.op = FILTER_OP_NE; | |
364 | break; | |
365 | case AST_OP_GT: | |
366 | insn.op = FILTER_OP_GT; | |
367 | break; | |
368 | case AST_OP_LT: | |
369 | insn.op = FILTER_OP_LT; | |
370 | break; | |
371 | case AST_OP_GE: | |
372 | insn.op = FILTER_OP_GE; | |
373 | break; | |
374 | case AST_OP_LE: | |
375 | insn.op = FILTER_OP_LE; | |
376 | break; | |
377 | } | |
378 | return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn)); | |
379 | } | |
380 | ||
static
/*
 * Emit short-circuit evaluation for a logical AND/OR node.
 *
 * Sequence: left child bytecode, then an AND/OR instruction whose
 * skip_offset is patched afterwards, then the right child bytecode.
 * The skip_offset lets the interpreter jump past the right child when
 * the left result already decides the expression.
 *
 * NOTE(review): the children are read through node->u.binary while the
 * operator type comes from node->u.logical — presumably both union
 * members share the left/right layout; verify against filter-ir.h.
 */
int visit_node_logical(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct logical_op insn;
	uint16_t skip_offset_loc;	/* where, in the bytecode, to patch */
	uint16_t target_loc;		/* jump target: end of right child */

	/* Visit left child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.left);
	if (ret)
		return ret;
	switch (node->u.logical.type) {
	default:
		fprintf(stderr, "[error] Unknown node type in %s\n",
			__func__);
		return -EINVAL;

	case AST_OP_AND:
		insn.op = FILTER_OP_AND;
		break;
	case AST_OP_OR:
		insn.op = FILTER_OP_OR;
		break;
	}
	insn.skip_offset = (uint16_t) -1UL;	/* Temporary */
	/* Push the op and record the location of its skip_offset field. */
	ret = bytecode_push_logical(&ctx->bytecode, &insn, 1, sizeof(insn),
			&skip_offset_loc);
	if (ret)
		return ret;
	/* Visit right child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.right);
	if (ret)
		return ret;
	/* We now know where the logical op can skip. */
	target_loc = (uint16_t) bytecode_get_len(&ctx->bytecode->b);
	ret = bytecode_patch(&ctx->bytecode,
			&target_loc,		/* Offset to jump to */
			skip_offset_loc,	/* Where to patch */
			sizeof(uint16_t));
	return ret;
}
423 | ||
424 | /* | |
425 | * Postorder traversal of the tree. We need the children result before | |
426 | * we can evaluate the parent. | |
427 | */ | |
428 | static | |
429 | int recursive_visit_gen_bytecode(struct filter_parser_ctx *ctx, | |
430 | struct ir_op *node) | |
431 | { | |
432 | switch (node->op) { | |
433 | case IR_OP_UNKNOWN: | |
434 | default: | |
435 | fprintf(stderr, "[error] Unknown node type in %s\n", | |
436 | __func__); | |
437 | return -EINVAL; | |
438 | ||
439 | case IR_OP_ROOT: | |
440 | return visit_node_root(ctx, node); | |
441 | case IR_OP_LOAD: | |
442 | return visit_node_load(ctx, node); | |
443 | case IR_OP_UNARY: | |
444 | return visit_node_unary(ctx, node); | |
445 | case IR_OP_BINARY: | |
446 | return visit_node_binary(ctx, node); | |
447 | case IR_OP_LOGICAL: | |
448 | return visit_node_logical(ctx, node); | |
449 | } | |
450 | } | |
451 | ||
452 | void filter_bytecode_free(struct filter_parser_ctx *ctx) | |
453 | { | |
454 | free(ctx->bytecode); | |
455 | ctx->bytecode = NULL; | |
456 | free(ctx->bytecode_reloc); | |
457 | ctx->bytecode_reloc = NULL; | |
458 | } | |
459 | ||
460 | int filter_visitor_bytecode_generate(struct filter_parser_ctx *ctx) | |
461 | { | |
462 | int ret; | |
463 | ||
464 | ret = bytecode_init(&ctx->bytecode); | |
465 | if (ret) | |
466 | return ret; | |
467 | ret = bytecode_init(&ctx->bytecode_reloc); | |
468 | if (ret) | |
469 | goto error; | |
470 | ret = recursive_visit_gen_bytecode(ctx, ctx->ir_root); | |
471 | if (ret) | |
472 | goto error; | |
473 | ||
474 | /* Finally, append symbol table to bytecode */ | |
475 | ctx->bytecode->b.reloc_table_offset = bytecode_get_len(&ctx->bytecode->b); | |
476 | return bytecode_push(&ctx->bytecode, ctx->bytecode_reloc->b.data, | |
477 | 1, bytecode_get_len(&ctx->bytecode_reloc->b)); | |
478 | ||
479 | error: | |
480 | filter_bytecode_free(ctx); | |
481 | return ret; | |
482 | } |