SoW-2019-0002: Dynamic Snapshot
[deliverable/lttng-modules.git] / lttng-filter-specialize.c
CommitLineData
9f36eaed
MJ
1/* SPDX-License-Identifier: MIT
2 *
07dfc1d0
MD
3 * lttng-filter-specialize.c
4 *
5 * LTTng modules filter code specializer.
6 *
bbf3aef5 7 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
07dfc1d0
MD
8 */
9
3834b99f 10#include <linux/slab.h>
241ae9a8 11#include <lttng-filter.h>
3834b99f 12#include "lib/align.h"
07dfc1d0 13
3834b99f
MD
/*
 * Reserve @len bytes, aligned on @align, within the runtime data area,
 * growing the backing buffer with krealloc if needed.
 *
 * Returns the byte offset of the reserved region within runtime->data,
 * -EINVAL if the new length would exceed FILTER_MAX_DATA_LEN, or
 * -ENOMEM on allocation failure.
 */
static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
		size_t align, size_t len)
{
	ssize_t ret;
	size_t padding = offset_align(runtime->data_len, align);
	size_t new_len = runtime->data_len + padding + len;
	size_t new_alloc_len = new_len;
	size_t old_alloc_len = runtime->data_alloc_len;

	if (new_len > FILTER_MAX_DATA_LEN)
		return -EINVAL;

	if (new_alloc_len > old_alloc_len) {
		char *newptr;

		/*
		 * Grow geometrically: at least double the previous
		 * allocation, rounded up to the next power of two.
		 */
		new_alloc_len =
			max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
		newptr = krealloc(runtime->data, new_alloc_len, GFP_KERNEL);
		if (!newptr)
			return -ENOMEM;
		runtime->data = newptr;
		/* We zero directly the memory from start of allocation. */
		memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
		runtime->data_alloc_len = new_alloc_len;
	}
	/* Consume the alignment padding, then return the aligned offset. */
	runtime->data_len += padding;
	ret = runtime->data_len;
	runtime->data_len += len;
	return ret;
}
44
45static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
46 const void *p, size_t align, size_t len)
47{
48 ssize_t offset;
49
50 offset = bytecode_reserve_data(runtime, align, len);
51 if (offset < 0)
52 return -ENOMEM;
53 memcpy(&runtime->data[offset], p, len);
54 return offset;
55}
56
57static int specialize_load_field(struct vstack_entry *stack_top,
58 struct load_op *insn)
59{
60 int ret;
61
62 switch (stack_top->load.type) {
63 case LOAD_OBJECT:
64 break;
65 case LOAD_ROOT_CONTEXT:
66 case LOAD_ROOT_APP_CONTEXT:
67 case LOAD_ROOT_PAYLOAD:
68 default:
69 dbg_printk("Filter warning: cannot load root, missing field name.\n");
70 ret = -EINVAL;
71 goto end;
72 }
73 switch (stack_top->load.object_type) {
74 case OBJECT_TYPE_S8:
75 dbg_printk("op load field s8\n");
76 stack_top->type = REG_S64;
77 if (!stack_top->load.rev_bo)
78 insn->op = FILTER_OP_LOAD_FIELD_S8;
79 break;
80 case OBJECT_TYPE_S16:
81 dbg_printk("op load field s16\n");
82 stack_top->type = REG_S64;
83 if (!stack_top->load.rev_bo)
84 insn->op = FILTER_OP_LOAD_FIELD_S16;
85 break;
86 case OBJECT_TYPE_S32:
87 dbg_printk("op load field s32\n");
88 stack_top->type = REG_S64;
89 if (!stack_top->load.rev_bo)
90 insn->op = FILTER_OP_LOAD_FIELD_S32;
91 break;
92 case OBJECT_TYPE_S64:
93 dbg_printk("op load field s64\n");
94 stack_top->type = REG_S64;
95 if (!stack_top->load.rev_bo)
96 insn->op = FILTER_OP_LOAD_FIELD_S64;
97 break;
98 case OBJECT_TYPE_U8:
99 dbg_printk("op load field u8\n");
100 stack_top->type = REG_S64;
101 insn->op = FILTER_OP_LOAD_FIELD_U8;
102 break;
103 case OBJECT_TYPE_U16:
104 dbg_printk("op load field u16\n");
105 stack_top->type = REG_S64;
106 if (!stack_top->load.rev_bo)
107 insn->op = FILTER_OP_LOAD_FIELD_U16;
108 break;
109 case OBJECT_TYPE_U32:
110 dbg_printk("op load field u32\n");
111 stack_top->type = REG_S64;
112 if (!stack_top->load.rev_bo)
113 insn->op = FILTER_OP_LOAD_FIELD_U32;
114 break;
115 case OBJECT_TYPE_U64:
116 dbg_printk("op load field u64\n");
117 stack_top->type = REG_S64;
118 if (!stack_top->load.rev_bo)
119 insn->op = FILTER_OP_LOAD_FIELD_U64;
120 break;
121 case OBJECT_TYPE_DOUBLE:
122 printk(KERN_WARNING "Double type unsupported\n\n");
123 ret = -EINVAL;
124 goto end;
125 case OBJECT_TYPE_STRING:
126 dbg_printk("op load field string\n");
127 stack_top->type = REG_STRING;
128 insn->op = FILTER_OP_LOAD_FIELD_STRING;
129 break;
130 case OBJECT_TYPE_STRING_SEQUENCE:
131 dbg_printk("op load field string sequence\n");
132 stack_top->type = REG_STRING;
133 insn->op = FILTER_OP_LOAD_FIELD_SEQUENCE;
134 break;
135 case OBJECT_TYPE_DYNAMIC:
136 ret = -EINVAL;
137 goto end;
138 case OBJECT_TYPE_SEQUENCE:
139 case OBJECT_TYPE_ARRAY:
140 case OBJECT_TYPE_STRUCT:
141 case OBJECT_TYPE_VARIANT:
142 printk(KERN_WARNING "Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
143 ret = -EINVAL;
144 goto end;
145 }
146 return 0;
147
148end:
149 return ret;
150}
151
152static int specialize_get_index_object_type(enum object_type *otype,
153 int signedness, uint32_t elem_len)
154{
155 switch (elem_len) {
156 case 8:
157 if (signedness)
158 *otype = OBJECT_TYPE_S8;
159 else
160 *otype = OBJECT_TYPE_U8;
161 break;
162 case 16:
163 if (signedness)
164 *otype = OBJECT_TYPE_S16;
165 else
166 *otype = OBJECT_TYPE_U16;
167 break;
168 case 32:
169 if (signedness)
170 *otype = OBJECT_TYPE_S32;
171 else
172 *otype = OBJECT_TYPE_U32;
173 break;
174 case 64:
175 if (signedness)
176 *otype = OBJECT_TYPE_S64;
177 else
178 *otype = OBJECT_TYPE_U64;
179 break;
180 default:
181 return -EINVAL;
182 }
183 return 0;
184}
185
/*
 * Specialize a get_index operation applied to the object at the top of
 * the virtual stack: validate that the object is indexable, compute the
 * element byte offset, and push a struct filter_get_index_data blob into
 * the runtime data area, patching its offset into the instruction's
 * index operand (16- or 64-bit, selected by @idx_len).
 *
 * Returns 0 on success, -EINVAL on non-indexable objects, out-of-bounds
 * constant index, or data-area overflow.
 */
static int specialize_get_index(struct bytecode_runtime *runtime,
		struct load_op *insn, uint64_t index,
		struct vstack_entry *stack_top,
		int idx_len)
{
	int ret;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	memset(&gid, 0, sizeof(gid));
	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		switch (stack_top->load.object_type) {
		case OBJECT_TYPE_ARRAY:
		{
			const struct lttng_event_field *field;
			uint32_t elem_len, num_elems;
			int signedness;

			field = stack_top->load.field;
			elem_len = field->type.u.array.elem_type.u.basic.integer.size;
			signedness = field->type.u.array.elem_type.u.basic.integer.signedness;
			num_elems = field->type.u.array.length;
			/* Constant index can be bound-checked at specialization time. */
			if (index >= num_elems) {
				ret = -EINVAL;
				goto end;
			}
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			gid.offset = index * (elem_len / CHAR_BIT);
			/* Total array size in bytes, for interpreter bound checks. */
			gid.array_len = num_elems * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (field->type.u.array.elem_type.u.basic.integer.reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_SEQUENCE:
		{
			const struct lttng_event_field *field;
			uint32_t elem_len;
			int signedness;

			field = stack_top->load.field;
			elem_len = field->type.u.sequence.elem_type.u.basic.integer.size;
			signedness = field->type.u.sequence.elem_type.u.basic.integer.signedness;
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			/* Sequence length is only known at runtime: no array_len here. */
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (field->type.u.sequence.elem_type.u.basic.integer.reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_STRUCT:
			/* Only generated by the specialize phase. */
		case OBJECT_TYPE_VARIANT:	/* Fall-through */
		default:
			printk(KERN_WARNING "Unexpected get index type %d",
				(int) stack_top->load.object_type);
			ret = -EINVAL;
			goto end;
		}
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
		printk(KERN_WARNING "Index lookup for root field not implemented yet.\n");
		ret = -EINVAL;
		goto end;
	}
	/* Store the gid blob and patch its offset into the instruction. */
	data_offset = bytecode_push_data(runtime, &gid,
		__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	switch (idx_len) {
	case 2:
		((struct get_index_u16 *) insn->data)->index = data_offset;
		break;
	case 8:
		((struct get_index_u64 *) insn->data)->index = data_offset;
		break;
	default:
		ret = -EINVAL;
		goto end;
	}

	return 0;

end:
	return ret;
}
287
63629d86
FD
288static int specialize_context_lookup_name(struct lttng_ctx *ctx,
289 struct bytecode_runtime *bytecode,
3834b99f
MD
290 struct load_op *insn)
291{
292 uint16_t offset;
293 const char *name;
294
295 offset = ((struct get_symbol *) insn->data)->offset;
296 name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
63629d86 297 return lttng_get_context_index(ctx, name);
3834b99f
MD
298}
299
300static int specialize_load_object(const struct lttng_event_field *field,
301 struct vstack_load *load, bool is_context)
302{
303 load->type = LOAD_OBJECT;
304 /*
305 * LTTng-UST layout all integer fields as s64 on the stack for the filter.
306 */
307 switch (field->type.atype) {
308 case atype_integer:
309 if (field->type.u.basic.integer.signedness)
310 load->object_type = OBJECT_TYPE_S64;
311 else
312 load->object_type = OBJECT_TYPE_U64;
313 load->rev_bo = false;
314 break;
315 case atype_enum:
316 {
317 const struct lttng_integer_type *itype =
318 &field->type.u.basic.enumeration.container_type;
319
320 if (itype->signedness)
321 load->object_type = OBJECT_TYPE_S64;
322 else
323 load->object_type = OBJECT_TYPE_U64;
324 load->rev_bo = false;
325 break;
326 }
327 case atype_array:
328 if (field->type.u.array.elem_type.atype != atype_integer) {
329 printk(KERN_WARNING "Array nesting only supports integer types.\n");
330 return -EINVAL;
331 }
332 if (is_context) {
333 load->object_type = OBJECT_TYPE_STRING;
334 } else {
335 if (field->type.u.array.elem_type.u.basic.integer.encoding == lttng_encode_none) {
336 load->object_type = OBJECT_TYPE_ARRAY;
337 load->field = field;
338 } else {
339 load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
340 }
341 }
342 break;
343 case atype_sequence:
344 if (field->type.u.sequence.elem_type.atype != atype_integer) {
345 printk(KERN_WARNING "Sequence nesting only supports integer types.\n");
346 return -EINVAL;
347 }
348 if (is_context) {
349 load->object_type = OBJECT_TYPE_STRING;
350 } else {
351 if (field->type.u.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) {
352 load->object_type = OBJECT_TYPE_SEQUENCE;
353 load->field = field;
354 } else {
355 load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
356 }
357 }
358 break;
359 case atype_array_bitfield:
360 printk(KERN_WARNING "Bitfield array type is not supported.\n");
361 return -EINVAL;
362 case atype_sequence_bitfield:
363 printk(KERN_WARNING "Bitfield sequence type is not supported.\n");
364 return -EINVAL;
365 case atype_string:
366 load->object_type = OBJECT_TYPE_STRING;
367 break;
368 case atype_struct:
369 printk(KERN_WARNING "Structure type cannot be loaded.\n");
370 return -EINVAL;
371 default:
372 printk(KERN_WARNING "Unknown type: %d", (int) field->type.atype);
373 return -EINVAL;
374 }
375 return 0;
376}
377
63629d86
FD
378static int specialize_context_lookup(struct lttng_ctx *ctx,
379 struct bytecode_runtime *runtime,
3834b99f
MD
380 struct load_op *insn,
381 struct vstack_load *load)
382{
383 int idx, ret;
384 struct lttng_ctx_field *ctx_field;
385 struct lttng_event_field *field;
386 struct filter_get_index_data gid;
387 ssize_t data_offset;
388
63629d86 389 idx = specialize_context_lookup_name(ctx, runtime, insn);
3834b99f
MD
390 if (idx < 0) {
391 return -ENOENT;
392 }
393 ctx_field = &lttng_static_ctx->fields[idx];
394 field = &ctx_field->event_field;
395 ret = specialize_load_object(field, load, true);
396 if (ret)
397 return ret;
398 /* Specialize each get_symbol into a get_index. */
399 insn->op = FILTER_OP_GET_INDEX_U16;
400 memset(&gid, 0, sizeof(gid));
401 gid.ctx_index = idx;
402 gid.elem.type = load->object_type;
403 data_offset = bytecode_push_data(runtime, &gid,
404 __alignof__(gid), sizeof(gid));
405 if (data_offset < 0) {
406 return -EINVAL;
407 }
408 ((struct get_index_u16 *) insn->data)->index = data_offset;
409 return 0;
410}
411
/*
 * Specialize a get_symbol lookup on the event payload root: find the
 * named field in the event description, compute its byte offset on the
 * interpreter stack, and rewrite the instruction as
 * FILTER_OP_GET_INDEX_U16 with a gid blob stored in the runtime data
 * area.
 *
 * The per-type offset increments below must match the interpreter's
 * stack layout for payload fields exactly.
 *
 * Returns 0 on success, -EINVAL when the field is unknown or of an
 * unsupported type.
 */
static int specialize_payload_lookup(const struct lttng_event_desc *event_desc,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	const char *name;
	uint16_t offset;
	unsigned int i, nr_fields;
	bool found = false;
	uint32_t field_offset = 0;
	const struct lttng_event_field *field;
	int ret;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	nr_fields = event_desc->nr_fields;
	/* Field name lives in the bytecode's relocation table. */
	offset = ((struct get_symbol *) insn->data)->offset;
	name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
	for (i = 0; i < nr_fields; i++) {
		field = &event_desc->fields[i];
		if (!strcmp(field->name, name)) {
			found = true;
			break;
		}
		/* compute field offset on stack */
		switch (field->type.atype) {
		case atype_integer:
		case atype_enum:
			field_offset += sizeof(int64_t);
			break;
		case atype_array:
		case atype_sequence:
		case atype_array_bitfield:
		case atype_sequence_bitfield:
			/* Length word followed by a data pointer. */
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case atype_string:
			field_offset += sizeof(void *);
			break;
		default:
			ret = -EINVAL;
			goto end;
		}
	}
	if (!found) {
		ret = -EINVAL;
		goto end;
	}

	ret = specialize_load_object(field, load, false);
	if (ret)
		goto end;

	/* Specialize each get_symbol into a get_index. */
	insn->op = FILTER_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.offset = field_offset;
	gid.elem.type = load->object_type;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	ret = 0;
end:
	return ret;
}
482
63629d86 483int lttng_filter_specialize_bytecode(const struct lttng_event_desc *event_desc,
3834b99f 484 struct bytecode_runtime *bytecode)
07dfc1d0
MD
485{
486 void *pc, *next_pc, *start_pc;
487 int ret = -EINVAL;
488 struct vstack _stack;
489 struct vstack *stack = &_stack;
63629d86 490 struct lttng_ctx *ctx = bytecode->p.ctx;
07dfc1d0
MD
491
492 vstack_init(stack);
493
3834b99f 494 start_pc = &bytecode->code[0];
07dfc1d0
MD
495 for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
496 pc = next_pc) {
497 switch (*(filter_opcode_t *) pc) {
498 case FILTER_OP_UNKNOWN:
499 default:
500 printk(KERN_WARNING "unknown bytecode op %u\n",
501 (unsigned int) *(filter_opcode_t *) pc);
502 ret = -EINVAL;
503 goto end;
504
505 case FILTER_OP_RETURN:
57ba4b41 506 case FILTER_OP_RETURN_S64:
07dfc1d0
MD
507 ret = 0;
508 goto end;
509
510 /* binary */
511 case FILTER_OP_MUL:
512 case FILTER_OP_DIV:
513 case FILTER_OP_MOD:
514 case FILTER_OP_PLUS:
515 case FILTER_OP_MINUS:
07dfc1d0
MD
516 printk(KERN_WARNING "unsupported bytecode op %u\n",
517 (unsigned int) *(filter_opcode_t *) pc);
518 ret = -EINVAL;
519 goto end;
520
521 case FILTER_OP_EQ:
522 {
523 struct binary_op *insn = (struct binary_op *) pc;
524
525 switch(vstack_ax(stack)->type) {
526 default:
527 printk(KERN_WARNING "unknown register type\n");
528 ret = -EINVAL;
529 goto end;
530
531 case REG_STRING:
02aca193
PP
532 if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
533 insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
534 else
535 insn->op = FILTER_OP_EQ_STRING;
536 break;
537 case REG_STAR_GLOB_STRING:
538 insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
07dfc1d0
MD
539 break;
540 case REG_S64:
541 if (vstack_bx(stack)->type == REG_S64)
542 insn->op = FILTER_OP_EQ_S64;
543 else
544 insn->op = FILTER_OP_EQ_DOUBLE_S64;
545 break;
546 case REG_DOUBLE:
547 if (vstack_bx(stack)->type == REG_S64)
548 insn->op = FILTER_OP_EQ_S64_DOUBLE;
549 else
550 insn->op = FILTER_OP_EQ_DOUBLE;
551 break;
552 }
553 /* Pop 2, push 1 */
554 if (vstack_pop(stack)) {
555 ret = -EINVAL;
556 goto end;
557 }
558 vstack_ax(stack)->type = REG_S64;
559 next_pc += sizeof(struct binary_op);
560 break;
561 }
562
563 case FILTER_OP_NE:
564 {
565 struct binary_op *insn = (struct binary_op *) pc;
566
567 switch(vstack_ax(stack)->type) {
568 default:
569 printk(KERN_WARNING "unknown register type\n");
570 ret = -EINVAL;
571 goto end;
572
573 case REG_STRING:
02aca193
PP
574 if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
575 insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
576 else
577 insn->op = FILTER_OP_NE_STRING;
578 break;
579 case REG_STAR_GLOB_STRING:
580 insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
07dfc1d0
MD
581 break;
582 case REG_S64:
583 if (vstack_bx(stack)->type == REG_S64)
584 insn->op = FILTER_OP_NE_S64;
585 else
586 insn->op = FILTER_OP_NE_DOUBLE_S64;
587 break;
588 case REG_DOUBLE:
589 if (vstack_bx(stack)->type == REG_S64)
590 insn->op = FILTER_OP_NE_S64_DOUBLE;
591 else
592 insn->op = FILTER_OP_NE_DOUBLE;
593 break;
594 }
595 /* Pop 2, push 1 */
596 if (vstack_pop(stack)) {
597 ret = -EINVAL;
598 goto end;
599 }
600 vstack_ax(stack)->type = REG_S64;
601 next_pc += sizeof(struct binary_op);
602 break;
603 }
604
605 case FILTER_OP_GT:
606 {
607 struct binary_op *insn = (struct binary_op *) pc;
608
609 switch(vstack_ax(stack)->type) {
610 default:
611 printk(KERN_WARNING "unknown register type\n");
612 ret = -EINVAL;
613 goto end;
614
02aca193
PP
615 case REG_STAR_GLOB_STRING:
616 printk(KERN_WARNING "invalid register type for > binary operator\n");
617 ret = -EINVAL;
618 goto end;
07dfc1d0
MD
619 case REG_STRING:
620 insn->op = FILTER_OP_GT_STRING;
621 break;
622 case REG_S64:
623 if (vstack_bx(stack)->type == REG_S64)
624 insn->op = FILTER_OP_GT_S64;
625 else
626 insn->op = FILTER_OP_GT_DOUBLE_S64;
627 break;
628 case REG_DOUBLE:
629 if (vstack_bx(stack)->type == REG_S64)
630 insn->op = FILTER_OP_GT_S64_DOUBLE;
631 else
632 insn->op = FILTER_OP_GT_DOUBLE;
633 break;
634 }
635 /* Pop 2, push 1 */
636 if (vstack_pop(stack)) {
637 ret = -EINVAL;
638 goto end;
639 }
640 vstack_ax(stack)->type = REG_S64;
641 next_pc += sizeof(struct binary_op);
642 break;
643 }
644
645 case FILTER_OP_LT:
646 {
647 struct binary_op *insn = (struct binary_op *) pc;
648
649 switch(vstack_ax(stack)->type) {
650 default:
651 printk(KERN_WARNING "unknown register type\n");
652 ret = -EINVAL;
653 goto end;
654
02aca193
PP
655 case REG_STAR_GLOB_STRING:
656 printk(KERN_WARNING "invalid register type for < binary operator\n");
657 ret = -EINVAL;
658 goto end;
07dfc1d0
MD
659 case REG_STRING:
660 insn->op = FILTER_OP_LT_STRING;
661 break;
662 case REG_S64:
663 if (vstack_bx(stack)->type == REG_S64)
664 insn->op = FILTER_OP_LT_S64;
665 else
666 insn->op = FILTER_OP_LT_DOUBLE_S64;
667 break;
668 case REG_DOUBLE:
669 if (vstack_bx(stack)->type == REG_S64)
670 insn->op = FILTER_OP_LT_S64_DOUBLE;
671 else
672 insn->op = FILTER_OP_LT_DOUBLE;
673 break;
674 }
675 /* Pop 2, push 1 */
676 if (vstack_pop(stack)) {
677 ret = -EINVAL;
678 goto end;
679 }
680 vstack_ax(stack)->type = REG_S64;
681 next_pc += sizeof(struct binary_op);
682 break;
683 }
684
685 case FILTER_OP_GE:
686 {
687 struct binary_op *insn = (struct binary_op *) pc;
688
689 switch(vstack_ax(stack)->type) {
690 default:
691 printk(KERN_WARNING "unknown register type\n");
692 ret = -EINVAL;
693 goto end;
694
02aca193
PP
695 case REG_STAR_GLOB_STRING:
696 printk(KERN_WARNING "invalid register type for >= binary operator\n");
697 ret = -EINVAL;
698 goto end;
07dfc1d0
MD
699 case REG_STRING:
700 insn->op = FILTER_OP_GE_STRING;
701 break;
702 case REG_S64:
703 if (vstack_bx(stack)->type == REG_S64)
704 insn->op = FILTER_OP_GE_S64;
705 else
706 insn->op = FILTER_OP_GE_DOUBLE_S64;
707 break;
708 case REG_DOUBLE:
709 if (vstack_bx(stack)->type == REG_S64)
710 insn->op = FILTER_OP_GE_S64_DOUBLE;
711 else
712 insn->op = FILTER_OP_GE_DOUBLE;
713 break;
714 }
715 /* Pop 2, push 1 */
716 if (vstack_pop(stack)) {
717 ret = -EINVAL;
718 goto end;
719 }
720 vstack_ax(stack)->type = REG_S64;
721 next_pc += sizeof(struct binary_op);
722 break;
723 }
724 case FILTER_OP_LE:
725 {
726 struct binary_op *insn = (struct binary_op *) pc;
727
728 switch(vstack_ax(stack)->type) {
729 default:
730 printk(KERN_WARNING "unknown register type\n");
731 ret = -EINVAL;
732 goto end;
733
02aca193
PP
734 case REG_STAR_GLOB_STRING:
735 printk(KERN_WARNING "invalid register type for <= binary operator\n");
736 ret = -EINVAL;
737 goto end;
07dfc1d0
MD
738 case REG_STRING:
739 insn->op = FILTER_OP_LE_STRING;
740 break;
741 case REG_S64:
742 if (vstack_bx(stack)->type == REG_S64)
743 insn->op = FILTER_OP_LE_S64;
744 else
745 insn->op = FILTER_OP_LE_DOUBLE_S64;
746 break;
747 case REG_DOUBLE:
748 if (vstack_bx(stack)->type == REG_S64)
749 insn->op = FILTER_OP_LE_S64_DOUBLE;
750 else
751 insn->op = FILTER_OP_LE_DOUBLE;
752 break;
753 }
754 vstack_ax(stack)->type = REG_S64;
755 next_pc += sizeof(struct binary_op);
756 break;
757 }
758
759 case FILTER_OP_EQ_STRING:
760 case FILTER_OP_NE_STRING:
761 case FILTER_OP_GT_STRING:
762 case FILTER_OP_LT_STRING:
763 case FILTER_OP_GE_STRING:
764 case FILTER_OP_LE_STRING:
02aca193
PP
765 case FILTER_OP_EQ_STAR_GLOB_STRING:
766 case FILTER_OP_NE_STAR_GLOB_STRING:
07dfc1d0
MD
767 case FILTER_OP_EQ_S64:
768 case FILTER_OP_NE_S64:
769 case FILTER_OP_GT_S64:
770 case FILTER_OP_LT_S64:
771 case FILTER_OP_GE_S64:
772 case FILTER_OP_LE_S64:
773 case FILTER_OP_EQ_DOUBLE:
774 case FILTER_OP_NE_DOUBLE:
775 case FILTER_OP_GT_DOUBLE:
776 case FILTER_OP_LT_DOUBLE:
777 case FILTER_OP_GE_DOUBLE:
778 case FILTER_OP_LE_DOUBLE:
779 case FILTER_OP_EQ_DOUBLE_S64:
780 case FILTER_OP_NE_DOUBLE_S64:
781 case FILTER_OP_GT_DOUBLE_S64:
782 case FILTER_OP_LT_DOUBLE_S64:
783 case FILTER_OP_GE_DOUBLE_S64:
784 case FILTER_OP_LE_DOUBLE_S64:
785 case FILTER_OP_EQ_S64_DOUBLE:
786 case FILTER_OP_NE_S64_DOUBLE:
787 case FILTER_OP_GT_S64_DOUBLE:
788 case FILTER_OP_LT_S64_DOUBLE:
789 case FILTER_OP_GE_S64_DOUBLE:
790 case FILTER_OP_LE_S64_DOUBLE:
e16c054b
MD
791 case FILTER_OP_BIT_RSHIFT:
792 case FILTER_OP_BIT_LSHIFT:
3834b99f
MD
793 case FILTER_OP_BIT_AND:
794 case FILTER_OP_BIT_OR:
795 case FILTER_OP_BIT_XOR:
07dfc1d0
MD
796 {
797 /* Pop 2, push 1 */
798 if (vstack_pop(stack)) {
799 ret = -EINVAL;
800 goto end;
801 }
802 vstack_ax(stack)->type = REG_S64;
803 next_pc += sizeof(struct binary_op);
804 break;
805 }
806
807 /* unary */
808 case FILTER_OP_UNARY_PLUS:
809 {
810 struct unary_op *insn = (struct unary_op *) pc;
811
812 switch(vstack_ax(stack)->type) {
813 default:
814 printk(KERN_WARNING "unknown register type\n");
815 ret = -EINVAL;
816 goto end;
817
818 case REG_S64:
819 insn->op = FILTER_OP_UNARY_PLUS_S64;
820 break;
821 case REG_DOUBLE:
822 insn->op = FILTER_OP_UNARY_PLUS_DOUBLE;
823 break;
824 }
825 /* Pop 1, push 1 */
826 next_pc += sizeof(struct unary_op);
827 break;
828 }
829
830 case FILTER_OP_UNARY_MINUS:
831 {
832 struct unary_op *insn = (struct unary_op *) pc;
833
834 switch(vstack_ax(stack)->type) {
835 default:
836 printk(KERN_WARNING "unknown register type\n");
837 ret = -EINVAL;
838 goto end;
839
840 case REG_S64:
841 insn->op = FILTER_OP_UNARY_MINUS_S64;
842 break;
843 case REG_DOUBLE:
844 insn->op = FILTER_OP_UNARY_MINUS_DOUBLE;
845 break;
846 }
847 /* Pop 1, push 1 */
848 next_pc += sizeof(struct unary_op);
849 break;
850 }
851
852 case FILTER_OP_UNARY_NOT:
853 {
854 struct unary_op *insn = (struct unary_op *) pc;
855
856 switch(vstack_ax(stack)->type) {
857 default:
858 printk(KERN_WARNING "unknown register type\n");
859 ret = -EINVAL;
860 goto end;
861
862 case REG_S64:
863 insn->op = FILTER_OP_UNARY_NOT_S64;
864 break;
865 case REG_DOUBLE:
866 insn->op = FILTER_OP_UNARY_NOT_DOUBLE;
867 break;
868 }
869 /* Pop 1, push 1 */
870 next_pc += sizeof(struct unary_op);
871 break;
872 }
873
e16c054b
MD
874 case FILTER_OP_UNARY_BIT_NOT:
875 {
876 /* Pop 1, push 1 */
877 next_pc += sizeof(struct unary_op);
878 break;
879 }
880
07dfc1d0
MD
881 case FILTER_OP_UNARY_PLUS_S64:
882 case FILTER_OP_UNARY_MINUS_S64:
883 case FILTER_OP_UNARY_NOT_S64:
884 case FILTER_OP_UNARY_PLUS_DOUBLE:
885 case FILTER_OP_UNARY_MINUS_DOUBLE:
886 case FILTER_OP_UNARY_NOT_DOUBLE:
887 {
888 /* Pop 1, push 1 */
889 next_pc += sizeof(struct unary_op);
890 break;
891 }
892
893 /* logical */
894 case FILTER_OP_AND:
895 case FILTER_OP_OR:
896 {
897 /* Continue to next instruction */
898 /* Pop 1 when jump not taken */
899 if (vstack_pop(stack)) {
900 ret = -EINVAL;
901 goto end;
902 }
903 next_pc += sizeof(struct logical_op);
904 break;
905 }
906
907 /* load field ref */
908 case FILTER_OP_LOAD_FIELD_REF:
909 {
910 printk(KERN_WARNING "Unknown field ref type\n");
911 ret = -EINVAL;
912 goto end;
913 }
914 /* get context ref */
915 case FILTER_OP_GET_CONTEXT_REF:
916 {
917 printk(KERN_WARNING "Unknown get context ref type\n");
918 ret = -EINVAL;
919 goto end;
920 }
921 case FILTER_OP_LOAD_FIELD_REF_STRING:
922 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
923 case FILTER_OP_GET_CONTEXT_REF_STRING:
f127e61e
MD
924 case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
925 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
07dfc1d0
MD
926 {
927 if (vstack_push(stack)) {
928 ret = -EINVAL;
929 goto end;
930 }
931 vstack_ax(stack)->type = REG_STRING;
932 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
933 break;
934 }
935 case FILTER_OP_LOAD_FIELD_REF_S64:
936 case FILTER_OP_GET_CONTEXT_REF_S64:
937 {
938 if (vstack_push(stack)) {
939 ret = -EINVAL;
940 goto end;
941 }
942 vstack_ax(stack)->type = REG_S64;
943 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
944 break;
945 }
946 case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
947 case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
948 {
949 if (vstack_push(stack)) {
950 ret = -EINVAL;
951 goto end;
952 }
953 vstack_ax(stack)->type = REG_DOUBLE;
954 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
955 break;
956 }
957
958 /* load from immediate operand */
959 case FILTER_OP_LOAD_STRING:
960 {
961 struct load_op *insn = (struct load_op *) pc;
962
963 if (vstack_push(stack)) {
964 ret = -EINVAL;
965 goto end;
966 }
967 vstack_ax(stack)->type = REG_STRING;
968 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
969 break;
970 }
971
02aca193
PP
972 case FILTER_OP_LOAD_STAR_GLOB_STRING:
973 {
974 struct load_op *insn = (struct load_op *) pc;
975
976 if (vstack_push(stack)) {
977 ret = -EINVAL;
978 goto end;
979 }
980 vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
981 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
982 break;
983 }
984
07dfc1d0
MD
985 case FILTER_OP_LOAD_S64:
986 {
987 if (vstack_push(stack)) {
988 ret = -EINVAL;
989 goto end;
990 }
991 vstack_ax(stack)->type = REG_S64;
992 next_pc += sizeof(struct load_op)
993 + sizeof(struct literal_numeric);
994 break;
995 }
996
997 case FILTER_OP_LOAD_DOUBLE:
998 {
999 if (vstack_push(stack)) {
1000 ret = -EINVAL;
1001 goto end;
1002 }
1003 vstack_ax(stack)->type = REG_DOUBLE;
1004 next_pc += sizeof(struct load_op)
1005 + sizeof(struct literal_double);
1006 break;
1007 }
1008
1009 /* cast */
1010 case FILTER_OP_CAST_TO_S64:
1011 {
1012 struct cast_op *insn = (struct cast_op *) pc;
1013
1014 switch (vstack_ax(stack)->type) {
1015 default:
1016 printk(KERN_WARNING "unknown register type\n");
1017 ret = -EINVAL;
1018 goto end;
1019
1020 case REG_STRING:
02aca193 1021 case REG_STAR_GLOB_STRING:
07dfc1d0
MD
1022 printk(KERN_WARNING "Cast op can only be applied to numeric or floating point registers\n");
1023 ret = -EINVAL;
1024 goto end;
1025 case REG_S64:
1026 insn->op = FILTER_OP_CAST_NOP;
1027 break;
1028 case REG_DOUBLE:
1029 insn->op = FILTER_OP_CAST_DOUBLE_TO_S64;
1030 break;
1031 }
1032 /* Pop 1, push 1 */
1033 vstack_ax(stack)->type = REG_S64;
1034 next_pc += sizeof(struct cast_op);
1035 break;
1036 }
1037 case FILTER_OP_CAST_DOUBLE_TO_S64:
1038 {
1039 /* Pop 1, push 1 */
1040 vstack_ax(stack)->type = REG_S64;
1041 next_pc += sizeof(struct cast_op);
1042 break;
1043 }
1044 case FILTER_OP_CAST_NOP:
1045 {
1046 next_pc += sizeof(struct cast_op);
1047 break;
1048 }
1049
3834b99f
MD
1050 /*
1051 * Instructions for recursive traversal through composed types.
1052 */
1053 case FILTER_OP_GET_CONTEXT_ROOT:
1054 {
1055 if (vstack_push(stack)) {
1056 ret = -EINVAL;
1057 goto end;
1058 }
1059 vstack_ax(stack)->type = REG_PTR;
1060 vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
1061 next_pc += sizeof(struct load_op);
1062 break;
1063 }
1064 case FILTER_OP_GET_APP_CONTEXT_ROOT:
1065 {
1066 if (vstack_push(stack)) {
1067 ret = -EINVAL;
1068 goto end;
1069 }
1070 vstack_ax(stack)->type = REG_PTR;
1071 vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
1072 next_pc += sizeof(struct load_op);
1073 break;
1074 }
1075 case FILTER_OP_GET_PAYLOAD_ROOT:
1076 {
1077 if (vstack_push(stack)) {
1078 ret = -EINVAL;
1079 goto end;
1080 }
1081 vstack_ax(stack)->type = REG_PTR;
1082 vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
1083 next_pc += sizeof(struct load_op);
1084 break;
1085 }
1086
1087 case FILTER_OP_LOAD_FIELD:
1088 {
1089 struct load_op *insn = (struct load_op *) pc;
1090
1091 WARN_ON_ONCE(vstack_ax(stack)->type != REG_PTR);
1092 /* Pop 1, push 1 */
1093 ret = specialize_load_field(vstack_ax(stack), insn);
1094 if (ret)
1095 goto end;
1096
1097 next_pc += sizeof(struct load_op);
1098 break;
1099 }
1100
1101 case FILTER_OP_LOAD_FIELD_S8:
1102 case FILTER_OP_LOAD_FIELD_S16:
1103 case FILTER_OP_LOAD_FIELD_S32:
1104 case FILTER_OP_LOAD_FIELD_S64:
1105 case FILTER_OP_LOAD_FIELD_U8:
1106 case FILTER_OP_LOAD_FIELD_U16:
1107 case FILTER_OP_LOAD_FIELD_U32:
1108 case FILTER_OP_LOAD_FIELD_U64:
1109 {
1110 /* Pop 1, push 1 */
1111 vstack_ax(stack)->type = REG_S64;
1112 next_pc += sizeof(struct load_op);
1113 break;
1114 }
1115
1116 case FILTER_OP_LOAD_FIELD_STRING:
1117 case FILTER_OP_LOAD_FIELD_SEQUENCE:
1118 {
1119 /* Pop 1, push 1 */
1120 vstack_ax(stack)->type = REG_STRING;
1121 next_pc += sizeof(struct load_op);
1122 break;
1123 }
1124
1125 case FILTER_OP_LOAD_FIELD_DOUBLE:
1126 {
1127 /* Pop 1, push 1 */
1128 vstack_ax(stack)->type = REG_DOUBLE;
1129 next_pc += sizeof(struct load_op);
1130 break;
1131 }
1132
1133 case FILTER_OP_GET_SYMBOL:
1134 {
1135 struct load_op *insn = (struct load_op *) pc;
1136
1137 dbg_printk("op get symbol\n");
1138 switch (vstack_ax(stack)->load.type) {
1139 case LOAD_OBJECT:
1140 printk(KERN_WARNING "Nested fields not implemented yet.\n");
1141 ret = -EINVAL;
1142 goto end;
1143 case LOAD_ROOT_CONTEXT:
1144 /* Lookup context field. */
63629d86 1145 ret = specialize_context_lookup(ctx, bytecode, insn,
3834b99f
MD
1146 &vstack_ax(stack)->load);
1147 if (ret)
1148 goto end;
1149 break;
1150 case LOAD_ROOT_APP_CONTEXT:
1151 ret = -EINVAL;
1152 goto end;
1153 case LOAD_ROOT_PAYLOAD:
1154 /* Lookup event payload field. */
63629d86 1155 ret = specialize_payload_lookup(event_desc,
3834b99f
MD
1156 bytecode, insn,
1157 &vstack_ax(stack)->load);
1158 if (ret)
1159 goto end;
1160 break;
1161 }
1162 next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
1163 break;
1164 }
1165
1166 case FILTER_OP_GET_SYMBOL_FIELD:
1167 {
1168 /* Always generated by specialize phase. */
1169 ret = -EINVAL;
1170 goto end;
1171 }
1172
1173 case FILTER_OP_GET_INDEX_U16:
1174 {
1175 struct load_op *insn = (struct load_op *) pc;
1176 struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
1177
1178 dbg_printk("op get index u16\n");
1179 /* Pop 1, push 1 */
1180 ret = specialize_get_index(bytecode, insn, index->index,
1181 vstack_ax(stack), sizeof(*index));
1182 if (ret)
1183 goto end;
1184 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
1185 break;
1186 }
1187
1188 case FILTER_OP_GET_INDEX_U64:
1189 {
1190 struct load_op *insn = (struct load_op *) pc;
1191 struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
1192
1193 dbg_printk("op get index u64\n");
1194 /* Pop 1, push 1 */
1195 ret = specialize_get_index(bytecode, insn, index->index,
1196 vstack_ax(stack), sizeof(*index));
1197 if (ret)
1198 goto end;
1199 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
1200 break;
1201 }
1202
07dfc1d0
MD
1203 }
1204 }
1205end:
1206 return ret;
1207}
This page took 0.075145 seconds and 5 git commands to generate.