SoW-2019-0007-2: Dynamic Snapshot: Triggers send partial event payload with notifications
[deliverable/lttng-modules.git] / src / lttng-bytecode-validator.c
1 /* SPDX-License-Identifier: MIT
2 *
3 * lttng-bytecode-validator.c
4 *
5 * LTTng modules bytecode validator.
6 *
7 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 #include <linux/types.h>
11 #include <linux/jhash.h>
12 #include <linux/slab.h>
13
14 #include <wrapper/list.h>
15 #include <lttng/lttng-bytecode.h>
16
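/*
 * The struct vstack virtual stack, the REG_* register type enum and the
 * vstack_ax()/vstack_bx() accessors (top of stack and the slot just below
 * it) used throughout this file come from the lttng-bytecode.h header
 * included above.
 */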
17 #define MERGE_POINT_TABLE_BITS 7
18 #define MERGE_POINT_TABLE_SIZE (1U << MERGE_POINT_TABLE_BITS)
19
20 /* merge point table node */
21 struct mp_node {
22 struct hlist_node node;
23
24 /* Context at merge point */
25 struct vstack stack;
26 unsigned long target_pc;
27 };
28
29 struct mp_table {
30 struct hlist_head mp_head[MERGE_POINT_TABLE_SIZE];
31 };
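/*
 * Merge point bookkeeping: a logical and/or instruction may short-circuit
 * and jump ahead to its skip_offset target. When such an instruction is
 * encountered, the virtual stack state expected at that target is recorded
 * in this fixed-size hash table (MERGE_POINT_TABLE_SIZE buckets, keyed by
 * target pc). When the linear traversal later reaches the target offset,
 * the live stack must have the same shape as the recorded one, otherwise
 * the bytecode is rejected.
 */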
32
33 static
34 int lttng_hash_match(struct mp_node *mp_node, unsigned long key_pc)
35 {
36 if (mp_node->target_pc == key_pc)
37 return 1;
38 else
39 return 0;
40 }
41
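/*
 * Two merge point stacks match when they have the same depth and the same
 * register type in every slot; only types are compared, never values.
 */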
42 static
43 int merge_points_compare(const struct vstack *stacka,
44 const struct vstack *stackb)
45 {
46 int i, len;
47
48 if (stacka->top != stackb->top)
49 return 1;
50 len = stacka->top + 1;
51 WARN_ON_ONCE(len < 0);
52 for (i = 0; i < len; i++) {
53 if (stacka->e[i].type != stackb->e[i].type)
54 return 1;
55 }
56 return 0;
57 }
58
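/*
 * Record the stack state expected at target_pc. If a merge point already
 * exists for that offset, the new state must compare equal to the recorded
 * one, and the freshly allocated node is freed instead of being inserted.
 */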
59 static
60 int merge_point_add_check(struct mp_table *mp_table, unsigned long target_pc,
61 const struct vstack *stack)
62 {
63 struct mp_node *mp_node;
64 unsigned long hash = jhash_1word(target_pc, 0);
65 struct hlist_head *head;
66 struct mp_node *lookup_node;
67 int found = 0;
68
69 dbg_printk("Bytecode: adding merge point at offset %lu, hash %lu\n",
70 target_pc, hash);
71 mp_node = kzalloc(sizeof(struct mp_node), GFP_KERNEL);
72 if (!mp_node)
73 return -ENOMEM;
74 mp_node->target_pc = target_pc;
75 memcpy(&mp_node->stack, stack, sizeof(mp_node->stack));
76
77 head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
78 lttng_hlist_for_each_entry(lookup_node, head, node) {
79 if (lttng_hash_match(lookup_node, target_pc)) {
80 found = 1;
81 break;
82 }
83 }
84 if (found) {
85 /* Key already present */
86 dbg_printk("Bytecode: compare merge points for offset %lu, hash %lu\n",
87 target_pc, hash);
88 kfree(mp_node);
89 if (merge_points_compare(stack, &lookup_node->stack)) {
90 printk(KERN_WARNING "Merge points differ for offset %lu\n",
91 target_pc);
92 return -EINVAL;
93 }
94 } else {
95 hlist_add_head(&mp_node->node, head);
96 }
97 return 0;
98 }
99
100 /*
101 * Binary comparators use top of stack and top of stack -1.
 * Return 0 if typing is known to match, 1 if typing is dynamic
 * (unknown), negative error value on error.
102 */
103 static
104 int bin_op_compare_check(struct vstack *stack, const bytecode_opcode_t opcode,
105 const char *str)
106 {
107 if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
108 goto error_empty;
109
110 switch (vstack_ax(stack)->type) {
111 default:
112 case REG_DOUBLE:
113 goto error_type;
114
115 case REG_STRING:
116 switch (vstack_bx(stack)->type) {
117 default:
118 case REG_DOUBLE:
119 goto error_type;
120 case REG_TYPE_UNKNOWN:
121 goto unknown;
122 case REG_STRING:
123 break;
124 case REG_STAR_GLOB_STRING:
125 if (opcode != BYTECODE_OP_EQ && opcode != BYTECODE_OP_NE) {
126 goto error_mismatch;
127 }
128 break;
129 case REG_S64:
130 case REG_U64:
131 goto error_mismatch;
132 }
133 break;
134 case REG_STAR_GLOB_STRING:
135 switch (vstack_bx(stack)->type) {
136 default:
137 case REG_DOUBLE:
138 goto error_type;
139 case REG_TYPE_UNKNOWN:
140 goto unknown;
141 case REG_STRING:
142 if (opcode != BYTECODE_OP_EQ && opcode != BYTECODE_OP_NE) {
143 goto error_mismatch;
144 }
145 break;
146 case REG_STAR_GLOB_STRING:
147 case REG_S64:
148 case REG_U64:
149 goto error_mismatch;
150 }
151 break;
152 case REG_S64:
153 case REG_U64:
154 switch (vstack_bx(stack)->type) {
155 default:
156 case REG_DOUBLE:
157 goto error_type;
158 case REG_TYPE_UNKNOWN:
159 goto unknown;
160 case REG_STRING:
161 case REG_STAR_GLOB_STRING:
162 goto error_mismatch;
163 case REG_S64:
164 case REG_U64:
165 break;
166 }
167 break;
168 case REG_TYPE_UNKNOWN:
169 switch (vstack_bx(stack)->type) {
170 default:
171 case REG_DOUBLE:
172 goto error_type;
173 case REG_TYPE_UNKNOWN:
174 case REG_STRING:
175 case REG_STAR_GLOB_STRING:
176 case REG_S64:
177 case REG_U64:
178 goto unknown;
179 }
180 break;
181 }
182 return 0;
183
184 unknown:
185 return 1;
186
187 error_empty:
188 printk(KERN_WARNING "empty stack for '%s' binary operator\n", str);
189 return -EINVAL;
190
191 error_mismatch:
192 printk(KERN_WARNING "type mismatch for '%s' binary operator\n", str);
193 return -EINVAL;
194
195 error_type:
196 printk(KERN_WARNING "unknown type for '%s' binary operator\n", str);
197 return -EINVAL;
198 }
199
200 /*
201 * Binary bitwise operators use top of stack and top of stack -1.
202 * Return 0 if typing is known to match, 1 if typing is dynamic
203 * (unknown), negative error value on error.
204 */
205 static
206 int bin_op_bitwise_check(struct vstack *stack, bytecode_opcode_t opcode,
207 const char *str)
208 {
209 if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
210 goto error_empty;
211
212 switch (vstack_ax(stack)->type) {
213 default:
214 case REG_DOUBLE:
215 goto error_type;
216
217 case REG_TYPE_UNKNOWN:
218 switch (vstack_bx(stack)->type) {
219 default:
220 case REG_DOUBLE:
221 goto error_type;
222 case REG_TYPE_UNKNOWN:
223 case REG_STRING:
224 case REG_STAR_GLOB_STRING:
225 case REG_S64:
226 case REG_U64:
227 goto unknown;
228 }
229 break;
230 case REG_S64:
231 case REG_U64:
232 switch (vstack_bx(stack)->type) {
233 default:
234 case REG_DOUBLE:
235 goto error_type;
236 case REG_TYPE_UNKNOWN:
237 goto unknown;
238 case REG_S64:
239 case REG_U64:
240 break;
241 }
242 break;
243 }
244 return 0;
245
246 unknown:
247 return 1;
248
249 error_empty:
250 printk(KERN_WARNING "empty stack for '%s' binary operator\n", str);
251 return -EINVAL;
252
253 error_type:
254 printk(KERN_WARNING "unknown type for '%s' binary operator\n", str);
255 return -EINVAL;
256 }
257
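/*
 * A get_symbol operand is an offset into the data area located after
 * reloc_offset in the bytecode; the referenced symbol name must be
 * NUL-terminated before the end of the bytecode buffer.
 */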
258 static
259 int validate_get_symbol(struct bytecode_runtime *bytecode,
260 const struct get_symbol *sym)
261 {
262 const char *str, *str_limit;
263 size_t len_limit;
264
265 if (sym->offset >= bytecode->p.bc->bc.len - bytecode->p.bc->bc.reloc_offset)
266 return -EINVAL;
267
268 str = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + sym->offset;
269 str_limit = bytecode->p.bc->bc.data + bytecode->p.bc->bc.len;
270 len_limit = str_limit - str;
271 if (strnlen(str, len_limit) == len_limit)
272 return -EINVAL;
273 return 0;
274 }
275
276 /*
277 * Validate that the current instruction and its immediate operands do not
278 * overflow the bytecode buffer. Called for each instruction encountered.
279 */
280 static
281 int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
282 char *start_pc, char *pc)
283 {
284 int ret = 0;
285
286 switch (*(bytecode_opcode_t *) pc) {
287 case BYTECODE_OP_UNKNOWN:
288 default:
289 {
290 printk(KERN_WARNING "unknown bytecode op %u\n",
291 (unsigned int) *(bytecode_opcode_t *) pc);
292 ret = -EINVAL;
293 break;
294 }
295
296 case BYTECODE_OP_RETURN:
297 case BYTECODE_OP_RETURN_S64:
298 {
299 if (unlikely(pc + sizeof(struct return_op)
300 > start_pc + bytecode->len)) {
301 ret = -ERANGE;
302 }
303 break;
304 }
305
306 /* binary */
307 case BYTECODE_OP_MUL:
308 case BYTECODE_OP_DIV:
309 case BYTECODE_OP_MOD:
310 case BYTECODE_OP_PLUS:
311 case BYTECODE_OP_MINUS:
312 case BYTECODE_OP_EQ_DOUBLE:
313 case BYTECODE_OP_NE_DOUBLE:
314 case BYTECODE_OP_GT_DOUBLE:
315 case BYTECODE_OP_LT_DOUBLE:
316 case BYTECODE_OP_GE_DOUBLE:
317 case BYTECODE_OP_LE_DOUBLE:
318 /* Floating point */
319 case BYTECODE_OP_EQ_DOUBLE_S64:
320 case BYTECODE_OP_NE_DOUBLE_S64:
321 case BYTECODE_OP_GT_DOUBLE_S64:
322 case BYTECODE_OP_LT_DOUBLE_S64:
323 case BYTECODE_OP_GE_DOUBLE_S64:
324 case BYTECODE_OP_LE_DOUBLE_S64:
325 case BYTECODE_OP_EQ_S64_DOUBLE:
326 case BYTECODE_OP_NE_S64_DOUBLE:
327 case BYTECODE_OP_GT_S64_DOUBLE:
328 case BYTECODE_OP_LT_S64_DOUBLE:
329 case BYTECODE_OP_GE_S64_DOUBLE:
330 case BYTECODE_OP_LE_S64_DOUBLE:
331 case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
332 case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
333 case BYTECODE_OP_LOAD_DOUBLE:
334 case BYTECODE_OP_CAST_DOUBLE_TO_S64:
335 case BYTECODE_OP_UNARY_PLUS_DOUBLE:
336 case BYTECODE_OP_UNARY_MINUS_DOUBLE:
337 case BYTECODE_OP_UNARY_NOT_DOUBLE:
338 {
339 printk(KERN_WARNING "unsupported bytecode op %u\n",
340 (unsigned int) *(bytecode_opcode_t *) pc);
341 ret = -EINVAL;
342 break;
343 }
344
345 case BYTECODE_OP_EQ:
346 case BYTECODE_OP_NE:
347 case BYTECODE_OP_GT:
348 case BYTECODE_OP_LT:
349 case BYTECODE_OP_GE:
350 case BYTECODE_OP_LE:
351 case BYTECODE_OP_EQ_STRING:
352 case BYTECODE_OP_NE_STRING:
353 case BYTECODE_OP_GT_STRING:
354 case BYTECODE_OP_LT_STRING:
355 case BYTECODE_OP_GE_STRING:
356 case BYTECODE_OP_LE_STRING:
357 case BYTECODE_OP_EQ_STAR_GLOB_STRING:
358 case BYTECODE_OP_NE_STAR_GLOB_STRING:
359 case BYTECODE_OP_EQ_S64:
360 case BYTECODE_OP_NE_S64:
361 case BYTECODE_OP_GT_S64:
362 case BYTECODE_OP_LT_S64:
363 case BYTECODE_OP_GE_S64:
364 case BYTECODE_OP_LE_S64:
365 case BYTECODE_OP_BIT_RSHIFT:
366 case BYTECODE_OP_BIT_LSHIFT:
367 case BYTECODE_OP_BIT_AND:
368 case BYTECODE_OP_BIT_OR:
369 case BYTECODE_OP_BIT_XOR:
370 {
371 if (unlikely(pc + sizeof(struct binary_op)
372 > start_pc + bytecode->len)) {
373 ret = -ERANGE;
374 }
375 break;
376 }
377
378 /* unary */
379 case BYTECODE_OP_UNARY_PLUS:
380 case BYTECODE_OP_UNARY_MINUS:
381 case BYTECODE_OP_UNARY_NOT:
382 case BYTECODE_OP_UNARY_PLUS_S64:
383 case BYTECODE_OP_UNARY_MINUS_S64:
384 case BYTECODE_OP_UNARY_NOT_S64:
385 case BYTECODE_OP_UNARY_BIT_NOT:
386 {
387 if (unlikely(pc + sizeof(struct unary_op)
388 > start_pc + bytecode->len)) {
389 ret = -ERANGE;
390 }
391 break;
392 }
393
394 /* logical */
395 case BYTECODE_OP_AND:
396 case BYTECODE_OP_OR:
397 {
398 if (unlikely(pc + sizeof(struct logical_op)
399 > start_pc + bytecode->len)) {
400 ret = -ERANGE;
401 }
402 break;
403 }
404
405 /* load field ref */
406 case BYTECODE_OP_LOAD_FIELD_REF:
407 {
408 printk(KERN_WARNING "Unknown field ref type\n");
409 ret = -EINVAL;
410 break;
411 }
412
413 /* get context ref */
414 case BYTECODE_OP_GET_CONTEXT_REF:
415 {
416 printk(KERN_WARNING "Unknown field ref type\n");
417 ret = -EINVAL;
418 break;
419 }
420 case BYTECODE_OP_LOAD_FIELD_REF_STRING:
421 case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
422 case BYTECODE_OP_LOAD_FIELD_REF_USER_STRING:
423 case BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE:
424 case BYTECODE_OP_LOAD_FIELD_REF_S64:
425 case BYTECODE_OP_GET_CONTEXT_REF_STRING:
426 case BYTECODE_OP_GET_CONTEXT_REF_S64:
427 {
428 if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref)
429 > start_pc + bytecode->len)) {
430 ret = -ERANGE;
431 }
432 break;
433 }
434
435 /* load from immediate operand */
436 case BYTECODE_OP_LOAD_STRING:
437 case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
438 {
439 struct load_op *insn = (struct load_op *) pc;
440 uint32_t str_len, maxlen;
441
442 if (unlikely(pc + sizeof(struct load_op)
443 > start_pc + bytecode->len)) {
444 ret = -ERANGE;
445 break;
446 }
447
448 maxlen = start_pc + bytecode->len - pc - sizeof(struct load_op);
449 str_len = strnlen(insn->data, maxlen);
450 if (unlikely(str_len >= maxlen)) {
451 /* Final '\0' not found within range */
452 ret = -ERANGE;
453 }
454 break;
455 }
456
457 case BYTECODE_OP_LOAD_S64:
458 {
459 if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric)
460 > start_pc + bytecode->len)) {
461 ret = -ERANGE;
462 }
463 break;
464 }
465
466 case BYTECODE_OP_CAST_TO_S64:
467 case BYTECODE_OP_CAST_NOP:
468 {
469 if (unlikely(pc + sizeof(struct cast_op)
470 > start_pc + bytecode->len)) {
471 ret = -ERANGE;
472 }
473 break;
474 }
475
476 /*
477 * Instructions for recursive traversal through composed types.
478 */
479 case BYTECODE_OP_GET_CONTEXT_ROOT:
480 case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
481 case BYTECODE_OP_GET_PAYLOAD_ROOT:
482 case BYTECODE_OP_LOAD_FIELD:
483 case BYTECODE_OP_LOAD_FIELD_S8:
484 case BYTECODE_OP_LOAD_FIELD_S16:
485 case BYTECODE_OP_LOAD_FIELD_S32:
486 case BYTECODE_OP_LOAD_FIELD_S64:
487 case BYTECODE_OP_LOAD_FIELD_U8:
488 case BYTECODE_OP_LOAD_FIELD_U16:
489 case BYTECODE_OP_LOAD_FIELD_U32:
490 case BYTECODE_OP_LOAD_FIELD_U64:
491 case BYTECODE_OP_LOAD_FIELD_STRING:
492 case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
493 case BYTECODE_OP_LOAD_FIELD_DOUBLE:
494 if (unlikely(pc + sizeof(struct load_op)
495 > start_pc + bytecode->len)) {
496 ret = -ERANGE;
497 }
498 break;
499
500 case BYTECODE_OP_GET_SYMBOL:
501 {
502 struct load_op *insn = (struct load_op *) pc;
503 struct get_symbol *sym = (struct get_symbol *) insn->data;
504
505 if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_symbol)
506 > start_pc + bytecode->len)) {
507 ret = -ERANGE;
508 break;
509 }
510 ret = validate_get_symbol(bytecode, sym);
511 break;
512 }
513
514 case BYTECODE_OP_GET_SYMBOL_FIELD:
515 printk(KERN_WARNING "Unexpected get symbol field\n");
516 ret = -EINVAL;
517 break;
518
519 case BYTECODE_OP_GET_INDEX_U16:
520 if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u16)
521 > start_pc + bytecode->len)) {
522 ret = -ERANGE;
523 }
524 break;
525
526 case BYTECODE_OP_GET_INDEX_U64:
527 if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u64)
528 > start_pc + bytecode->len)) {
529 ret = -ERANGE;
530 }
531 break;
532 }
533
534 return ret;
535 }
536
537 static
538 unsigned long delete_all_nodes(struct mp_table *mp_table)
539 {
540 struct mp_node *mp_node;
541 struct hlist_node *tmp;
542 unsigned long nr_nodes = 0;
543 int i;
544
545 for (i = 0; i < MERGE_POINT_TABLE_SIZE; i++) {
546 struct hlist_head *head;
547
548 head = &mp_table->mp_head[i];
549 lttng_hlist_for_each_entry_safe(mp_node, tmp, head, node) {
550 kfree(mp_node);
551 nr_nodes++;
552 }
553 }
554 return nr_nodes;
555 }
556
557 /*
558 * Return value:
559 * >=0: success (a positive value means operand typing is dynamic/unknown)
560 * <0: error
561 */
562 static
563 int validate_instruction_context(struct bytecode_runtime *bytecode,
564 struct vstack *stack,
565 char *start_pc,
566 char *pc)
567 {
568 int ret = 0;
569 const bytecode_opcode_t opcode = *(bytecode_opcode_t *) pc;
570
571 switch (opcode) {
572 case BYTECODE_OP_UNKNOWN:
573 default:
574 {
575 printk(KERN_WARNING "unknown bytecode op %u\n",
576 (unsigned int) *(bytecode_opcode_t *) pc);
577 ret = -EINVAL;
578 goto end;
579 }
580
581 case BYTECODE_OP_RETURN:
582 case BYTECODE_OP_RETURN_S64:
583 {
584 goto end;
585 }
586
587 /* binary */
588 case BYTECODE_OP_MUL:
589 case BYTECODE_OP_DIV:
590 case BYTECODE_OP_MOD:
591 case BYTECODE_OP_PLUS:
592 case BYTECODE_OP_MINUS:
593 /* Floating point */
594 case BYTECODE_OP_EQ_DOUBLE:
595 case BYTECODE_OP_NE_DOUBLE:
596 case BYTECODE_OP_GT_DOUBLE:
597 case BYTECODE_OP_LT_DOUBLE:
598 case BYTECODE_OP_GE_DOUBLE:
599 case BYTECODE_OP_LE_DOUBLE:
600 case BYTECODE_OP_EQ_DOUBLE_S64:
601 case BYTECODE_OP_NE_DOUBLE_S64:
602 case BYTECODE_OP_GT_DOUBLE_S64:
603 case BYTECODE_OP_LT_DOUBLE_S64:
604 case BYTECODE_OP_GE_DOUBLE_S64:
605 case BYTECODE_OP_LE_DOUBLE_S64:
606 case BYTECODE_OP_EQ_S64_DOUBLE:
607 case BYTECODE_OP_NE_S64_DOUBLE:
608 case BYTECODE_OP_GT_S64_DOUBLE:
609 case BYTECODE_OP_LT_S64_DOUBLE:
610 case BYTECODE_OP_GE_S64_DOUBLE:
611 case BYTECODE_OP_LE_S64_DOUBLE:
612 case BYTECODE_OP_UNARY_PLUS_DOUBLE:
613 case BYTECODE_OP_UNARY_MINUS_DOUBLE:
614 case BYTECODE_OP_UNARY_NOT_DOUBLE:
615 case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
616 case BYTECODE_OP_LOAD_DOUBLE:
617 case BYTECODE_OP_CAST_DOUBLE_TO_S64:
618 case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
619 {
620 printk(KERN_WARNING "unsupported bytecode op %u\n",
621 (unsigned int) *(bytecode_opcode_t *) pc);
622 ret = -EINVAL;
623 goto end;
624 }
625
626 case BYTECODE_OP_EQ:
627 {
628 ret = bin_op_compare_check(stack, opcode, "==");
629 if (ret < 0)
630 goto end;
631 break;
632 }
633 case BYTECODE_OP_NE:
634 {
635 ret = bin_op_compare_check(stack, opcode, "!=");
636 if (ret < 0)
637 goto end;
638 break;
639 }
640 case BYTECODE_OP_GT:
641 {
642 ret = bin_op_compare_check(stack, opcode, ">");
643 if (ret < 0)
644 goto end;
645 break;
646 }
647 case BYTECODE_OP_LT:
648 {
649 ret = bin_op_compare_check(stack, opcode, "<");
650 if (ret < 0)
651 goto end;
652 break;
653 }
654 case BYTECODE_OP_GE:
655 {
656 ret = bin_op_compare_check(stack, opcode, ">=");
657 if (ret < 0)
658 goto end;
659 break;
660 }
661 case BYTECODE_OP_LE:
662 {
663 ret = bin_op_compare_check(stack, opcode, "<=");
664 if (ret < 0)
665 goto end;
666 break;
667 }
668
669 case BYTECODE_OP_EQ_STRING:
670 case BYTECODE_OP_NE_STRING:
671 case BYTECODE_OP_GT_STRING:
672 case BYTECODE_OP_LT_STRING:
673 case BYTECODE_OP_GE_STRING:
674 case BYTECODE_OP_LE_STRING:
675 {
676 if (!vstack_ax(stack) || !vstack_bx(stack)) {
677 printk(KERN_WARNING "Empty stack\n");
678 ret = -EINVAL;
679 goto end;
680 }
681 if (vstack_ax(stack)->type != REG_STRING
682 || vstack_bx(stack)->type != REG_STRING) {
683 printk(KERN_WARNING "Unexpected register type for string comparator\n");
684 ret = -EINVAL;
685 goto end;
686 }
687 break;
688 }
689
690
691 case BYTECODE_OP_EQ_STAR_GLOB_STRING:
692 case BYTECODE_OP_NE_STAR_GLOB_STRING:
693 {
694 if (!vstack_ax(stack) || !vstack_bx(stack)) {
695 printk(KERN_WARNING "Empty stack\n");
696 ret = -EINVAL;
697 goto end;
698 }
699 if (vstack_ax(stack)->type != REG_STAR_GLOB_STRING
700 && vstack_bx(stack)->type != REG_STAR_GLOB_STRING) {
701 printk(KERN_WARNING "Unexpected register type for globbing pattern comparator\n");
702 ret = -EINVAL;
703 goto end;
704 }
705 break;
706 }
707
708 case BYTECODE_OP_EQ_S64:
709 case BYTECODE_OP_NE_S64:
710 case BYTECODE_OP_GT_S64:
711 case BYTECODE_OP_LT_S64:
712 case BYTECODE_OP_GE_S64:
713 case BYTECODE_OP_LE_S64:
714 {
715 if (!vstack_ax(stack) || !vstack_bx(stack)) {
716 printk(KERN_WARNING "Empty stack\n");
717 ret = -EINVAL;
718 goto end;
719 }
720 switch (vstack_ax(stack)->type) {
721 case REG_S64:
722 case REG_U64:
723 break;
724 default:
725 printk(KERN_WARNING "Unexpected register type for s64 comparator\n");
726 ret = -EINVAL;
727 goto end;
728 }
729 switch (vstack_bx(stack)->type) {
730 case REG_S64:
731 case REG_U64:
732 break;
733 default:
734 printk(KERN_WARNING "Unexpected register type for s64 comparator\n");
735 ret = -EINVAL;
736 goto end;
737 }
738 break;
739 }
740
741 case BYTECODE_OP_BIT_RSHIFT:
742 ret = bin_op_bitwise_check(stack, opcode, ">>");
743 if (ret < 0)
744 goto end;
745 break;
746 case BYTECODE_OP_BIT_LSHIFT:
747 ret = bin_op_bitwise_check(stack, opcode, "<<");
748 if (ret < 0)
749 goto end;
750 break;
751 case BYTECODE_OP_BIT_AND:
752 ret = bin_op_bitwise_check(stack, opcode, "&");
753 if (ret < 0)
754 goto end;
755 break;
756 case BYTECODE_OP_BIT_OR:
757 ret = bin_op_bitwise_check(stack, opcode, "|");
758 if (ret < 0)
759 goto end;
760 break;
761 case BYTECODE_OP_BIT_XOR:
762 ret = bin_op_bitwise_check(stack, opcode, "^");
763 if (ret < 0)
764 goto end;
765 break;
766
767 /* unary */
768 case BYTECODE_OP_UNARY_PLUS:
769 case BYTECODE_OP_UNARY_MINUS:
770 case BYTECODE_OP_UNARY_NOT:
771 {
772 if (!vstack_ax(stack)) {
773 printk(KERN_WARNING "Empty stack\n");
774 ret = -EINVAL;
775 goto end;
776 }
777 switch (vstack_ax(stack)->type) {
778 default:
779 case REG_DOUBLE:
780 printk(KERN_WARNING "unknown register type\n");
781 ret = -EINVAL;
782 goto end;
783
784 case REG_STRING:
785 case REG_STAR_GLOB_STRING:
786 printk(KERN_WARNING "Unary op can only be applied to numeric or floating point registers\n");
787 ret = -EINVAL;
788 goto end;
789 case REG_S64:
790 case REG_U64:
791 case REG_TYPE_UNKNOWN:
792 break;
793 }
794 break;
795 }
796 case BYTECODE_OP_UNARY_BIT_NOT:
797 {
798 if (!vstack_ax(stack)) {
799 printk(KERN_WARNING "Empty stack\n");
800 ret = -EINVAL;
801 goto end;
802 }
803 switch (vstack_ax(stack)->type) {
804 default:
805 printk(KERN_WARNING "unknown register type\n");
806 ret = -EINVAL;
807 goto end;
808
809 case REG_STRING:
810 case REG_STAR_GLOB_STRING:
811 case REG_DOUBLE:
812 printk(KERN_WARNING "Unary bitwise op can only be applied to numeric registers\n");
813 ret = -EINVAL;
814 goto end;
815 case REG_S64:
816 case REG_U64:
817 case REG_TYPE_UNKNOWN:
818 break;
819 }
820 break;
821 }
822
823 case BYTECODE_OP_UNARY_PLUS_S64:
824 case BYTECODE_OP_UNARY_MINUS_S64:
825 case BYTECODE_OP_UNARY_NOT_S64:
826 {
827 if (!vstack_ax(stack)) {
828 printk(KERN_WARNING "Empty stack\n");
829 ret = -EINVAL;
830 goto end;
831 }
832 if (vstack_ax(stack)->type != REG_S64 &&
833 vstack_ax(stack)->type != REG_U64) {
834 printk(KERN_WARNING "Invalid register type\n");
835 ret = -EINVAL;
836 goto end;
837 }
838 break;
839 }
840
841 /* logical */
842 case BYTECODE_OP_AND:
843 case BYTECODE_OP_OR:
844 {
845 struct logical_op *insn = (struct logical_op *) pc;
846
847 if (!vstack_ax(stack)) {
848 printk(KERN_WARNING "Empty stack\n");
849 ret = -EINVAL;
850 goto end;
851 }
852 if (vstack_ax(stack)->type != REG_S64 &&
853 vstack_ax(stack)->type != REG_U64) {
854 printk(KERN_WARNING "Logical comparator expects S64 or U64 register\n");
855 ret = -EINVAL;
856 goto end;
857 }
858
859 dbg_printk("Validate jumping to bytecode offset %u\n",
860 (unsigned int) insn->skip_offset);
861 if (unlikely(start_pc + insn->skip_offset <= pc)) {
862 printk(KERN_WARNING "Loops are not allowed in bytecode\n");
863 ret = -EINVAL;
864 goto end;
865 }
866 break;
867 }
868
869 /* load field ref */
870 case BYTECODE_OP_LOAD_FIELD_REF:
871 {
872 printk(KERN_WARNING "Unknown field ref type\n");
873 ret = -EINVAL;
874 goto end;
875 }
876 case BYTECODE_OP_LOAD_FIELD_REF_STRING:
877 case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
878 case BYTECODE_OP_LOAD_FIELD_REF_USER_STRING:
879 case BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE:
880 {
881 struct load_op *insn = (struct load_op *) pc;
882 struct field_ref *ref = (struct field_ref *) insn->data;
883
884 dbg_printk("Validate load field ref offset %u type string\n",
885 ref->offset);
886 break;
887 }
888 case BYTECODE_OP_LOAD_FIELD_REF_S64:
889 {
890 struct load_op *insn = (struct load_op *) pc;
891 struct field_ref *ref = (struct field_ref *) insn->data;
892
893 dbg_printk("Validate load field ref offset %u type s64\n",
894 ref->offset);
895 break;
896 }
897
898 /* load from immediate operand */
899 case BYTECODE_OP_LOAD_STRING:
900 case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
901 {
902 break;
903 }
904
905 case BYTECODE_OP_LOAD_S64:
906 {
907 break;
908 }
909
910 case BYTECODE_OP_CAST_TO_S64:
911 {
912 struct cast_op *insn = (struct cast_op *) pc;
913
914 if (!vstack_ax(stack)) {
915 printk(KERN_WARNING "Empty stack\n");
916 ret = -EINVAL;
917 goto end;
918 }
919 switch (vstack_ax(stack)->type) {
920 default:
921 case REG_DOUBLE:
922 printk(KERN_WARNING "unknown register type\n");
923 ret = -EINVAL;
924 goto end;
925
926 case REG_STRING:
927 case REG_STAR_GLOB_STRING:
928 printk(KERN_WARNING "Cast op can only be applied to numeric or floating point registers\n");
929 ret = -EINVAL;
930 goto end;
931 case REG_S64:
932 break;
933 }
934 if (insn->op == BYTECODE_OP_CAST_DOUBLE_TO_S64) {
935 if (vstack_ax(stack)->type != REG_DOUBLE) {
936 printk(KERN_WARNING "Cast expects double\n");
937 ret = -EINVAL;
938 goto end;
939 }
940 }
941 break;
942 }
943 case BYTECODE_OP_CAST_NOP:
944 {
945 break;
946 }
947
948 /* get context ref */
949 case BYTECODE_OP_GET_CONTEXT_REF:
950 {
951 printk(KERN_WARNING "Unknown get context ref type\n");
952 ret = -EINVAL;
953 goto end;
954 }
955 case BYTECODE_OP_GET_CONTEXT_REF_STRING:
956 {
957 struct load_op *insn = (struct load_op *) pc;
958 struct field_ref *ref = (struct field_ref *) insn->data;
959
960 dbg_printk("Validate get context ref offset %u type string\n",
961 ref->offset);
962 break;
963 }
964 case BYTECODE_OP_GET_CONTEXT_REF_S64:
965 {
966 struct load_op *insn = (struct load_op *) pc;
967 struct field_ref *ref = (struct field_ref *) insn->data;
968
969 dbg_printk("Validate get context ref offset %u type s64\n",
970 ref->offset);
971 break;
972 }
973
974 /*
975 * Instructions for recursive traversal through composed types.
976 */
977 case BYTECODE_OP_GET_CONTEXT_ROOT:
978 {
979 dbg_printk("Validate get context root\n");
980 break;
981 }
982 case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
983 {
984 dbg_printk("Validate get app context root\n");
985 break;
986 }
987 case BYTECODE_OP_GET_PAYLOAD_ROOT:
988 {
989 dbg_printk("Validate get payload root\n");
990 break;
991 }
992 case BYTECODE_OP_LOAD_FIELD:
993 {
994 /*
995 * We tolerate that field type is unknown at validation,
996 * because we are performing the load specialization in
997 * a phase after validation.
998 */
999 dbg_printk("Validate load field\n");
1000 break;
1001 }
1002 case BYTECODE_OP_LOAD_FIELD_S8:
1003 {
1004 dbg_printk("Validate load field s8\n");
1005 break;
1006 }
1007 case BYTECODE_OP_LOAD_FIELD_S16:
1008 {
1009 dbg_printk("Validate load field s16\n");
1010 break;
1011 }
1012 case BYTECODE_OP_LOAD_FIELD_S32:
1013 {
1014 dbg_printk("Validate load field s32\n");
1015 break;
1016 }
1017 case BYTECODE_OP_LOAD_FIELD_S64:
1018 {
1019 dbg_printk("Validate load field s64\n");
1020 break;
1021 }
1022 case BYTECODE_OP_LOAD_FIELD_U8:
1023 {
1024 dbg_printk("Validate load field u8\n");
1025 break;
1026 }
1027 case BYTECODE_OP_LOAD_FIELD_U16:
1028 {
1029 dbg_printk("Validate load field u16\n");
1030 break;
1031 }
1032 case BYTECODE_OP_LOAD_FIELD_U32:
1033 {
1034 dbg_printk("Validate load field u32\n");
1035 break;
1036 }
1037 case BYTECODE_OP_LOAD_FIELD_U64:
1038 {
1039 dbg_printk("Validate load field u64\n");
1040 break;
1041 }
1042 case BYTECODE_OP_LOAD_FIELD_STRING:
1043 {
1044 dbg_printk("Validate load field string\n");
1045 break;
1046 }
1047 case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
1048 {
1049 dbg_printk("Validate load field sequence\n");
1050 break;
1051 }
1052 case BYTECODE_OP_LOAD_FIELD_DOUBLE:
1053 {
1054 dbg_printk("Validate load field double\n");
1055 break;
1056 }
1057
1058 case BYTECODE_OP_GET_SYMBOL:
1059 {
1060 struct load_op *insn = (struct load_op *) pc;
1061 struct get_symbol *sym = (struct get_symbol *) insn->data;
1062
1063 dbg_printk("Validate get symbol offset %u\n", sym->offset);
1064 break;
1065 }
1066
1067 case BYTECODE_OP_GET_SYMBOL_FIELD:
1068 {
1069 struct load_op *insn = (struct load_op *) pc;
1070 struct get_symbol *sym = (struct get_symbol *) insn->data;
1071
1072 dbg_printk("Validate get symbol field offset %u\n", sym->offset);
1073 break;
1074 }
1075
1076 case BYTECODE_OP_GET_INDEX_U16:
1077 {
1078 struct load_op *insn = (struct load_op *) pc;
1079 struct get_index_u16 *get_index = (struct get_index_u16 *) insn->data;
1080
1081 dbg_printk("Validate get index u16 index %u\n", get_index->index);
1082 break;
1083 }
1084
1085 case BYTECODE_OP_GET_INDEX_U64:
1086 {
1087 struct load_op *insn = (struct load_op *) pc;
1088 struct get_index_u64 *get_index = (struct get_index_u64 *) insn->data;
1089
1090 dbg_printk("Validate get index u64 index %llu\n",
1091 (unsigned long long) get_index->index);
1092 break;
1093 }
1094 }
1095 end:
1096 return ret;
1097 }
1098
1099 /*
1100 * Return value:
1101 * 0: success
1102 * <0: error
1103 */
1104 static
1105 int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
1106 struct mp_table *mp_table,
1107 struct vstack *stack,
1108 char *start_pc,
1109 char *pc)
1110 {
1111 int ret, found = 0;
1112 unsigned long target_pc = pc - start_pc;
1113 unsigned long hash;
1114 struct hlist_head *head;
1115 struct mp_node *mp_node;
1116
1117 /* Validate the context resulting from the previous instruction */
1118 ret = validate_instruction_context(bytecode, stack, start_pc, pc);
1119 if (ret < 0)
1120 return ret;
1121
1122 /* Validate merge points */
1123 hash = jhash_1word(target_pc, 0);
1124 head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
1125 lttng_hlist_for_each_entry(mp_node, head, node) {
1126 if (lttng_hash_match(mp_node, target_pc)) {
1127 found = 1;
1128 break;
1129 }
1130 }
1131 if (found) {
1132 dbg_printk("Bytecode: validate merge point at offset %lu\n",
1133 target_pc);
1134 if (merge_points_compare(stack, &mp_node->stack)) {
1135 printk(KERN_WARNING "Merge points differ for offset %lu\n",
1136 target_pc);
1137 return -EINVAL;
1138 }
1139 /* Once validated, we can remove the merge point */
1140 dbg_printk("Bytecode: remove merge point at offset %lu\n",
1141 target_pc);
1142 hlist_del(&mp_node->node);
1143 }
1144 return 0;
1145 }
1146
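/*
 * exec_insn() abstractly executes one instruction against the virtual type
 * stack: it pushes and pops register types the way the interpreter pushes
 * and pops values, advances *_next_pc by the size of the instruction, and
 * registers a merge point for the short-circuit target of every and/or
 * instruction.
 */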
1147 /*
1148 * Return value:
1149 * >0: going to next insn.
1150 * 0: success, stop iteration.
1151 * <0: error
1152 */
1153 static
1154 int exec_insn(struct bytecode_runtime *bytecode,
1155 struct mp_table *mp_table,
1156 struct vstack *stack,
1157 char **_next_pc,
1158 char *pc)
1159 {
1160 int ret = 1;
1161 char *next_pc = *_next_pc;
1162
1163 switch (*(bytecode_opcode_t *) pc) {
1164 case BYTECODE_OP_UNKNOWN:
1165 default:
1166 {
1167 printk(KERN_WARNING "unknown bytecode op %u\n",
1168 (unsigned int) *(bytecode_opcode_t *) pc);
1169 ret = -EINVAL;
1170 goto end;
1171 }
1172
1173 case BYTECODE_OP_RETURN:
1174 {
1175 if (!vstack_ax(stack)) {
1176 printk(KERN_WARNING "Empty stack\n");
1177 ret = -EINVAL;
1178 goto end;
1179 }
1180 switch (vstack_ax(stack)->type) {
1181 case REG_S64:
1182 case REG_U64:
1183 case REG_DOUBLE:
1184 case REG_STRING:
1185 case REG_PTR:
1186 case REG_TYPE_UNKNOWN:
1187 break;
1188 default:
1189 printk(KERN_WARNING "Unexpected register type %d at end of bytecode\n",
1190 (int) vstack_ax(stack)->type);
1191 ret = -EINVAL;
1192 goto end;
1193 }
1194
1195 ret = 0;
1196 goto end;
1197 }
1198
1199 case BYTECODE_OP_RETURN_S64:
1200 {
1201 if (!vstack_ax(stack)) {
1202 printk(KERN_WARNING "Empty stack\n");
1203 ret = -EINVAL;
1204 goto end;
1205 }
1206 switch (vstack_ax(stack)->type) {
1207 case REG_S64:
1208 case REG_U64:
1209 break;
1210 default:
1211 case REG_TYPE_UNKNOWN:
1212 printk(KERN_WARNING "Unexpected register type %d at end of bytecode\n",
1213 (int) vstack_ax(stack)->type);
1214 ret = -EINVAL;
1215 goto end;
1216 }
1217
1218 ret = 0;
1219 goto end;
1220 }
1221
1222 /* binary */
1223 case BYTECODE_OP_MUL:
1224 case BYTECODE_OP_DIV:
1225 case BYTECODE_OP_MOD:
1226 case BYTECODE_OP_PLUS:
1227 case BYTECODE_OP_MINUS:
1228 /* Floating point */
1229 case BYTECODE_OP_EQ_DOUBLE:
1230 case BYTECODE_OP_NE_DOUBLE:
1231 case BYTECODE_OP_GT_DOUBLE:
1232 case BYTECODE_OP_LT_DOUBLE:
1233 case BYTECODE_OP_GE_DOUBLE:
1234 case BYTECODE_OP_LE_DOUBLE:
1235 case BYTECODE_OP_EQ_DOUBLE_S64:
1236 case BYTECODE_OP_NE_DOUBLE_S64:
1237 case BYTECODE_OP_GT_DOUBLE_S64:
1238 case BYTECODE_OP_LT_DOUBLE_S64:
1239 case BYTECODE_OP_GE_DOUBLE_S64:
1240 case BYTECODE_OP_LE_DOUBLE_S64:
1241 case BYTECODE_OP_EQ_S64_DOUBLE:
1242 case BYTECODE_OP_NE_S64_DOUBLE:
1243 case BYTECODE_OP_GT_S64_DOUBLE:
1244 case BYTECODE_OP_LT_S64_DOUBLE:
1245 case BYTECODE_OP_GE_S64_DOUBLE:
1246 case BYTECODE_OP_LE_S64_DOUBLE:
1247 case BYTECODE_OP_UNARY_PLUS_DOUBLE:
1248 case BYTECODE_OP_UNARY_MINUS_DOUBLE:
1249 case BYTECODE_OP_UNARY_NOT_DOUBLE:
1250 case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
1251 case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
1252 case BYTECODE_OP_LOAD_DOUBLE:
1253 case BYTECODE_OP_CAST_DOUBLE_TO_S64:
1254 {
1255 printk(KERN_WARNING "unsupported bytecode op %u\n",
1256 (unsigned int) *(bytecode_opcode_t *) pc);
1257 ret = -EINVAL;
1258 goto end;
1259 }
1260
1261 case BYTECODE_OP_EQ:
1262 case BYTECODE_OP_NE:
1263 case BYTECODE_OP_GT:
1264 case BYTECODE_OP_LT:
1265 case BYTECODE_OP_GE:
1266 case BYTECODE_OP_LE:
1267 case BYTECODE_OP_EQ_STRING:
1268 case BYTECODE_OP_NE_STRING:
1269 case BYTECODE_OP_GT_STRING:
1270 case BYTECODE_OP_LT_STRING:
1271 case BYTECODE_OP_GE_STRING:
1272 case BYTECODE_OP_LE_STRING:
1273 case BYTECODE_OP_EQ_STAR_GLOB_STRING:
1274 case BYTECODE_OP_NE_STAR_GLOB_STRING:
1275 case BYTECODE_OP_EQ_S64:
1276 case BYTECODE_OP_NE_S64:
1277 case BYTECODE_OP_GT_S64:
1278 case BYTECODE_OP_LT_S64:
1279 case BYTECODE_OP_GE_S64:
1280 case BYTECODE_OP_LE_S64:
1281 {
1282 /* Pop 2, push 1 */
1283 if (vstack_pop(stack)) {
1284 ret = -EINVAL;
1285 goto end;
1286 }
1287 if (!vstack_ax(stack)) {
1288 printk(KERN_WARNING "Empty stack\n");
1289 ret = -EINVAL;
1290 goto end;
1291 }
1292 switch (vstack_ax(stack)->type) {
1293 case REG_S64:
1294 case REG_U64:
1295 case REG_DOUBLE:
1296 case REG_STRING:
1297 case REG_STAR_GLOB_STRING:
1298 case REG_TYPE_UNKNOWN:
1299 break;
1300 default:
1301 printk(KERN_WARNING "Unexpected register type %d for operation\n",
1302 (int) vstack_ax(stack)->type);
1303 ret = -EINVAL;
1304 goto end;
1305 }
1306
1307 vstack_ax(stack)->type = REG_S64;
1308 next_pc += sizeof(struct binary_op);
1309 break;
1310 }
1311 case BYTECODE_OP_BIT_RSHIFT:
1312 case BYTECODE_OP_BIT_LSHIFT:
1313 case BYTECODE_OP_BIT_AND:
1314 case BYTECODE_OP_BIT_OR:
1315 case BYTECODE_OP_BIT_XOR:
1316 {
1317 /* Pop 2, push 1 */
1318 if (vstack_pop(stack)) {
1319 ret = -EINVAL;
1320 goto end;
1321 }
1322 if (!vstack_ax(stack)) {
1323 printk(KERN_WARNING "Empty stack\n");
1324 ret = -EINVAL;
1325 goto end;
1326 }
1327 switch (vstack_ax(stack)->type) {
1328 case REG_S64:
1329 case REG_U64:
1330 case REG_DOUBLE:
1331 case REG_STRING:
1332 case REG_STAR_GLOB_STRING:
1333 case REG_TYPE_UNKNOWN:
1334 break;
1335 default:
1336 printk(KERN_WARNING "Unexpected register type %d for operation\n",
1337 (int) vstack_ax(stack)->type);
1338 ret = -EINVAL;
1339 goto end;
1340 }
1341
1342 vstack_ax(stack)->type = REG_U64;
1343 next_pc += sizeof(struct binary_op);
1344 break;
1345 }
1346
1347 /* unary */
1348 case BYTECODE_OP_UNARY_PLUS:
1349 case BYTECODE_OP_UNARY_MINUS:
1350 {
1351 /* Pop 1, push 1 */
1352 if (!vstack_ax(stack)) {
1353 printk(KERN_WARNING "Empty stack\n");
1354 ret = -EINVAL;
1355 goto end;
1356 }
1357 switch (vstack_ax(stack)->type) {
1358 case REG_S64:
1359 case REG_U64:
1360 case REG_TYPE_UNKNOWN:
1361 break;
1362 default:
1363 printk(KERN_WARNING "Unexpected register type %d for operation\n",
1364 (int) vstack_ax(stack)->type);
1365 ret = -EINVAL;
1366 goto end;
1367 }
1368
1369 vstack_ax(stack)->type = REG_TYPE_UNKNOWN;
1370 next_pc += sizeof(struct unary_op);
1371 break;
1372 }
1373
1374 case BYTECODE_OP_UNARY_PLUS_S64:
1375 case BYTECODE_OP_UNARY_MINUS_S64:
1376 case BYTECODE_OP_UNARY_NOT_S64:
1377 {
1378 /* Pop 1, push 1 */
1379 if (!vstack_ax(stack)) {
1380 printk(KERN_WARNING "Empty stack\n");
1381 ret = -EINVAL;
1382 goto end;
1383 }
1384 switch (vstack_ax(stack)->type) {
1385 case REG_S64:
1386 case REG_U64:
1387 break;
1388 default:
1389 printk(KERN_WARNING "Unexpected register type %d for operation\n",
1390 (int) vstack_ax(stack)->type);
1391 ret = -EINVAL;
1392 goto end;
1393 }
1394
1395 next_pc += sizeof(struct unary_op);
1396 break;
1397 }
1398
1399 case BYTECODE_OP_UNARY_NOT:
1400 {
1401 /* Pop 1, push 1 */
1402 if (!vstack_ax(stack)) {
1403 printk(KERN_WARNING "Empty stack\n");
1404 ret = -EINVAL;
1405 goto end;
1406 }
1407 switch (vstack_ax(stack)->type) {
1408 case REG_S64:
1409 case REG_U64:
1410 case REG_TYPE_UNKNOWN:
1411 break;
1412 default:
1413 printk(KERN_WARNING "Unexpected register type %d for operation\n",
1414 (int) vstack_ax(stack)->type);
1415 ret = -EINVAL;
1416 goto end;
1417 }
1418
1419 next_pc += sizeof(struct unary_op);
1420 break;
1421 }
1422
1423 case BYTECODE_OP_UNARY_BIT_NOT:
1424 {
1425 /* Pop 1, push 1 */
1426 if (!vstack_ax(stack)) {
1427 printk(KERN_WARNING "Empty stack\n");
1428 ret = -EINVAL;
1429 goto end;
1430 }
1431 switch (vstack_ax(stack)->type) {
1432 case REG_S64:
1433 case REG_U64:
1434 case REG_TYPE_UNKNOWN:
1435 break;
1436 case REG_DOUBLE:
1437 default:
1438 printk(KERN_WARNING "Unexpected register type %d for operation\n",
1439 (int) vstack_ax(stack)->type);
1440 ret = -EINVAL;
1441 goto end;
1442 }
1443
1444 vstack_ax(stack)->type = REG_U64;
1445 next_pc += sizeof(struct unary_op);
1446 break;
1447 }
1448
1449 /* logical */
1450 case BYTECODE_OP_AND:
1451 case BYTECODE_OP_OR:
1452 {
1453 struct logical_op *insn = (struct logical_op *) pc;
1454 int merge_ret;
1455
1456 /* Add merge point to table */
1457 merge_ret = merge_point_add_check(mp_table,
1458 insn->skip_offset, stack);
1459 if (merge_ret) {
1460 ret = merge_ret;
1461 goto end;
1462 }
1463
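/*
 * The merge point recorded above describes the stack seen when the
 * short-circuit jump is taken: the left operand stays on the stack as the
 * result of the whole and/or expression. The fall-through path pops it
 * below before evaluating the right operand.
 */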
1464 if (!vstack_ax(stack)) {
1465 printk(KERN_WARNING "Empty stack\n");
1466 ret = -EINVAL;
1467 goto end;
1468 }
1469 /* There is always a cast-to-s64 operation before an or/and op. */
1470 switch (vstack_ax(stack)->type) {
1471 case REG_S64:
1472 case REG_U64:
1473 break;
1474 default:
1475 printk(KERN_WARNING "Incorrect register type %d for operation\n",
1476 (int) vstack_ax(stack)->type);
1477 ret = -EINVAL;
1478 goto end;
1479 }
1480
1481 /* Continue to next instruction */
1482 /* Pop 1 when jump not taken */
1483 if (vstack_pop(stack)) {
1484 ret = -EINVAL;
1485 goto end;
1486 }
1487 next_pc += sizeof(struct logical_op);
1488 break;
1489 }
1490
1491 /* load field ref */
1492 case BYTECODE_OP_LOAD_FIELD_REF:
1493 {
1494 printk(KERN_WARNING "Unknown field ref type\n");
1495 ret = -EINVAL;
1496 goto end;
1497 }
1498 /* get context ref */
1499 case BYTECODE_OP_GET_CONTEXT_REF:
1500 {
1501 printk(KERN_WARNING "Unknown get context ref type\n");
1502 ret = -EINVAL;
1503 goto end;
1504 }
1505 case BYTECODE_OP_LOAD_FIELD_REF_STRING:
1506 case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
1507 case BYTECODE_OP_GET_CONTEXT_REF_STRING:
1508 case BYTECODE_OP_LOAD_FIELD_REF_USER_STRING:
1509 case BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE:
1510 {
1511 if (vstack_push(stack)) {
1512 ret = -EINVAL;
1513 goto end;
1514 }
1515 vstack_ax(stack)->type = REG_STRING;
1516 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1517 break;
1518 }
1519 case BYTECODE_OP_LOAD_FIELD_REF_S64:
1520 case BYTECODE_OP_GET_CONTEXT_REF_S64:
1521 {
1522 if (vstack_push(stack)) {
1523 ret = -EINVAL;
1524 goto end;
1525 }
1526 vstack_ax(stack)->type = REG_S64;
1527 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1528 break;
1529 }
1530
1531 /* load from immediate operand */
1532 case BYTECODE_OP_LOAD_STRING:
1533 {
1534 struct load_op *insn = (struct load_op *) pc;
1535
1536 if (vstack_push(stack)) {
1537 ret = -EINVAL;
1538 goto end;
1539 }
1540 vstack_ax(stack)->type = REG_STRING;
1541 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1542 break;
1543 }
1544
1545 case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
1546 {
1547 struct load_op *insn = (struct load_op *) pc;
1548
1549 if (vstack_push(stack)) {
1550 ret = -EINVAL;
1551 goto end;
1552 }
1553 vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
1554 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1555 break;
1556 }
1557
1558 case BYTECODE_OP_LOAD_S64:
1559 {
1560 if (vstack_push(stack)) {
1561 ret = -EINVAL;
1562 goto end;
1563 }
1564 vstack_ax(stack)->type = REG_S64;
1565 next_pc += sizeof(struct load_op)
1566 + sizeof(struct literal_numeric);
1567 break;
1568 }
1569
1570 case BYTECODE_OP_CAST_TO_S64:
1571 {
1572 /* Pop 1, push 1 */
1573 if (!vstack_ax(stack)) {
1574 printk(KERN_WARNING "Empty stack\n");
1575 ret = -EINVAL;
1576 goto end;
1577 }
1578 switch (vstack_ax(stack)->type) {
1579 case REG_S64:
1580 case REG_U64:
1581 case REG_DOUBLE:
1582 case REG_TYPE_UNKNOWN:
1583 break;
1584 default:
1585 printk(KERN_WARNING "Incorrect register type %d for cast\n",
1586 (int) vstack_ax(stack)->type);
1587 ret = -EINVAL;
1588 goto end;
1589 }
1590 vstack_ax(stack)->type = REG_S64;
1591 next_pc += sizeof(struct cast_op);
1592 break;
1593 }
1594 case BYTECODE_OP_CAST_NOP:
1595 {
1596 next_pc += sizeof(struct cast_op);
1597 break;
1598 }
1599
1600 /*
1601 * Instructions for recursive traversal through composed types.
1602 */
1603 case BYTECODE_OP_GET_CONTEXT_ROOT:
1604 case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
1605 case BYTECODE_OP_GET_PAYLOAD_ROOT:
1606 {
1607 if (vstack_push(stack)) {
1608 ret = -EINVAL;
1609 goto end;
1610 }
1611 vstack_ax(stack)->type = REG_PTR;
1612 next_pc += sizeof(struct load_op);
1613 break;
1614 }
1615
1616 case BYTECODE_OP_LOAD_FIELD:
1617 {
1618 /* Pop 1, push 1 */
1619 if (!vstack_ax(stack)) {
1620 printk(KERN_WARNING "Empty stack\n");
1621 ret = -EINVAL;
1622 goto end;
1623 }
1624 if (vstack_ax(stack)->type != REG_PTR) {
1625 printk(KERN_WARNING "Expecting pointer on top of stack\n");
1626 ret = -EINVAL;
1627 goto end;
1628 }
1629 vstack_ax(stack)->type = REG_TYPE_UNKNOWN;
1630 next_pc += sizeof(struct load_op);
1631 break;
1632 }
1633
1634 case BYTECODE_OP_LOAD_FIELD_S8:
1635 case BYTECODE_OP_LOAD_FIELD_S16:
1636 case BYTECODE_OP_LOAD_FIELD_S32:
1637 case BYTECODE_OP_LOAD_FIELD_S64:
1638 {
1639 /* Pop 1, push 1 */
1640 if (!vstack_ax(stack)) {
1641 printk(KERN_WARNING "Empty stack\n");
1642 ret = -EINVAL;
1643 goto end;
1644 }
1645 if (vstack_ax(stack)->type != REG_PTR) {
1646 printk(KERN_WARNING "Expecting pointer on top of stack\n");
1647 ret = -EINVAL;
1648 goto end;
1649 }
1650 vstack_ax(stack)->type = REG_S64;
1651 next_pc += sizeof(struct load_op);
1652 break;
1653 }
1654 case BYTECODE_OP_LOAD_FIELD_U8:
1655 case BYTECODE_OP_LOAD_FIELD_U16:
1656 case BYTECODE_OP_LOAD_FIELD_U32:
1657 case BYTECODE_OP_LOAD_FIELD_U64:
1658 {
1659 /* Pop 1, push 1 */
1660 if (!vstack_ax(stack)) {
1661 printk(KERN_WARNING "Empty stack\n");
1662 ret = -EINVAL;
1663 goto end;
1664 }
1665 if (vstack_ax(stack)->type != REG_PTR) {
1666 printk(KERN_WARNING "Expecting pointer on top of stack\n");
1667 ret = -EINVAL;
1668 goto end;
1669 }
1670 vstack_ax(stack)->type = REG_U64;
1671 next_pc += sizeof(struct load_op);
1672 break;
1673 }
1674 case BYTECODE_OP_LOAD_FIELD_STRING:
1675 case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
1676 {
1677 /* Pop 1, push 1 */
1678 if (!vstack_ax(stack)) {
1679 printk(KERN_WARNING "Empty stack\n");
1680 ret = -EINVAL;
1681 goto end;
1682 }
1683 if (vstack_ax(stack)->type != REG_PTR) {
1684 printk(KERN_WARNING "Expecting pointer on top of stack\n");
1685 ret = -EINVAL;
1686 goto end;
1687 }
1688 vstack_ax(stack)->type = REG_STRING;
1689 next_pc += sizeof(struct load_op);
1690 break;
1691 }
1692
1693 case BYTECODE_OP_LOAD_FIELD_DOUBLE:
1694 {
1695 /* Pop 1, push 1 */
1696 if (!vstack_ax(stack)) {
1697 printk(KERN_WARNING "Empty stack\n");
1698 ret = -EINVAL;
1699 goto end;
1700 }
1701 if (vstack_ax(stack)->type != REG_PTR) {
1702 printk(KERN_WARNING "Expecting pointer on top of stack\n");
1703 ret = -EINVAL;
1704 goto end;
1705 }
1706 vstack_ax(stack)->type = REG_DOUBLE;
1707 next_pc += sizeof(struct load_op);
1708 break;
1709 }
1710
1711 case BYTECODE_OP_GET_SYMBOL:
1712 case BYTECODE_OP_GET_SYMBOL_FIELD:
1713 {
1714 /* Pop 1, push 1 */
1715 if (!vstack_ax(stack)) {
1716 printk(KERN_WARNING "Empty stack\n");
1717 ret = -EINVAL;
1718 goto end;
1719 }
1720 if (vstack_ax(stack)->type != REG_PTR) {
1721 printk(KERN_WARNING "Expecting pointer on top of stack\n");
1722 ret = -EINVAL;
1723 goto end;
1724 }
1725 next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
1726 break;
1727 }
1728
1729 case BYTECODE_OP_GET_INDEX_U16:
1730 {
1731 /* Pop 1, push 1 */
1732 if (!vstack_ax(stack)) {
1733 printk(KERN_WARNING "Empty stack\n");
1734 ret = -EINVAL;
1735 goto end;
1736 }
1737 if (vstack_ax(stack)->type != REG_PTR) {
1738 printk(KERN_WARNING "Expecting pointer on top of stack\n");
1739 ret = -EINVAL;
1740 goto end;
1741 }
1742 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
1743 break;
1744 }
1745
1746 case BYTECODE_OP_GET_INDEX_U64:
1747 {
1748 /* Pop 1, push 1 */
1749 if (!vstack_ax(stack)) {
1750 printk(KERN_WARNING "Empty stack\n");
1751 ret = -EINVAL;
1752 goto end;
1753 }
1754 if (vstack_ax(stack)->type != REG_PTR) {
1755 printk(KERN_WARNING "Expecting pointer on top of stack\n");
1756 ret = -EINVAL;
1757 goto end;
1758 }
1759 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
1760 break;
1761 }
1762
1763 }
1764 end:
1765 *_next_pc = next_pc;
1766 return ret;
1767 }
1768
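/*
 * Main validation entry point. The bytecode is traversed linearly; for each
 * instruction, bytecode_validate_overflow() checks that it fits within the
 * buffer, validate_instruction_all_contexts() type-checks it against the
 * current virtual stack and any merge point recorded for its offset, and
 * exec_insn() abstractly executes it to compute the next pc.
 */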
1769 /*
1770 * Never called concurrently (hash seed is shared).
1771 */
1772 int lttng_bytecode_validate(struct bytecode_runtime *bytecode)
1773 {
1774 struct mp_table *mp_table;
1775 char *pc, *next_pc, *start_pc;
1776 int ret = -EINVAL;
1777 struct vstack stack;
1778
1779 vstack_init(&stack);
1780
1781 mp_table = kzalloc(sizeof(*mp_table), GFP_KERNEL);
1782 if (!mp_table) {
1783 printk(KERN_WARNING "Error allocating hash table for bytecode validation\n");
1784 return -ENOMEM;
1785 }
1786 start_pc = &bytecode->code[0];
1787 for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
1788 pc = next_pc) {
1789 ret = bytecode_validate_overflow(bytecode, start_pc, pc);
1790 if (ret != 0) {
1791 if (ret == -ERANGE)
1792 printk(KERN_WARNING "bytecode overflow\n");
1793 goto end;
1794 }
1795 dbg_printk("Validating op %s (%u)\n",
1796 lttng_bytecode_print_op((unsigned int) *(bytecode_opcode_t *) pc),
1797 (unsigned int) *(bytecode_opcode_t *) pc);
1798
1799 /*
1800 * For each instruction, validate the current context
1801 * (traversal of entire execution flow), and validate
1802 * all merge points targeting this instruction.
1803 */
1804 ret = validate_instruction_all_contexts(bytecode, mp_table,
1805 &stack, start_pc, pc);
1806 if (ret)
1807 goto end;
1808 ret = exec_insn(bytecode, mp_table, &stack, &next_pc, pc);
1809 if (ret <= 0)
1810 goto end;
1811 }
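/*
 * On a fully successful traversal, every merge point registered by and/or
 * instructions has been reached and removed; leftover nodes therefore
 * indicate an inconsistency and are reported below when no earlier error
 * was returned.
 */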
1812 end:
1813 if (delete_all_nodes(mp_table)) {
1814 if (!ret) {
1815 printk(KERN_WARNING "Unexpected merge points\n");
1816 ret = -EINVAL;
1817 }
1818 }
1819 kfree(mp_table);
1820 return ret;
1821 }
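/*
 * Illustrative, self-contained sketch (hypothetical toy_* names, user
 * space, not part of LTTng): the validator above is an abstract interpreter
 * over register *types* rather than values. The disabled block below applies
 * the same idea to a minimal two-type stack machine: loads push a type,
 * comparisons pop two matching types and push s64, and a return is only
 * valid with a single s64 result on the stack.
 */
#if 0
#include <stdio.h>

enum toy_reg_type { TOY_S64, TOY_STRING };
enum toy_op { TOY_OP_LOAD_S64, TOY_OP_LOAD_STRING, TOY_OP_EQ, TOY_OP_RETURN };

/* Validate a toy program by tracking only the types it pushes and pops. */
static int toy_validate(const enum toy_op *prog, int len)
{
	enum toy_reg_type stack[16];
	int top = -1, i;

	for (i = 0; i < len; i++) {
		switch (prog[i]) {
		case TOY_OP_LOAD_S64:
		case TOY_OP_LOAD_STRING:
			if (top + 1 >= 16)
				return -1;	/* virtual stack overflow */
			stack[++top] = (prog[i] == TOY_OP_LOAD_S64) ?
					TOY_S64 : TOY_STRING;
			break;
		case TOY_OP_EQ:
			if (top < 1)
				return -1;	/* needs two operands */
			if (stack[top] != stack[top - 1])
				return -1;	/* operand type mismatch */
			top--;			/* pop 2, push 1 */
			stack[top] = TOY_S64;	/* comparison result is s64 */
			break;
		case TOY_OP_RETURN:
			/* exactly one s64 result expected */
			return (top == 0 && stack[top] == TOY_S64) ? 0 : -1;
		}
	}
	return -1;	/* fell off the end without a return */
}

int main(void)
{
	enum toy_op ok[] = { TOY_OP_LOAD_S64, TOY_OP_LOAD_S64,
			TOY_OP_EQ, TOY_OP_RETURN };
	enum toy_op bad[] = { TOY_OP_LOAD_S64, TOY_OP_LOAD_STRING,
			TOY_OP_EQ, TOY_OP_RETURN };

	printf("ok:  %d\n", toy_validate(ok, 4));	/* prints 0 */
	printf("bad: %d\n", toy_validate(bad, 4));	/* prints -1 */
	return 0;
}
#endif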