/*
 * lttng-filter-interpreter.c
 *
 * LTTng modules filter interpreter.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
27 #include <linux/uaccess.h>
28 #include <wrapper/frame.h>
30 #include <lttng-filter.h>
31 #include <lttng-string-utils.h>
/*
 * The bytecode interpreter uses computed gotos, which produce a stack
 * frame objtool cannot validate; whitelist the function explicitly.
 * (Wrapper provided by <wrapper/frame.h> — NOTE(review): confirm the
 * wrapper maps to STACK_FRAME_NON_STANDARD on kernels that provide it.)
 */
LTTNG_STACK_FRAME_NON_STANDARD(lttng_filter_interpret_bytecode);
36 * get_char should be called with page fault handler disabled if it is expected
37 * to handle user-space read.
40 char get_char(struct estack_entry
*reg
, size_t offset
)
42 if (unlikely(offset
>= reg
->u
.s
.seq_len
))
47 /* Handle invalid access as end of string. */
48 if (unlikely(!access_ok(VERIFY_READ
,
49 reg
->u
.s
.user_str
+ offset
,
52 /* Handle fault (nonzero return value) as end of string. */
53 if (unlikely(__copy_from_user_inatomic(&c
,
54 reg
->u
.s
.user_str
+ offset
,
59 return reg
->u
.s
.str
[offset
];
65 * -2: unknown escape char.
69 int parse_char(struct estack_entry
*reg
, char *c
, size_t *offset
)
74 *c
= get_char(reg
, *offset
);
90 char get_char_at_cb(size_t at
, void *data
)
92 return get_char(data
, at
);
96 int stack_star_glob_match(struct estack
*stack
, int top
, const char *cmp_type
)
98 bool has_user
= false;
101 struct estack_entry
*pattern_reg
;
102 struct estack_entry
*candidate_reg
;
104 if (estack_bx(stack
, top
)->u
.s
.user
105 || estack_ax(stack
, top
)->u
.s
.user
) {
112 /* Find out which side is the pattern vs. the candidate. */
113 if (estack_ax(stack
, top
)->u
.s
.literal_type
== ESTACK_STRING_LITERAL_TYPE_STAR_GLOB
) {
114 pattern_reg
= estack_ax(stack
, top
);
115 candidate_reg
= estack_bx(stack
, top
);
117 pattern_reg
= estack_bx(stack
, top
);
118 candidate_reg
= estack_ax(stack
, top
);
121 /* Perform the match operation. */
122 result
= !strutils_star_glob_match_char_cb(get_char_at_cb
,
123 pattern_reg
, get_char_at_cb
, candidate_reg
);
133 int stack_strcmp(struct estack
*stack
, int top
, const char *cmp_type
)
135 size_t offset_bx
= 0, offset_ax
= 0;
136 int diff
, has_user
= 0;
139 if (estack_bx(stack
, top
)->u
.s
.user
140 || estack_ax(stack
, top
)->u
.s
.user
) {
150 char char_bx
, char_ax
;
152 char_bx
= get_char(estack_bx(stack
, top
), offset_bx
);
153 char_ax
= get_char(estack_ax(stack
, top
), offset_ax
);
155 if (unlikely(char_bx
== '\0')) {
156 if (char_ax
== '\0') {
160 if (estack_ax(stack
, top
)->u
.s
.literal_type
==
161 ESTACK_STRING_LITERAL_TYPE_PLAIN
) {
162 ret
= parse_char(estack_ax(stack
, top
),
163 &char_ax
, &offset_ax
);
173 if (unlikely(char_ax
== '\0')) {
174 if (estack_bx(stack
, top
)->u
.s
.literal_type
==
175 ESTACK_STRING_LITERAL_TYPE_PLAIN
) {
176 ret
= parse_char(estack_bx(stack
, top
),
177 &char_bx
, &offset_bx
);
186 if (estack_bx(stack
, top
)->u
.s
.literal_type
==
187 ESTACK_STRING_LITERAL_TYPE_PLAIN
) {
188 ret
= parse_char(estack_bx(stack
, top
),
189 &char_bx
, &offset_bx
);
193 } else if (ret
== -2) {
196 /* else compare both char */
198 if (estack_ax(stack
, top
)->u
.s
.literal_type
==
199 ESTACK_STRING_LITERAL_TYPE_PLAIN
) {
200 ret
= parse_char(estack_ax(stack
, top
),
201 &char_ax
, &offset_ax
);
205 } else if (ret
== -2) {
222 diff
= char_bx
- char_ax
;
/*
 * lttng_filter_false - trivially-false filter.
 * @filter_data: unused.
 * @lttng_probe_ctx: unused.
 * @filter_stack_data: unused.
 *
 * Always returns 0 (discard the event) — used where a filter is known
 * to never match, so the bytecode interpreter can be skipped entirely.
 */
uint64_t lttng_filter_false(void *filter_data,
		struct lttng_probe_ctx *lttng_probe_ctx,
		const char *filter_stack_data)
{
	return 0;
}
242 #ifdef INTERPRETER_USE_SWITCH
245 * Fallback for compilers that do not support taking address of labels.
249 start_pc = &bytecode->data[0]; \
250 for (pc = next_pc = start_pc; pc - start_pc < bytecode->len; \
252 dbg_printk("Executing op %s (%u)\n", \
253 lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc), \
254 (unsigned int) *(filter_opcode_t *) pc); \
255 switch (*(filter_opcode_t *) pc) {
257 #define OP(name) case name
267 * Dispatch-table based interpreter.
271 start_pc = &bytecode->data[0]; \
272 pc = next_pc = start_pc; \
273 if (unlikely(pc - start_pc >= bytecode->len)) \
275 goto *dispatch[*(filter_opcode_t *) pc];
282 goto *dispatch[*(filter_opcode_t *) pc];
289 * Return 0 (discard), or raise the 0x1 flag (log event).
290 * Currently, other flags are kept for future extensions and have no
293 uint64_t lttng_filter_interpret_bytecode(void *filter_data
,
294 struct lttng_probe_ctx
*lttng_probe_ctx
,
295 const char *filter_stack_data
)
297 struct bytecode_runtime
*bytecode
= filter_data
;
298 void *pc
, *next_pc
, *start_pc
;
301 struct estack _stack
;
302 struct estack
*stack
= &_stack
;
303 register int64_t ax
= 0, bx
= 0;
304 register int top
= FILTER_STACK_EMPTY
;
305 #ifndef INTERPRETER_USE_SWITCH
306 static void *dispatch
[NR_FILTER_OPS
] = {
307 [ FILTER_OP_UNKNOWN
] = &&LABEL_FILTER_OP_UNKNOWN
,
309 [ FILTER_OP_RETURN
] = &&LABEL_FILTER_OP_RETURN
,
312 [ FILTER_OP_MUL
] = &&LABEL_FILTER_OP_MUL
,
313 [ FILTER_OP_DIV
] = &&LABEL_FILTER_OP_DIV
,
314 [ FILTER_OP_MOD
] = &&LABEL_FILTER_OP_MOD
,
315 [ FILTER_OP_PLUS
] = &&LABEL_FILTER_OP_PLUS
,
316 [ FILTER_OP_MINUS
] = &&LABEL_FILTER_OP_MINUS
,
317 [ FILTER_OP_RSHIFT
] = &&LABEL_FILTER_OP_RSHIFT
,
318 [ FILTER_OP_LSHIFT
] = &&LABEL_FILTER_OP_LSHIFT
,
319 [ FILTER_OP_BIN_AND
] = &&LABEL_FILTER_OP_BIN_AND
,
320 [ FILTER_OP_BIN_OR
] = &&LABEL_FILTER_OP_BIN_OR
,
321 [ FILTER_OP_BIN_XOR
] = &&LABEL_FILTER_OP_BIN_XOR
,
323 /* binary comparators */
324 [ FILTER_OP_EQ
] = &&LABEL_FILTER_OP_EQ
,
325 [ FILTER_OP_NE
] = &&LABEL_FILTER_OP_NE
,
326 [ FILTER_OP_GT
] = &&LABEL_FILTER_OP_GT
,
327 [ FILTER_OP_LT
] = &&LABEL_FILTER_OP_LT
,
328 [ FILTER_OP_GE
] = &&LABEL_FILTER_OP_GE
,
329 [ FILTER_OP_LE
] = &&LABEL_FILTER_OP_LE
,
331 /* string binary comparator */
332 [ FILTER_OP_EQ_STRING
] = &&LABEL_FILTER_OP_EQ_STRING
,
333 [ FILTER_OP_NE_STRING
] = &&LABEL_FILTER_OP_NE_STRING
,
334 [ FILTER_OP_GT_STRING
] = &&LABEL_FILTER_OP_GT_STRING
,
335 [ FILTER_OP_LT_STRING
] = &&LABEL_FILTER_OP_LT_STRING
,
336 [ FILTER_OP_GE_STRING
] = &&LABEL_FILTER_OP_GE_STRING
,
337 [ FILTER_OP_LE_STRING
] = &&LABEL_FILTER_OP_LE_STRING
,
339 /* globbing pattern binary comparator */
340 [ FILTER_OP_EQ_STAR_GLOB_STRING
] = &&LABEL_FILTER_OP_EQ_STAR_GLOB_STRING
,
341 [ FILTER_OP_NE_STAR_GLOB_STRING
] = &&LABEL_FILTER_OP_NE_STAR_GLOB_STRING
,
343 /* s64 binary comparator */
344 [ FILTER_OP_EQ_S64
] = &&LABEL_FILTER_OP_EQ_S64
,
345 [ FILTER_OP_NE_S64
] = &&LABEL_FILTER_OP_NE_S64
,
346 [ FILTER_OP_GT_S64
] = &&LABEL_FILTER_OP_GT_S64
,
347 [ FILTER_OP_LT_S64
] = &&LABEL_FILTER_OP_LT_S64
,
348 [ FILTER_OP_GE_S64
] = &&LABEL_FILTER_OP_GE_S64
,
349 [ FILTER_OP_LE_S64
] = &&LABEL_FILTER_OP_LE_S64
,
351 /* double binary comparator */
352 [ FILTER_OP_EQ_DOUBLE
] = &&LABEL_FILTER_OP_EQ_DOUBLE
,
353 [ FILTER_OP_NE_DOUBLE
] = &&LABEL_FILTER_OP_NE_DOUBLE
,
354 [ FILTER_OP_GT_DOUBLE
] = &&LABEL_FILTER_OP_GT_DOUBLE
,
355 [ FILTER_OP_LT_DOUBLE
] = &&LABEL_FILTER_OP_LT_DOUBLE
,
356 [ FILTER_OP_GE_DOUBLE
] = &&LABEL_FILTER_OP_GE_DOUBLE
,
357 [ FILTER_OP_LE_DOUBLE
] = &&LABEL_FILTER_OP_LE_DOUBLE
,
359 /* Mixed S64-double binary comparators */
360 [ FILTER_OP_EQ_DOUBLE_S64
] = &&LABEL_FILTER_OP_EQ_DOUBLE_S64
,
361 [ FILTER_OP_NE_DOUBLE_S64
] = &&LABEL_FILTER_OP_NE_DOUBLE_S64
,
362 [ FILTER_OP_GT_DOUBLE_S64
] = &&LABEL_FILTER_OP_GT_DOUBLE_S64
,
363 [ FILTER_OP_LT_DOUBLE_S64
] = &&LABEL_FILTER_OP_LT_DOUBLE_S64
,
364 [ FILTER_OP_GE_DOUBLE_S64
] = &&LABEL_FILTER_OP_GE_DOUBLE_S64
,
365 [ FILTER_OP_LE_DOUBLE_S64
] = &&LABEL_FILTER_OP_LE_DOUBLE_S64
,
367 [ FILTER_OP_EQ_S64_DOUBLE
] = &&LABEL_FILTER_OP_EQ_S64_DOUBLE
,
368 [ FILTER_OP_NE_S64_DOUBLE
] = &&LABEL_FILTER_OP_NE_S64_DOUBLE
,
369 [ FILTER_OP_GT_S64_DOUBLE
] = &&LABEL_FILTER_OP_GT_S64_DOUBLE
,
370 [ FILTER_OP_LT_S64_DOUBLE
] = &&LABEL_FILTER_OP_LT_S64_DOUBLE
,
371 [ FILTER_OP_GE_S64_DOUBLE
] = &&LABEL_FILTER_OP_GE_S64_DOUBLE
,
372 [ FILTER_OP_LE_S64_DOUBLE
] = &&LABEL_FILTER_OP_LE_S64_DOUBLE
,
375 [ FILTER_OP_UNARY_PLUS
] = &&LABEL_FILTER_OP_UNARY_PLUS
,
376 [ FILTER_OP_UNARY_MINUS
] = &&LABEL_FILTER_OP_UNARY_MINUS
,
377 [ FILTER_OP_UNARY_NOT
] = &&LABEL_FILTER_OP_UNARY_NOT
,
378 [ FILTER_OP_UNARY_PLUS_S64
] = &&LABEL_FILTER_OP_UNARY_PLUS_S64
,
379 [ FILTER_OP_UNARY_MINUS_S64
] = &&LABEL_FILTER_OP_UNARY_MINUS_S64
,
380 [ FILTER_OP_UNARY_NOT_S64
] = &&LABEL_FILTER_OP_UNARY_NOT_S64
,
381 [ FILTER_OP_UNARY_PLUS_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_PLUS_DOUBLE
,
382 [ FILTER_OP_UNARY_MINUS_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_MINUS_DOUBLE
,
383 [ FILTER_OP_UNARY_NOT_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_NOT_DOUBLE
,
386 [ FILTER_OP_AND
] = &&LABEL_FILTER_OP_AND
,
387 [ FILTER_OP_OR
] = &&LABEL_FILTER_OP_OR
,
390 [ FILTER_OP_LOAD_FIELD_REF
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF
,
391 [ FILTER_OP_LOAD_FIELD_REF_STRING
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_STRING
,
392 [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_SEQUENCE
,
393 [ FILTER_OP_LOAD_FIELD_REF_S64
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_S64
,
394 [ FILTER_OP_LOAD_FIELD_REF_DOUBLE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_DOUBLE
,
396 /* load from immediate operand */
397 [ FILTER_OP_LOAD_STRING
] = &&LABEL_FILTER_OP_LOAD_STRING
,
398 [ FILTER_OP_LOAD_STAR_GLOB_STRING
] = &&LABEL_FILTER_OP_LOAD_STAR_GLOB_STRING
,
399 [ FILTER_OP_LOAD_S64
] = &&LABEL_FILTER_OP_LOAD_S64
,
400 [ FILTER_OP_LOAD_DOUBLE
] = &&LABEL_FILTER_OP_LOAD_DOUBLE
,
403 [ FILTER_OP_CAST_TO_S64
] = &&LABEL_FILTER_OP_CAST_TO_S64
,
404 [ FILTER_OP_CAST_DOUBLE_TO_S64
] = &&LABEL_FILTER_OP_CAST_DOUBLE_TO_S64
,
405 [ FILTER_OP_CAST_NOP
] = &&LABEL_FILTER_OP_CAST_NOP
,
407 /* get context ref */
408 [ FILTER_OP_GET_CONTEXT_REF
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF
,
409 [ FILTER_OP_GET_CONTEXT_REF_STRING
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_STRING
,
410 [ FILTER_OP_GET_CONTEXT_REF_S64
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_S64
,
411 [ FILTER_OP_GET_CONTEXT_REF_DOUBLE
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_DOUBLE
,
413 /* load userspace field ref */
414 [ FILTER_OP_LOAD_FIELD_REF_USER_STRING
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_STRING
,
415 [ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
,
417 #endif /* #ifndef INTERPRETER_USE_SWITCH */
421 OP(FILTER_OP_UNKNOWN
):
422 OP(FILTER_OP_LOAD_FIELD_REF
):
423 OP(FILTER_OP_GET_CONTEXT_REF
):
424 #ifdef INTERPRETER_USE_SWITCH
426 #endif /* INTERPRETER_USE_SWITCH */
427 printk(KERN_WARNING
"unknown bytecode op %u\n",
428 (unsigned int) *(filter_opcode_t
*) pc
);
432 OP(FILTER_OP_RETURN
):
433 /* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */
434 retval
= !!estack_ax_v
;
444 OP(FILTER_OP_RSHIFT
):
445 OP(FILTER_OP_LSHIFT
):
446 OP(FILTER_OP_BIN_AND
):
447 OP(FILTER_OP_BIN_OR
):
448 OP(FILTER_OP_BIN_XOR
):
449 printk(KERN_WARNING
"unsupported bytecode op %u\n",
450 (unsigned int) *(filter_opcode_t
*) pc
);
460 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
461 (unsigned int) *(filter_opcode_t
*) pc
);
465 OP(FILTER_OP_EQ_STRING
):
469 res
= (stack_strcmp(stack
, top
, "==") == 0);
470 estack_pop(stack
, top
, ax
, bx
);
472 next_pc
+= sizeof(struct binary_op
);
475 OP(FILTER_OP_NE_STRING
):
479 res
= (stack_strcmp(stack
, top
, "!=") != 0);
480 estack_pop(stack
, top
, ax
, bx
);
482 next_pc
+= sizeof(struct binary_op
);
485 OP(FILTER_OP_GT_STRING
):
489 res
= (stack_strcmp(stack
, top
, ">") > 0);
490 estack_pop(stack
, top
, ax
, bx
);
492 next_pc
+= sizeof(struct binary_op
);
495 OP(FILTER_OP_LT_STRING
):
499 res
= (stack_strcmp(stack
, top
, "<") < 0);
500 estack_pop(stack
, top
, ax
, bx
);
502 next_pc
+= sizeof(struct binary_op
);
505 OP(FILTER_OP_GE_STRING
):
509 res
= (stack_strcmp(stack
, top
, ">=") >= 0);
510 estack_pop(stack
, top
, ax
, bx
);
512 next_pc
+= sizeof(struct binary_op
);
515 OP(FILTER_OP_LE_STRING
):
519 res
= (stack_strcmp(stack
, top
, "<=") <= 0);
520 estack_pop(stack
, top
, ax
, bx
);
522 next_pc
+= sizeof(struct binary_op
);
526 OP(FILTER_OP_EQ_STAR_GLOB_STRING
):
530 res
= (stack_star_glob_match(stack
, top
, "==") == 0);
531 estack_pop(stack
, top
, ax
, bx
);
533 next_pc
+= sizeof(struct binary_op
);
536 OP(FILTER_OP_NE_STAR_GLOB_STRING
):
540 res
= (stack_star_glob_match(stack
, top
, "!=") != 0);
541 estack_pop(stack
, top
, ax
, bx
);
543 next_pc
+= sizeof(struct binary_op
);
547 OP(FILTER_OP_EQ_S64
):
551 res
= (estack_bx_v
== estack_ax_v
);
552 estack_pop(stack
, top
, ax
, bx
);
554 next_pc
+= sizeof(struct binary_op
);
557 OP(FILTER_OP_NE_S64
):
561 res
= (estack_bx_v
!= estack_ax_v
);
562 estack_pop(stack
, top
, ax
, bx
);
564 next_pc
+= sizeof(struct binary_op
);
567 OP(FILTER_OP_GT_S64
):
571 res
= (estack_bx_v
> estack_ax_v
);
572 estack_pop(stack
, top
, ax
, bx
);
574 next_pc
+= sizeof(struct binary_op
);
577 OP(FILTER_OP_LT_S64
):
581 res
= (estack_bx_v
< estack_ax_v
);
582 estack_pop(stack
, top
, ax
, bx
);
584 next_pc
+= sizeof(struct binary_op
);
587 OP(FILTER_OP_GE_S64
):
591 res
= (estack_bx_v
>= estack_ax_v
);
592 estack_pop(stack
, top
, ax
, bx
);
594 next_pc
+= sizeof(struct binary_op
);
597 OP(FILTER_OP_LE_S64
):
601 res
= (estack_bx_v
<= estack_ax_v
);
602 estack_pop(stack
, top
, ax
, bx
);
604 next_pc
+= sizeof(struct binary_op
);
608 OP(FILTER_OP_EQ_DOUBLE
):
609 OP(FILTER_OP_NE_DOUBLE
):
610 OP(FILTER_OP_GT_DOUBLE
):
611 OP(FILTER_OP_LT_DOUBLE
):
612 OP(FILTER_OP_GE_DOUBLE
):
613 OP(FILTER_OP_LE_DOUBLE
):
619 /* Mixed S64-double binary comparators */
620 OP(FILTER_OP_EQ_DOUBLE_S64
):
621 OP(FILTER_OP_NE_DOUBLE_S64
):
622 OP(FILTER_OP_GT_DOUBLE_S64
):
623 OP(FILTER_OP_LT_DOUBLE_S64
):
624 OP(FILTER_OP_GE_DOUBLE_S64
):
625 OP(FILTER_OP_LE_DOUBLE_S64
):
626 OP(FILTER_OP_EQ_S64_DOUBLE
):
627 OP(FILTER_OP_NE_S64_DOUBLE
):
628 OP(FILTER_OP_GT_S64_DOUBLE
):
629 OP(FILTER_OP_LT_S64_DOUBLE
):
630 OP(FILTER_OP_GE_S64_DOUBLE
):
631 OP(FILTER_OP_LE_S64_DOUBLE
):
638 OP(FILTER_OP_UNARY_PLUS
):
639 OP(FILTER_OP_UNARY_MINUS
):
640 OP(FILTER_OP_UNARY_NOT
):
641 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
642 (unsigned int) *(filter_opcode_t
*) pc
);
647 OP(FILTER_OP_UNARY_PLUS_S64
):
649 next_pc
+= sizeof(struct unary_op
);
652 OP(FILTER_OP_UNARY_MINUS_S64
):
654 estack_ax_v
= -estack_ax_v
;
655 next_pc
+= sizeof(struct unary_op
);
658 OP(FILTER_OP_UNARY_PLUS_DOUBLE
):
659 OP(FILTER_OP_UNARY_MINUS_DOUBLE
):
664 OP(FILTER_OP_UNARY_NOT_S64
):
666 estack_ax_v
= !estack_ax_v
;
667 next_pc
+= sizeof(struct unary_op
);
670 OP(FILTER_OP_UNARY_NOT_DOUBLE
):
679 struct logical_op
*insn
= (struct logical_op
*) pc
;
681 /* If AX is 0, skip and evaluate to 0 */
682 if (unlikely(estack_ax_v
== 0)) {
683 dbg_printk("Jumping to bytecode offset %u\n",
684 (unsigned int) insn
->skip_offset
);
685 next_pc
= start_pc
+ insn
->skip_offset
;
687 /* Pop 1 when jump not taken */
688 estack_pop(stack
, top
, ax
, bx
);
689 next_pc
+= sizeof(struct logical_op
);
695 struct logical_op
*insn
= (struct logical_op
*) pc
;
697 /* If AX is nonzero, skip and evaluate to 1 */
699 if (unlikely(estack_ax_v
!= 0)) {
701 dbg_printk("Jumping to bytecode offset %u\n",
702 (unsigned int) insn
->skip_offset
);
703 next_pc
= start_pc
+ insn
->skip_offset
;
705 /* Pop 1 when jump not taken */
706 estack_pop(stack
, top
, ax
, bx
);
707 next_pc
+= sizeof(struct logical_op
);
714 OP(FILTER_OP_LOAD_FIELD_REF_STRING
):
716 struct load_op
*insn
= (struct load_op
*) pc
;
717 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
719 dbg_printk("load field ref offset %u type string\n",
721 estack_push(stack
, top
, ax
, bx
);
722 estack_ax(stack
, top
)->u
.s
.str
=
723 *(const char * const *) &filter_stack_data
[ref
->offset
];
724 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
725 dbg_printk("Filter warning: loading a NULL string.\n");
729 estack_ax(stack
, top
)->u
.s
.seq_len
= UINT_MAX
;
730 estack_ax(stack
, top
)->u
.s
.literal_type
=
731 ESTACK_STRING_LITERAL_TYPE_NONE
;
732 estack_ax(stack
, top
)->u
.s
.user
= 0;
733 dbg_printk("ref load string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
734 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
738 OP(FILTER_OP_LOAD_FIELD_REF_SEQUENCE
):
740 struct load_op
*insn
= (struct load_op
*) pc
;
741 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
743 dbg_printk("load field ref offset %u type sequence\n",
745 estack_push(stack
, top
, ax
, bx
);
746 estack_ax(stack
, top
)->u
.s
.seq_len
=
747 *(unsigned long *) &filter_stack_data
[ref
->offset
];
748 estack_ax(stack
, top
)->u
.s
.str
=
749 *(const char **) (&filter_stack_data
[ref
->offset
750 + sizeof(unsigned long)]);
751 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
752 dbg_printk("Filter warning: loading a NULL sequence.\n");
756 estack_ax(stack
, top
)->u
.s
.literal_type
=
757 ESTACK_STRING_LITERAL_TYPE_NONE
;
758 estack_ax(stack
, top
)->u
.s
.user
= 0;
759 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
763 OP(FILTER_OP_LOAD_FIELD_REF_S64
):
765 struct load_op
*insn
= (struct load_op
*) pc
;
766 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
768 dbg_printk("load field ref offset %u type s64\n",
770 estack_push(stack
, top
, ax
, bx
);
772 ((struct literal_numeric
*) &filter_stack_data
[ref
->offset
])->v
;
773 dbg_printk("ref load s64 %lld\n",
774 (long long) estack_ax_v
);
775 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
779 OP(FILTER_OP_LOAD_FIELD_REF_DOUBLE
):
785 /* load from immediate operand */
786 OP(FILTER_OP_LOAD_STRING
):
788 struct load_op
*insn
= (struct load_op
*) pc
;
790 dbg_printk("load string %s\n", insn
->data
);
791 estack_push(stack
, top
, ax
, bx
);
792 estack_ax(stack
, top
)->u
.s
.str
= insn
->data
;
793 estack_ax(stack
, top
)->u
.s
.seq_len
= UINT_MAX
;
794 estack_ax(stack
, top
)->u
.s
.literal_type
=
795 ESTACK_STRING_LITERAL_TYPE_PLAIN
;
796 estack_ax(stack
, top
)->u
.s
.user
= 0;
797 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
801 OP(FILTER_OP_LOAD_STAR_GLOB_STRING
):
803 struct load_op
*insn
= (struct load_op
*) pc
;
805 dbg_printk("load globbing pattern %s\n", insn
->data
);
806 estack_push(stack
, top
, ax
, bx
);
807 estack_ax(stack
, top
)->u
.s
.str
= insn
->data
;
808 estack_ax(stack
, top
)->u
.s
.seq_len
= UINT_MAX
;
809 estack_ax(stack
, top
)->u
.s
.literal_type
=
810 ESTACK_STRING_LITERAL_TYPE_STAR_GLOB
;
811 estack_ax(stack
, top
)->u
.s
.user
= 0;
812 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
816 OP(FILTER_OP_LOAD_S64
):
818 struct load_op
*insn
= (struct load_op
*) pc
;
820 estack_push(stack
, top
, ax
, bx
);
821 estack_ax_v
= ((struct literal_numeric
*) insn
->data
)->v
;
822 dbg_printk("load s64 %lld\n",
823 (long long) estack_ax_v
);
824 next_pc
+= sizeof(struct load_op
)
825 + sizeof(struct literal_numeric
);
829 OP(FILTER_OP_LOAD_DOUBLE
):
836 OP(FILTER_OP_CAST_TO_S64
):
837 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
838 (unsigned int) *(filter_opcode_t
*) pc
);
842 OP(FILTER_OP_CAST_DOUBLE_TO_S64
):
848 OP(FILTER_OP_CAST_NOP
):
850 next_pc
+= sizeof(struct cast_op
);
854 /* get context ref */
855 OP(FILTER_OP_GET_CONTEXT_REF_STRING
):
857 struct load_op
*insn
= (struct load_op
*) pc
;
858 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
859 struct lttng_ctx_field
*ctx_field
;
860 union lttng_ctx_value v
;
862 dbg_printk("get context ref offset %u type string\n",
864 ctx_field
= <tng_static_ctx
->fields
[ref
->offset
];
865 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
866 estack_push(stack
, top
, ax
, bx
);
867 estack_ax(stack
, top
)->u
.s
.str
= v
.str
;
868 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
869 dbg_printk("Filter warning: loading a NULL string.\n");
873 estack_ax(stack
, top
)->u
.s
.seq_len
= UINT_MAX
;
874 estack_ax(stack
, top
)->u
.s
.literal_type
=
875 ESTACK_STRING_LITERAL_TYPE_NONE
;
876 estack_ax(stack
, top
)->u
.s
.user
= 0;
877 dbg_printk("ref get context string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
878 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
882 OP(FILTER_OP_GET_CONTEXT_REF_S64
):
884 struct load_op
*insn
= (struct load_op
*) pc
;
885 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
886 struct lttng_ctx_field
*ctx_field
;
887 union lttng_ctx_value v
;
889 dbg_printk("get context ref offset %u type s64\n",
891 ctx_field
= <tng_static_ctx
->fields
[ref
->offset
];
892 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
893 estack_push(stack
, top
, ax
, bx
);
895 dbg_printk("ref get context s64 %lld\n",
896 (long long) estack_ax_v
);
897 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
901 OP(FILTER_OP_GET_CONTEXT_REF_DOUBLE
):
907 /* load userspace field ref */
908 OP(FILTER_OP_LOAD_FIELD_REF_USER_STRING
):
910 struct load_op
*insn
= (struct load_op
*) pc
;
911 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
913 dbg_printk("load field ref offset %u type user string\n",
915 estack_push(stack
, top
, ax
, bx
);
916 estack_ax(stack
, top
)->u
.s
.user_str
=
917 *(const char * const *) &filter_stack_data
[ref
->offset
];
918 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
919 dbg_printk("Filter warning: loading a NULL string.\n");
923 estack_ax(stack
, top
)->u
.s
.seq_len
= UINT_MAX
;
924 estack_ax(stack
, top
)->u
.s
.literal_type
=
925 ESTACK_STRING_LITERAL_TYPE_NONE
;
926 estack_ax(stack
, top
)->u
.s
.user
= 1;
927 dbg_printk("ref load string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
928 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
932 OP(FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
):
934 struct load_op
*insn
= (struct load_op
*) pc
;
935 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
937 dbg_printk("load field ref offset %u type user sequence\n",
939 estack_push(stack
, top
, ax
, bx
);
940 estack_ax(stack
, top
)->u
.s
.seq_len
=
941 *(unsigned long *) &filter_stack_data
[ref
->offset
];
942 estack_ax(stack
, top
)->u
.s
.user_str
=
943 *(const char **) (&filter_stack_data
[ref
->offset
944 + sizeof(unsigned long)]);
945 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
946 dbg_printk("Filter warning: loading a NULL sequence.\n");
950 estack_ax(stack
, top
)->u
.s
.literal_type
=
951 ESTACK_STRING_LITERAL_TYPE_NONE
;
952 estack_ax(stack
, top
)->u
.s
.user
= 1;
953 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
959 /* return 0 (discard) on error */