/* SPDX-License-Identifier: MIT
 *
 * LTTng modules filter code.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/list.h>
#include <linux/slab.h>

#include <lttng/filter.h>
static const char *opnames[] = {
	[ FILTER_OP_UNKNOWN ] = "UNKNOWN",

	[ FILTER_OP_RETURN ] = "RETURN",

	/* binary */
	[ FILTER_OP_MUL ] = "MUL",
	[ FILTER_OP_DIV ] = "DIV",
	[ FILTER_OP_MOD ] = "MOD",
	[ FILTER_OP_PLUS ] = "PLUS",
	[ FILTER_OP_MINUS ] = "MINUS",
	[ FILTER_OP_BIT_RSHIFT ] = "BIT_RSHIFT",
	[ FILTER_OP_BIT_LSHIFT ] = "BIT_LSHIFT",
	[ FILTER_OP_BIT_AND ] = "BIT_AND",
	[ FILTER_OP_BIT_OR ] = "BIT_OR",
	[ FILTER_OP_BIT_XOR ] = "BIT_XOR",

	/* binary comparators */
	[ FILTER_OP_EQ ] = "EQ",
	[ FILTER_OP_NE ] = "NE",
	[ FILTER_OP_GT ] = "GT",
	[ FILTER_OP_LT ] = "LT",
	[ FILTER_OP_GE ] = "GE",
	[ FILTER_OP_LE ] = "LE",

	/* string binary comparators */
	[ FILTER_OP_EQ_STRING ] = "EQ_STRING",
	[ FILTER_OP_NE_STRING ] = "NE_STRING",
	[ FILTER_OP_GT_STRING ] = "GT_STRING",
	[ FILTER_OP_LT_STRING ] = "LT_STRING",
	[ FILTER_OP_GE_STRING ] = "GE_STRING",
	[ FILTER_OP_LE_STRING ] = "LE_STRING",

	/* s64 binary comparators */
	[ FILTER_OP_EQ_S64 ] = "EQ_S64",
	[ FILTER_OP_NE_S64 ] = "NE_S64",
	[ FILTER_OP_GT_S64 ] = "GT_S64",
	[ FILTER_OP_LT_S64 ] = "LT_S64",
	[ FILTER_OP_GE_S64 ] = "GE_S64",
	[ FILTER_OP_LE_S64 ] = "LE_S64",

	/* double binary comparators */
	[ FILTER_OP_EQ_DOUBLE ] = "EQ_DOUBLE",
	[ FILTER_OP_NE_DOUBLE ] = "NE_DOUBLE",
	[ FILTER_OP_GT_DOUBLE ] = "GT_DOUBLE",
	[ FILTER_OP_LT_DOUBLE ] = "LT_DOUBLE",
	[ FILTER_OP_GE_DOUBLE ] = "GE_DOUBLE",
	[ FILTER_OP_LE_DOUBLE ] = "LE_DOUBLE",

	/* Mixed S64-double binary comparators */
	[ FILTER_OP_EQ_DOUBLE_S64 ] = "EQ_DOUBLE_S64",
	[ FILTER_OP_NE_DOUBLE_S64 ] = "NE_DOUBLE_S64",
	[ FILTER_OP_GT_DOUBLE_S64 ] = "GT_DOUBLE_S64",
	[ FILTER_OP_LT_DOUBLE_S64 ] = "LT_DOUBLE_S64",
	[ FILTER_OP_GE_DOUBLE_S64 ] = "GE_DOUBLE_S64",
	[ FILTER_OP_LE_DOUBLE_S64 ] = "LE_DOUBLE_S64",

	[ FILTER_OP_EQ_S64_DOUBLE ] = "EQ_S64_DOUBLE",
	[ FILTER_OP_NE_S64_DOUBLE ] = "NE_S64_DOUBLE",
	[ FILTER_OP_GT_S64_DOUBLE ] = "GT_S64_DOUBLE",
	[ FILTER_OP_LT_S64_DOUBLE ] = "LT_S64_DOUBLE",
	[ FILTER_OP_GE_S64_DOUBLE ] = "GE_S64_DOUBLE",
	[ FILTER_OP_LE_S64_DOUBLE ] = "LE_S64_DOUBLE",

	/* unary */
	[ FILTER_OP_UNARY_PLUS ] = "UNARY_PLUS",
	[ FILTER_OP_UNARY_MINUS ] = "UNARY_MINUS",
	[ FILTER_OP_UNARY_NOT ] = "UNARY_NOT",
	[ FILTER_OP_UNARY_PLUS_S64 ] = "UNARY_PLUS_S64",
	[ FILTER_OP_UNARY_MINUS_S64 ] = "UNARY_MINUS_S64",
	[ FILTER_OP_UNARY_NOT_S64 ] = "UNARY_NOT_S64",
	[ FILTER_OP_UNARY_PLUS_DOUBLE ] = "UNARY_PLUS_DOUBLE",
	[ FILTER_OP_UNARY_MINUS_DOUBLE ] = "UNARY_MINUS_DOUBLE",
	[ FILTER_OP_UNARY_NOT_DOUBLE ] = "UNARY_NOT_DOUBLE",

	/* logical */
	[ FILTER_OP_AND ] = "AND",
	[ FILTER_OP_OR ] = "OR",

	/* load field ref */
	[ FILTER_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF",
	[ FILTER_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING",
	[ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE",
	[ FILTER_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64",
	[ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE",

	/* load from immediate operand */
	[ FILTER_OP_LOAD_STRING ] = "LOAD_STRING",
	[ FILTER_OP_LOAD_S64 ] = "LOAD_S64",
	[ FILTER_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE",

	/* cast */
	[ FILTER_OP_CAST_TO_S64 ] = "CAST_TO_S64",
	[ FILTER_OP_CAST_DOUBLE_TO_S64 ] = "CAST_DOUBLE_TO_S64",
	[ FILTER_OP_CAST_NOP ] = "CAST_NOP",

	/* get context ref */
	[ FILTER_OP_GET_CONTEXT_REF ] = "GET_CONTEXT_REF",
	[ FILTER_OP_GET_CONTEXT_REF_STRING ] = "GET_CONTEXT_REF_STRING",
	[ FILTER_OP_GET_CONTEXT_REF_S64 ] = "GET_CONTEXT_REF_S64",
	[ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = "GET_CONTEXT_REF_DOUBLE",

	/* load userspace field ref */
	[ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = "LOAD_FIELD_REF_USER_STRING",
	[ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = "LOAD_FIELD_REF_USER_SEQUENCE",

	/*
	 * load immediate star globbing pattern (literal string)
	 * from immediate.
	 */
	[ FILTER_OP_LOAD_STAR_GLOB_STRING ] = "LOAD_STAR_GLOB_STRING",

	/* globbing pattern binary operator: apply to */
	[ FILTER_OP_EQ_STAR_GLOB_STRING ] = "EQ_STAR_GLOB_STRING",
	[ FILTER_OP_NE_STAR_GLOB_STRING ] = "NE_STAR_GLOB_STRING",

	/*
	 * Instructions for recursive traversal through composed types.
	 */
	[ FILTER_OP_GET_CONTEXT_ROOT ] = "GET_CONTEXT_ROOT",
	[ FILTER_OP_GET_APP_CONTEXT_ROOT ] = "GET_APP_CONTEXT_ROOT",
	[ FILTER_OP_GET_PAYLOAD_ROOT ] = "GET_PAYLOAD_ROOT",

	[ FILTER_OP_GET_SYMBOL ] = "GET_SYMBOL",
	[ FILTER_OP_GET_SYMBOL_FIELD ] = "GET_SYMBOL_FIELD",
	[ FILTER_OP_GET_INDEX_U16 ] = "GET_INDEX_U16",
	[ FILTER_OP_GET_INDEX_U64 ] = "GET_INDEX_U64",

	[ FILTER_OP_LOAD_FIELD ] = "LOAD_FIELD",
	[ FILTER_OP_LOAD_FIELD_S8 ] = "LOAD_FIELD_S8",
	[ FILTER_OP_LOAD_FIELD_S16 ] = "LOAD_FIELD_S16",
	[ FILTER_OP_LOAD_FIELD_S32 ] = "LOAD_FIELD_S32",
	[ FILTER_OP_LOAD_FIELD_S64 ] = "LOAD_FIELD_S64",
	[ FILTER_OP_LOAD_FIELD_U8 ] = "LOAD_FIELD_U8",
	[ FILTER_OP_LOAD_FIELD_U16 ] = "LOAD_FIELD_U16",
	[ FILTER_OP_LOAD_FIELD_U32 ] = "LOAD_FIELD_U32",
	[ FILTER_OP_LOAD_FIELD_U64 ] = "LOAD_FIELD_U64",
	[ FILTER_OP_LOAD_FIELD_STRING ] = "LOAD_FIELD_STRING",
	[ FILTER_OP_LOAD_FIELD_SEQUENCE ] = "LOAD_FIELD_SEQUENCE",
	[ FILTER_OP_LOAD_FIELD_DOUBLE ] = "LOAD_FIELD_DOUBLE",

	[ FILTER_OP_UNARY_BIT_NOT ] = "UNARY_BIT_NOT",

	[ FILTER_OP_RETURN_S64 ] = "RETURN_S64",
};

const char *lttng_filter_print_op(enum filter_op op)
{
	/* Guard the table lookup against out-of-range opcodes. */
	if (op >= NR_FILTER_OPS)
		return "UNKNOWN";
	else
		return opnames[op];
}

static
int apply_field_reloc(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *field_name,
		enum filter_op filter_op)
{
	const struct lttng_event_desc *desc;
	const struct lttng_event_field *fields, *field = NULL;
	unsigned int nr_fields, i;
	struct load_op *op;
	uint32_t field_offset = 0;

	dbg_printk("Apply field reloc: %u %s\n", reloc_offset, field_name);

	/* Lookup event by name */
	desc = event->desc;
	if (!desc)
		return -EINVAL;
	fields = desc->fields;
	if (!fields)
		return -EINVAL;
	nr_fields = desc->nr_fields;
	for (i = 0; i < nr_fields; i++) {
		if (fields[i].nofilter)
			continue;
		if (!strcmp(fields[i].name, field_name)) {
			field = &fields[i];
			break;
		}
		/* compute field offset */
		switch (fields[i].type.atype) {
		case atype_integer:
		case atype_enum_nestable:
			field_offset += sizeof(int64_t);
			break;
		case atype_array_nestable:
			if (!lttng_is_bytewise_integer(fields[i].type.u.array_nestable.elem_type))
				return -EINVAL;
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case atype_sequence_nestable:
			if (!lttng_is_bytewise_integer(fields[i].type.u.sequence_nestable.elem_type))
				return -EINVAL;
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case atype_string:
			field_offset += sizeof(void *);
			break;
		case atype_struct_nestable:	/* Unsupported. */
		case atype_variant_nestable:	/* Unsupported. */
		default:
			return -EINVAL;
		}
	}
	if (!field)
		return -EINVAL;

	/* Check if field offset is too large for 16-bit offset */
	if (field_offset > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
		return -EINVAL;

	/* set type */
	op = (struct load_op *) &runtime->code[reloc_offset];

	switch (filter_op) {
	case FILTER_OP_LOAD_FIELD_REF:
	{
		struct field_ref *field_ref;

		field_ref = (struct field_ref *) op->data;
		switch (field->type.atype) {
		case atype_integer:
		case atype_enum_nestable:
			op->op = FILTER_OP_LOAD_FIELD_REF_S64;
			break;
		case atype_array_nestable:
		case atype_sequence_nestable:
			if (field->user)
				op->op = FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE;
			else
				op->op = FILTER_OP_LOAD_FIELD_REF_SEQUENCE;
			break;
		case atype_string:
			if (field->user)
				op->op = FILTER_OP_LOAD_FIELD_REF_USER_STRING;
			else
				op->op = FILTER_OP_LOAD_FIELD_REF_STRING;
			break;
		case atype_struct_nestable:	/* Unsupported. */
		case atype_variant_nestable:	/* Unsupported. */
		default:
			return -EINVAL;
		}
		/* set offset */
		field_ref->offset = (uint16_t) field_offset;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
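
/*
 * Note on the offset computation above (editorial reading, not from the
 * original comments): field_offset is assumed to index the per-event
 * filter stack data buffer prepared by the probe, not the trace record.
 * Integers and enums are serialized there as an int64_t, arrays and
 * sequences of bytewise integers as an unsigned long length followed by
 * a pointer, and strings as a pointer. As an illustration, for a
 * hypothetical event with fields { int pid; char comm[16]; }, relocating
 * "comm" would yield offset sizeof(int64_t) == 8.
 */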

static
int apply_context_reloc(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *context_name,
		enum filter_op filter_op)
{
	struct load_op *op;
	struct lttng_ctx_field *ctx_field;
	int idx;

	dbg_printk("Apply context reloc: %u %s\n", reloc_offset, context_name);

	/* Get context index */
	idx = lttng_get_context_index(lttng_static_ctx, context_name);
	if (idx < 0)
		return -ENOENT;

	/* Check if idx is too large for 16-bit offset */
	if (idx > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
		return -EINVAL;

	/* Get context return type */
	ctx_field = &lttng_static_ctx->fields[idx];
	op = (struct load_op *) &runtime->code[reloc_offset];

	switch (filter_op) {
	case FILTER_OP_GET_CONTEXT_REF:
	{
		struct field_ref *field_ref;

		field_ref = (struct field_ref *) op->data;
		switch (ctx_field->event_field.type.atype) {
		case atype_integer:
		case atype_enum_nestable:
			op->op = FILTER_OP_GET_CONTEXT_REF_S64;
			break;
			/* Sequence and array supported as string */
		case atype_string:
			BUG_ON(ctx_field->event_field.user);
			op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
			break;
		case atype_array_nestable:
			if (!lttng_is_bytewise_integer(ctx_field->event_field.type.u.array_nestable.elem_type))
				return -EINVAL;
			BUG_ON(ctx_field->event_field.user);
			op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
			break;
		case atype_sequence_nestable:
			if (!lttng_is_bytewise_integer(ctx_field->event_field.type.u.sequence_nestable.elem_type))
				return -EINVAL;
			BUG_ON(ctx_field->event_field.user);
			op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
			break;
		case atype_struct_nestable:	/* Unsupported. */
		case atype_variant_nestable:	/* Unsupported. */
		default:
			return -EINVAL;
		}
		/* set offset to context index within channel contexts */
		field_ref->offset = (uint16_t) idx;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
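
/*
 * Unlike field refs, the offset stored here is a context *index*, not a
 * byte offset: at interpretation time the GET_CONTEXT_REF_* instructions
 * presumably use it to look the field back up in
 * lttng_static_ctx->fields[]. This keeps the reloc valid even though
 * context values are computed on demand rather than serialized into the
 * event record.
 */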

static
int apply_reloc(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *name)
{
	struct load_op *op;

	dbg_printk("Apply reloc: %u %s\n", reloc_offset, name);

	/* Ensure that the reloc is within the code */
	if (runtime_len - reloc_offset < sizeof(uint16_t))
		return -EINVAL;

	op = (struct load_op *) &runtime->code[reloc_offset];
	switch (op->op) {
	case FILTER_OP_LOAD_FIELD_REF:
		return apply_field_reloc(event, runtime, runtime_len,
			reloc_offset, name, op->op);
	case FILTER_OP_GET_CONTEXT_REF:
		return apply_context_reloc(event, runtime, runtime_len,
			reloc_offset, name, op->op);
	case FILTER_OP_GET_SYMBOL:
	case FILTER_OP_GET_SYMBOL_FIELD:
		/*
		 * Will be handled by load specialize phase or
		 * dynamically by interpreter.
		 */
		break;
	default:
		printk(KERN_WARNING "LTTng: filter: Unknown reloc op type %u\n",
			op->op);
		return -EINVAL;
	}
	return 0;
}
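
/*
 * For reference, a reloc table entry as consumed by apply_reloc() is
 * laid out as described in _lttng_filter_event_link_bytecode() below:
 *
 *	uint16_t reloc_offset;	offset of the load_op within the bytecode
 *	char name[];		\0-terminated field or context name
 *
 * so an entry relocating "pid" at bytecode offset 12 would contain the
 * bytes { 0x0c, 0x00, 'p', 'i', 'd', '\0' } on a little-endian build
 * (an illustrative example, not taken from the original source).
 */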

static
int bytecode_is_linked(struct lttng_filter_bytecode_node *filter_bytecode,
		struct lttng_event *event)
{
	struct lttng_bytecode_runtime *bc_runtime;

	list_for_each_entry(bc_runtime,
			&event->bytecode_runtime_head, node) {
		if (bc_runtime->bc == filter_bytecode)
			return 1;
	}
	return 0;
}

/*
 * Take a bytecode with reloc table and link it to an event to create a
 * bytecode runtime.
 */
static
int _lttng_filter_event_link_bytecode(struct lttng_event *event,
		struct lttng_filter_bytecode_node *filter_bytecode,
		struct list_head *insert_loc)
{
	int ret, offset, next_offset;
	struct bytecode_runtime *runtime = NULL;
	size_t runtime_alloc_len;

	if (!filter_bytecode)
		return 0;
	/* Bytecode already linked */
	if (bytecode_is_linked(filter_bytecode, event))
		return 0;

	dbg_printk("Linking...\n");

	/* We don't need the reloc table in the runtime */
	runtime_alloc_len = sizeof(*runtime) + filter_bytecode->bc.reloc_offset;
	runtime = kzalloc(runtime_alloc_len, GFP_KERNEL);
	if (!runtime) {
		ret = -ENOMEM;
		goto alloc_error;
	}
	runtime->p.bc = filter_bytecode;
	runtime->p.event = event;
	runtime->len = filter_bytecode->bc.reloc_offset;
	/* copy original bytecode */
	memcpy(runtime->code, filter_bytecode->bc.data, runtime->len);
	/*
	 * apply relocs. Those are a uint16_t (offset in bytecode)
	 * followed by a string (field name).
	 */
	for (offset = filter_bytecode->bc.reloc_offset;
			offset < filter_bytecode->bc.len;
			offset = next_offset) {
		uint16_t reloc_offset =
			*(uint16_t *) &filter_bytecode->bc.data[offset];
		const char *name =
			(const char *) &filter_bytecode->bc.data[offset + sizeof(uint16_t)];

		ret = apply_reloc(event, runtime, runtime->len, reloc_offset, name);
		if (ret) {
			goto link_error;
		}
		next_offset = offset + sizeof(uint16_t) + strlen(name) + 1;
	}
	/* Validate bytecode */
	ret = lttng_filter_validate_bytecode(runtime);
	if (ret) {
		goto link_error;
	}
	/* Specialize bytecode */
	ret = lttng_filter_specialize_bytecode(event, runtime);
	if (ret) {
		goto link_error;
	}
	runtime->p.filter = lttng_filter_interpret_bytecode;
	runtime->p.link_failed = 0;
	list_add_rcu(&runtime->p.node, insert_loc);
	dbg_printk("Linking successful.\n");
	return 0;

link_error:
	runtime->p.filter = lttng_filter_false;
	runtime->p.link_failed = 1;
	list_add_rcu(&runtime->p.node, insert_loc);
alloc_error:
	dbg_printk("Linking failed.\n");
	return ret;
}

void lttng_filter_sync_state(struct lttng_bytecode_runtime *runtime)
{
	struct lttng_filter_bytecode_node *bc = runtime->bc;

	if (!bc->enabler->enabled || runtime->link_failed)
		runtime->filter = lttng_filter_false;
	else
		runtime->filter = lttng_filter_interpret_bytecode;
}

/*
 * Link bytecode for all enablers referenced by an event.
 */
void lttng_enabler_event_link_bytecode(struct lttng_event *event,
		struct lttng_enabler *enabler)
{
	struct lttng_filter_bytecode_node *bc;
	struct lttng_bytecode_runtime *runtime;

	/* Can only be called for events with desc attached */
	WARN_ON_ONCE(!event->desc);

	/* Link each bytecode. */
	list_for_each_entry(bc, &enabler->filter_bytecode_head, node) {
		int found = 0, ret;
		struct list_head *insert_loc;

		list_for_each_entry(runtime,
				&event->bytecode_runtime_head, node) {
			if (runtime->bc == bc) {
				found = 1;
				break;
			}
		}
		/* Skip bytecode already linked */
		if (found)
			continue;

		/*
		 * Insert at specified priority (seqnum) in increasing
		 * order. If there already is a bytecode of the same priority,
		 * insert the new bytecode right after it.
		 */
		list_for_each_entry_reverse(runtime,
				&event->bytecode_runtime_head, node) {
			if (runtime->bc->bc.seqnum <= bc->bc.seqnum) {
				/* insert here */
				insert_loc = &runtime->node;
				goto add_within;
			}
		}
		/* Add to head of list */
		insert_loc = &event->bytecode_runtime_head;
	add_within:
		dbg_printk("linking bytecode\n");
		ret = _lttng_filter_event_link_bytecode(event, bc,
				insert_loc);
		if (ret) {
			dbg_printk("[lttng filter] warning: cannot link event bytecode\n");
		}
	}
}

/*
 * We own the filter_bytecode if we return success.
 */
int lttng_filter_enabler_attach_bytecode(struct lttng_enabler *enabler,
		struct lttng_filter_bytecode_node *filter_bytecode)
{
	list_add(&filter_bytecode->node, &enabler->filter_bytecode_head);
	return 0;
}
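
/*
 * Once attached, the bytecode node is owned by the enabler and is only
 * released by lttng_free_enabler_filter_bytecode() below; the runtimes
 * created from it by linking are released separately by
 * lttng_free_event_filter_runtime().
 */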

void lttng_free_enabler_filter_bytecode(struct lttng_enabler *enabler)
{
	struct lttng_filter_bytecode_node *filter_bytecode, *tmp;

	list_for_each_entry_safe(filter_bytecode, tmp,
			&enabler->filter_bytecode_head, node) {
		kfree(filter_bytecode);
	}
}

void lttng_free_event_filter_runtime(struct lttng_event *event)
{
	struct bytecode_runtime *runtime, *tmp;

	list_for_each_entry_safe(runtime, tmp,
			&event->bytecode_runtime_head, p.node) {
		kfree(runtime->data);
		kfree(runtime);
	}
}