 * LTTng UST filter code.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 #include <urcu/rculist.h>
33 #include "lttng-filter.h"
35 static const char *opnames
[] = {
36 [ FILTER_OP_UNKNOWN
] = "UNKNOWN",
38 [ FILTER_OP_RETURN
] = "RETURN",
41 [ FILTER_OP_MUL
] = "MUL",
42 [ FILTER_OP_DIV
] = "DIV",
43 [ FILTER_OP_MOD
] = "MOD",
44 [ FILTER_OP_PLUS
] = "PLUS",
45 [ FILTER_OP_MINUS
] = "MINUS",
46 [ FILTER_OP_BIT_RSHIFT
] = "BIT_RSHIFT",
47 [ FILTER_OP_BIT_LSHIFT
] = "BIT_LSHIFT",
48 [ FILTER_OP_BIT_AND
] = "BIT_AND",
49 [ FILTER_OP_BIT_OR
] = "BIT_OR",
50 [ FILTER_OP_BIT_XOR
] = "BIT_XOR",
52 /* binary comparators */
53 [ FILTER_OP_EQ
] = "EQ",
54 [ FILTER_OP_NE
] = "NE",
55 [ FILTER_OP_GT
] = "GT",
56 [ FILTER_OP_LT
] = "LT",
57 [ FILTER_OP_GE
] = "GE",
58 [ FILTER_OP_LE
] = "LE",
60 /* string binary comparators */
61 [ FILTER_OP_EQ_STRING
] = "EQ_STRING",
62 [ FILTER_OP_NE_STRING
] = "NE_STRING",
63 [ FILTER_OP_GT_STRING
] = "GT_STRING",
64 [ FILTER_OP_LT_STRING
] = "LT_STRING",
65 [ FILTER_OP_GE_STRING
] = "GE_STRING",
66 [ FILTER_OP_LE_STRING
] = "LE_STRING",
68 /* s64 binary comparators */
69 [ FILTER_OP_EQ_S64
] = "EQ_S64",
70 [ FILTER_OP_NE_S64
] = "NE_S64",
71 [ FILTER_OP_GT_S64
] = "GT_S64",
72 [ FILTER_OP_LT_S64
] = "LT_S64",
73 [ FILTER_OP_GE_S64
] = "GE_S64",
74 [ FILTER_OP_LE_S64
] = "LE_S64",
76 /* double binary comparators */
77 [ FILTER_OP_EQ_DOUBLE
] = "EQ_DOUBLE",
78 [ FILTER_OP_NE_DOUBLE
] = "NE_DOUBLE",
79 [ FILTER_OP_GT_DOUBLE
] = "GT_DOUBLE",
80 [ FILTER_OP_LT_DOUBLE
] = "LT_DOUBLE",
81 [ FILTER_OP_GE_DOUBLE
] = "GE_DOUBLE",
82 [ FILTER_OP_LE_DOUBLE
] = "LE_DOUBLE",
84 /* Mixed S64-double binary comparators */
85 [ FILTER_OP_EQ_DOUBLE_S64
] = "EQ_DOUBLE_S64",
86 [ FILTER_OP_NE_DOUBLE_S64
] = "NE_DOUBLE_S64",
87 [ FILTER_OP_GT_DOUBLE_S64
] = "GT_DOUBLE_S64",
88 [ FILTER_OP_LT_DOUBLE_S64
] = "LT_DOUBLE_S64",
89 [ FILTER_OP_GE_DOUBLE_S64
] = "GE_DOUBLE_S64",
90 [ FILTER_OP_LE_DOUBLE_S64
] = "LE_DOUBLE_S64",
92 [ FILTER_OP_EQ_S64_DOUBLE
] = "EQ_S64_DOUBLE",
93 [ FILTER_OP_NE_S64_DOUBLE
] = "NE_S64_DOUBLE",
94 [ FILTER_OP_GT_S64_DOUBLE
] = "GT_S64_DOUBLE",
95 [ FILTER_OP_LT_S64_DOUBLE
] = "LT_S64_DOUBLE",
96 [ FILTER_OP_GE_S64_DOUBLE
] = "GE_S64_DOUBLE",
97 [ FILTER_OP_LE_S64_DOUBLE
] = "LE_S64_DOUBLE",
100 [ FILTER_OP_UNARY_PLUS
] = "UNARY_PLUS",
101 [ FILTER_OP_UNARY_MINUS
] = "UNARY_MINUS",
102 [ FILTER_OP_UNARY_NOT
] = "UNARY_NOT",
103 [ FILTER_OP_UNARY_PLUS_S64
] = "UNARY_PLUS_S64",
104 [ FILTER_OP_UNARY_MINUS_S64
] = "UNARY_MINUS_S64",
105 [ FILTER_OP_UNARY_NOT_S64
] = "UNARY_NOT_S64",
106 [ FILTER_OP_UNARY_PLUS_DOUBLE
] = "UNARY_PLUS_DOUBLE",
107 [ FILTER_OP_UNARY_MINUS_DOUBLE
] = "UNARY_MINUS_DOUBLE",
108 [ FILTER_OP_UNARY_NOT_DOUBLE
] = "UNARY_NOT_DOUBLE",
111 [ FILTER_OP_AND
] = "AND",
112 [ FILTER_OP_OR
] = "OR",
115 [ FILTER_OP_LOAD_FIELD_REF
] = "LOAD_FIELD_REF",
116 [ FILTER_OP_LOAD_FIELD_REF_STRING
] = "LOAD_FIELD_REF_STRING",
117 [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE
] = "LOAD_FIELD_REF_SEQUENCE",
118 [ FILTER_OP_LOAD_FIELD_REF_S64
] = "LOAD_FIELD_REF_S64",
119 [ FILTER_OP_LOAD_FIELD_REF_DOUBLE
] = "LOAD_FIELD_REF_DOUBLE",
121 /* load from immediate operand */
122 [ FILTER_OP_LOAD_STRING
] = "LOAD_STRING",
123 [ FILTER_OP_LOAD_S64
] = "LOAD_S64",
124 [ FILTER_OP_LOAD_DOUBLE
] = "LOAD_DOUBLE",
127 [ FILTER_OP_CAST_TO_S64
] = "CAST_TO_S64",
128 [ FILTER_OP_CAST_DOUBLE_TO_S64
] = "CAST_DOUBLE_TO_S64",
129 [ FILTER_OP_CAST_NOP
] = "CAST_NOP",
131 /* get context ref */
132 [ FILTER_OP_GET_CONTEXT_REF
] = "GET_CONTEXT_REF",
133 [ FILTER_OP_GET_CONTEXT_REF_STRING
] = "GET_CONTEXT_REF_STRING",
134 [ FILTER_OP_GET_CONTEXT_REF_S64
] = "GET_CONTEXT_REF_S64",
135 [ FILTER_OP_GET_CONTEXT_REF_DOUBLE
] = "GET_CONTEXT_REF_DOUBLE",
137 /* load userspace field ref */
138 [ FILTER_OP_LOAD_FIELD_REF_USER_STRING
] = "LOAD_FIELD_REF_USER_STRING",
139 [ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
] = "LOAD_FIELD_REF_USER_SEQUENCE",
142 * load immediate star globbing pattern (literal string)
145 [ FILTER_OP_LOAD_STAR_GLOB_STRING
] = "LOAD_STAR_GLOB_STRING",
147 /* globbing pattern binary operator: apply to */
148 [ FILTER_OP_EQ_STAR_GLOB_STRING
] = "EQ_STAR_GLOB_STRING",
149 [ FILTER_OP_NE_STAR_GLOB_STRING
] = "NE_STAR_GLOB_STRING",
152 * Instructions for recursive traversal through composed types.
154 [ FILTER_OP_GET_CONTEXT_ROOT
] = "GET_CONTEXT_ROOT",
155 [ FILTER_OP_GET_APP_CONTEXT_ROOT
] = "GET_APP_CONTEXT_ROOT",
156 [ FILTER_OP_GET_PAYLOAD_ROOT
] = "GET_PAYLOAD_ROOT",
158 [ FILTER_OP_GET_SYMBOL
] = "GET_SYMBOL",
159 [ FILTER_OP_GET_SYMBOL_FIELD
] = "GET_SYMBOL_FIELD",
160 [ FILTER_OP_GET_INDEX_U16
] = "GET_INDEX_U16",
161 [ FILTER_OP_GET_INDEX_U64
] = "GET_INDEX_U64",
163 [ FILTER_OP_LOAD_FIELD
] = "LOAD_FIELD",
164 [ FILTER_OP_LOAD_FIELD_S8
] = "LOAD_FIELD_S8",
165 [ FILTER_OP_LOAD_FIELD_S16
] = "LOAD_FIELD_S16",
166 [ FILTER_OP_LOAD_FIELD_S32
] = "LOAD_FIELD_S32",
167 [ FILTER_OP_LOAD_FIELD_S64
] = "LOAD_FIELD_S64",
168 [ FILTER_OP_LOAD_FIELD_U8
] = "LOAD_FIELD_U8",
169 [ FILTER_OP_LOAD_FIELD_U16
] = "LOAD_FIELD_U16",
170 [ FILTER_OP_LOAD_FIELD_U32
] = "LOAD_FIELD_U32",
171 [ FILTER_OP_LOAD_FIELD_U64
] = "LOAD_FIELD_U64",
172 [ FILTER_OP_LOAD_FIELD_STRING
] = "LOAD_FIELD_STRING",
173 [ FILTER_OP_LOAD_FIELD_SEQUENCE
] = "LOAD_FIELD_SEQUENCE",
174 [ FILTER_OP_LOAD_FIELD_DOUBLE
] = "LOAD_FIELD_DOUBLE",
176 [ FILTER_OP_UNARY_BIT_NOT
] = "UNARY_BIT_NOT",
178 [ FILTER_OP_RETURN_S64
] = "RETURN_S64",
181 const char *print_op(enum filter_op op
)
183 if (op
>= NR_FILTER_OPS
)
190 int apply_field_reloc(struct lttng_event
*event
,
191 struct bytecode_runtime
*runtime
,
192 uint32_t runtime_len
,
193 uint32_t reloc_offset
,
194 const char *field_name
,
195 enum filter_op filter_op
)
197 const struct lttng_event_desc
*desc
;
198 const struct lttng_event_field
*fields
, *field
= NULL
;
199 unsigned int nr_fields
, i
;
201 uint32_t field_offset
= 0;
203 dbg_printf("Apply field reloc: %u %s\n", reloc_offset
, field_name
);
205 /* Lookup event by name */
209 fields
= desc
->fields
;
212 nr_fields
= desc
->nr_fields
;
213 for (i
= 0; i
< nr_fields
; i
++) {
214 if (!strcmp(fields
[i
].name
, field_name
)) {
218 /* compute field offset */
219 switch (fields
[i
].type
.atype
) {
222 field_offset
+= sizeof(int64_t);
226 field_offset
+= sizeof(unsigned long);
227 field_offset
+= sizeof(void *);
230 field_offset
+= sizeof(void *);
233 field_offset
+= sizeof(double);
242 /* Check if field offset is too large for 16-bit offset */
243 if (field_offset
> FILTER_BYTECODE_MAX_LEN
- 1)
247 op
= (struct load_op
*) &runtime
->code
[reloc_offset
];
250 case FILTER_OP_LOAD_FIELD_REF
:
252 struct field_ref
*field_ref
;
254 field_ref
= (struct field_ref
*) op
->data
;
255 switch (field
->type
.atype
) {
258 op
->op
= FILTER_OP_LOAD_FIELD_REF_S64
;
262 op
->op
= FILTER_OP_LOAD_FIELD_REF_SEQUENCE
;
265 op
->op
= FILTER_OP_LOAD_FIELD_REF_STRING
;
268 op
->op
= FILTER_OP_LOAD_FIELD_REF_DOUBLE
;
274 field_ref
->offset
= (uint16_t) field_offset
;
284 int apply_context_reloc(struct lttng_event
*event
,
285 struct bytecode_runtime
*runtime
,
286 uint32_t runtime_len
,
287 uint32_t reloc_offset
,
288 const char *context_name
,
289 enum filter_op filter_op
)
292 struct lttng_ctx_field
*ctx_field
;
294 struct lttng_session
*session
= runtime
->p
.session
;
296 dbg_printf("Apply context reloc: %u %s\n", reloc_offset
, context_name
);
298 /* Get context index */
299 idx
= lttng_get_context_index(session
->ctx
, context_name
);
301 if (lttng_context_is_app(context_name
)) {
304 ret
= lttng_ust_add_app_context_to_ctx_rcu(context_name
,
308 idx
= lttng_get_context_index(session
->ctx
,
316 /* Check if idx is too large for 16-bit offset */
317 if (idx
> FILTER_BYTECODE_MAX_LEN
- 1)
320 /* Get context return type */
321 ctx_field
= &session
->ctx
->fields
[idx
];
322 op
= (struct load_op
*) &runtime
->code
[reloc_offset
];
325 case FILTER_OP_GET_CONTEXT_REF
:
327 struct field_ref
*field_ref
;
329 field_ref
= (struct field_ref
*) op
->data
;
330 switch (ctx_field
->event_field
.type
.atype
) {
333 op
->op
= FILTER_OP_GET_CONTEXT_REF_S64
;
335 /* Sequence and array supported as string */
339 op
->op
= FILTER_OP_GET_CONTEXT_REF_STRING
;
342 op
->op
= FILTER_OP_GET_CONTEXT_REF_DOUBLE
;
345 op
->op
= FILTER_OP_GET_CONTEXT_REF
;
350 /* set offset to context index within channel contexts */
351 field_ref
->offset
= (uint16_t) idx
;
361 int apply_reloc(struct lttng_event
*event
,
362 struct bytecode_runtime
*runtime
,
363 uint32_t runtime_len
,
364 uint32_t reloc_offset
,
369 dbg_printf("Apply reloc: %u %s\n", reloc_offset
, name
);
371 /* Ensure that the reloc is within the code */
372 if (runtime_len
- reloc_offset
< sizeof(uint16_t))
375 op
= (struct load_op
*) &runtime
->code
[reloc_offset
];
377 case FILTER_OP_LOAD_FIELD_REF
:
378 return apply_field_reloc(event
, runtime
, runtime_len
,
379 reloc_offset
, name
, op
->op
);
380 case FILTER_OP_GET_CONTEXT_REF
:
381 return apply_context_reloc(event
, runtime
, runtime_len
,
382 reloc_offset
, name
, op
->op
);
383 case FILTER_OP_GET_SYMBOL
:
384 case FILTER_OP_GET_SYMBOL_FIELD
:
386 * Will be handled by load specialize phase or
387 * dynamically by interpreter.
391 ERR("Unknown reloc op type %u\n", op
->op
);
398 int bytecode_is_linked(struct lttng_ust_filter_bytecode_node
*filter_bytecode
,
399 struct lttng_event
*event
)
401 struct lttng_bytecode_runtime
*bc_runtime
;
403 cds_list_for_each_entry(bc_runtime
,
404 &event
->bytecode_runtime_head
, node
) {
405 if (bc_runtime
->bc
== filter_bytecode
)
412 * Take a bytecode with reloc table and link it to an event to create a
416 int _lttng_filter_event_link_bytecode(struct lttng_event
*event
,
417 struct lttng_ust_filter_bytecode_node
*filter_bytecode
,
418 struct cds_list_head
*insert_loc
)
420 int ret
, offset
, next_offset
;
421 struct bytecode_runtime
*runtime
= NULL
;
422 size_t runtime_alloc_len
;
424 if (!filter_bytecode
)
426 /* Bytecode already linked */
427 if (bytecode_is_linked(filter_bytecode
, event
))
430 dbg_printf("Linking...\n");
432 /* We don't need the reloc table in the runtime */
433 runtime_alloc_len
= sizeof(*runtime
) + filter_bytecode
->bc
.reloc_offset
;
434 runtime
= zmalloc(runtime_alloc_len
);
439 runtime
->p
.bc
= filter_bytecode
;
440 runtime
->p
.session
= event
->chan
->session
;
441 runtime
->len
= filter_bytecode
->bc
.reloc_offset
;
442 /* copy original bytecode */
443 memcpy(runtime
->code
, filter_bytecode
->bc
.data
, runtime
->len
);
445 * apply relocs. Those are a uint16_t (offset in bytecode)
446 * followed by a string (field name).
448 for (offset
= filter_bytecode
->bc
.reloc_offset
;
449 offset
< filter_bytecode
->bc
.len
;
450 offset
= next_offset
) {
451 uint16_t reloc_offset
=
452 *(uint16_t *) &filter_bytecode
->bc
.data
[offset
];
454 (const char *) &filter_bytecode
->bc
.data
[offset
+ sizeof(uint16_t)];
456 ret
= apply_reloc(event
, runtime
, runtime
->len
, reloc_offset
, name
);
460 next_offset
= offset
+ sizeof(uint16_t) + strlen(name
) + 1;
462 /* Validate bytecode */
463 ret
= lttng_filter_validate_bytecode(runtime
);
467 /* Specialize bytecode */
468 ret
= lttng_filter_specialize_bytecode(event
, runtime
);
472 runtime
->p
.filter
= lttng_filter_interpret_bytecode
;
473 runtime
->p
.link_failed
= 0;
474 cds_list_add_rcu(&runtime
->p
.node
, insert_loc
);
475 dbg_printf("Linking successful.\n");
479 runtime
->p
.filter
= lttng_filter_false
;
480 runtime
->p
.link_failed
= 1;
481 cds_list_add_rcu(&runtime
->p
.node
, insert_loc
);
483 dbg_printf("Linking failed.\n");
487 void lttng_filter_sync_state(struct lttng_bytecode_runtime
*runtime
)
489 struct lttng_ust_filter_bytecode_node
*bc
= runtime
->bc
;
491 if (!bc
->enabler
->enabled
|| runtime
->link_failed
)
492 runtime
->filter
= lttng_filter_false
;
494 runtime
->filter
= lttng_filter_interpret_bytecode
;
498 * Link bytecode for all enablers referenced by an event.
500 void lttng_enabler_event_link_bytecode(struct lttng_event
*event
,
501 struct lttng_enabler
*enabler
)
503 struct lttng_ust_filter_bytecode_node
*bc
;
504 struct lttng_bytecode_runtime
*runtime
;
506 /* Can only be called for events with desc attached */
509 /* Link each bytecode. */
510 cds_list_for_each_entry(bc
, &enabler
->filter_bytecode_head
, node
) {
512 struct cds_list_head
*insert_loc
;
514 cds_list_for_each_entry(runtime
,
515 &event
->bytecode_runtime_head
, node
) {
516 if (runtime
->bc
== bc
) {
521 /* Skip bytecode already linked */
526 * Insert at specified priority (seqnum) in increasing
529 cds_list_for_each_entry_reverse(runtime
,
530 &event
->bytecode_runtime_head
, node
) {
531 if (runtime
->bc
->bc
.seqnum
< bc
->bc
.seqnum
) {
533 insert_loc
= &runtime
->node
;
537 /* Add to head to list */
538 insert_loc
= &event
->bytecode_runtime_head
;
540 dbg_printf("linking bytecode\n");
541 ret
= _lttng_filter_event_link_bytecode(event
, bc
,
544 dbg_printf("[lttng filter] warning: cannot link event bytecode\n");
550 * We own the filter_bytecode if we return success.
552 int lttng_filter_enabler_attach_bytecode(struct lttng_enabler
*enabler
,
553 struct lttng_ust_filter_bytecode_node
*filter_bytecode
)
555 cds_list_add(&filter_bytecode
->node
, &enabler
->filter_bytecode_head
);
559 void lttng_free_event_filter_runtime(struct lttng_event
*event
)
561 struct bytecode_runtime
*runtime
, *tmp
;
563 cds_list_for_each_entry_safe(runtime
, tmp
,
564 &event
->bytecode_runtime_head
, p
.node
) {