Abstract base `lttng_enabler` to support other types of enablers
deliverable/lttng-ust.git: liblttng-ust/lttng-filter.c
/*
 * lttng-filter.c
 *
 * LTTng UST filter code.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define _LGPL_SOURCE
#include <stddef.h>
#include <stdint.h>

#include <urcu/rculist.h>

#include "lttng-filter.h"
#include "ust-events-internal.h"

static const char *opnames[] = {
	[ FILTER_OP_UNKNOWN ] = "UNKNOWN",

	[ FILTER_OP_RETURN ] = "RETURN",

	/* binary */
	[ FILTER_OP_MUL ] = "MUL",
	[ FILTER_OP_DIV ] = "DIV",
	[ FILTER_OP_MOD ] = "MOD",
	[ FILTER_OP_PLUS ] = "PLUS",
	[ FILTER_OP_MINUS ] = "MINUS",
	[ FILTER_OP_BIT_RSHIFT ] = "BIT_RSHIFT",
	[ FILTER_OP_BIT_LSHIFT ] = "BIT_LSHIFT",
	[ FILTER_OP_BIT_AND ] = "BIT_AND",
	[ FILTER_OP_BIT_OR ] = "BIT_OR",
	[ FILTER_OP_BIT_XOR ] = "BIT_XOR",

	/* binary comparators */
	[ FILTER_OP_EQ ] = "EQ",
	[ FILTER_OP_NE ] = "NE",
	[ FILTER_OP_GT ] = "GT",
	[ FILTER_OP_LT ] = "LT",
	[ FILTER_OP_GE ] = "GE",
	[ FILTER_OP_LE ] = "LE",

	/* string binary comparators */
	[ FILTER_OP_EQ_STRING ] = "EQ_STRING",
	[ FILTER_OP_NE_STRING ] = "NE_STRING",
	[ FILTER_OP_GT_STRING ] = "GT_STRING",
	[ FILTER_OP_LT_STRING ] = "LT_STRING",
	[ FILTER_OP_GE_STRING ] = "GE_STRING",
	[ FILTER_OP_LE_STRING ] = "LE_STRING",

	/* s64 binary comparators */
	[ FILTER_OP_EQ_S64 ] = "EQ_S64",
	[ FILTER_OP_NE_S64 ] = "NE_S64",
	[ FILTER_OP_GT_S64 ] = "GT_S64",
	[ FILTER_OP_LT_S64 ] = "LT_S64",
	[ FILTER_OP_GE_S64 ] = "GE_S64",
	[ FILTER_OP_LE_S64 ] = "LE_S64",

	/* double binary comparators */
	[ FILTER_OP_EQ_DOUBLE ] = "EQ_DOUBLE",
	[ FILTER_OP_NE_DOUBLE ] = "NE_DOUBLE",
	[ FILTER_OP_GT_DOUBLE ] = "GT_DOUBLE",
	[ FILTER_OP_LT_DOUBLE ] = "LT_DOUBLE",
	[ FILTER_OP_GE_DOUBLE ] = "GE_DOUBLE",
	[ FILTER_OP_LE_DOUBLE ] = "LE_DOUBLE",

	/* Mixed S64-double binary comparators */
	[ FILTER_OP_EQ_DOUBLE_S64 ] = "EQ_DOUBLE_S64",
	[ FILTER_OP_NE_DOUBLE_S64 ] = "NE_DOUBLE_S64",
	[ FILTER_OP_GT_DOUBLE_S64 ] = "GT_DOUBLE_S64",
	[ FILTER_OP_LT_DOUBLE_S64 ] = "LT_DOUBLE_S64",
	[ FILTER_OP_GE_DOUBLE_S64 ] = "GE_DOUBLE_S64",
	[ FILTER_OP_LE_DOUBLE_S64 ] = "LE_DOUBLE_S64",

	[ FILTER_OP_EQ_S64_DOUBLE ] = "EQ_S64_DOUBLE",
	[ FILTER_OP_NE_S64_DOUBLE ] = "NE_S64_DOUBLE",
	[ FILTER_OP_GT_S64_DOUBLE ] = "GT_S64_DOUBLE",
	[ FILTER_OP_LT_S64_DOUBLE ] = "LT_S64_DOUBLE",
	[ FILTER_OP_GE_S64_DOUBLE ] = "GE_S64_DOUBLE",
	[ FILTER_OP_LE_S64_DOUBLE ] = "LE_S64_DOUBLE",

	/* unary */
	[ FILTER_OP_UNARY_PLUS ] = "UNARY_PLUS",
	[ FILTER_OP_UNARY_MINUS ] = "UNARY_MINUS",
	[ FILTER_OP_UNARY_NOT ] = "UNARY_NOT",
	[ FILTER_OP_UNARY_PLUS_S64 ] = "UNARY_PLUS_S64",
	[ FILTER_OP_UNARY_MINUS_S64 ] = "UNARY_MINUS_S64",
	[ FILTER_OP_UNARY_NOT_S64 ] = "UNARY_NOT_S64",
	[ FILTER_OP_UNARY_PLUS_DOUBLE ] = "UNARY_PLUS_DOUBLE",
	[ FILTER_OP_UNARY_MINUS_DOUBLE ] = "UNARY_MINUS_DOUBLE",
	[ FILTER_OP_UNARY_NOT_DOUBLE ] = "UNARY_NOT_DOUBLE",

	/* logical */
	[ FILTER_OP_AND ] = "AND",
	[ FILTER_OP_OR ] = "OR",

	/* load field ref */
	[ FILTER_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF",
	[ FILTER_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING",
	[ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE",
	[ FILTER_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64",
	[ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE",

	/* load from immediate operand */
	[ FILTER_OP_LOAD_STRING ] = "LOAD_STRING",
	[ FILTER_OP_LOAD_S64 ] = "LOAD_S64",
	[ FILTER_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE",

	/* cast */
	[ FILTER_OP_CAST_TO_S64 ] = "CAST_TO_S64",
	[ FILTER_OP_CAST_DOUBLE_TO_S64 ] = "CAST_DOUBLE_TO_S64",
	[ FILTER_OP_CAST_NOP ] = "CAST_NOP",

	/* get context ref */
	[ FILTER_OP_GET_CONTEXT_REF ] = "GET_CONTEXT_REF",
	[ FILTER_OP_GET_CONTEXT_REF_STRING ] = "GET_CONTEXT_REF_STRING",
	[ FILTER_OP_GET_CONTEXT_REF_S64 ] = "GET_CONTEXT_REF_S64",
	[ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = "GET_CONTEXT_REF_DOUBLE",

	/* load userspace field ref */
	[ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = "LOAD_FIELD_REF_USER_STRING",
	[ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = "LOAD_FIELD_REF_USER_SEQUENCE",

	/*
	 * Load a star globbing pattern (literal string) from an
	 * immediate operand.
	 */
	[ FILTER_OP_LOAD_STAR_GLOB_STRING ] = "LOAD_STAR_GLOB_STRING",

	/* globbing pattern binary operators: apply a pattern to a string */
	[ FILTER_OP_EQ_STAR_GLOB_STRING ] = "EQ_STAR_GLOB_STRING",
	[ FILTER_OP_NE_STAR_GLOB_STRING ] = "NE_STAR_GLOB_STRING",

	/*
	 * Instructions for recursive traversal through composed types.
	 */
	[ FILTER_OP_GET_CONTEXT_ROOT ] = "GET_CONTEXT_ROOT",
	[ FILTER_OP_GET_APP_CONTEXT_ROOT ] = "GET_APP_CONTEXT_ROOT",
	[ FILTER_OP_GET_PAYLOAD_ROOT ] = "GET_PAYLOAD_ROOT",

	[ FILTER_OP_GET_SYMBOL ] = "GET_SYMBOL",
	[ FILTER_OP_GET_SYMBOL_FIELD ] = "GET_SYMBOL_FIELD",
	[ FILTER_OP_GET_INDEX_U16 ] = "GET_INDEX_U16",
	[ FILTER_OP_GET_INDEX_U64 ] = "GET_INDEX_U64",

	[ FILTER_OP_LOAD_FIELD ] = "LOAD_FIELD",
	[ FILTER_OP_LOAD_FIELD_S8 ] = "LOAD_FIELD_S8",
	[ FILTER_OP_LOAD_FIELD_S16 ] = "LOAD_FIELD_S16",
	[ FILTER_OP_LOAD_FIELD_S32 ] = "LOAD_FIELD_S32",
	[ FILTER_OP_LOAD_FIELD_S64 ] = "LOAD_FIELD_S64",
	[ FILTER_OP_LOAD_FIELD_U8 ] = "LOAD_FIELD_U8",
	[ FILTER_OP_LOAD_FIELD_U16 ] = "LOAD_FIELD_U16",
	[ FILTER_OP_LOAD_FIELD_U32 ] = "LOAD_FIELD_U32",
	[ FILTER_OP_LOAD_FIELD_U64 ] = "LOAD_FIELD_U64",
	[ FILTER_OP_LOAD_FIELD_STRING ] = "LOAD_FIELD_STRING",
	[ FILTER_OP_LOAD_FIELD_SEQUENCE ] = "LOAD_FIELD_SEQUENCE",
	[ FILTER_OP_LOAD_FIELD_DOUBLE ] = "LOAD_FIELD_DOUBLE",

	[ FILTER_OP_UNARY_BIT_NOT ] = "UNARY_BIT_NOT",

	[ FILTER_OP_RETURN_S64 ] = "RETURN_S64",
};

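/*
 * Return a human-readable name for a filter opcode. Out-of-range
 * values map to "UNKNOWN".
 */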
const char *print_op(enum filter_op op)
{
	if (op >= NR_FILTER_OPS)
		return "UNKNOWN";
	else
		return opnames[op];
}

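/*
 * Patch the load instruction at reloc_offset to reference the named
 * event payload field: pick the typed variant of LOAD_FIELD_REF and
 * record the field's offset within the filter stack data.
 */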
static
int apply_field_reloc(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *field_name,
		enum filter_op filter_op)
{
	const struct lttng_event_desc *desc;
	const struct lttng_event_field *fields, *field = NULL;
	unsigned int nr_fields, i;
	struct load_op *op;
	uint32_t field_offset = 0;

	dbg_printf("Apply field reloc: %u %s\n", reloc_offset, field_name);

	/* Look up the event payload field by name */
	desc = event->desc;
	if (!desc)
		return -EINVAL;
	fields = desc->fields;
	if (!fields)
		return -EINVAL;
	nr_fields = desc->nr_fields;
	for (i = 0; i < nr_fields; i++) {
		if (fields[i].u.ext.nofilter) {
			continue;
		}
		if (!strcmp(fields[i].name, field_name)) {
			field = &fields[i];
			break;
		}
		/* compute field offset */
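		/*
		 * Note: the per-field sizes below are understood to mirror
		 * the layout of the filter stack data recorded for each
		 * event: integers and enums take an int64_t slot; arrays
		 * and sequences a length (unsigned long) plus a pointer;
		 * strings a pointer; floating point values a double.
		 */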
		switch (fields[i].type.atype) {
		case atype_integer:
		case atype_enum:
		case atype_enum_nestable:
			field_offset += sizeof(int64_t);
			break;
		case atype_array:
		case atype_array_nestable:
		case atype_sequence:
		case atype_sequence_nestable:
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case atype_string:
			field_offset += sizeof(void *);
			break;
		case atype_float:
			field_offset += sizeof(double);
			break;
		default:
			return -EINVAL;
		}
	}
	if (!field)
		return -EINVAL;

	/* Check if field offset is too large for 16-bit offset */
	if (field_offset > FILTER_BYTECODE_MAX_LEN - 1)
		return -EINVAL;

	/* set type */
	op = (struct load_op *) &runtime->code[reloc_offset];

	switch (filter_op) {
	case FILTER_OP_LOAD_FIELD_REF:
	{
		struct field_ref *field_ref;

		field_ref = (struct field_ref *) op->data;
		switch (field->type.atype) {
		case atype_integer:
		case atype_enum:
		case atype_enum_nestable:
			op->op = FILTER_OP_LOAD_FIELD_REF_S64;
			break;
		case atype_array:
		case atype_array_nestable:
		case atype_sequence:
		case atype_sequence_nestable:
			op->op = FILTER_OP_LOAD_FIELD_REF_SEQUENCE;
			break;
		case atype_string:
			op->op = FILTER_OP_LOAD_FIELD_REF_STRING;
			break;
		case atype_float:
			op->op = FILTER_OP_LOAD_FIELD_REF_DOUBLE;
			break;
		default:
			return -EINVAL;
		}
		/* set offset */
		field_ref->offset = (uint16_t) field_offset;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

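/*
 * Patch the load instruction at reloc_offset to reference the named
 * context field: resolve the context index (registering application
 * contexts on demand), pick the typed variant of GET_CONTEXT_REF and
 * record the index as the reference offset.
 */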
static
int apply_context_reloc(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *context_name,
		enum filter_op filter_op)
{
	struct load_op *op;
	struct lttng_ctx_field *ctx_field;
	int idx;
	struct lttng_ctx *ctx = *runtime->p.pctx;

	dbg_printf("Apply context reloc: %u %s\n", reloc_offset, context_name);

	/* Get context index */
	idx = lttng_get_context_index(ctx, context_name);
	if (idx < 0) {
		if (lttng_context_is_app(context_name)) {
			int ret;

			ret = lttng_ust_add_app_context_to_ctx_rcu(context_name,
					&ctx);
			if (ret)
				return ret;
			idx = lttng_get_context_index(ctx, context_name);
			if (idx < 0)
				return -ENOENT;
		} else {
			return -ENOENT;
		}
	}
	/* Check if idx is too large for 16-bit offset */
	if (idx > FILTER_BYTECODE_MAX_LEN - 1)
		return -EINVAL;

	/* Get context return type */
	ctx_field = &ctx->fields[idx];
	op = (struct load_op *) &runtime->code[reloc_offset];

	switch (filter_op) {
	case FILTER_OP_GET_CONTEXT_REF:
	{
		struct field_ref *field_ref;

		field_ref = (struct field_ref *) op->data;
		switch (ctx_field->event_field.type.atype) {
		case atype_integer:
		case atype_enum:
		case atype_enum_nestable:
			op->op = FILTER_OP_GET_CONTEXT_REF_S64;
			break;
		/* Sequence and array supported as string */
		case atype_string:
		case atype_array:
		case atype_array_nestable:
		case atype_sequence:
		case atype_sequence_nestable:
			op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
			break;
		case atype_float:
			op->op = FILTER_OP_GET_CONTEXT_REF_DOUBLE;
			break;
		case atype_dynamic:
			op->op = FILTER_OP_GET_CONTEXT_REF;
			break;
		default:
			return -EINVAL;
		}
		/* set offset to context index within channel contexts */
		field_ref->offset = (uint16_t) idx;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

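/*
 * Apply one reloc table entry, dispatching on the opcode found at the
 * relocation offset within the copied bytecode.
 */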
static
int apply_reloc(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *name)
{
	struct load_op *op;

	dbg_printf("Apply reloc: %u %s\n", reloc_offset, name);

	/* Ensure that the reloc is within the code */
	if (runtime_len - reloc_offset < sizeof(uint16_t))
		return -EINVAL;

	op = (struct load_op *) &runtime->code[reloc_offset];
	switch (op->op) {
	case FILTER_OP_LOAD_FIELD_REF:
		return apply_field_reloc(event, runtime, runtime_len,
				reloc_offset, name, op->op);
	case FILTER_OP_GET_CONTEXT_REF:
		return apply_context_reloc(event, runtime, runtime_len,
				reloc_offset, name, op->op);
	case FILTER_OP_GET_SYMBOL:
	case FILTER_OP_GET_SYMBOL_FIELD:
		/*
		 * Will be handled by load specialize phase or
		 * dynamically by interpreter.
		 */
		return 0;
	default:
		ERR("Unknown reloc op type %u\n", op->op);
		return -EINVAL;
	}
	return 0;
}

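/*
 * Return 1 if a runtime for this bytecode node is already linked to
 * the event, 0 otherwise.
 */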
static
int bytecode_is_linked(struct lttng_ust_filter_bytecode_node *filter_bytecode,
		struct lttng_event *event)
{
	struct lttng_bytecode_runtime *bc_runtime;

	cds_list_for_each_entry(bc_runtime,
			&event->bytecode_runtime_head, node) {
		if (bc_runtime->bc == filter_bytecode)
			return 1;
	}
	return 0;
}

/*
 * Take a bytecode with reloc table and link it to an event to create a
 * bytecode runtime.
 */
static
int _lttng_filter_event_link_bytecode(struct lttng_event *event,
		struct lttng_ust_filter_bytecode_node *filter_bytecode,
		struct cds_list_head *insert_loc)
{
	int ret, offset, next_offset;
	struct bytecode_runtime *runtime = NULL;
	size_t runtime_alloc_len;

	if (!filter_bytecode)
		return 0;
	/* Bytecode already linked */
	if (bytecode_is_linked(filter_bytecode, event))
		return 0;

	dbg_printf("Linking...\n");

	/* We don't need the reloc table in the runtime */
	runtime_alloc_len = sizeof(*runtime) + filter_bytecode->bc.reloc_offset;
	runtime = zmalloc(runtime_alloc_len);
	if (!runtime) {
		ret = -ENOMEM;
		goto alloc_error;
	}
	runtime->p.bc = filter_bytecode;
	runtime->p.pctx = &event->chan->session->ctx;
	runtime->len = filter_bytecode->bc.reloc_offset;
	/* copy original bytecode */
	memcpy(runtime->code, filter_bytecode->bc.data, runtime->len);
	/*
	 * Apply relocations. Each reloc table entry is a uint16_t (the
	 * offset to patch within the bytecode) followed by a
	 * NUL-terminated string (the field name).
	 */
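	/*
	 * Illustrative example (hypothetical field name and offset): an
	 * entry relocating a load at bytecode offset 12 for a field
	 * named "fd" occupies 2 + 3 bytes in the reloc table: the
	 * native-endian uint16_t value 12, then 'f', 'd', '\0'.
	 */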
	for (offset = filter_bytecode->bc.reloc_offset;
			offset < filter_bytecode->bc.len;
			offset = next_offset) {
		uint16_t reloc_offset =
			*(uint16_t *) &filter_bytecode->bc.data[offset];
		const char *name =
			(const char *) &filter_bytecode->bc.data[offset + sizeof(uint16_t)];

		ret = apply_reloc(event, runtime, runtime->len, reloc_offset, name);
		if (ret) {
			goto link_error;
		}
		next_offset = offset + sizeof(uint16_t) + strlen(name) + 1;
	}
	/* Validate bytecode */
	ret = lttng_filter_validate_bytecode(runtime);
	if (ret) {
		goto link_error;
	}
	/* Specialize bytecode */
	ret = lttng_filter_specialize_bytecode(event, runtime);
	if (ret) {
		goto link_error;
	}
	runtime->p.filter = lttng_filter_interpret_bytecode;
	runtime->p.link_failed = 0;
	cds_list_add_rcu(&runtime->p.node, insert_loc);
	dbg_printf("Linking successful.\n");
	return 0;

link_error:
	runtime->p.filter = lttng_filter_false;
	runtime->p.link_failed = 1;
	cds_list_add_rcu(&runtime->p.node, insert_loc);
alloc_error:
	dbg_printf("Linking failed.\n");
	return ret;
}

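/*
 * Synchronize the filter callback with the enabler state: when the
 * owning enabler is disabled, or the bytecode failed to link, the
 * filter evaluates to false instead of interpreting the bytecode.
 */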
void lttng_filter_sync_state(struct lttng_bytecode_runtime *runtime)
{
	struct lttng_ust_filter_bytecode_node *bc = runtime->bc;

	if (!bc->enabler->enabled || runtime->link_failed)
		runtime->filter = lttng_filter_false;
	else
		runtime->filter = lttng_filter_interpret_bytecode;
}

/*
 * Link each filter bytecode of the enabler to the event.
 */
void lttng_event_enabler_link_bytecode(struct lttng_event *event,
		struct lttng_event_enabler *event_enabler)
{
	struct lttng_ust_filter_bytecode_node *bc;
	struct lttng_bytecode_runtime *runtime;
	struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(event_enabler);

	/* Can only be called for events with desc attached */
	assert(event->desc);

	/* Link each bytecode. */
	cds_list_for_each_entry(bc, &base_enabler->filter_bytecode_head, node) {
		int found = 0, ret;
		struct cds_list_head *insert_loc;

		cds_list_for_each_entry(runtime,
				&event->bytecode_runtime_head, node) {
			if (runtime->bc == bc) {
				found = 1;
				break;
			}
		}
		/* Skip bytecode already linked */
		if (found)
			continue;

		/*
		 * Insert at specified priority (seqnum) in increasing
		 * order. If there already is a bytecode of the same priority,
		 * insert the new bytecode right after it.
		 */
		cds_list_for_each_entry_reverse(runtime,
				&event->bytecode_runtime_head, node) {
			if (runtime->bc->bc.seqnum <= bc->bc.seqnum) {
				/* insert here */
				insert_loc = &runtime->node;
				goto add_within;
			}
		}
		/* Add to head of list */
		insert_loc = &event->bytecode_runtime_head;
	add_within:
		dbg_printf("linking bytecode\n");
		ret = _lttng_filter_event_link_bytecode(event, bc,
				insert_loc);
		if (ret) {
			dbg_printf("[lttng filter] warning: cannot link event bytecode\n");
		}
	}
}

/*
 * We own the filter_bytecode if we return success.
 */
int lttng_filter_enabler_attach_bytecode(struct lttng_enabler *enabler,
		struct lttng_ust_filter_bytecode_node *filter_bytecode)
{
	cds_list_add(&filter_bytecode->node, &enabler->filter_bytecode_head);
	return 0;
}

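/*
 * Free every bytecode runtime on the list, along with the auxiliary
 * data buffer allocated during specialization.
 */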
static
void free_filter_runtime(struct cds_list_head *bytecode_runtime_head)
{
	struct bytecode_runtime *runtime, *tmp;

	cds_list_for_each_entry_safe(runtime, tmp, bytecode_runtime_head,
			p.node) {
		free(runtime->data);
		free(runtime);
	}
}

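/*
 * Free all filter bytecode runtimes attached to an event.
 */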
void lttng_free_event_filter_runtime(struct lttng_event *event)
{
	free_filter_runtime(&event->bytecode_runtime_head);
}