4 * Babeltrace CTF IR - Trace Visitor
6 * Copyright 2015 Jérémie Galarneau <jeremie.galarneau@efficios.com>
8 * Author: Jérémie Galarneau <jeremie.galarneau@efficios.com>
10 * Permission is hereby granted, free of charge, to any person obtaining a copy
11 * of this software and associated documentation files (the "Software"), to deal
12 * in the Software without restriction, including without limitation the rights
13 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the Software is
15 * furnished to do so, subject to the following conditions:
17 * The above copyright notice and this permission notice shall be included in
18 * all copies or substantial portions of the Software.
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
23 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 #include <babeltrace/ctf-ir/event.h>
30 #include <babeltrace/ctf-ir/stream-class.h>
31 #include <babeltrace/ctf-ir/visitor-internal.h>
32 #include <babeltrace/ctf-ir/event-types-internal.h>
33 #include <babeltrace/ctf-ir/event-internal.h>
34 #include <babeltrace/babeltrace-internal.h>
36 /* TSDL dynamic scope prefixes defined in CTF Section 7.3.2 */
/*
 * Maps enum CTF_NODE_* constants (designated initializers) to the TSDL
 * absolute path prefix string that names that dynamic scope.
 * NOTE(review): the closing "};" of this array is not visible in this
 * extraction — confirm against upstream.
 */
37 static const char * const absolute_path_prefixes
[] = {
38 [CTF_NODE_ENV
] = "env.",
39 [CTF_NODE_TRACE_PACKET_HEADER
] = "trace.packet.header.",
40 [CTF_NODE_STREAM_PACKET_CONTEXT
] = "stream.packet.context.",
41 [CTF_NODE_STREAM_EVENT_HEADER
] = "stream.event.header.",
42 [CTF_NODE_STREAM_EVENT_CONTEXT
] = "stream.event.context.",
43 [CTF_NODE_EVENT_CONTEXT
] = "event.context.",
44 [CTF_NODE_EVENT_FIELDS
] = "event.fields.",
/*
 * Number of '.'-separated tokens each prefix above consumes, e.g.
 * "trace.packet.header." -> 3 tokens. Indexed by the same CTF_NODE_*
 * constants as absolute_path_prefixes.
 * NOTE(review): upstream likely declares this "static const"; a line may
 * have been dropped by the extraction — confirm.
 */
47 const int absolute_path_prefix_token_counts
[] = {
49 [CTF_NODE_TRACE_PACKET_HEADER
] = 3,
50 [CTF_NODE_STREAM_PACKET_CONTEXT
] = 3,
51 [CTF_NODE_STREAM_EVENT_HEADER
] = 3,
52 [CTF_NODE_STREAM_EVENT_CONTEXT
] = 3,
53 [CTF_NODE_EVENT_CONTEXT
] = 2,
54 [CTF_NODE_EVENT_FIELDS
] = 2,
/*
 * Human-readable names for enum ctf_type_id values; used by print_path()
 * when reporting resolved fields in verbose mode.
 */
57 static const char * const type_names
[] = {
58 [CTF_TYPE_UNKNOWN
] = "unknown",
59 [CTF_TYPE_INTEGER
] = "integer",
60 [CTF_TYPE_FLOAT
] = "float",
61 [CTF_TYPE_ENUM
] = "enumeration",
62 [CTF_TYPE_STRING
] = "string",
63 [CTF_TYPE_STRUCT
] = "structure",
64 [CTF_TYPE_UNTAGGED_VARIANT
] = "untagged variant",
65 [CTF_TYPE_VARIANT
] = "variant",
66 [CTF_TYPE_ARRAY
] = "array",
67 [CTF_TYPE_SEQUENCE
] = "sequence",
/*
 * Forward declarations for the mutually recursive visit helpers defined
 * below. NOTE(review): upstream likely prefixes these with "static" on a
 * separate line that this extraction dropped — confirm.
 */
71 int field_type_visit(struct bt_ctf_field_type
*type
,
72 struct ctf_type_visitor_context
*context
,
73 ctf_type_visitor_func func
);
76 int field_type_recursive_visit(struct bt_ctf_field_type
*type
,
77 struct ctf_type_visitor_context
*context
,
78 ctf_type_visitor_func func
);
/*
 * Return the number of fields of a structure or variant field type.
 * NOTE(review): this extraction is missing original lines (the opening
 * brace, the declaration of field_count, and the final return) — do not
 * treat this text as compilable; compare with upstream.
 */
81 int get_type_field_count(struct bt_ctf_field_type
*type
)
84 enum ctf_type_id type_id
= bt_ctf_field_type_get_type_id(type
);
86 if (type_id
== CTF_TYPE_STRUCT
) {
87 field_count
= bt_ctf_field_type_structure_get_field_count(type
);
88 } else if (type_id
== CTF_TYPE_VARIANT
) {
89 field_count
= bt_ctf_field_type_variant_get_field_count(type
);
/*
 * Return the field type at index i of a structure or variant type.
 * Only structures and variants are handled; field stays NULL otherwise.
 * NOTE(review): the trailing arguments of the two getter calls and the
 * return statement are missing from this extraction — confirm upstream.
 */
95 struct bt_ctf_field_type
*get_type_field(struct bt_ctf_field_type
*type
, int i
)
97 struct bt_ctf_field_type
*field
= NULL
;
98 enum ctf_type_id type_id
= bt_ctf_field_type_get_type_id(type
);
100 if (type_id
== CTF_TYPE_STRUCT
) {
101 bt_ctf_field_type_structure_get_field(type
, NULL
,
103 } else if (type_id
== CTF_TYPE_VARIANT
) {
104 bt_ctf_field_type_variant_get_field(type
,
/*
 * Replace the field at index i of a structure or variant type with the
 * given field type (used by type_resolve_func to swap in a resolved copy).
 * NOTE(review): the continuation arguments of the *_set_field_index calls,
 * the declaration of ret, and the return are missing from this extraction.
 */
112 int set_type_field(struct bt_ctf_field_type
*type
,
113 struct bt_ctf_field_type
*field
, int i
)
116 enum ctf_type_id type_id
= bt_ctf_field_type_get_type_id(type
);
118 if (type_id
== CTF_TYPE_STRUCT
) {
119 ret
= bt_ctf_field_type_structure_set_field_index(
121 } else if (type_id
== CTF_TYPE_VARIANT
) {
122 ret
= bt_ctf_field_type_variant_set_field_index(
/*
 * Look up the index of a named field within a structure or variant type.
 * Returns -1 (the initializer) when the type is neither, or on lookup
 * failure. NOTE(review): the continuation arguments of the
 * *_get_field_name_index calls and the return are missing from this
 * extraction.
 */
130 int get_type_field_index(struct bt_ctf_field_type
*type
, const char *name
)
132 int field_index
= -1;
133 enum ctf_type_id type_id
= bt_ctf_field_type_get_type_id(type
);
135 if (type_id
== CTF_TYPE_STRUCT
) {
136 field_index
= bt_ctf_field_type_structure_get_field_name_index(
138 } else if (type_id
== CTF_TYPE_VARIANT
) {
139 field_index
= bt_ctf_field_type_variant_get_field_name_index(
/* Allocate a new (empty) type stack; backed by a GLib GPtrArray. */
147 ctf_type_stack
*ctf_type_stack_create(void)
149 return g_ptr_array_new();
/*
 * Free the stack and its element array (TRUE frees the pointer segment).
 * NOTE(review): freeing of the individual frames is not visible here —
 * confirm ownership of the stacked frames against upstream.
 */
153 void ctf_type_stack_destroy(
154 ctf_type_stack
*stack
)
156 g_ptr_array_free(stack
, TRUE
);
/*
 * Push a frame onto the type stack. Rejects NULL stack or entry; the
 * error-path body and return statements are missing from this extraction.
 */
160 int ctf_type_stack_push(ctf_type_stack
*stack
,
161 struct ctf_type_stack_frame
*entry
)
165 if (!stack
|| !entry
) {
170 g_ptr_array_add(stack
, entry
);
/*
 * Return the top frame of the stack without removing it, or NULL when the
 * stack is missing or empty. The element at len - 1 is the top.
 */
176 struct ctf_type_stack_frame
*ctf_type_stack_peek(ctf_type_stack
*stack
)
178 struct ctf_type_stack_frame
*entry
= NULL
;
180 if (!stack
|| stack
->len
== 0) {
184 entry
= g_ptr_array_index(stack
, stack
->len
- 1);
/*
 * Pop and return the top frame: peek it, then shrink the array by one.
 * NOTE(review): the NULL-check between peek and the resize is not visible
 * in this extraction — confirm that an empty stack is handled upstream.
 */
190 struct ctf_type_stack_frame
*ctf_type_stack_pop(ctf_type_stack
*stack
)
192 struct ctf_type_stack_frame
*entry
= NULL
;
194 entry
= ctf_type_stack_peek(stack
);
196 g_ptr_array_set_size(stack
, stack
->len
- 1);
/*
 * Visit a single field type: invoke the user callback, recurse into the
 * element type of arrays/sequences, and push a new stack frame when the
 * type is a structure or variant (so the recursive walk can iterate its
 * members). NOTE(review): several original lines (returns, error labels,
 * frame initialization) are missing from this extraction.
 */
202 int field_type_visit(struct bt_ctf_field_type
*type
,
203 struct ctf_type_visitor_context
*context
,
204 ctf_type_visitor_func func
)
207 enum ctf_type_id type_id
;
208 struct ctf_type_stack_frame
*frame
= NULL
;
/* Let the callback process this type first. */
210 ret
= func(type
, context
);
215 type_id
= bt_ctf_field_type_get_type_id(type
);
216 if (type_id
== CTF_TYPE_SEQUENCE
|| type_id
== CTF_TYPE_ARRAY
) {
/* Arrays and sequences wrap a single element type: visit it. */
217 struct bt_ctf_field_type
*element
=
218 type_id
== CTF_TYPE_SEQUENCE
?
219 bt_ctf_field_type_sequence_get_element_type(type
) :
220 bt_ctf_field_type_array_get_element_type(type
);
222 ret
= field_type_recursive_visit(element
, context
, func
);
223 bt_ctf_field_type_put(element
);
229 if (type_id
!= CTF_TYPE_STRUCT
&&
230 type_id
!= CTF_TYPE_VARIANT
) {
231 /* No need to create a new stack frame */
/* Structure/variant: push a frame so members get walked. */
235 frame
= g_new0(struct ctf_type_stack_frame
, 1);
242 ret
= ctf_type_stack_push(context
->stack
, frame
);
/*
 * Depth-first visit of a field type and all of its nested members, driven
 * by the explicit frame stack in the context (no C-stack recursion across
 * struct/variant levels). The "stack_marker" frame pushed for the root
 * type tells us when the walk is complete. NOTE(review): loop construct,
 * returns and error paths are missing from this extraction.
 */
252 int field_type_recursive_visit(struct bt_ctf_field_type
*type
,
253 struct ctf_type_visitor_context
*context
,
254 ctf_type_visitor_func func
)
257 struct ctf_type_stack_frame
*stack_marker
= NULL
;
/* Visit the root; pushes a frame when type is struct/variant. */
259 ret
= field_type_visit(type
, context
, func
);
264 stack_marker
= ctf_type_stack_peek(context
->stack
);
265 if (!stack_marker
|| stack_marker
->type
!= type
) {
266 /* No need for a recursive visit */
271 struct bt_ctf_field_type
*field
;
272 struct ctf_type_stack_frame
*entry
=
273 ctf_type_stack_peek(context
->stack
);
274 int field_count
= get_type_field_count(entry
->type
);
276 if (field_count
<= 0 &&
277 !bt_ctf_field_type_is_structure(entry
->type
)) {
279 * Propagate error if one was given, else return
280 * -1 since empty variants are invalid
283 ret
= field_count
< 0 ? field_count
: -1;
287 if (entry
->index
== field_count
) {
288 /* This level has been completely visited */
289 entry
= ctf_type_stack_pop(context
->stack
);
294 if (entry
== stack_marker
) {
295 /* Completed visit */
302 field
= get_type_field(entry
->type
, entry
->index
);
303 /* Will push a new stack frame if field is struct or variant */
304 ret
= field_type_visit(field
, context
, func
);
305 bt_ctf_field_type_put(field
);
/*
 * Visit an event class: walk its context type then its payload type with
 * the given callback. The visitor context records trace, stream class,
 * event class and the current root node so callbacks can resolve paths.
 * NOTE(review): error checks between steps, the declaration of ret, and
 * the final return are missing from this extraction.
 */
317 int bt_ctf_event_class_visit(struct bt_ctf_event_class
*event_class
,
318 struct bt_ctf_trace
*trace
,
319 struct bt_ctf_stream_class
*stream_class
,
320 ctf_type_visitor_func func
)
323 struct bt_ctf_field_type
*type
;
324 struct ctf_type_visitor_context context
= { 0 };
326 if (!event_class
|| !func
) {
331 context
.trace
= trace
;
332 context
.stream_class
= stream_class
;
333 context
.event_class
= event_class
;
334 context
.stack
= ctf_type_stack_create();
335 if (!context
.stack
) {
340 /* Visit event context */
341 context
.root_node
= CTF_NODE_EVENT_CONTEXT
;
342 type
= bt_ctf_event_class_get_context_type(event_class
);
344 ret
= field_type_recursive_visit(type
, &context
, func
);
345 bt_ctf_field_type_put(type
);
352 /* Visit event payload */
353 context
.root_node
= CTF_NODE_EVENT_FIELDS
;
354 type
= bt_ctf_event_class_get_payload_type(event_class
);
356 ret
= field_type_recursive_visit(type
, &context
, func
);
357 bt_ctf_field_type_put(type
);
365 ctf_type_stack_destroy(context
.stack
);
/*
 * Visit a stream class: walk its packet context, event header and event
 * context types, then visit each of its event classes via
 * bt_ctf_event_class_visit(). NOTE(review): error handling between steps,
 * the trailing arguments of the event-class visit call, and the final
 * return are missing from this extraction.
 */
371 int bt_ctf_stream_class_visit(struct bt_ctf_stream_class
*stream_class
,
372 struct bt_ctf_trace
*trace
,
373 ctf_type_visitor_func func
)
375 int i
, ret
= 0, event_count
;
376 struct bt_ctf_field_type
*type
;
377 struct ctf_type_visitor_context context
= { 0 };
379 if (!stream_class
|| !func
) {
384 context
.trace
= trace
;
385 context
.stream_class
= stream_class
;
386 context
.stack
= ctf_type_stack_create();
387 if (!context
.stack
) {
392 /* Visit stream packet context header */
393 context
.root_node
= CTF_NODE_STREAM_PACKET_CONTEXT
;
394 type
= bt_ctf_stream_class_get_packet_context_type(stream_class
);
396 ret
= field_type_recursive_visit(type
, &context
, func
);
397 bt_ctf_field_type_put(type
);
404 /* Visit stream event header */
405 context
.root_node
= CTF_NODE_STREAM_EVENT_HEADER
;
406 type
= bt_ctf_stream_class_get_event_header_type(stream_class
);
408 ret
= field_type_recursive_visit(type
, &context
, func
);
409 bt_ctf_field_type_put(type
);
416 /* Visit stream event context */
417 context
.root_node
= CTF_NODE_STREAM_EVENT_CONTEXT
;
418 type
= bt_ctf_stream_class_get_event_context_type(stream_class
);
420 ret
= field_type_recursive_visit(type
, &context
, func
);
421 bt_ctf_field_type_put(type
);
428 /* Visit event classes */
429 event_count
= bt_ctf_stream_class_get_event_class_count(stream_class
);
430 if (event_count
< 0) {
434 for (i
= 0; i
< event_count
; i
++) {
435 struct bt_ctf_event_class
*event_class
=
436 bt_ctf_stream_class_get_event_class(stream_class
, i
);
438 ret
= bt_ctf_event_class_visit(event_class
, trace
,
440 bt_ctf_event_class_put(event_class
);
447 ctf_type_stack_destroy(context
.stack
);
/*
 * Resolve a relative (non-prefixed) field path against the type currently
 * on top of the visit stack. Walks the path tokens downward, records each
 * member index in field_path->path_indexes, then prepends the indexes of
 * the frames already on the stack so the final path is rooted at the
 * current root node. On success *resolved_field points to the target
 * type. NOTE(review): several original lines (initial assignment of
 * "field", error gotos, declarations of ret/index/root_path, returns) are
 * missing from this extraction.
 */
453 int set_field_path_relative(struct ctf_type_visitor_context
*context
,
454 struct bt_ctf_field_path
*field_path
,
455 GList
**path_tokens
, struct bt_ctf_field_type
**resolved_field
)
459 struct bt_ctf_field_type
*field
= NULL
;
460 struct ctf_type_stack_frame
*frame
=
461 ctf_type_stack_peek(context
->stack
);
462 size_t token_count
= g_list_length(*path_tokens
), i
;
470 bt_ctf_field_type_get(field
);
471 for (i
= 0; i
< token_count
; i
++) {
472 struct bt_ctf_field_type
*next_field
= NULL
;
473 int field_index
= get_type_field_index(field
,
474 (*path_tokens
)->data
);
476 if (field_index
< 0) {
477 /* Field name not found, abort */
478 printf_verbose("Could not resolve field \"%s\"\n",
479 (char *) (*path_tokens
)->data
);
/* A relative path may only refer back to already-read members. */
484 if (field_index
>= frame
->index
) {
485 printf_verbose("Invalid relative path refers to a member after the current one\n");
490 next_field
= get_type_field(field
, field_index
);
496 bt_ctf_field_type_put(field
);
498 g_array_append_val(field_path
->path_indexes
, field_index
);
501 * Free token and remove from list. This function does not
502 * assume the ownership of path_tokens; it is therefore _not_
503 * a leak to leave elements in this list. The caller should
504 * clean-up what is left (in case of error).
506 free((*path_tokens
)->data
);
507 *path_tokens
= g_list_delete_link(*path_tokens
, *path_tokens
);
510 root_path
= g_array_sized_new(FALSE
, FALSE
,
511 sizeof(int), context
->stack
->len
- 1);
517 /* Set the current root node as the resolved type's root */
518 field_path
->root
= context
->root_node
;
520 * Prepend the current fields' path to the relative path that
521 * was found by walking the stack.
523 for (i
= 0; i
< context
->stack
->len
- 1; i
++) {
525 struct ctf_type_stack_frame
*frame
=
526 g_ptr_array_index(context
->stack
, i
);
528 /* Decrement "index" since it points to the next field */
529 index
= frame
->index
- 1;
530 g_array_append_val(root_path
, index
);
532 g_array_prepend_vals(field_path
->path_indexes
, root_path
->data
,
534 g_array_free(root_path
, TRUE
);
/* NOTE(review): put before returning through *resolved_field — the
 * reference-count contract here is not visible; confirm upstream. */
537 bt_ctf_field_type_put(field
);
538 *resolved_field
= field
;
/*
 * Resolve an absolute field path (one that began with a known dynamic
 * scope prefix). Rejects paths whose root scope comes after the current
 * one in the dynamic scope hierarchy (it could not have been read yet),
 * selects the root field type for the scope, then walks the remaining
 * tokens, appending each member index to field_path->path_indexes.
 * NOTE(review): break statements in the switch, error gotos and the final
 * return are missing from this extraction.
 */
545 int set_field_path_absolute(struct ctf_type_visitor_context
*context
,
546 struct bt_ctf_field_path
*field_path
,
547 GList
**path_tokens
, struct bt_ctf_field_type
**resolved_field
)
550 struct bt_ctf_field_type
*field
= NULL
;
551 size_t token_count
= g_list_length(*path_tokens
), i
;
553 if (field_path
->root
> context
->root_node
) {
555 * The target path's root is lower in the dynamic scope
556 * hierarchy than the current field being visited. This
557 * is invalid since it would not be possible to have read
558 * the target before the current field.
561 printf_verbose("The target path's root is lower in the dynamic scope than the current field.\n");
565 /* Set the appropriate root field */
566 switch (field_path
->root
) {
567 case CTF_NODE_TRACE_PACKET_HEADER
:
568 field
= bt_ctf_trace_get_packet_header_type(context
->trace
);
570 case CTF_NODE_STREAM_PACKET_CONTEXT
:
571 field
= bt_ctf_stream_class_get_packet_context_type(
572 context
->stream_class
);
574 case CTF_NODE_STREAM_EVENT_HEADER
:
575 field
= bt_ctf_stream_class_get_event_header_type(
576 context
->stream_class
);
578 case CTF_NODE_STREAM_EVENT_CONTEXT
:
579 field
= bt_ctf_stream_class_get_event_context_type(
580 context
->stream_class
);
582 case CTF_NODE_EVENT_CONTEXT
:
583 field
= bt_ctf_event_class_get_context_type(
584 context
->event_class
);
586 case CTF_NODE_EVENT_FIELDS
:
587 field
= bt_ctf_event_class_get_payload_type(
588 context
->event_class
);
/* Walk down the root type, one token per nesting level. */
600 for (i
= 0; i
< token_count
; i
++) {
601 int field_index
= get_type_field_index(field
,
602 (*path_tokens
)->data
);
603 struct bt_ctf_field_type
*next_field
= NULL
;
605 if (field_index
< 0) {
606 /* Field name not found, abort */
607 printf_verbose("Could not resolve field \"%s\"\n",
608 (char *) (*path_tokens
)->data
);
613 next_field
= get_type_field(field
, field_index
);
619 bt_ctf_field_type_put(field
);
621 g_array_append_val(field_path
->path_indexes
, field_index
);
624 * Free token and remove from list. This function does not
625 * assume the ownership of path_tokens; it is therefore _not_
626 * a leak to leave elements in this list. The caller should
627 * clean-up what is left (in case of error).
629 free((*path_tokens
)->data
);
630 *path_tokens
= g_list_delete_link(*path_tokens
, *path_tokens
);
634 bt_ctf_field_type_put(field
);
635 *resolved_field
= field
;
/*
 * Parse a '.'-separated path string into tokens, decide whether it is
 * absolute (starts with a CTF 7.3.2 dynamic scope prefix) or relative,
 * and delegate to set_field_path_absolute()/set_field_path_relative().
 * Outputs a newly created *field_path and the *resolved_field type.
 * NOTE(review): error checks (strdup/create failures), loop braces,
 * declarations of i/j/ret, and return statements are missing from this
 * extraction.
 */
641 int get_field_path(struct ctf_type_visitor_context
*context
,
642 const char *path
, struct bt_ctf_field_path
**field_path
,
643 struct bt_ctf_field_type
**resolved_field
)
646 GList
*path_tokens
= NULL
;
647 char *name_copy
, *save_ptr
, *token
;
649 /* Tokenize path to a list of strings */
650 name_copy
= strdup(path
);
655 token
= strtok_r(name_copy
, ".", &save_ptr
);
657 char *token_string
= strdup(token
);
663 path_tokens
= g_list_append(path_tokens
, token_string
);
664 token
= strtok_r(NULL
, ".", &save_ptr
);
672 *field_path
= bt_ctf_field_path_create();
678 /* Check if the path is absolute */
679 for (i
= 0; i
< sizeof(absolute_path_prefixes
) / sizeof(char *); i
++) {
683 * Chech if "path" starts with a known absolute path prefix.
684 * Refer to CTF 7.3.2 STATIC AND DYNAMIC SCOPES.
686 if (strncmp(path
, absolute_path_prefixes
[i
],
687 strlen(absolute_path_prefixes
[i
]))) {
688 /* Wrong prefix, try the next one */
693 * Remove the first n tokens of this prefix.
694 * e.g. trace.packet.header: remove the first 3 tokens.
696 for (j
= 0; j
< absolute_path_prefix_token_counts
[i
]; j
++) {
697 free(path_tokens
->data
);
698 path_tokens
= g_list_delete_link(
699 path_tokens
, path_tokens
);
702 /* i maps to enum bt_ctf_node constants */
703 (*field_path
)->root
= (enum bt_ctf_node
) i
;
707 if ((*field_path
)->root
== CTF_NODE_UNKNOWN
) {
709 ret
= set_field_path_relative(context
,
710 *field_path
, &path_tokens
, resolved_field
);
716 ret
= set_field_path_absolute(context
,
717 *field_path
, &path_tokens
, resolved_field
);
/* Free whatever tokens remain; ownership stayed with this function. */
727 g_list_foreach(path_tokens
, (GFunc
) free
, NULL
);
728 g_list_free(path_tokens
);
733 bt_ctf_field_path_destroy(*field_path
);
/*
 * Verbose-mode helper: print the resolved field's name, type name and the
 * index path (root scope prefix followed by per-level member indexes).
 * Out-of-range type ids are clamped to CTF_TYPE_UNKNOWN before indexing
 * type_names.
 */
739 void print_path(const char *field_name
,
740 struct bt_ctf_field_type
*resolved_type
,
741 struct bt_ctf_field_path
*field_path
)
744 enum ctf_type_id type_id
= bt_ctf_field_type_get_type_id(
747 if (type_id
< CTF_TYPE_UNKNOWN
|| type_id
>= NR_CTF_TYPES
) {
748 type_id
= CTF_TYPE_UNKNOWN
;
751 printf_verbose("Resolved field \"%s\" as type \"%s\", ",
752 field_name
, type_names
[type_id
]);
753 printf_verbose("path: %s",
754 absolute_path_prefixes
[field_path
->root
]);
756 for (i
= 0; i
< field_path
->path_indexes
->len
; i
++) {
757 printf_verbose(" %d",
758 g_array_index(field_path
->path_indexes
, int, i
));
760 printf_verbose("\n");
/*
 * Visitor callback that resolves the tag of variants and the length field
 * of sequences: looks up the referenced field path, validates the resolved
 * type (enum for a variant tag; unsigned integer for a sequence length),
 * then replaces the original field in its parent with a resolved copy.
 * The copy is made because the same type object may be shared by several
 * structures. NOTE(review): early returns, goto labels, the expected-type
 * enum constants in the two comparisons, and the final return are missing
 * from this extraction.
 */
764 int type_resolve_func(struct bt_ctf_field_type
*type
,
765 struct ctf_type_visitor_context
*context
)
768 enum ctf_type_id type_id
= bt_ctf_field_type_get_type_id(type
);
769 const char *field_name
= NULL
;
770 struct bt_ctf_field_path
*field_path
= NULL
;
771 struct bt_ctf_field_type
*resolved_type
= NULL
;
772 struct bt_ctf_field_type
*type_copy
= NULL
;
773 struct ctf_type_stack_frame
*frame
;
/* Only sequences and variants carry a path to resolve. */
775 if (type_id
!= CTF_TYPE_SEQUENCE
&&
776 type_id
!= CTF_TYPE_VARIANT
) {
780 field_name
= type_id
== CTF_TYPE_SEQUENCE
?
781 bt_ctf_field_type_sequence_get_length_field_name(type
) :
782 bt_ctf_field_type_variant_get_tag_name(type
);
788 ret
= get_field_path(context
, field_name
,
789 &field_path
, &resolved_type
);
794 assert(field_path
&& resolved_type
);
796 /* Print path if in verbose mode */
797 print_path(field_name
, resolved_type
, field_path
);
800 * Set field type's path.
802 * The original field is copied since it may have been reused
803 * in multiple structures which would cause a conflict.
805 type_copy
= bt_ctf_field_type_copy(type
);
811 if (type_id
== CTF_TYPE_VARIANT
) {
812 if (bt_ctf_field_type_get_type_id(resolved_type
) !=
814 printf_verbose("Invalid variant tag \"%s\"; expected enum\n", field_name
);
818 ret
= bt_ctf_field_type_variant_set_tag(
819 type_copy
, resolved_type
);
824 ret
= bt_ctf_field_type_variant_set_tag_field_path(type_copy
,
831 if (bt_ctf_field_type_get_type_id(resolved_type
) !=
833 printf_verbose("Invalid sequence length field \"%s\"; expected integer\n", field_name
);
838 if (bt_ctf_field_type_integer_get_signed(resolved_type
) != 0) {
839 printf_verbose("Invalid sequence length field \"%s\"; integer should be unsigned\n", field_name
);
844 ret
= bt_ctf_field_type_sequence_set_length_field_path(
845 type_copy
, field_path
);
851 /* Replace the original field */
852 frame
= ctf_type_stack_peek(context
->stack
);
853 ret
= set_type_field(frame
->type
, type_copy
, frame
->index
);
854 bt_ctf_field_type_put(type_copy
);
/*
 * Visit an entire trace: walk the trace packet header type, then visit
 * every stream class (which in turn visits its event classes). The root
 * node is reset to CTF_NODE_UNKNOWN after the header so later paths are
 * classified correctly. NOTE(review): error checks, the trailing argument
 * of the stream-class visit call, and the final return are missing from
 * this extraction.
 */
860 int bt_ctf_trace_visit(struct bt_ctf_trace
*trace
,
861 ctf_type_visitor_func func
)
863 int i
, stream_count
, ret
= 0;
864 struct bt_ctf_field_type
*type
= NULL
;
865 struct ctf_type_visitor_context visitor_ctx
= { 0 };
867 if (!trace
|| !func
) {
872 visitor_ctx
.trace
= trace
;
873 visitor_ctx
.stack
= ctf_type_stack_create();
874 if (!visitor_ctx
.stack
) {
879 /* Visit trace packet header */
880 type
= bt_ctf_trace_get_packet_header_type(trace
);
882 visitor_ctx
.root_node
= CTF_NODE_TRACE_PACKET_HEADER
;
883 ret
= field_type_recursive_visit(type
, &visitor_ctx
, func
);
884 visitor_ctx
.root_node
= CTF_NODE_UNKNOWN
;
885 bt_ctf_field_type_put(type
);
892 stream_count
= bt_ctf_trace_get_stream_class_count(trace
);
893 for (i
= 0; i
< stream_count
; i
++) {
894 struct bt_ctf_stream_class
*stream_class
=
895 bt_ctf_trace_get_stream_class(trace
, i
);
898 ret
= bt_ctf_stream_class_visit(stream_class
, trace
,
900 bt_ctf_stream_class_put(stream_class
);
906 if (visitor_ctx
.stack
) {
907 ctf_type_stack_destroy(visitor_ctx
.stack
);
/* Convenience wrapper: resolve all sequence/variant paths in a trace. */
913 int bt_ctf_trace_resolve_types(struct bt_ctf_trace
*trace
)
915 return bt_ctf_trace_visit(trace
, type_resolve_func
);
/*
 * Convenience wrapper: resolve all sequence/variant paths in one stream
 * class. NOTE(review): the final argument of the call (presumably
 * type_resolve_func, matching the trace wrapper above) is missing from
 * this extraction — confirm upstream.
 */
919 int bt_ctf_stream_class_resolve_types(struct bt_ctf_stream_class
*stream_class
,
920 struct bt_ctf_trace
*trace
)
922 return bt_ctf_stream_class_visit(stream_class
, trace
,
927 int bt_ctf_event_class_resolve_types(struct bt_ctf_event_class
*event_class
,
928 struct bt_ctf_trace
*trace
,
929 struct bt_ctf_stream_class
*stream_class
)
931 return bt_ctf_event_class_visit(event_class
, trace
, stream_class
,