/*
 * Babeltrace CTF IR - Trace Visitor
 *
 * Copyright 2015 Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * Author: Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
29 #include <babeltrace/ctf-ir/event.h>
30 #include <babeltrace/ctf-ir/stream-class.h>
31 #include <babeltrace/ctf-ir/visitor-internal.h>
32 #include <babeltrace/ctf-ir/event-types-internal.h>
33 #include <babeltrace/ctf-ir/event-internal.h>
34 #include <babeltrace/babeltrace-internal.h>
36 /* TSDL dynamic scope prefixes defined in CTF Section 7.3.2 */
37 static const char * const absolute_path_prefixes
[] = {
38 [CTF_NODE_ENV
] = "env.",
39 [CTF_NODE_TRACE_PACKET_HEADER
] = "trace.packet.header.",
40 [CTF_NODE_STREAM_PACKET_CONTEXT
] = "stream.packet.context.",
41 [CTF_NODE_STREAM_EVENT_HEADER
] = "stream.event.header.",
42 [CTF_NODE_STREAM_EVENT_CONTEXT
] = "stream.event.context.",
43 [CTF_NODE_EVENT_CONTEXT
] = "event.context.",
44 [CTF_NODE_EVENT_FIELDS
] = "event.fields.",
47 const int absolute_path_prefix_token_counts
[] = {
49 [CTF_NODE_TRACE_PACKET_HEADER
] = 3,
50 [CTF_NODE_STREAM_PACKET_CONTEXT
] = 3,
51 [CTF_NODE_STREAM_EVENT_HEADER
] = 3,
52 [CTF_NODE_STREAM_EVENT_CONTEXT
] = 3,
53 [CTF_NODE_EVENT_CONTEXT
] = 2,
54 [CTF_NODE_EVENT_FIELDS
] = 2,
57 static const char * const type_names
[] = {
58 [CTF_TYPE_UNKNOWN
] = "unknown",
59 [CTF_TYPE_INTEGER
] = "integer",
60 [CTF_TYPE_FLOAT
] = "float",
61 [CTF_TYPE_ENUM
] = "enumeration",
62 [CTF_TYPE_STRING
] = "string",
63 [CTF_TYPE_STRUCT
] = "structure",
64 [CTF_TYPE_UNTAGGED_VARIANT
] = "untagged variant",
65 [CTF_TYPE_VARIANT
] = "variant",
66 [CTF_TYPE_ARRAY
] = "array",
67 [CTF_TYPE_SEQUENCE
] = "sequence",
71 int field_type_visit(struct bt_ctf_field_type
*type
,
72 struct ctf_type_visitor_context
*context
,
73 ctf_type_visitor_func func
);
76 int field_type_recursive_visit(struct bt_ctf_field_type
*type
,
77 struct ctf_type_visitor_context
*context
,
78 ctf_type_visitor_func func
);
81 int get_type_field_count(struct bt_ctf_field_type
*type
)
84 enum ctf_type_id type_id
= bt_ctf_field_type_get_type_id(type
);
86 if (type_id
== CTF_TYPE_STRUCT
) {
87 field_count
= bt_ctf_field_type_structure_get_field_count(type
);
88 } else if (type_id
== CTF_TYPE_VARIANT
) {
89 field_count
= bt_ctf_field_type_variant_get_field_count(type
);
95 struct bt_ctf_field_type
*get_type_field(struct bt_ctf_field_type
*type
, int i
)
97 struct bt_ctf_field_type
*field
= NULL
;
98 const char *unused_name
;
99 enum ctf_type_id type_id
= bt_ctf_field_type_get_type_id(type
);
101 if (type_id
== CTF_TYPE_STRUCT
) {
102 bt_ctf_field_type_structure_get_field(type
, &unused_name
,
104 } else if (type_id
== CTF_TYPE_VARIANT
) {
105 bt_ctf_field_type_variant_get_field(type
,
106 &unused_name
, &field
, i
);
113 int get_type_field_index(struct bt_ctf_field_type
*type
, const char *name
)
115 int field_index
= -1;
116 enum ctf_type_id type_id
= bt_ctf_field_type_get_type_id(type
);
118 if (type_id
== CTF_TYPE_STRUCT
) {
119 field_index
= bt_ctf_field_type_structure_get_field_name_index(
121 } else if (type_id
== CTF_TYPE_VARIANT
) {
122 field_index
= bt_ctf_field_type_variant_get_field_name_index(
130 ctf_type_stack
*ctf_type_stack_create(void)
132 return g_ptr_array_new();
136 void ctf_type_stack_destroy(
137 ctf_type_stack
*stack
)
139 g_ptr_array_free(stack
, TRUE
);
143 int ctf_type_stack_push(ctf_type_stack
*stack
,
144 struct ctf_type_stack_frame
*entry
)
148 if (!stack
|| !entry
) {
153 g_ptr_array_add(stack
, entry
);
159 struct ctf_type_stack_frame
*ctf_type_stack_peek(ctf_type_stack
*stack
)
161 struct ctf_type_stack_frame
*entry
= NULL
;
163 if (!stack
|| stack
->len
== 0) {
167 entry
= g_ptr_array_index(stack
, stack
->len
- 1);
173 struct ctf_type_stack_frame
*ctf_type_stack_pop(ctf_type_stack
*stack
)
175 struct ctf_type_stack_frame
*entry
= NULL
;
177 entry
= ctf_type_stack_peek(stack
);
179 g_ptr_array_set_size(stack
, stack
->len
- 1);
185 int field_type_visit(struct bt_ctf_field_type
*type
,
186 struct ctf_type_visitor_context
*context
,
187 ctf_type_visitor_func func
)
190 enum ctf_type_id type_id
;
191 struct ctf_type_stack_frame
*frame
= NULL
;
193 ret
= func(type
, context
);
198 type_id
= bt_ctf_field_type_get_type_id(type
);
199 if (type_id
== CTF_TYPE_SEQUENCE
|| type_id
== CTF_TYPE_ARRAY
) {
200 struct bt_ctf_field_type
*element
=
201 type_id
== CTF_TYPE_SEQUENCE
?
202 bt_ctf_field_type_sequence_get_element_type(type
) :
203 bt_ctf_field_type_array_get_element_type(type
);
205 ret
= field_type_recursive_visit(element
, context
, func
);
206 bt_ctf_field_type_put(element
);
212 if (type_id
!= CTF_TYPE_STRUCT
&&
213 type_id
!= CTF_TYPE_VARIANT
) {
214 /* No need to create a new stack frame */
218 frame
= g_new0(struct ctf_type_stack_frame
, 1);
225 ret
= ctf_type_stack_push(context
->stack
, frame
);
235 int field_type_recursive_visit(struct bt_ctf_field_type
*type
,
236 struct ctf_type_visitor_context
*context
,
237 ctf_type_visitor_func func
)
240 struct ctf_type_stack_frame
*stack_marker
= NULL
;
242 ret
= field_type_visit(type
, context
, func
);
247 stack_marker
= ctf_type_stack_peek(context
->stack
);
248 if (!stack_marker
|| stack_marker
->type
!= type
) {
249 /* No need for a recursive visit */
254 struct bt_ctf_field_type
*field
;
255 struct ctf_type_stack_frame
*entry
=
256 ctf_type_stack_peek(context
->stack
);
257 int field_count
= get_type_field_count(entry
->type
);
259 if (field_count
<= 0) {
261 * Propagate error if one was given, else return
262 * -1 since empty structures or variants are invalid
265 ret
= field_count
< 0 ? field_count
: -1;
269 if (entry
->index
== field_count
) {
270 /* This level has been completely visited */
271 entry
= ctf_type_stack_pop(context
->stack
);
276 if (entry
== stack_marker
) {
277 /* Completed visit */
284 field
= get_type_field(entry
->type
, entry
->index
);
285 /* Will push a new stack frame if field is struct or variant */
286 ret
= field_type_visit(field
, context
, func
);
287 bt_ctf_field_type_put(field
);
299 int bt_ctf_event_class_visit(struct bt_ctf_event_class
*event_class
,
300 struct bt_ctf_trace
*trace
,
301 struct bt_ctf_stream_class
*stream_class
,
302 ctf_type_visitor_func func
)
305 struct bt_ctf_field_type
*type
;
306 struct ctf_type_visitor_context context
= { 0 };
308 if (!event_class
|| !func
) {
313 context
.trace
= trace
;
314 context
.stream_class
= stream_class
;
315 context
.event_class
= event_class
;
316 context
.stack
= ctf_type_stack_create();
317 if (!context
.stack
) {
322 /* Visit event context */
323 context
.root_node
= CTF_NODE_EVENT_CONTEXT
;
324 type
= bt_ctf_event_class_get_context_type(event_class
);
326 ret
= field_type_recursive_visit(type
, &context
, func
);
327 bt_ctf_field_type_put(type
);
334 /* Visit event payload */
335 context
.root_node
= CTF_NODE_EVENT_FIELDS
;
336 type
= bt_ctf_event_class_get_payload_type(event_class
);
338 ret
= field_type_recursive_visit(type
, &context
, func
);
339 bt_ctf_field_type_put(type
);
347 ctf_type_stack_destroy(context
.stack
);
353 int bt_ctf_stream_class_visit(struct bt_ctf_stream_class
*stream_class
,
354 struct bt_ctf_trace
*trace
,
355 ctf_type_visitor_func func
)
357 int i
, ret
= 0, event_count
;
358 struct bt_ctf_field_type
*type
;
359 struct ctf_type_visitor_context context
= { 0 };
361 if (!stream_class
|| !func
) {
366 context
.trace
= trace
;
367 context
.stream_class
= stream_class
;
368 context
.stack
= ctf_type_stack_create();
369 if (!context
.stack
) {
374 /* Visit stream packet context header */
375 context
.root_node
= CTF_NODE_STREAM_PACKET_CONTEXT
;
376 type
= bt_ctf_stream_class_get_packet_context_type(stream_class
);
378 ret
= field_type_recursive_visit(type
, &context
, func
);
379 bt_ctf_field_type_put(type
);
386 /* Visit stream event header */
387 context
.root_node
= CTF_NODE_STREAM_EVENT_HEADER
;
388 type
= bt_ctf_stream_class_get_event_header_type(stream_class
);
390 ret
= field_type_recursive_visit(type
, &context
, func
);
391 bt_ctf_field_type_put(type
);
398 /* Visit stream event context */
399 context
.root_node
= CTF_NODE_STREAM_EVENT_CONTEXT
;
400 type
= bt_ctf_stream_class_get_event_context_type(stream_class
);
402 ret
= field_type_recursive_visit(type
, &context
, func
);
403 bt_ctf_field_type_put(type
);
410 /* Visit event classes */
411 event_count
= bt_ctf_stream_class_get_event_class_count(stream_class
);
412 if (event_count
< 0) {
416 for (i
= 0; i
< event_count
; i
++) {
417 struct bt_ctf_event_class
*event_class
=
418 bt_ctf_stream_class_get_event_class(stream_class
, i
);
420 ret
= bt_ctf_event_class_visit(event_class
, trace
,
422 bt_ctf_event_class_put(event_class
);
429 ctf_type_stack_destroy(context
.stack
);
435 int set_field_path_relative(struct ctf_type_visitor_context
*context
,
436 struct bt_ctf_field_path
*field_path
,
437 GList
**path_tokens
, struct bt_ctf_field_type
**resolved_field
)
441 struct bt_ctf_field_type
*field
= NULL
;
442 struct ctf_type_stack_frame
*frame
=
443 ctf_type_stack_peek(context
->stack
);
444 size_t token_count
= g_list_length(*path_tokens
), i
;
452 bt_ctf_field_type_get(field
);
453 for (i
= 0; i
< token_count
; i
++) {
454 struct bt_ctf_field_type
*next_field
= NULL
;
455 int field_index
= get_type_field_index(field
,
456 (*path_tokens
)->data
);
458 if (field_index
< 0) {
459 /* Field name not found, abort */
460 printf_verbose("Could not resolve field \"%s\"\n",
461 (char *) (*path_tokens
)->data
);
466 if (field_index
>= frame
->index
) {
467 printf_verbose("Invalid relative path refers to a member after the current one\n");
472 next_field
= get_type_field(field
, field_index
);
478 bt_ctf_field_type_put(field
);
480 g_array_append_val(field_path
->path_indexes
, field_index
);
483 * Free token and remove from list. This function does not
484 * assume the ownership of path_tokens; it is therefore _not_
485 * a leak to leave elements in this list. The caller should
486 * clean-up what is left (in case of error).
488 free((*path_tokens
)->data
);
489 *path_tokens
= g_list_delete_link(*path_tokens
, *path_tokens
);
492 root_path
= g_array_sized_new(FALSE
, FALSE
,
493 sizeof(int), context
->stack
->len
- 1);
499 /* Set the current root node as the resolved type's root */
500 field_path
->root
= context
->root_node
;
502 * Prepend the current fields' path to the relative path that
503 * was found by walking the stack.
505 for (i
= 0; i
< context
->stack
->len
- 1; i
++) {
507 struct ctf_type_stack_frame
*frame
=
508 g_ptr_array_index(context
->stack
, i
);
510 /* Decrement "index" since it points to the next field */
511 index
= frame
->index
- 1;
512 g_array_append_val(root_path
, index
);
514 g_array_prepend_vals(field_path
->path_indexes
, root_path
->data
,
516 g_array_free(root_path
, TRUE
);
519 bt_ctf_field_type_put(field
);
520 *resolved_field
= field
;
527 int set_field_path_absolute(struct ctf_type_visitor_context
*context
,
528 struct bt_ctf_field_path
*field_path
,
529 GList
**path_tokens
, struct bt_ctf_field_type
**resolved_field
)
532 struct bt_ctf_field_type
*field
= NULL
;
533 size_t token_count
= g_list_length(*path_tokens
), i
;
535 if (field_path
->root
> context
->root_node
) {
537 * The target path's root is lower in the dynamic scope
538 * hierarchy than the current field being visited. This
539 * is invalid since it would not be possible to have read
540 * the target before the current field.
543 printf_verbose("The target path's root is lower in the dynamic scope than the current field.\n");
547 /* Set the appropriate root field */
548 switch (field_path
->root
) {
549 case CTF_NODE_TRACE_PACKET_HEADER
:
550 field
= bt_ctf_trace_get_packet_header_type(context
->trace
);
552 case CTF_NODE_STREAM_PACKET_CONTEXT
:
553 field
= bt_ctf_stream_class_get_packet_context_type(
554 context
->stream_class
);
556 case CTF_NODE_STREAM_EVENT_HEADER
:
557 field
= bt_ctf_stream_class_get_event_header_type(
558 context
->stream_class
);
560 case CTF_NODE_STREAM_EVENT_CONTEXT
:
561 field
= bt_ctf_stream_class_get_event_context_type(
562 context
->stream_class
);
564 case CTF_NODE_EVENT_CONTEXT
:
565 field
= bt_ctf_event_class_get_context_type(
566 context
->event_class
);
568 case CTF_NODE_EVENT_FIELDS
:
569 field
= bt_ctf_event_class_get_payload_type(
570 context
->event_class
);
582 for (i
= 0; i
< token_count
; i
++) {
583 int field_index
= get_type_field_index(field
,
584 (*path_tokens
)->data
);
585 struct bt_ctf_field_type
*next_field
= NULL
;
587 if (field_index
< 0) {
588 /* Field name not found, abort */
589 printf_verbose("Could not resolve field \"%s\"\n",
590 (char *) (*path_tokens
)->data
);
595 next_field
= get_type_field(field
, field_index
);
601 bt_ctf_field_type_put(field
);
603 g_array_append_val(field_path
->path_indexes
, field_index
);
606 * Free token and remove from list. This function does not
607 * assume the ownership of path_tokens; it is therefore _not_
608 * a leak to leave elements in this list. The caller should
609 * clean-up what is left (in case of error).
611 free((*path_tokens
)->data
);
612 *path_tokens
= g_list_delete_link(*path_tokens
, *path_tokens
);
616 bt_ctf_field_type_put(field
);
617 *resolved_field
= field
;
623 int get_field_path(struct ctf_type_visitor_context
*context
,
624 const char *path
, struct bt_ctf_field_path
**field_path
,
625 struct bt_ctf_field_type
**resolved_field
)
628 GList
*path_tokens
= NULL
;
629 char *name_copy
, *save_ptr
, *token
;
631 /* Tokenize path to a list of strings */
632 name_copy
= strdup(path
);
637 token
= strtok_r(name_copy
, ".", &save_ptr
);
639 char *token_string
= strdup(token
);
645 path_tokens
= g_list_append(path_tokens
, token_string
);
646 token
= strtok_r(NULL
, ".", &save_ptr
);
654 *field_path
= bt_ctf_field_path_create();
660 /* Check if the path is absolute */
661 for (i
= 0; i
< sizeof(absolute_path_prefixes
) / sizeof(char *); i
++) {
665 * Chech if "path" starts with a known absolute path prefix.
666 * Refer to CTF 7.3.2 STATIC AND DYNAMIC SCOPES.
668 if (strncmp(path
, absolute_path_prefixes
[i
],
669 strlen(absolute_path_prefixes
[i
]))) {
670 /* Wrong prefix, try the next one */
675 * Remove the first n tokens of this prefix.
676 * e.g. trace.packet.header: remove the first 3 tokens.
678 for (j
= 0; j
< absolute_path_prefix_token_counts
[i
]; j
++) {
679 free(path_tokens
->data
);
680 path_tokens
= g_list_delete_link(
681 path_tokens
, path_tokens
);
684 /* i maps to enum bt_ctf_node constants */
685 (*field_path
)->root
= (enum bt_ctf_node
) i
;
689 if ((*field_path
)->root
== CTF_NODE_UNKNOWN
) {
691 ret
= set_field_path_relative(context
,
692 *field_path
, &path_tokens
, resolved_field
);
698 ret
= set_field_path_absolute(context
,
699 *field_path
, &path_tokens
, resolved_field
);
709 g_list_free_full(path_tokens
, free
);
714 bt_ctf_field_path_destroy(*field_path
);
720 void print_path(const char *field_name
,
721 struct bt_ctf_field_type
*resolved_type
,
722 struct bt_ctf_field_path
*field_path
)
725 enum ctf_type_id type_id
= bt_ctf_field_type_get_type_id(
728 if (type_id
< CTF_TYPE_UNKNOWN
|| type_id
>= NR_CTF_TYPES
) {
729 type_id
= CTF_TYPE_UNKNOWN
;
732 printf_verbose("Resolved field \"%s\" as type \"%s\", ",
734 type_names
[type_id
]);
735 printf_verbose("path: %s",
736 absolute_path_prefixes
[field_path
->root
]);
738 for (i
= 0; i
< field_path
->path_indexes
->len
; i
++) {
739 printf_verbose(" %d",
740 g_array_index(field_path
->path_indexes
, int, i
));
742 printf_verbose("\n");
746 int type_resolve_func(struct bt_ctf_field_type
*type
,
747 struct ctf_type_visitor_context
*context
)
750 enum ctf_type_id type_id
= bt_ctf_field_type_get_type_id(type
);
751 const char *field_name
= NULL
;
752 struct bt_ctf_field_path
*field_path
= NULL
;
753 struct bt_ctf_field_type
*resolved_type
= NULL
;
755 if (type_id
!= CTF_TYPE_SEQUENCE
&&
756 type_id
!= CTF_TYPE_VARIANT
) {
760 field_name
= type_id
== CTF_TYPE_SEQUENCE
?
761 bt_ctf_field_type_sequence_get_length_field_name(type
) :
762 bt_ctf_field_type_variant_get_tag_name(type
);
768 ret
= get_field_path(context
, field_name
,
769 &field_path
, &resolved_type
);
774 assert(field_path
&& resolved_type
);
776 /* Print path if in verbose mode */
777 print_path(field_name
, resolved_type
, field_path
);
779 /* Set type's path */
780 if (type_id
== CTF_TYPE_VARIANT
) {
781 if (bt_ctf_field_type_get_type_id(resolved_type
) !=
783 printf_verbose("Invalid variant tag \"%s\"; expected enum\n", field_name
);
787 ret
= bt_ctf_field_type_variant_set_tag(type
, resolved_type
);
792 ret
= bt_ctf_field_type_variant_set_tag_field_path(type
,
799 if (bt_ctf_field_type_get_type_id(resolved_type
) !=
801 printf_verbose("Invalid sequence length field \"%s\"; expected integer\n", field_name
);
806 if (bt_ctf_field_type_integer_get_signed(resolved_type
) != 0) {
807 printf_verbose("Invalid sequence length field \"%s\"; integer should be unsigned\n", field_name
);
812 ret
= bt_ctf_field_type_sequence_set_length_field_path(type
,
823 int bt_ctf_trace_visit(struct bt_ctf_trace
*trace
,
824 ctf_type_visitor_func func
)
826 int i
, stream_count
, ret
= 0;
827 struct bt_ctf_field_type
*type
= NULL
;
828 struct ctf_type_visitor_context visitor_ctx
= { 0 };
830 if (!trace
|| !func
) {
835 visitor_ctx
.trace
= trace
;
836 visitor_ctx
.stack
= ctf_type_stack_create();
837 if (!visitor_ctx
.stack
) {
842 /* Visit trace packet header */
843 type
= bt_ctf_trace_get_packet_header_type(trace
);
845 visitor_ctx
.root_node
= CTF_NODE_TRACE_PACKET_HEADER
;
846 ret
= field_type_recursive_visit(type
, &visitor_ctx
, func
);
847 visitor_ctx
.root_node
= CTF_NODE_UNKNOWN
;
848 bt_ctf_field_type_put(type
);
855 stream_count
= bt_ctf_trace_get_stream_class_count(trace
);
856 for (i
= 0; i
< stream_count
; i
++) {
857 struct bt_ctf_stream_class
*stream_class
=
858 bt_ctf_trace_get_stream_class(trace
, i
);
861 ret
= bt_ctf_stream_class_visit(stream_class
, trace
,
863 bt_ctf_stream_class_put(stream_class
);
869 if (visitor_ctx
.stack
) {
870 ctf_type_stack_destroy(visitor_ctx
.stack
);
876 int bt_ctf_trace_resolve_types(struct bt_ctf_trace
*trace
)
878 return bt_ctf_trace_visit(trace
, type_resolve_func
);
882 int bt_ctf_stream_class_resolve_types(struct bt_ctf_stream_class
*stream_class
,
883 struct bt_ctf_trace
*trace
)
885 return bt_ctf_stream_class_visit(stream_class
, trace
,
890 int bt_ctf_event_class_resolve_types(struct bt_ctf_event_class
*event_class
,
891 struct bt_ctf_trace
*trace
,
892 struct bt_ctf_stream_class
*stream_class
)
894 return bt_ctf_event_class_visit(event_class
, trace
, stream_class
,