/*
 * visitor.c
 *
 * Babeltrace CTF IR - Trace Visitor
 *
 * Copyright 2015 Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * Author: Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <babeltrace/ctf-ir/event.h>
#include <babeltrace/ctf-ir/stream-class.h>
#include <babeltrace/ctf-ir/visitor-internal.h>
#include <babeltrace/ctf-ir/event-types-internal.h>
#include <babeltrace/ctf-ir/event-internal.h>
#include <babeltrace/babeltrace-internal.h>
/* Standard and glib headers used directly in this file */
#include <glib.h>
#include <string.h>
#include <stdlib.h>
#include <stdbool.h>
#include <assert.h>

/* TSDL dynamic scope prefixes defined in CTF Section 7.3.2 */
static const char * const absolute_path_prefixes[] = {
	[CTF_NODE_ENV] = "env.",
	[CTF_NODE_TRACE_PACKET_HEADER] = "trace.packet.header.",
	[CTF_NODE_STREAM_PACKET_CONTEXT] = "stream.packet.context.",
	[CTF_NODE_STREAM_EVENT_HEADER] = "stream.event.header.",
	[CTF_NODE_STREAM_EVENT_CONTEXT] = "stream.event.context.",
	[CTF_NODE_EVENT_CONTEXT] = "event.context.",
	[CTF_NODE_EVENT_FIELDS] = "event.fields.",
};

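/*
 * Number of dot-separated tokens making up each prefix above; e.g.
 * "trace.packet.header." is 3 tokens and "env." is 1. Used to strip the
 * prefix from an absolute reference before resolving the remaining
 * tokens.
 */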
static const int absolute_path_prefix_token_counts[] = {
	[CTF_NODE_ENV] = 1,
	[CTF_NODE_TRACE_PACKET_HEADER] = 3,
	[CTF_NODE_STREAM_PACKET_CONTEXT] = 3,
	[CTF_NODE_STREAM_EVENT_HEADER] = 3,
	[CTF_NODE_STREAM_EVENT_CONTEXT] = 3,
	[CTF_NODE_EVENT_CONTEXT] = 2,
	[CTF_NODE_EVENT_FIELDS] = 2,
};

static const char * const type_names[] = {
	[CTF_TYPE_UNKNOWN] = "unknown",
	[CTF_TYPE_INTEGER] = "integer",
	[CTF_TYPE_FLOAT] = "float",
	[CTF_TYPE_ENUM] = "enumeration",
	[CTF_TYPE_STRING] = "string",
	[CTF_TYPE_STRUCT] = "structure",
	[CTF_TYPE_UNTAGGED_VARIANT] = "untagged variant",
	[CTF_TYPE_VARIANT] = "variant",
	[CTF_TYPE_ARRAY] = "array",
	[CTF_TYPE_SEQUENCE] = "sequence",
};

static
int field_type_visit(struct bt_ctf_field_type *type,
		struct ctf_type_visitor_context *context,
		ctf_type_visitor_func func);

static
int field_type_recursive_visit(struct bt_ctf_field_type *type,
		struct ctf_type_visitor_context *context,
		ctf_type_visitor_func func);

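/*
 * Return the number of fields of a structure or variant type, or a
 * negative value for any other kind of type (or on error).
 */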
static inline
int get_type_field_count(struct bt_ctf_field_type *type)
{
	int field_count = -1;
	enum ctf_type_id type_id = bt_ctf_field_type_get_type_id(type);

	if (type_id == CTF_TYPE_STRUCT) {
		field_count = bt_ctf_field_type_structure_get_field_count(type);
	} else if (type_id == CTF_TYPE_VARIANT) {
		field_count = bt_ctf_field_type_variant_get_field_count(type);
	}
	return field_count;
}

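/*
 * Return the type of the field at index i of a structure or variant
 * type (the caller receives a reference to put), or NULL for any other
 * kind of type.
 */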
static inline
struct bt_ctf_field_type *get_type_field(struct bt_ctf_field_type *type, int i)
{
	struct bt_ctf_field_type *field = NULL;
	const char *unused_name;
	enum ctf_type_id type_id = bt_ctf_field_type_get_type_id(type);

	if (type_id == CTF_TYPE_STRUCT) {
		bt_ctf_field_type_structure_get_field(type, &unused_name,
			&field, i);
	} else if (type_id == CTF_TYPE_VARIANT) {
		bt_ctf_field_type_variant_get_field(type,
			&unused_name, &field, i);
	}

	return field;
}

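/*
 * Return the index of the field named "name" in a structure or variant
 * type, or a negative value if the field is not found or the type is of
 * any other kind.
 */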
static inline
int get_type_field_index(struct bt_ctf_field_type *type, const char *name)
{
	int field_index = -1;
	enum ctf_type_id type_id = bt_ctf_field_type_get_type_id(type);

	if (type_id == CTF_TYPE_STRUCT) {
		field_index = bt_ctf_field_type_structure_get_field_name_index(
			type, name);
	} else if (type_id == CTF_TYPE_VARIANT) {
		field_index = bt_ctf_field_type_variant_get_field_name_index(
			type, name);
	}

	return field_index;
}

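/*
 * The visitor's type stack tracks the structures and variants enclosing
 * the type currently being visited. Each frame holds a compound type
 * and the index of the next field to visit within it; the stack itself
 * is a plain GPtrArray of heap-allocated frames owned by the visitor
 * context.
 */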
BT_HIDDEN
ctf_type_stack *ctf_type_stack_create(void)
{
	return g_ptr_array_new();
}

BT_HIDDEN
void ctf_type_stack_destroy(ctf_type_stack *stack)
{
	g_ptr_array_free(stack, TRUE);
}

BT_HIDDEN
int ctf_type_stack_push(ctf_type_stack *stack,
		struct ctf_type_stack_frame *entry)
{
	int ret = 0;

	if (!stack || !entry) {
		ret = -1;
		goto end;
	}

	g_ptr_array_add(stack, entry);
end:
	return ret;
}

BT_HIDDEN
struct ctf_type_stack_frame *ctf_type_stack_peek(ctf_type_stack *stack)
{
	struct ctf_type_stack_frame *entry = NULL;

	if (!stack || stack->len == 0) {
		goto end;
	}

	entry = g_ptr_array_index(stack, stack->len - 1);
end:
	return entry;
}

BT_HIDDEN
struct ctf_type_stack_frame *ctf_type_stack_pop(ctf_type_stack *stack)
{
	struct ctf_type_stack_frame *entry = NULL;

	entry = ctf_type_stack_peek(stack);
	if (entry) {
		g_ptr_array_set_size(stack, stack->len - 1);
	}
	return entry;
}

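/*
 * Visit a single type: invoke the callback on it, recurse into the
 * element type of arrays and sequences, and push a new stack frame for
 * structures and variants so that the caller can iterate over their
 * fields.
 */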
static
int field_type_visit(struct bt_ctf_field_type *type,
		struct ctf_type_visitor_context *context,
		ctf_type_visitor_func func)
{
	int ret;
	enum ctf_type_id type_id;
	struct ctf_type_stack_frame *frame = NULL;

	ret = func(type, context);
	if (ret) {
		goto end;
	}

	type_id = bt_ctf_field_type_get_type_id(type);
	if (type_id == CTF_TYPE_SEQUENCE || type_id == CTF_TYPE_ARRAY) {
		struct bt_ctf_field_type *element =
			type_id == CTF_TYPE_SEQUENCE ?
			bt_ctf_field_type_sequence_get_element_type(type) :
			bt_ctf_field_type_array_get_element_type(type);

		ret = field_type_recursive_visit(element, context, func);
		bt_ctf_field_type_put(element);
		if (ret) {
			goto end;
		}
	}

	if (type_id != CTF_TYPE_STRUCT &&
		type_id != CTF_TYPE_VARIANT) {
		/* No need to create a new stack frame */
		goto end;
	}

	frame = g_new0(struct ctf_type_stack_frame, 1);
	if (!frame) {
		ret = -1;
		goto end;
	}

	frame->type = type;
	ret = ctf_type_stack_push(context->stack, frame);
	if (ret) {
		g_free(frame);
		goto end;
	}
end:
	return ret;
}

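/*
 * Visit a type and all of its nested types in declaration order. The
 * traversal is iterative: compound types push frames onto the context's
 * type stack and the loop below advances the top frame's field index
 * until the frame pushed for "type" itself is popped.
 */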
static
int field_type_recursive_visit(struct bt_ctf_field_type *type,
		struct ctf_type_visitor_context *context,
		ctf_type_visitor_func func)
{
	int ret = 0;
	struct ctf_type_stack_frame *stack_marker = NULL;

	ret = field_type_visit(type, context, func);
	if (ret) {
		goto end;
	}

	stack_marker = ctf_type_stack_peek(context->stack);
	if (!stack_marker || stack_marker->type != type) {
		/* No need for a recursive visit */
		goto end;
	}

	while (true) {
		struct bt_ctf_field_type *field;
		struct ctf_type_stack_frame *entry =
			ctf_type_stack_peek(context->stack);
		int field_count = get_type_field_count(entry->type);

		if (field_count <= 0) {
			/*
			 * Propagate error if one was given, else return
			 * -1 since empty structures or variants are invalid
			 * at this point.
			 */
			ret = field_count < 0 ? field_count : -1;
			goto end;
		}

		if (entry->index == field_count) {
			/* This level has been completely visited */
			entry = ctf_type_stack_pop(context->stack);
			/* Compare against the marker before freeing the frame */
			if (entry == stack_marker) {
				/* Completed visit */
				g_free(entry);
				break;
			}

			g_free(entry);
			continue;
		}

		field = get_type_field(entry->type, entry->index);
		/* Will push a new stack frame if field is struct or variant */
		ret = field_type_visit(field, context, func);
		bt_ctf_field_type_put(field);
		if (ret) {
			goto end;
		}

		entry->index++;
	}
end:
	return ret;
}

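/*
 * Visit the context and payload types of an event class. The trace and
 * stream class are carried in the visitor context so that the callback
 * can resolve absolute paths rooted above the event.
 */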
static
int bt_ctf_event_class_visit(struct bt_ctf_event_class *event_class,
		struct bt_ctf_trace *trace,
		struct bt_ctf_stream_class *stream_class,
		ctf_type_visitor_func func)
{
	int ret = 0;
	struct bt_ctf_field_type *type;
	struct ctf_type_visitor_context context = { 0 };

	if (!event_class || !func) {
		ret = -1;
		goto end;
	}

	context.trace = trace;
	context.stream_class = stream_class;
	context.event_class = event_class;
	context.stack = ctf_type_stack_create();
	if (!context.stack) {
		ret = -1;
		goto end;
	}

	/* Visit event context */
	context.root_node = CTF_NODE_EVENT_CONTEXT;
	type = bt_ctf_event_class_get_context_type(event_class);
	if (type) {
		ret = field_type_recursive_visit(type, &context, func);
		bt_ctf_field_type_put(type);
		type = NULL;
		if (ret) {
			goto end;
		}
	}

	/* Visit event payload */
	context.root_node = CTF_NODE_EVENT_FIELDS;
	type = bt_ctf_event_class_get_payload_type(event_class);
	if (type) {
		ret = field_type_recursive_visit(type, &context, func);
		bt_ctf_field_type_put(type);
		type = NULL;
		if (ret) {
			goto end;
		}
	}
end:
	if (context.stack) {
		ctf_type_stack_destroy(context.stack);
	}
	return ret;
}

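/*
 * Visit a stream class: its packet context, event header and event
 * context types, followed by every one of its event classes.
 */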
static
int bt_ctf_stream_class_visit(struct bt_ctf_stream_class *stream_class,
		struct bt_ctf_trace *trace,
		ctf_type_visitor_func func)
{
	int i, ret = 0, event_count;
	struct bt_ctf_field_type *type;
	struct ctf_type_visitor_context context = { 0 };

	if (!stream_class || !func) {
		ret = -1;
		goto end;
	}

	context.trace = trace;
	context.stream_class = stream_class;
	context.stack = ctf_type_stack_create();
	if (!context.stack) {
		ret = -1;
		goto end;
	}

	/* Visit stream packet context */
	context.root_node = CTF_NODE_STREAM_PACKET_CONTEXT;
	type = bt_ctf_stream_class_get_packet_context_type(stream_class);
	if (type) {
		ret = field_type_recursive_visit(type, &context, func);
		bt_ctf_field_type_put(type);
		type = NULL;
		if (ret) {
			goto end;
		}
	}

	/* Visit stream event header */
	context.root_node = CTF_NODE_STREAM_EVENT_HEADER;
	type = bt_ctf_stream_class_get_event_header_type(stream_class);
	if (type) {
		ret = field_type_recursive_visit(type, &context, func);
		bt_ctf_field_type_put(type);
		type = NULL;
		if (ret) {
			goto end;
		}
	}

	/* Visit stream event context */
	context.root_node = CTF_NODE_STREAM_EVENT_CONTEXT;
	type = bt_ctf_stream_class_get_event_context_type(stream_class);
	if (type) {
		ret = field_type_recursive_visit(type, &context, func);
		bt_ctf_field_type_put(type);
		type = NULL;
		if (ret) {
			goto end;
		}
	}

	/* Visit event classes */
	event_count = bt_ctf_stream_class_get_event_class_count(stream_class);
	if (event_count < 0) {
		ret = event_count;
		goto end;
	}
	for (i = 0; i < event_count; i++) {
		struct bt_ctf_event_class *event_class =
			bt_ctf_stream_class_get_event_class(stream_class, i);

		ret = bt_ctf_event_class_visit(event_class, trace,
			stream_class, func);
		bt_ctf_event_class_put(event_class);
		if (ret) {
			goto end;
		}
	}
end:
	if (context.stack) {
		ctf_type_stack_destroy(context.stack);
	}
	return ret;
}

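/*
 * Resolve a relative field path against the fields already visited at
 * the current point of the traversal. The resulting field_path is
 * converted to an absolute path rooted at the visitor's current root
 * node, and the type of the target field is returned through
 * resolved_field.
 */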
static
int set_field_path_relative(struct ctf_type_visitor_context *context,
		struct bt_ctf_field_path *field_path,
		GList **path_tokens, struct bt_ctf_field_type **resolved_field)
{
	int ret = 0;
	GArray *root_path;
	struct bt_ctf_field_type *field = NULL;
	struct ctf_type_stack_frame *frame =
		ctf_type_stack_peek(context->stack);
	size_t token_count = g_list_length(*path_tokens), i;

	if (!frame) {
		ret = -1;
		goto end;
	}

	field = frame->type;
	bt_ctf_field_type_get(field);
	for (i = 0; i < token_count; i++) {
		struct bt_ctf_field_type *next_field = NULL;
		int field_index = get_type_field_index(field,
			(*path_tokens)->data);

		if (field_index < 0) {
			/* Field name not found, abort */
			printf_verbose("Could not resolve field \"%s\"\n",
				(char *) (*path_tokens)->data);
			ret = -1;
			goto end;
		}

		if (field_index >= frame->index) {
			printf_verbose("Invalid relative path refers to a member after the current one\n");
			ret = -1;
			goto end;
		}

		next_field = get_type_field(field, field_index);
		if (!next_field) {
			ret = -1;
			goto end;
		}

		bt_ctf_field_type_put(field);
		field = next_field;
		g_array_append_val(field_path->path_indexes, field_index);

		/*
		 * Free token and remove from list. This function does not
		 * assume the ownership of path_tokens; it is therefore _not_
		 * a leak to leave elements in this list. The caller should
		 * clean up what is left (in case of error).
		 */
		free((*path_tokens)->data);
		*path_tokens = g_list_delete_link(*path_tokens, *path_tokens);
	}

	root_path = g_array_sized_new(FALSE, FALSE,
		sizeof(int), context->stack->len - 1);
	if (!root_path) {
		ret = -1;
		goto end;
	}

	/* Set the current root node as the resolved type's root */
	field_path->root = context->root_node;
	/*
	 * Prepend the indexes of the fields being visited at each
	 * enclosing level, found by walking the stack, to the relative
	 * path resolved above.
	 */
	for (i = 0; i < context->stack->len - 1; i++) {
		int index;
		struct ctf_type_stack_frame *frame =
			g_ptr_array_index(context->stack, i);

		/* Decrement "index" since it points to the next field */
		index = frame->index - 1;
		g_array_append_val(root_path, index);
	}
	g_array_prepend_vals(field_path->path_indexes, root_path->data,
		root_path->len);
	g_array_free(root_path, TRUE);
end:
	if (field) {
		bt_ctf_field_type_put(field);
		*resolved_field = field;
	}

	return ret;
}

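/*
 * Resolve an absolute field path (field_path->root is already set)
 * starting from the corresponding root type of the trace, stream class
 * or event class. The target may not live in a dynamic scope that is
 * read after the visitor's current root node.
 */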
static
int set_field_path_absolute(struct ctf_type_visitor_context *context,
		struct bt_ctf_field_path *field_path,
		GList **path_tokens, struct bt_ctf_field_type **resolved_field)
{
	int ret = 0;
	struct bt_ctf_field_type *field = NULL;
	size_t token_count = g_list_length(*path_tokens), i;

	if (field_path->root > context->root_node) {
		/*
		 * The target path's root is lower in the dynamic scope
		 * hierarchy than the current field being visited. This
		 * is invalid since it would not be possible to have read
		 * the target before the current field.
		 */
		ret = -1;
		printf_verbose("The target path's root is lower in the dynamic scope than the current field.\n");
		goto end;
	}

	/* Set the appropriate root field */
	switch (field_path->root) {
	case CTF_NODE_TRACE_PACKET_HEADER:
		field = bt_ctf_trace_get_packet_header_type(context->trace);
		break;
	case CTF_NODE_STREAM_PACKET_CONTEXT:
		field = bt_ctf_stream_class_get_packet_context_type(
			context->stream_class);
		break;
	case CTF_NODE_STREAM_EVENT_HEADER:
		field = bt_ctf_stream_class_get_event_header_type(
			context->stream_class);
		break;
	case CTF_NODE_STREAM_EVENT_CONTEXT:
		field = bt_ctf_stream_class_get_event_context_type(
			context->stream_class);
		break;
	case CTF_NODE_EVENT_CONTEXT:
		field = bt_ctf_event_class_get_context_type(
			context->event_class);
		break;
	case CTF_NODE_EVENT_FIELDS:
		field = bt_ctf_event_class_get_payload_type(
			context->event_class);
		break;
	default:
		ret = -1;
		goto end;
	}

	if (!field) {
		ret = -1;
		goto end;
	}

	for (i = 0; i < token_count; i++) {
		int field_index = get_type_field_index(field,
			(*path_tokens)->data);
		struct bt_ctf_field_type *next_field = NULL;

		if (field_index < 0) {
			/* Field name not found, abort */
			printf_verbose("Could not resolve field \"%s\"\n",
				(char *) (*path_tokens)->data);
			ret = -1;
			goto end;
		}

		next_field = get_type_field(field, field_index);
		if (!next_field) {
			ret = -1;
			goto end;
		}

		bt_ctf_field_type_put(field);
		field = next_field;
		g_array_append_val(field_path->path_indexes, field_index);

		/*
		 * Free token and remove from list. This function does not
		 * assume the ownership of path_tokens; it is therefore _not_
		 * a leak to leave elements in this list. The caller should
		 * clean up what is left (in case of error).
		 */
		free((*path_tokens)->data);
		*path_tokens = g_list_delete_link(*path_tokens, *path_tokens);
	}
end:
	if (field) {
		bt_ctf_field_type_put(field);
		*resolved_field = field;
	}
	return ret;
}

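/*
 * Tokenize a dotted reference (e.g. the absolute reference
 * "stream.event.context.my_field" or a bare relative name such as
 * "my_field"; the names here are only illustrative) and resolve it to a
 * bt_ctf_field_path plus the type it designates. Absolute references
 * are recognized by the dynamic scope prefixes declared at the top of
 * this file; anything else is resolved relative to the fields already
 * visited.
 */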
static
int get_field_path(struct ctf_type_visitor_context *context,
		const char *path, struct bt_ctf_field_path **field_path,
		struct bt_ctf_field_type **resolved_field)
{
	int i, ret = 0;
	GList *path_tokens = NULL;
	char *name_copy, *save_ptr, *token;

	/* Tokenize path to a list of strings */
	name_copy = strdup(path);
	if (!name_copy) {
		ret = -1;
		goto error;
	}

	token = strtok_r(name_copy, ".", &save_ptr);
	while (token) {
		char *token_string = strdup(token);

		if (!token_string) {
			ret = -1;
			goto error;
		}
		path_tokens = g_list_append(path_tokens, token_string);
		token = strtok_r(NULL, ".", &save_ptr);
	}

	if (!path_tokens) {
		ret = -1;
		goto error;
	}

	*field_path = bt_ctf_field_path_create();
	if (!*field_path) {
		ret = -1;
		goto error;
	}

	/* Check if the path is absolute */
	for (i = 0; i < sizeof(absolute_path_prefixes) / sizeof(char *); i++) {
		int j;

		/*
		 * Check if "path" starts with a known absolute path prefix.
		 * Refer to CTF 7.3.2 STATIC AND DYNAMIC SCOPES.
		 */
		if (strncmp(path, absolute_path_prefixes[i],
			strlen(absolute_path_prefixes[i]))) {
			/* Wrong prefix, try the next one */
			continue;
		}

		/*
		 * Remove the first n tokens of this prefix.
		 * e.g. trace.packet.header: remove the first 3 tokens.
		 */
		for (j = 0; j < absolute_path_prefix_token_counts[i]; j++) {
			free(path_tokens->data);
			path_tokens = g_list_delete_link(
				path_tokens, path_tokens);
		}

		/* i maps to enum bt_ctf_node constants */
		(*field_path)->root = (enum bt_ctf_node) i;
		break;
	}

	if ((*field_path)->root == CTF_NODE_UNKNOWN) {
		/* Relative path */
		ret = set_field_path_relative(context,
			*field_path, &path_tokens, resolved_field);
		if (ret) {
			goto error;
		}
	} else {
		/* Absolute path */
		ret = set_field_path_absolute(context,
			*field_path, &path_tokens, resolved_field);
		if (ret) {
			goto error;
		}
	}
end:
	if (name_copy) {
		free(name_copy);
	}
	if (path_tokens) {
		g_list_free_full(path_tokens, free);
	}
	return ret;
error:
	if (*field_path) {
		bt_ctf_field_path_destroy(*field_path);
		*field_path = NULL;
	}
	goto end;
}

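/* Log a resolved field path (output is only emitted in verbose mode) */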
static
void print_path(const char *field_name,
		struct bt_ctf_field_type *resolved_type,
		struct bt_ctf_field_path *field_path)
{
	int i;

	printf_verbose("Resolved field \"%s\" as type \"%s\", ",
		field_name,
		type_names[bt_ctf_field_type_get_type_id(resolved_type)]);
	printf_verbose("path: %s",
		absolute_path_prefixes[field_path->root]);

	for (i = 0; i < field_path->path_indexes->len; i++) {
		printf_verbose(" %d",
			g_array_index(field_path->path_indexes, int, i));
	}
	printf_verbose("\n");
}

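/*
 * Visitor callback which resolves the length field of sequences and the
 * tag of variants. For instance, a sequence whose length refers to
 * "event.context.count" (an illustrative name) gets its length field
 * path set to that member, which must be an unsigned integer; a
 * variant's tag must resolve to an enumeration.
 */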
static
int type_resolve_func(struct bt_ctf_field_type *type,
		struct ctf_type_visitor_context *context)
{
	int ret = 0;
	enum ctf_type_id type_id = bt_ctf_field_type_get_type_id(type);
	const char *field_name = NULL;
	struct bt_ctf_field_path *field_path = NULL;
	struct bt_ctf_field_type *resolved_type = NULL;

	if (type_id != CTF_TYPE_SEQUENCE &&
		type_id != CTF_TYPE_VARIANT) {
		goto end;
	}

	field_name = type_id == CTF_TYPE_SEQUENCE ?
		bt_ctf_field_type_sequence_get_length_field_name(type) :
		bt_ctf_field_type_variant_get_tag_name(type);
	if (!field_name) {
		ret = -1;
		goto end;
	}

	ret = get_field_path(context, field_name,
		&field_path, &resolved_type);
	if (ret) {
		goto end;
	}

	assert(field_path && resolved_type);

	/* Print path if in verbose mode */
	print_path(field_name, resolved_type, field_path);

	/* Set type's path */
	if (type_id == CTF_TYPE_VARIANT) {
		if (bt_ctf_field_type_get_type_id(resolved_type) !=
			CTF_TYPE_ENUM) {
			printf_verbose("Invalid variant tag \"%s\"; expected enum\n", field_name);
			ret = -1;
			goto end;
		}
		ret = bt_ctf_field_type_variant_set_tag(type, resolved_type);
		if (ret) {
			goto end;
		}

		ret = bt_ctf_field_type_variant_set_tag_field_path(type,
			field_path);
		if (ret) {
			goto end;
		}
	} else {
		/* Sequence */
		if (bt_ctf_field_type_get_type_id(resolved_type) !=
			CTF_TYPE_INTEGER) {
			printf_verbose("Invalid sequence length field \"%s\"; expected integer\n", field_name);
			ret = -1;
			goto end;
		}

		if (bt_ctf_field_type_integer_get_signed(resolved_type) != 0) {
			printf_verbose("Invalid sequence length field \"%s\"; integer should be unsigned\n", field_name);
			ret = -1;
			goto end;
		}

		ret = bt_ctf_field_type_sequence_set_length_field_path(type,
			field_path);
		if (ret) {
			goto end;
		}
	}
end:
	return ret;
}

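/*
 * Apply the visitor callback to every type reachable from a trace: the
 * trace packet header, then each stream class and its event classes.
 */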
BT_HIDDEN
int bt_ctf_trace_visit(struct bt_ctf_trace *trace,
		ctf_type_visitor_func func)
{
	int i, stream_count, ret = 0;
	struct bt_ctf_field_type *type = NULL;
	struct ctf_type_visitor_context visitor_ctx = { 0 };

	if (!trace || !func) {
		ret = -1;
		goto end;
	}

	visitor_ctx.trace = trace;
	visitor_ctx.stack = ctf_type_stack_create();
	if (!visitor_ctx.stack) {
		ret = -1;
		goto end;
	}

	/* Visit trace packet header */
	type = bt_ctf_trace_get_packet_header_type(trace);
	if (type) {
		visitor_ctx.root_node = CTF_NODE_TRACE_PACKET_HEADER;
		ret = field_type_recursive_visit(type, &visitor_ctx, func);
		visitor_ctx.root_node = CTF_NODE_UNKNOWN;
		bt_ctf_field_type_put(type);
		type = NULL;
		if (ret) {
			goto end;
		}
	}

	stream_count = bt_ctf_trace_get_stream_class_count(trace);
	for (i = 0; i < stream_count; i++) {
		struct bt_ctf_stream_class *stream_class =
			bt_ctf_trace_get_stream_class(trace, i);

		/* Visit stream classes */
		ret = bt_ctf_stream_class_visit(stream_class, trace,
			func);
		bt_ctf_stream_class_put(stream_class);
		if (ret) {
			goto end;
		}
	}
end:
	if (visitor_ctx.stack) {
		ctf_type_stack_destroy(visitor_ctx.stack);
	}
	return ret;
}

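/*
 * Convenience wrappers which run the type resolution pass over a whole
 * trace, a single stream class, or a single event class respectively.
 */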
BT_HIDDEN
int bt_ctf_trace_resolve_types(struct bt_ctf_trace *trace)
{
	return bt_ctf_trace_visit(trace, type_resolve_func);
}

BT_HIDDEN
int bt_ctf_stream_class_resolve_types(struct bt_ctf_stream_class *stream_class,
		struct bt_ctf_trace *trace)
{
	return bt_ctf_stream_class_visit(stream_class, trace,
		type_resolve_func);
}

BT_HIDDEN
int bt_ctf_event_class_resolve_types(struct bt_ctf_event_class *event_class,
		struct bt_ctf_trace *trace,
		struct bt_ctf_stream_class *stream_class)
{
	return bt_ctf_event_class_visit(event_class, trace, stream_class,
		type_resolve_func);
}