9b65ed58e62ce29b721dcb3ffc78808f87714a91
[babeltrace.git] / formats / ctf / ir / visitor.c
1 /*
2 * visitor.c
3 *
4 * Babeltrace CTF IR - Trace Visitor
5 *
6 * Copyright 2015 Jérémie Galarneau <jeremie.galarneau@efficios.com>
7 *
8 * Author: Jérémie Galarneau <jeremie.galarneau@efficios.com>
9 *
10 * Permission is hereby granted, free of charge, to any person obtaining a copy
11 * of this software and associated documentation files (the "Software"), to deal
12 * in the Software without restriction, including without limitation the rights
13 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the Software is
15 * furnished to do so, subject to the following conditions:
16 *
17 * The above copyright notice and this permission notice shall be included in
18 * all copies or substantial portions of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
23 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
26 * SOFTWARE.
27 */
28
29 #include <babeltrace/ctf-ir/event.h>
30 #include <babeltrace/ctf-ir/stream-class.h>
31 #include <babeltrace/ctf-ir/visitor-internal.h>
32 #include <babeltrace/ctf-ir/event-types-internal.h>
33 #include <babeltrace/ctf-ir/event-internal.h>
34 #include <babeltrace/babeltrace-internal.h>
35
/*
 * TSDL dynamic scope prefixes defined in CTF Section 7.3.2.
 *
 * Indexed by enum bt_ctf_node; each entry is the textual prefix which
 * marks a field path as absolute, rooted at that scope (note the
 * trailing dot, which guarantees a full-token prefix match).
 */
static const char * const absolute_path_prefixes[] = {
	[CTF_NODE_ENV] = "env.",
	[CTF_NODE_TRACE_PACKET_HEADER] = "trace.packet.header.",
	[CTF_NODE_STREAM_PACKET_CONTEXT] = "stream.packet.context.",
	[CTF_NODE_STREAM_EVENT_HEADER] = "stream.event.header.",
	[CTF_NODE_STREAM_EVENT_CONTEXT] = "stream.event.context.",
	[CTF_NODE_EVENT_CONTEXT] = "event.context.",
	[CTF_NODE_EVENT_FIELDS] = "event.fields.",
};
46
/*
 * Number of dot-separated tokens forming each absolute path prefix
 * above (e.g. "trace.packet.header." -> 3 tokens). get_field_path()
 * strips this many leading tokens from a tokenized absolute path.
 *
 * NOTE(review): unlike its sibling tables, this one is not declared
 * "static"; it appears to be used only in this file -- confirm no other
 * translation unit references it before narrowing the linkage.
 */
const int absolute_path_prefix_token_counts[] = {
	[CTF_NODE_ENV] = 1,
	[CTF_NODE_TRACE_PACKET_HEADER] = 3,
	[CTF_NODE_STREAM_PACKET_CONTEXT] = 3,
	[CTF_NODE_STREAM_EVENT_HEADER] = 3,
	[CTF_NODE_STREAM_EVENT_CONTEXT] = 3,
	[CTF_NODE_EVENT_CONTEXT] = 2,
	[CTF_NODE_EVENT_FIELDS] = 2,
};
56
/*
 * Human-readable names of the CTF type ids, indexed by
 * enum ctf_type_id; used by print_path() for verbose output.
 */
static const char * const type_names[] = {
	[CTF_TYPE_UNKNOWN] = "unknown",
	[CTF_TYPE_INTEGER] = "integer",
	[CTF_TYPE_FLOAT] = "float",
	[CTF_TYPE_ENUM] = "enumeration",
	[CTF_TYPE_STRING] = "string",
	[CTF_TYPE_STRUCT] = "structure",
	[CTF_TYPE_UNTAGGED_VARIANT] = "untagged variant",
	[CTF_TYPE_VARIANT] = "variant",
	[CTF_TYPE_ARRAY] = "array",
	[CTF_TYPE_SEQUENCE] = "sequence",
};
69
70 static
71 int field_type_visit(struct bt_ctf_field_type *type,
72 struct ctf_type_visitor_context *context,
73 ctf_type_visitor_func func);
74
75 static
76 int field_type_recursive_visit(struct bt_ctf_field_type *type,
77 struct ctf_type_visitor_context *context,
78 ctf_type_visitor_func func);
79
80 static inline
81 int get_type_field_count(struct bt_ctf_field_type *type)
82 {
83 int field_count = -1;
84 enum ctf_type_id type_id = bt_ctf_field_type_get_type_id(type);
85
86 if (type_id == CTF_TYPE_STRUCT) {
87 field_count = bt_ctf_field_type_structure_get_field_count(type);
88 } else if (type_id == CTF_TYPE_VARIANT) {
89 field_count = bt_ctf_field_type_variant_get_field_count(type);
90 }
91 return field_count;
92 }
93
94 static inline
95 struct bt_ctf_field_type *get_type_field(struct bt_ctf_field_type *type, int i)
96 {
97 struct bt_ctf_field_type *field = NULL;
98 enum ctf_type_id type_id = bt_ctf_field_type_get_type_id(type);
99
100 if (type_id == CTF_TYPE_STRUCT) {
101 bt_ctf_field_type_structure_get_field(type, NULL,
102 &field, i);
103 } else if (type_id == CTF_TYPE_VARIANT) {
104 bt_ctf_field_type_variant_get_field(type,
105 NULL, &field, i);
106 }
107
108 return field;
109 }
110
111 static inline
112 int set_type_field(struct bt_ctf_field_type *type,
113 struct bt_ctf_field_type *field, int i)
114 {
115 int ret = -1;
116 enum ctf_type_id type_id = bt_ctf_field_type_get_type_id(type);
117
118 if (type_id == CTF_TYPE_STRUCT) {
119 ret = bt_ctf_field_type_structure_set_field_index(
120 type, field, i);
121 } else if (type_id == CTF_TYPE_VARIANT) {
122 ret = bt_ctf_field_type_variant_set_field_index(
123 type, field, i);
124 }
125
126 return ret;
127 }
128
129 static inline
130 int get_type_field_index(struct bt_ctf_field_type *type, const char *name)
131 {
132 int field_index = -1;
133 enum ctf_type_id type_id = bt_ctf_field_type_get_type_id(type);
134
135 if (type_id == CTF_TYPE_STRUCT) {
136 field_index = bt_ctf_field_type_structure_get_field_name_index(
137 type, name);
138 } else if (type_id == CTF_TYPE_VARIANT) {
139 field_index = bt_ctf_field_type_variant_get_field_name_index(
140 type, name);
141 }
142
143 return field_index;
144 }
145
/*
 * Create an empty type stack. A type stack is a plain GPtrArray of
 * struct ctf_type_stack_frame pointers (no element free function).
 * Returns NULL on allocation failure.
 */
BT_HIDDEN
ctf_type_stack *ctf_type_stack_create(void)
{
	return g_ptr_array_new();
}
151
/*
 * Destroy a type stack.
 *
 * Frees the array itself only: the array was created without an element
 * free function, so frames still on the stack are NOT freed here.
 * NOTE(review): a visit aborted mid-way therefore leaks its remaining
 * frames -- confirm whether this is intended.
 */
BT_HIDDEN
void ctf_type_stack_destroy(
		ctf_type_stack *stack)
{
	g_ptr_array_free(stack, TRUE);
}
158
159 BT_HIDDEN
160 int ctf_type_stack_push(ctf_type_stack *stack,
161 struct ctf_type_stack_frame *entry)
162 {
163 int ret = 0;
164
165 if (!stack || !entry) {
166 ret = -1;
167 goto end;
168 }
169
170 g_ptr_array_add(stack, entry);
171 end:
172 return ret;
173 }
174
175 BT_HIDDEN
176 struct ctf_type_stack_frame *ctf_type_stack_peek(ctf_type_stack *stack)
177 {
178 struct ctf_type_stack_frame *entry = NULL;
179
180 if (!stack || stack->len == 0) {
181 goto end;
182 }
183
184 entry = g_ptr_array_index(stack, stack->len - 1);
185 end:
186 return entry;
187 }
188
189 BT_HIDDEN
190 struct ctf_type_stack_frame *ctf_type_stack_pop(ctf_type_stack *stack)
191 {
192 struct ctf_type_stack_frame *entry = NULL;
193
194 entry = ctf_type_stack_peek(stack);
195 if (entry) {
196 g_ptr_array_set_size(stack, stack->len - 1);
197 }
198 return entry;
199 }
200
/*
 * Visit a single field type.
 *
 * Invokes "func" on the type, recursively visits the element type of
 * arrays and sequences, and, for structures and variants, pushes a new
 * stack frame so that field_type_recursive_visit() can iterate over
 * their members. Non-compound types create no frame.
 *
 * Returns 0 on success, negative on error. A successfully pushed frame
 * is owned by the visit machinery and freed when popped.
 */
static
int field_type_visit(struct bt_ctf_field_type *type,
		struct ctf_type_visitor_context *context,
		ctf_type_visitor_func func)
{
	int ret;
	enum ctf_type_id type_id;
	struct ctf_type_stack_frame *frame = NULL;

	/* Visit the type itself before any of its children */
	ret = func(type, context);
	if (ret) {
		goto end;
	}

	type_id = bt_ctf_field_type_get_type_id(type);
	if (type_id == CTF_TYPE_SEQUENCE || type_id == CTF_TYPE_ARRAY) {
		/* Owned reference; put after the recursive visit below */
		struct bt_ctf_field_type *element =
			type_id == CTF_TYPE_SEQUENCE ?
			bt_ctf_field_type_sequence_get_element_type(type) :
			bt_ctf_field_type_array_get_element_type(type);

		ret = field_type_recursive_visit(element, context, func);
		bt_ctf_field_type_put(element);
		if (ret) {
			goto end;
		}
	}

	if (type_id != CTF_TYPE_STRUCT &&
		type_id != CTF_TYPE_VARIANT) {
		/* No need to create a new stack frame */
		goto end;
	}

	frame = g_new0(struct ctf_type_stack_frame, 1);
	if (!frame) {
		ret = -1;
		goto end;
	}

	/* frame->index starts at 0 (g_new0); the caller advances it */
	frame->type = type;
	ret = ctf_type_stack_push(context->stack, frame);
	if (ret) {
		g_free(frame);
		goto end;
	}
end:
	return ret;
}
250
251 static
252 int field_type_recursive_visit(struct bt_ctf_field_type *type,
253 struct ctf_type_visitor_context *context,
254 ctf_type_visitor_func func)
255 {
256 int ret = 0;
257 struct ctf_type_stack_frame *stack_marker = NULL;
258
259 ret = field_type_visit(type, context, func);
260 if (ret) {
261 goto end;
262 }
263
264 stack_marker = ctf_type_stack_peek(context->stack);
265 if (!stack_marker || stack_marker->type != type) {
266 /* No need for a recursive visit */
267 goto end;
268 }
269
270 while (true) {
271 struct bt_ctf_field_type *field;
272 struct ctf_type_stack_frame *entry =
273 ctf_type_stack_peek(context->stack);
274 int field_count = get_type_field_count(entry->type);
275
276 if (field_count <= 0 &&
277 !bt_ctf_field_type_is_structure(entry->type)) {
278 /*
279 * Propagate error if one was given, else return
280 * -1 since empty variants are invalid
281 * at this point.
282 */
283 ret = field_count < 0 ? field_count : -1;
284 goto end;
285 }
286
287 if (entry->index == field_count) {
288 /* This level has been completely visited */
289 entry = ctf_type_stack_pop(context->stack);
290 if (entry) {
291 g_free(entry);
292 }
293
294 if (entry == stack_marker) {
295 /* Completed visit */
296 break;
297 } else {
298 continue;
299 }
300 }
301
302 field = get_type_field(entry->type, entry->index);
303 /* Will push a new stack frame if field is struct or variant */
304 ret = field_type_visit(field, context, func);
305 bt_ctf_field_type_put(field);
306 if (ret) {
307 goto end;
308 }
309
310 entry->index++;
311 }
312 end:
313 return ret;
314 }
315
/*
 * Visit all field types of an event class with "func": first the event
 * context type, then the event payload type, each under the matching
 * root node so visitors can compute absolute paths.
 *
 * Returns 0 on success, negative on error.
 *
 * NOTE(review): if a visit fails mid-way, frames still on context.stack
 * are not freed by ctf_type_stack_destroy() (the array has no element
 * free function) -- possible leak on error paths; confirm.
 */
static
int bt_ctf_event_class_visit(struct bt_ctf_event_class *event_class,
		struct bt_ctf_trace *trace,
		struct bt_ctf_stream_class *stream_class,
		ctf_type_visitor_func func)
{
	int ret = 0;
	struct bt_ctf_field_type *type;
	struct ctf_type_visitor_context context = { 0 };

	if (!event_class || !func) {
		ret = -1;
		goto end;
	}

	context.trace = trace;
	context.stream_class = stream_class;
	context.event_class = event_class;
	context.stack = ctf_type_stack_create();
	if (!context.stack) {
		ret = -1;
		goto end;
	}

	/* Visit event context */
	context.root_node = CTF_NODE_EVENT_CONTEXT;
	type = bt_ctf_event_class_get_context_type(event_class);
	if (type) {
		ret = field_type_recursive_visit(type, &context, func);
		bt_ctf_field_type_put(type);
		type = NULL;
		if (ret) {
			goto end;
		}
	}

	/* Visit event payload */
	context.root_node = CTF_NODE_EVENT_FIELDS;
	type = bt_ctf_event_class_get_payload_type(event_class);
	if (type) {
		ret = field_type_recursive_visit(type, &context, func);
		bt_ctf_field_type_put(type);
		type = NULL;
		if (ret) {
			goto end;
		}
	}
end:
	if (context.stack) {
		ctf_type_stack_destroy(context.stack);
	}
	return ret;
}
369
/*
 * Visit all field types of a stream class with "func": the packet
 * context, event header and event context types (each under the
 * matching root node), then every event class of the stream class via
 * bt_ctf_event_class_visit().
 *
 * Returns 0 on success, negative on error.
 */
static
int bt_ctf_stream_class_visit(struct bt_ctf_stream_class *stream_class,
		struct bt_ctf_trace *trace,
		ctf_type_visitor_func func)
{
	int i, ret = 0, event_count;
	struct bt_ctf_field_type *type;
	struct ctf_type_visitor_context context = { 0 };

	if (!stream_class || !func) {
		ret = -1;
		goto end;
	}

	context.trace = trace;
	context.stream_class = stream_class;
	context.stack = ctf_type_stack_create();
	if (!context.stack) {
		ret = -1;
		goto end;
	}

	/* Visit stream packet context header */
	context.root_node = CTF_NODE_STREAM_PACKET_CONTEXT;
	type = bt_ctf_stream_class_get_packet_context_type(stream_class);
	if (type) {
		ret = field_type_recursive_visit(type, &context, func);
		bt_ctf_field_type_put(type);
		type = NULL;
		if (ret) {
			goto end;
		}
	}

	/* Visit stream event header */
	context.root_node = CTF_NODE_STREAM_EVENT_HEADER;
	type = bt_ctf_stream_class_get_event_header_type(stream_class);
	if (type) {
		ret = field_type_recursive_visit(type, &context, func);
		bt_ctf_field_type_put(type);
		type = NULL;
		if (ret) {
			goto end;
		}
	}

	/* Visit stream event context */
	context.root_node = CTF_NODE_STREAM_EVENT_CONTEXT;
	type = bt_ctf_stream_class_get_event_context_type(stream_class);
	if (type) {
		ret = field_type_recursive_visit(type, &context, func);
		bt_ctf_field_type_put(type);
		type = NULL;
		if (ret) {
			goto end;
		}
	}

	/* Visit event classes */
	event_count = bt_ctf_stream_class_get_event_class_count(stream_class);
	if (event_count < 0) {
		/* Propagate the error code */
		ret = event_count;
		goto end;
	}
	for (i = 0; i < event_count; i++) {
		/* Owned reference; put after the visit */
		struct bt_ctf_event_class *event_class =
			bt_ctf_stream_class_get_event_class(stream_class, i);

		ret = bt_ctf_event_class_visit(event_class, trace,
			stream_class, func);
		bt_ctf_event_class_put(event_class);
		if (ret) {
			goto end;
		}
	}
end:
	if (context.stack) {
		ctf_type_stack_destroy(context.stack);
	}
	return ret;
}
451
/*
 * Resolve a relative field path against the current visit position.
 *
 * Walks the remaining tokens of *path_tokens from the type at the top
 * of the visitor stack, appending each matched member index to
 * field_path->path_indexes, then prepends the indexes of the enclosing
 * frames so the resulting path is absolute from context->root_node.
 * A relative path may only refer to members already visited (i.e.
 * located before the current one).
 *
 * Consumed tokens are removed from *path_tokens; on error the caller
 * frees whatever is left.
 *
 * NOTE(review): *resolved_field receives a borrowed pointer -- the
 * reference is put just before it is stored, and it is also written on
 * error paths. Confirm callers only use it while the type is otherwise
 * kept alive and only when ret == 0.
 */
static
int set_field_path_relative(struct ctf_type_visitor_context *context,
		struct bt_ctf_field_path *field_path,
		GList **path_tokens, struct bt_ctf_field_type **resolved_field)
{
	int ret = 0;
	GArray *root_path;
	struct bt_ctf_field_type *field = NULL;
	struct ctf_type_stack_frame *frame =
		ctf_type_stack_peek(context->stack);
	size_t token_count = g_list_length(*path_tokens), i;

	if (!frame) {
		ret = -1;
		goto end;
	}

	/* Start from the compound type currently being visited */
	field = frame->type;
	bt_ctf_field_type_get(field);
	for (i = 0; i < token_count; i++) {
		struct bt_ctf_field_type *next_field = NULL;
		int field_index = get_type_field_index(field,
			(*path_tokens)->data);

		if (field_index < 0) {
			/* Field name not found, abort */
			printf_verbose("Could not resolve field \"%s\"\n",
				(char *) (*path_tokens)->data);
			ret = -1;
			goto end;
		}

		/* Forward references are invalid in a relative path */
		if (field_index >= frame->index) {
			printf_verbose("Invalid relative path refers to a member after the current one\n");
			ret = -1;
			goto end;
		}

		next_field = get_type_field(field, field_index);
		if (!next_field) {
			ret = -1;
			goto end;
		}

		bt_ctf_field_type_put(field);
		field = next_field;
		g_array_append_val(field_path->path_indexes, field_index);

		/*
		 * Free token and remove from list. This function does not
		 * assume the ownership of path_tokens; it is therefore _not_
		 * a leak to leave elements in this list. The caller should
		 * clean-up what is left (in case of error).
		 */
		free((*path_tokens)->data);
		*path_tokens = g_list_delete_link(*path_tokens, *path_tokens);
	}

	/* frame != NULL above guarantees stack->len >= 1 here */
	root_path = g_array_sized_new(FALSE, FALSE,
		sizeof(int), context->stack->len - 1);
	if (!root_path) {
		ret = -1;
		goto end;
	}

	/* Set the current root node as the resolved type's root */
	field_path->root = context->root_node;
	/*
	 * Prepend the current fields' path to the relative path that
	 * was found by walking the stack.
	 */
	for (i = 0; i < context->stack->len - 1; i++) {
		int index;
		struct ctf_type_stack_frame *frame =
			g_ptr_array_index(context->stack, i);

		/* Decrement "index" since it points to the next field */
		index = frame->index - 1;
		g_array_append_val(root_path, index);
	}
	g_array_prepend_vals(field_path->path_indexes, root_path->data,
		root_path->len);
	g_array_free(root_path, TRUE);
end:
	if (field) {
		bt_ctf_field_type_put(field);
		*resolved_field = field;
	}

	return ret;
}
543
/*
 * Resolve an absolute field path (field_path->root already set by
 * get_field_path()).
 *
 * Fetches the root field type of the target scope from the visitor
 * context, then walks the remaining tokens of *path_tokens, appending
 * each matched member index to field_path->path_indexes. The target
 * scope must not be lower in the dynamic scope hierarchy than the
 * scope currently being visited.
 *
 * Consumed tokens are removed from *path_tokens; on error the caller
 * frees whatever is left.
 *
 * NOTE(review): as in set_field_path_relative(), *resolved_field
 * receives a borrowed pointer (reference already put) and is also
 * written on error paths -- confirm callers' expectations.
 */
static
int set_field_path_absolute(struct ctf_type_visitor_context *context,
		struct bt_ctf_field_path *field_path,
		GList **path_tokens, struct bt_ctf_field_type **resolved_field)
{
	int ret = 0;
	struct bt_ctf_field_type *field = NULL;
	size_t token_count = g_list_length(*path_tokens), i;

	if (field_path->root > context->root_node) {
		/*
		 * The target path's root is lower in the dynamic scope
		 * hierarchy than the current field being visited. This
		 * is invalid since it would not be possible to have read
		 * the target before the current field.
		 */
		ret = -1;
		printf_verbose("The target path's root is lower in the dynamic scope than the current field.\n");
		goto end;
	}

	/* Set the appropriate root field */
	switch (field_path->root) {
	case CTF_NODE_TRACE_PACKET_HEADER:
		field = bt_ctf_trace_get_packet_header_type(context->trace);
		break;
	case CTF_NODE_STREAM_PACKET_CONTEXT:
		field = bt_ctf_stream_class_get_packet_context_type(
			context->stream_class);
		break;
	case CTF_NODE_STREAM_EVENT_HEADER:
		field = bt_ctf_stream_class_get_event_header_type(
			context->stream_class);
		break;
	case CTF_NODE_STREAM_EVENT_CONTEXT:
		field = bt_ctf_stream_class_get_event_context_type(
			context->stream_class);
		break;
	case CTF_NODE_EVENT_CONTEXT:
		field = bt_ctf_event_class_get_context_type(
			context->event_class);
		break;
	case CTF_NODE_EVENT_FIELDS:
		field = bt_ctf_event_class_get_payload_type(
			context->event_class);
		break;
	default:
		/* Includes CTF_NODE_ENV: not a field scope */
		ret = -1;
		goto end;
	}

	if (!field) {
		ret = -1;
		goto end;
	}

	for (i = 0; i < token_count; i++) {
		int field_index = get_type_field_index(field,
			(*path_tokens)->data);
		struct bt_ctf_field_type *next_field = NULL;

		if (field_index < 0) {
			/* Field name not found, abort */
			printf_verbose("Could not resolve field \"%s\"\n",
				(char *) (*path_tokens)->data);
			ret = -1;
			goto end;
		}

		next_field = get_type_field(field, field_index);
		if (!next_field) {
			ret = -1;
			goto end;
		}

		bt_ctf_field_type_put(field);
		field = next_field;
		g_array_append_val(field_path->path_indexes, field_index);

		/*
		 * Free token and remove from list. This function does not
		 * assume the ownership of path_tokens; it is therefore _not_
		 * a leak to leave elements in this list. The caller should
		 * clean-up what is left (in case of error).
		 */
		free((*path_tokens)->data);
		*path_tokens = g_list_delete_link(*path_tokens, *path_tokens);
	}
end:
	if (field) {
		bt_ctf_field_type_put(field);
		*resolved_field = field;
	}
	return ret;
}
639
640 static
641 int get_field_path(struct ctf_type_visitor_context *context,
642 const char *path, struct bt_ctf_field_path **field_path,
643 struct bt_ctf_field_type **resolved_field)
644 {
645 int i, ret = 0;
646 GList *path_tokens = NULL;
647 char *name_copy, *save_ptr, *token;
648
649 /* Tokenize path to a list of strings */
650 name_copy = strdup(path);
651 if (!name_copy) {
652 goto error;
653 }
654
655 token = strtok_r(name_copy, ".", &save_ptr);
656 while (token) {
657 char *token_string = strdup(token);
658
659 if (!token_string) {
660 ret = -1;
661 goto error;
662 }
663 path_tokens = g_list_append(path_tokens, token_string);
664 token = strtok_r(NULL, ".", &save_ptr);
665 }
666
667 if (!path_tokens) {
668 ret = -1;
669 goto error;
670 }
671
672 *field_path = bt_ctf_field_path_create();
673 if (!*field_path) {
674 ret = -1;
675 goto error;
676 }
677
678 /* Check if the path is absolute */
679 for (i = 0; i < sizeof(absolute_path_prefixes) / sizeof(char *); i++) {
680 int j;
681
682 /*
683 * Chech if "path" starts with a known absolute path prefix.
684 * Refer to CTF 7.3.2 STATIC AND DYNAMIC SCOPES.
685 */
686 if (strncmp(path, absolute_path_prefixes[i],
687 strlen(absolute_path_prefixes[i]))) {
688 /* Wrong prefix, try the next one */
689 continue;
690 }
691
692 /*
693 * Remove the first n tokens of this prefix.
694 * e.g. trace.packet.header: remove the first 3 tokens.
695 */
696 for (j = 0; j < absolute_path_prefix_token_counts[i]; j++) {
697 free(path_tokens->data);
698 path_tokens = g_list_delete_link(
699 path_tokens, path_tokens);
700 }
701
702 /* i maps to enum bt_ctf_node constants */
703 (*field_path)->root = (enum bt_ctf_node) i;
704 break;
705 }
706
707 if ((*field_path)->root == CTF_NODE_UNKNOWN) {
708 /* Relative path */
709 ret = set_field_path_relative(context,
710 *field_path, &path_tokens, resolved_field);
711 if (ret) {
712 goto error;
713 }
714 } else {
715 /* Absolute path */
716 ret = set_field_path_absolute(context,
717 *field_path, &path_tokens, resolved_field);
718 if (ret) {
719 goto error;
720 }
721 }
722 end:
723 if (name_copy) {
724 g_free(name_copy);
725 }
726 if (path_tokens) {
727 g_list_free_full(path_tokens, free);
728 }
729 return ret;
730 error:
731 if (*field_path) {
732 bt_ctf_field_path_destroy(*field_path);
733 *field_path = NULL;
734 }
735 goto end;
736 }
737
/*
 * Print (verbose mode only) the result of a field path resolution:
 * the field's name, the resolved type's kind and the absolute path
 * (root scope prefix followed by member indexes).
 *
 * NOTE(review): not declared static nor BT_HIDDEN although it appears
 * to be used only in this file -- confirm before narrowing linkage.
 * NOTE(review): indexes absolute_path_prefixes[] with field_path->root,
 * assuming root is a valid scope by the time this is called; "i" (int)
 * is compared against path_indexes->len (unsigned) -- benign here but
 * worth tightening.
 */
void print_path(const char *field_name,
		struct bt_ctf_field_type *resolved_type,
		struct bt_ctf_field_path *field_path)
{
	int i;
	enum ctf_type_id type_id = bt_ctf_field_type_get_type_id(
		resolved_type);

	/* Clamp out-of-range ids so type_names[] indexing stays safe */
	if (type_id < CTF_TYPE_UNKNOWN || type_id >= NR_CTF_TYPES) {
		type_id = CTF_TYPE_UNKNOWN;
	}

	printf_verbose("Resolved field \"%s\" as type \"%s\", ",
		field_name, type_names[type_id]);
	printf_verbose("path: %s",
		absolute_path_prefixes[field_path->root]);

	for (i = 0; i < field_path->path_indexes->len; i++) {
		printf_verbose(" %d",
			g_array_index(field_path->path_indexes, int, i));
	}
	printf_verbose("\n");
}
761
762 static
763 int type_resolve_func(struct bt_ctf_field_type *type,
764 struct ctf_type_visitor_context *context)
765 {
766 int ret = 0;
767 enum ctf_type_id type_id = bt_ctf_field_type_get_type_id(type);
768 const char *field_name = NULL;
769 struct bt_ctf_field_path *field_path = NULL;
770 struct bt_ctf_field_type *resolved_type = NULL;
771 struct bt_ctf_field_type *type_copy = NULL;
772 struct ctf_type_stack_frame *frame;
773
774 if (type_id != CTF_TYPE_SEQUENCE &&
775 type_id != CTF_TYPE_VARIANT) {
776 goto end;
777 }
778
779 field_name = type_id == CTF_TYPE_SEQUENCE ?
780 bt_ctf_field_type_sequence_get_length_field_name(type) :
781 bt_ctf_field_type_variant_get_tag_name(type);
782 if (!field_name) {
783 ret = -1;
784 goto end;
785 }
786
787 ret = get_field_path(context, field_name,
788 &field_path, &resolved_type);
789 if (ret) {
790 goto end;
791 }
792
793 assert(field_path && resolved_type);
794
795 /* Print path if in verbose mode */
796 print_path(field_name, resolved_type, field_path);
797
798 /*
799 * Set field type's path.
800 *
801 * The original field is copied since it may have been reused
802 * in multiple structures which would cause a conflict.
803 */
804 type_copy = bt_ctf_field_type_copy(type);
805 if (!type_copy) {
806 ret = -1;
807 goto end;
808 }
809
810 if (type_id == CTF_TYPE_VARIANT) {
811 if (bt_ctf_field_type_get_type_id(resolved_type) !=
812 CTF_TYPE_ENUM) {
813 printf_verbose("Invalid variant tag \"%s\"; expected enum\n", field_name);
814 ret = -1;
815 goto end;
816 }
817 ret = bt_ctf_field_type_variant_set_tag(
818 type_copy, resolved_type);
819 if (ret) {
820 goto end;
821 }
822
823 ret = bt_ctf_field_type_variant_set_tag_field_path(type_copy,
824 field_path);
825 if (ret) {
826 goto end;
827 }
828 } else {
829 /* Sequence */
830 if (bt_ctf_field_type_get_type_id(resolved_type) !=
831 CTF_TYPE_INTEGER) {
832 printf_verbose("Invalid sequence length field \"%s\"; expected integer\n", field_name);
833 ret = -1;
834 goto end;
835 }
836
837 if (bt_ctf_field_type_integer_get_signed(resolved_type) != 0) {
838 printf_verbose("Invalid sequence length field \"%s\"; integer should be unsigned\n", field_name);
839 ret = -1;
840 goto end;
841 }
842
843 ret = bt_ctf_field_type_sequence_set_length_field_path(
844 type_copy, field_path);
845 if (ret) {
846 goto end;
847 }
848 }
849
850 /* Replace the original field */
851 frame = ctf_type_stack_peek(context->stack);
852 ret = set_type_field(frame->type, type_copy, frame->index);
853 bt_ctf_field_type_put(type_copy);
854 end:
855 return ret;
856 }
857
858 BT_HIDDEN
859 int bt_ctf_trace_visit(struct bt_ctf_trace *trace,
860 ctf_type_visitor_func func)
861 {
862 int i, stream_count, ret = 0;
863 struct bt_ctf_field_type *type = NULL;
864 struct ctf_type_visitor_context visitor_ctx = { 0 };
865
866 if (!trace || !func) {
867 ret = -1;
868 goto end;
869 }
870
871 visitor_ctx.trace = trace;
872 visitor_ctx.stack = ctf_type_stack_create();
873 if (!visitor_ctx.stack) {
874 ret = -1;
875 goto end;
876 }
877
878 /* Visit trace packet header */
879 type = bt_ctf_trace_get_packet_header_type(trace);
880 if (type) {
881 visitor_ctx.root_node = CTF_NODE_TRACE_PACKET_HEADER;
882 ret = field_type_recursive_visit(type, &visitor_ctx, func);
883 visitor_ctx.root_node = CTF_NODE_UNKNOWN;
884 bt_ctf_field_type_put(type);
885 type = NULL;
886 if (ret) {
887 goto end;
888 }
889 }
890
891 stream_count = bt_ctf_trace_get_stream_class_count(trace);
892 for (i = 0; i < stream_count; i++) {
893 struct bt_ctf_stream_class *stream_class =
894 bt_ctf_trace_get_stream_class(trace, i);
895
896 /* Visit streams */
897 ret = bt_ctf_stream_class_visit(stream_class, trace,
898 func);
899 bt_ctf_stream_class_put(stream_class);
900 if (ret) {
901 goto end;
902 }
903 }
904 end:
905 if (visitor_ctx.stack) {
906 ctf_type_stack_destroy(visitor_ctx.stack);
907 }
908 return ret;
909 }
910
/*
 * Resolve all sequence length fields and variant tags of a trace's
 * types to concrete field paths (convenience wrapper around
 * bt_ctf_trace_visit() with type_resolve_func).
 */
BT_HIDDEN
int bt_ctf_trace_resolve_types(struct bt_ctf_trace *trace)
{
	return bt_ctf_trace_visit(trace, type_resolve_func);
}
916
/*
 * Resolve all sequence length fields and variant tags of a stream
 * class's types (wrapper around bt_ctf_stream_class_visit() with
 * type_resolve_func).
 */
BT_HIDDEN
int bt_ctf_stream_class_resolve_types(struct bt_ctf_stream_class *stream_class,
		struct bt_ctf_trace *trace)
{
	return bt_ctf_stream_class_visit(stream_class, trace,
		type_resolve_func);
}
924
/*
 * Resolve all sequence length fields and variant tags of an event
 * class's types (wrapper around bt_ctf_event_class_visit() with
 * type_resolve_func).
 */
BT_HIDDEN
int bt_ctf_event_class_resolve_types(struct bt_ctf_event_class *event_class,
		struct bt_ctf_trace *trace,
		struct bt_ctf_stream_class *stream_class)
{
	return bt_ctf_event_class_visit(event_class, trace, stream_class,
		type_resolve_func);
}
This page took 0.045579 seconds and 3 git commands to generate.