[babeltrace.git] / formats / ctf / ir / visitor.c
/*
 * visitor.c
 *
 * Babeltrace CTF IR - Trace Visitor
 *
 * Copyright 2015 Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * Author: Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <babeltrace/ctf-ir/event.h>
#include <babeltrace/ctf-ir/stream-class.h>
#include <babeltrace/ctf-ir/visitor-internal.h>
#include <babeltrace/ctf-ir/event-types-internal.h>
#include <babeltrace/ctf-ir/event-internal.h>
#include <babeltrace/babeltrace-internal.h>

/* TSDL dynamic scope prefixes defined in CTF Section 7.3.2 */
static const char * const absolute_path_prefixes[] = {
	[CTF_NODE_ENV] = "env.",
	[CTF_NODE_TRACE_PACKET_HEADER] = "trace.packet.header.",
	[CTF_NODE_STREAM_PACKET_CONTEXT] = "stream.packet.context.",
	[CTF_NODE_STREAM_EVENT_HEADER] = "stream.event.header.",
	[CTF_NODE_STREAM_EVENT_CONTEXT] = "stream.event.context.",
	[CTF_NODE_EVENT_CONTEXT] = "event.context.",
	[CTF_NODE_EVENT_FIELDS] = "event.fields.",
};

const int absolute_path_prefix_token_counts[] = {
	[CTF_NODE_ENV] = 1,
	[CTF_NODE_TRACE_PACKET_HEADER] = 3,
	[CTF_NODE_STREAM_PACKET_CONTEXT] = 3,
	[CTF_NODE_STREAM_EVENT_HEADER] = 3,
	[CTF_NODE_STREAM_EVENT_CONTEXT] = 3,
	[CTF_NODE_EVENT_CONTEXT] = 2,
	[CTF_NODE_EVENT_FIELDS] = 2,
};
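
/*
 * For example, the absolute TSDL path "stream.event.context.length"
 * matches the CTF_NODE_STREAM_EVENT_CONTEXT prefix above; its three
 * prefix tokens ("stream", "event", "context") are stripped and the
 * remaining token ("length") is resolved within that dynamic scope.
 */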

static const char * const type_names[] = {
	[CTF_TYPE_UNKNOWN] = "unknown",
	[CTF_TYPE_INTEGER] = "integer",
	[CTF_TYPE_FLOAT] = "float",
	[CTF_TYPE_ENUM] = "enumeration",
	[CTF_TYPE_STRING] = "string",
	[CTF_TYPE_STRUCT] = "structure",
	[CTF_TYPE_UNTAGGED_VARIANT] = "untagged variant",
	[CTF_TYPE_VARIANT] = "variant",
	[CTF_TYPE_ARRAY] = "array",
	[CTF_TYPE_SEQUENCE] = "sequence",
};

static
int field_type_visit(struct bt_ctf_field_type *type,
		struct ctf_type_visitor_context *context,
		ctf_type_visitor_func func);

static
int field_type_recursive_visit(struct bt_ctf_field_type *type,
		struct ctf_type_visitor_context *context,
		ctf_type_visitor_func func);

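/*
 * Helpers to handle structures and variants uniformly: return the field
 * count of a structure or variant type, or -1 for any other type.
 */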
static inline
int get_type_field_count(struct bt_ctf_field_type *type)
{
	int field_count = -1;
	enum ctf_type_id type_id = bt_ctf_field_type_get_type_id(type);

	if (type_id == CTF_TYPE_STRUCT) {
		field_count = bt_ctf_field_type_structure_get_field_count(type);
	} else if (type_id == CTF_TYPE_VARIANT) {
		field_count = bt_ctf_field_type_variant_get_field_count(type);
	}
	return field_count;
}

static inline
struct bt_ctf_field_type *get_type_field(struct bt_ctf_field_type *type, int i)
{
	struct bt_ctf_field_type *field = NULL;
	enum ctf_type_id type_id = bt_ctf_field_type_get_type_id(type);

	if (type_id == CTF_TYPE_STRUCT) {
		bt_ctf_field_type_structure_get_field(type, NULL,
			&field, i);
	} else if (type_id == CTF_TYPE_VARIANT) {
		bt_ctf_field_type_variant_get_field(type,
			NULL, &field, i);
	}

	return field;
}

static inline
int set_type_field(struct bt_ctf_field_type *type,
		struct bt_ctf_field_type *field, int i)
{
	int ret = -1;
	enum ctf_type_id type_id = bt_ctf_field_type_get_type_id(type);

	if (type_id == CTF_TYPE_STRUCT) {
		ret = bt_ctf_field_type_structure_set_field_index(
			type, field, i);
	} else if (type_id == CTF_TYPE_VARIANT) {
		ret = bt_ctf_field_type_variant_set_field_index(
			type, field, i);
	}

	return ret;
}

static inline
int get_type_field_index(struct bt_ctf_field_type *type, const char *name)
{
	int field_index = -1;
	enum ctf_type_id type_id = bt_ctf_field_type_get_type_id(type);

	if (type_id == CTF_TYPE_STRUCT) {
		field_index = bt_ctf_field_type_structure_get_field_name_index(
			type, name);
	} else if (type_id == CTF_TYPE_VARIANT) {
		field_index = bt_ctf_field_type_variant_get_field_name_index(
			type, name);
	}

	return field_index;
}

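/*
 * The visitor stack tracks the structures and variants currently being
 * traversed; each frame records a compound type and the index of the
 * next field to visit within it.
 */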
BT_HIDDEN
ctf_type_stack *ctf_type_stack_create(void)
{
	return g_ptr_array_new();
}

BT_HIDDEN
void ctf_type_stack_destroy(
		ctf_type_stack *stack)
{
	g_ptr_array_free(stack, TRUE);
}

BT_HIDDEN
int ctf_type_stack_push(ctf_type_stack *stack,
		struct ctf_type_stack_frame *entry)
{
	int ret = 0;

	if (!stack || !entry) {
		ret = -1;
		goto end;
	}

	g_ptr_array_add(stack, entry);
end:
	return ret;
}

BT_HIDDEN
struct ctf_type_stack_frame *ctf_type_stack_peek(ctf_type_stack *stack)
{
	struct ctf_type_stack_frame *entry = NULL;

	if (!stack || stack->len == 0) {
		goto end;
	}

	entry = g_ptr_array_index(stack, stack->len - 1);
end:
	return entry;
}

BT_HIDDEN
struct ctf_type_stack_frame *ctf_type_stack_pop(ctf_type_stack *stack)
{
	struct ctf_type_stack_frame *entry = NULL;

	entry = ctf_type_stack_peek(stack);
	if (entry) {
		g_ptr_array_set_size(stack, stack->len - 1);
	}
	return entry;
}

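/*
 * Visit a single type: invoke the visitor function on it, recurse into
 * the element type of arrays and sequences, and push a new stack frame
 * when the type is a structure or a variant so that its fields get
 * visited by the caller.
 */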
static
int field_type_visit(struct bt_ctf_field_type *type,
		struct ctf_type_visitor_context *context,
		ctf_type_visitor_func func)
{
	int ret;
	enum ctf_type_id type_id;
	struct ctf_type_stack_frame *frame = NULL;

	ret = func(type, context);
	if (ret) {
		goto end;
	}

	type_id = bt_ctf_field_type_get_type_id(type);
	if (type_id == CTF_TYPE_SEQUENCE || type_id == CTF_TYPE_ARRAY) {
		struct bt_ctf_field_type *element =
			type_id == CTF_TYPE_SEQUENCE ?
			bt_ctf_field_type_sequence_get_element_type(type) :
			bt_ctf_field_type_array_get_element_type(type);

		ret = field_type_recursive_visit(element, context, func);
		bt_ctf_field_type_put(element);
		if (ret) {
			goto end;
		}
	}

	if (type_id != CTF_TYPE_STRUCT &&
		type_id != CTF_TYPE_VARIANT) {
		/* No need to create a new stack frame */
		goto end;
	}

	frame = g_new0(struct ctf_type_stack_frame, 1);
	if (!frame) {
		ret = -1;
		goto end;
	}

	frame->type = type;
	ret = ctf_type_stack_push(context->stack, frame);
	if (ret) {
		g_free(frame);
		goto end;
	}
end:
	return ret;
}

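/*
 * Depth-first traversal of a type and all of its nested fields. The
 * traversal is iterative: field_type_visit() pushes a frame for every
 * structure or variant it encounters and the loop below keeps visiting
 * fields until the frame pushed for the root type is popped.
 */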
static
int field_type_recursive_visit(struct bt_ctf_field_type *type,
		struct ctf_type_visitor_context *context,
		ctf_type_visitor_func func)
{
	int ret = 0;
	struct ctf_type_stack_frame *stack_marker = NULL;

	ret = field_type_visit(type, context, func);
	if (ret) {
		goto end;
	}

	stack_marker = ctf_type_stack_peek(context->stack);
	if (!stack_marker || stack_marker->type != type) {
		/* No need for a recursive visit */
		goto end;
	}

	while (true) {
		struct bt_ctf_field_type *field;
		struct ctf_type_stack_frame *entry =
			ctf_type_stack_peek(context->stack);
		int field_count = get_type_field_count(entry->type);

		if (field_count <= 0) {
			/*
			 * Propagate error if one was given, else return
			 * -1 since empty structures or variants are invalid
			 * at this point.
			 */
			ret = field_count < 0 ? field_count : -1;
			goto end;
		}

		if (entry->index == field_count) {
			/* This level has been completely visited */
			entry = ctf_type_stack_pop(context->stack);
			if (entry) {
				g_free(entry);
			}

			if (entry == stack_marker) {
				/* Completed visit */
				break;
			} else {
				continue;
			}
		}

		field = get_type_field(entry->type, entry->index);
		/* Will push a new stack frame if field is struct or variant */
		ret = field_type_visit(field, context, func);
		bt_ctf_field_type_put(field);
		if (ret) {
			goto end;
		}

		entry->index++;
	}
end:
	return ret;
}

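/*
 * Visit the event context and payload types of an event class, rooting
 * each traversal at the corresponding dynamic scope node.
 */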
static
int bt_ctf_event_class_visit(struct bt_ctf_event_class *event_class,
		struct bt_ctf_trace *trace,
		struct bt_ctf_stream_class *stream_class,
		ctf_type_visitor_func func)
{
	int ret = 0;
	struct bt_ctf_field_type *type;
	struct ctf_type_visitor_context context = { 0 };

	if (!event_class || !func) {
		ret = -1;
		goto end;
	}

	context.trace = trace;
	context.stream_class = stream_class;
	context.event_class = event_class;
	context.stack = ctf_type_stack_create();
	if (!context.stack) {
		ret = -1;
		goto end;
	}

	/* Visit event context */
	context.root_node = CTF_NODE_EVENT_CONTEXT;
	type = bt_ctf_event_class_get_context_type(event_class);
	if (type) {
		ret = field_type_recursive_visit(type, &context, func);
		bt_ctf_field_type_put(type);
		type = NULL;
		if (ret) {
			goto end;
		}
	}

	/* Visit event payload */
	context.root_node = CTF_NODE_EVENT_FIELDS;
	type = bt_ctf_event_class_get_payload_type(event_class);
	if (type) {
		ret = field_type_recursive_visit(type, &context, func);
		bt_ctf_field_type_put(type);
		type = NULL;
		if (ret) {
			goto end;
		}
	}
end:
	if (context.stack) {
		ctf_type_stack_destroy(context.stack);
	}
	return ret;
}

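/*
 * Visit the packet context, event header and event context types of a
 * stream class, then visit every one of its event classes.
 */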
static
int bt_ctf_stream_class_visit(struct bt_ctf_stream_class *stream_class,
		struct bt_ctf_trace *trace,
		ctf_type_visitor_func func)
{
	int i, ret = 0, event_count;
	struct bt_ctf_field_type *type;
	struct ctf_type_visitor_context context = { 0 };

	if (!stream_class || !func) {
		ret = -1;
		goto end;
	}

	context.trace = trace;
	context.stream_class = stream_class;
	context.stack = ctf_type_stack_create();
	if (!context.stack) {
		ret = -1;
		goto end;
	}

	/* Visit stream packet context */
	context.root_node = CTF_NODE_STREAM_PACKET_CONTEXT;
	type = bt_ctf_stream_class_get_packet_context_type(stream_class);
	if (type) {
		ret = field_type_recursive_visit(type, &context, func);
		bt_ctf_field_type_put(type);
		type = NULL;
		if (ret) {
			goto end;
		}
	}

	/* Visit stream event header */
	context.root_node = CTF_NODE_STREAM_EVENT_HEADER;
	type = bt_ctf_stream_class_get_event_header_type(stream_class);
	if (type) {
		ret = field_type_recursive_visit(type, &context, func);
		bt_ctf_field_type_put(type);
		type = NULL;
		if (ret) {
			goto end;
		}
	}

	/* Visit stream event context */
	context.root_node = CTF_NODE_STREAM_EVENT_CONTEXT;
	type = bt_ctf_stream_class_get_event_context_type(stream_class);
	if (type) {
		ret = field_type_recursive_visit(type, &context, func);
		bt_ctf_field_type_put(type);
		type = NULL;
		if (ret) {
			goto end;
		}
	}

	/* Visit event classes */
	event_count = bt_ctf_stream_class_get_event_class_count(stream_class);
	if (event_count < 0) {
		ret = event_count;
		goto end;
	}
	for (i = 0; i < event_count; i++) {
		struct bt_ctf_event_class *event_class =
			bt_ctf_stream_class_get_event_class(stream_class, i);

		ret = bt_ctf_event_class_visit(event_class, trace,
			stream_class, func);
		bt_ctf_event_class_put(event_class);
		if (ret) {
			goto end;
		}
	}
end:
	if (context.stack) {
		ctf_type_stack_destroy(context.stack);
	}
	return ret;
}

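/*
 * Resolve a relative field path by walking back from the field currently
 * being visited. Only fields declared before the current one may be
 * referenced. On success, the indexes of the enclosing scopes are
 * prepended to the relative indexes so that field_path ends up absolute
 * within the current root node.
 */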
static
int set_field_path_relative(struct ctf_type_visitor_context *context,
		struct bt_ctf_field_path *field_path,
		GList **path_tokens, struct bt_ctf_field_type **resolved_field)
{
	int ret = 0;
	GArray *root_path;
	struct bt_ctf_field_type *field = NULL;
	struct ctf_type_stack_frame *frame =
		ctf_type_stack_peek(context->stack);
	size_t token_count = g_list_length(*path_tokens), i;

	if (!frame) {
		ret = -1;
		goto end;
	}

	field = frame->type;
	bt_ctf_field_type_get(field);
	for (i = 0; i < token_count; i++) {
		struct bt_ctf_field_type *next_field = NULL;
		int field_index = get_type_field_index(field,
			(*path_tokens)->data);

		if (field_index < 0) {
			/* Field name not found, abort */
			printf_verbose("Could not resolve field \"%s\"\n",
				(char *) (*path_tokens)->data);
			ret = -1;
			goto end;
		}

		if (field_index >= frame->index) {
			printf_verbose("Invalid relative path refers to a member after the current one\n");
			ret = -1;
			goto end;
		}

		next_field = get_type_field(field, field_index);
		if (!next_field) {
			ret = -1;
			goto end;
		}

		bt_ctf_field_type_put(field);
		field = next_field;
		g_array_append_val(field_path->path_indexes, field_index);

		/*
		 * Free token and remove from list. This function does not
		 * assume the ownership of path_tokens; it is therefore _not_
		 * a leak to leave elements in this list. The caller should
		 * clean up what is left (in case of error).
		 */
		free((*path_tokens)->data);
		*path_tokens = g_list_delete_link(*path_tokens, *path_tokens);
	}

	root_path = g_array_sized_new(FALSE, FALSE,
		sizeof(int), context->stack->len - 1);
	if (!root_path) {
		ret = -1;
		goto end;
	}

	/* Set the current root node as the resolved type's root */
	field_path->root = context->root_node;
	/*
	 * Prepend the current field's path to the relative path that
	 * was found by walking the stack.
	 */
	for (i = 0; i < context->stack->len - 1; i++) {
		int index;
		struct ctf_type_stack_frame *frame =
			g_ptr_array_index(context->stack, i);

		/* Decrement "index" since it points to the next field */
		index = frame->index - 1;
		g_array_append_val(root_path, index);
	}
	g_array_prepend_vals(field_path->path_indexes, root_path->data,
		root_path->len);
	g_array_free(root_path, TRUE);
end:
	if (field) {
		bt_ctf_field_type_put(field);
		*resolved_field = field;
	}

	return ret;
}

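/*
 * Resolve an absolute field path. The root dynamic scope named by the
 * path must not be deeper than the scope currently being visited, since
 * the target has to be readable before the current field.
 */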
static
int set_field_path_absolute(struct ctf_type_visitor_context *context,
		struct bt_ctf_field_path *field_path,
		GList **path_tokens, struct bt_ctf_field_type **resolved_field)
{
	int ret = 0;
	struct bt_ctf_field_type *field = NULL;
	size_t token_count = g_list_length(*path_tokens), i;

	if (field_path->root > context->root_node) {
		/*
		 * The target path's root is lower in the dynamic scope
		 * hierarchy than the current field being visited. This
		 * is invalid since it would not be possible to have read
		 * the target before the current field.
		 */
		ret = -1;
		printf_verbose("The target path's root is lower in the dynamic scope than the current field.\n");
		goto end;
	}

	/* Set the appropriate root field */
	switch (field_path->root) {
	case CTF_NODE_TRACE_PACKET_HEADER:
		field = bt_ctf_trace_get_packet_header_type(context->trace);
		break;
	case CTF_NODE_STREAM_PACKET_CONTEXT:
		field = bt_ctf_stream_class_get_packet_context_type(
			context->stream_class);
		break;
	case CTF_NODE_STREAM_EVENT_HEADER:
		field = bt_ctf_stream_class_get_event_header_type(
			context->stream_class);
		break;
	case CTF_NODE_STREAM_EVENT_CONTEXT:
		field = bt_ctf_stream_class_get_event_context_type(
			context->stream_class);
		break;
	case CTF_NODE_EVENT_CONTEXT:
		field = bt_ctf_event_class_get_context_type(
			context->event_class);
		break;
	case CTF_NODE_EVENT_FIELDS:
		field = bt_ctf_event_class_get_payload_type(
			context->event_class);
		break;
	default:
		ret = -1;
		goto end;
	}

	if (!field) {
		ret = -1;
		goto end;
	}

	for (i = 0; i < token_count; i++) {
		int field_index = get_type_field_index(field,
			(*path_tokens)->data);
		struct bt_ctf_field_type *next_field = NULL;

		if (field_index < 0) {
			/* Field name not found, abort */
			printf_verbose("Could not resolve field \"%s\"\n",
				(char *) (*path_tokens)->data);
			ret = -1;
			goto end;
		}

		next_field = get_type_field(field, field_index);
		if (!next_field) {
			ret = -1;
			goto end;
		}

		bt_ctf_field_type_put(field);
		field = next_field;
		g_array_append_val(field_path->path_indexes, field_index);

		/*
		 * Free token and remove from list. This function does not
		 * assume the ownership of path_tokens; it is therefore _not_
		 * a leak to leave elements in this list. The caller should
		 * clean up what is left (in case of error).
		 */
		free((*path_tokens)->data);
		*path_tokens = g_list_delete_link(*path_tokens, *path_tokens);
	}
end:
	if (field) {
		bt_ctf_field_type_put(field);
		*resolved_field = field;
	}
	return ret;
}

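/*
 * Tokenize "path" on '.', determine whether it is absolute (it starts
 * with one of the dynamic scope prefixes above) or relative, and resolve
 * it to a field path and a resolved field type. For example,
 * "trace.packet.header.magic" is absolute, while a bare field name is
 * resolved relative to the structure currently being visited.
 */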
static
int get_field_path(struct ctf_type_visitor_context *context,
		const char *path, struct bt_ctf_field_path **field_path,
		struct bt_ctf_field_type **resolved_field)
{
	int i, ret = 0;
	GList *path_tokens = NULL;
	char *name_copy, *save_ptr, *token;

	/* Tokenize path to a list of strings */
	name_copy = strdup(path);
	if (!name_copy) {
		ret = -1;
		goto error;
	}

	token = strtok_r(name_copy, ".", &save_ptr);
	while (token) {
		char *token_string = strdup(token);

		if (!token_string) {
			ret = -1;
			goto error;
		}
		path_tokens = g_list_append(path_tokens, token_string);
		token = strtok_r(NULL, ".", &save_ptr);
	}

	if (!path_tokens) {
		ret = -1;
		goto error;
	}

	*field_path = bt_ctf_field_path_create();
	if (!*field_path) {
		ret = -1;
		goto error;
	}

	/* Check if the path is absolute */
	for (i = 0; i < sizeof(absolute_path_prefixes) / sizeof(char *); i++) {
		int j;

		/*
		 * Check if "path" starts with a known absolute path prefix.
		 * Refer to CTF 7.3.2 STATIC AND DYNAMIC SCOPES.
		 */
		if (strncmp(path, absolute_path_prefixes[i],
			strlen(absolute_path_prefixes[i]))) {
			/* Wrong prefix, try the next one */
			continue;
		}

		/*
		 * Remove the first n tokens of this prefix.
		 * e.g. trace.packet.header: remove the first 3 tokens.
		 */
		for (j = 0; j < absolute_path_prefix_token_counts[i]; j++) {
			free(path_tokens->data);
			path_tokens = g_list_delete_link(
				path_tokens, path_tokens);
		}

		/* i maps to enum bt_ctf_node constants */
		(*field_path)->root = (enum bt_ctf_node) i;
		break;
	}

	if ((*field_path)->root == CTF_NODE_UNKNOWN) {
		/* Relative path */
		ret = set_field_path_relative(context,
			*field_path, &path_tokens, resolved_field);
		if (ret) {
			goto error;
		}
	} else {
		/* Absolute path */
		ret = set_field_path_absolute(context,
			*field_path, &path_tokens, resolved_field);
		if (ret) {
			goto error;
		}
	}
end:
	if (name_copy) {
		g_free(name_copy);
	}
	if (path_tokens) {
		g_list_free_full(path_tokens, free);
	}
	return ret;
error:
	if (*field_path) {
		bt_ctf_field_path_destroy(*field_path);
		*field_path = NULL;
	}
	goto end;
}

static
void print_path(const char *field_name,
		struct bt_ctf_field_type *resolved_type,
		struct bt_ctf_field_path *field_path)
{
	int i;
	enum ctf_type_id type_id = bt_ctf_field_type_get_type_id(
		resolved_type);

	if (type_id < CTF_TYPE_UNKNOWN || type_id >= NR_CTF_TYPES) {
		type_id = CTF_TYPE_UNKNOWN;
	}

	printf_verbose("Resolved field \"%s\" as type \"%s\", ",
		field_name, type_names[type_id]);
	printf_verbose("path: %s",
		absolute_path_prefixes[field_path->root]);

	for (i = 0; i < field_path->path_indexes->len; i++) {
		printf_verbose(" %d",
			g_array_index(field_path->path_indexes, int, i));
	}
	printf_verbose("\n");
}

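/*
 * Visitor callback used by the resolve entry points below: for every
 * sequence and variant type encountered, resolve the length/tag field
 * path, validate the resolved type (unsigned integer for a sequence
 * length, enumeration for a variant tag), and replace the field in its
 * parent with a copy carrying the resolved path.
 */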
static
int type_resolve_func(struct bt_ctf_field_type *type,
		struct ctf_type_visitor_context *context)
{
	int ret = 0;
	enum ctf_type_id type_id = bt_ctf_field_type_get_type_id(type);
	const char *field_name = NULL;
	struct bt_ctf_field_path *field_path = NULL;
	struct bt_ctf_field_type *resolved_type = NULL;
	struct bt_ctf_field_type *type_copy = NULL;
	struct ctf_type_stack_frame *frame;

	if (type_id != CTF_TYPE_SEQUENCE &&
		type_id != CTF_TYPE_VARIANT) {
		goto end;
	}

	field_name = type_id == CTF_TYPE_SEQUENCE ?
		bt_ctf_field_type_sequence_get_length_field_name(type) :
		bt_ctf_field_type_variant_get_tag_name(type);
	if (!field_name) {
		ret = -1;
		goto end;
	}

	ret = get_field_path(context, field_name,
		&field_path, &resolved_type);
	if (ret) {
		goto end;
	}

	assert(field_path && resolved_type);

	/* Print path if in verbose mode */
	print_path(field_name, resolved_type, field_path);

	/*
	 * Set field type's path.
	 *
	 * The original field is copied since it may have been reused
	 * in multiple structures which would cause a conflict.
	 */
	type_copy = bt_ctf_field_type_copy(type);
	if (!type_copy) {
		ret = -1;
		goto end;
	}

	if (type_id == CTF_TYPE_VARIANT) {
		if (bt_ctf_field_type_get_type_id(resolved_type) !=
			CTF_TYPE_ENUM) {
			printf_verbose("Invalid variant tag \"%s\"; expected enum\n", field_name);
			ret = -1;
			goto end;
		}
		ret = bt_ctf_field_type_variant_set_tag(
			type_copy, resolved_type);
		if (ret) {
			goto end;
		}

		ret = bt_ctf_field_type_variant_set_tag_field_path(type_copy,
			field_path);
		if (ret) {
			goto end;
		}
	} else {
		/* Sequence */
		if (bt_ctf_field_type_get_type_id(resolved_type) !=
			CTF_TYPE_INTEGER) {
			printf_verbose("Invalid sequence length field \"%s\"; expected integer\n", field_name);
			ret = -1;
			goto end;
		}

		if (bt_ctf_field_type_integer_get_signed(resolved_type) != 0) {
			printf_verbose("Invalid sequence length field \"%s\"; integer should be unsigned\n", field_name);
			ret = -1;
			goto end;
		}

		ret = bt_ctf_field_type_sequence_set_length_field_path(
			type_copy, field_path);
		if (ret) {
			goto end;
		}
	}

	/* Replace the original field */
	frame = ctf_type_stack_peek(context->stack);
	ret = set_type_field(frame->type, type_copy, frame->index);
	bt_ctf_field_type_put(type_copy);
end:
	return ret;
}

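/*
 * Visit every type reachable from a trace: its packet header type and,
 * for each of its stream classes, the stream and event class types.
 */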
BT_HIDDEN
int bt_ctf_trace_visit(struct bt_ctf_trace *trace,
		ctf_type_visitor_func func)
{
	int i, stream_count, ret = 0;
	struct bt_ctf_field_type *type = NULL;
	struct ctf_type_visitor_context visitor_ctx = { 0 };

	if (!trace || !func) {
		ret = -1;
		goto end;
	}

	visitor_ctx.trace = trace;
	visitor_ctx.stack = ctf_type_stack_create();
	if (!visitor_ctx.stack) {
		ret = -1;
		goto end;
	}

	/* Visit trace packet header */
	type = bt_ctf_trace_get_packet_header_type(trace);
	if (type) {
		visitor_ctx.root_node = CTF_NODE_TRACE_PACKET_HEADER;
		ret = field_type_recursive_visit(type, &visitor_ctx, func);
		visitor_ctx.root_node = CTF_NODE_UNKNOWN;
		bt_ctf_field_type_put(type);
		type = NULL;
		if (ret) {
			goto end;
		}
	}

	stream_count = bt_ctf_trace_get_stream_class_count(trace);
	for (i = 0; i < stream_count; i++) {
		struct bt_ctf_stream_class *stream_class =
			bt_ctf_trace_get_stream_class(trace, i);

		/* Visit streams */
		ret = bt_ctf_stream_class_visit(stream_class, trace,
			func);
		bt_ctf_stream_class_put(stream_class);
		if (ret) {
			goto end;
		}
	}
end:
	if (visitor_ctx.stack) {
		ctf_type_stack_destroy(visitor_ctx.stack);
	}
	return ret;
}

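/*
 * Convenience entry points: run the type resolution visitor over a whole
 * trace, a single stream class or a single event class, resolving every
 * sequence length and variant tag field path they contain.
 */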
BT_HIDDEN
int bt_ctf_trace_resolve_types(struct bt_ctf_trace *trace)
{
	return bt_ctf_trace_visit(trace, type_resolve_func);
}

BT_HIDDEN
int bt_ctf_stream_class_resolve_types(struct bt_ctf_stream_class *stream_class,
		struct bt_ctf_trace *trace)
{
	return bt_ctf_stream_class_visit(stream_class, trace,
		type_resolve_func);
}

BT_HIDDEN
int bt_ctf_event_class_resolve_types(struct bt_ctf_event_class *event_class,
		struct bt_ctf_trace *trace,
		struct bt_ctf_stream_class *stream_class)
{
	return bt_ctf_event_class_visit(event_class, trace, stream_class,
		type_resolve_func);
}