lib: decouple variant FC option names from selector FC mapping names
[babeltrace.git] / src / plugins / ctf / common / metadata / ctf-meta-translate.c
1 /*
2 * Copyright 2018 - Philippe Proulx <pproulx@efficios.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a copy
5 * of this software and associated documentation files (the "Software"), to deal
6 * in the Software without restriction, including without limitation the rights
7 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 * copies of the Software, and to permit persons to whom the Software is
9 * furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 */
14
15 #include <babeltrace2/babeltrace.h>
16 #include "common/macros.h"
17 #include "common/assert.h"
18 #include <glib.h>
19 #include <stdint.h>
20 #include <string.h>
21 #include <inttypes.h>
22
23 #include "ctf-meta-visitors.h"
24
/*
 * Working context shared by all the translation functions below.
 * Holds the source CTF metadata objects and the destination trace IR
 * objects for the trace/stream/event class currently being translated.
 */
struct ctx {
	/* Self component creating the IR objects */
	bt_self_component *self_comp;

	/* Destination trace IR trace class */
	bt_trace_class *ir_tc;

	/* Destination trace IR stream class (current one) */
	bt_stream_class *ir_sc;

	/* Source CTF trace class */
	struct ctf_trace_class *tc;

	/* Source CTF stream class (current one) */
	struct ctf_stream_class *sc;

	/* Source CTF event class (current one) */
	struct ctf_event_class *ec;

	/* Scope being translated by scope_ctf_field_class_to_ir() */
	enum ctf_scope scope;
};
34
35 static inline
36 bt_field_class *ctf_field_class_to_ir(struct ctx *ctx,
37 struct ctf_field_class *fc);
38
39 static inline
40 void ctf_field_class_int_set_props(struct ctf_field_class_int *fc,
41 bt_field_class *ir_fc)
42 {
43 bt_field_class_integer_set_field_value_range(ir_fc,
44 fc->base.size);
45 bt_field_class_integer_set_preferred_display_base(ir_fc,
46 fc->disp_base);
47 }
48
49 static inline
50 bt_field_class *ctf_field_class_int_to_ir(struct ctx *ctx,
51 struct ctf_field_class_int *fc)
52 {
53 bt_field_class *ir_fc;
54
55 if (fc->is_signed) {
56 ir_fc = bt_field_class_signed_integer_create(ctx->ir_tc);
57 } else {
58 ir_fc = bt_field_class_unsigned_integer_create(ctx->ir_tc);
59 }
60
61 BT_ASSERT(ir_fc);
62 ctf_field_class_int_set_props(fc, ir_fc);
63 return ir_fc;
64 }
65
/*
 * Translates a CTF enumeration field class to a trace IR enumeration
 * field class, recreating each CTF mapping (label + ranges) as a trace
 * IR mapping.
 *
 * Returns a new reference.
 */
static inline
bt_field_class *ctf_field_class_enum_to_ir(struct ctx *ctx,
		struct ctf_field_class_enum *fc)
{
	int ret;
	bt_field_class *ir_fc;
	uint64_t i;

	/* The container's signedness decides the IR enumeration kind */
	if (fc->base.is_signed) {
		ir_fc = bt_field_class_signed_enumeration_create(ctx->ir_tc);
	} else {
		ir_fc = bt_field_class_unsigned_enumeration_create(ctx->ir_tc);
	}

	BT_ASSERT(ir_fc);

	/* An enumeration's base is an integer: copy its properties */
	ctf_field_class_int_set_props((void *) fc, ir_fc);

	for (i = 0; i < fc->mappings->len; i++) {
		struct ctf_field_class_enum_mapping *mapping =
			ctf_field_class_enum_borrow_mapping_by_index(fc, i);

		/*
		 * `void *` because the type is either
		 * `bt_integer_range_set_signed *` or
		 * `bt_integer_range_set_unsigned *` depending on the
		 * signedness checked below.
		 */
		void *range_set;
		uint64_t range_i;

		if (fc->base.is_signed) {
			range_set = bt_integer_range_set_signed_create();
		} else {
			range_set = bt_integer_range_set_unsigned_create();
		}

		BT_ASSERT(range_set);

		/* Copy all of this mapping's ranges into the range set */
		for (range_i = 0; range_i < mapping->ranges->len; range_i++) {
			struct ctf_range *range =
				ctf_field_class_enum_mapping_borrow_range_by_index(
					mapping, range_i);

			if (fc->base.is_signed) {
				ret = bt_integer_range_set_signed_add_range(
					range_set, range->lower.i,
					range->upper.i);
			} else {
				ret = bt_integer_range_set_unsigned_add_range(
					range_set, range->lower.u,
					range->upper.u);
			}

			BT_ASSERT(ret == 0);
		}

		/*
		 * Add the mapping; the IR field class keeps its own
		 * reference on the range set, so drop ours right away.
		 */
		if (fc->base.is_signed) {
			ret = bt_field_class_signed_enumeration_add_mapping(
				ir_fc, mapping->label->str, range_set);
			BT_RANGE_SET_SIGNED_PUT_REF_AND_RESET(range_set);
		} else {
			ret = bt_field_class_unsigned_enumeration_add_mapping(
				ir_fc, mapping->label->str, range_set);
			BT_RANGE_SET_UNSIGNED_PUT_REF_AND_RESET(range_set);
		}

		BT_ASSERT(ret == 0);
	}

	return ir_fc;
}
130
131 static inline
132 bt_field_class *ctf_field_class_float_to_ir(struct ctx *ctx,
133 struct ctf_field_class_float *fc)
134 {
135 bt_field_class *ir_fc;
136
137 ir_fc = bt_field_class_real_create(ctx->ir_tc);
138 BT_ASSERT(ir_fc);
139
140 if (fc->base.size == 32) {
141 bt_field_class_real_set_is_single_precision(ir_fc,
142 BT_TRUE);
143 }
144
145 return ir_fc;
146 }
147
148 static inline
149 bt_field_class *ctf_field_class_string_to_ir(struct ctx *ctx,
150 struct ctf_field_class_string *fc)
151 {
152 bt_field_class *ir_fc = bt_field_class_string_create(ctx->ir_tc);
153
154 BT_ASSERT(ir_fc);
155 return ir_fc;
156 }
157
/*
 * Appends, to the trace IR structure field class `ir_fc`, one member
 * per member of the CTF structure field class `fc` which is marked as
 * being part of trace IR (`in_ir`), translating each member's field
 * class recursively.
 *
 * NOTE(review): `with_header_prefix` and `context_fc` are accepted but
 * not used by this implementation — possibly kept for interface
 * compatibility with callers elsewhere; confirm before removing.
 */
static inline
void translate_struct_field_class_members(struct ctx *ctx,
		struct ctf_field_class_struct *fc, bt_field_class *ir_fc,
		bool with_header_prefix,
		struct ctf_field_class_struct *context_fc)
{
	uint64_t i;
	int ret;

	for (i = 0; i < fc->members->len; i++) {
		struct ctf_named_field_class *named_fc =
			ctf_field_class_struct_borrow_member_by_index(fc, i);
		bt_field_class *member_ir_fc;
		const char *name = named_fc->name->str;

		/* Skip members which do not exist in trace IR */
		if (!named_fc->fc->in_ir) {
			continue;
		}

		member_ir_fc = ctf_field_class_to_ir(ctx, named_fc->fc);
		BT_ASSERT(member_ir_fc);

		/* The structure keeps its own reference on the member */
		ret = bt_field_class_structure_append_member(ir_fc, name,
			member_ir_fc);
		BT_ASSERT(ret == 0);
		bt_field_class_put_ref(member_ir_fc);
	}
}
185
186 static inline
187 bt_field_class *ctf_field_class_struct_to_ir(struct ctx *ctx,
188 struct ctf_field_class_struct *fc)
189 {
190 bt_field_class *ir_fc = bt_field_class_structure_create(ctx->ir_tc);
191
192 BT_ASSERT(ir_fc);
193 translate_struct_field_class_members(ctx, fc, ir_fc, false, NULL);
194 return ir_fc;
195 }
196
197 static inline
198 bt_field_class *borrow_ir_fc_from_field_path(struct ctx *ctx,
199 struct ctf_field_path *field_path)
200 {
201 bt_field_class *ir_fc = NULL;
202 struct ctf_field_class *fc = ctf_field_path_borrow_field_class(
203 field_path, ctx->tc, ctx->sc, ctx->ec);
204
205 BT_ASSERT(fc);
206
207 if (fc->in_ir) {
208 ir_fc = fc->ir_fc;
209 }
210
211 return ir_fc;
212 }
213
214 static inline
215 const void *find_ir_enum_field_class_mapping_by_label(const bt_field_class *fc,
216 const char *label, bool is_signed)
217 {
218 const void *mapping = NULL;
219 uint64_t i;
220
221 for (i = 0; i < bt_field_class_enumeration_get_mapping_count(fc); i++) {
222 const bt_field_class_enumeration_mapping *this_mapping;
223 const void *spec_this_mapping;
224
225 if (is_signed) {
226 spec_this_mapping =
227 bt_field_class_signed_enumeration_borrow_mapping_by_index_const(
228 fc, i);
229 this_mapping =
230 bt_field_class_signed_enumeration_mapping_as_mapping_const(
231 spec_this_mapping);
232 } else {
233 spec_this_mapping =
234 bt_field_class_unsigned_enumeration_borrow_mapping_by_index_const(
235 fc, i);
236 this_mapping =
237 bt_field_class_unsigned_enumeration_mapping_as_mapping_const(
238 spec_this_mapping);
239 }
240
241 BT_ASSERT(this_mapping);
242 BT_ASSERT(spec_this_mapping);
243
244 if (strcmp(bt_field_class_enumeration_mapping_get_label(
245 this_mapping), label) == 0) {
246 mapping = spec_this_mapping;
247 goto end;
248 }
249 }
250
251 end:
252 return mapping;
253 }
254
/*
 * Translates a CTF variant field class to a trace IR variant field
 * class, translating and appending each option.
 *
 * When the variant's tag (selector) is reachable (not within a packet
 * or event header scope, which this translation does not convert), the
 * option ranges are borrowed from the already-translated selector
 * enumeration's mappings, matched by the option's _original_ name.
 *
 * Returns a new reference.
 */
static inline
bt_field_class *ctf_field_class_variant_to_ir(struct ctx *ctx,
		struct ctf_field_class_variant *fc)
{
	int ret;
	bt_field_class *ir_fc;
	uint64_t i;
	bt_field_class *ir_tag_fc = NULL;

	/* Header scopes have no trace IR equivalent: leave untagged */
	if (fc->tag_path.root != CTF_SCOPE_PACKET_HEADER &&
			fc->tag_path.root != CTF_SCOPE_EVENT_HEADER) {
		ir_tag_fc = borrow_ir_fc_from_field_path(ctx, &fc->tag_path);
		BT_ASSERT(ir_tag_fc);
	}

	ir_fc = bt_field_class_variant_create(ctx->ir_tc, ir_tag_fc);
	BT_ASSERT(ir_fc);

	for (i = 0; i < fc->options->len; i++) {
		struct ctf_named_field_class *named_fc =
			ctf_field_class_variant_borrow_option_by_index(fc, i);
		bt_field_class *option_ir_fc;

		BT_ASSERT(named_fc->fc->in_ir);
		option_ir_fc = ctf_field_class_to_ir(ctx, named_fc->fc);
		BT_ASSERT(option_ir_fc);

		if (ir_tag_fc) {
			/*
			 * At this point the trace IR selector
			 * (enumeration) field class already exists if
			 * the variant is tagged (`ir_tag_fc`). This one
			 * already contains range sets for its mappings,
			 * so we just reuse the same, finding them by
			 * matching a variant field class's option's
			 * _original_ name (with a leading underscore,
			 * possibly) with a selector field class's
			 * mapping name.
			 */
			if (fc->tag_fc->base.is_signed) {
				const bt_field_class_signed_enumeration_mapping *mapping =
					find_ir_enum_field_class_mapping_by_label(
						ir_tag_fc,
						named_fc->orig_name->str, true);
				const bt_integer_range_set_signed *range_set;

				BT_ASSERT(mapping);
				range_set =
					bt_field_class_signed_enumeration_mapping_borrow_ranges_const(
						mapping);
				BT_ASSERT(range_set);
				ret = bt_field_class_variant_with_signed_selector_append_option(
					ir_fc, named_fc->name->str,
					option_ir_fc, range_set);
			} else {
				const bt_field_class_unsigned_enumeration_mapping *mapping =
					find_ir_enum_field_class_mapping_by_label(
						ir_tag_fc,
						named_fc->orig_name->str,
						false);
				const bt_integer_range_set_unsigned *range_set;

				BT_ASSERT(mapping);
				range_set =
					bt_field_class_unsigned_enumeration_mapping_borrow_ranges_const(
						mapping);
				BT_ASSERT(range_set);
				ret = bt_field_class_variant_with_unsigned_selector_append_option(
					ir_fc, named_fc->name->str,
					option_ir_fc, range_set);
			}
		} else {
			/* Untagged variant: no range set to provide */
			ret = bt_field_class_variant_without_selector_append_option(
				ir_fc, named_fc->name->str, option_ir_fc);
		}

		BT_ASSERT(ret == 0);

		/* The variant keeps its own reference on the option */
		bt_field_class_put_ref(option_ir_fc);
	}

	return ir_fc;
}
337
338 static inline
339 bt_field_class *ctf_field_class_array_to_ir(struct ctx *ctx,
340 struct ctf_field_class_array *fc)
341 {
342 bt_field_class *ir_fc;
343 bt_field_class *elem_ir_fc;
344
345 if (fc->base.is_text) {
346 ir_fc = bt_field_class_string_create(ctx->ir_tc);
347 BT_ASSERT(ir_fc);
348 goto end;
349 }
350
351 elem_ir_fc = ctf_field_class_to_ir(ctx, fc->base.elem_fc);
352 BT_ASSERT(elem_ir_fc);
353 ir_fc = bt_field_class_static_array_create(ctx->ir_tc, elem_ir_fc,
354 fc->length);
355 BT_ASSERT(ir_fc);
356 bt_field_class_put_ref(elem_ir_fc);
357
358 end:
359 return ir_fc;
360 }
361
362 static inline
363 bt_field_class *ctf_field_class_sequence_to_ir(struct ctx *ctx,
364 struct ctf_field_class_sequence *fc)
365 {
366 int ret;
367 bt_field_class *ir_fc;
368 bt_field_class *elem_ir_fc;
369
370 if (fc->base.is_text) {
371 ir_fc = bt_field_class_string_create(ctx->ir_tc);
372 BT_ASSERT(ir_fc);
373 goto end;
374 }
375
376 elem_ir_fc = ctf_field_class_to_ir(ctx, fc->base.elem_fc);
377 BT_ASSERT(elem_ir_fc);
378 ir_fc = bt_field_class_dynamic_array_create(ctx->ir_tc, elem_ir_fc);
379 BT_ASSERT(ir_fc);
380 bt_field_class_put_ref(elem_ir_fc);
381 BT_ASSERT(ir_fc);
382
383 if (fc->length_path.root != CTF_SCOPE_PACKET_HEADER &&
384 fc->length_path.root != CTF_SCOPE_EVENT_HEADER) {
385 ret = bt_field_class_dynamic_array_set_length_field_class(
386 ir_fc, borrow_ir_fc_from_field_path(ctx, &fc->length_path));
387 BT_ASSERT(ret == 0);
388 }
389
390 end:
391 return ir_fc;
392 }
393
394 static inline
395 bt_field_class *ctf_field_class_to_ir(struct ctx *ctx,
396 struct ctf_field_class *fc)
397 {
398 bt_field_class *ir_fc = NULL;
399
400 BT_ASSERT(fc);
401 BT_ASSERT(fc->in_ir);
402
403 switch (fc->type) {
404 case CTF_FIELD_CLASS_TYPE_INT:
405 ir_fc = ctf_field_class_int_to_ir(ctx, (void *) fc);
406 break;
407 case CTF_FIELD_CLASS_TYPE_ENUM:
408 ir_fc = ctf_field_class_enum_to_ir(ctx, (void *) fc);
409 break;
410 case CTF_FIELD_CLASS_TYPE_FLOAT:
411 ir_fc = ctf_field_class_float_to_ir(ctx, (void *) fc);
412 break;
413 case CTF_FIELD_CLASS_TYPE_STRING:
414 ir_fc = ctf_field_class_string_to_ir(ctx, (void *) fc);
415 break;
416 case CTF_FIELD_CLASS_TYPE_STRUCT:
417 ir_fc = ctf_field_class_struct_to_ir(ctx, (void *) fc);
418 break;
419 case CTF_FIELD_CLASS_TYPE_ARRAY:
420 ir_fc = ctf_field_class_array_to_ir(ctx, (void *) fc);
421 break;
422 case CTF_FIELD_CLASS_TYPE_SEQUENCE:
423 ir_fc = ctf_field_class_sequence_to_ir(ctx, (void *) fc);
424 break;
425 case CTF_FIELD_CLASS_TYPE_VARIANT:
426 ir_fc = ctf_field_class_variant_to_ir(ctx, (void *) fc);
427 break;
428 default:
429 abort();
430 }
431
432 fc->ir_fc = ir_fc;
433 return ir_fc;
434 }
435
436 static inline
437 bool ctf_field_class_struct_has_immediate_member_in_ir(
438 struct ctf_field_class_struct *fc)
439 {
440 uint64_t i;
441 bool has_immediate_member_in_ir = false;
442
443 /*
444 * If the structure field class has no members at all, then it
445 * was an empty structure in the beginning, so leave it existing
446 * and empty.
447 */
448 if (fc->members->len == 0) {
449 has_immediate_member_in_ir = true;
450 goto end;
451 }
452
453 for (i = 0; i < fc->members->len; i++) {
454 struct ctf_named_field_class *named_fc =
455 ctf_field_class_struct_borrow_member_by_index(fc, i);
456
457 if (named_fc->fc->in_ir) {
458 has_immediate_member_in_ir = true;
459 goto end;
460 }
461 }
462
463 end:
464 return has_immediate_member_in_ir;
465 }
466
467 static inline
468 bt_field_class *scope_ctf_field_class_to_ir(struct ctx *ctx)
469 {
470 bt_field_class *ir_fc = NULL;
471 struct ctf_field_class *fc = NULL;
472
473 switch (ctx->scope) {
474 case CTF_SCOPE_PACKET_CONTEXT:
475 fc = ctx->sc->packet_context_fc;
476 break;
477 case CTF_SCOPE_EVENT_COMMON_CONTEXT:
478 fc = ctx->sc->event_common_context_fc;
479 break;
480 case CTF_SCOPE_EVENT_SPECIFIC_CONTEXT:
481 fc = ctx->ec->spec_context_fc;
482 break;
483 case CTF_SCOPE_EVENT_PAYLOAD:
484 fc = ctx->ec->payload_fc;
485 break;
486 default:
487 abort();
488 }
489
490 if (fc && ctf_field_class_struct_has_immediate_member_in_ir(
491 (void *) fc)) {
492 ir_fc = ctf_field_class_to_ir(ctx, fc);
493 }
494
495 return ir_fc;
496 }
497
/*
 * Translates the context's current CTF event class (`ctx->ec`) to a
 * trace IR event class within `ctx->ir_sc`, including its specific
 * context and payload scopes, and records the result in
 * `ctx->ec->ir_ec`. Idempotent: an already-translated event class is
 * simply borrowed back by ID.
 */
static inline
void ctf_event_class_to_ir(struct ctx *ctx)
{
	int ret;
	bt_event_class *ir_ec = NULL;
	bt_field_class *ir_fc;

	BT_ASSERT(ctx->ec);

	if (ctx->ec->is_translated) {
		/* Already done: borrow the existing IR event class */
		ir_ec = bt_stream_class_borrow_event_class_by_id(
			ctx->ir_sc, ctx->ec->id);
		BT_ASSERT(ir_ec);
		goto end;
	}

	ir_ec = bt_event_class_create_with_id(ctx->ir_sc, ctx->ec->id);
	BT_ASSERT(ir_ec);

	/*
	 * Drop our reference immediately: the stream class keeps the
	 * event class alive, and `ir_ec` is used as borrowed below.
	 */
	bt_event_class_put_ref(ir_ec);

	/* Translate the specific context scope, if it exists in IR */
	ctx->scope = CTF_SCOPE_EVENT_SPECIFIC_CONTEXT;
	ir_fc = scope_ctf_field_class_to_ir(ctx);
	if (ir_fc) {
		ret = bt_event_class_set_specific_context_field_class(
			ir_ec, ir_fc);
		BT_ASSERT(ret == 0);
		bt_field_class_put_ref(ir_fc);
	}

	/* Translate the payload scope, if it exists in IR */
	ctx->scope = CTF_SCOPE_EVENT_PAYLOAD;
	ir_fc = scope_ctf_field_class_to_ir(ctx);
	if (ir_fc) {
		ret = bt_event_class_set_payload_field_class(ir_ec,
			ir_fc);
		BT_ASSERT(ret == 0);
		bt_field_class_put_ref(ir_fc);
	}

	/* Copy the optional name, EMF URI, and log level properties */
	if (ctx->ec->name->len > 0) {
		ret = bt_event_class_set_name(ir_ec, ctx->ec->name->str);
		BT_ASSERT(ret == 0);
	}

	if (ctx->ec->emf_uri->len > 0) {
		ret = bt_event_class_set_emf_uri(ir_ec, ctx->ec->emf_uri->str);
		BT_ASSERT(ret == 0);
	}

	if (ctx->ec->log_level != -1) {
		bt_event_class_set_log_level(ir_ec, ctx->ec->log_level);
	}

	ctx->ec->is_translated = true;
	ctx->ec->ir_ec = ir_ec;

end:
	return;
}
555
556
/*
 * Translates the context's current CTF stream class (`ctx->sc`) to a
 * trace IR stream class within `ctx->ir_tc`, including its packet
 * context and event common context scopes, and records the result in
 * both `ctx->ir_sc` and `ctx->sc->ir_sc`. Idempotent: an
 * already-translated stream class is simply borrowed back by ID.
 */
static inline
void ctf_stream_class_to_ir(struct ctx *ctx)
{
	int ret;
	bt_field_class *ir_fc;

	BT_ASSERT(ctx->sc);

	if (ctx->sc->is_translated) {
		/* Already done: borrow the existing IR stream class */
		ctx->ir_sc = bt_trace_class_borrow_stream_class_by_id(
			ctx->ir_tc, ctx->sc->id);
		BT_ASSERT(ctx->ir_sc);
		goto end;
	}

	ctx->ir_sc = bt_stream_class_create_with_id(ctx->ir_tc, ctx->sc->id);
	BT_ASSERT(ctx->ir_sc);

	/*
	 * Drop our reference immediately: the trace class keeps the
	 * stream class alive, and `ctx->ir_sc` is used as borrowed.
	 */
	bt_stream_class_put_ref(ctx->ir_sc);

	/* The clock classes were translated by ctf_trace_class_to_ir() */
	if (ctx->sc->default_clock_class) {
		BT_ASSERT(ctx->sc->default_clock_class->ir_cc);
		ret = bt_stream_class_set_default_clock_class(ctx->ir_sc,
			ctx->sc->default_clock_class->ir_cc);
		BT_ASSERT(ret == 0);
	}

	/* Copy the packet/discarded events/discarded packets support */
	bt_stream_class_set_supports_packets(ctx->ir_sc, BT_TRUE,
		ctx->sc->packets_have_ts_begin, ctx->sc->packets_have_ts_end);
	bt_stream_class_set_supports_discarded_events(ctx->ir_sc,
		ctx->sc->has_discarded_events,
		ctx->sc->discarded_events_have_default_cs);
	bt_stream_class_set_supports_discarded_packets(ctx->ir_sc,
		ctx->sc->has_discarded_packets,
		ctx->sc->discarded_packets_have_default_cs);

	/* Translate the packet context scope, if it exists in IR */
	ctx->scope = CTF_SCOPE_PACKET_CONTEXT;
	ir_fc = scope_ctf_field_class_to_ir(ctx);
	if (ir_fc) {
		ret = bt_stream_class_set_packet_context_field_class(
			ctx->ir_sc, ir_fc);
		BT_ASSERT(ret == 0);
		bt_field_class_put_ref(ir_fc);
	}

	/* Translate the event common context scope, if it exists in IR */
	ctx->scope = CTF_SCOPE_EVENT_COMMON_CONTEXT;
	ir_fc = scope_ctf_field_class_to_ir(ctx);
	if (ir_fc) {
		ret = bt_stream_class_set_event_common_context_field_class(
			ctx->ir_sc, ir_fc);
		BT_ASSERT(ret == 0);
		bt_field_class_put_ref(ir_fc);
	}

	/* CTF already provides explicit event class and stream IDs */
	bt_stream_class_set_assigns_automatic_event_class_id(ctx->ir_sc,
		BT_FALSE);
	bt_stream_class_set_assigns_automatic_stream_id(ctx->ir_sc, BT_FALSE);

	ctx->sc->is_translated = true;
	ctx->sc->ir_sc = ctx->ir_sc;

end:
	return;
}
619
620 static inline
621 void ctf_clock_class_to_ir(bt_clock_class *ir_cc, struct ctf_clock_class *cc)
622 {
623 int ret;
624
625 if (strlen(cc->name->str) > 0) {
626 ret = bt_clock_class_set_name(ir_cc, cc->name->str);
627 BT_ASSERT(ret == 0);
628 }
629
630 if (strlen(cc->description->str) > 0) {
631 ret = bt_clock_class_set_description(ir_cc, cc->description->str);
632 BT_ASSERT(ret == 0);
633 }
634
635 bt_clock_class_set_frequency(ir_cc, cc->frequency);
636 bt_clock_class_set_precision(ir_cc, cc->precision);
637 bt_clock_class_set_offset(ir_cc, cc->offset_seconds, cc->offset_cycles);
638
639 if (cc->has_uuid) {
640 bt_clock_class_set_uuid(ir_cc, cc->uuid);
641 }
642
643 bt_clock_class_set_origin_is_unix_epoch(ir_cc, cc->is_absolute);
644 }
645
646 static inline
647 int ctf_trace_class_to_ir(struct ctx *ctx)
648 {
649 int ret = 0;
650 uint64_t i;
651
652 BT_ASSERT(ctx->tc);
653 BT_ASSERT(ctx->ir_tc);
654
655 if (ctx->tc->is_translated) {
656 goto end;
657 }
658
659 for (i = 0; i < ctx->tc->clock_classes->len; i++) {
660 struct ctf_clock_class *cc = ctx->tc->clock_classes->pdata[i];
661
662 cc->ir_cc = bt_clock_class_create(ctx->self_comp);
663 ctf_clock_class_to_ir(cc->ir_cc, cc);
664 }
665
666 bt_trace_class_set_assigns_automatic_stream_class_id(ctx->ir_tc,
667 BT_FALSE);
668 ctx->tc->is_translated = true;
669 ctx->tc->ir_tc = ctx->ir_tc;
670
671 end:
672 return ret;
673 }
674
675 BT_HIDDEN
676 int ctf_trace_class_translate(bt_self_component *self_comp,
677 bt_trace_class *ir_tc, struct ctf_trace_class *tc)
678 {
679 int ret = 0;
680 uint64_t i;
681 struct ctx ctx = { 0 };
682
683 ctx.self_comp = self_comp;
684 ctx.tc = tc;
685 ctx.ir_tc = ir_tc;
686 ret = ctf_trace_class_to_ir(&ctx);
687 if (ret) {
688 goto end;
689 }
690
691 for (i = 0; i < tc->stream_classes->len; i++) {
692 uint64_t j;
693 ctx.sc = tc->stream_classes->pdata[i];
694
695 ctf_stream_class_to_ir(&ctx);
696
697 for (j = 0; j < ctx.sc->event_classes->len; j++) {
698 ctx.ec = ctx.sc->event_classes->pdata[j];
699
700 ctf_event_class_to_ir(&ctx);
701 ctx.ec = NULL;
702 }
703
704 ctx.sc = NULL;
705 }
706
707 end:
708 return ret;
709 }
This page took 0.044885 seconds and 5 git commands to generate.