0d66dace09932f5f419ac146fe4b3947a598bbcb
[babeltrace.git] / src / plugins / ctf / common / metadata / ctf-meta-translate.cpp
1 /*
2 * SPDX-License-Identifier: MIT
3 *
4 * Copyright 2018 Philippe Proulx <pproulx@efficios.com>
5 */
6
7 #include <babeltrace2/babeltrace.h>
8 #include "common/macros.h"
9 #include "common/assert.h"
10 #include <glib.h>
11 #include <stdbool.h>
12 #include <stdint.h>
13 #include <string.h>
14 #include <inttypes.h>
15
16 #include "ctf-meta-visitors.hpp"
17
/* Translation state shared by all the translation functions below. */
struct ctx {
	bt_self_component *self_comp;	/* Self component (used to create clock classes) */
	bt_trace_class *ir_tc;		/* Trace IR trace class being filled */
	bt_stream_class *ir_sc;		/* Trace IR stream class currently being filled */
	struct ctf_trace_class *tc;	/* Source CTF trace class */
	struct ctf_stream_class *sc;	/* Current source CTF stream class */
	struct ctf_event_class *ec;	/* Current source CTF event class */
	enum ctf_scope scope;		/* Scope currently being translated */
};
27
/* Forward declaration: recursive entry point for field class translation. */
static inline
bt_field_class *ctf_field_class_to_ir(struct ctx *ctx,
		struct ctf_field_class *fc);
31
32 static inline
33 void ctf_field_class_int_set_props(struct ctf_field_class_int *fc,
34 bt_field_class *ir_fc)
35 {
36 bt_field_class_integer_set_field_value_range(ir_fc,
37 fc->base.size);
38 bt_field_class_integer_set_preferred_display_base(ir_fc,
39 fc->disp_base);
40 }
41
42 static inline
43 bt_field_class *ctf_field_class_int_to_ir(struct ctx *ctx,
44 struct ctf_field_class_int *fc)
45 {
46 bt_field_class *ir_fc;
47
48 if (fc->is_signed) {
49 ir_fc = bt_field_class_integer_signed_create(ctx->ir_tc);
50 } else {
51 ir_fc = bt_field_class_integer_unsigned_create(ctx->ir_tc);
52 }
53
54 BT_ASSERT(ir_fc);
55 ctf_field_class_int_set_props(fc, ir_fc);
56 return ir_fc;
57 }
58
/*
 * Translates the CTF enumeration field class `fc` to a new trace IR
 * enumeration field class, copying all the mappings and their ranges,
 * and returns it (caller owns the reference).
 */
static inline
bt_field_class *ctf_field_class_enum_to_ir(struct ctx *ctx,
		struct ctf_field_class_enum *fc)
{
	int ret;
	bt_field_class *ir_fc;
	uint64_t i;

	/* Signedness of the IR class follows the underlying integer's. */
	if (fc->base.is_signed) {
		ir_fc = bt_field_class_enumeration_signed_create(ctx->ir_tc);
	} else {
		ir_fc = bt_field_class_enumeration_unsigned_create(ctx->ir_tc);
	}

	BT_ASSERT(ir_fc);

	/* An enumeration is also an integer: copy size and display base. */
	ctf_field_class_int_set_props(&fc->base, ir_fc);

	for (i = 0; i < fc->mappings->len; i++) {
		struct ctf_field_class_enum_mapping *mapping =
			ctf_field_class_enum_borrow_mapping_by_index(fc, i);
		bt_integer_range_set_signed *range_set_signed = NULL;
		bt_integer_range_set_unsigned *range_set_unsigned = NULL;
		uint64_t range_i;

		/* One fresh range set per mapping, matching signedness. */
		if (fc->base.is_signed) {
			range_set_signed = bt_integer_range_set_signed_create();
			BT_ASSERT(range_set_signed);
		} else {
			range_set_unsigned = bt_integer_range_set_unsigned_create();
			BT_ASSERT(range_set_unsigned);
		}

		/* Copy every [lower, upper] range of this mapping. */
		for (range_i = 0; range_i < mapping->ranges->len; range_i++) {
			struct ctf_range *range =
				ctf_field_class_enum_mapping_borrow_range_by_index(
					mapping, range_i);

			if (fc->base.is_signed) {
				ret = bt_integer_range_set_signed_add_range(
					range_set_signed, range->lower.i,
					range->upper.i);
			} else {
				ret = bt_integer_range_set_unsigned_add_range(
					range_set_unsigned, range->lower.u,
					range->upper.u);
			}

			BT_ASSERT(ret == 0);
		}

		/*
		 * Add the mapping to the IR class; the IR class takes
		 * what it needs from the range set, so release our
		 * reference immediately afterwards.
		 */
		if (fc->base.is_signed) {
			ret = bt_field_class_enumeration_signed_add_mapping(
				ir_fc, mapping->label->str, range_set_signed);
			BT_INTEGER_RANGE_SET_SIGNED_PUT_REF_AND_RESET(range_set_signed);
		} else {
			ret = bt_field_class_enumeration_unsigned_add_mapping(
				ir_fc, mapping->label->str, range_set_unsigned);
			BT_INTEGER_RANGE_SET_UNSIGNED_PUT_REF_AND_RESET(range_set_unsigned);
		}

		BT_ASSERT(ret == 0);
	}

	return ir_fc;
}
124
125 static inline
126 bt_field_class *ctf_field_class_float_to_ir(struct ctx *ctx,
127 struct ctf_field_class_float *fc)
128 {
129 bt_field_class *ir_fc;
130
131 if (fc->base.size == 32) {
132 ir_fc = bt_field_class_real_single_precision_create(ctx->ir_tc);
133 } else {
134 ir_fc = bt_field_class_real_double_precision_create(ctx->ir_tc);
135 }
136 BT_ASSERT(ir_fc);
137
138 return ir_fc;
139 }
140
141 static inline
142 bt_field_class *ctf_field_class_string_to_ir(struct ctx *ctx,
143 struct ctf_field_class_string *fc)
144 {
145 bt_field_class *ir_fc = bt_field_class_string_create(ctx->ir_tc);
146
147 BT_ASSERT(ir_fc);
148 return ir_fc;
149 }
150
151 static inline
152 void translate_struct_field_class_members(struct ctx *ctx,
153 struct ctf_field_class_struct *fc, bt_field_class *ir_fc,
154 bool with_header_prefix,
155 struct ctf_field_class_struct *context_fc)
156 {
157 uint64_t i;
158 int ret;
159
160 for (i = 0; i < fc->members->len; i++) {
161 struct ctf_named_field_class *named_fc =
162 ctf_field_class_struct_borrow_member_by_index(fc, i);
163 bt_field_class *member_ir_fc;
164 const char *name = named_fc->name->str;
165
166 if (!named_fc->fc->in_ir) {
167 continue;
168 }
169
170 member_ir_fc = ctf_field_class_to_ir(ctx, named_fc->fc);
171 BT_ASSERT(member_ir_fc);
172 ret = bt_field_class_structure_append_member(ir_fc, name,
173 member_ir_fc);
174 BT_ASSERT(ret == 0);
175 bt_field_class_put_ref(member_ir_fc);
176 }
177 }
178
179 static inline
180 bt_field_class *ctf_field_class_struct_to_ir(struct ctx *ctx,
181 struct ctf_field_class_struct *fc)
182 {
183 bt_field_class *ir_fc = bt_field_class_structure_create(ctx->ir_tc);
184
185 BT_ASSERT(ir_fc);
186 translate_struct_field_class_members(ctx, fc, ir_fc, false, NULL);
187 return ir_fc;
188 }
189
190 static inline
191 bt_field_class *borrow_ir_fc_from_field_path(struct ctx *ctx,
192 struct ctf_field_path *field_path)
193 {
194 bt_field_class *ir_fc = NULL;
195 struct ctf_field_class *fc = ctf_field_path_borrow_field_class(
196 field_path, ctx->tc, ctx->sc, ctx->ec);
197
198 BT_ASSERT(fc);
199
200 if (fc->in_ir) {
201 ir_fc = fc->ir_fc;
202 }
203
204 return ir_fc;
205 }
206
207 static inline
208 const bt_field_class_enumeration_mapping *find_ir_enum_field_class_mapping_by_label(
209 const bt_field_class *fc, const char *label, bool is_signed)
210 {
211 const bt_field_class_enumeration_mapping *mapping = NULL;
212 uint64_t i;
213
214 for (i = 0; i < bt_field_class_enumeration_get_mapping_count(fc); i++) {
215 const bt_field_class_enumeration_mapping *this_mapping;
216 const bt_field_class_enumeration_signed_mapping *signed_this_mapping = NULL;
217 const bt_field_class_enumeration_unsigned_mapping *unsigned_this_mapping = NULL;
218
219 if (is_signed) {
220 signed_this_mapping =
221 bt_field_class_enumeration_signed_borrow_mapping_by_index_const(
222 fc, i);
223 BT_ASSERT(signed_this_mapping);
224 this_mapping =
225 bt_field_class_enumeration_signed_mapping_as_mapping_const(
226 signed_this_mapping);
227 } else {
228 unsigned_this_mapping =
229 bt_field_class_enumeration_unsigned_borrow_mapping_by_index_const(
230 fc, i);
231 BT_ASSERT(unsigned_this_mapping);
232 this_mapping =
233 bt_field_class_enumeration_unsigned_mapping_as_mapping_const(
234 unsigned_this_mapping);
235 }
236
237 BT_ASSERT(this_mapping);
238
239 if (strcmp(bt_field_class_enumeration_mapping_get_label(
240 this_mapping), label) == 0) {
241 mapping = this_mapping;
242 goto end;
243 }
244 }
245
246 end:
247 return mapping;
248 }
249
/*
 * Translates the CTF variant field class `fc` to a new trace IR
 * variant field class (with or without a selector, depending on where
 * the tag is located), appending all its options, and returns it
 * (caller owns the reference).
 */
static inline
bt_field_class *ctf_field_class_variant_to_ir(struct ctx *ctx,
		struct ctf_field_class_variant *fc)
{
	int ret;
	bt_field_class *ir_fc;
	uint64_t i;
	bt_field_class *ir_tag_fc = NULL;

	/*
	 * Only borrow an IR selector when the tag is not located in a
	 * (packet or event) header scope.
	 */
	if (fc->tag_path.root != CTF_SCOPE_PACKET_HEADER &&
			fc->tag_path.root != CTF_SCOPE_EVENT_HEADER) {
		ir_tag_fc = borrow_ir_fc_from_field_path(ctx, &fc->tag_path);
		BT_ASSERT(ir_tag_fc);
	}

	ir_fc = bt_field_class_variant_create(ctx->ir_tc, ir_tag_fc);
	BT_ASSERT(ir_fc);

	for (i = 0; i < fc->options->len; i++) {
		struct ctf_named_field_class *named_fc =
			ctf_field_class_variant_borrow_option_by_index(fc, i);
		bt_field_class *option_ir_fc;

		BT_ASSERT(named_fc->fc->in_ir);
		option_ir_fc = ctf_field_class_to_ir(ctx, named_fc->fc);
		BT_ASSERT(option_ir_fc);

		if (ir_tag_fc) {
			/*
			 * At this point the trace IR selector
			 * (enumeration) field class already exists if
			 * the variant is tagged (`ir_tag_fc`). This one
			 * already contains range sets for its mappings,
			 * so we just reuse the same, finding them by
			 * matching a variant field class's option's
			 * _original_ name (with a leading underscore,
			 * possibly) with a selector field class's
			 * mapping name.
			 */
			if (fc->tag_fc->base.is_signed) {
				const bt_field_class_enumeration_signed_mapping *mapping =
					(bt_field_class_enumeration_signed_mapping *)
					find_ir_enum_field_class_mapping_by_label(
						ir_tag_fc,
						named_fc->orig_name->str, true);
				const bt_integer_range_set_signed *range_set;

				BT_ASSERT(mapping);
				range_set =
					bt_field_class_enumeration_signed_mapping_borrow_ranges_const(
						mapping);
				BT_ASSERT(range_set);
				ret = bt_field_class_variant_with_selector_field_integer_signed_append_option(
					ir_fc, named_fc->name->str,
					option_ir_fc, range_set);
			} else {
				const bt_field_class_enumeration_unsigned_mapping *mapping =
					(bt_field_class_enumeration_unsigned_mapping *)
					find_ir_enum_field_class_mapping_by_label(
						ir_tag_fc,
						named_fc->orig_name->str,
						false);
				const bt_integer_range_set_unsigned *range_set;

				BT_ASSERT(mapping);
				range_set =
					bt_field_class_enumeration_unsigned_mapping_borrow_ranges_const(
						mapping);
				BT_ASSERT(range_set);
				ret = bt_field_class_variant_with_selector_field_integer_unsigned_append_option(
					ir_fc, named_fc->name->str,
					option_ir_fc, range_set);
			}
		} else {
			/* No selector: append a plain option. */
			ret = bt_field_class_variant_without_selector_append_option(
				ir_fc, named_fc->name->str, option_ir_fc);
		}

		BT_ASSERT(ret == 0);
		bt_field_class_put_ref(option_ir_fc);
	}

	return ir_fc;
}
334
335 static inline
336 bt_field_class *ctf_field_class_array_to_ir(struct ctx *ctx,
337 struct ctf_field_class_array *fc)
338 {
339 bt_field_class *ir_fc;
340 bt_field_class *elem_ir_fc;
341
342 if (fc->base.is_text) {
343 ir_fc = bt_field_class_string_create(ctx->ir_tc);
344 BT_ASSERT(ir_fc);
345 goto end;
346 }
347
348 elem_ir_fc = ctf_field_class_to_ir(ctx, fc->base.elem_fc);
349 BT_ASSERT(elem_ir_fc);
350 ir_fc = bt_field_class_array_static_create(ctx->ir_tc, elem_ir_fc,
351 fc->length);
352 BT_ASSERT(ir_fc);
353 bt_field_class_put_ref(elem_ir_fc);
354
355 end:
356 return ir_fc;
357 }
358
359 static inline
360 bt_field_class *ctf_field_class_sequence_to_ir(struct ctx *ctx,
361 struct ctf_field_class_sequence *fc)
362 {
363 bt_field_class *ir_fc;
364 bt_field_class *elem_ir_fc;
365 bt_field_class *length_fc = NULL;
366
367 if (fc->base.is_text) {
368 ir_fc = bt_field_class_string_create(ctx->ir_tc);
369 BT_ASSERT(ir_fc);
370 goto end;
371 }
372
373 elem_ir_fc = ctf_field_class_to_ir(ctx, fc->base.elem_fc);
374 BT_ASSERT(elem_ir_fc);
375
376 if (fc->length_path.root != CTF_SCOPE_PACKET_HEADER &&
377 fc->length_path.root != CTF_SCOPE_EVENT_HEADER) {
378 length_fc = borrow_ir_fc_from_field_path(ctx, &fc->length_path);
379 BT_ASSERT(length_fc);
380 }
381
382 ir_fc = bt_field_class_array_dynamic_create(ctx->ir_tc, elem_ir_fc,
383 length_fc);
384 BT_ASSERT(ir_fc);
385 bt_field_class_put_ref(elem_ir_fc);
386 BT_ASSERT(ir_fc);
387
388 end:
389 return ir_fc;
390 }
391
/*
 * Translates the CTF field class `fc` to a new trace IR field class,
 * dispatching on its type, records the result in `fc->ir_fc`, and
 * returns it (caller owns the reference). `fc` must be part of the
 * trace IR (`in_ir`).
 */
static inline
bt_field_class *ctf_field_class_to_ir(struct ctx *ctx,
		struct ctf_field_class *fc)
{
	bt_field_class *ir_fc = NULL;

	BT_ASSERT(fc);
	BT_ASSERT(fc->in_ir);

	switch (fc->type) {
	case CTF_FIELD_CLASS_TYPE_INT:
		ir_fc = ctf_field_class_int_to_ir(ctx, ctf_field_class_as_int(fc));
		break;
	case CTF_FIELD_CLASS_TYPE_ENUM:
		ir_fc = ctf_field_class_enum_to_ir(ctx, ctf_field_class_as_enum(fc));
		break;
	case CTF_FIELD_CLASS_TYPE_FLOAT:
		ir_fc = ctf_field_class_float_to_ir(ctx, ctf_field_class_as_float(fc));
		break;
	case CTF_FIELD_CLASS_TYPE_STRING:
		ir_fc = ctf_field_class_string_to_ir(ctx, ctf_field_class_as_string(fc));
		break;
	case CTF_FIELD_CLASS_TYPE_STRUCT:
		ir_fc = ctf_field_class_struct_to_ir(ctx, ctf_field_class_as_struct(fc));
		break;
	case CTF_FIELD_CLASS_TYPE_ARRAY:
		ir_fc = ctf_field_class_array_to_ir(ctx, ctf_field_class_as_array(fc));
		break;
	case CTF_FIELD_CLASS_TYPE_SEQUENCE:
		ir_fc = ctf_field_class_sequence_to_ir(ctx, ctf_field_class_as_sequence(fc));
		break;
	case CTF_FIELD_CLASS_TYPE_VARIANT:
		ir_fc = ctf_field_class_variant_to_ir(ctx, ctf_field_class_as_variant(fc));
		break;
	default:
		/* Unknown CTF field class type: programming error. */
		bt_common_abort();
	}

	/* Keep a link to the IR class so field path lookups can find it. */
	fc->ir_fc = ir_fc;
	return ir_fc;
}
433
434 static inline
435 bool ctf_field_class_struct_has_immediate_member_in_ir(
436 struct ctf_field_class_struct *fc)
437 {
438 uint64_t i;
439 bool has_immediate_member_in_ir = false;
440
441 /*
442 * If the structure field class has no members at all, then it
443 * was an empty structure in the beginning, so leave it existing
444 * and empty.
445 */
446 if (fc->members->len == 0) {
447 has_immediate_member_in_ir = true;
448 goto end;
449 }
450
451 for (i = 0; i < fc->members->len; i++) {
452 struct ctf_named_field_class *named_fc =
453 ctf_field_class_struct_borrow_member_by_index(fc, i);
454
455 if (named_fc->fc->in_ir) {
456 has_immediate_member_in_ir = true;
457 goto end;
458 }
459 }
460
461 end:
462 return has_immediate_member_in_ir;
463 }
464
465 static inline
466 bt_field_class *scope_ctf_field_class_to_ir(struct ctx *ctx)
467 {
468 bt_field_class *ir_fc = NULL;
469 struct ctf_field_class *fc = NULL;
470
471 switch (ctx->scope) {
472 case CTF_SCOPE_PACKET_CONTEXT:
473 fc = ctx->sc->packet_context_fc;
474 break;
475 case CTF_SCOPE_EVENT_COMMON_CONTEXT:
476 fc = ctx->sc->event_common_context_fc;
477 break;
478 case CTF_SCOPE_EVENT_SPECIFIC_CONTEXT:
479 fc = ctx->ec->spec_context_fc;
480 break;
481 case CTF_SCOPE_EVENT_PAYLOAD:
482 fc = ctx->ec->payload_fc;
483 break;
484 default:
485 bt_common_abort();
486 }
487
488 if (fc && ctf_field_class_struct_has_immediate_member_in_ir(
489 ctf_field_class_as_struct(fc))) {
490 ir_fc = ctf_field_class_to_ir(ctx, fc);
491 }
492
493 return ir_fc;
494 }
495
/*
 * Translates the current CTF event class (`ctx->ec`) to a trace IR
 * event class within `ctx->ir_sc`, unless it's already translated, in
 * which case nothing new is created.
 */
static inline
void ctf_event_class_to_ir(struct ctx *ctx)
{
	int ret;
	bt_event_class *ir_ec = NULL;
	bt_field_class *ir_fc;

	BT_ASSERT(ctx->ec);

	/* Already translated: nothing more to do. */
	if (ctx->ec->is_translated) {
		ir_ec = bt_stream_class_borrow_event_class_by_id(
			ctx->ir_sc, ctx->ec->id);
		BT_ASSERT(ir_ec);
		goto end;
	}

	ir_ec = bt_event_class_create_with_id(ctx->ir_sc, ctx->ec->id);
	BT_ASSERT(ir_ec);

	/* The stream class keeps a reference; keep a borrowed pointer. */
	bt_event_class_put_ref(ir_ec);

	/* Translate the specific context scope, if relevant. */
	ctx->scope = CTF_SCOPE_EVENT_SPECIFIC_CONTEXT;
	ir_fc = scope_ctf_field_class_to_ir(ctx);
	if (ir_fc) {
		ret = bt_event_class_set_specific_context_field_class(
			ir_ec, ir_fc);
		BT_ASSERT(ret == 0);
		bt_field_class_put_ref(ir_fc);
	}

	/* Translate the payload scope, if relevant. */
	ctx->scope = CTF_SCOPE_EVENT_PAYLOAD;
	ir_fc = scope_ctf_field_class_to_ir(ctx);
	if (ir_fc) {
		ret = bt_event_class_set_payload_field_class(ir_ec,
			ir_fc);
		BT_ASSERT(ret == 0);
		bt_field_class_put_ref(ir_fc);
	}

	/* Optional properties: only set them when non-empty/set. */
	if (ctx->ec->name->len > 0) {
		ret = bt_event_class_set_name(ir_ec, ctx->ec->name->str);
		BT_ASSERT(ret == 0);
	}

	if (ctx->ec->emf_uri->len > 0) {
		ret = bt_event_class_set_emf_uri(ir_ec, ctx->ec->emf_uri->str);
		BT_ASSERT(ret == 0);
	}

	if (ctx->ec->is_log_level_set) {
		bt_event_class_set_log_level(ir_ec, ctx->ec->log_level);
	}

	/* Mark as translated and link back to the IR event class. */
	ctx->ec->is_translated = true;
	ctx->ec->ir_ec = ir_ec;

end:
	return;
}
553
554
/*
 * Translates the current CTF stream class (`ctx->sc`) to a trace IR
 * stream class within `ctx->ir_tc` (setting `ctx->ir_sc`), unless it's
 * already translated, in which case the existing IR stream class is
 * borrowed.
 */
static inline
void ctf_stream_class_to_ir(struct ctx *ctx)
{
	int ret;
	bt_field_class *ir_fc;

	BT_ASSERT(ctx->sc);

	/* Already translated: just borrow the existing IR stream class. */
	if (ctx->sc->is_translated) {
		ctx->ir_sc = bt_trace_class_borrow_stream_class_by_id(
			ctx->ir_tc, ctx->sc->id);
		BT_ASSERT(ctx->ir_sc);
		goto end;
	}

	ctx->ir_sc = bt_stream_class_create_with_id(ctx->ir_tc, ctx->sc->id);
	BT_ASSERT(ctx->ir_sc);

	/* The trace class keeps a reference; keep a borrowed pointer. */
	bt_stream_class_put_ref(ctx->ir_sc);

	if (ctx->sc->default_clock_class) {
		/* The IR clock class was created by ctf_trace_class_to_ir(). */
		BT_ASSERT(ctx->sc->default_clock_class->ir_cc);
		ret = bt_stream_class_set_default_clock_class(ctx->ir_sc,
			ctx->sc->default_clock_class->ir_cc);
		BT_ASSERT(ret == 0);
	}

	/* Copy the packet/discarded items support properties. */
	bt_stream_class_set_supports_packets(ctx->ir_sc, BT_TRUE,
		ctx->sc->packets_have_ts_begin, ctx->sc->packets_have_ts_end);
	bt_stream_class_set_supports_discarded_events(ctx->ir_sc,
		ctx->sc->has_discarded_events,
		ctx->sc->discarded_events_have_default_cs);
	bt_stream_class_set_supports_discarded_packets(ctx->ir_sc,
		ctx->sc->has_discarded_packets,
		ctx->sc->discarded_packets_have_default_cs);

	/* Translate the packet context scope, if relevant. */
	ctx->scope = CTF_SCOPE_PACKET_CONTEXT;
	ir_fc = scope_ctf_field_class_to_ir(ctx);
	if (ir_fc) {
		ret = bt_stream_class_set_packet_context_field_class(
			ctx->ir_sc, ir_fc);
		BT_ASSERT(ret == 0);
		bt_field_class_put_ref(ir_fc);
	}

	/* Translate the event common context scope, if relevant. */
	ctx->scope = CTF_SCOPE_EVENT_COMMON_CONTEXT;
	ir_fc = scope_ctf_field_class_to_ir(ctx);
	if (ir_fc) {
		ret = bt_stream_class_set_event_common_context_field_class(
			ctx->ir_sc, ir_fc);
		BT_ASSERT(ret == 0);
		bt_field_class_put_ref(ir_fc);
	}

	/* IDs come from the CTF metadata, not automatic assignment. */
	bt_stream_class_set_assigns_automatic_event_class_id(ctx->ir_sc,
		BT_FALSE);
	bt_stream_class_set_assigns_automatic_stream_id(ctx->ir_sc, BT_FALSE);

	/* Mark as translated and link back to the IR stream class. */
	ctx->sc->is_translated = true;
	ctx->sc->ir_sc = ctx->ir_sc;

end:
	return;
}
617
618 static inline
619 void ctf_clock_class_to_ir(bt_clock_class *ir_cc, struct ctf_clock_class *cc)
620 {
621 int ret;
622
623 if (strlen(cc->name->str) > 0) {
624 ret = bt_clock_class_set_name(ir_cc, cc->name->str);
625 BT_ASSERT(ret == 0);
626 }
627
628 if (strlen(cc->description->str) > 0) {
629 ret = bt_clock_class_set_description(ir_cc, cc->description->str);
630 BT_ASSERT(ret == 0);
631 }
632
633 bt_clock_class_set_frequency(ir_cc, cc->frequency);
634 bt_clock_class_set_precision(ir_cc, cc->precision);
635 bt_clock_class_set_offset(ir_cc, cc->offset_seconds, cc->offset_cycles);
636
637 if (cc->has_uuid) {
638 bt_clock_class_set_uuid(ir_cc, cc->uuid);
639 }
640
641 bt_clock_class_set_origin_is_unix_epoch(ir_cc, cc->is_absolute);
642 }
643
644 static inline
645 int ctf_trace_class_to_ir(struct ctx *ctx)
646 {
647 int ret = 0;
648 uint64_t i;
649
650 BT_ASSERT(ctx->tc);
651 BT_ASSERT(ctx->ir_tc);
652
653 if (ctx->tc->is_translated) {
654 goto end;
655 }
656
657 for (i = 0; i < ctx->tc->clock_classes->len; i++) {
658 ctf_clock_class *cc = (ctf_clock_class *) ctx->tc->clock_classes->pdata[i];
659
660 cc->ir_cc = bt_clock_class_create(ctx->self_comp);
661 ctf_clock_class_to_ir(cc->ir_cc, cc);
662 }
663
664 bt_trace_class_set_assigns_automatic_stream_class_id(ctx->ir_tc,
665 BT_FALSE);
666 ctx->tc->is_translated = true;
667 ctx->tc->ir_tc = ctx->ir_tc;
668
669 end:
670 return ret;
671 }
672
/*
 * Entry point: translates the whole CTF trace class `tc` to the trace
 * IR trace class `ir_tc`, including all its stream classes and their
 * event classes. Returns 0 on success, the error status of
 * ctf_trace_class_to_ir() otherwise.
 */
BT_HIDDEN
int ctf_trace_class_translate(bt_self_component *self_comp,
		bt_trace_class *ir_tc, struct ctf_trace_class *tc)
{
	int ret = 0;
	uint64_t i;
	struct ctx ctx = { 0 };

	ctx.self_comp = self_comp;
	ctx.tc = tc;
	ctx.ir_tc = ir_tc;
	ret = ctf_trace_class_to_ir(&ctx);
	if (ret) {
		goto end;
	}

	/* Translate each stream class, then each of its event classes. */
	for (i = 0; i < tc->stream_classes->len; i++) {
		uint64_t j;
		ctx.sc = (ctf_stream_class *) tc->stream_classes->pdata[i];

		/* Also sets `ctx.ir_sc` for the event class loop below. */
		ctf_stream_class_to_ir(&ctx);

		for (j = 0; j < ctx.sc->event_classes->len; j++) {
			ctx.ec = (ctf_event_class *) ctx.sc->event_classes->pdata[j];

			ctf_event_class_to_ir(&ctx);
			ctx.ec = NULL;
		}

		ctx.sc = NULL;
	}

end:
	return ret;
}
This page took 0.042191 seconds and 3 git commands to generate.