Re-format C++ files
[babeltrace.git] / src / plugins / ctf / common / metadata / ctf-meta-translate.cpp
1 /*
2 * SPDX-License-Identifier: MIT
3 *
4 * Copyright 2018 Philippe Proulx <pproulx@efficios.com>
5 */
6
7 #include <babeltrace2/babeltrace.h>
8 #include "common/macros.h"
9 #include "common/assert.h"
10 #include <glib.h>
11 #include <stdbool.h>
12 #include <stdint.h>
13 #include <string.h>
14 #include <inttypes.h>
15
16 #include "ctf-meta-visitors.hpp"
17
/* Visitor context used while translating CTF metadata IR to trace IR. */
struct ctx
{
    bt_self_component *self_comp;
    bt_trace_class *ir_tc;       /* Destination trace IR trace class */
    bt_stream_class *ir_sc;      /* Current trace IR stream class */
    struct ctf_trace_class *tc;  /* Source CTF trace class */
    struct ctf_stream_class *sc; /* Current source CTF stream class */
    struct ctf_event_class *ec;  /* Current source CTF event class */
    enum ctf_scope scope;        /* Scope currently being translated */
};
28
29 static inline bt_field_class *ctf_field_class_to_ir(struct ctx *ctx, struct ctf_field_class *fc);
30
31 static inline void ctf_field_class_int_set_props(struct ctf_field_class_int *fc,
32 bt_field_class *ir_fc)
33 {
34 bt_field_class_integer_set_field_value_range(ir_fc, fc->base.size);
35 bt_field_class_integer_set_preferred_display_base(ir_fc, fc->disp_base);
36 }
37
38 static inline bt_field_class *ctf_field_class_int_to_ir(struct ctx *ctx,
39 struct ctf_field_class_int *fc)
40 {
41 bt_field_class *ir_fc;
42
43 if (fc->is_signed) {
44 ir_fc = bt_field_class_integer_signed_create(ctx->ir_tc);
45 } else {
46 ir_fc = bt_field_class_integer_unsigned_create(ctx->ir_tc);
47 }
48
49 BT_ASSERT(ir_fc);
50 ctf_field_class_int_set_props(fc, ir_fc);
51 return ir_fc;
52 }
53
/*
 * Translates the CTF IR enumeration field class `fc` to a new trace IR
 * enumeration field class (signed or unsigned depending on
 * `fc->base.is_signed`), copying all the mappings and their value
 * ranges.
 *
 * Returns a new reference.
 */
static inline bt_field_class *ctf_field_class_enum_to_ir(struct ctx *ctx,
                                                         struct ctf_field_class_enum *fc)
{
    int ret;
    bt_field_class *ir_fc;
    uint64_t i;

    if (fc->base.is_signed) {
        ir_fc = bt_field_class_enumeration_signed_create(ctx->ir_tc);
    } else {
        ir_fc = bt_field_class_enumeration_unsigned_create(ctx->ir_tc);
    }

    BT_ASSERT(ir_fc);

    /* An enumeration is an integer: copy the common integer properties */
    ctf_field_class_int_set_props(&fc->base, ir_fc);

    for (i = 0; i < fc->mappings->len; i++) {
        struct ctf_field_class_enum_mapping *mapping =
            ctf_field_class_enum_borrow_mapping_by_index(fc, i);

        /* Only one of those two is used, matching the signedness above */
        bt_integer_range_set_signed *range_set_signed = NULL;
        bt_integer_range_set_unsigned *range_set_unsigned = NULL;
        uint64_t range_i;

        if (fc->base.is_signed) {
            range_set_signed = bt_integer_range_set_signed_create();
            BT_ASSERT(range_set_signed);
        } else {
            range_set_unsigned = bt_integer_range_set_unsigned_create();
            BT_ASSERT(range_set_unsigned);
        }

        /* Copy each range of this mapping into the new range set */
        for (range_i = 0; range_i < mapping->ranges->len; range_i++) {
            struct ctf_range *range =
                ctf_field_class_enum_mapping_borrow_range_by_index(mapping, range_i);

            if (fc->base.is_signed) {
                ret = bt_integer_range_set_signed_add_range(range_set_signed, range->lower.i,
                                                            range->upper.i);
            } else {
                ret = bt_integer_range_set_unsigned_add_range(range_set_unsigned, range->lower.u,
                                                              range->upper.u);
            }

            BT_ASSERT(ret == 0);
        }

        /*
         * Add the mapping to the IR enumeration; the enumeration field
         * class keeps its own reference to the range set, so release
         * ours immediately afterwards.
         */
        if (fc->base.is_signed) {
            ret = bt_field_class_enumeration_signed_add_mapping(ir_fc, mapping->label->str,
                                                                range_set_signed);
            BT_INTEGER_RANGE_SET_SIGNED_PUT_REF_AND_RESET(range_set_signed);
        } else {
            ret = bt_field_class_enumeration_unsigned_add_mapping(ir_fc, mapping->label->str,
                                                                  range_set_unsigned);
            BT_INTEGER_RANGE_SET_UNSIGNED_PUT_REF_AND_RESET(range_set_unsigned);
        }

        BT_ASSERT(ret == 0);
    }

    return ir_fc;
}
115
116 static inline bt_field_class *ctf_field_class_float_to_ir(struct ctx *ctx,
117 struct ctf_field_class_float *fc)
118 {
119 bt_field_class *ir_fc;
120
121 if (fc->base.size == 32) {
122 ir_fc = bt_field_class_real_single_precision_create(ctx->ir_tc);
123 } else {
124 ir_fc = bt_field_class_real_double_precision_create(ctx->ir_tc);
125 }
126 BT_ASSERT(ir_fc);
127
128 return ir_fc;
129 }
130
131 static inline bt_field_class *ctf_field_class_string_to_ir(struct ctx *ctx,
132 struct ctf_field_class_string *)
133 {
134 bt_field_class *ir_fc = bt_field_class_string_create(ctx->ir_tc);
135
136 BT_ASSERT(ir_fc);
137 return ir_fc;
138 }
139
140 static inline void translate_struct_field_class_members(struct ctx *ctx,
141 struct ctf_field_class_struct *fc,
142 bt_field_class *ir_fc, bool,
143 struct ctf_field_class_struct *)
144 {
145 uint64_t i;
146 int ret;
147
148 for (i = 0; i < fc->members->len; i++) {
149 struct ctf_named_field_class *named_fc =
150 ctf_field_class_struct_borrow_member_by_index(fc, i);
151 bt_field_class *member_ir_fc;
152 const char *name = named_fc->name->str;
153
154 if (!named_fc->fc->in_ir) {
155 continue;
156 }
157
158 member_ir_fc = ctf_field_class_to_ir(ctx, named_fc->fc);
159 BT_ASSERT(member_ir_fc);
160 ret = bt_field_class_structure_append_member(ir_fc, name, member_ir_fc);
161 BT_ASSERT(ret == 0);
162 bt_field_class_put_ref(member_ir_fc);
163 }
164 }
165
166 static inline bt_field_class *ctf_field_class_struct_to_ir(struct ctx *ctx,
167 struct ctf_field_class_struct *fc)
168 {
169 bt_field_class *ir_fc = bt_field_class_structure_create(ctx->ir_tc);
170
171 BT_ASSERT(ir_fc);
172 translate_struct_field_class_members(ctx, fc, ir_fc, false, NULL);
173 return ir_fc;
174 }
175
176 static inline bt_field_class *borrow_ir_fc_from_field_path(struct ctx *ctx,
177 struct ctf_field_path *field_path)
178 {
179 bt_field_class *ir_fc = NULL;
180 struct ctf_field_class *fc =
181 ctf_field_path_borrow_field_class(field_path, ctx->tc, ctx->sc, ctx->ec);
182
183 BT_ASSERT(fc);
184
185 if (fc->in_ir) {
186 ir_fc = fc->ir_fc;
187 }
188
189 return ir_fc;
190 }
191
192 static inline const bt_field_class_enumeration_mapping *
193 find_ir_enum_field_class_mapping_by_label(const bt_field_class *fc, const char *label,
194 bool is_signed)
195 {
196 const bt_field_class_enumeration_mapping *mapping = NULL;
197 uint64_t i;
198
199 for (i = 0; i < bt_field_class_enumeration_get_mapping_count(fc); i++) {
200 const bt_field_class_enumeration_mapping *this_mapping;
201 const bt_field_class_enumeration_signed_mapping *signed_this_mapping = NULL;
202 const bt_field_class_enumeration_unsigned_mapping *unsigned_this_mapping = NULL;
203
204 if (is_signed) {
205 signed_this_mapping =
206 bt_field_class_enumeration_signed_borrow_mapping_by_index_const(fc, i);
207 BT_ASSERT(signed_this_mapping);
208 this_mapping =
209 bt_field_class_enumeration_signed_mapping_as_mapping_const(signed_this_mapping);
210 } else {
211 unsigned_this_mapping =
212 bt_field_class_enumeration_unsigned_borrow_mapping_by_index_const(fc, i);
213 BT_ASSERT(unsigned_this_mapping);
214 this_mapping =
215 bt_field_class_enumeration_unsigned_mapping_as_mapping_const(unsigned_this_mapping);
216 }
217
218 BT_ASSERT(this_mapping);
219
220 if (strcmp(bt_field_class_enumeration_mapping_get_label(this_mapping), label) == 0) {
221 mapping = this_mapping;
222 goto end;
223 }
224 }
225
226 end:
227 return mapping;
228 }
229
/*
 * Translates the CTF IR variant field class `fc` to a new trace IR
 * variant field class, translating and appending each option.
 *
 * When the variant's tag (selector) field is not located in a packet
 * header or event header scope (those scopes are not translated to
 * trace IR), the trace IR variant is created with a selector field
 * class and each option gets the integer ranges of the matching
 * selector mapping; otherwise the variant is created without a
 * selector.
 *
 * Returns a new reference.
 */
static inline bt_field_class *ctf_field_class_variant_to_ir(struct ctx *ctx,
                                                            struct ctf_field_class_variant *fc)
{
    int ret;
    bt_field_class *ir_fc;
    uint64_t i;
    bt_field_class *ir_tag_fc = NULL;

    if (fc->tag_path.root != CTF_SCOPE_PACKET_HEADER &&
        fc->tag_path.root != CTF_SCOPE_EVENT_HEADER) {
        ir_tag_fc = borrow_ir_fc_from_field_path(ctx, &fc->tag_path);
        BT_ASSERT(ir_tag_fc);
    }

    ir_fc = bt_field_class_variant_create(ctx->ir_tc, ir_tag_fc);
    BT_ASSERT(ir_fc);

    for (i = 0; i < fc->options->len; i++) {
        struct ctf_named_field_class *named_fc =
            ctf_field_class_variant_borrow_option_by_index(fc, i);
        bt_field_class *option_ir_fc;

        BT_ASSERT(named_fc->fc->in_ir);
        option_ir_fc = ctf_field_class_to_ir(ctx, named_fc->fc);
        BT_ASSERT(option_ir_fc);

        if (ir_tag_fc) {
            /*
             * At this point the trace IR selector
             * (enumeration) field class already exists if
             * the variant is tagged (`ir_tag_fc`). This one
             * already contains range sets for its mappings,
             * so we just reuse the same, finding them by
             * matching a variant field class's option's
             * _original_ name (with a leading underscore,
             * possibly) with a selector field class's
             * mapping name.
             */
            if (fc->tag_fc->base.is_signed) {
                const bt_field_class_enumeration_signed_mapping *mapping =
                    (bt_field_class_enumeration_signed_mapping *)
                        find_ir_enum_field_class_mapping_by_label(ir_tag_fc,
                                                                  named_fc->orig_name->str, true);
                const bt_integer_range_set_signed *range_set;

                BT_ASSERT(mapping);
                range_set = bt_field_class_enumeration_signed_mapping_borrow_ranges_const(mapping);
                BT_ASSERT(range_set);
                ret = bt_field_class_variant_with_selector_field_integer_signed_append_option(
                    ir_fc, named_fc->name->str, option_ir_fc, range_set);
            } else {
                const bt_field_class_enumeration_unsigned_mapping *mapping =
                    (bt_field_class_enumeration_unsigned_mapping *)
                        find_ir_enum_field_class_mapping_by_label(ir_tag_fc,
                                                                  named_fc->orig_name->str, false);
                const bt_integer_range_set_unsigned *range_set;

                BT_ASSERT(mapping);
                range_set =
                    bt_field_class_enumeration_unsigned_mapping_borrow_ranges_const(mapping);
                BT_ASSERT(range_set);
                ret = bt_field_class_variant_with_selector_field_integer_unsigned_append_option(
                    ir_fc, named_fc->name->str, option_ir_fc, range_set);
            }
        } else {
            ret = bt_field_class_variant_without_selector_append_option(ir_fc, named_fc->name->str,
                                                                        option_ir_fc);
        }

        BT_ASSERT(ret == 0);

        /* The variant keeps its own reference to the option: release ours */
        bt_field_class_put_ref(option_ir_fc);
    }

    return ir_fc;
}
305
306 static inline bt_field_class *ctf_field_class_array_to_ir(struct ctx *ctx,
307 struct ctf_field_class_array *fc)
308 {
309 bt_field_class *ir_fc;
310 bt_field_class *elem_ir_fc;
311
312 if (fc->base.is_text) {
313 ir_fc = bt_field_class_string_create(ctx->ir_tc);
314 BT_ASSERT(ir_fc);
315 goto end;
316 }
317
318 elem_ir_fc = ctf_field_class_to_ir(ctx, fc->base.elem_fc);
319 BT_ASSERT(elem_ir_fc);
320 ir_fc = bt_field_class_array_static_create(ctx->ir_tc, elem_ir_fc, fc->length);
321 BT_ASSERT(ir_fc);
322 bt_field_class_put_ref(elem_ir_fc);
323
324 end:
325 return ir_fc;
326 }
327
328 static inline bt_field_class *ctf_field_class_sequence_to_ir(struct ctx *ctx,
329 struct ctf_field_class_sequence *fc)
330 {
331 bt_field_class *ir_fc;
332 bt_field_class *elem_ir_fc;
333 bt_field_class *length_fc = NULL;
334
335 if (fc->base.is_text) {
336 ir_fc = bt_field_class_string_create(ctx->ir_tc);
337 BT_ASSERT(ir_fc);
338 goto end;
339 }
340
341 elem_ir_fc = ctf_field_class_to_ir(ctx, fc->base.elem_fc);
342 BT_ASSERT(elem_ir_fc);
343
344 if (fc->length_path.root != CTF_SCOPE_PACKET_HEADER &&
345 fc->length_path.root != CTF_SCOPE_EVENT_HEADER) {
346 length_fc = borrow_ir_fc_from_field_path(ctx, &fc->length_path);
347 BT_ASSERT(length_fc);
348 }
349
350 ir_fc = bt_field_class_array_dynamic_create(ctx->ir_tc, elem_ir_fc, length_fc);
351 BT_ASSERT(ir_fc);
352 bt_field_class_put_ref(elem_ir_fc);
353 BT_ASSERT(ir_fc);
354
355 end:
356 return ir_fc;
357 }
358
/*
 * Translates the CTF IR field class `fc` to a new trace IR field class,
 * dispatching on its type.
 *
 * As a side effect, caches the result in `fc->ir_fc` so that later
 * field path resolutions can borrow it (see
 * borrow_ir_fc_from_field_path()).
 *
 * `fc` must be marked as being part of trace IR (`in_ir`).
 *
 * Returns a new reference.
 */
static inline bt_field_class *ctf_field_class_to_ir(struct ctx *ctx, struct ctf_field_class *fc)
{
    bt_field_class *ir_fc = NULL;

    BT_ASSERT(fc);
    BT_ASSERT(fc->in_ir);

    switch (fc->type) {
    case CTF_FIELD_CLASS_TYPE_INT:
        ir_fc = ctf_field_class_int_to_ir(ctx, ctf_field_class_as_int(fc));
        break;
    case CTF_FIELD_CLASS_TYPE_ENUM:
        ir_fc = ctf_field_class_enum_to_ir(ctx, ctf_field_class_as_enum(fc));
        break;
    case CTF_FIELD_CLASS_TYPE_FLOAT:
        ir_fc = ctf_field_class_float_to_ir(ctx, ctf_field_class_as_float(fc));
        break;
    case CTF_FIELD_CLASS_TYPE_STRING:
        ir_fc = ctf_field_class_string_to_ir(ctx, ctf_field_class_as_string(fc));
        break;
    case CTF_FIELD_CLASS_TYPE_STRUCT:
        ir_fc = ctf_field_class_struct_to_ir(ctx, ctf_field_class_as_struct(fc));
        break;
    case CTF_FIELD_CLASS_TYPE_ARRAY:
        ir_fc = ctf_field_class_array_to_ir(ctx, ctf_field_class_as_array(fc));
        break;
    case CTF_FIELD_CLASS_TYPE_SEQUENCE:
        ir_fc = ctf_field_class_sequence_to_ir(ctx, ctf_field_class_as_sequence(fc));
        break;
    case CTF_FIELD_CLASS_TYPE_VARIANT:
        ir_fc = ctf_field_class_variant_to_ir(ctx, ctf_field_class_as_variant(fc));
        break;
    default:
        bt_common_abort();
    }

    /* Cache the translation for later field path resolution */
    fc->ir_fc = ir_fc;
    return ir_fc;
}
398
399 static inline bool
400 ctf_field_class_struct_has_immediate_member_in_ir(struct ctf_field_class_struct *fc)
401 {
402 uint64_t i;
403 bool has_immediate_member_in_ir = false;
404
405 /*
406 * If the structure field class has no members at all, then it
407 * was an empty structure in the beginning, so leave it existing
408 * and empty.
409 */
410 if (fc->members->len == 0) {
411 has_immediate_member_in_ir = true;
412 goto end;
413 }
414
415 for (i = 0; i < fc->members->len; i++) {
416 struct ctf_named_field_class *named_fc =
417 ctf_field_class_struct_borrow_member_by_index(fc, i);
418
419 if (named_fc->fc->in_ir) {
420 has_immediate_member_in_ir = true;
421 goto end;
422 }
423 }
424
425 end:
426 return has_immediate_member_in_ir;
427 }
428
429 static inline bt_field_class *scope_ctf_field_class_to_ir(struct ctx *ctx)
430 {
431 bt_field_class *ir_fc = NULL;
432 struct ctf_field_class *fc = NULL;
433
434 switch (ctx->scope) {
435 case CTF_SCOPE_PACKET_CONTEXT:
436 fc = ctx->sc->packet_context_fc;
437 break;
438 case CTF_SCOPE_EVENT_COMMON_CONTEXT:
439 fc = ctx->sc->event_common_context_fc;
440 break;
441 case CTF_SCOPE_EVENT_SPECIFIC_CONTEXT:
442 fc = ctx->ec->spec_context_fc;
443 break;
444 case CTF_SCOPE_EVENT_PAYLOAD:
445 fc = ctx->ec->payload_fc;
446 break;
447 default:
448 bt_common_abort();
449 }
450
451 if (fc && ctf_field_class_struct_has_immediate_member_in_ir(ctf_field_class_as_struct(fc))) {
452 ir_fc = ctf_field_class_to_ir(ctx, fc);
453 }
454
455 return ir_fc;
456 }
457
/*
 * Translates the current CTF event class (`ctx->ec`) to a trace IR
 * event class within the current trace IR stream class (`ctx->ir_sc`):
 * translates the specific context and payload field classes and copies
 * the name, EMF URI, and log level when they are set.
 *
 * When the event class was already translated, only borrows the
 * existing trace IR event class.
 */
static inline void ctf_event_class_to_ir(struct ctx *ctx)
{
    int ret;
    bt_event_class *ir_ec = NULL;
    bt_field_class *ir_fc;

    BT_ASSERT(ctx->ec);

    if (ctx->ec->is_translated) {
        /* Already translated: borrow the existing IR event class */
        ir_ec = bt_stream_class_borrow_event_class_by_id(ctx->ir_sc, ctx->ec->id);
        BT_ASSERT(ir_ec);
        goto end;
    }

    ir_ec = bt_event_class_create_with_id(ctx->ir_sc, ctx->ec->id);
    BT_ASSERT(ir_ec);

    /*
     * The stream class keeps its own reference to the new event class:
     * release ours and keep using `ir_ec` as a borrowed reference.
     */
    bt_event_class_put_ref(ir_ec);

    ctx->scope = CTF_SCOPE_EVENT_SPECIFIC_CONTEXT;
    ir_fc = scope_ctf_field_class_to_ir(ctx);
    if (ir_fc) {
        ret = bt_event_class_set_specific_context_field_class(ir_ec, ir_fc);
        BT_ASSERT(ret == 0);
        bt_field_class_put_ref(ir_fc);
    }

    ctx->scope = CTF_SCOPE_EVENT_PAYLOAD;
    ir_fc = scope_ctf_field_class_to_ir(ctx);
    if (ir_fc) {
        ret = bt_event_class_set_payload_field_class(ir_ec, ir_fc);
        BT_ASSERT(ret == 0);
        bt_field_class_put_ref(ir_fc);
    }

    if (ctx->ec->name->len > 0) {
        ret = bt_event_class_set_name(ir_ec, ctx->ec->name->str);
        BT_ASSERT(ret == 0);
    }

    if (ctx->ec->emf_uri->len > 0) {
        ret = bt_event_class_set_emf_uri(ir_ec, ctx->ec->emf_uri->str);
        BT_ASSERT(ret == 0);
    }

    if (ctx->ec->is_log_level_set) {
        bt_event_class_set_log_level(ir_ec, ctx->ec->log_level);
    }

    /* Mark as translated and cache the (borrowed) IR event class */
    ctx->ec->is_translated = true;
    ctx->ec->ir_ec = ir_ec;

end:
    return;
}
511
/*
 * Translates the current CTF stream class (`ctx->sc`) to a trace IR
 * stream class within the trace IR trace class (`ctx->ir_tc`), setting
 * `ctx->ir_sc` to the result: sets the default clock class, the
 * packet/discarded events/discarded packets support properties, and
 * translates the packet context and event common context field classes.
 *
 * When the stream class was already translated, only borrows the
 * existing trace IR stream class into `ctx->ir_sc`.
 */
static inline void ctf_stream_class_to_ir(struct ctx *ctx)
{
    int ret;
    bt_field_class *ir_fc;

    BT_ASSERT(ctx->sc);

    if (ctx->sc->is_translated) {
        /* Already translated: borrow the existing IR stream class */
        ctx->ir_sc = bt_trace_class_borrow_stream_class_by_id(ctx->ir_tc, ctx->sc->id);
        BT_ASSERT(ctx->ir_sc);
        goto end;
    }

    ctx->ir_sc = bt_stream_class_create_with_id(ctx->ir_tc, ctx->sc->id);
    BT_ASSERT(ctx->ir_sc);

    /*
     * The trace class keeps its own reference to the new stream class:
     * release ours and keep using `ctx->ir_sc` as a borrowed reference.
     */
    bt_stream_class_put_ref(ctx->ir_sc);

    if (ctx->sc->default_clock_class) {
        /* The IR clock class must have been created beforehand */
        BT_ASSERT(ctx->sc->default_clock_class->ir_cc);
        ret = bt_stream_class_set_default_clock_class(ctx->ir_sc,
                                                      ctx->sc->default_clock_class->ir_cc);
        BT_ASSERT(ret == 0);
    }

    bt_stream_class_set_supports_packets(ctx->ir_sc, BT_TRUE, ctx->sc->packets_have_ts_begin,
                                         ctx->sc->packets_have_ts_end);
    bt_stream_class_set_supports_discarded_events(ctx->ir_sc, ctx->sc->has_discarded_events,
                                                  ctx->sc->discarded_events_have_default_cs);
    bt_stream_class_set_supports_discarded_packets(ctx->ir_sc, ctx->sc->has_discarded_packets,
                                                   ctx->sc->discarded_packets_have_default_cs);

    ctx->scope = CTF_SCOPE_PACKET_CONTEXT;
    ir_fc = scope_ctf_field_class_to_ir(ctx);
    if (ir_fc) {
        ret = bt_stream_class_set_packet_context_field_class(ctx->ir_sc, ir_fc);
        BT_ASSERT(ret == 0);
        bt_field_class_put_ref(ir_fc);
    }

    ctx->scope = CTF_SCOPE_EVENT_COMMON_CONTEXT;
    ir_fc = scope_ctf_field_class_to_ir(ctx);
    if (ir_fc) {
        ret = bt_stream_class_set_event_common_context_field_class(ctx->ir_sc, ir_fc);
        BT_ASSERT(ret == 0);
        bt_field_class_put_ref(ir_fc);
    }

    /* The CTF metadata provides explicit event class and stream IDs */
    bt_stream_class_set_assigns_automatic_event_class_id(ctx->ir_sc, BT_FALSE);
    bt_stream_class_set_assigns_automatic_stream_id(ctx->ir_sc, BT_FALSE);

    /* Mark as translated and cache the (borrowed) IR stream class */
    ctx->sc->is_translated = true;
    ctx->sc->ir_sc = ctx->ir_sc;

end:
    return;
}
567
568 static inline void ctf_clock_class_to_ir(bt_clock_class *ir_cc, struct ctf_clock_class *cc)
569 {
570 int ret;
571
572 if (strlen(cc->name->str) > 0) {
573 ret = bt_clock_class_set_name(ir_cc, cc->name->str);
574 BT_ASSERT(ret == 0);
575 }
576
577 if (strlen(cc->description->str) > 0) {
578 ret = bt_clock_class_set_description(ir_cc, cc->description->str);
579 BT_ASSERT(ret == 0);
580 }
581
582 bt_clock_class_set_frequency(ir_cc, cc->frequency);
583 bt_clock_class_set_precision(ir_cc, cc->precision);
584 bt_clock_class_set_offset(ir_cc, cc->offset_seconds, cc->offset_cycles);
585
586 if (cc->has_uuid) {
587 bt_clock_class_set_uuid(ir_cc, cc->uuid);
588 }
589
590 bt_clock_class_set_origin_is_unix_epoch(ir_cc, cc->is_absolute);
591 }
592
/*
 * Translates the trace-level properties of the CTF trace class
 * (`ctx->tc`) to the trace IR trace class (`ctx->ir_tc`): creates a
 * trace IR clock class for each CTF clock class and disables automatic
 * stream class ID assignment.
 *
 * Does nothing when the trace class was already translated.
 *
 * Returns 0.
 */
static inline int ctf_trace_class_to_ir(struct ctx *ctx)
{
    int ret = 0;
    uint64_t i;

    BT_ASSERT(ctx->tc);
    BT_ASSERT(ctx->ir_tc);

    if (ctx->tc->is_translated) {
        goto end;
    }

    for (i = 0; i < ctx->tc->clock_classes->len; i++) {
        ctf_clock_class *cc = (ctf_clock_class *) ctx->tc->clock_classes->pdata[i];

        /* Cache the new IR clock class for the stream classes to use */
        cc->ir_cc = bt_clock_class_create(ctx->self_comp);
        ctf_clock_class_to_ir(cc->ir_cc, cc);
    }

    /* The CTF metadata provides explicit stream class IDs */
    bt_trace_class_set_assigns_automatic_stream_class_id(ctx->ir_tc, BT_FALSE);
    ctx->tc->is_translated = true;
    ctx->tc->ir_tc = ctx->ir_tc;

end:
    return ret;
}
619
/*
 * Translates the CTF trace class `tc` to the trace IR trace class
 * `ir_tc`: translates the trace-level properties, then each stream
 * class and, within each, each event class.
 *
 * `self_comp` is the self component used to create trace IR objects.
 *
 * Returns 0 on success, or a non-zero value on error.
 */
int ctf_trace_class_translate(bt_self_component *self_comp, bt_trace_class *ir_tc,
                              struct ctf_trace_class *tc)
{
    int ret = 0;
    uint64_t i;
    struct ctx ctx = {};

    ctx.self_comp = self_comp;
    ctx.tc = tc;
    ctx.ir_tc = ir_tc;
    ret = ctf_trace_class_to_ir(&ctx);
    if (ret) {
        goto end;
    }

    for (i = 0; i < tc->stream_classes->len; i++) {
        uint64_t j;

        /* Makes `ctx.sc` the current stream class for the visitors */
        ctx.sc = (ctf_stream_class *) tc->stream_classes->pdata[i];

        ctf_stream_class_to_ir(&ctx);

        for (j = 0; j < ctx.sc->event_classes->len; j++) {
            ctx.ec = (ctf_event_class *) ctx.sc->event_classes->pdata[j];

            ctf_event_class_to_ir(&ctx);
            ctx.ec = NULL;
        }

        ctx.sc = NULL;
    }

end:
    return ret;
}
This page took 0.043219 seconds and 4 git commands to generate.