Sort includes in C++ files
[babeltrace.git] / src / plugins / ctf / common / metadata / ctf-meta-translate.cpp
1 /*
2 * SPDX-License-Identifier: MIT
3 *
4 * Copyright 2018 Philippe Proulx <pproulx@efficios.com>
5 */
6
7 #include <glib.h>
8 #include <inttypes.h>
9 #include <stdbool.h>
10 #include <stdint.h>
11 #include <string.h>
12
13 #include <babeltrace2/babeltrace.h>
14
15 #include "common/assert.h"
16 #include "common/macros.h"
17
18 #include "ctf-meta-visitors.hpp"
19
/* Working context passed through all the translation functions below */
struct ctx
{
    /* Self component: passed to bt_clock_class_create() */
    bt_self_component *self_comp;

    /* Trace IR trace class being filled */
    bt_trace_class *ir_tc;

    /* Trace IR stream class currently being translated */
    bt_stream_class *ir_sc;

    /* CTF IR trace class being translated */
    struct ctf_trace_class *tc;

    /* CTF IR stream class currently being translated */
    struct ctf_stream_class *sc;

    /* CTF IR event class currently being translated */
    struct ctf_event_class *ec;

    /* Scope (root field class) currently being translated */
    enum ctf_scope scope;
};
30
31 static inline bt_field_class *ctf_field_class_to_ir(struct ctx *ctx, struct ctf_field_class *fc);
32
33 static inline void ctf_field_class_int_set_props(struct ctf_field_class_int *fc,
34 bt_field_class *ir_fc)
35 {
36 bt_field_class_integer_set_field_value_range(ir_fc, fc->base.size);
37 bt_field_class_integer_set_preferred_display_base(ir_fc, fc->disp_base);
38 }
39
40 static inline bt_field_class *ctf_field_class_int_to_ir(struct ctx *ctx,
41 struct ctf_field_class_int *fc)
42 {
43 bt_field_class *ir_fc;
44
45 if (fc->is_signed) {
46 ir_fc = bt_field_class_integer_signed_create(ctx->ir_tc);
47 } else {
48 ir_fc = bt_field_class_integer_unsigned_create(ctx->ir_tc);
49 }
50
51 BT_ASSERT(ir_fc);
52 ctf_field_class_int_set_props(fc, ir_fc);
53 return ir_fc;
54 }
55
/*
 * Translates the CTF IR enumeration field class `fc` to a new trace IR
 * enumeration field class (signed or unsigned depending on the
 * underlying integer field class), copying all its mappings and their
 * ranges.
 */
static inline bt_field_class *ctf_field_class_enum_to_ir(struct ctx *ctx,
                                                         struct ctf_field_class_enum *fc)
{
    int ret;
    bt_field_class *ir_fc;
    uint64_t i;

    if (fc->base.is_signed) {
        ir_fc = bt_field_class_enumeration_signed_create(ctx->ir_tc);
    } else {
        ir_fc = bt_field_class_enumeration_unsigned_create(ctx->ir_tc);
    }

    BT_ASSERT(ir_fc);

    /* An enumeration field class also has integer field class properties */
    ctf_field_class_int_set_props(&fc->base, ir_fc);

    /* Translate each mapping */
    for (i = 0; i < fc->mappings->len; i++) {
        struct ctf_field_class_enum_mapping *mapping =
            ctf_field_class_enum_borrow_mapping_by_index(fc, i);
        bt_integer_range_set_signed *range_set_signed = NULL;
        bt_integer_range_set_unsigned *range_set_unsigned = NULL;
        uint64_t range_i;

        /* Only one of the two range sets is created, matching the signedness */
        if (fc->base.is_signed) {
            range_set_signed = bt_integer_range_set_signed_create();
            BT_ASSERT(range_set_signed);
        } else {
            range_set_unsigned = bt_integer_range_set_unsigned_create();
            BT_ASSERT(range_set_unsigned);
        }

        /* Fill the range set with the mapping's ranges */
        for (range_i = 0; range_i < mapping->ranges->len; range_i++) {
            struct ctf_range *range =
                ctf_field_class_enum_mapping_borrow_range_by_index(mapping, range_i);

            if (fc->base.is_signed) {
                ret = bt_integer_range_set_signed_add_range(range_set_signed, range->lower.i,
                                                            range->upper.i);
            } else {
                ret = bt_integer_range_set_unsigned_add_range(range_set_unsigned, range->lower.u,
                                                              range->upper.u);
            }

            BT_ASSERT(ret == 0);
        }

        /*
         * Add the mapping to the trace IR field class, then put our
         * reference on the range set (the mapping was added above, so
         * this function doesn't need the range set anymore).
         */
        if (fc->base.is_signed) {
            ret = bt_field_class_enumeration_signed_add_mapping(ir_fc, mapping->label->str,
                                                                range_set_signed);
            BT_INTEGER_RANGE_SET_SIGNED_PUT_REF_AND_RESET(range_set_signed);
        } else {
            ret = bt_field_class_enumeration_unsigned_add_mapping(ir_fc, mapping->label->str,
                                                                  range_set_unsigned);
            BT_INTEGER_RANGE_SET_UNSIGNED_PUT_REF_AND_RESET(range_set_unsigned);
        }

        BT_ASSERT(ret == 0);
    }

    return ir_fc;
}
117
118 static inline bt_field_class *ctf_field_class_float_to_ir(struct ctx *ctx,
119 struct ctf_field_class_float *fc)
120 {
121 bt_field_class *ir_fc;
122
123 if (fc->base.size == 32) {
124 ir_fc = bt_field_class_real_single_precision_create(ctx->ir_tc);
125 } else {
126 ir_fc = bt_field_class_real_double_precision_create(ctx->ir_tc);
127 }
128 BT_ASSERT(ir_fc);
129
130 return ir_fc;
131 }
132
133 static inline bt_field_class *ctf_field_class_string_to_ir(struct ctx *ctx,
134 struct ctf_field_class_string *)
135 {
136 bt_field_class *ir_fc = bt_field_class_string_create(ctx->ir_tc);
137
138 BT_ASSERT(ir_fc);
139 return ir_fc;
140 }
141
142 static inline void translate_struct_field_class_members(struct ctx *ctx,
143 struct ctf_field_class_struct *fc,
144 bt_field_class *ir_fc, bool,
145 struct ctf_field_class_struct *)
146 {
147 uint64_t i;
148 int ret;
149
150 for (i = 0; i < fc->members->len; i++) {
151 struct ctf_named_field_class *named_fc =
152 ctf_field_class_struct_borrow_member_by_index(fc, i);
153 bt_field_class *member_ir_fc;
154 const char *name = named_fc->name->str;
155
156 if (!named_fc->fc->in_ir) {
157 continue;
158 }
159
160 member_ir_fc = ctf_field_class_to_ir(ctx, named_fc->fc);
161 BT_ASSERT(member_ir_fc);
162 ret = bt_field_class_structure_append_member(ir_fc, name, member_ir_fc);
163 BT_ASSERT(ret == 0);
164 bt_field_class_put_ref(member_ir_fc);
165 }
166 }
167
168 static inline bt_field_class *ctf_field_class_struct_to_ir(struct ctx *ctx,
169 struct ctf_field_class_struct *fc)
170 {
171 bt_field_class *ir_fc = bt_field_class_structure_create(ctx->ir_tc);
172
173 BT_ASSERT(ir_fc);
174 translate_struct_field_class_members(ctx, fc, ir_fc, false, NULL);
175 return ir_fc;
176 }
177
178 static inline bt_field_class *borrow_ir_fc_from_field_path(struct ctx *ctx,
179 struct ctf_field_path *field_path)
180 {
181 bt_field_class *ir_fc = NULL;
182 struct ctf_field_class *fc =
183 ctf_field_path_borrow_field_class(field_path, ctx->tc, ctx->sc, ctx->ec);
184
185 BT_ASSERT(fc);
186
187 if (fc->in_ir) {
188 ir_fc = fc->ir_fc;
189 }
190
191 return ir_fc;
192 }
193
194 static inline const bt_field_class_enumeration_mapping *
195 find_ir_enum_field_class_mapping_by_label(const bt_field_class *fc, const char *label,
196 bool is_signed)
197 {
198 const bt_field_class_enumeration_mapping *mapping = NULL;
199 uint64_t i;
200
201 for (i = 0; i < bt_field_class_enumeration_get_mapping_count(fc); i++) {
202 const bt_field_class_enumeration_mapping *this_mapping;
203 const bt_field_class_enumeration_signed_mapping *signed_this_mapping = NULL;
204 const bt_field_class_enumeration_unsigned_mapping *unsigned_this_mapping = NULL;
205
206 if (is_signed) {
207 signed_this_mapping =
208 bt_field_class_enumeration_signed_borrow_mapping_by_index_const(fc, i);
209 BT_ASSERT(signed_this_mapping);
210 this_mapping =
211 bt_field_class_enumeration_signed_mapping_as_mapping_const(signed_this_mapping);
212 } else {
213 unsigned_this_mapping =
214 bt_field_class_enumeration_unsigned_borrow_mapping_by_index_const(fc, i);
215 BT_ASSERT(unsigned_this_mapping);
216 this_mapping =
217 bt_field_class_enumeration_unsigned_mapping_as_mapping_const(unsigned_this_mapping);
218 }
219
220 BT_ASSERT(this_mapping);
221
222 if (strcmp(bt_field_class_enumeration_mapping_get_label(this_mapping), label) == 0) {
223 mapping = this_mapping;
224 goto end;
225 }
226 }
227
228 end:
229 return mapping;
230 }
231
/*
 * Translates the CTF IR variant field class `fc` to a new trace IR
 * variant field class, with or without a selector field class
 * depending on where the variant's tag field is located.
 */
static inline bt_field_class *ctf_field_class_variant_to_ir(struct ctx *ctx,
                                                            struct ctf_field_class_variant *fc)
{
    int ret;
    bt_field_class *ir_fc;
    uint64_t i;
    bt_field_class *ir_tag_fc = NULL;

    /*
     * A tag located in the packet header or in the event header is not
     * translated to trace IR: in that case the variant gets no
     * selector field class.
     */
    if (fc->tag_path.root != CTF_SCOPE_PACKET_HEADER &&
        fc->tag_path.root != CTF_SCOPE_EVENT_HEADER) {
        ir_tag_fc = borrow_ir_fc_from_field_path(ctx, &fc->tag_path);
        BT_ASSERT(ir_tag_fc);
    }

    ir_fc = bt_field_class_variant_create(ctx->ir_tc, ir_tag_fc);
    BT_ASSERT(ir_fc);

    /* Translate and append each option */
    for (i = 0; i < fc->options->len; i++) {
        struct ctf_named_field_class *named_fc =
            ctf_field_class_variant_borrow_option_by_index(fc, i);
        bt_field_class *option_ir_fc;

        BT_ASSERT(named_fc->fc->in_ir);
        option_ir_fc = ctf_field_class_to_ir(ctx, named_fc->fc);
        BT_ASSERT(option_ir_fc);

        if (ir_tag_fc) {
            /*
             * At this point the trace IR selector
             * (enumeration) field class already exists if
             * the variant is tagged (`ir_tag_fc`). This one
             * already contains range sets for its mappings,
             * so we just reuse the same, finding them by
             * matching a variant field class's option's
             * _original_ name (with a leading underscore,
             * possibly) with a selector field class's
             * mapping name.
             */
            if (fc->tag_fc->base.is_signed) {
                const bt_field_class_enumeration_signed_mapping *mapping =
                    (bt_field_class_enumeration_signed_mapping *)
                        find_ir_enum_field_class_mapping_by_label(ir_tag_fc,
                                                                  named_fc->orig_name->str, true);
                const bt_integer_range_set_signed *range_set;

                BT_ASSERT(mapping);
                range_set = bt_field_class_enumeration_signed_mapping_borrow_ranges_const(mapping);
                BT_ASSERT(range_set);
                ret = bt_field_class_variant_with_selector_field_integer_signed_append_option(
                    ir_fc, named_fc->name->str, option_ir_fc, range_set);
            } else {
                const bt_field_class_enumeration_unsigned_mapping *mapping =
                    (bt_field_class_enumeration_unsigned_mapping *)
                        find_ir_enum_field_class_mapping_by_label(ir_tag_fc,
                                                                  named_fc->orig_name->str, false);
                const bt_integer_range_set_unsigned *range_set;

                BT_ASSERT(mapping);
                range_set =
                    bt_field_class_enumeration_unsigned_mapping_borrow_ranges_const(mapping);
                BT_ASSERT(range_set);
                ret = bt_field_class_variant_with_selector_field_integer_unsigned_append_option(
                    ir_fc, named_fc->name->str, option_ir_fc, range_set);
            }
        } else {
            ret = bt_field_class_variant_without_selector_append_option(ir_fc, named_fc->name->str,
                                                                        option_ir_fc);
        }

        BT_ASSERT(ret == 0);

        /* The variant field class now owns the option field class */
        bt_field_class_put_ref(option_ir_fc);
    }

    return ir_fc;
}
307
308 static inline bt_field_class *ctf_field_class_array_to_ir(struct ctx *ctx,
309 struct ctf_field_class_array *fc)
310 {
311 bt_field_class *ir_fc;
312 bt_field_class *elem_ir_fc;
313
314 if (fc->base.is_text) {
315 ir_fc = bt_field_class_string_create(ctx->ir_tc);
316 BT_ASSERT(ir_fc);
317 goto end;
318 }
319
320 elem_ir_fc = ctf_field_class_to_ir(ctx, fc->base.elem_fc);
321 BT_ASSERT(elem_ir_fc);
322 ir_fc = bt_field_class_array_static_create(ctx->ir_tc, elem_ir_fc, fc->length);
323 BT_ASSERT(ir_fc);
324 bt_field_class_put_ref(elem_ir_fc);
325
326 end:
327 return ir_fc;
328 }
329
330 static inline bt_field_class *ctf_field_class_sequence_to_ir(struct ctx *ctx,
331 struct ctf_field_class_sequence *fc)
332 {
333 bt_field_class *ir_fc;
334 bt_field_class *elem_ir_fc;
335 bt_field_class *length_fc = NULL;
336
337 if (fc->base.is_text) {
338 ir_fc = bt_field_class_string_create(ctx->ir_tc);
339 BT_ASSERT(ir_fc);
340 goto end;
341 }
342
343 elem_ir_fc = ctf_field_class_to_ir(ctx, fc->base.elem_fc);
344 BT_ASSERT(elem_ir_fc);
345
346 if (fc->length_path.root != CTF_SCOPE_PACKET_HEADER &&
347 fc->length_path.root != CTF_SCOPE_EVENT_HEADER) {
348 length_fc = borrow_ir_fc_from_field_path(ctx, &fc->length_path);
349 BT_ASSERT(length_fc);
350 }
351
352 ir_fc = bt_field_class_array_dynamic_create(ctx->ir_tc, elem_ir_fc, length_fc);
353 BT_ASSERT(ir_fc);
354 bt_field_class_put_ref(elem_ir_fc);
355 BT_ASSERT(ir_fc);
356
357 end:
358 return ir_fc;
359 }
360
/*
 * Translates the CTF IR field class `fc` to a new trace IR field
 * class, dispatching on the CTF IR field class's type, and caches the
 * result in `fc->ir_fc`.
 *
 * `fc` must be marked as existing in trace IR (`in_ir`).
 */
static inline bt_field_class *ctf_field_class_to_ir(struct ctx *ctx, struct ctf_field_class *fc)
{
    bt_field_class *ir_fc = NULL;

    BT_ASSERT(fc);
    BT_ASSERT(fc->in_ir);

    switch (fc->type) {
    case CTF_FIELD_CLASS_TYPE_INT:
        ir_fc = ctf_field_class_int_to_ir(ctx, ctf_field_class_as_int(fc));
        break;
    case CTF_FIELD_CLASS_TYPE_ENUM:
        ir_fc = ctf_field_class_enum_to_ir(ctx, ctf_field_class_as_enum(fc));
        break;
    case CTF_FIELD_CLASS_TYPE_FLOAT:
        ir_fc = ctf_field_class_float_to_ir(ctx, ctf_field_class_as_float(fc));
        break;
    case CTF_FIELD_CLASS_TYPE_STRING:
        ir_fc = ctf_field_class_string_to_ir(ctx, ctf_field_class_as_string(fc));
        break;
    case CTF_FIELD_CLASS_TYPE_STRUCT:
        ir_fc = ctf_field_class_struct_to_ir(ctx, ctf_field_class_as_struct(fc));
        break;
    case CTF_FIELD_CLASS_TYPE_ARRAY:
        ir_fc = ctf_field_class_array_to_ir(ctx, ctf_field_class_as_array(fc));
        break;
    case CTF_FIELD_CLASS_TYPE_SEQUENCE:
        ir_fc = ctf_field_class_sequence_to_ir(ctx, ctf_field_class_as_sequence(fc));
        break;
    case CTF_FIELD_CLASS_TYPE_VARIANT:
        ir_fc = ctf_field_class_variant_to_ir(ctx, ctf_field_class_as_variant(fc));
        break;
    default:
        bt_common_abort();
    }

    /* Cache the translation in the CTF IR field class */
    fc->ir_fc = ir_fc;
    return ir_fc;
}
400
401 static inline bool
402 ctf_field_class_struct_has_immediate_member_in_ir(struct ctf_field_class_struct *fc)
403 {
404 uint64_t i;
405 bool has_immediate_member_in_ir = false;
406
407 /*
408 * If the structure field class has no members at all, then it
409 * was an empty structure in the beginning, so leave it existing
410 * and empty.
411 */
412 if (fc->members->len == 0) {
413 has_immediate_member_in_ir = true;
414 goto end;
415 }
416
417 for (i = 0; i < fc->members->len; i++) {
418 struct ctf_named_field_class *named_fc =
419 ctf_field_class_struct_borrow_member_by_index(fc, i);
420
421 if (named_fc->fc->in_ir) {
422 has_immediate_member_in_ir = true;
423 goto end;
424 }
425 }
426
427 end:
428 return has_immediate_member_in_ir;
429 }
430
/*
 * Translates the root field class of the current scope (`ctx->scope`)
 * to a new trace IR field class.
 *
 * Returns `NULL` when there's no root field class for this scope or
 * when it has no immediate member which is translated to trace IR
 * (see ctf_field_class_struct_has_immediate_member_in_ir()).
 */
static inline bt_field_class *scope_ctf_field_class_to_ir(struct ctx *ctx)
{
    bt_field_class *ir_fc = NULL;
    struct ctf_field_class *fc = NULL;

    /* Select the root field class for the current scope */
    switch (ctx->scope) {
    case CTF_SCOPE_PACKET_CONTEXT:
        fc = ctx->sc->packet_context_fc;
        break;
    case CTF_SCOPE_EVENT_COMMON_CONTEXT:
        fc = ctx->sc->event_common_context_fc;
        break;
    case CTF_SCOPE_EVENT_SPECIFIC_CONTEXT:
        fc = ctx->ec->spec_context_fc;
        break;
    case CTF_SCOPE_EVENT_PAYLOAD:
        fc = ctx->ec->payload_fc;
        break;
    default:
        bt_common_abort();
    }

    if (fc && ctf_field_class_struct_has_immediate_member_in_ir(ctf_field_class_as_struct(fc))) {
        ir_fc = ctf_field_class_to_ir(ctx, fc);
    }

    return ir_fc;
}
459
/*
 * Translates the current CTF IR event class (`ctx->ec`) to a trace IR
 * event class within the current trace IR stream class (`ctx->ir_sc`).
 *
 * If the event class was already translated, this function only
 * borrows the existing trace IR event class. On return,
 * `ctx->ec->ir_ec` is set and `ctx->ec->is_translated` is true.
 */
static inline void ctf_event_class_to_ir(struct ctx *ctx)
{
    int ret;
    bt_event_class *ir_ec = NULL;
    bt_field_class *ir_fc;

    BT_ASSERT(ctx->ec);

    if (ctx->ec->is_translated) {
        /* Already translated: borrow the existing trace IR event class */
        ir_ec = bt_stream_class_borrow_event_class_by_id(ctx->ir_sc, ctx->ec->id);
        BT_ASSERT(ir_ec);
        goto end;
    }

    ir_ec = bt_event_class_create_with_id(ctx->ir_sc, ctx->ec->id);
    BT_ASSERT(ir_ec);

    /*
     * Put our reference immediately: the event class was created
     * within `ctx->ir_sc`; `ir_ec` is used as a borrowed pointer
     * below.
     */
    bt_event_class_put_ref(ir_ec);

    /* Translate the specific context field class, if any in trace IR */
    ctx->scope = CTF_SCOPE_EVENT_SPECIFIC_CONTEXT;
    ir_fc = scope_ctf_field_class_to_ir(ctx);
    if (ir_fc) {
        ret = bt_event_class_set_specific_context_field_class(ir_ec, ir_fc);
        BT_ASSERT(ret == 0);
        bt_field_class_put_ref(ir_fc);
    }

    /* Translate the payload field class, if any in trace IR */
    ctx->scope = CTF_SCOPE_EVENT_PAYLOAD;
    ir_fc = scope_ctf_field_class_to_ir(ctx);
    if (ir_fc) {
        ret = bt_event_class_set_payload_field_class(ir_ec, ir_fc);
        BT_ASSERT(ret == 0);
        bt_field_class_put_ref(ir_fc);
    }

    /* Copy the optional name, EMF URI, and log level properties */
    if (ctx->ec->name->len > 0) {
        ret = bt_event_class_set_name(ir_ec, ctx->ec->name->str);
        BT_ASSERT(ret == 0);
    }

    if (ctx->ec->emf_uri->len > 0) {
        ret = bt_event_class_set_emf_uri(ir_ec, ctx->ec->emf_uri->str);
        BT_ASSERT(ret == 0);
    }

    if (ctx->ec->is_log_level_set) {
        bt_event_class_set_log_level(ir_ec, ctx->ec->log_level);
    }

    /* Mark as translated and cache the trace IR event class */
    ctx->ec->is_translated = true;
    ctx->ec->ir_ec = ir_ec;

end:
    return;
}
513
/*
 * Translates the current CTF IR stream class (`ctx->sc`) to a trace IR
 * stream class within `ctx->ir_tc`, setting `ctx->ir_sc` to the
 * result.
 *
 * If the stream class was already translated, this function only
 * borrows the existing trace IR stream class.
 */
static inline void ctf_stream_class_to_ir(struct ctx *ctx)
{
    int ret;
    bt_field_class *ir_fc;

    BT_ASSERT(ctx->sc);

    if (ctx->sc->is_translated) {
        /* Already translated: borrow the existing trace IR stream class */
        ctx->ir_sc = bt_trace_class_borrow_stream_class_by_id(ctx->ir_tc, ctx->sc->id);
        BT_ASSERT(ctx->ir_sc);
        goto end;
    }

    ctx->ir_sc = bt_stream_class_create_with_id(ctx->ir_tc, ctx->sc->id);
    BT_ASSERT(ctx->ir_sc);

    /*
     * Put our reference immediately: the stream class was created
     * within `ctx->ir_tc`; `ctx->ir_sc` is used as a borrowed pointer
     * below.
     */
    bt_stream_class_put_ref(ctx->ir_sc);

    if (ctx->sc->default_clock_class) {
        /* The clock classes are translated before the stream classes */
        BT_ASSERT(ctx->sc->default_clock_class->ir_cc);
        ret = bt_stream_class_set_default_clock_class(ctx->ir_sc,
                                                      ctx->sc->default_clock_class->ir_cc);
        BT_ASSERT(ret == 0);
    }

    /* Copy the packet and discarded events/packets support properties */
    bt_stream_class_set_supports_packets(ctx->ir_sc, BT_TRUE, ctx->sc->packets_have_ts_begin,
                                         ctx->sc->packets_have_ts_end);
    bt_stream_class_set_supports_discarded_events(ctx->ir_sc, ctx->sc->has_discarded_events,
                                                  ctx->sc->discarded_events_have_default_cs);
    bt_stream_class_set_supports_discarded_packets(ctx->ir_sc, ctx->sc->has_discarded_packets,
                                                   ctx->sc->discarded_packets_have_default_cs);

    /* Translate the packet context field class, if any in trace IR */
    ctx->scope = CTF_SCOPE_PACKET_CONTEXT;
    ir_fc = scope_ctf_field_class_to_ir(ctx);
    if (ir_fc) {
        ret = bt_stream_class_set_packet_context_field_class(ctx->ir_sc, ir_fc);
        BT_ASSERT(ret == 0);
        bt_field_class_put_ref(ir_fc);
    }

    /* Translate the event common context field class, if any in trace IR */
    ctx->scope = CTF_SCOPE_EVENT_COMMON_CONTEXT;
    ir_fc = scope_ctf_field_class_to_ir(ctx);
    if (ir_fc) {
        ret = bt_stream_class_set_event_common_context_field_class(ctx->ir_sc, ir_fc);
        BT_ASSERT(ret == 0);
        bt_field_class_put_ref(ir_fc);
    }

    /*
     * IDs come from the CTF metadata: event classes are created with
     * explicit IDs (see ctf_event_class_to_ir()), so disable automatic
     * ID assignment.
     */
    bt_stream_class_set_assigns_automatic_event_class_id(ctx->ir_sc, BT_FALSE);
    bt_stream_class_set_assigns_automatic_stream_id(ctx->ir_sc, BT_FALSE);

    /* Mark as translated and cache the trace IR stream class */
    ctx->sc->is_translated = true;
    ctx->sc->ir_sc = ctx->ir_sc;

end:
    return;
}
569
570 static inline void ctf_clock_class_to_ir(bt_clock_class *ir_cc, struct ctf_clock_class *cc)
571 {
572 int ret;
573
574 if (strlen(cc->name->str) > 0) {
575 ret = bt_clock_class_set_name(ir_cc, cc->name->str);
576 BT_ASSERT(ret == 0);
577 }
578
579 if (strlen(cc->description->str) > 0) {
580 ret = bt_clock_class_set_description(ir_cc, cc->description->str);
581 BT_ASSERT(ret == 0);
582 }
583
584 bt_clock_class_set_frequency(ir_cc, cc->frequency);
585 bt_clock_class_set_precision(ir_cc, cc->precision);
586 bt_clock_class_set_offset(ir_cc, cc->offset_seconds, cc->offset_cycles);
587
588 if (cc->has_uuid) {
589 bt_clock_class_set_uuid(ir_cc, cc->uuid);
590 }
591
592 bt_clock_class_set_origin_is_unix_epoch(ir_cc, cc->is_absolute);
593 }
594
595 static inline int ctf_trace_class_to_ir(struct ctx *ctx)
596 {
597 int ret = 0;
598 uint64_t i;
599
600 BT_ASSERT(ctx->tc);
601 BT_ASSERT(ctx->ir_tc);
602
603 if (ctx->tc->is_translated) {
604 goto end;
605 }
606
607 for (i = 0; i < ctx->tc->clock_classes->len; i++) {
608 ctf_clock_class *cc = (ctf_clock_class *) ctx->tc->clock_classes->pdata[i];
609
610 cc->ir_cc = bt_clock_class_create(ctx->self_comp);
611 ctf_clock_class_to_ir(cc->ir_cc, cc);
612 }
613
614 bt_trace_class_set_assigns_automatic_stream_class_id(ctx->ir_tc, BT_FALSE);
615 ctx->tc->is_translated = true;
616 ctx->tc->ir_tc = ctx->ir_tc;
617
618 end:
619 return ret;
620 }
621
622 int ctf_trace_class_translate(bt_self_component *self_comp, bt_trace_class *ir_tc,
623 struct ctf_trace_class *tc)
624 {
625 int ret = 0;
626 uint64_t i;
627 struct ctx ctx = {};
628
629 ctx.self_comp = self_comp;
630 ctx.tc = tc;
631 ctx.ir_tc = ir_tc;
632 ret = ctf_trace_class_to_ir(&ctx);
633 if (ret) {
634 goto end;
635 }
636
637 for (i = 0; i < tc->stream_classes->len; i++) {
638 uint64_t j;
639 ctx.sc = (ctf_stream_class *) tc->stream_classes->pdata[i];
640
641 ctf_stream_class_to_ir(&ctx);
642
643 for (j = 0; j < ctx.sc->event_classes->len; j++) {
644 ctx.ec = (ctf_event_class *) ctx.sc->event_classes->pdata[j];
645
646 ctf_event_class_to_ir(&ctx);
647 ctx.ec = NULL;
648 }
649
650 ctx.sc = NULL;
651 }
652
653 end:
654 return ret;
655 }
This page took 0.04663 seconds and 4 git commands to generate.