lib: rename functions to clearly indicate API inheritance
[babeltrace.git] src/plugins/ctf/common/metadata/ctf-meta-translate.c
/*
 * Copyright 2018 - Philippe Proulx <pproulx@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 */

#include <babeltrace2/babeltrace.h>
#include "common/macros.h"
#include "common/assert.h"
#include <glib.h>
#include <stdint.h>
#include <string.h>
#include <inttypes.h>

#include "ctf-meta-visitors.h"

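/*
 * Translation context: the source CTF metadata objects, the
 * corresponding trace IR objects being built, and the scope currently
 * being translated.
 */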
struct ctx {
	bt_self_component *self_comp;
	bt_trace_class *ir_tc;
	bt_stream_class *ir_sc;
	struct ctf_trace_class *tc;
	struct ctf_stream_class *sc;
	struct ctf_event_class *ec;
	enum ctf_scope scope;
};

static inline
bt_field_class *ctf_field_class_to_ir(struct ctx *ctx,
		struct ctf_field_class *fc);

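/*
 * Copies the field value range (from the size) and the preferred
 * display base of the CTF integer field class `fc` to the IR integer
 * field class `ir_fc`.
 */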
static inline
void ctf_field_class_int_set_props(struct ctf_field_class_int *fc,
		bt_field_class *ir_fc)
{
	bt_field_class_integer_set_field_value_range(ir_fc,
		fc->base.size);
	bt_field_class_integer_set_preferred_display_base(ir_fc,
		fc->disp_base);
}

static inline
bt_field_class *ctf_field_class_int_to_ir(struct ctx *ctx,
		struct ctf_field_class_int *fc)
{
	bt_field_class *ir_fc;

	if (fc->is_signed) {
		ir_fc = bt_field_class_integer_signed_create(ctx->ir_tc);
	} else {
		ir_fc = bt_field_class_integer_unsigned_create(ctx->ir_tc);
	}

	BT_ASSERT(ir_fc);
	ctf_field_class_int_set_props(fc, ir_fc);
	return ir_fc;
}

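/*
 * Translates a CTF enumeration field class to an IR enumeration field
 * class, converting each CTF mapping's ranges to an IR integer range
 * set.
 */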
static inline
bt_field_class *ctf_field_class_enum_to_ir(struct ctx *ctx,
		struct ctf_field_class_enum *fc)
{
	int ret;
	bt_field_class *ir_fc;
	uint64_t i;

	if (fc->base.is_signed) {
		ir_fc = bt_field_class_enumeration_signed_create(ctx->ir_tc);
	} else {
		ir_fc = bt_field_class_enumeration_unsigned_create(ctx->ir_tc);
	}

	BT_ASSERT(ir_fc);
	ctf_field_class_int_set_props((void *) fc, ir_fc);

	for (i = 0; i < fc->mappings->len; i++) {
		struct ctf_field_class_enum_mapping *mapping =
			ctf_field_class_enum_borrow_mapping_by_index(fc, i);
		void *range_set;
		uint64_t range_i;

		if (fc->base.is_signed) {
			range_set = bt_integer_range_set_signed_create();
		} else {
			range_set = bt_integer_range_set_unsigned_create();
		}

		BT_ASSERT(range_set);

		for (range_i = 0; range_i < mapping->ranges->len; range_i++) {
			struct ctf_range *range =
				ctf_field_class_enum_mapping_borrow_range_by_index(
					mapping, range_i);

			if (fc->base.is_signed) {
				ret = bt_integer_range_set_signed_add_range(
					range_set, range->lower.i,
					range->upper.i);
			} else {
				ret = bt_integer_range_set_unsigned_add_range(
					range_set, range->lower.u,
					range->upper.u);
			}

			BT_ASSERT(ret == 0);
		}

		if (fc->base.is_signed) {
			ret = bt_field_class_enumeration_signed_add_mapping(
				ir_fc, mapping->label->str, range_set);
			BT_RANGE_SET_SIGNED_PUT_REF_AND_RESET(range_set);
		} else {
			ret = bt_field_class_enumeration_unsigned_add_mapping(
				ir_fc, mapping->label->str, range_set);
			BT_RANGE_SET_UNSIGNED_PUT_REF_AND_RESET(range_set);
		}

		BT_ASSERT(ret == 0);
	}

	return ir_fc;
}

static inline
bt_field_class *ctf_field_class_float_to_ir(struct ctx *ctx,
		struct ctf_field_class_float *fc)
{
	bt_field_class *ir_fc;

	ir_fc = bt_field_class_real_create(ctx->ir_tc);
	BT_ASSERT(ir_fc);

	if (fc->base.size == 32) {
		bt_field_class_real_set_is_single_precision(ir_fc,
			BT_TRUE);
	}

	return ir_fc;
}

static inline
bt_field_class *ctf_field_class_string_to_ir(struct ctx *ctx,
		struct ctf_field_class_string *fc)
{
	bt_field_class *ir_fc = bt_field_class_string_create(ctx->ir_tc);

	BT_ASSERT(ir_fc);
	return ir_fc;
}

static inline
void translate_struct_field_class_members(struct ctx *ctx,
		struct ctf_field_class_struct *fc, bt_field_class *ir_fc,
		bool with_header_prefix,
		struct ctf_field_class_struct *context_fc)
{
	uint64_t i;
	int ret;

	for (i = 0; i < fc->members->len; i++) {
		struct ctf_named_field_class *named_fc =
			ctf_field_class_struct_borrow_member_by_index(fc, i);
		bt_field_class *member_ir_fc;
		const char *name = named_fc->name->str;

		if (!named_fc->fc->in_ir) {
			continue;
		}

		member_ir_fc = ctf_field_class_to_ir(ctx, named_fc->fc);
		BT_ASSERT(member_ir_fc);
		ret = bt_field_class_structure_append_member(ir_fc, name,
			member_ir_fc);
		BT_ASSERT(ret == 0);
		bt_field_class_put_ref(member_ir_fc);
	}
}

static inline
bt_field_class *ctf_field_class_struct_to_ir(struct ctx *ctx,
		struct ctf_field_class_struct *fc)
{
	bt_field_class *ir_fc = bt_field_class_structure_create(ctx->ir_tc);

	BT_ASSERT(ir_fc);
	translate_struct_field_class_members(ctx, fc, ir_fc, false, NULL);
	return ir_fc;
}

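/*
 * Borrows the IR field class previously created for the CTF field
 * class located at `field_path`, or returns `NULL` if that field
 * class is not part of the IR.
 */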
static inline
bt_field_class *borrow_ir_fc_from_field_path(struct ctx *ctx,
		struct ctf_field_path *field_path)
{
	bt_field_class *ir_fc = NULL;
	struct ctf_field_class *fc = ctf_field_path_borrow_field_class(
		field_path, ctx->tc, ctx->sc, ctx->ec);

	BT_ASSERT(fc);

	if (fc->in_ir) {
		ir_fc = fc->ir_fc;
	}

	return ir_fc;
}

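/*
 * Finds the mapping of the IR enumeration field class `fc` having the
 * label `label`, returning `NULL` if there's no such mapping.
 */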
static inline
const void *find_ir_enum_field_class_mapping_by_label(const bt_field_class *fc,
		const char *label, bool is_signed)
{
	const void *mapping = NULL;
	uint64_t i;

	for (i = 0; i < bt_field_class_enumeration_get_mapping_count(fc); i++) {
		const bt_field_class_enumeration_mapping *this_mapping;
		const void *spec_this_mapping;

		if (is_signed) {
			spec_this_mapping =
				bt_field_class_enumeration_signed_borrow_mapping_by_index_const(
					fc, i);
			this_mapping =
				bt_field_class_enumeration_signed_mapping_as_mapping_const(
					spec_this_mapping);
		} else {
			spec_this_mapping =
				bt_field_class_enumeration_unsigned_borrow_mapping_by_index_const(
					fc, i);
			this_mapping =
				bt_field_class_enumeration_unsigned_mapping_as_mapping_const(
					spec_this_mapping);
		}

		BT_ASSERT(this_mapping);
		BT_ASSERT(spec_this_mapping);

		if (strcmp(bt_field_class_enumeration_mapping_get_label(
				this_mapping), label) == 0) {
			mapping = spec_this_mapping;
			goto end;
		}
	}

end:
	return mapping;
}

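/*
 * Translates a CTF variant field class to an IR variant field class.
 * When the variant has an IR selector (tag) field class, each option
 * is appended with the ranges of the selector mapping having the
 * option's original name.
 */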
static inline
bt_field_class *ctf_field_class_variant_to_ir(struct ctx *ctx,
		struct ctf_field_class_variant *fc)
{
	int ret;
	bt_field_class *ir_fc;
	uint64_t i;
	bt_field_class *ir_tag_fc = NULL;

	if (fc->tag_path.root != CTF_SCOPE_PACKET_HEADER &&
			fc->tag_path.root != CTF_SCOPE_EVENT_HEADER) {
		ir_tag_fc = borrow_ir_fc_from_field_path(ctx, &fc->tag_path);
		BT_ASSERT(ir_tag_fc);
	}

	ir_fc = bt_field_class_variant_create(ctx->ir_tc, ir_tag_fc);
	BT_ASSERT(ir_fc);

	for (i = 0; i < fc->options->len; i++) {
		struct ctf_named_field_class *named_fc =
			ctf_field_class_variant_borrow_option_by_index(fc, i);
		bt_field_class *option_ir_fc;

		BT_ASSERT(named_fc->fc->in_ir);
		option_ir_fc = ctf_field_class_to_ir(ctx, named_fc->fc);
		BT_ASSERT(option_ir_fc);

		if (ir_tag_fc) {
			/*
			 * At this point the trace IR selector
			 * (enumeration) field class already exists if
			 * the variant is tagged (`ir_tag_fc`). This one
			 * already contains range sets for its mappings,
			 * so we just reuse the same, finding them by
			 * matching a variant field class's option's
			 * _original_ name (with a leading underscore,
			 * possibly) with a selector field class's
			 * mapping name.
			 */
			if (fc->tag_fc->base.is_signed) {
				const bt_field_class_enumeration_signed_mapping *mapping =
					find_ir_enum_field_class_mapping_by_label(
						ir_tag_fc,
						named_fc->orig_name->str, true);
				const bt_integer_range_set_signed *range_set;

				BT_ASSERT(mapping);
				range_set =
					bt_field_class_enumeration_signed_mapping_borrow_ranges_const(
						mapping);
				BT_ASSERT(range_set);
				ret = bt_field_class_variant_with_selector_signed_append_option(
					ir_fc, named_fc->name->str,
					option_ir_fc, range_set);
			} else {
				const bt_field_class_enumeration_unsigned_mapping *mapping =
					find_ir_enum_field_class_mapping_by_label(
						ir_tag_fc,
						named_fc->orig_name->str,
						false);
				const bt_integer_range_set_unsigned *range_set;

				BT_ASSERT(mapping);
				range_set =
					bt_field_class_enumeration_unsigned_mapping_borrow_ranges_const(
						mapping);
				BT_ASSERT(range_set);
				ret = bt_field_class_variant_with_selector_unsigned_append_option(
					ir_fc, named_fc->name->str,
					option_ir_fc, range_set);
			}
		} else {
			ret = bt_field_class_variant_without_selector_append_option(
				ir_fc, named_fc->name->str, option_ir_fc);
		}

		BT_ASSERT(ret == 0);
		bt_field_class_put_ref(option_ir_fc);
	}

	return ir_fc;
}

static inline
bt_field_class *ctf_field_class_array_to_ir(struct ctx *ctx,
		struct ctf_field_class_array *fc)
{
	bt_field_class *ir_fc;
	bt_field_class *elem_ir_fc;

	if (fc->base.is_text) {
		ir_fc = bt_field_class_string_create(ctx->ir_tc);
		BT_ASSERT(ir_fc);
		goto end;
	}

	elem_ir_fc = ctf_field_class_to_ir(ctx, fc->base.elem_fc);
	BT_ASSERT(elem_ir_fc);
	ir_fc = bt_field_class_array_static_create(ctx->ir_tc, elem_ir_fc,
		fc->length);
	BT_ASSERT(ir_fc);
	bt_field_class_put_ref(elem_ir_fc);

end:
	return ir_fc;
}

static inline
bt_field_class *ctf_field_class_sequence_to_ir(struct ctx *ctx,
		struct ctf_field_class_sequence *fc)
{
	bt_field_class *ir_fc;
	bt_field_class *elem_ir_fc;
	bt_field_class *length_fc = NULL;

	if (fc->base.is_text) {
		ir_fc = bt_field_class_string_create(ctx->ir_tc);
		BT_ASSERT(ir_fc);
		goto end;
	}

	elem_ir_fc = ctf_field_class_to_ir(ctx, fc->base.elem_fc);
	BT_ASSERT(elem_ir_fc);

	if (fc->length_path.root != CTF_SCOPE_PACKET_HEADER &&
			fc->length_path.root != CTF_SCOPE_EVENT_HEADER) {
		length_fc = borrow_ir_fc_from_field_path(ctx, &fc->length_path);
		BT_ASSERT(length_fc);
	}

	ir_fc = bt_field_class_array_dynamic_create(ctx->ir_tc, elem_ir_fc,
		length_fc);
	BT_ASSERT(ir_fc);
	bt_field_class_put_ref(elem_ir_fc);

end:
	return ir_fc;
}

static inline
bt_field_class *ctf_field_class_to_ir(struct ctx *ctx,
		struct ctf_field_class *fc)
{
	bt_field_class *ir_fc = NULL;

	BT_ASSERT(fc);
	BT_ASSERT(fc->in_ir);

	switch (fc->type) {
	case CTF_FIELD_CLASS_TYPE_INT:
		ir_fc = ctf_field_class_int_to_ir(ctx, (void *) fc);
		break;
	case CTF_FIELD_CLASS_TYPE_ENUM:
		ir_fc = ctf_field_class_enum_to_ir(ctx, (void *) fc);
		break;
	case CTF_FIELD_CLASS_TYPE_FLOAT:
		ir_fc = ctf_field_class_float_to_ir(ctx, (void *) fc);
		break;
	case CTF_FIELD_CLASS_TYPE_STRING:
		ir_fc = ctf_field_class_string_to_ir(ctx, (void *) fc);
		break;
	case CTF_FIELD_CLASS_TYPE_STRUCT:
		ir_fc = ctf_field_class_struct_to_ir(ctx, (void *) fc);
		break;
	case CTF_FIELD_CLASS_TYPE_ARRAY:
		ir_fc = ctf_field_class_array_to_ir(ctx, (void *) fc);
		break;
	case CTF_FIELD_CLASS_TYPE_SEQUENCE:
		ir_fc = ctf_field_class_sequence_to_ir(ctx, (void *) fc);
		break;
	case CTF_FIELD_CLASS_TYPE_VARIANT:
		ir_fc = ctf_field_class_variant_to_ir(ctx, (void *) fc);
		break;
	default:
		abort();
	}

	fc->ir_fc = ir_fc;
	return ir_fc;
}

static inline
bool ctf_field_class_struct_has_immediate_member_in_ir(
		struct ctf_field_class_struct *fc)
{
	uint64_t i;
	bool has_immediate_member_in_ir = false;

	/*
	 * If the structure field class has no members at all, then it
	 * was an empty structure in the beginning, so leave it existing
	 * and empty.
	 */
	if (fc->members->len == 0) {
		has_immediate_member_in_ir = true;
		goto end;
	}

	for (i = 0; i < fc->members->len; i++) {
		struct ctf_named_field_class *named_fc =
			ctf_field_class_struct_borrow_member_by_index(fc, i);

		if (named_fc->fc->in_ir) {
			has_immediate_member_in_ir = true;
			goto end;
		}
	}

end:
	return has_immediate_member_in_ir;
}

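/*
 * Translates the root field class of the current scope (`ctx->scope`)
 * to an IR field class, returning `NULL` if there's no field class or
 * if it has no immediate member which is part of the IR.
 */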
static inline
bt_field_class *scope_ctf_field_class_to_ir(struct ctx *ctx)
{
	bt_field_class *ir_fc = NULL;
	struct ctf_field_class *fc = NULL;

	switch (ctx->scope) {
	case CTF_SCOPE_PACKET_CONTEXT:
		fc = ctx->sc->packet_context_fc;
		break;
	case CTF_SCOPE_EVENT_COMMON_CONTEXT:
		fc = ctx->sc->event_common_context_fc;
		break;
	case CTF_SCOPE_EVENT_SPECIFIC_CONTEXT:
		fc = ctx->ec->spec_context_fc;
		break;
	case CTF_SCOPE_EVENT_PAYLOAD:
		fc = ctx->ec->payload_fc;
		break;
	default:
		abort();
	}

	if (fc && ctf_field_class_struct_has_immediate_member_in_ir(
			(void *) fc)) {
		ir_fc = ctf_field_class_to_ir(ctx, fc);
	}

	return ir_fc;
}

static inline
void ctf_event_class_to_ir(struct ctx *ctx)
{
	int ret;
	bt_event_class *ir_ec = NULL;
	bt_field_class *ir_fc;

	BT_ASSERT(ctx->ec);

	if (ctx->ec->is_translated) {
		ir_ec = bt_stream_class_borrow_event_class_by_id(
			ctx->ir_sc, ctx->ec->id);
		BT_ASSERT(ir_ec);
		goto end;
	}

	ir_ec = bt_event_class_create_with_id(ctx->ir_sc, ctx->ec->id);
	BT_ASSERT(ir_ec);
	bt_event_class_put_ref(ir_ec);
	ctx->scope = CTF_SCOPE_EVENT_SPECIFIC_CONTEXT;
	ir_fc = scope_ctf_field_class_to_ir(ctx);
	if (ir_fc) {
		ret = bt_event_class_set_specific_context_field_class(
			ir_ec, ir_fc);
		BT_ASSERT(ret == 0);
		bt_field_class_put_ref(ir_fc);
	}

	ctx->scope = CTF_SCOPE_EVENT_PAYLOAD;
	ir_fc = scope_ctf_field_class_to_ir(ctx);
	if (ir_fc) {
		ret = bt_event_class_set_payload_field_class(ir_ec,
			ir_fc);
		BT_ASSERT(ret == 0);
		bt_field_class_put_ref(ir_fc);
	}

	if (ctx->ec->name->len > 0) {
		ret = bt_event_class_set_name(ir_ec, ctx->ec->name->str);
		BT_ASSERT(ret == 0);
	}

	if (ctx->ec->emf_uri->len > 0) {
		ret = bt_event_class_set_emf_uri(ir_ec, ctx->ec->emf_uri->str);
		BT_ASSERT(ret == 0);
	}

	if (ctx->ec->log_level != -1) {
		bt_event_class_set_log_level(ir_ec, ctx->ec->log_level);
	}

	ctx->ec->is_translated = true;
	ctx->ec->ir_ec = ir_ec;

end:
	return;
}

static inline
void ctf_stream_class_to_ir(struct ctx *ctx)
{
	int ret;
	bt_field_class *ir_fc;

	BT_ASSERT(ctx->sc);

	if (ctx->sc->is_translated) {
		ctx->ir_sc = bt_trace_class_borrow_stream_class_by_id(
			ctx->ir_tc, ctx->sc->id);
		BT_ASSERT(ctx->ir_sc);
		goto end;
	}

	ctx->ir_sc = bt_stream_class_create_with_id(ctx->ir_tc, ctx->sc->id);
	BT_ASSERT(ctx->ir_sc);
	bt_stream_class_put_ref(ctx->ir_sc);

	if (ctx->sc->default_clock_class) {
		BT_ASSERT(ctx->sc->default_clock_class->ir_cc);
		ret = bt_stream_class_set_default_clock_class(ctx->ir_sc,
			ctx->sc->default_clock_class->ir_cc);
		BT_ASSERT(ret == 0);
	}

	bt_stream_class_set_supports_packets(ctx->ir_sc, BT_TRUE,
		ctx->sc->packets_have_ts_begin, ctx->sc->packets_have_ts_end);
	bt_stream_class_set_supports_discarded_events(ctx->ir_sc,
		ctx->sc->has_discarded_events,
		ctx->sc->discarded_events_have_default_cs);
	bt_stream_class_set_supports_discarded_packets(ctx->ir_sc,
		ctx->sc->has_discarded_packets,
		ctx->sc->discarded_packets_have_default_cs);
	ctx->scope = CTF_SCOPE_PACKET_CONTEXT;
	ir_fc = scope_ctf_field_class_to_ir(ctx);
	if (ir_fc) {
		ret = bt_stream_class_set_packet_context_field_class(
			ctx->ir_sc, ir_fc);
		BT_ASSERT(ret == 0);
		bt_field_class_put_ref(ir_fc);
	}

	ctx->scope = CTF_SCOPE_EVENT_COMMON_CONTEXT;
	ir_fc = scope_ctf_field_class_to_ir(ctx);
	if (ir_fc) {
		ret = bt_stream_class_set_event_common_context_field_class(
			ctx->ir_sc, ir_fc);
		BT_ASSERT(ret == 0);
		bt_field_class_put_ref(ir_fc);
	}

	bt_stream_class_set_assigns_automatic_event_class_id(ctx->ir_sc,
		BT_FALSE);
	bt_stream_class_set_assigns_automatic_stream_id(ctx->ir_sc, BT_FALSE);

	ctx->sc->is_translated = true;
	ctx->sc->ir_sc = ctx->ir_sc;

end:
	return;
}

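/*
 * Copies the properties of the CTF clock class `cc` to the IR clock
 * class `ir_cc`.
 */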
static inline
void ctf_clock_class_to_ir(bt_clock_class *ir_cc, struct ctf_clock_class *cc)
{
	int ret;

	if (strlen(cc->name->str) > 0) {
		ret = bt_clock_class_set_name(ir_cc, cc->name->str);
		BT_ASSERT(ret == 0);
	}

	if (strlen(cc->description->str) > 0) {
		ret = bt_clock_class_set_description(ir_cc, cc->description->str);
		BT_ASSERT(ret == 0);
	}

	bt_clock_class_set_frequency(ir_cc, cc->frequency);
	bt_clock_class_set_precision(ir_cc, cc->precision);
	bt_clock_class_set_offset(ir_cc, cc->offset_seconds, cc->offset_cycles);

	if (cc->has_uuid) {
		bt_clock_class_set_uuid(ir_cc, cc->uuid);
	}

	bt_clock_class_set_origin_is_unix_epoch(ir_cc, cc->is_absolute);
}

static inline
int ctf_trace_class_to_ir(struct ctx *ctx)
{
	int ret = 0;
	uint64_t i;

	BT_ASSERT(ctx->tc);
	BT_ASSERT(ctx->ir_tc);

	if (ctx->tc->is_translated) {
		goto end;
	}

	for (i = 0; i < ctx->tc->clock_classes->len; i++) {
		struct ctf_clock_class *cc = ctx->tc->clock_classes->pdata[i];

		cc->ir_cc = bt_clock_class_create(ctx->self_comp);
		ctf_clock_class_to_ir(cc->ir_cc, cc);
	}

	bt_trace_class_set_assigns_automatic_stream_class_id(ctx->ir_tc,
		BT_FALSE);
	ctx->tc->is_translated = true;
	ctx->tc->ir_tc = ctx->ir_tc;

end:
	return ret;
}

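/*
 * Translates the whole CTF trace class `tc`, including its stream
 * classes and their event classes, to the IR trace class `ir_tc`.
 */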
BT_HIDDEN
int ctf_trace_class_translate(bt_self_component *self_comp,
		bt_trace_class *ir_tc, struct ctf_trace_class *tc)
{
	int ret = 0;
	uint64_t i;
	struct ctx ctx = { 0 };

	ctx.self_comp = self_comp;
	ctx.tc = tc;
	ctx.ir_tc = ir_tc;
	ret = ctf_trace_class_to_ir(&ctx);
	if (ret) {
		goto end;
	}

	for (i = 0; i < tc->stream_classes->len; i++) {
		uint64_t j;
		ctx.sc = tc->stream_classes->pdata[i];

		ctf_stream_class_to_ir(&ctx);

		for (j = 0; j < ctx.sc->event_classes->len; j++) {
			ctx.ec = ctx.sc->event_classes->pdata[j];

			ctf_event_class_to_ir(&ctx);
			ctx.ec = NULL;
		}

		ctx.sc = NULL;
	}

end:
	return ret;
}