src/plugins/ctf/common: restructure subtree
[babeltrace.git] / src / plugins / ctf / common / src / metadata / tsdl / ctf-meta-translate.cpp
1 /*
2 * SPDX-License-Identifier: MIT
3 *
4 * Copyright 2018 Philippe Proulx <pproulx@efficios.com>
5 */
6
7 #include <stdint.h>
8 #include <string.h>
9
10 #include <babeltrace2/babeltrace.h>
11
12 #include "common/assert.h"
13
14 #include "ctf-meta-visitors.hpp"
15
/* Visitor context used while translating CTF metadata IR to trace IR. */
struct ctx
{
    /* Self component, used to create clock classes (borrowed) */
    bt_self_component *self_comp;

    /* Current trace IR trace class and stream class (borrowed) */
    bt_trace_class *ir_tc;
    bt_stream_class *ir_sc;

    /* Current CTF metadata objects being translated (borrowed) */
    struct ctf_trace_class *tc;
    struct ctf_stream_class *sc;
    struct ctf_event_class *ec;

    /* Scope (root field class) currently being translated */
    enum ctf_scope scope;
};
26
27 static inline bt_field_class *ctf_field_class_to_ir(struct ctx *ctx, struct ctf_field_class *fc);
28
/*
 * Copies the integer properties shared by all CTF integer field
 * classes to the trace IR integer field class `ir_fc`: the field value
 * range (from the class's size) and the preferred display base.
 */
static inline void ctf_field_class_int_set_props(struct ctf_field_class_int *fc,
                                                 bt_field_class *ir_fc)
{
    bt_field_class_integer_set_field_value_range(ir_fc, fc->base.size);
    bt_field_class_integer_set_preferred_display_base(ir_fc, fc->disp_base);
}
35
36 static inline bt_field_class *ctf_field_class_int_to_ir(struct ctx *ctx,
37 struct ctf_field_class_int *fc)
38 {
39 bt_field_class *ir_fc;
40
41 if (fc->is_signed) {
42 ir_fc = bt_field_class_integer_signed_create(ctx->ir_tc);
43 } else {
44 ir_fc = bt_field_class_integer_unsigned_create(ctx->ir_tc);
45 }
46
47 BT_ASSERT(ir_fc);
48 ctf_field_class_int_set_props(fc, ir_fc);
49 return ir_fc;
50 }
51
/*
 * Translates the CTF enumeration field class `fc` to a new trace IR
 * (signed or unsigned) enumeration field class, copying its integer
 * properties and all of its mappings (label and ranges).
 */
static inline bt_field_class *ctf_field_class_enum_to_ir(struct ctx *ctx,
                                                         struct ctf_field_class_enum *fc)
{
    int ret;
    bt_field_class *ir_fc;
    uint64_t i;

    if (fc->base.is_signed) {
        ir_fc = bt_field_class_enumeration_signed_create(ctx->ir_tc);
    } else {
        ir_fc = bt_field_class_enumeration_unsigned_create(ctx->ir_tc);
    }

    BT_ASSERT(ir_fc);

    /* An enumeration is also an integer: copy the common properties */
    ctf_field_class_int_set_props(&fc->base, ir_fc);

    for (i = 0; i < fc->mappings->len; i++) {
        struct ctf_field_class_enum_mapping *mapping =
            ctf_field_class_enum_borrow_mapping_by_index(fc, i);
        bt_integer_range_set_signed *range_set_signed = NULL;
        bt_integer_range_set_unsigned *range_set_unsigned = NULL;
        uint64_t range_i;

        /* Create one range set per mapping, of matching signedness */
        if (fc->base.is_signed) {
            range_set_signed = bt_integer_range_set_signed_create();
            BT_ASSERT(range_set_signed);
        } else {
            range_set_unsigned = bt_integer_range_set_unsigned_create();
            BT_ASSERT(range_set_unsigned);
        }

        /* Copy each of the mapping's ranges into the range set */
        for (range_i = 0; range_i < mapping->ranges->len; range_i++) {
            struct ctf_range *range =
                ctf_field_class_enum_mapping_borrow_range_by_index(mapping, range_i);

            if (fc->base.is_signed) {
                ret = bt_integer_range_set_signed_add_range(range_set_signed, range->lower.i,
                                                            range->upper.i);
            } else {
                ret = bt_integer_range_set_unsigned_add_range(range_set_unsigned, range->lower.u,
                                                              range->upper.u);
            }

            BT_ASSERT(ret == 0);
        }

        /*
         * Add the mapping to the IR enumeration, then release our
         * reference on the range set (the mapping was added, so we
         * don't need to keep it around).
         */
        if (fc->base.is_signed) {
            ret = bt_field_class_enumeration_signed_add_mapping(ir_fc, mapping->label->str,
                                                                range_set_signed);
            BT_INTEGER_RANGE_SET_SIGNED_PUT_REF_AND_RESET(range_set_signed);
        } else {
            ret = bt_field_class_enumeration_unsigned_add_mapping(ir_fc, mapping->label->str,
                                                                  range_set_unsigned);
            BT_INTEGER_RANGE_SET_UNSIGNED_PUT_REF_AND_RESET(range_set_unsigned);
        }

        BT_ASSERT(ret == 0);
    }

    return ir_fc;
}
113
114 static inline bt_field_class *ctf_field_class_float_to_ir(struct ctx *ctx,
115 struct ctf_field_class_float *fc)
116 {
117 bt_field_class *ir_fc;
118
119 if (fc->base.size == 32) {
120 ir_fc = bt_field_class_real_single_precision_create(ctx->ir_tc);
121 } else {
122 ir_fc = bt_field_class_real_double_precision_create(ctx->ir_tc);
123 }
124 BT_ASSERT(ir_fc);
125
126 return ir_fc;
127 }
128
129 static inline bt_field_class *ctf_field_class_string_to_ir(struct ctx *ctx,
130 struct ctf_field_class_string *)
131 {
132 bt_field_class *ir_fc = bt_field_class_string_create(ctx->ir_tc);
133
134 BT_ASSERT(ir_fc);
135 return ir_fc;
136 }
137
138 static inline void translate_struct_field_class_members(struct ctx *ctx,
139 struct ctf_field_class_struct *fc,
140 bt_field_class *ir_fc, bool,
141 struct ctf_field_class_struct *)
142 {
143 uint64_t i;
144 int ret;
145
146 for (i = 0; i < fc->members->len; i++) {
147 struct ctf_named_field_class *named_fc =
148 ctf_field_class_struct_borrow_member_by_index(fc, i);
149 bt_field_class *member_ir_fc;
150 const char *name = named_fc->name->str;
151
152 if (!named_fc->fc->in_ir) {
153 continue;
154 }
155
156 member_ir_fc = ctf_field_class_to_ir(ctx, named_fc->fc);
157 BT_ASSERT(member_ir_fc);
158 ret = bt_field_class_structure_append_member(ir_fc, name, member_ir_fc);
159 BT_ASSERT(ret == 0);
160 bt_field_class_put_ref(member_ir_fc);
161 }
162 }
163
164 static inline bt_field_class *ctf_field_class_struct_to_ir(struct ctx *ctx,
165 struct ctf_field_class_struct *fc)
166 {
167 bt_field_class *ir_fc = bt_field_class_structure_create(ctx->ir_tc);
168
169 BT_ASSERT(ir_fc);
170 translate_struct_field_class_members(ctx, fc, ir_fc, false, NULL);
171 return ir_fc;
172 }
173
174 static inline bt_field_class *borrow_ir_fc_from_field_path(struct ctx *ctx,
175 struct ctf_field_path *field_path)
176 {
177 bt_field_class *ir_fc = NULL;
178 struct ctf_field_class *fc =
179 ctf_field_path_borrow_field_class(field_path, ctx->tc, ctx->sc, ctx->ec);
180
181 BT_ASSERT(fc);
182
183 if (fc->in_ir) {
184 ir_fc = fc->ir_fc;
185 }
186
187 return ir_fc;
188 }
189
190 static inline const bt_field_class_enumeration_mapping *
191 find_ir_enum_field_class_mapping_by_label(const bt_field_class *fc, const char *label,
192 bool is_signed)
193 {
194 const bt_field_class_enumeration_mapping *mapping = NULL;
195 uint64_t i;
196
197 for (i = 0; i < bt_field_class_enumeration_get_mapping_count(fc); i++) {
198 const bt_field_class_enumeration_mapping *this_mapping;
199 const bt_field_class_enumeration_signed_mapping *signed_this_mapping = NULL;
200 const bt_field_class_enumeration_unsigned_mapping *unsigned_this_mapping = NULL;
201
202 if (is_signed) {
203 signed_this_mapping =
204 bt_field_class_enumeration_signed_borrow_mapping_by_index_const(fc, i);
205 BT_ASSERT(signed_this_mapping);
206 this_mapping =
207 bt_field_class_enumeration_signed_mapping_as_mapping_const(signed_this_mapping);
208 } else {
209 unsigned_this_mapping =
210 bt_field_class_enumeration_unsigned_borrow_mapping_by_index_const(fc, i);
211 BT_ASSERT(unsigned_this_mapping);
212 this_mapping =
213 bt_field_class_enumeration_unsigned_mapping_as_mapping_const(unsigned_this_mapping);
214 }
215
216 BT_ASSERT(this_mapping);
217
218 if (strcmp(bt_field_class_enumeration_mapping_get_label(this_mapping), label) == 0) {
219 mapping = this_mapping;
220 goto end;
221 }
222 }
223
224 end:
225 return mapping;
226 }
227
/*
 * Translates the CTF variant field class `fc` to a new trace IR
 * variant field class: with an integer selector when the tag's IR
 * field class could be borrowed, without one otherwise.
 */
static inline bt_field_class *ctf_field_class_variant_to_ir(struct ctx *ctx,
                                                            struct ctf_field_class_variant *fc)
{
    int ret;
    bt_field_class *ir_fc;
    uint64_t i;
    bt_field_class *ir_tag_fc = NULL;

    /*
     * Only borrow an IR selector field class when the tag is not
     * located in a packet header or event header scope (those scopes
     * are not resolved to IR selectors here).
     */
    if (fc->tag_path.root != CTF_SCOPE_PACKET_HEADER &&
        fc->tag_path.root != CTF_SCOPE_EVENT_HEADER) {
        ir_tag_fc = borrow_ir_fc_from_field_path(ctx, &fc->tag_path);
        BT_ASSERT(ir_tag_fc);
    }

    ir_fc = bt_field_class_variant_create(ctx->ir_tc, ir_tag_fc);
    BT_ASSERT(ir_fc);

    /* Append one IR option per CTF variant option */
    for (i = 0; i < fc->options->len; i++) {
        struct ctf_named_field_class *named_fc =
            ctf_field_class_variant_borrow_option_by_index(fc, i);
        bt_field_class *option_ir_fc;

        BT_ASSERT(named_fc->fc->in_ir);
        option_ir_fc = ctf_field_class_to_ir(ctx, named_fc->fc);
        BT_ASSERT(option_ir_fc);

        if (ir_tag_fc) {
            /*
             * At this point the trace IR selector
             * (enumeration) field class already exists if
             * the variant is tagged (`ir_tag_fc`). This one
             * already contains range sets for its mappings,
             * so we just reuse the same, finding them by
             * matching a variant field class's option's
             * _original_ name (with a leading underscore,
             * possibly) with a selector field class's
             * mapping name.
             */
            if (fc->tag_fc->base.is_signed) {
                const bt_field_class_enumeration_signed_mapping *mapping =
                    (bt_field_class_enumeration_signed_mapping *)
                    find_ir_enum_field_class_mapping_by_label(ir_tag_fc,
                        named_fc->orig_name->str, true);
                const bt_integer_range_set_signed *range_set;

                BT_ASSERT(mapping);
                range_set = bt_field_class_enumeration_signed_mapping_borrow_ranges_const(mapping);
                BT_ASSERT(range_set);
                ret = bt_field_class_variant_with_selector_field_integer_signed_append_option(
                    ir_fc, named_fc->name->str, option_ir_fc, range_set);
            } else {
                const bt_field_class_enumeration_unsigned_mapping *mapping =
                    (bt_field_class_enumeration_unsigned_mapping *)
                    find_ir_enum_field_class_mapping_by_label(ir_tag_fc,
                        named_fc->orig_name->str, false);
                const bt_integer_range_set_unsigned *range_set;

                BT_ASSERT(mapping);
                range_set =
                    bt_field_class_enumeration_unsigned_mapping_borrow_ranges_const(mapping);
                BT_ASSERT(range_set);
                ret = bt_field_class_variant_with_selector_field_integer_unsigned_append_option(
                    ir_fc, named_fc->name->str, option_ir_fc, range_set);
            }
        } else {
            ret = bt_field_class_variant_without_selector_append_option(ir_fc, named_fc->name->str,
                                                                        option_ir_fc);
        }

        BT_ASSERT(ret == 0);

        /* The IR variant now holds its own reference on the option */
        bt_field_class_put_ref(option_ir_fc);
    }

    return ir_fc;
}
303
304 static inline bt_field_class *ctf_field_class_array_to_ir(struct ctx *ctx,
305 struct ctf_field_class_array *fc)
306 {
307 bt_field_class *ir_fc;
308 bt_field_class *elem_ir_fc;
309
310 if (fc->base.is_text) {
311 ir_fc = bt_field_class_string_create(ctx->ir_tc);
312 BT_ASSERT(ir_fc);
313 goto end;
314 }
315
316 elem_ir_fc = ctf_field_class_to_ir(ctx, fc->base.elem_fc);
317 BT_ASSERT(elem_ir_fc);
318 ir_fc = bt_field_class_array_static_create(ctx->ir_tc, elem_ir_fc, fc->length);
319 BT_ASSERT(ir_fc);
320 bt_field_class_put_ref(elem_ir_fc);
321
322 end:
323 return ir_fc;
324 }
325
326 static inline bt_field_class *ctf_field_class_sequence_to_ir(struct ctx *ctx,
327 struct ctf_field_class_sequence *fc)
328 {
329 bt_field_class *ir_fc;
330 bt_field_class *elem_ir_fc;
331 bt_field_class *length_fc = NULL;
332
333 if (fc->base.is_text) {
334 ir_fc = bt_field_class_string_create(ctx->ir_tc);
335 BT_ASSERT(ir_fc);
336 goto end;
337 }
338
339 elem_ir_fc = ctf_field_class_to_ir(ctx, fc->base.elem_fc);
340 BT_ASSERT(elem_ir_fc);
341
342 if (fc->length_path.root != CTF_SCOPE_PACKET_HEADER &&
343 fc->length_path.root != CTF_SCOPE_EVENT_HEADER) {
344 length_fc = borrow_ir_fc_from_field_path(ctx, &fc->length_path);
345 BT_ASSERT(length_fc);
346 }
347
348 ir_fc = bt_field_class_array_dynamic_create(ctx->ir_tc, elem_ir_fc, length_fc);
349 BT_ASSERT(ir_fc);
350 bt_field_class_put_ref(elem_ir_fc);
351 BT_ASSERT(ir_fc);
352
353 end:
354 return ir_fc;
355 }
356
/*
 * Translates any CTF field class `fc` (which must be part of the
 * trace IR) to a new trace IR field class, dispatching on its
 * concrete type, and records the result in `fc->ir_fc` so that field
 * paths can later be resolved to IR field classes (see
 * borrow_ir_fc_from_field_path()).
 */
static inline bt_field_class *ctf_field_class_to_ir(struct ctx *ctx, struct ctf_field_class *fc)
{
    bt_field_class *ir_fc = NULL;

    BT_ASSERT(fc);
    BT_ASSERT(fc->in_ir);

    switch (fc->type) {
    case CTF_FIELD_CLASS_TYPE_INT:
        ir_fc = ctf_field_class_int_to_ir(ctx, ctf_field_class_as_int(fc));
        break;
    case CTF_FIELD_CLASS_TYPE_ENUM:
        ir_fc = ctf_field_class_enum_to_ir(ctx, ctf_field_class_as_enum(fc));
        break;
    case CTF_FIELD_CLASS_TYPE_FLOAT:
        ir_fc = ctf_field_class_float_to_ir(ctx, ctf_field_class_as_float(fc));
        break;
    case CTF_FIELD_CLASS_TYPE_STRING:
        ir_fc = ctf_field_class_string_to_ir(ctx, ctf_field_class_as_string(fc));
        break;
    case CTF_FIELD_CLASS_TYPE_STRUCT:
        ir_fc = ctf_field_class_struct_to_ir(ctx, ctf_field_class_as_struct(fc));
        break;
    case CTF_FIELD_CLASS_TYPE_ARRAY:
        ir_fc = ctf_field_class_array_to_ir(ctx, ctf_field_class_as_array(fc));
        break;
    case CTF_FIELD_CLASS_TYPE_SEQUENCE:
        ir_fc = ctf_field_class_sequence_to_ir(ctx, ctf_field_class_as_sequence(fc));
        break;
    case CTF_FIELD_CLASS_TYPE_VARIANT:
        ir_fc = ctf_field_class_variant_to_ir(ctx, ctf_field_class_as_variant(fc));
        break;
    default:
        /* Unknown CTF field class type */
        bt_common_abort();
    }

    /* Remember the IR translation of this field class */
    fc->ir_fc = ir_fc;
    return ir_fc;
}
396
397 static inline bool
398 ctf_field_class_struct_has_immediate_member_in_ir(struct ctf_field_class_struct *fc)
399 {
400 uint64_t i;
401 bool has_immediate_member_in_ir = false;
402
403 /*
404 * If the structure field class has no members at all, then it
405 * was an empty structure in the beginning, so leave it existing
406 * and empty.
407 */
408 if (fc->members->len == 0) {
409 has_immediate_member_in_ir = true;
410 goto end;
411 }
412
413 for (i = 0; i < fc->members->len; i++) {
414 struct ctf_named_field_class *named_fc =
415 ctf_field_class_struct_borrow_member_by_index(fc, i);
416
417 if (named_fc->fc->in_ir) {
418 has_immediate_member_in_ir = true;
419 goto end;
420 }
421 }
422
423 end:
424 return has_immediate_member_in_ir;
425 }
426
/*
 * Translates the root (structure) field class of the context's
 * current scope (`ctx->scope`), returning NULL when this scope has no
 * field class or when its structure would be empty in the trace IR.
 */
static inline bt_field_class *scope_ctf_field_class_to_ir(struct ctx *ctx)
{
    bt_field_class *ir_fc = NULL;
    struct ctf_field_class *fc = NULL;

    /* Select the root field class of the current scope */
    switch (ctx->scope) {
    case CTF_SCOPE_PACKET_CONTEXT:
        fc = ctx->sc->packet_context_fc;
        break;
    case CTF_SCOPE_EVENT_COMMON_CONTEXT:
        fc = ctx->sc->event_common_context_fc;
        break;
    case CTF_SCOPE_EVENT_SPECIFIC_CONTEXT:
        fc = ctx->ec->spec_context_fc;
        break;
    case CTF_SCOPE_EVENT_PAYLOAD:
        fc = ctx->ec->payload_fc;
        break;
    default:
        /* Other scopes are not translated by this function */
        bt_common_abort();
    }

    /*
     * Skip structures which would become empty in the IR (unless
     * they were empty to begin with).
     */
    if (fc && ctf_field_class_struct_has_immediate_member_in_ir(ctf_field_class_as_struct(fc))) {
        ir_fc = ctf_field_class_to_ir(ctx, fc);
    }

    return ir_fc;
}
455
/*
 * Translates the context's current event class (`ctx->ec`) to a trace
 * IR event class within `ctx->ir_sc`, unless it was already
 * translated by a previous call. Sets `ctx->ec->ir_ec` on success.
 */
static inline void ctf_event_class_to_ir(struct ctx *ctx)
{
    int ret;
    bt_event_class *ir_ec = NULL;
    bt_field_class *ir_fc;

    BT_ASSERT(ctx->ec);

    /* Already translated: just borrow the existing IR event class */
    if (ctx->ec->is_translated) {
        ir_ec = bt_stream_class_borrow_event_class_by_id(ctx->ir_sc, ctx->ec->id);
        BT_ASSERT(ir_ec);
        goto end;
    }

    ir_ec = bt_event_class_create_with_id(ctx->ir_sc, ctx->ec->id);
    BT_ASSERT(ir_ec);

    /*
     * The stream class now holds a reference on the event class;
     * release ours and keep using `ir_ec` as a borrowed pointer.
     */
    bt_event_class_put_ref(ir_ec);

    /* Translate the specific context scope, if not empty in the IR */
    ctx->scope = CTF_SCOPE_EVENT_SPECIFIC_CONTEXT;
    ir_fc = scope_ctf_field_class_to_ir(ctx);
    if (ir_fc) {
        ret = bt_event_class_set_specific_context_field_class(ir_ec, ir_fc);
        BT_ASSERT(ret == 0);
        bt_field_class_put_ref(ir_fc);
    }

    /* Translate the payload scope, if not empty in the IR */
    ctx->scope = CTF_SCOPE_EVENT_PAYLOAD;
    ir_fc = scope_ctf_field_class_to_ir(ctx);
    if (ir_fc) {
        ret = bt_event_class_set_payload_field_class(ir_ec, ir_fc);
        BT_ASSERT(ret == 0);
        bt_field_class_put_ref(ir_fc);
    }

    /* Copy the optional event class properties */
    if (ctx->ec->name->len > 0) {
        ret = bt_event_class_set_name(ir_ec, ctx->ec->name->str);
        BT_ASSERT(ret == 0);
    }

    if (ctx->ec->emf_uri->len > 0) {
        ret = bt_event_class_set_emf_uri(ir_ec, ctx->ec->emf_uri->str);
        BT_ASSERT(ret == 0);
    }

    if (ctx->ec->is_log_level_set) {
        bt_event_class_set_log_level(ir_ec, ctx->ec->log_level);
    }

    ctx->ec->is_translated = true;
    ctx->ec->ir_ec = ir_ec;

end:
    return;
}
509
/*
 * Translates the context's current stream class (`ctx->sc`) to a
 * trace IR stream class within `ctx->ir_tc`, unless it was already
 * translated by a previous call. Sets `ctx->ir_sc` and
 * `ctx->sc->ir_sc` on success.
 */
static inline void ctf_stream_class_to_ir(struct ctx *ctx)
{
    int ret;
    bt_field_class *ir_fc;

    BT_ASSERT(ctx->sc);

    /* Already translated: just borrow the existing IR stream class */
    if (ctx->sc->is_translated) {
        ctx->ir_sc = bt_trace_class_borrow_stream_class_by_id(ctx->ir_tc, ctx->sc->id);
        BT_ASSERT(ctx->ir_sc);
        goto end;
    }

    ctx->ir_sc = bt_stream_class_create_with_id(ctx->ir_tc, ctx->sc->id);
    BT_ASSERT(ctx->ir_sc);

    /*
     * The trace class now holds a reference on the stream class;
     * release ours and keep using `ctx->ir_sc` as a borrowed pointer.
     */
    bt_stream_class_put_ref(ctx->ir_sc);

    if (ctx->sc->default_clock_class) {
        /* The IR clock class was created by ctf_trace_class_to_ir() */
        BT_ASSERT(ctx->sc->default_clock_class->ir_cc);
        ret = bt_stream_class_set_default_clock_class(ctx->ir_sc,
                                                      ctx->sc->default_clock_class->ir_cc);
        BT_ASSERT(ret == 0);
    }

    /* Copy the packet/discarded items support properties */
    bt_stream_class_set_supports_packets(ctx->ir_sc, BT_TRUE, ctx->sc->packets_have_ts_begin,
                                         ctx->sc->packets_have_ts_end);
    bt_stream_class_set_supports_discarded_events(ctx->ir_sc, ctx->sc->has_discarded_events,
                                                  ctx->sc->discarded_events_have_default_cs);
    bt_stream_class_set_supports_discarded_packets(ctx->ir_sc, ctx->sc->has_discarded_packets,
                                                   ctx->sc->discarded_packets_have_default_cs);

    /* Translate the packet context scope, if not empty in the IR */
    ctx->scope = CTF_SCOPE_PACKET_CONTEXT;
    ir_fc = scope_ctf_field_class_to_ir(ctx);
    if (ir_fc) {
        ret = bt_stream_class_set_packet_context_field_class(ctx->ir_sc, ir_fc);
        BT_ASSERT(ret == 0);
        bt_field_class_put_ref(ir_fc);
    }

    /* Translate the event common context scope, if not empty in the IR */
    ctx->scope = CTF_SCOPE_EVENT_COMMON_CONTEXT;
    ir_fc = scope_ctf_field_class_to_ir(ctx);
    if (ir_fc) {
        ret = bt_stream_class_set_event_common_context_field_class(ctx->ir_sc, ir_fc);
        BT_ASSERT(ret == 0);
        bt_field_class_put_ref(ir_fc);
    }

    /* CTF provides explicit event class and stream IDs */
    bt_stream_class_set_assigns_automatic_event_class_id(ctx->ir_sc, BT_FALSE);
    bt_stream_class_set_assigns_automatic_stream_id(ctx->ir_sc, BT_FALSE);

    ctx->sc->is_translated = true;
    ctx->sc->ir_sc = ctx->ir_sc;

end:
    return;
}
565
566 static inline void ctf_clock_class_to_ir(bt_clock_class *ir_cc, struct ctf_clock_class *cc)
567 {
568 int ret;
569
570 if (strlen(cc->name->str) > 0) {
571 ret = bt_clock_class_set_name(ir_cc, cc->name->str);
572 BT_ASSERT(ret == 0);
573 }
574
575 if (strlen(cc->description->str) > 0) {
576 ret = bt_clock_class_set_description(ir_cc, cc->description->str);
577 BT_ASSERT(ret == 0);
578 }
579
580 bt_clock_class_set_frequency(ir_cc, cc->frequency);
581 bt_clock_class_set_precision(ir_cc, cc->precision);
582 bt_clock_class_set_offset(ir_cc, cc->offset_seconds, cc->offset_cycles);
583
584 if (cc->has_uuid) {
585 bt_clock_class_set_uuid(ir_cc, cc->uuid);
586 }
587
588 bt_clock_class_set_origin_is_unix_epoch(ir_cc, cc->is_absolute);
589 }
590
591 static inline int ctf_trace_class_to_ir(struct ctx *ctx)
592 {
593 int ret = 0;
594 uint64_t i;
595
596 BT_ASSERT(ctx->tc);
597 BT_ASSERT(ctx->ir_tc);
598
599 if (ctx->tc->is_translated) {
600 goto end;
601 }
602
603 for (i = 0; i < ctx->tc->clock_classes->len; i++) {
604 ctf_clock_class *cc = (ctf_clock_class *) ctx->tc->clock_classes->pdata[i];
605
606 cc->ir_cc = bt_clock_class_create(ctx->self_comp);
607 ctf_clock_class_to_ir(cc->ir_cc, cc);
608 }
609
610 bt_trace_class_set_assigns_automatic_stream_class_id(ctx->ir_tc, BT_FALSE);
611 ctx->tc->is_translated = true;
612 ctx->tc->ir_tc = ctx->ir_tc;
613
614 end:
615 return ret;
616 }
617
618 int ctf_trace_class_translate(bt_self_component *self_comp, bt_trace_class *ir_tc,
619 struct ctf_trace_class *tc)
620 {
621 int ret = 0;
622 uint64_t i;
623 struct ctx ctx = {};
624
625 ctx.self_comp = self_comp;
626 ctx.tc = tc;
627 ctx.ir_tc = ir_tc;
628 ret = ctf_trace_class_to_ir(&ctx);
629 if (ret) {
630 goto end;
631 }
632
633 for (i = 0; i < tc->stream_classes->len; i++) {
634 uint64_t j;
635 ctx.sc = (ctf_stream_class *) tc->stream_classes->pdata[i];
636
637 ctf_stream_class_to_ir(&ctx);
638
639 for (j = 0; j < ctx.sc->event_classes->len; j++) {
640 ctx.ec = (ctf_event_class *) ctx.sc->event_classes->pdata[j];
641
642 ctf_event_class_to_ir(&ctx);
643 ctx.ec = NULL;
644 }
645
646 ctx.sc = NULL;
647 }
648
649 end:
650 return ret;
651 }
This page took 0.043346 seconds and 4 git commands to generate.