lib: rename option/var. "selector" to "selector field"
[babeltrace.git] / src / plugins / ctf / common / metadata / ctf-meta-translate.c
1 /*
2 * Copyright 2018 - Philippe Proulx <pproulx@efficios.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a copy
5 * of this software and associated documentation files (the "Software"), to deal
6 * in the Software without restriction, including without limitation the rights
7 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 * copies of the Software, and to permit persons to whom the Software is
9 * furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 */
14
15 #include <babeltrace2/babeltrace.h>
16 #include "common/macros.h"
17 #include "common/assert.h"
18 #include <glib.h>
19 #include <stdint.h>
20 #include <string.h>
21 #include <inttypes.h>
22
23 #include "ctf-meta-visitors.h"
24
/* Translation context passed through all the *_to_ir() functions below. */
struct ctx {
	bt_self_component *self_comp;	/* Used to create IR clock classes */
	bt_trace_class *ir_tc;		/* Output (IR) trace class */
	bt_stream_class *ir_sc;		/* Current output (IR) stream class */
	struct ctf_trace_class *tc;	/* Input (CTF) trace class */
	struct ctf_stream_class *sc;	/* Current input (CTF) stream class */
	struct ctf_event_class *ec;	/* Current input (CTF) event class */
	enum ctf_scope scope;		/* Scope currently being translated */
};
34
35 static inline
36 bt_field_class *ctf_field_class_to_ir(struct ctx *ctx,
37 struct ctf_field_class *fc);
38
39 static inline
40 void ctf_field_class_int_set_props(struct ctf_field_class_int *fc,
41 bt_field_class *ir_fc)
42 {
43 bt_field_class_integer_set_field_value_range(ir_fc,
44 fc->base.size);
45 bt_field_class_integer_set_preferred_display_base(ir_fc,
46 fc->disp_base);
47 }
48
49 static inline
50 bt_field_class *ctf_field_class_int_to_ir(struct ctx *ctx,
51 struct ctf_field_class_int *fc)
52 {
53 bt_field_class *ir_fc;
54
55 if (fc->is_signed) {
56 ir_fc = bt_field_class_integer_signed_create(ctx->ir_tc);
57 } else {
58 ir_fc = bt_field_class_integer_unsigned_create(ctx->ir_tc);
59 }
60
61 BT_ASSERT(ir_fc);
62 ctf_field_class_int_set_props(fc, ir_fc);
63 return ir_fc;
64 }
65
66 static inline
67 bt_field_class *ctf_field_class_enum_to_ir(struct ctx *ctx,
68 struct ctf_field_class_enum *fc)
69 {
70 int ret;
71 bt_field_class *ir_fc;
72 uint64_t i;
73
74 if (fc->base.is_signed) {
75 ir_fc = bt_field_class_enumeration_signed_create(ctx->ir_tc);
76 } else {
77 ir_fc = bt_field_class_enumeration_unsigned_create(ctx->ir_tc);
78 }
79
80 BT_ASSERT(ir_fc);
81 ctf_field_class_int_set_props((void *) fc, ir_fc);
82
83 for (i = 0; i < fc->mappings->len; i++) {
84 struct ctf_field_class_enum_mapping *mapping =
85 ctf_field_class_enum_borrow_mapping_by_index(fc, i);
86 void *range_set;
87 uint64_t range_i;
88
89 if (fc->base.is_signed) {
90 range_set = bt_integer_range_set_signed_create();
91 } else {
92 range_set = bt_integer_range_set_unsigned_create();
93 }
94
95 BT_ASSERT(range_set);
96
97 for (range_i = 0; range_i < mapping->ranges->len; range_i++) {
98 struct ctf_range *range =
99 ctf_field_class_enum_mapping_borrow_range_by_index(
100 mapping, range_i);
101
102 if (fc->base.is_signed) {
103 ret = bt_integer_range_set_signed_add_range(
104 range_set, range->lower.i,
105 range->upper.i);
106 } else {
107 ret = bt_integer_range_set_unsigned_add_range(
108 range_set, range->lower.u,
109 range->upper.u);
110 }
111
112 BT_ASSERT(ret == 0);
113 }
114
115 if (fc->base.is_signed) {
116 ret = bt_field_class_enumeration_signed_add_mapping(
117 ir_fc, mapping->label->str, range_set);
118 BT_RANGE_SET_SIGNED_PUT_REF_AND_RESET(range_set);
119 } else {
120 ret = bt_field_class_enumeration_unsigned_add_mapping(
121 ir_fc, mapping->label->str, range_set);
122 BT_RANGE_SET_UNSIGNED_PUT_REF_AND_RESET(range_set);
123 }
124
125 BT_ASSERT(ret == 0);
126 }
127
128 return ir_fc;
129 }
130
131 static inline
132 bt_field_class *ctf_field_class_float_to_ir(struct ctx *ctx,
133 struct ctf_field_class_float *fc)
134 {
135 bt_field_class *ir_fc;
136
137 if (fc->base.size == 32) {
138 ir_fc = bt_field_class_real_single_precision_create(ctx->ir_tc);
139 } else {
140 ir_fc = bt_field_class_real_double_precision_create(ctx->ir_tc);
141 }
142 BT_ASSERT(ir_fc);
143
144 return ir_fc;
145 }
146
147 static inline
148 bt_field_class *ctf_field_class_string_to_ir(struct ctx *ctx,
149 struct ctf_field_class_string *fc)
150 {
151 bt_field_class *ir_fc = bt_field_class_string_create(ctx->ir_tc);
152
153 BT_ASSERT(ir_fc);
154 return ir_fc;
155 }
156
157 static inline
158 void translate_struct_field_class_members(struct ctx *ctx,
159 struct ctf_field_class_struct *fc, bt_field_class *ir_fc,
160 bool with_header_prefix,
161 struct ctf_field_class_struct *context_fc)
162 {
163 uint64_t i;
164 int ret;
165
166 for (i = 0; i < fc->members->len; i++) {
167 struct ctf_named_field_class *named_fc =
168 ctf_field_class_struct_borrow_member_by_index(fc, i);
169 bt_field_class *member_ir_fc;
170 const char *name = named_fc->name->str;
171
172 if (!named_fc->fc->in_ir) {
173 continue;
174 }
175
176 member_ir_fc = ctf_field_class_to_ir(ctx, named_fc->fc);
177 BT_ASSERT(member_ir_fc);
178 ret = bt_field_class_structure_append_member(ir_fc, name,
179 member_ir_fc);
180 BT_ASSERT(ret == 0);
181 bt_field_class_put_ref(member_ir_fc);
182 }
183 }
184
185 static inline
186 bt_field_class *ctf_field_class_struct_to_ir(struct ctx *ctx,
187 struct ctf_field_class_struct *fc)
188 {
189 bt_field_class *ir_fc = bt_field_class_structure_create(ctx->ir_tc);
190
191 BT_ASSERT(ir_fc);
192 translate_struct_field_class_members(ctx, fc, ir_fc, false, NULL);
193 return ir_fc;
194 }
195
196 static inline
197 bt_field_class *borrow_ir_fc_from_field_path(struct ctx *ctx,
198 struct ctf_field_path *field_path)
199 {
200 bt_field_class *ir_fc = NULL;
201 struct ctf_field_class *fc = ctf_field_path_borrow_field_class(
202 field_path, ctx->tc, ctx->sc, ctx->ec);
203
204 BT_ASSERT(fc);
205
206 if (fc->in_ir) {
207 ir_fc = fc->ir_fc;
208 }
209
210 return ir_fc;
211 }
212
213 static inline
214 const void *find_ir_enum_field_class_mapping_by_label(const bt_field_class *fc,
215 const char *label, bool is_signed)
216 {
217 const void *mapping = NULL;
218 uint64_t i;
219
220 for (i = 0; i < bt_field_class_enumeration_get_mapping_count(fc); i++) {
221 const bt_field_class_enumeration_mapping *this_mapping;
222 const void *spec_this_mapping;
223
224 if (is_signed) {
225 spec_this_mapping =
226 bt_field_class_enumeration_signed_borrow_mapping_by_index_const(
227 fc, i);
228 this_mapping =
229 bt_field_class_enumeration_signed_mapping_as_mapping_const(
230 spec_this_mapping);
231 } else {
232 spec_this_mapping =
233 bt_field_class_enumeration_unsigned_borrow_mapping_by_index_const(
234 fc, i);
235 this_mapping =
236 bt_field_class_enumeration_unsigned_mapping_as_mapping_const(
237 spec_this_mapping);
238 }
239
240 BT_ASSERT(this_mapping);
241 BT_ASSERT(spec_this_mapping);
242
243 if (strcmp(bt_field_class_enumeration_mapping_get_label(
244 this_mapping), label) == 0) {
245 mapping = spec_this_mapping;
246 goto end;
247 }
248 }
249
250 end:
251 return mapping;
252 }
253
static inline
bt_field_class *ctf_field_class_variant_to_ir(struct ctx *ctx,
		struct ctf_field_class_variant *fc)
{
	int ret;
	bt_field_class *ir_fc;
	uint64_t i;
	bt_field_class *ir_tag_fc = NULL;

	/*
	 * Borrow the IR selector (tag) field class, except when the tag
	 * lives in a packet or event header: in that case
	 * borrow_ir_fc_from_field_path() would have no `in_ir` class to
	 * borrow, so the variant is created without a selector.
	 */
	if (fc->tag_path.root != CTF_SCOPE_PACKET_HEADER &&
			fc->tag_path.root != CTF_SCOPE_EVENT_HEADER) {
		ir_tag_fc = borrow_ir_fc_from_field_path(ctx, &fc->tag_path);
		BT_ASSERT(ir_tag_fc);
	}

	ir_fc = bt_field_class_variant_create(ctx->ir_tc, ir_tag_fc);
	BT_ASSERT(ir_fc);

	/* Translate and append each variant option. */
	for (i = 0; i < fc->options->len; i++) {
		struct ctf_named_field_class *named_fc =
			ctf_field_class_variant_borrow_option_by_index(fc, i);
		bt_field_class *option_ir_fc;

		BT_ASSERT(named_fc->fc->in_ir);
		option_ir_fc = ctf_field_class_to_ir(ctx, named_fc->fc);
		BT_ASSERT(option_ir_fc);

		if (ir_tag_fc) {
			/*
			 * At this point the trace IR selector
			 * (enumeration) field class already exists if
			 * the variant is tagged (`ir_tag_fc`). This one
			 * already contains range sets for its mappings,
			 * so we just reuse the same, finding them by
			 * matching a variant field class's option's
			 * _original_ name (with a leading underscore,
			 * possibly) with a selector field class's
			 * mapping name.
			 */
			if (fc->tag_fc->base.is_signed) {
				const bt_field_class_enumeration_signed_mapping *mapping =
					find_ir_enum_field_class_mapping_by_label(
						ir_tag_fc,
						named_fc->orig_name->str, true);
				const bt_integer_range_set_signed *range_set;

				BT_ASSERT(mapping);
				range_set =
					bt_field_class_enumeration_signed_mapping_borrow_ranges_const(
						mapping);
				BT_ASSERT(range_set);
				ret = bt_field_class_variant_with_selector_field_integer_signed_append_option(
					ir_fc, named_fc->name->str,
					option_ir_fc, range_set);
			} else {
				const bt_field_class_enumeration_unsigned_mapping *mapping =
					find_ir_enum_field_class_mapping_by_label(
						ir_tag_fc,
						named_fc->orig_name->str,
						false);
				const bt_integer_range_set_unsigned *range_set;

				BT_ASSERT(mapping);
				range_set =
					bt_field_class_enumeration_unsigned_mapping_borrow_ranges_const(
						mapping);
				BT_ASSERT(range_set);
				ret = bt_field_class_variant_with_selector_field_integer_unsigned_append_option(
					ir_fc, named_fc->name->str,
					option_ir_fc, range_set);
			}
		} else {
			/* Untagged variant: append the option directly. */
			ret = bt_field_class_variant_without_selector_append_option(
				ir_fc, named_fc->name->str, option_ir_fc);
		}

		BT_ASSERT(ret == 0);

		/* The variant now owns the option's field class. */
		bt_field_class_put_ref(option_ir_fc);
	}

	return ir_fc;
}
336
337 static inline
338 bt_field_class *ctf_field_class_array_to_ir(struct ctx *ctx,
339 struct ctf_field_class_array *fc)
340 {
341 bt_field_class *ir_fc;
342 bt_field_class *elem_ir_fc;
343
344 if (fc->base.is_text) {
345 ir_fc = bt_field_class_string_create(ctx->ir_tc);
346 BT_ASSERT(ir_fc);
347 goto end;
348 }
349
350 elem_ir_fc = ctf_field_class_to_ir(ctx, fc->base.elem_fc);
351 BT_ASSERT(elem_ir_fc);
352 ir_fc = bt_field_class_array_static_create(ctx->ir_tc, elem_ir_fc,
353 fc->length);
354 BT_ASSERT(ir_fc);
355 bt_field_class_put_ref(elem_ir_fc);
356
357 end:
358 return ir_fc;
359 }
360
361 static inline
362 bt_field_class *ctf_field_class_sequence_to_ir(struct ctx *ctx,
363 struct ctf_field_class_sequence *fc)
364 {
365 bt_field_class *ir_fc;
366 bt_field_class *elem_ir_fc;
367 bt_field_class *length_fc = NULL;
368
369 if (fc->base.is_text) {
370 ir_fc = bt_field_class_string_create(ctx->ir_tc);
371 BT_ASSERT(ir_fc);
372 goto end;
373 }
374
375 elem_ir_fc = ctf_field_class_to_ir(ctx, fc->base.elem_fc);
376 BT_ASSERT(elem_ir_fc);
377
378 if (fc->length_path.root != CTF_SCOPE_PACKET_HEADER &&
379 fc->length_path.root != CTF_SCOPE_EVENT_HEADER) {
380 length_fc = borrow_ir_fc_from_field_path(ctx, &fc->length_path);
381 BT_ASSERT(length_fc);
382 }
383
384 ir_fc = bt_field_class_array_dynamic_create(ctx->ir_tc, elem_ir_fc,
385 length_fc);
386 BT_ASSERT(ir_fc);
387 bt_field_class_put_ref(elem_ir_fc);
388 BT_ASSERT(ir_fc);
389
390 end:
391 return ir_fc;
392 }
393
static inline
bt_field_class *ctf_field_class_to_ir(struct ctx *ctx,
		struct ctf_field_class *fc)
{
	bt_field_class *ir_fc = NULL;

	/* Only field classes which exist in trace IR may be translated. */
	BT_ASSERT(fc);
	BT_ASSERT(fc->in_ir);

	/* Dispatch on the concrete CTF field class type. */
	switch (fc->type) {
	case CTF_FIELD_CLASS_TYPE_INT:
		ir_fc = ctf_field_class_int_to_ir(ctx, (void *) fc);
		break;
	case CTF_FIELD_CLASS_TYPE_ENUM:
		ir_fc = ctf_field_class_enum_to_ir(ctx, (void *) fc);
		break;
	case CTF_FIELD_CLASS_TYPE_FLOAT:
		ir_fc = ctf_field_class_float_to_ir(ctx, (void *) fc);
		break;
	case CTF_FIELD_CLASS_TYPE_STRING:
		ir_fc = ctf_field_class_string_to_ir(ctx, (void *) fc);
		break;
	case CTF_FIELD_CLASS_TYPE_STRUCT:
		ir_fc = ctf_field_class_struct_to_ir(ctx, (void *) fc);
		break;
	case CTF_FIELD_CLASS_TYPE_ARRAY:
		ir_fc = ctf_field_class_array_to_ir(ctx, (void *) fc);
		break;
	case CTF_FIELD_CLASS_TYPE_SEQUENCE:
		ir_fc = ctf_field_class_sequence_to_ir(ctx, (void *) fc);
		break;
	case CTF_FIELD_CLASS_TYPE_VARIANT:
		ir_fc = ctf_field_class_variant_to_ir(ctx, (void *) fc);
		break;
	default:
		/* Unknown field class type: programming error. */
		abort();
	}

	/*
	 * Cache the IR field class on the CTF field class so that
	 * borrow_ir_fc_from_field_path() can find it later (e.g. for
	 * variant tags and sequence lengths).
	 */
	fc->ir_fc = ir_fc;
	return ir_fc;
}
435
436 static inline
437 bool ctf_field_class_struct_has_immediate_member_in_ir(
438 struct ctf_field_class_struct *fc)
439 {
440 uint64_t i;
441 bool has_immediate_member_in_ir = false;
442
443 /*
444 * If the structure field class has no members at all, then it
445 * was an empty structure in the beginning, so leave it existing
446 * and empty.
447 */
448 if (fc->members->len == 0) {
449 has_immediate_member_in_ir = true;
450 goto end;
451 }
452
453 for (i = 0; i < fc->members->len; i++) {
454 struct ctf_named_field_class *named_fc =
455 ctf_field_class_struct_borrow_member_by_index(fc, i);
456
457 if (named_fc->fc->in_ir) {
458 has_immediate_member_in_ir = true;
459 goto end;
460 }
461 }
462
463 end:
464 return has_immediate_member_in_ir;
465 }
466
467 static inline
468 bt_field_class *scope_ctf_field_class_to_ir(struct ctx *ctx)
469 {
470 bt_field_class *ir_fc = NULL;
471 struct ctf_field_class *fc = NULL;
472
473 switch (ctx->scope) {
474 case CTF_SCOPE_PACKET_CONTEXT:
475 fc = ctx->sc->packet_context_fc;
476 break;
477 case CTF_SCOPE_EVENT_COMMON_CONTEXT:
478 fc = ctx->sc->event_common_context_fc;
479 break;
480 case CTF_SCOPE_EVENT_SPECIFIC_CONTEXT:
481 fc = ctx->ec->spec_context_fc;
482 break;
483 case CTF_SCOPE_EVENT_PAYLOAD:
484 fc = ctx->ec->payload_fc;
485 break;
486 default:
487 abort();
488 }
489
490 if (fc && ctf_field_class_struct_has_immediate_member_in_ir(
491 (void *) fc)) {
492 ir_fc = ctf_field_class_to_ir(ctx, fc);
493 }
494
495 return ir_fc;
496 }
497
static inline
void ctf_event_class_to_ir(struct ctx *ctx)
{
	int ret;
	bt_event_class *ir_ec = NULL;
	bt_field_class *ir_fc;

	BT_ASSERT(ctx->ec);

	/* Already translated: borrow the existing IR event class. */
	if (ctx->ec->is_translated) {
		ir_ec = bt_stream_class_borrow_event_class_by_id(
			ctx->ir_sc, ctx->ec->id);
		BT_ASSERT(ir_ec);
		goto end;
	}

	ir_ec = bt_event_class_create_with_id(ctx->ir_sc, ctx->ec->id);
	BT_ASSERT(ir_ec);
	/*
	 * Drop our reference immediately; `ir_ec` is used below as a
	 * borrowed pointer. NOTE(review): this assumes the stream class
	 * keeps the event class alive after creation — confirm with the
	 * libbabeltrace2 API documentation.
	 */
	bt_event_class_put_ref(ir_ec);

	/* Translate the specific context and payload scopes, if any. */
	ctx->scope = CTF_SCOPE_EVENT_SPECIFIC_CONTEXT;
	ir_fc = scope_ctf_field_class_to_ir(ctx);
	if (ir_fc) {
		ret = bt_event_class_set_specific_context_field_class(
			ir_ec, ir_fc);
		BT_ASSERT(ret == 0);
		bt_field_class_put_ref(ir_fc);
	}

	ctx->scope = CTF_SCOPE_EVENT_PAYLOAD;
	ir_fc = scope_ctf_field_class_to_ir(ctx);
	if (ir_fc) {
		ret = bt_event_class_set_payload_field_class(ir_ec,
			ir_fc);
		BT_ASSERT(ret == 0);
		bt_field_class_put_ref(ir_fc);
	}

	/* Optional attributes: only set those present in the metadata. */
	if (ctx->ec->name->len > 0) {
		ret = bt_event_class_set_name(ir_ec, ctx->ec->name->str);
		BT_ASSERT(ret == 0);
	}

	if (ctx->ec->emf_uri->len > 0) {
		ret = bt_event_class_set_emf_uri(ir_ec, ctx->ec->emf_uri->str);
		BT_ASSERT(ret == 0);
	}

	if (ctx->ec->is_log_level_set) {
		bt_event_class_set_log_level(ir_ec, ctx->ec->log_level);
	}

	/* Mark as translated and cache the IR event class. */
	ctx->ec->is_translated = true;
	ctx->ec->ir_ec = ir_ec;

end:
	return;
}
555
556
static inline
void ctf_stream_class_to_ir(struct ctx *ctx)
{
	int ret;
	bt_field_class *ir_fc;

	BT_ASSERT(ctx->sc);

	/* Already translated: borrow the existing IR stream class. */
	if (ctx->sc->is_translated) {
		ctx->ir_sc = bt_trace_class_borrow_stream_class_by_id(
			ctx->ir_tc, ctx->sc->id);
		BT_ASSERT(ctx->ir_sc);
		goto end;
	}

	ctx->ir_sc = bt_stream_class_create_with_id(ctx->ir_tc, ctx->sc->id);
	BT_ASSERT(ctx->ir_sc);
	/*
	 * Drop our reference immediately; `ctx->ir_sc` is used below as
	 * a borrowed pointer. NOTE(review): this assumes the trace
	 * class keeps the stream class alive after creation — confirm
	 * with the libbabeltrace2 API documentation.
	 */
	bt_stream_class_put_ref(ctx->ir_sc);

	if (ctx->sc->default_clock_class) {
		/* Clock classes were translated by ctf_trace_class_to_ir(). */
		BT_ASSERT(ctx->sc->default_clock_class->ir_cc);
		ret = bt_stream_class_set_default_clock_class(ctx->ir_sc,
			ctx->sc->default_clock_class->ir_cc);
		BT_ASSERT(ret == 0);
	}

	/* Copy packet/discarded-events/discarded-packets support flags. */
	bt_stream_class_set_supports_packets(ctx->ir_sc, BT_TRUE,
		ctx->sc->packets_have_ts_begin, ctx->sc->packets_have_ts_end);
	bt_stream_class_set_supports_discarded_events(ctx->ir_sc,
		ctx->sc->has_discarded_events,
		ctx->sc->discarded_events_have_default_cs);
	bt_stream_class_set_supports_discarded_packets(ctx->ir_sc,
		ctx->sc->has_discarded_packets,
		ctx->sc->discarded_packets_have_default_cs);

	/* Translate the packet context and event common context scopes. */
	ctx->scope = CTF_SCOPE_PACKET_CONTEXT;
	ir_fc = scope_ctf_field_class_to_ir(ctx);
	if (ir_fc) {
		ret = bt_stream_class_set_packet_context_field_class(
			ctx->ir_sc, ir_fc);
		BT_ASSERT(ret == 0);
		bt_field_class_put_ref(ir_fc);
	}

	ctx->scope = CTF_SCOPE_EVENT_COMMON_CONTEXT;
	ir_fc = scope_ctf_field_class_to_ir(ctx);
	if (ir_fc) {
		ret = bt_stream_class_set_event_common_context_field_class(
			ctx->ir_sc, ir_fc);
		BT_ASSERT(ret == 0);
		bt_field_class_put_ref(ir_fc);
	}

	/* CTF metadata provides explicit event class and stream IDs. */
	bt_stream_class_set_assigns_automatic_event_class_id(ctx->ir_sc,
		BT_FALSE);
	bt_stream_class_set_assigns_automatic_stream_id(ctx->ir_sc, BT_FALSE);

	/* Mark as translated and cache the IR stream class. */
	ctx->sc->is_translated = true;
	ctx->sc->ir_sc = ctx->ir_sc;

end:
	return;
}
619
620 static inline
621 void ctf_clock_class_to_ir(bt_clock_class *ir_cc, struct ctf_clock_class *cc)
622 {
623 int ret;
624
625 if (strlen(cc->name->str) > 0) {
626 ret = bt_clock_class_set_name(ir_cc, cc->name->str);
627 BT_ASSERT(ret == 0);
628 }
629
630 if (strlen(cc->description->str) > 0) {
631 ret = bt_clock_class_set_description(ir_cc, cc->description->str);
632 BT_ASSERT(ret == 0);
633 }
634
635 bt_clock_class_set_frequency(ir_cc, cc->frequency);
636 bt_clock_class_set_precision(ir_cc, cc->precision);
637 bt_clock_class_set_offset(ir_cc, cc->offset_seconds, cc->offset_cycles);
638
639 if (cc->has_uuid) {
640 bt_clock_class_set_uuid(ir_cc, cc->uuid);
641 }
642
643 bt_clock_class_set_origin_is_unix_epoch(ir_cc, cc->is_absolute);
644 }
645
646 static inline
647 int ctf_trace_class_to_ir(struct ctx *ctx)
648 {
649 int ret = 0;
650 uint64_t i;
651
652 BT_ASSERT(ctx->tc);
653 BT_ASSERT(ctx->ir_tc);
654
655 if (ctx->tc->is_translated) {
656 goto end;
657 }
658
659 for (i = 0; i < ctx->tc->clock_classes->len; i++) {
660 struct ctf_clock_class *cc = ctx->tc->clock_classes->pdata[i];
661
662 cc->ir_cc = bt_clock_class_create(ctx->self_comp);
663 ctf_clock_class_to_ir(cc->ir_cc, cc);
664 }
665
666 bt_trace_class_set_assigns_automatic_stream_class_id(ctx->ir_tc,
667 BT_FALSE);
668 ctx->tc->is_translated = true;
669 ctx->tc->ir_tc = ctx->ir_tc;
670
671 end:
672 return ret;
673 }
674
675 BT_HIDDEN
676 int ctf_trace_class_translate(bt_self_component *self_comp,
677 bt_trace_class *ir_tc, struct ctf_trace_class *tc)
678 {
679 int ret = 0;
680 uint64_t i;
681 struct ctx ctx = { 0 };
682
683 ctx.self_comp = self_comp;
684 ctx.tc = tc;
685 ctx.ir_tc = ir_tc;
686 ret = ctf_trace_class_to_ir(&ctx);
687 if (ret) {
688 goto end;
689 }
690
691 for (i = 0; i < tc->stream_classes->len; i++) {
692 uint64_t j;
693 ctx.sc = tc->stream_classes->pdata[i];
694
695 ctf_stream_class_to_ir(&ctx);
696
697 for (j = 0; j < ctx.sc->event_classes->len; j++) {
698 ctx.ec = ctx.sc->event_classes->pdata[j];
699
700 ctf_event_class_to_ir(&ctx);
701 ctx.ec = NULL;
702 }
703
704 ctx.sc = NULL;
705 }
706
707 end:
708 return ret;
709 }
This page took 0.043199 seconds and 4 git commands to generate.