Move to kernel style SPDX license identifiers
[babeltrace.git] / src / plugins / ctf / common / metadata / ctf-meta-translate.c
1 /*
2 * SPDX-License-Identifier: MIT
3 *
4 * Copyright 2018 Philippe Proulx <pproulx@efficios.com>
5 */
6
7 #include <babeltrace2/babeltrace.h>
8 #include "common/macros.h"
9 #include "common/assert.h"
10 #include <glib.h>
11 #include <stdbool.h>
12 #include <stdint.h>
13 #include <string.h>
14 #include <inttypes.h>
15
16 #include "ctf-meta-visitors.h"
17
/*
 * Translation context passed to all the ctf_*_to_ir() functions below.
 */
struct ctx {
	/* Self component used to create IR clock classes. */
	bt_self_component *self_comp;

	/* Destination trace IR trace class. */
	bt_trace_class *ir_tc;

	/* Destination trace IR stream class currently being filled. */
	bt_stream_class *ir_sc;

	/* Source CTF trace class. */
	struct ctf_trace_class *tc;

	/* Source CTF stream class currently being translated. */
	struct ctf_stream_class *sc;

	/* Source CTF event class currently being translated. */
	struct ctf_event_class *ec;

	/* Scope (packet context, event payload, etc.) being translated. */
	enum ctf_scope scope;
};
27
28 static inline
29 bt_field_class *ctf_field_class_to_ir(struct ctx *ctx,
30 struct ctf_field_class *fc);
31
32 static inline
33 void ctf_field_class_int_set_props(struct ctf_field_class_int *fc,
34 bt_field_class *ir_fc)
35 {
36 bt_field_class_integer_set_field_value_range(ir_fc,
37 fc->base.size);
38 bt_field_class_integer_set_preferred_display_base(ir_fc,
39 fc->disp_base);
40 }
41
42 static inline
43 bt_field_class *ctf_field_class_int_to_ir(struct ctx *ctx,
44 struct ctf_field_class_int *fc)
45 {
46 bt_field_class *ir_fc;
47
48 if (fc->is_signed) {
49 ir_fc = bt_field_class_integer_signed_create(ctx->ir_tc);
50 } else {
51 ir_fc = bt_field_class_integer_unsigned_create(ctx->ir_tc);
52 }
53
54 BT_ASSERT(ir_fc);
55 ctf_field_class_int_set_props(fc, ir_fc);
56 return ir_fc;
57 }
58
/*
 * Translates the CTF enumeration field class `fc` to a new trace IR
 * enumeration field class and returns it (owned by the caller).
 *
 * Each CTF mapping (label + ranges) is converted into an IR integer
 * range set which is then added to the IR class under the same label.
 */
static inline
bt_field_class *ctf_field_class_enum_to_ir(struct ctx *ctx,
		struct ctf_field_class_enum *fc)
{
	int ret;
	bt_field_class *ir_fc;
	uint64_t i;

	/* Signedness selects the concrete IR enumeration class. */
	if (fc->base.is_signed) {
		ir_fc = bt_field_class_enumeration_signed_create(ctx->ir_tc);
	} else {
		ir_fc = bt_field_class_enumeration_unsigned_create(ctx->ir_tc);
	}

	BT_ASSERT(ir_fc);

	/* An enumeration class is also an integer class: copy common props. */
	ctf_field_class_int_set_props((void *) fc, ir_fc);

	for (i = 0; i < fc->mappings->len; i++) {
		struct ctf_field_class_enum_mapping *mapping =
			ctf_field_class_enum_borrow_mapping_by_index(fc, i);
		/*
		 * `void *` because the concrete range set type (signed
		 * or unsigned) depends on `fc->base.is_signed`.
		 */
		void *range_set;
		uint64_t range_i;

		if (fc->base.is_signed) {
			range_set = bt_integer_range_set_signed_create();
		} else {
			range_set = bt_integer_range_set_unsigned_create();
		}

		BT_ASSERT(range_set);

		/* Copy each of the mapping's ranges into the range set. */
		for (range_i = 0; range_i < mapping->ranges->len; range_i++) {
			struct ctf_range *range =
				ctf_field_class_enum_mapping_borrow_range_by_index(
					mapping, range_i);

			if (fc->base.is_signed) {
				ret = bt_integer_range_set_signed_add_range(
					range_set, range->lower.i,
					range->upper.i);
			} else {
				ret = bt_integer_range_set_unsigned_add_range(
					range_set, range->lower.u,
					range->upper.u);
			}

			BT_ASSERT(ret == 0);
		}

		/*
		 * Add the mapping to the IR class; the class acquires
		 * its own reference on the range set, so drop ours
		 * immediately after.
		 */
		if (fc->base.is_signed) {
			ret = bt_field_class_enumeration_signed_add_mapping(
				ir_fc, mapping->label->str, range_set);
			BT_INTEGER_RANGE_SET_SIGNED_PUT_REF_AND_RESET(range_set);
		} else {
			ret = bt_field_class_enumeration_unsigned_add_mapping(
				ir_fc, mapping->label->str, range_set);
			BT_INTEGER_RANGE_SET_UNSIGNED_PUT_REF_AND_RESET(range_set);
		}

		BT_ASSERT(ret == 0);
	}

	return ir_fc;
}
123
124 static inline
125 bt_field_class *ctf_field_class_float_to_ir(struct ctx *ctx,
126 struct ctf_field_class_float *fc)
127 {
128 bt_field_class *ir_fc;
129
130 if (fc->base.size == 32) {
131 ir_fc = bt_field_class_real_single_precision_create(ctx->ir_tc);
132 } else {
133 ir_fc = bt_field_class_real_double_precision_create(ctx->ir_tc);
134 }
135 BT_ASSERT(ir_fc);
136
137 return ir_fc;
138 }
139
140 static inline
141 bt_field_class *ctf_field_class_string_to_ir(struct ctx *ctx,
142 struct ctf_field_class_string *fc)
143 {
144 bt_field_class *ir_fc = bt_field_class_string_create(ctx->ir_tc);
145
146 BT_ASSERT(ir_fc);
147 return ir_fc;
148 }
149
150 static inline
151 void translate_struct_field_class_members(struct ctx *ctx,
152 struct ctf_field_class_struct *fc, bt_field_class *ir_fc,
153 bool with_header_prefix,
154 struct ctf_field_class_struct *context_fc)
155 {
156 uint64_t i;
157 int ret;
158
159 for (i = 0; i < fc->members->len; i++) {
160 struct ctf_named_field_class *named_fc =
161 ctf_field_class_struct_borrow_member_by_index(fc, i);
162 bt_field_class *member_ir_fc;
163 const char *name = named_fc->name->str;
164
165 if (!named_fc->fc->in_ir) {
166 continue;
167 }
168
169 member_ir_fc = ctf_field_class_to_ir(ctx, named_fc->fc);
170 BT_ASSERT(member_ir_fc);
171 ret = bt_field_class_structure_append_member(ir_fc, name,
172 member_ir_fc);
173 BT_ASSERT(ret == 0);
174 bt_field_class_put_ref(member_ir_fc);
175 }
176 }
177
178 static inline
179 bt_field_class *ctf_field_class_struct_to_ir(struct ctx *ctx,
180 struct ctf_field_class_struct *fc)
181 {
182 bt_field_class *ir_fc = bt_field_class_structure_create(ctx->ir_tc);
183
184 BT_ASSERT(ir_fc);
185 translate_struct_field_class_members(ctx, fc, ir_fc, false, NULL);
186 return ir_fc;
187 }
188
189 static inline
190 bt_field_class *borrow_ir_fc_from_field_path(struct ctx *ctx,
191 struct ctf_field_path *field_path)
192 {
193 bt_field_class *ir_fc = NULL;
194 struct ctf_field_class *fc = ctf_field_path_borrow_field_class(
195 field_path, ctx->tc, ctx->sc, ctx->ec);
196
197 BT_ASSERT(fc);
198
199 if (fc->in_ir) {
200 ir_fc = fc->ir_fc;
201 }
202
203 return ir_fc;
204 }
205
206 static inline
207 const void *find_ir_enum_field_class_mapping_by_label(const bt_field_class *fc,
208 const char *label, bool is_signed)
209 {
210 const void *mapping = NULL;
211 uint64_t i;
212
213 for (i = 0; i < bt_field_class_enumeration_get_mapping_count(fc); i++) {
214 const bt_field_class_enumeration_mapping *this_mapping;
215 const void *spec_this_mapping;
216
217 if (is_signed) {
218 spec_this_mapping =
219 bt_field_class_enumeration_signed_borrow_mapping_by_index_const(
220 fc, i);
221 this_mapping =
222 bt_field_class_enumeration_signed_mapping_as_mapping_const(
223 spec_this_mapping);
224 } else {
225 spec_this_mapping =
226 bt_field_class_enumeration_unsigned_borrow_mapping_by_index_const(
227 fc, i);
228 this_mapping =
229 bt_field_class_enumeration_unsigned_mapping_as_mapping_const(
230 spec_this_mapping);
231 }
232
233 BT_ASSERT(this_mapping);
234 BT_ASSERT(spec_this_mapping);
235
236 if (strcmp(bt_field_class_enumeration_mapping_get_label(
237 this_mapping), label) == 0) {
238 mapping = spec_this_mapping;
239 goto end;
240 }
241 }
242
243 end:
244 return mapping;
245 }
246
/*
 * Translates the CTF variant field class `fc` to a new trace IR
 * variant field class, translating all of its options, and returns it
 * (owned by the caller).
 *
 * The IR variant gets a selector (tag) field class only when the
 * variant's tag does not live in a packet header or event header
 * scope; otherwise it is created without a selector.
 */
static inline
bt_field_class *ctf_field_class_variant_to_ir(struct ctx *ctx,
		struct ctf_field_class_variant *fc)
{
	int ret;
	bt_field_class *ir_fc;
	uint64_t i;
	bt_field_class *ir_tag_fc = NULL;

	/* Borrow the translated selector class, except for header scopes. */
	if (fc->tag_path.root != CTF_SCOPE_PACKET_HEADER &&
			fc->tag_path.root != CTF_SCOPE_EVENT_HEADER) {
		ir_tag_fc = borrow_ir_fc_from_field_path(ctx, &fc->tag_path);
		BT_ASSERT(ir_tag_fc);
	}

	ir_fc = bt_field_class_variant_create(ctx->ir_tc, ir_tag_fc);
	BT_ASSERT(ir_fc);

	for (i = 0; i < fc->options->len; i++) {
		struct ctf_named_field_class *named_fc =
			ctf_field_class_variant_borrow_option_by_index(fc, i);
		bt_field_class *option_ir_fc;

		BT_ASSERT(named_fc->fc->in_ir);
		option_ir_fc = ctf_field_class_to_ir(ctx, named_fc->fc);
		BT_ASSERT(option_ir_fc);

		if (ir_tag_fc) {
			/*
			 * At this point the trace IR selector
			 * (enumeration) field class already exists if
			 * the variant is tagged (`ir_tag_fc`). This one
			 * already contains range sets for its mappings,
			 * so we just reuse the same, finding them by
			 * matching a variant field class's option's
			 * _original_ name (with a leading underscore,
			 * possibly) with a selector field class's
			 * mapping name.
			 */
			if (fc->tag_fc->base.is_signed) {
				const bt_field_class_enumeration_signed_mapping *mapping =
					find_ir_enum_field_class_mapping_by_label(
						ir_tag_fc,
						named_fc->orig_name->str, true);
				const bt_integer_range_set_signed *range_set;

				BT_ASSERT(mapping);
				range_set =
					bt_field_class_enumeration_signed_mapping_borrow_ranges_const(
						mapping);
				BT_ASSERT(range_set);
				ret = bt_field_class_variant_with_selector_field_integer_signed_append_option(
					ir_fc, named_fc->name->str,
					option_ir_fc, range_set);
			} else {
				const bt_field_class_enumeration_unsigned_mapping *mapping =
					find_ir_enum_field_class_mapping_by_label(
						ir_tag_fc,
						named_fc->orig_name->str,
						false);
				const bt_integer_range_set_unsigned *range_set;

				BT_ASSERT(mapping);
				range_set =
					bt_field_class_enumeration_unsigned_mapping_borrow_ranges_const(
						mapping);
				BT_ASSERT(range_set);
				ret = bt_field_class_variant_with_selector_field_integer_unsigned_append_option(
					ir_fc, named_fc->name->str,
					option_ir_fc, range_set);
			}
		} else {
			ret = bt_field_class_variant_without_selector_append_option(
				ir_fc, named_fc->name->str, option_ir_fc);
		}

		BT_ASSERT(ret == 0);
		bt_field_class_put_ref(option_ir_fc);
	}

	return ir_fc;
}
329
330 static inline
331 bt_field_class *ctf_field_class_array_to_ir(struct ctx *ctx,
332 struct ctf_field_class_array *fc)
333 {
334 bt_field_class *ir_fc;
335 bt_field_class *elem_ir_fc;
336
337 if (fc->base.is_text) {
338 ir_fc = bt_field_class_string_create(ctx->ir_tc);
339 BT_ASSERT(ir_fc);
340 goto end;
341 }
342
343 elem_ir_fc = ctf_field_class_to_ir(ctx, fc->base.elem_fc);
344 BT_ASSERT(elem_ir_fc);
345 ir_fc = bt_field_class_array_static_create(ctx->ir_tc, elem_ir_fc,
346 fc->length);
347 BT_ASSERT(ir_fc);
348 bt_field_class_put_ref(elem_ir_fc);
349
350 end:
351 return ir_fc;
352 }
353
354 static inline
355 bt_field_class *ctf_field_class_sequence_to_ir(struct ctx *ctx,
356 struct ctf_field_class_sequence *fc)
357 {
358 bt_field_class *ir_fc;
359 bt_field_class *elem_ir_fc;
360 bt_field_class *length_fc = NULL;
361
362 if (fc->base.is_text) {
363 ir_fc = bt_field_class_string_create(ctx->ir_tc);
364 BT_ASSERT(ir_fc);
365 goto end;
366 }
367
368 elem_ir_fc = ctf_field_class_to_ir(ctx, fc->base.elem_fc);
369 BT_ASSERT(elem_ir_fc);
370
371 if (fc->length_path.root != CTF_SCOPE_PACKET_HEADER &&
372 fc->length_path.root != CTF_SCOPE_EVENT_HEADER) {
373 length_fc = borrow_ir_fc_from_field_path(ctx, &fc->length_path);
374 BT_ASSERT(length_fc);
375 }
376
377 ir_fc = bt_field_class_array_dynamic_create(ctx->ir_tc, elem_ir_fc,
378 length_fc);
379 BT_ASSERT(ir_fc);
380 bt_field_class_put_ref(elem_ir_fc);
381 BT_ASSERT(ir_fc);
382
383 end:
384 return ir_fc;
385 }
386
387 static inline
388 bt_field_class *ctf_field_class_to_ir(struct ctx *ctx,
389 struct ctf_field_class *fc)
390 {
391 bt_field_class *ir_fc = NULL;
392
393 BT_ASSERT(fc);
394 BT_ASSERT(fc->in_ir);
395
396 switch (fc->type) {
397 case CTF_FIELD_CLASS_TYPE_INT:
398 ir_fc = ctf_field_class_int_to_ir(ctx, (void *) fc);
399 break;
400 case CTF_FIELD_CLASS_TYPE_ENUM:
401 ir_fc = ctf_field_class_enum_to_ir(ctx, (void *) fc);
402 break;
403 case CTF_FIELD_CLASS_TYPE_FLOAT:
404 ir_fc = ctf_field_class_float_to_ir(ctx, (void *) fc);
405 break;
406 case CTF_FIELD_CLASS_TYPE_STRING:
407 ir_fc = ctf_field_class_string_to_ir(ctx, (void *) fc);
408 break;
409 case CTF_FIELD_CLASS_TYPE_STRUCT:
410 ir_fc = ctf_field_class_struct_to_ir(ctx, (void *) fc);
411 break;
412 case CTF_FIELD_CLASS_TYPE_ARRAY:
413 ir_fc = ctf_field_class_array_to_ir(ctx, (void *) fc);
414 break;
415 case CTF_FIELD_CLASS_TYPE_SEQUENCE:
416 ir_fc = ctf_field_class_sequence_to_ir(ctx, (void *) fc);
417 break;
418 case CTF_FIELD_CLASS_TYPE_VARIANT:
419 ir_fc = ctf_field_class_variant_to_ir(ctx, (void *) fc);
420 break;
421 default:
422 bt_common_abort();
423 }
424
425 fc->ir_fc = ir_fc;
426 return ir_fc;
427 }
428
429 static inline
430 bool ctf_field_class_struct_has_immediate_member_in_ir(
431 struct ctf_field_class_struct *fc)
432 {
433 uint64_t i;
434 bool has_immediate_member_in_ir = false;
435
436 /*
437 * If the structure field class has no members at all, then it
438 * was an empty structure in the beginning, so leave it existing
439 * and empty.
440 */
441 if (fc->members->len == 0) {
442 has_immediate_member_in_ir = true;
443 goto end;
444 }
445
446 for (i = 0; i < fc->members->len; i++) {
447 struct ctf_named_field_class *named_fc =
448 ctf_field_class_struct_borrow_member_by_index(fc, i);
449
450 if (named_fc->fc->in_ir) {
451 has_immediate_member_in_ir = true;
452 goto end;
453 }
454 }
455
456 end:
457 return has_immediate_member_in_ir;
458 }
459
460 static inline
461 bt_field_class *scope_ctf_field_class_to_ir(struct ctx *ctx)
462 {
463 bt_field_class *ir_fc = NULL;
464 struct ctf_field_class *fc = NULL;
465
466 switch (ctx->scope) {
467 case CTF_SCOPE_PACKET_CONTEXT:
468 fc = ctx->sc->packet_context_fc;
469 break;
470 case CTF_SCOPE_EVENT_COMMON_CONTEXT:
471 fc = ctx->sc->event_common_context_fc;
472 break;
473 case CTF_SCOPE_EVENT_SPECIFIC_CONTEXT:
474 fc = ctx->ec->spec_context_fc;
475 break;
476 case CTF_SCOPE_EVENT_PAYLOAD:
477 fc = ctx->ec->payload_fc;
478 break;
479 default:
480 bt_common_abort();
481 }
482
483 if (fc && ctf_field_class_struct_has_immediate_member_in_ir(
484 (void *) fc)) {
485 ir_fc = ctf_field_class_to_ir(ctx, fc);
486 }
487
488 return ir_fc;
489 }
490
/*
 * Translates the current CTF event class (`ctx->ec`) to a trace IR
 * event class within `ctx->ir_sc`, unless it was already translated,
 * in which case the existing IR event class is reused.
 */
static inline
void ctf_event_class_to_ir(struct ctx *ctx)
{
	int ret;
	bt_event_class *ir_ec = NULL;
	bt_field_class *ir_fc;

	BT_ASSERT(ctx->ec);

	/* Already translated: borrow the existing IR event class. */
	if (ctx->ec->is_translated) {
		ir_ec = bt_stream_class_borrow_event_class_by_id(
			ctx->ir_sc, ctx->ec->id);
		BT_ASSERT(ir_ec);
		goto end;
	}

	ir_ec = bt_event_class_create_with_id(ctx->ir_sc, ctx->ec->id);
	BT_ASSERT(ir_ec);

	/*
	 * Drop our reference immediately: the stream class keeps the
	 * event class alive, so `ir_ec` remains a valid borrowed
	 * pointer below.
	 */
	bt_event_class_put_ref(ir_ec);

	/* Translate the specific context scope, if it yields members. */
	ctx->scope = CTF_SCOPE_EVENT_SPECIFIC_CONTEXT;
	ir_fc = scope_ctf_field_class_to_ir(ctx);
	if (ir_fc) {
		ret = bt_event_class_set_specific_context_field_class(
			ir_ec, ir_fc);
		BT_ASSERT(ret == 0);
		bt_field_class_put_ref(ir_fc);
	}

	/* Translate the payload scope, if it yields members. */
	ctx->scope = CTF_SCOPE_EVENT_PAYLOAD;
	ir_fc = scope_ctf_field_class_to_ir(ctx);
	if (ir_fc) {
		ret = bt_event_class_set_payload_field_class(ir_ec,
			ir_fc);
		BT_ASSERT(ret == 0);
		bt_field_class_put_ref(ir_fc);
	}

	/* Copy optional event class properties when they are set. */
	if (ctx->ec->name->len > 0) {
		ret = bt_event_class_set_name(ir_ec, ctx->ec->name->str);
		BT_ASSERT(ret == 0);
	}

	if (ctx->ec->emf_uri->len > 0) {
		ret = bt_event_class_set_emf_uri(ir_ec, ctx->ec->emf_uri->str);
		BT_ASSERT(ret == 0);
	}

	if (ctx->ec->is_log_level_set) {
		bt_event_class_set_log_level(ir_ec, ctx->ec->log_level);
	}

	/* Mark as translated so a second call reuses `ir_ec`. */
	ctx->ec->is_translated = true;
	ctx->ec->ir_ec = ir_ec;

end:
	return;
}
548
549
/*
 * Translates the current CTF stream class (`ctx->sc`) to a trace IR
 * stream class within `ctx->ir_tc`, setting `ctx->ir_sc`, unless it
 * was already translated, in which case the existing IR stream class
 * is reused.
 */
static inline
void ctf_stream_class_to_ir(struct ctx *ctx)
{
	int ret;
	bt_field_class *ir_fc;

	BT_ASSERT(ctx->sc);

	/* Already translated: borrow the existing IR stream class. */
	if (ctx->sc->is_translated) {
		ctx->ir_sc = bt_trace_class_borrow_stream_class_by_id(
			ctx->ir_tc, ctx->sc->id);
		BT_ASSERT(ctx->ir_sc);
		goto end;
	}

	ctx->ir_sc = bt_stream_class_create_with_id(ctx->ir_tc, ctx->sc->id);
	BT_ASSERT(ctx->ir_sc);

	/*
	 * Drop our reference immediately: the trace class keeps the
	 * stream class alive, so `ctx->ir_sc` remains a valid borrowed
	 * pointer below.
	 */
	bt_stream_class_put_ref(ctx->ir_sc);

	/* Default clock class must be translated before the stream classes. */
	if (ctx->sc->default_clock_class) {
		BT_ASSERT(ctx->sc->default_clock_class->ir_cc);
		ret = bt_stream_class_set_default_clock_class(ctx->ir_sc,
			ctx->sc->default_clock_class->ir_cc);
		BT_ASSERT(ret == 0);
	}

	/* Copy packet/discarded-items support properties. */
	bt_stream_class_set_supports_packets(ctx->ir_sc, BT_TRUE,
		ctx->sc->packets_have_ts_begin, ctx->sc->packets_have_ts_end);
	bt_stream_class_set_supports_discarded_events(ctx->ir_sc,
		ctx->sc->has_discarded_events,
		ctx->sc->discarded_events_have_default_cs);
	bt_stream_class_set_supports_discarded_packets(ctx->ir_sc,
		ctx->sc->has_discarded_packets,
		ctx->sc->discarded_packets_have_default_cs);

	/* Translate the packet context scope, if it yields members. */
	ctx->scope = CTF_SCOPE_PACKET_CONTEXT;
	ir_fc = scope_ctf_field_class_to_ir(ctx);
	if (ir_fc) {
		ret = bt_stream_class_set_packet_context_field_class(
			ctx->ir_sc, ir_fc);
		BT_ASSERT(ret == 0);
		bt_field_class_put_ref(ir_fc);
	}

	/* Translate the event common context scope, if it yields members. */
	ctx->scope = CTF_SCOPE_EVENT_COMMON_CONTEXT;
	ir_fc = scope_ctf_field_class_to_ir(ctx);
	if (ir_fc) {
		ret = bt_stream_class_set_event_common_context_field_class(
			ctx->ir_sc, ir_fc);
		BT_ASSERT(ret == 0);
		bt_field_class_put_ref(ir_fc);
	}

	/* IDs come from the CTF metadata, not from automatic assignment. */
	bt_stream_class_set_assigns_automatic_event_class_id(ctx->ir_sc,
		BT_FALSE);
	bt_stream_class_set_assigns_automatic_stream_id(ctx->ir_sc, BT_FALSE);

	/* Mark as translated so a second call reuses `ctx->sc->ir_sc`. */
	ctx->sc->is_translated = true;
	ctx->sc->ir_sc = ctx->ir_sc;

end:
	return;
}
612
613 static inline
614 void ctf_clock_class_to_ir(bt_clock_class *ir_cc, struct ctf_clock_class *cc)
615 {
616 int ret;
617
618 if (strlen(cc->name->str) > 0) {
619 ret = bt_clock_class_set_name(ir_cc, cc->name->str);
620 BT_ASSERT(ret == 0);
621 }
622
623 if (strlen(cc->description->str) > 0) {
624 ret = bt_clock_class_set_description(ir_cc, cc->description->str);
625 BT_ASSERT(ret == 0);
626 }
627
628 bt_clock_class_set_frequency(ir_cc, cc->frequency);
629 bt_clock_class_set_precision(ir_cc, cc->precision);
630 bt_clock_class_set_offset(ir_cc, cc->offset_seconds, cc->offset_cycles);
631
632 if (cc->has_uuid) {
633 bt_clock_class_set_uuid(ir_cc, cc->uuid);
634 }
635
636 bt_clock_class_set_origin_is_unix_epoch(ir_cc, cc->is_absolute);
637 }
638
639 static inline
640 int ctf_trace_class_to_ir(struct ctx *ctx)
641 {
642 int ret = 0;
643 uint64_t i;
644
645 BT_ASSERT(ctx->tc);
646 BT_ASSERT(ctx->ir_tc);
647
648 if (ctx->tc->is_translated) {
649 goto end;
650 }
651
652 for (i = 0; i < ctx->tc->clock_classes->len; i++) {
653 struct ctf_clock_class *cc = ctx->tc->clock_classes->pdata[i];
654
655 cc->ir_cc = bt_clock_class_create(ctx->self_comp);
656 ctf_clock_class_to_ir(cc->ir_cc, cc);
657 }
658
659 bt_trace_class_set_assigns_automatic_stream_class_id(ctx->ir_tc,
660 BT_FALSE);
661 ctx->tc->is_translated = true;
662 ctx->tc->ir_tc = ctx->ir_tc;
663
664 end:
665 return ret;
666 }
667
668 BT_HIDDEN
669 int ctf_trace_class_translate(bt_self_component *self_comp,
670 bt_trace_class *ir_tc, struct ctf_trace_class *tc)
671 {
672 int ret = 0;
673 uint64_t i;
674 struct ctx ctx = { 0 };
675
676 ctx.self_comp = self_comp;
677 ctx.tc = tc;
678 ctx.ir_tc = ir_tc;
679 ret = ctf_trace_class_to_ir(&ctx);
680 if (ret) {
681 goto end;
682 }
683
684 for (i = 0; i < tc->stream_classes->len; i++) {
685 uint64_t j;
686 ctx.sc = tc->stream_classes->pdata[i];
687
688 ctf_stream_class_to_ir(&ctx);
689
690 for (j = 0; j < ctx.sc->event_classes->len; j++) {
691 ctx.ec = ctx.sc->event_classes->pdata[j];
692
693 ctf_event_class_to_ir(&ctx);
694 ctx.ec = NULL;
695 }
696
697 ctx.sc = NULL;
698 }
699
700 end:
701 return ret;
702 }
This page took 0.043753 seconds and 4 git commands to generate.