lib: make packets and packet messages optional, disabled by default
[babeltrace.git] / src / plugins / ctf / common / metadata / ctf-meta-translate.c
/*
 * Copyright 2018 - Philippe Proulx <pproulx@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 */

#include <babeltrace2/babeltrace.h>
#include "common/macros.h"
#include "common/assert.h"
#include <glib.h>
#include <stdint.h>
#include <string.h>
#include <inttypes.h>

#include "ctf-meta-visitors.h"

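/*
 * Translation context: the source CTF metadata objects, the
 * corresponding Babeltrace IR objects being built, and the scope
 * currently being translated.
 */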
struct ctx {
	bt_self_component *self_comp;
	bt_trace_class *ir_tc;
	bt_stream_class *ir_sc;
	struct ctf_trace_class *tc;
	struct ctf_stream_class *sc;
	struct ctf_event_class *ec;
	enum ctf_scope scope;
};

/* Forward declaration: translation recurses into compound field classes. */
static inline
bt_field_class *ctf_field_class_to_ir(struct ctx *ctx,
		struct ctf_field_class *fc);

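/*
 * Copies the field value range (from the size) and the preferred
 * display base of the CTF integer field class `fc` to the IR integer
 * field class `ir_fc`.
 */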
static inline
void ctf_field_class_int_set_props(struct ctf_field_class_int *fc,
		bt_field_class *ir_fc)
{
	bt_field_class_integer_set_field_value_range(ir_fc,
		fc->base.size);
	bt_field_class_integer_set_preferred_display_base(ir_fc,
		fc->disp_base);
}

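/* Translates the CTF integer field class `fc` to a new IR integer field class. */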
static inline
bt_field_class *ctf_field_class_int_to_ir(struct ctx *ctx,
		struct ctf_field_class_int *fc)
{
	bt_field_class *ir_fc;

	if (fc->is_signed) {
		ir_fc = bt_field_class_signed_integer_create(ctx->ir_tc);
	} else {
		ir_fc = bt_field_class_unsigned_integer_create(ctx->ir_tc);
	}

	BT_ASSERT(ir_fc);
	ctf_field_class_int_set_props(fc, ir_fc);
	return ir_fc;
}

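/*
 * Translates the CTF enumeration field class `fc` to a new IR
 * enumeration field class, mapping each label to its value range.
 */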
static inline
bt_field_class *ctf_field_class_enum_to_ir(struct ctx *ctx,
		struct ctf_field_class_enum *fc)
{
	int ret;
	bt_field_class *ir_fc;
	uint64_t i;

	if (fc->base.is_signed) {
		ir_fc = bt_field_class_signed_enumeration_create(ctx->ir_tc);
	} else {
		ir_fc = bt_field_class_unsigned_enumeration_create(ctx->ir_tc);
	}

	BT_ASSERT(ir_fc);
	ctf_field_class_int_set_props((void *) fc, ir_fc);

	for (i = 0; i < fc->mappings->len; i++) {
		struct ctf_field_class_enum_mapping *mapping =
			ctf_field_class_enum_borrow_mapping_by_index(fc, i);

		if (fc->base.is_signed) {
			ret = bt_field_class_signed_enumeration_map_range(
				ir_fc, mapping->label->str,
				mapping->range.lower.i, mapping->range.upper.i);
		} else {
			ret = bt_field_class_unsigned_enumeration_map_range(
				ir_fc, mapping->label->str,
				mapping->range.lower.u, mapping->range.upper.u);
		}

		BT_ASSERT(ret == 0);
	}

	return ir_fc;
}

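/*
 * Translates the CTF floating point number field class `fc` to a new
 * IR real field class (single precision when the size is 32 bits).
 */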
static inline
bt_field_class *ctf_field_class_float_to_ir(struct ctx *ctx,
		struct ctf_field_class_float *fc)
{
	bt_field_class *ir_fc;

	ir_fc = bt_field_class_real_create(ctx->ir_tc);
	BT_ASSERT(ir_fc);

	if (fc->base.size == 32) {
		bt_field_class_real_set_is_single_precision(ir_fc,
			BT_TRUE);
	}

	return ir_fc;
}

static inline
bt_field_class *ctf_field_class_string_to_ir(struct ctx *ctx,
		struct ctf_field_class_string *fc)
{
	bt_field_class *ir_fc = bt_field_class_string_create(ctx->ir_tc);

	BT_ASSERT(ir_fc);
	return ir_fc;
}

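/*
 * Appends, to the IR structure field class `ir_fc`, one member for
 * each member of the CTF structure field class `fc` which is marked
 * as existing in the IR (`in_ir` flag).
 */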
static inline
void translate_struct_field_class_members(struct ctx *ctx,
		struct ctf_field_class_struct *fc, bt_field_class *ir_fc,
		bool with_header_prefix,
		struct ctf_field_class_struct *context_fc)
{
	uint64_t i;
	int ret;

	for (i = 0; i < fc->members->len; i++) {
		struct ctf_named_field_class *named_fc =
			ctf_field_class_struct_borrow_member_by_index(fc, i);
		bt_field_class *member_ir_fc;
		const char *name = named_fc->name->str;

		if (!named_fc->fc->in_ir) {
			continue;
		}

		member_ir_fc = ctf_field_class_to_ir(ctx, named_fc->fc);
		BT_ASSERT(member_ir_fc);
		ret = bt_field_class_structure_append_member(ir_fc, name,
			member_ir_fc);
		BT_ASSERT(ret == 0);
		bt_field_class_put_ref(member_ir_fc);
	}
}

static inline
bt_field_class *ctf_field_class_struct_to_ir(struct ctx *ctx,
		struct ctf_field_class_struct *fc)
{
	bt_field_class *ir_fc = bt_field_class_structure_create(ctx->ir_tc);

	BT_ASSERT(ir_fc);
	translate_struct_field_class_members(ctx, fc, ir_fc, false, NULL);
	return ir_fc;
}

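/*
 * Borrows the IR field class previously created for the CTF field
 * class which `field_path` locates, or returns `NULL` if that field
 * class is not part of the IR.
 */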
static inline
bt_field_class *borrow_ir_fc_from_field_path(struct ctx *ctx,
		struct ctf_field_path *field_path)
{
	bt_field_class *ir_fc = NULL;
	struct ctf_field_class *fc = ctf_field_path_borrow_field_class(
		field_path, ctx->tc, ctx->sc, ctx->ec);

	BT_ASSERT(fc);

	if (fc->in_ir) {
		ir_fc = fc->ir_fc;
	}

	return ir_fc;
}

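/*
 * Translates the CTF variant field class `fc` to a new IR variant
 * field class. The selector field class is only set when the tag is
 * not located in a packet header or event header scope.
 */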
static inline
bt_field_class *ctf_field_class_variant_to_ir(struct ctx *ctx,
		struct ctf_field_class_variant *fc)
{
	int ret;
	bt_field_class *ir_fc = bt_field_class_variant_create(ctx->ir_tc);
	uint64_t i;

	BT_ASSERT(ir_fc);

	if (fc->tag_path.root != CTF_SCOPE_PACKET_HEADER &&
			fc->tag_path.root != CTF_SCOPE_EVENT_HEADER) {
		ret = bt_field_class_variant_set_selector_field_class(
			ir_fc, borrow_ir_fc_from_field_path(ctx,
				&fc->tag_path));
		BT_ASSERT(ret == 0);
	}

	for (i = 0; i < fc->options->len; i++) {
		struct ctf_named_field_class *named_fc =
			ctf_field_class_variant_borrow_option_by_index(fc, i);
		bt_field_class *option_ir_fc;

		BT_ASSERT(named_fc->fc->in_ir);
		option_ir_fc = ctf_field_class_to_ir(ctx, named_fc->fc);
		BT_ASSERT(option_ir_fc);
		ret = bt_field_class_variant_append_option(
			ir_fc, named_fc->name->str, option_ir_fc);
		BT_ASSERT(ret == 0);
		bt_field_class_put_ref(option_ir_fc);
	}

	return ir_fc;
}

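/*
 * Translates the CTF array field class `fc` to a new IR static array
 * field class, or to an IR string field class when it's a text array.
 */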
static inline
bt_field_class *ctf_field_class_array_to_ir(struct ctx *ctx,
		struct ctf_field_class_array *fc)
{
	bt_field_class *ir_fc;
	bt_field_class *elem_ir_fc;

	if (fc->base.is_text) {
		ir_fc = bt_field_class_string_create(ctx->ir_tc);
		BT_ASSERT(ir_fc);
		goto end;
	}

	elem_ir_fc = ctf_field_class_to_ir(ctx, fc->base.elem_fc);
	BT_ASSERT(elem_ir_fc);
	ir_fc = bt_field_class_static_array_create(ctx->ir_tc, elem_ir_fc,
		fc->length);
	BT_ASSERT(ir_fc);
	bt_field_class_put_ref(elem_ir_fc);

end:
	return ir_fc;
}

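/*
 * Translates the CTF sequence field class `fc` to a new IR dynamic
 * array field class, or to an IR string field class when it's a text
 * sequence. The length field class is only set when the length is not
 * located in a packet header or event header scope.
 */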
static inline
bt_field_class *ctf_field_class_sequence_to_ir(struct ctx *ctx,
		struct ctf_field_class_sequence *fc)
{
	int ret;
	bt_field_class *ir_fc;
	bt_field_class *elem_ir_fc;

	if (fc->base.is_text) {
		ir_fc = bt_field_class_string_create(ctx->ir_tc);
		BT_ASSERT(ir_fc);
		goto end;
	}

	elem_ir_fc = ctf_field_class_to_ir(ctx, fc->base.elem_fc);
	BT_ASSERT(elem_ir_fc);
	ir_fc = bt_field_class_dynamic_array_create(ctx->ir_tc, elem_ir_fc);
	BT_ASSERT(ir_fc);
	bt_field_class_put_ref(elem_ir_fc);

	if (fc->length_path.root != CTF_SCOPE_PACKET_HEADER &&
			fc->length_path.root != CTF_SCOPE_EVENT_HEADER) {
		ret = bt_field_class_dynamic_array_set_length_field_class(
			ir_fc, borrow_ir_fc_from_field_path(ctx, &fc->length_path));
		BT_ASSERT(ret == 0);
	}

end:
	return ir_fc;
}

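/*
 * Translates the CTF field class `fc` to a new IR field class,
 * dispatching to the specific translation function above, and records
 * the resulting IR field class in `fc->ir_fc`.
 */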
static inline
bt_field_class *ctf_field_class_to_ir(struct ctx *ctx,
		struct ctf_field_class *fc)
{
	bt_field_class *ir_fc = NULL;

	BT_ASSERT(fc);
	BT_ASSERT(fc->in_ir);

	switch (fc->type) {
	case CTF_FIELD_CLASS_TYPE_INT:
		ir_fc = ctf_field_class_int_to_ir(ctx, (void *) fc);
		break;
	case CTF_FIELD_CLASS_TYPE_ENUM:
		ir_fc = ctf_field_class_enum_to_ir(ctx, (void *) fc);
		break;
	case CTF_FIELD_CLASS_TYPE_FLOAT:
		ir_fc = ctf_field_class_float_to_ir(ctx, (void *) fc);
		break;
	case CTF_FIELD_CLASS_TYPE_STRING:
		ir_fc = ctf_field_class_string_to_ir(ctx, (void *) fc);
		break;
	case CTF_FIELD_CLASS_TYPE_STRUCT:
		ir_fc = ctf_field_class_struct_to_ir(ctx, (void *) fc);
		break;
	case CTF_FIELD_CLASS_TYPE_ARRAY:
		ir_fc = ctf_field_class_array_to_ir(ctx, (void *) fc);
		break;
	case CTF_FIELD_CLASS_TYPE_SEQUENCE:
		ir_fc = ctf_field_class_sequence_to_ir(ctx, (void *) fc);
		break;
	case CTF_FIELD_CLASS_TYPE_VARIANT:
		ir_fc = ctf_field_class_variant_to_ir(ctx, (void *) fc);
		break;
	default:
		abort();
	}

	fc->ir_fc = ir_fc;
	return ir_fc;
}

static inline
bool ctf_field_class_struct_has_immediate_member_in_ir(
		struct ctf_field_class_struct *fc)
{
	uint64_t i;
	bool has_immediate_member_in_ir = false;

	/*
	 * If the structure field class has no members at all, then it
	 * was an empty structure in the beginning, so leave it existing
	 * and empty.
	 */
	if (fc->members->len == 0) {
		has_immediate_member_in_ir = true;
		goto end;
	}

	for (i = 0; i < fc->members->len; i++) {
		struct ctf_named_field_class *named_fc =
			ctf_field_class_struct_borrow_member_by_index(fc, i);

		if (named_fc->fc->in_ir) {
			has_immediate_member_in_ir = true;
			goto end;
		}
	}

end:
	return has_immediate_member_in_ir;
}

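/*
 * Translates the root field class of the current scope (`ctx->scope`)
 * to a new IR field class, or returns `NULL` when there's nothing to
 * translate for that scope.
 */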
static inline
bt_field_class *scope_ctf_field_class_to_ir(struct ctx *ctx)
{
	bt_field_class *ir_fc = NULL;
	struct ctf_field_class *fc = NULL;

	switch (ctx->scope) {
	case CTF_SCOPE_PACKET_CONTEXT:
		fc = ctx->sc->packet_context_fc;
		break;
	case CTF_SCOPE_EVENT_COMMON_CONTEXT:
		fc = ctx->sc->event_common_context_fc;
		break;
	case CTF_SCOPE_EVENT_SPECIFIC_CONTEXT:
		fc = ctx->ec->spec_context_fc;
		break;
	case CTF_SCOPE_EVENT_PAYLOAD:
		fc = ctx->ec->payload_fc;
		break;
	default:
		abort();
	}

	if (fc && ctf_field_class_struct_has_immediate_member_in_ir(
			(void *) fc)) {
		ir_fc = ctf_field_class_to_ir(ctx, fc);
	}

	return ir_fc;
}

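/*
 * Translates the current CTF event class (`ctx->ec`) and its scopes to
 * a new IR event class within `ctx->ir_sc`, unless it was already
 * translated.
 */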
static inline
void ctf_event_class_to_ir(struct ctx *ctx)
{
	int ret;
	bt_event_class *ir_ec = NULL;
	bt_field_class *ir_fc;

	BT_ASSERT(ctx->ec);

	if (ctx->ec->is_translated) {
		ir_ec = bt_stream_class_borrow_event_class_by_id(
			ctx->ir_sc, ctx->ec->id);
		BT_ASSERT(ir_ec);
		goto end;
	}

	ir_ec = bt_event_class_create_with_id(ctx->ir_sc, ctx->ec->id);
	BT_ASSERT(ir_ec);
	bt_event_class_put_ref(ir_ec);
	ctx->scope = CTF_SCOPE_EVENT_SPECIFIC_CONTEXT;
	ir_fc = scope_ctf_field_class_to_ir(ctx);
	if (ir_fc) {
		ret = bt_event_class_set_specific_context_field_class(
			ir_ec, ir_fc);
		BT_ASSERT(ret == 0);
		bt_field_class_put_ref(ir_fc);
	}

	ctx->scope = CTF_SCOPE_EVENT_PAYLOAD;
	ir_fc = scope_ctf_field_class_to_ir(ctx);
	if (ir_fc) {
		ret = bt_event_class_set_payload_field_class(ir_ec,
			ir_fc);
		BT_ASSERT(ret == 0);
		bt_field_class_put_ref(ir_fc);
	}

	if (ctx->ec->name->len > 0) {
		ret = bt_event_class_set_name(ir_ec, ctx->ec->name->str);
		BT_ASSERT(ret == 0);
	}

	if (ctx->ec->emf_uri->len > 0) {
		ret = bt_event_class_set_emf_uri(ir_ec, ctx->ec->emf_uri->str);
		BT_ASSERT(ret == 0);
	}

	if (ctx->ec->log_level != -1) {
		bt_event_class_set_log_level(ir_ec, ctx->ec->log_level);
	}

	ctx->ec->is_translated = true;
	ctx->ec->ir_ec = ir_ec;

end:
	return;
}

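/*
 * Translates the current CTF stream class (`ctx->sc`) and its scopes
 * to a new IR stream class within `ctx->ir_tc`, unless it was already
 * translated.
 */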
static inline
void ctf_stream_class_to_ir(struct ctx *ctx)
{
	int ret;
	bt_field_class *ir_fc;

	BT_ASSERT(ctx->sc);

	if (ctx->sc->is_translated) {
		ctx->ir_sc = bt_trace_class_borrow_stream_class_by_id(
			ctx->ir_tc, ctx->sc->id);
		BT_ASSERT(ctx->ir_sc);
		goto end;
	}

	ctx->ir_sc = bt_stream_class_create_with_id(ctx->ir_tc, ctx->sc->id);
	BT_ASSERT(ctx->ir_sc);
	bt_stream_class_put_ref(ctx->ir_sc);

	if (ctx->sc->default_clock_class) {
		BT_ASSERT(ctx->sc->default_clock_class->ir_cc);
		ret = bt_stream_class_set_default_clock_class(ctx->ir_sc,
			ctx->sc->default_clock_class->ir_cc);
		BT_ASSERT(ret == 0);
	}

	bt_stream_class_set_supports_packets(ctx->ir_sc, BT_TRUE,
		ctx->sc->packets_have_ts_begin, ctx->sc->packets_have_ts_end);
	bt_stream_class_set_supports_discarded_events(ctx->ir_sc,
		ctx->sc->has_discarded_events,
		ctx->sc->discarded_events_have_default_cs);
	bt_stream_class_set_supports_discarded_packets(ctx->ir_sc,
		ctx->sc->has_discarded_packets,
		ctx->sc->discarded_packets_have_default_cs);
	ctx->scope = CTF_SCOPE_PACKET_CONTEXT;
	ir_fc = scope_ctf_field_class_to_ir(ctx);
	if (ir_fc) {
		ret = bt_stream_class_set_packet_context_field_class(
			ctx->ir_sc, ir_fc);
		BT_ASSERT(ret == 0);
		bt_field_class_put_ref(ir_fc);
	}

	ctx->scope = CTF_SCOPE_EVENT_COMMON_CONTEXT;
	ir_fc = scope_ctf_field_class_to_ir(ctx);
	if (ir_fc) {
		ret = bt_stream_class_set_event_common_context_field_class(
			ctx->ir_sc, ir_fc);
		BT_ASSERT(ret == 0);
		bt_field_class_put_ref(ir_fc);
	}

	bt_stream_class_set_assigns_automatic_event_class_id(ctx->ir_sc,
		BT_FALSE);
	bt_stream_class_set_assigns_automatic_stream_id(ctx->ir_sc, BT_FALSE);

	ctx->sc->is_translated = true;
	ctx->sc->ir_sc = ctx->ir_sc;

end:
	return;
}

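/* Copies the properties of the CTF clock class `cc` to the IR clock class `ir_cc`. */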
static inline
void ctf_clock_class_to_ir(bt_clock_class *ir_cc, struct ctf_clock_class *cc)
{
	int ret;

	if (strlen(cc->name->str) > 0) {
		ret = bt_clock_class_set_name(ir_cc, cc->name->str);
		BT_ASSERT(ret == 0);
	}

	if (strlen(cc->description->str) > 0) {
		ret = bt_clock_class_set_description(ir_cc, cc->description->str);
		BT_ASSERT(ret == 0);
	}

	bt_clock_class_set_frequency(ir_cc, cc->frequency);
	bt_clock_class_set_precision(ir_cc, cc->precision);
	bt_clock_class_set_offset(ir_cc, cc->offset_seconds, cc->offset_cycles);

	if (cc->has_uuid) {
		bt_clock_class_set_uuid(ir_cc, cc->uuid);
	}

	bt_clock_class_set_origin_is_unix_epoch(ir_cc, cc->is_absolute);
}

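/*
 * Translates the CTF trace class (`ctx->tc`) to the IR trace class
 * (`ctx->ir_tc`), creating an IR clock class for each CTF clock class,
 * unless it was already translated.
 */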
static inline
int ctf_trace_class_to_ir(struct ctx *ctx)
{
	int ret = 0;
	uint64_t i;

	BT_ASSERT(ctx->tc);
	BT_ASSERT(ctx->ir_tc);

	if (ctx->tc->is_translated) {
		goto end;
	}

	for (i = 0; i < ctx->tc->clock_classes->len; i++) {
		struct ctf_clock_class *cc = ctx->tc->clock_classes->pdata[i];

		cc->ir_cc = bt_clock_class_create(ctx->self_comp);
		ctf_clock_class_to_ir(cc->ir_cc, cc);
	}

	bt_trace_class_set_assigns_automatic_stream_class_id(ctx->ir_tc,
		BT_FALSE);
	ctx->tc->is_translated = true;
	ctx->tc->ir_tc = ctx->ir_tc;

end:
	return ret;
}

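/*
 * Translates the whole CTF trace class `tc`, including its stream
 * classes and their event classes, to the IR trace class `ir_tc`.
 * This is the entry point of this translation module.
 */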
BT_HIDDEN
int ctf_trace_class_translate(bt_self_component *self_comp,
		bt_trace_class *ir_tc, struct ctf_trace_class *tc)
{
	int ret = 0;
	uint64_t i;
	struct ctx ctx = { 0 };

	ctx.self_comp = self_comp;
	ctx.tc = tc;
	ctx.ir_tc = ir_tc;
	ret = ctf_trace_class_to_ir(&ctx);
	if (ret) {
		goto end;
	}

	for (i = 0; i < tc->stream_classes->len; i++) {
		uint64_t j;
		ctx.sc = tc->stream_classes->pdata[i];

		ctf_stream_class_to_ir(&ctx);

		for (j = 0; j < ctx.sc->event_classes->len; j++) {
			ctx.ec = ctx.sc->event_classes->pdata[j];

			ctf_event_class_to_ir(&ctx);
			ctx.ec = NULL;
		}

		ctx.sc = NULL;
	}

end:
	return ret;
}