src.ctf.fs: modernize read_src_fs_parameters
[babeltrace.git] / src / plugins / ctf / fs-src / fs.cpp
1 /*
2 * SPDX-License-Identifier: MIT
3 *
4 * Copyright 2015-2017 Philippe Proulx <pproulx@efficios.com>
5 * Copyright 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
6 *
7 * Babeltrace CTF file system Reader Component
8 */
9
10 #include <sstream>
11
12 #include <glib.h>
13
14 #include <babeltrace2/babeltrace.h>
15
16 #include "common/assert.h"
17 #include "common/common.h"
18 #include "common/uuid.h"
19 #include "cpp-common/bt2c/glib-up.hpp"
20 #include "cpp-common/bt2s/make-unique.hpp"
21
22 #include "plugins/common/param-validation/param-validation.h"
23
24 #include "../common/src/metadata/tsdl/ctf-meta-configure-ir-trace.hpp"
25 #include "../common/src/msg-iter/msg-iter.hpp"
26 #include "data-stream-file.hpp"
27 #include "file.hpp"
28 #include "fs.hpp"
29 #include "metadata.hpp"
30 #include "query.hpp"
31
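/*
 * Tracer name and version, as extracted from the trace's environment
 * entries (`tracer_name`, `tracer_major`, `tracer_minor` and
 * `tracer_patch`/`tracer_patchlevel`), used to detect traces produced
 * by tracer versions with known packet index bugs.
 */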
32 struct tracer_info
33 {
34 const char *name;
35 int64_t major;
36 int64_t minor;
37 int64_t patch;
38 };
39
40 static bt_message_iterator_class_next_method_status
41 ctf_fs_iterator_next_one(struct ctf_fs_msg_iter_data *msg_iter_data, const bt_message **out_msg)
42 {
43 const auto msg_iter_status =
44 ctf_msg_iter_get_next_message(msg_iter_data->msg_iter.get(), out_msg);
45 bt_message_iterator_class_next_method_status status;
46
47 switch (msg_iter_status) {
48 case CTF_MSG_ITER_STATUS_OK:
49 /* Cool, message has been written to *out_msg. */
50 status = BT_MESSAGE_ITERATOR_CLASS_NEXT_METHOD_STATUS_OK;
51 break;
52
53 case CTF_MSG_ITER_STATUS_EOF:
54 status = BT_MESSAGE_ITERATOR_CLASS_NEXT_METHOD_STATUS_END;
55 break;
56
57 case CTF_MSG_ITER_STATUS_AGAIN:
58 /*
59 * Should not make it this far as this is
60 * medium-specific; there is nothing for the user to do
61 * and it should have been handled upstream.
62 */
63 bt_common_abort();
64
65 case CTF_MSG_ITER_STATUS_ERROR:
66 BT_CPPLOGE_APPEND_CAUSE_SPEC(msg_iter_data->logger,
67 "Failed to get next message from CTF message iterator.");
68 status = BT_MESSAGE_ITERATOR_CLASS_NEXT_METHOD_STATUS_ERROR;
69 break;
70
71 case CTF_MSG_ITER_STATUS_MEMORY_ERROR:
72 BT_CPPLOGE_APPEND_CAUSE_SPEC(msg_iter_data->logger,
73 "Failed to get next message from CTF message iterator.");
74 status = BT_MESSAGE_ITERATOR_CLASS_NEXT_METHOD_STATUS_MEMORY_ERROR;
75 break;
76
77 default:
78 bt_common_abort();
79 }
80
81 return status;
82 }
83
84 bt_message_iterator_class_next_method_status
85 ctf_fs_iterator_next(bt_self_message_iterator *iterator, bt_message_array_const msgs,
86 uint64_t capacity, uint64_t *count)
87 {
88 try {
89 struct ctf_fs_msg_iter_data *msg_iter_data =
90 (struct ctf_fs_msg_iter_data *) bt_self_message_iterator_get_data(iterator);
91
92 if (G_UNLIKELY(msg_iter_data->next_saved_error)) {
93 /*
94 * Last time we were called, we hit an error but had some
95 * messages to deliver, so we stashed the error here. Return
96 * it now.
97 */
98 BT_CURRENT_THREAD_MOVE_ERROR_AND_RESET(msg_iter_data->next_saved_error);
99 return msg_iter_data->next_saved_status;
100 }
101
102 bt_message_iterator_class_next_method_status status;
103 uint64_t i = 0;
104
105 do {
106 status = ctf_fs_iterator_next_one(msg_iter_data, &msgs[i]);
107 if (status == BT_MESSAGE_ITERATOR_CLASS_NEXT_METHOD_STATUS_OK) {
108 i++;
109 }
110 } while (i < capacity && status == BT_MESSAGE_ITERATOR_CLASS_NEXT_METHOD_STATUS_OK);
111
112 if (i > 0) {
113 /*
114 * Even if ctf_fs_iterator_next_one() returned something
115 * else than BT_MESSAGE_ITERATOR_NEXT_METHOD_STATUS_OK, we
116 * accumulated message objects in the output
117 * message array, so we need to return
118 * BT_MESSAGE_ITERATOR_NEXT_METHOD_STATUS_OK so that they are
119 * transferred to downstream. This other status occurs
120 again the next time ctf_fs_iterator_next() is
121 * called, possibly without any accumulated
122 * message, in which case we'll return it.
123 */
124 if (status < 0) {
125 /*
126 * Save this error for the next _next call. Assume that
127 * this component always appends error causes when
128 * returning an error status code, which will cause the
129 * current thread error to be non-NULL.
130 */
131 msg_iter_data->next_saved_error = bt_current_thread_take_error();
132 BT_ASSERT(msg_iter_data->next_saved_error);
133 msg_iter_data->next_saved_status = status;
134 }
135
136 *count = i;
137 status = BT_MESSAGE_ITERATOR_CLASS_NEXT_METHOD_STATUS_OK;
138 }
139
140 return status;
142 } catch (const std::bad_alloc&) {
143 return BT_MESSAGE_ITERATOR_CLASS_NEXT_METHOD_STATUS_MEMORY_ERROR;
144 } catch (const bt2::Error&) {
145 return BT_MESSAGE_ITERATOR_CLASS_NEXT_METHOD_STATUS_ERROR;
146 }
147 }
148
149 bt_message_iterator_class_seek_beginning_method_status
150 ctf_fs_iterator_seek_beginning(bt_self_message_iterator *it)
151 {
152 try {
153 struct ctf_fs_msg_iter_data *msg_iter_data =
154 (struct ctf_fs_msg_iter_data *) bt_self_message_iterator_get_data(it);
155
156 BT_ASSERT(msg_iter_data);
157
158 ctf_msg_iter_reset(msg_iter_data->msg_iter.get());
159 ctf_fs_ds_group_medops_data_reset(msg_iter_data->msg_iter_medops_data.get());
160
161 return BT_MESSAGE_ITERATOR_CLASS_SEEK_BEGINNING_METHOD_STATUS_OK;
162 } catch (const std::bad_alloc&) {
163 return BT_MESSAGE_ITERATOR_CLASS_SEEK_BEGINNING_METHOD_STATUS_MEMORY_ERROR;
164 } catch (const bt2::Error&) {
165 return BT_MESSAGE_ITERATOR_CLASS_SEEK_BEGINNING_METHOD_STATUS_ERROR;
166 }
167 }
168
169 void ctf_fs_iterator_finalize(bt_self_message_iterator *it)
170 {
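/*
 * Wrap the iterator's private data in a temporary unique_ptr: it is
 * destroyed, and the data freed, at the end of this statement.
 */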
171 ctf_fs_msg_iter_data::UP {
172 (static_cast<ctf_fs_msg_iter_data *>(bt_self_message_iterator_get_data(it)))};
173 }
174
175 static bt_message_iterator_class_initialize_method_status
176 ctf_msg_iter_medium_status_to_msg_iter_initialize_status(enum ctf_msg_iter_medium_status status)
177 {
178 switch (status) {
179 case CTF_MSG_ITER_MEDIUM_STATUS_EOF:
180 case CTF_MSG_ITER_MEDIUM_STATUS_AGAIN:
181 case CTF_MSG_ITER_MEDIUM_STATUS_ERROR:
182 return BT_MESSAGE_ITERATOR_CLASS_INITIALIZE_METHOD_STATUS_ERROR;
183 case CTF_MSG_ITER_MEDIUM_STATUS_MEMORY_ERROR:
184 return BT_MESSAGE_ITERATOR_CLASS_INITIALIZE_METHOD_STATUS_MEMORY_ERROR;
185 case CTF_MSG_ITER_MEDIUM_STATUS_OK:
186 return BT_MESSAGE_ITERATOR_CLASS_INITIALIZE_METHOD_STATUS_OK;
187 }
188
189 bt_common_abort();
190 }
191
192 bt_message_iterator_class_initialize_method_status
193 ctf_fs_iterator_init(bt_self_message_iterator *self_msg_iter,
194 bt_self_message_iterator_configuration *config,
195 bt_self_component_port_output *self_port)
196 {
197 try {
198 ctf_fs_port_data *port_data = (struct ctf_fs_port_data *) bt_self_component_port_get_data(
199 bt_self_component_port_output_as_self_component_port(self_port));
200 BT_ASSERT(port_data);
201
202 auto msg_iter_data = bt2s::make_unique<ctf_fs_msg_iter_data>(self_msg_iter);
203 msg_iter_data->ds_file_group = port_data->ds_file_group;
204
205 ctf_msg_iter_medium_status medium_status = ctf_fs_ds_group_medops_data_create(
206 msg_iter_data->ds_file_group, self_msg_iter, msg_iter_data->logger,
207 msg_iter_data->msg_iter_medops_data);
208 BT_ASSERT(medium_status == CTF_MSG_ITER_MEDIUM_STATUS_OK ||
209 medium_status == CTF_MSG_ITER_MEDIUM_STATUS_ERROR ||
210 medium_status == CTF_MSG_ITER_MEDIUM_STATUS_MEMORY_ERROR);
211 if (medium_status != CTF_MSG_ITER_MEDIUM_STATUS_OK) {
212 BT_CPPLOGE_APPEND_CAUSE_SPEC(msg_iter_data->logger,
213 "Failed to create ctf_fs_ds_group_medops");
214 return ctf_msg_iter_medium_status_to_msg_iter_initialize_status(medium_status);
215 }
216
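/*
 * Create the CTF message iterator itself, using the data stream file
 * group medium operations and a maximum medium request size of eight
 * pages.
 */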
217 msg_iter_data->msg_iter = ctf_msg_iter_create(
218 msg_iter_data->ds_file_group->ctf_fs_trace->metadata->tc,
219 bt_common_get_page_size(static_cast<int>(msg_iter_data->logger.level())) * 8,
220 ctf_fs_ds_group_medops, msg_iter_data->msg_iter_medops_data.get(), self_msg_iter,
221 msg_iter_data->logger);
222 if (!msg_iter_data->msg_iter) {
223 BT_CPPLOGE_APPEND_CAUSE_SPEC(msg_iter_data->logger,
224 "Cannot create a CTF message iterator.");
225 return BT_MESSAGE_ITERATOR_CLASS_INITIALIZE_METHOD_STATUS_MEMORY_ERROR;
226 }
227
228 /*
229 * This iterator can seek forward if its stream class has a default
230 * clock class.
231 */
232 if (msg_iter_data->ds_file_group->sc->default_clock_class) {
233 bt_self_message_iterator_configuration_set_can_seek_forward(config, true);
234 }
235
236 bt_self_message_iterator_set_data(self_msg_iter, msg_iter_data.release());
237
238 return BT_MESSAGE_ITERATOR_CLASS_INITIALIZE_METHOD_STATUS_OK;
239 } catch (const std::bad_alloc&) {
240 return BT_MESSAGE_ITERATOR_CLASS_INITIALIZE_METHOD_STATUS_MEMORY_ERROR;
241 } catch (const bt2::Error&) {
242 return BT_MESSAGE_ITERATOR_CLASS_INITIALIZE_METHOD_STATUS_ERROR;
243 }
244 }
245
246 void ctf_fs_finalize(bt_self_component_source *component)
247 {
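/* Same idiom as ctf_fs_iterator_finalize(): the temporary unique_ptr frees the component's private data. */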
248 ctf_fs_component::UP {static_cast<ctf_fs_component *>(
249 bt_self_component_get_data(bt_self_component_source_as_self_component(component)))};
250 }
251
252 std::string ctf_fs_make_port_name(ctf_fs_ds_file_group *ds_file_group)
253 {
254 std::stringstream name;
255
256 /*
257 * The unique port name is generated by concatenating unique identifiers
258 * for:
259 *
260 * - the trace
261 * - the stream class
262 * - the stream
263 */
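/*
 * For example, with hypothetical values, this yields port names such as
 * `21cc4eb4-1b69-4d4d-9b38-8e3d12c959e0 | 0 | 2` for a trace with a
 * UUID, or `/path/to/trace | /path/to/trace/channel0_3` for a trace
 * without a UUID, a single stream class and no stream ID.
 */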
264
265 /* For the trace, use the uuid if present, else the path. */
266 if (ds_file_group->ctf_fs_trace->metadata->tc->is_uuid_set) {
267 char uuid_str[BT_UUID_STR_LEN + 1];
268
269 bt_uuid_to_str(ds_file_group->ctf_fs_trace->metadata->tc->uuid, uuid_str);
270 name << uuid_str;
271 } else {
272 name << ds_file_group->ctf_fs_trace->path;
273 }
274
275 /*
276 * For the stream class, use the id if present. We can omit this field
277 * otherwise, as there will only be a single stream class.
278 */
279 if (ds_file_group->sc->id != UINT64_C(-1)) {
280 name << " | " << ds_file_group->sc->id;
281 }
282
283 /* For the stream, use the id if present, else, use the path. */
284 if (ds_file_group->stream_id != UINT64_C(-1)) {
285 name << " | " << ds_file_group->stream_id;
286 } else {
287 BT_ASSERT(ds_file_group->ds_file_infos.size() == 1);
288 const auto& ds_file_info = *ds_file_group->ds_file_infos[0];
289 name << " | " << ds_file_info.path;
290 }
291
292 return name.str();
293 }
294
295 static int create_one_port_for_trace(struct ctf_fs_component *ctf_fs,
296 struct ctf_fs_ds_file_group *ds_file_group,
297 bt_self_component_source *self_comp_src)
298 {
299 const auto port_name = ctf_fs_make_port_name(ds_file_group);
300 auto port_data = bt2s::make_unique<ctf_fs_port_data>();
301
302 BT_CPPLOGI_SPEC(ctf_fs->logger, "Creating one port named `{}`", port_name);
303
304 port_data->ctf_fs = ctf_fs;
305 port_data->ds_file_group = ds_file_group;
306
307 int ret = bt_self_component_source_add_output_port(self_comp_src, port_name.c_str(),
308 port_data.get(), NULL);
309 if (ret) {
310 return ret;
311 }
312
313 ctf_fs->port_data.emplace_back(std::move(port_data));
314 return 0;
315 }
316
317 static int create_ports_for_trace(struct ctf_fs_component *ctf_fs,
318 struct ctf_fs_trace *ctf_fs_trace,
319 bt_self_component_source *self_comp_src)
320 {
321 /* Create one output port for each stream file group */
322 for (const auto& ds_file_group : ctf_fs_trace->ds_file_groups) {
323 int ret = create_one_port_for_trace(ctf_fs, ds_file_group.get(), self_comp_src);
324 if (ret) {
325 BT_CPPLOGE_APPEND_CAUSE_SPEC(ctf_fs->logger, "Cannot create output port.");
326 return ret;
327 }
328 }
329
330 return 0;
331 }
332
333 static bool ds_index_entries_equal(const ctf_fs_ds_index_entry& left,
334 const ctf_fs_ds_index_entry& right)
335 {
336 if (left.packetSize != right.packetSize) {
337 return false;
338 }
339
340 if (left.timestamp_begin != right.timestamp_begin) {
341 return false;
342 }
343
344 if (left.timestamp_end != right.timestamp_end) {
345 return false;
346 }
347
348 if (left.packet_seq_num != right.packet_seq_num) {
349 return false;
350 }
351
352 return true;
353 }
354
355 /*
356 * Insert `entry` into `index`, without duplication.
357 *
358 * The entry is inserted only if there isn't an identical entry already.
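*
* Entries are kept sorted by `timestamp_begin_ns`; a linear scan finds
* the insertion point.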
359 */
360
361 static void ds_index_insert_ds_index_entry_sorted(ctf_fs_ds_index& index,
362 const ctf_fs_ds_index_entry& entry)
363 {
364 /* Find the spot where to insert this index entry. */
365 auto otherEntry = index.entries.begin();
366 for (; otherEntry != index.entries.end(); ++otherEntry) {
367 if (entry.timestamp_begin_ns <= otherEntry->timestamp_begin_ns) {
368 break;
369 }
370 }
371
372 /*
373 * Insert the entry only if a duplicate doesn't already exist.
374 *
375 * There can be duplicate packets if reading multiple overlapping
376 * snapshots of the same trace. We then want the index to contain
377 * a reference to only one copy of that packet.
378 */
379 if (otherEntry == index.entries.end() || !ds_index_entries_equal(entry, *otherEntry)) {
380 index.entries.emplace(otherEntry, entry);
381 }
382 }
383
384 static void merge_ctf_fs_ds_indexes(ctf_fs_ds_index& dest, const ctf_fs_ds_index& src)
385 {
386 for (const auto& entry : src.entries) {
387 ds_index_insert_ds_index_entry_sorted(dest, entry);
388 }
389 }
390
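/*
 * Read the first packet of the data stream file at `path` to get its
 * stream class, stream instance ID and beginning timestamp, build its
 * packet index, then add it to the matching data stream file group of
 * `ctf_fs_trace`, creating a new group if none matches.
 */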
391 static int add_ds_file_to_ds_file_group(struct ctf_fs_trace *ctf_fs_trace, const char *path)
392 {
393 /*
394 * Create a temporary ds_file to read some properties about the data
395 * stream file.
396 */
397 const auto ds_file =
398 ctf_fs_ds_file_create(ctf_fs_trace, bt2::Stream::Shared {}, path, ctf_fs_trace->logger);
399 if (!ds_file) {
400 return -1;
401 }
402
403 /* Create a temporary iterator to read the ds_file. */
404 ctf_msg_iter_up msg_iter = ctf_msg_iter_create(
405 ctf_fs_trace->metadata->tc,
406 bt_common_get_page_size(static_cast<int>(ctf_fs_trace->logger.level())) * 8,
407 ctf_fs_ds_file_medops, ds_file.get(), nullptr, ctf_fs_trace->logger);
408 if (!msg_iter) {
409 BT_CPPLOGE_STR_SPEC(ctf_fs_trace->logger, "Cannot create a CTF message iterator.");
410 return -1;
411 }
412
413 ctf_msg_iter_set_dry_run(msg_iter.get(), true);
414
415 ctf_msg_iter_packet_properties props;
416 int ret = ctf_msg_iter_get_packet_properties(msg_iter.get(), &props);
417 if (ret) {
418 BT_CPPLOGE_APPEND_CAUSE_SPEC(
419 ctf_fs_trace->logger,
420 "Cannot get stream file's first packet's header and context fields (`{}`).", path);
421 return ret;
422 }
423
424 ctf_stream_class *sc =
425 ctf_trace_class_borrow_stream_class_by_id(ds_file->metadata->tc, props.stream_class_id);
426 BT_ASSERT(sc);
427 int64_t stream_instance_id = props.data_stream_id;
428 int64_t begin_ns = -1;
429
430 if (props.snapshots.beginning_clock != UINT64_C(-1)) {
431 BT_ASSERT(sc->default_clock_class);
432 ret = bt_util_clock_cycles_to_ns_from_origin(
433 props.snapshots.beginning_clock, sc->default_clock_class->frequency,
434 sc->default_clock_class->offset_seconds, sc->default_clock_class->offset_cycles,
435 &begin_ns);
436 if (ret) {
437 BT_CPPLOGE_APPEND_CAUSE_SPEC(
438 ctf_fs_trace->logger,
439 "Cannot convert clock cycles to nanoseconds from origin (`{}`).", path);
440 return ret;
441 }
442 }
443
444 ctf_fs_ds_file_info::UP ds_file_info = ctf_fs_ds_file_info_create(path, begin_ns);
445 if (!ds_file_info) {
446 return -1;
447 }
448
449 auto index = ctf_fs_ds_file_build_index(ds_file.get(), ds_file_info.get(), msg_iter.get());
450 if (!index) {
451 BT_CPPLOGE_APPEND_CAUSE_SPEC(ctf_fs_trace->logger, "Failed to index CTF stream file \'{}\'",
452 ds_file->file->path);
453 return -1;
454 }
455
456 if (begin_ns == -1) {
457 /*
458 * No beginning timestamp to sort the stream files
459 * within a stream file group, so consider that this
460 * file must be the only one within its group.
461 */
462 stream_instance_id = -1;
463 }
464
465 if (stream_instance_id == -1) {
466 /*
467 * No stream instance ID or no beginning timestamp:
468 * create a unique stream file group for this stream
469 * file because, even if there's a stream instance ID,
470 * there's no timestamp to order the file within its
471 * group.
472 */
473 auto new_ds_file_group =
474 ctf_fs_ds_file_group_create(ctf_fs_trace, sc, UINT64_C(-1), std::move(*index));
475
476 if (!new_ds_file_group) {
477 return -1;
478 }
479
480 new_ds_file_group->insert_ds_file_info_sorted(std::move(ds_file_info));
481 ctf_fs_trace->ds_file_groups.emplace_back(std::move(new_ds_file_group));
482 return 0;
483 }
484
485 BT_ASSERT(stream_instance_id != -1);
486 BT_ASSERT(begin_ns != -1);
487
488 /* Find an existing stream file group with this ID */
489 ctf_fs_ds_file_group *ds_file_group = NULL;
490 for (const auto& candidate : ctf_fs_trace->ds_file_groups) {
491 if (candidate->sc == sc && candidate->stream_id == stream_instance_id) {
492 ds_file_group = candidate.get();
493 break;
494 }
495 }
496
497 ctf_fs_ds_file_group::UP new_ds_file_group;
498
499 if (!ds_file_group) {
500 new_ds_file_group =
501 ctf_fs_ds_file_group_create(ctf_fs_trace, sc, stream_instance_id, std::move(*index));
502 if (!new_ds_file_group) {
503 return -1;
504 }
505
506 ds_file_group = new_ds_file_group.get();
507 ctf_fs_trace->ds_file_groups.emplace_back(std::move(new_ds_file_group));
508 } else {
509 merge_ctf_fs_ds_indexes(ds_file_group->index, *index);
510 }
511
512 ds_file_group->insert_ds_file_info_sorted(std::move(ds_file_info));
513
514 return 0;
515 }
516
517 static int create_ds_file_groups(struct ctf_fs_trace *ctf_fs_trace)
518 {
519 /* Check each file in the path directory, except specific ones */
520 GError *error = NULL;
521 const bt2c::GDirUP dir {g_dir_open(ctf_fs_trace->path.c_str(), 0, &error)};
522 if (!dir) {
523 BT_CPPLOGE_APPEND_CAUSE_SPEC(ctf_fs_trace->logger,
524 "Cannot open directory `{}`: {} (code {})", ctf_fs_trace->path,
525 error->message, error->code);
526 if (error) {
527 g_error_free(error);
528 }
529 return -1;
530 }
531
532 while (const char *basename = g_dir_read_name(dir.get())) {
533 if (strcmp(basename, CTF_FS_METADATA_FILENAME) == 0) {
534 /* Ignore the metadata stream. */
535 BT_CPPLOGI_SPEC(ctf_fs_trace->logger,
536 "Ignoring metadata file `{}" G_DIR_SEPARATOR_S "{}`",
537 ctf_fs_trace->path, basename);
538 continue;
539 }
540
541 if (basename[0] == '.') {
542 BT_CPPLOGI_SPEC(ctf_fs_trace->logger,
543 "Ignoring hidden file `{}" G_DIR_SEPARATOR_S "{}`", ctf_fs_trace->path,
544 basename);
545 continue;
546 }
547
548 /* Create the file. */
549 ctf_fs_file file {ctf_fs_trace->logger};
550
551 /* Create full path string. */
552 file.path = fmt::format("{}" G_DIR_SEPARATOR_S "{}", ctf_fs_trace->path, basename);
553
554 if (!g_file_test(file.path.c_str(), G_FILE_TEST_IS_REGULAR)) {
555 BT_CPPLOGI_SPEC(ctf_fs_trace->logger, "Ignoring non-regular file `{}`", file.path);
556 continue;
557 }
558
559 int ret = ctf_fs_file_open(&file, "rb");
560 if (ret) {
561 BT_CPPLOGE_APPEND_CAUSE_SPEC(ctf_fs_trace->logger, "Cannot open stream file `{}`",
562 file.path);
563 return ret;
564 }
565
566 if (file.size == 0) {
567 /* Skip empty stream. */
568 BT_CPPLOGI_SPEC(ctf_fs_trace->logger, "Ignoring empty file `{}`", file.path);
569 continue;
570 }
571
572 ret = add_ds_file_to_ds_file_group(ctf_fs_trace, file.path.c_str());
573 if (ret) {
574 BT_CPPLOGE_APPEND_CAUSE_SPEC(ctf_fs_trace->logger,
575 "Cannot add stream file `{}` to stream file group",
576 file.path);
577 return ret;
578 }
579 }
580
581 return 0;
582 }
583
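/*
 * Set the name of `trace` from its `hostname` environment entry (if
 * any) and `name_suffix` (if any). For example, with hypothetical
 * values `myhost` and `my-session`, the resulting name is
 * `myhost/my-session` (using the platform's directory separator).
 */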
584 static void set_trace_name(const bt2::Trace trace, const char *name_suffix)
585 {
586 std::string name;
587
588 /*
589 * Check if we have a trace environment string value named `hostname`.
590 * If so, use it as the trace name's prefix.
591 */
592 const auto val = trace.environmentEntry("hostname");
593 if (val && val->isString()) {
594 name += val->asString().value();
595
596 if (name_suffix) {
597 name += G_DIR_SEPARATOR;
598 }
599 }
600
601 if (name_suffix) {
602 name += name_suffix;
603 }
604
605 trace.name(name);
606 }
607
608 static ctf_fs_trace::UP ctf_fs_trace_create(const char *path, const char *name,
609 const ctf::src::ClkClsCfg& clkClsCfg,
610 bt_self_component *selfComp,
611 const bt2c::Logger& parentLogger)
612 {
613 ctf_fs_trace::UP ctf_fs_trace = bt2s::make_unique<struct ctf_fs_trace>(parentLogger);
614 ctf_fs_trace->path = path;
615 ctf_fs_trace->metadata = bt2s::make_unique<ctf_fs_metadata>();
616
617 int ret = ctf_fs_metadata_set_trace_class(selfComp, ctf_fs_trace.get(), clkClsCfg);
618 if (ret) {
619 return nullptr;
620 }
621
622 if (ctf_fs_trace->metadata->trace_class) {
623 bt_trace *trace = bt_trace_create(ctf_fs_trace->metadata->trace_class->libObjPtr());
624 if (!trace) {
625 return nullptr;
626 }
627
628 ctf_fs_trace->trace = bt2::Trace::Shared::createWithoutRef(trace);
629 }
630
631 if (ctf_fs_trace->trace) {
632 ctf_trace_class_configure_ir_trace(ctf_fs_trace->metadata->tc, *ctf_fs_trace->trace);
633
634 set_trace_name(*ctf_fs_trace->trace, name);
635 }
636
637 ret = create_ds_file_groups(ctf_fs_trace.get());
638 if (ret) {
639 return nullptr;
640 }
641
642 return ctf_fs_trace;
643 }
644
645 static int path_is_ctf_trace(const char *path)
646 {
647 return g_file_test(fmt::format("{}" G_DIR_SEPARATOR_S CTF_FS_METADATA_FILENAME, path).c_str(),
648 G_FILE_TEST_IS_REGULAR);
649 }
650
651 /* Helper for ctf_fs_component_create_ctf_fs_trace, to handle a single path. */
652
653 static int ctf_fs_component_create_ctf_fs_trace_one_path(struct ctf_fs_component *ctf_fs,
654 const char *path_param,
655 const char *trace_name,
656 std::vector<ctf_fs_trace::UP>& traces,
657 bt_self_component *selfComp)
658 {
659 bt2c::GStringUP norm_path {bt_common_normalize_path(path_param, NULL)};
660 if (!norm_path) {
661 BT_CPPLOGE_APPEND_CAUSE_SPEC(ctf_fs->logger, "Failed to normalize path: `{}`.", path_param);
662 return -1;
663 }
664
665 int ret = path_is_ctf_trace(norm_path->str);
666 if (ret < 0) {
667 BT_CPPLOGE_APPEND_CAUSE_SPEC(
668 ctf_fs->logger, "Failed to check if path is a CTF trace: path={}", norm_path->str);
669 return ret;
670 } else if (ret == 0) {
671 BT_CPPLOGE_APPEND_CAUSE_SPEC(
672 ctf_fs->logger, "Path is not a CTF trace (does not contain a metadata file): `{}`.",
673 norm_path->str);
674 return -1;
675 }
676
677 // FIXME: Remove or ifdef for __MINGW32__
678 if (strcmp(norm_path->str, "/") == 0) {
679 BT_CPPLOGE_APPEND_CAUSE_SPEC(ctf_fs->logger, "Opening a trace in `/` is not supported.");
680 return -1;
681 }
682
683 ctf_fs_trace::UP ctf_fs_trace = ctf_fs_trace_create(
684 norm_path->str, trace_name, ctf_fs->clkClsCfg, selfComp, ctf_fs->logger);
685 if (!ctf_fs_trace) {
686 BT_CPPLOGE_APPEND_CAUSE_SPEC(ctf_fs->logger, "Cannot create trace for `{}`.",
687 norm_path->str);
688 return -1;
689 }
690
691 traces.emplace_back(std::move(ctf_fs_trace));
692
693 return 0;
694 }
695
696 /*
697 * Count the number of stream and event classes defined by this trace's metadata.
698 *
699 * This is used to determine which metadata is the "latest", out of multiple
700 * traces sharing the same UUID. It is assumed that amongst all these metadatas,
701 * a bigger metadata is a superset of a smaller metadata. Therefore, it is
702 * enough to just count the classes.
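*
* For example, a trace class with two stream classes defining 10 and 5
* event classes respectively counts as 2 + 10 + 5 = 17.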
703 */
704
705 static unsigned int metadata_count_stream_and_event_classes(struct ctf_fs_trace *trace)
706 {
707 unsigned int num = trace->metadata->tc->stream_classes->len;
708
709 for (guint i = 0; i < trace->metadata->tc->stream_classes->len; i++) {
710 struct ctf_stream_class *sc =
711 (struct ctf_stream_class *) trace->metadata->tc->stream_classes->pdata[i];
712 num += sc->event_classes->len;
713 }
714
715 return num;
716 }
717
718 /*
719 * Merge the src ds_file_group into dest. This consists of merging their
720 * ds_file_infos, making sure to keep the result sorted.
721 */
722
723 static void merge_ctf_fs_ds_file_groups(struct ctf_fs_ds_file_group *dest,
724 ctf_fs_ds_file_group::UP src)
725 {
726 for (auto& ds_file_info : src->ds_file_infos) {
727 dest->insert_ds_file_info_sorted(std::move(ds_file_info));
728 }
729
730 /* Merge both indexes. */
731 merge_ctf_fs_ds_indexes(dest->index, src->index);
732 }
733
734 /* Merge src_trace's data stream file groups into dest_trace's. */
735
736 static int merge_matching_ctf_fs_ds_file_groups(struct ctf_fs_trace *dest_trace,
737 ctf_fs_trace::UP src_trace)
738 {
739 std::vector<ctf_fs_ds_file_group::UP>& dest = dest_trace->ds_file_groups;
740 std::vector<ctf_fs_ds_file_group::UP>& src = src_trace->ds_file_groups;
741
742 /*
743 * Save the initial length of dest: we only want to check against the
744 * original elements in the inner loop.
745 */
746 size_t dest_len = dest.size();
747
748 for (auto& src_group : src) {
749 struct ctf_fs_ds_file_group *dest_group = NULL;
750
751 /* A stream instance without ID can't match a stream in the other trace. */
752 if (src_group->stream_id != -1) {
753 /* Let's search for a matching ds_file_group in the destination. */
754 for (size_t d_i = 0; d_i < dest_len; ++d_i) {
755 ctf_fs_ds_file_group *candidate_dest = dest[d_i].get();
756
757 /* Can't match a stream instance without ID. */
758 if (candidate_dest->stream_id == -1) {
759 continue;
760 }
761
762 /*
763 * If the two groups have the same stream instance id
764 * and belong to the same stream class (stream instance
765 * ids are per-stream class), they represent the same
766 * stream instance.
767 */
768 if (candidate_dest->stream_id != src_group->stream_id ||
769 candidate_dest->sc->id != src_group->sc->id) {
770 continue;
771 }
772
773 dest_group = candidate_dest;
774 break;
775 }
776 }
777
778 /*
779 * Didn't find a friend in dest to merge our src_group into?
780 * Create a new empty one. This can happen if a stream was
781 * active in the source trace chunk but not in the destination
782 * trace chunk.
783 */
784 if (!dest_group) {
785 ctf_stream_class *sc = ctf_trace_class_borrow_stream_class_by_id(
786 dest_trace->metadata->tc, src_group->sc->id);
787 BT_ASSERT(sc);
788
789 auto new_dest_group =
790 ctf_fs_ds_file_group_create(dest_trace, sc, src_group->stream_id, {});
791
792 if (!new_dest_group) {
793 return -1;
794 }
795
796 dest_group = new_dest_group.get();
797 dest_trace->ds_file_groups.emplace_back(std::move(new_dest_group));
798 }
799
800 BT_ASSERT(dest_group);
801 merge_ctf_fs_ds_file_groups(dest_group, std::move(src_group));
802 }
803
804 return 0;
805 }
806
807 /*
808 * Collapse the given traces, which must all share the same UUID, into a
809 * single trace.
810 *
811 * The trace with the most expansive metadata is chosen and all other traces
812 * are merged into that one. On return, the elements of `traces` are nullptr
813 * and the merged trace is placed in `out_trace`.
814 */
815
816 static int merge_ctf_fs_traces(std::vector<ctf_fs_trace::UP> traces, ctf_fs_trace::UP& out_trace)
817 {
818 BT_ASSERT(traces.size() >= 2);
819
820 unsigned int winner_count = metadata_count_stream_and_event_classes(traces[0].get());
821 ctf_fs_trace *winner = traces[0].get();
822 guint winner_i = 0;
823
824 /* Find the trace with the largest metadata. */
825 for (guint i = 1; i < traces.size(); i++) {
826 ctf_fs_trace *candidate = traces[i].get();
827 unsigned int candidate_count;
828
829 /* A bit of sanity check. */
830 BT_ASSERT(bt_uuid_compare(winner->metadata->tc->uuid, candidate->metadata->tc->uuid) == 0);
831
832 candidate_count = metadata_count_stream_and_event_classes(candidate);
833
834 if (candidate_count > winner_count) {
835 winner_count = candidate_count;
836 winner = candidate;
837 winner_i = i;
838 }
839 }
840
841 /* Merge all the other traces in the winning trace. */
842 for (ctf_fs_trace::UP& trace : traces) {
843 /* Don't merge the winner into itself. */
844 if (trace.get() == winner) {
845 continue;
846 }
847
848 /* Merge trace's data stream file groups into winner's. */
849 int ret = merge_matching_ctf_fs_ds_file_groups(winner, std::move(trace));
850 if (ret) {
851 return ret;
852 }
853 }
854
855 /*
856 * Move the winner out of the array, into `*out_trace`.
857 */
858 out_trace = std::move(traces[winner_i]);
859
860 return 0;
861 }
862
863 enum target_event
864 {
865 FIRST_EVENT,
866 LAST_EVENT,
867 };
868
869 static int decode_clock_snapshot_after_event(struct ctf_fs_trace *ctf_fs_trace,
870 struct ctf_clock_class *default_cc,
871 const ctf_fs_ds_index_entry& index_entry,
872 enum target_event target_event, uint64_t *cs,
873 int64_t *ts_ns)
874 {
875 BT_ASSERT(ctf_fs_trace);
876 BT_ASSERT(index_entry.path);
877
878 const auto ds_file = ctf_fs_ds_file_create(ctf_fs_trace, bt2::Stream::Shared {},
879 index_entry.path, ctf_fs_trace->logger);
880 if (!ds_file) {
881 BT_CPPLOGE_APPEND_CAUSE_SPEC(ctf_fs_trace->logger, "Failed to create a ctf_fs_ds_file");
882 return -1;
883 }
884
885 BT_ASSERT(ctf_fs_trace->metadata);
886 BT_ASSERT(ctf_fs_trace->metadata->tc);
887
888 ctf_msg_iter_up msg_iter = ctf_msg_iter_create(
889 ctf_fs_trace->metadata->tc,
890 bt_common_get_page_size(static_cast<int>(ctf_fs_trace->logger.level())) * 8,
891
892 ctf_fs_ds_file_medops, ds_file.get(), NULL, ctf_fs_trace->logger);
893 if (!msg_iter) {
894 /* ctf_msg_iter_create() logs errors. */
895 return -1;
896 }
897
898 /*
899 * Turn on dry run mode to prevent the creation and usage of Babeltrace
900 * library objects (bt_field, bt_message_*, etc.).
901 */
902 ctf_msg_iter_set_dry_run(msg_iter.get(), true);
903
904 /* Seek to the beginning of the target packet. */
905 enum ctf_msg_iter_status iter_status =
906 ctf_msg_iter_seek(msg_iter.get(), index_entry.offset.bytes());
907 if (iter_status) {
908 /* ctf_msg_iter_seek() logs errors. */
909 return -1;
910 }
911
912 switch (target_event) {
913 case FIRST_EVENT:
914 /*
915 * Decode the packet until we reach the end of the first
916 * event, in order to extract the first event's clock
917 * snapshot.
918 */
919 iter_status = ctf_msg_iter_curr_packet_first_event_clock_snapshot(msg_iter.get(), cs);
920 break;
921 case LAST_EVENT:
922 /* Decode the packet to extract the last event's clock snapshot. */
923 iter_status = ctf_msg_iter_curr_packet_last_event_clock_snapshot(msg_iter.get(), cs);
924 break;
925 default:
926 bt_common_abort();
927 }
928 if (iter_status) {
929 return -1;
930 }
931
932 /* Convert clock snapshot to timestamp. */
933 int ret = bt_util_clock_cycles_to_ns_from_origin(
934 *cs, default_cc->frequency, default_cc->offset_seconds, default_cc->offset_cycles, ts_ns);
935 if (ret) {
936 BT_CPPLOGE_APPEND_CAUSE_SPEC(ctf_fs_trace->logger,
937 "Failed to convert clock snapshot to timestamp");
938 return ret;
939 }
940
941 return 0;
942 }
943
944 static int decode_packet_first_event_timestamp(struct ctf_fs_trace *ctf_fs_trace,
945 struct ctf_clock_class *default_cc,
946 const ctf_fs_ds_index_entry& index_entry,
947 uint64_t *cs, int64_t *ts_ns)
948 {
949 return decode_clock_snapshot_after_event(ctf_fs_trace, default_cc, index_entry, FIRST_EVENT, cs,
950 ts_ns);
951 }
952
953 static int decode_packet_last_event_timestamp(struct ctf_fs_trace *ctf_fs_trace,
954 struct ctf_clock_class *default_cc,
955 const ctf_fs_ds_index_entry& index_entry,
956 uint64_t *cs, int64_t *ts_ns)
957 {
958 return decode_clock_snapshot_after_event(ctf_fs_trace, default_cc, index_entry, LAST_EVENT, cs,
959 ts_ns);
960 }
961
962 /*
963 * Fix up packet index entries for lttng's "event-after-packet" bug.
964 * Some buggy lttng tracer versions may emit events with a timestamp that is
965 * larger (after) than the timestamp_end of their packets.
966 *
967 * To fix up this erroneous data we do the following:
968 * 1. If it's not the stream file's last packet: set the packet index entry's
969 * end time to the next packet's beginning time.
970 * 2. If it's the stream file's last packet, set the packet index entry's end
971 * time to the packet's last event's time, if any, or to the packet's
972 * beginning time otherwise.
973 *
974 * Known buggy tracer versions:
975 * - before lttng-ust 2.11.0
976 * - before lttng-module 2.11.0
977 * - before lttng-module 2.10.10
978 * - before lttng-module 2.9.13
979 */
980 static int fix_index_lttng_event_after_packet_bug(struct ctf_fs_trace *trace)
981 {
982 for (const auto& ds_file_group : trace->ds_file_groups) {
983 BT_ASSERT(ds_file_group);
984 auto& index = ds_file_group->index;
985
986 BT_ASSERT(!index.entries.empty());
987
988 /*
989 * Iterate over all entries but the last one. The last one is
990 * fixed differently after.
991 */
992 for (size_t entry_i = 0; entry_i < index.entries.size() - 1; ++entry_i) {
993 auto& curr_entry = index.entries[entry_i];
994 const auto& next_entry = index.entries[entry_i + 1];
995
996 /*
997 * 1. Set the current index entry `end` timestamp to
998 * the next index entry `begin` timestamp.
999 */
1000 curr_entry.timestamp_end = next_entry.timestamp_begin;
1001 curr_entry.timestamp_end_ns = next_entry.timestamp_begin_ns;
1002 }
1003
1004 /*
1005 * 2. Fix the last entry by decoding the last event of the last
1006 * packet.
1007 */
1008 auto& last_entry = index.entries.back();
1009
1010 BT_ASSERT(ds_file_group->sc->default_clock_class);
1011 ctf_clock_class *default_cc = ds_file_group->sc->default_clock_class;
1012
1013 /*
1014 * Decode packet to read the timestamp of the last event of the
1015 * entry.
1016 */
1017 int ret = decode_packet_last_event_timestamp(
1018 trace, default_cc, last_entry, &last_entry.timestamp_end, &last_entry.timestamp_end_ns);
1019 if (ret) {
1020 BT_CPPLOGE_APPEND_CAUSE_SPEC(
1021 trace->logger,
1022 "Failed to decode stream's last packet to get its last event's clock snapshot.");
1023 return ret;
1024 }
1025 }
1026
1027 return 0;
1028 }
1029
1030 /*
1031 * Fix up packet index entries for barectf's "event-before-packet" bug.
1032 * Some buggy barectf tracer versions may emit events with a timestamp that is
1033 * less than the timestamp_begin of their packets.
1034 *
1035 * To fix up this erroneous data we do the following:
1036 * 1. Starting at the second index entry, set the timestamp_begin of the
1037 * current entry to the timestamp of the first event of the packet.
1038 * 2. Set the previous entry's timestamp_end to the timestamp_begin of the
1039 * current packet.
1040 *
1041 * Known buggy tracer versions:
1042 * - before barectf 2.3.1
1043 */
1044 static int fix_index_barectf_event_before_packet_bug(struct ctf_fs_trace *trace)
1045 {
1046 for (const auto& ds_file_group : trace->ds_file_groups) {
1047 auto& index = ds_file_group->index;
1048
1049 BT_ASSERT(!index.entries.empty());
1050
1051 BT_ASSERT(ds_file_group->sc->default_clock_class);
1052 ctf_clock_class *default_cc = ds_file_group->sc->default_clock_class;
1053
1054 /*
1055 * 1. Iterate over the index, starting from the second entry
1056 * (index = 1).
1057 */
1058 for (size_t entry_i = 1; entry_i < index.entries.size(); ++entry_i) {
1059 auto& prev_entry = index.entries[entry_i - 1];
1060 auto& curr_entry = index.entries[entry_i];
1061 /*
1062 * 2. Set the current entry `begin` timestamp to the
1063 * timestamp of the first event of the current packet.
1064 */
1065 int ret = decode_packet_first_event_timestamp(trace, default_cc, curr_entry,
1066 &curr_entry.timestamp_begin,
1067 &curr_entry.timestamp_begin_ns);
1068 if (ret) {
1069 BT_CPPLOGE_APPEND_CAUSE_SPEC(trace->logger,
1070 "Failed to decode first event's clock snapshot");
1071 return ret;
1072 }
1073
1074 /*
1075 * 3. Set the previous entry `end` timestamp to the
1076 * timestamp of the first event of the current packet.
1077 */
1078 prev_entry.timestamp_end = curr_entry.timestamp_begin;
1079 prev_entry.timestamp_end_ns = curr_entry.timestamp_begin_ns;
1080 }
1081 }
1082
1083 return 0;
1084 }
1085
1086 /*
1087 * When using the lttng-crash feature it's likely that the last packets of each
1088 * stream have their timestamp_end set to zero. This is caused by the fact that
1089 * the tracer crashed and was not able to properly close the packets.
1090 *
1091 * To fix up this erroneous data we do the following:
1092 * For each index entry, if the entry's timestamp_end is 0 and the
1093 * timestamp_begin is not 0:
1094 * - If it's the stream file's last packet: set the packet index entry's end
1095 * time to the packet's last event's time, if any, or to the packet's
1096 * beginning time otherwise.
1097 * - If it's not the stream file's last packet: set the packet index
1098 * entry's end time to the next packet's beginning time.
1099 *
1100 * Affected versions:
1101 * - All current and future lttng-ust and lttng-modules versions.
1102 */
1103 static int fix_index_lttng_crash_quirk(struct ctf_fs_trace *trace)
1104 {
1105 for (const auto& ds_file_group : trace->ds_file_groups) {
1106 struct ctf_clock_class *default_cc;
1107
1108 BT_ASSERT(ds_file_group);
1109 auto& index = ds_file_group->index;
1110
1111 BT_ASSERT(ds_file_group->sc->default_clock_class);
1112 default_cc = ds_file_group->sc->default_clock_class;
1113
1114 BT_ASSERT(!index.entries.empty());
1115
1116 auto& last_entry = index.entries.back();
1117
1118 /* 1. Fix the last entry first. */
1119 if (last_entry.timestamp_end == 0 && last_entry.timestamp_begin != 0) {
1120 /*
1121 * Decode packet to read the timestamp of the
1122 * last event of the stream file.
1123 */
1124 int ret = decode_packet_last_event_timestamp(trace, default_cc, last_entry,
1125 &last_entry.timestamp_end,
1126 &last_entry.timestamp_end_ns);
1127 if (ret) {
1128 BT_CPPLOGE_APPEND_CAUSE_SPEC(trace->logger,
1129 "Failed to decode last event's clock snapshot");
1130 return ret;
1131 }
1132 }
1133
1134 /* Iterate over all entries but the last one. */
1135 for (size_t entry_idx = 0; entry_idx < index.entries.size() - 1; ++entry_idx) {
1136 auto& curr_entry = index.entries[entry_idx];
1137 const auto& next_entry = index.entries[entry_idx + 1];
1138
1139 if (curr_entry.timestamp_end == 0 && curr_entry.timestamp_begin != 0) {
1140 /*
1141 * 2. Set the current index entry `end` timestamp to
1142 * the next index entry `begin` timestamp.
1143 */
1144 curr_entry.timestamp_end = next_entry.timestamp_begin;
1145 curr_entry.timestamp_end_ns = next_entry.timestamp_begin_ns;
1146 }
1147 }
1148 }
1149
1150 return 0;
1151 }
1152
1153 /*
1154 * Extract the tracer information necessary to compare versions.
1155 * Returns 0 on success, and -1 if the extraction is not successful because the
1156 * necessary fields are absent from the trace metadata.
1157 */
1158 static int extract_tracer_info(struct ctf_fs_trace *trace, struct tracer_info *current_tracer_info)
1159 {
1160 /* Clear the current_tracer_info struct */
1161 memset(current_tracer_info, 0, sizeof(*current_tracer_info));
1162
1163 /*
1164 * To compare two tracer versions, at least the tracer name and its
1165 * major version are needed. If one of these is missing, consider it an
1166 * extraction failure.
1167 */
1168 ctf_trace_class_env_entry *entry =
1169 ctf_trace_class_borrow_env_entry_by_name(trace->metadata->tc, "tracer_name");
1170 if (!entry || entry->type != CTF_TRACE_CLASS_ENV_ENTRY_TYPE_STR) {
1171 return -1;
1172 }
1173
1174 /* Set tracer name. */
1175 current_tracer_info->name = entry->value.str->str;
1176
1177 entry = ctf_trace_class_borrow_env_entry_by_name(trace->metadata->tc, "tracer_major");
1178 if (!entry || entry->type != CTF_TRACE_CLASS_ENV_ENTRY_TYPE_INT) {
1179 return -1;
1180 }
1181
1182 /* Set major version number. */
1183 current_tracer_info->major = entry->value.i;
1184
1185 entry = ctf_trace_class_borrow_env_entry_by_name(trace->metadata->tc, "tracer_minor");
1186 if (!entry || entry->type != CTF_TRACE_CLASS_ENV_ENTRY_TYPE_INT) {
1187 return 0;
1188 }
1189
1190 /* Set minor version number. */
1191 current_tracer_info->minor = entry->value.i;
1192
1193 entry = ctf_trace_class_borrow_env_entry_by_name(trace->metadata->tc, "tracer_patch");
1194 if (!entry) {
1195 /*
1196 * If `tracer_patch` doesn't exist `tracer_patchlevel` might.
1197 * For example, `lttng-modules` uses entry name
1198 * `tracer_patchlevel`.
1199 */
1200 entry = ctf_trace_class_borrow_env_entry_by_name(trace->metadata->tc, "tracer_patchlevel");
1201 }
1202
1203 if (!entry || entry->type != CTF_TRACE_CLASS_ENV_ENTRY_TYPE_INT) {
1204 return 0;
1205 }
1206
1207 /* Set patch version number. */
1208 current_tracer_info->patch = entry->value.i;
1209
1210 return 0;
1211 }
1212
1213 static bool is_tracer_affected_by_lttng_event_after_packet_bug(struct tracer_info *curr_tracer_info)
1214 {
1215 bool is_affected = false;
1216
1217 if (strcmp(curr_tracer_info->name, "lttng-ust") == 0) {
1218 if (curr_tracer_info->major < 2) {
1219 is_affected = true;
1220 } else if (curr_tracer_info->major == 2) {
1221 /* fixed in lttng-ust 2.11.0 */
1222 if (curr_tracer_info->minor < 11) {
1223 is_affected = true;
1224 }
1225 }
1226 } else if (strcmp(curr_tracer_info->name, "lttng-modules") == 0) {
1227 if (curr_tracer_info->major < 2) {
1228 is_affected = true;
1229 } else if (curr_tracer_info->major == 2) {
1230 /* fixed in lttng-modules 2.11.0 */
1231 if (curr_tracer_info->minor == 10) {
1232 /* fixed in lttng-modules 2.10.10 */
1233 if (curr_tracer_info->patch < 10) {
1234 is_affected = true;
1235 }
1236 } else if (curr_tracer_info->minor == 9) {
1237 /* fixed in lttng-modules 2.9.13 */
1238 if (curr_tracer_info->patch < 13) {
1239 is_affected = true;
1240 }
1241 } else if (curr_tracer_info->minor < 9) {
1242 is_affected = true;
1243 }
1244 }
1245 }
1246
1247 return is_affected;
1248 }
1249
1250 static bool
1251 is_tracer_affected_by_barectf_event_before_packet_bug(struct tracer_info *curr_tracer_info)
1252 {
1253 bool is_affected = false;
1254
1255 if (strcmp(curr_tracer_info->name, "barectf") == 0) {
1256 if (curr_tracer_info->major < 2) {
1257 is_affected = true;
1258 } else if (curr_tracer_info->major == 2) {
1259 if (curr_tracer_info->minor < 3) {
1260 is_affected = true;
1261 } else if (curr_tracer_info->minor == 3) {
1262 /* fixed in barectf 2.3.1 */
1263 if (curr_tracer_info->patch < 1) {
1264 is_affected = true;
1265 }
1266 }
1267 }
1268 }
1269
1270 return is_affected;
1271 }
1272
1273 static bool is_tracer_affected_by_lttng_crash_quirk(struct tracer_info *curr_tracer_info)
1274 {
1275 bool is_affected = false;
1276
1277 /* All LTTng tracers may be affected by this lttng-crash quirk. */
1278 if (strcmp(curr_tracer_info->name, "lttng-ust") == 0) {
1279 is_affected = true;
1280 } else if (strcmp(curr_tracer_info->name, "lttng-modules") == 0) {
1281 is_affected = true;
1282 }
1283
1284 return is_affected;
1285 }
1286
1287 /*
1288 * Looks for trace produced by known buggy tracers and fix up the index
1289 * produced earlier.
1290 */
1291 static int fix_packet_index_tracer_bugs(ctf_fs_trace *trace)
1292 {
1293 struct tracer_info current_tracer_info;
1294
1295 int ret = extract_tracer_info(trace, &current_tracer_info);
1296 if (ret) {
1297 /*
1298 * A trace may not have all the necessary environment
1299 * entries to do the tracer version comparison.
1300 * At least, the tracer name and major version number
1301 * are needed. Failing to extract these entries is not
1302 * an error.
1303 */
1304 BT_CPPLOGI_STR_SPEC(
1305 trace->logger,
1306 "Cannot extract tracer information necessary to compare with buggy versions.");
1307 return 0;
1308 }
1309
1310 /* Check if the trace may be affected by old tracer bugs. */
1311 if (is_tracer_affected_by_lttng_event_after_packet_bug(&current_tracer_info)) {
1312 BT_CPPLOGI_STR_SPEC(
1313 trace->logger,
1314 "Trace may be affected by LTTng tracer packet timestamp bug. Fixing up.");
1315 ret = fix_index_lttng_event_after_packet_bug(trace);
1316 if (ret) {
1317 BT_CPPLOGE_APPEND_CAUSE_SPEC(trace->logger,
1318 "Failed to fix LTTng event-after-packet bug.");
1319 return ret;
1320 }
1321 trace->metadata->tc->quirks.lttng_event_after_packet = true;
1322 }
1323
1324 if (is_tracer_affected_by_barectf_event_before_packet_bug(&current_tracer_info)) {
1325 BT_CPPLOGI_STR_SPEC(
1326 trace->logger,
1327 "Trace may be affected by barectf tracer packet timestamp bug. Fixing up.");
1328 ret = fix_index_barectf_event_before_packet_bug(trace);
1329 if (ret) {
1330 BT_CPPLOGE_APPEND_CAUSE_SPEC(trace->logger,
1331 "Failed to fix barectf event-before-packet bug.");
1332 return ret;
1333 }
1334 trace->metadata->tc->quirks.barectf_event_before_packet = true;
1335 }
1336
1337 if (is_tracer_affected_by_lttng_crash_quirk(&current_tracer_info)) {
1338 ret = fix_index_lttng_crash_quirk(trace);
1339 if (ret) {
1340 BT_CPPLOGE_APPEND_CAUSE_SPEC(trace->logger,
1341 "Failed to fix lttng-crash timestamp quirks.");
1342 return ret;
1343 }
1344 trace->metadata->tc->quirks.lttng_crash = true;
1345 }
1346
1347 return 0;
1348 }
1349
1350 static bool compare_ds_file_groups_by_first_path(const ctf_fs_ds_file_group::UP& ds_file_group_a,
1351 const ctf_fs_ds_file_group::UP& ds_file_group_b)
1352 {
1353 BT_ASSERT(!ds_file_group_a->ds_file_infos.empty());
1354 BT_ASSERT(!ds_file_group_b->ds_file_infos.empty());
1355
1356 const auto& first_ds_file_info_a = *ds_file_group_a->ds_file_infos[0];
1357 const auto& first_ds_file_info_b = *ds_file_group_b->ds_file_infos[0];
1358
1359 return first_ds_file_info_a.path < first_ds_file_info_b.path;
1360 }
1361
1362 int ctf_fs_component_create_ctf_fs_trace(struct ctf_fs_component *ctf_fs,
1363 const bt2::ConstArrayValue pathsValue,
1364 const char *traceName, bt_self_component *selfComp)
1365 {
1366 std::vector<std::string> paths;
1367
1368 BT_ASSERT(!pathsValue.isEmpty());
1369
1370 /*
1371 * Create a sorted array of the paths, to make the execution of this
1372 * component deterministic.
1373 */
1374 for (const auto pathValue : pathsValue) {
1375 BT_ASSERT(pathValue.isString());
1376 paths.emplace_back(pathValue.asString().value().str());
1377 }
1378
1379 std::sort(paths.begin(), paths.end());
1380
1381 /* Create a separate ctf_fs_trace object for each path. */
1382 std::vector<ctf_fs_trace::UP> traces;
1383 for (const auto& path : paths) {
1384 int ret = ctf_fs_component_create_ctf_fs_trace_one_path(ctf_fs, path.c_str(), traceName,
1385 traces, selfComp);
1386 if (ret) {
1387 return ret;
1388 }
1389 }
1390
1391 if (traces.size() > 1) {
1392 ctf_fs_trace *first_trace = traces[0].get();
1393 const uint8_t *first_trace_uuid = first_trace->metadata->tc->uuid;
1394
1395 /*
1396 * We have more than one trace, they must all share the same
1397 * UUID, verify that.
1398 */
1399 for (size_t i = 0; i < traces.size(); i++) {
1400 ctf_fs_trace *this_trace = traces[i].get();
1401 const uint8_t *this_trace_uuid = this_trace->metadata->tc->uuid;
1402
1403 if (!this_trace->metadata->tc->is_uuid_set) {
1404 BT_CPPLOGE_APPEND_CAUSE_SPEC(
1405 ctf_fs->logger,
1406 "Multiple traces given, but a trace does not have a UUID: path={}",
1407 this_trace->path);
1408 return -1;
1409 }
1410
1411 if (bt_uuid_compare(first_trace_uuid, this_trace_uuid) != 0) {
1412 char first_trace_uuid_str[BT_UUID_STR_LEN + 1];
1413 char this_trace_uuid_str[BT_UUID_STR_LEN + 1];
1414
1415 bt_uuid_to_str(first_trace_uuid, first_trace_uuid_str);
1416 bt_uuid_to_str(this_trace_uuid, this_trace_uuid_str);
1417
1418 BT_CPPLOGE_APPEND_CAUSE_SPEC(ctf_fs->logger,
1419 "Multiple traces given, but UUIDs don't match: "
1420 "first-trace-uuid={}, first-trace-path={}, "
1421 "trace-uuid={}, trace-path={}",
1422 first_trace_uuid_str, first_trace->path,
1423 this_trace_uuid_str, this_trace->path);
1424 return -1;
1425 }
1426 }
1427
1428 int ret = merge_ctf_fs_traces(std::move(traces), ctf_fs->trace);
1429 if (ret) {
1430 BT_CPPLOGE_APPEND_CAUSE_SPEC(ctf_fs->logger,
1431 "Failed to merge traces with the same UUID.");
1432 return ret;
1433 }
1434 } else {
1435 /* Just one trace, it may or may not have a UUID, both are fine. */
1436 ctf_fs->trace = std::move(traces[0]);
1437 }
1438
1439 int ret = fix_packet_index_tracer_bugs(ctf_fs->trace.get());
1440 if (ret) {
1441 BT_CPPLOGE_APPEND_CAUSE_SPEC(ctf_fs->logger, "Failed to fix packet index tracer bugs.");
1442 return ret;
1443 }
1444
1445 /*
1446 * Sort data stream file groups by first data stream file info
1447 * path to get a deterministic order. This order influences the
1448 * order of the output ports. It also influences the order of
1449 * the automatic stream IDs if the trace's packet headers do not
1450 * contain a `stream_instance_id` field, in which case the data
1451 * stream file to stream ID association is always the same,
1452 * whatever the build and the system.
1453 *
1454 * Having a deterministic order here can help debugging and
1455 * testing.
1456 */
1457 std::sort(ctf_fs->trace->ds_file_groups.begin(), ctf_fs->trace->ds_file_groups.end(),
1458 compare_ds_file_groups_by_first_path);
1459
1460 return 0;
1461 }
1462
1463 static const std::string&
1464 get_stream_instance_unique_name(struct ctf_fs_ds_file_group *ds_file_group)
1465 {
1466 /*
1467 * The first (earliest) stream file's path is used as the stream's unique
1468 * name.
1469 */
1470 BT_ASSERT(!ds_file_group->ds_file_infos.empty());
1471 return ds_file_group->ds_file_infos[0]->path;
1472 }
1473
1474 /* Create the IR stream objects for ctf_fs_trace. */
1475
1476 static int create_streams_for_trace(struct ctf_fs_trace *ctf_fs_trace)
1477 {
1478 for (const auto& ds_file_group : ctf_fs_trace->ds_file_groups) {
1479 const std::string& name = get_stream_instance_unique_name(ds_file_group.get());
1480
1481 BT_ASSERT(ds_file_group->sc->ir_sc);
1482 BT_ASSERT(ctf_fs_trace->trace);
1483
1484 bt_stream *stream;
1485
1486 if (ds_file_group->stream_id == UINT64_C(-1)) {
1487 /* No stream ID: use the next automatic (incrementing) stream ID. */
1488 stream =
1489 bt_stream_create_with_id(ds_file_group->sc->ir_sc, ctf_fs_trace->trace->libObjPtr(),
1490 ctf_fs_trace->next_stream_id);
1491 ctf_fs_trace->next_stream_id++;
1492 } else {
1493 /* Specific stream ID */
1494 stream =
1495 bt_stream_create_with_id(ds_file_group->sc->ir_sc, ctf_fs_trace->trace->libObjPtr(),
1496 (uint64_t) ds_file_group->stream_id);
1497 }
1498
1499 if (!stream) {
1500 BT_CPPLOGE_APPEND_CAUSE_SPEC(ctf_fs_trace->logger,
1501 "Cannot create stream for DS file group: "
1502 "addr={}, stream-name=\"{}\"",
1503 fmt::ptr(ds_file_group), name);
1504 return -1;
1505 }
1506
1507 ds_file_group->stream = bt2::Stream::Shared::createWithoutRef(stream);
1508
1509 int ret = bt_stream_set_name(ds_file_group->stream->libObjPtr(), name.c_str());
1510 if (ret) {
1511 BT_CPPLOGE_APPEND_CAUSE_SPEC(ctf_fs_trace->logger,
1512 "Cannot set stream's name: "
1513 "addr={}, stream-name=\"{}\"",
1514 fmt::ptr(ds_file_group->stream->libObjPtr()), name);
1515 return ret;
1516 }
1517 }
1518
1519 return 0;
1520 }
1521
1522 static const bt_param_validation_value_descr inputs_elem_descr =
1523 bt_param_validation_value_descr::makeString();
1524
1525 static bt_param_validation_map_value_entry_descr fs_params_entries_descr[] = {
1526 {"inputs", BT_PARAM_VALIDATION_MAP_VALUE_ENTRY_MANDATORY,
1527 bt_param_validation_value_descr::makeArray(1, BT_PARAM_VALIDATION_INFINITE,
1528 inputs_elem_descr)},
1529 {"trace-name", BT_PARAM_VALIDATION_MAP_VALUE_ENTRY_OPTIONAL,
1530 bt_param_validation_value_descr::makeString()},
1531 {"clock-class-offset-s", BT_PARAM_VALIDATION_MAP_VALUE_ENTRY_OPTIONAL,
1532 bt_param_validation_value_descr::makeSignedInteger()},
1533 {"clock-class-offset-ns", BT_PARAM_VALIDATION_MAP_VALUE_ENTRY_OPTIONAL,
1534 bt_param_validation_value_descr::makeSignedInteger()},
1535 {"force-clock-class-origin-unix-epoch", BT_PARAM_VALIDATION_MAP_VALUE_ENTRY_OPTIONAL,
1536 bt_param_validation_value_descr::makeBool()},
1537 BT_PARAM_VALIDATION_MAP_VALUE_ENTRY_END};
1538
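/*
 * Validate `params` against `fs_params_entries_descr` and convert it to
 * a `ctf::src::fs::Parameters` instance, throwing `bt2c::Error` if
 * validation fails.
 *
 * As a hypothetical example, a parameter map equivalent to
 *
 *     inputs = ["/path/to/trace"]
 *     clock-class-offset-ns = 200
 *     trace-name = "my-trace"
 *
 * yields a `Parameters` object with those inputs,
 * `clkClsCfg.offsetNanoSec` set to 200 and `traceName` set to
 * "my-trace".
 */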
1539 ctf::src::fs::Parameters read_src_fs_parameters(const bt2::ConstMapValue params,
1540 const bt2c::Logger& logger)
1541 {
1542 gchar *error = NULL;
1543 bt_param_validation_status validate_value_status =
1544 bt_param_validation_validate(params.libObjPtr(), fs_params_entries_descr, &error);
1545
1546 if (validate_value_status != BT_PARAM_VALIDATION_STATUS_OK) {
1547 bt2c::GCharUP errorFreer {error};
1548 BT_CPPLOGE_APPEND_CAUSE_AND_THROW_SPEC(logger, bt2c::Error, "{}", error);
1549 }
1550
1551 ctf::src::fs::Parameters parameters {params["inputs"]->asArray()};
1552
1553 /* clock-class-offset-s parameter */
1554 if (const auto clockClassOffsetS = params["clock-class-offset-s"]) {
1555 parameters.clkClsCfg.offsetSec = clockClassOffsetS->asSignedInteger().value();
1556 }
1557
1558 /* clock-class-offset-ns parameter */
1559 if (const auto clockClassOffsetNs = params["clock-class-offset-ns"]) {
1560 parameters.clkClsCfg.offsetNanoSec = clockClassOffsetNs->asSignedInteger().value();
1561 }
1562
1563 /* force-clock-class-origin-unix-epoch parameter */
1564 if (const auto forceClockClassOriginUnixEpoch = params["force-clock-class-origin-unix-epoch"]) {
1565 parameters.clkClsCfg.forceOriginIsUnixEpoch =
1566 forceClockClassOriginUnixEpoch->asBool().value();
1567 }
1568
1569 /* trace-name parameter */
1570 if (const auto traceName = params["trace-name"]) {
1571 parameters.traceName = traceName->asString().value().str();
1572 }
1573
1574 return parameters;
1575 }
1576
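/*
 * Create a ctf_fs_component: read and validate the initialization
 * parameters, create (and, if needed, merge) the ctf_fs_trace objects
 * for the given inputs, then create the IR streams and one output port
 * per data stream file group.
 */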
1577 static ctf_fs_component::UP ctf_fs_create(const bt2::ConstMapValue params,
1578 bt_self_component_source *self_comp_src)
1579 {
1580 bt_self_component *self_comp = bt_self_component_source_as_self_component(self_comp_src);
1581 ctf_fs_component::UP ctf_fs = bt2s::make_unique<ctf_fs_component>(
1582 bt2c::Logger {bt2::SelfSourceComponent {self_comp_src}, "PLUGIN/SRC.CTF.FS/COMP"});
1583 const auto parameters = read_src_fs_parameters(params, ctf_fs->logger);
1584
1585 ctf_fs->clkClsCfg = parameters.clkClsCfg;
1586
1587 if (ctf_fs_component_create_ctf_fs_trace(
1588 ctf_fs.get(), parameters.inputs,
1589 parameters.traceName ? parameters.traceName->c_str() : nullptr, self_comp)) {
1590 return nullptr;
1591 }
1592
1593 if (create_streams_for_trace(ctf_fs->trace.get())) {
1594 return nullptr;
1595 }
1596
1597 if (create_ports_for_trace(ctf_fs.get(), ctf_fs->trace.get(), self_comp_src)) {
1598 return nullptr;
1599 }
1600
1601 return ctf_fs;
1602 }
1603
1604 bt_component_class_initialize_method_status ctf_fs_init(bt_self_component_source *self_comp_src,
1605 bt_self_component_source_configuration *,
1606 const bt_value *params, void *)
1607 {
1608 try {
1609 bt_component_class_initialize_method_status ret =
1610 BT_COMPONENT_CLASS_INITIALIZE_METHOD_STATUS_OK;
1611
1612 ctf_fs_component::UP ctf_fs = ctf_fs_create(bt2::ConstMapValue {params}, self_comp_src);
1613 if (!ctf_fs) {
1614 ret = BT_COMPONENT_CLASS_INITIALIZE_METHOD_STATUS_ERROR;
1615 }
1616
1617 bt_self_component_set_data(bt_self_component_source_as_self_component(self_comp_src),
1618 ctf_fs.release());
1619 return ret;
1620 } catch (const std::bad_alloc&) {
1621 return BT_COMPONENT_CLASS_INITIALIZE_METHOD_STATUS_MEMORY_ERROR;
1622 } catch (const bt2::Error&) {
1623 return BT_COMPONENT_CLASS_INITIALIZE_METHOD_STATUS_ERROR;
1624 }
1625 }
1626
1627 bt_component_class_query_method_status ctf_fs_query(bt_self_component_class_source *comp_class_src,
1628 bt_private_query_executor *priv_query_exec,
1629 const char *object, const bt_value *params,
1630 __attribute__((unused)) void *method_data,
1631 const bt_value **result)
1632 {
1633 try {
1634 bt2c::Logger logger {bt2::SelfComponentClass {comp_class_src},
1635 bt2::PrivateQueryExecutor {priv_query_exec},
1636 "PLUGIN/SRC.CTF.FS/QUERY"};
1637 bt2::ConstMapValue paramsObj(params);
1638 bt2::Value::Shared resultObj;
1639
1640 if (strcmp(object, "metadata-info") == 0) {
1641 resultObj = metadata_info_query(paramsObj, logger);
1642 } else if (strcmp(object, "babeltrace.trace-infos") == 0) {
1643 resultObj = trace_infos_query(paramsObj, logger);
1644 } else if (!strcmp(object, "babeltrace.support-info")) {
1645 resultObj = support_info_query(paramsObj, logger);
1646 } else {
1647 BT_CPPLOGE_SPEC(logger, "Unknown query object `{}`", object);
1648 return BT_COMPONENT_CLASS_QUERY_METHOD_STATUS_UNKNOWN_OBJECT;
1649 }
1650
1651 *result = resultObj.release().libObjPtr();
1652
1653 return BT_COMPONENT_CLASS_QUERY_METHOD_STATUS_OK;
1654 } catch (const std::bad_alloc&) {
1655 return BT_COMPONENT_CLASS_QUERY_METHOD_STATUS_MEMORY_ERROR;
1656 } catch (const bt2::Error&) {
1657 return BT_COMPONENT_CLASS_QUERY_METHOD_STATUS_ERROR;
1658 }
1659 }