src.ctf.fs: make ds_file_group_insert_ds_file_info_sorted a method of ctf_fs_ds_file_...
[babeltrace.git] / src / plugins / ctf / fs-src / fs.cpp
1 /*
2 * SPDX-License-Identifier: MIT
3 *
4 * Copyright 2015-2017 Philippe Proulx <pproulx@efficios.com>
5 * Copyright 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
6 *
7 * Babeltrace CTF file system Reader Component
8 */
9
10 #include <sstream>
11
12 #include <glib.h>
13
14 #include <babeltrace2/babeltrace.h>
15
16 #include "common/assert.h"
17 #include "common/common.h"
18 #include "common/uuid.h"
19 #include "cpp-common/bt2c/glib-up.hpp"
20 #include "cpp-common/bt2s/make-unique.hpp"
21
22 #include "plugins/common/param-validation/param-validation.h"
23
24 #include "../common/src/metadata/tsdl/ctf-meta-configure-ir-trace.hpp"
25 #include "../common/src/msg-iter/msg-iter.hpp"
26 #include "data-stream-file.hpp"
27 #include "file.hpp"
28 #include "fs.hpp"
29 #include "metadata.hpp"
30 #include "query.hpp"
31
32 struct tracer_info
33 {
34 const char *name;
35 int64_t major;
36 int64_t minor;
37 int64_t patch;
38 };
39
40 static bt_message_iterator_class_next_method_status
41 ctf_fs_iterator_next_one(struct ctf_fs_msg_iter_data *msg_iter_data, const bt_message **out_msg)
42 {
43 const auto msg_iter_status =
44 ctf_msg_iter_get_next_message(msg_iter_data->msg_iter.get(), out_msg);
45 bt_message_iterator_class_next_method_status status;
46
47 switch (msg_iter_status) {
48 case CTF_MSG_ITER_STATUS_OK:
49 /* Cool, message has been written to *out_msg. */
50 status = BT_MESSAGE_ITERATOR_CLASS_NEXT_METHOD_STATUS_OK;
51 break;
52
53 case CTF_MSG_ITER_STATUS_EOF:
54 status = BT_MESSAGE_ITERATOR_CLASS_NEXT_METHOD_STATUS_END;
55 break;
56
57 case CTF_MSG_ITER_STATUS_AGAIN:
58 /*
59 * Should not make it this far as this is
60 * medium-specific; there is nothing for the user to do
61 * and it should have been handled upstream.
62 */
63 bt_common_abort();
64
65 case CTF_MSG_ITER_STATUS_ERROR:
66 BT_CPPLOGE_APPEND_CAUSE_SPEC(msg_iter_data->logger,
67 "Failed to get next message from CTF message iterator.");
68 status = BT_MESSAGE_ITERATOR_CLASS_NEXT_METHOD_STATUS_ERROR;
69 break;
70
71 case CTF_MSG_ITER_STATUS_MEMORY_ERROR:
72 BT_CPPLOGE_APPEND_CAUSE_SPEC(msg_iter_data->logger,
73 "Failed to get next message from CTF message iterator.");
74 status = BT_MESSAGE_ITERATOR_CLASS_NEXT_METHOD_STATUS_MEMORY_ERROR;
75 break;
76
77 default:
78 bt_common_abort();
79 }
80
81 return status;
82 }
83
84 bt_message_iterator_class_next_method_status
85 ctf_fs_iterator_next(bt_self_message_iterator *iterator, bt_message_array_const msgs,
86 uint64_t capacity, uint64_t *count)
87 {
88 try {
89 struct ctf_fs_msg_iter_data *msg_iter_data =
90 (struct ctf_fs_msg_iter_data *) bt_self_message_iterator_get_data(iterator);
91
92 if (G_UNLIKELY(msg_iter_data->next_saved_error)) {
93 /*
94 * Last time we were called, we hit an error but had some
95 * messages to deliver, so we stashed the error here. Return
96 * it now.
97 */
98 BT_CURRENT_THREAD_MOVE_ERROR_AND_RESET(msg_iter_data->next_saved_error);
99 return msg_iter_data->next_saved_status;
100 }
101
102 bt_message_iterator_class_next_method_status status;
103 uint64_t i = 0;
104
105 do {
106 status = ctf_fs_iterator_next_one(msg_iter_data, &msgs[i]);
107 if (status == BT_MESSAGE_ITERATOR_CLASS_NEXT_METHOD_STATUS_OK) {
108 i++;
109 }
110 } while (i < capacity && status == BT_MESSAGE_ITERATOR_CLASS_NEXT_METHOD_STATUS_OK);
111
112 if (i > 0) {
113 /*
114 * Even if ctf_fs_iterator_next_one() returned something
115 * else than BT_MESSAGE_ITERATOR_NEXT_METHOD_STATUS_OK, we
116 * accumulated message objects in the output
117 * message array, so we need to return
118 * BT_MESSAGE_ITERATOR_NEXT_METHOD_STATUS_OK so that they are
119 * transferred to downstream. This other status occurs
120 * again the next time ctf_fs_iterator_next() is
121 * called, possibly without any accumulated
122 * message, in which case we'll return it.
123 */
124 if (status < 0) {
125 /*
126 * Save this error for the next _next call. Assume that
127 * this component always appends error causes when
128 * returning an error status code, which will cause the
129 * current thread error to be non-NULL.
130 */
131 msg_iter_data->next_saved_error = bt_current_thread_take_error();
132 BT_ASSERT(msg_iter_data->next_saved_error);
133 msg_iter_data->next_saved_status = status;
134 }
135
136 *count = i;
137 status = BT_MESSAGE_ITERATOR_CLASS_NEXT_METHOD_STATUS_OK;
138 }
139
140 return status;
142 } catch (const std::bad_alloc&) {
143 return BT_MESSAGE_ITERATOR_CLASS_NEXT_METHOD_STATUS_MEMORY_ERROR;
144 } catch (const bt2::Error&) {
145 return BT_MESSAGE_ITERATOR_CLASS_NEXT_METHOD_STATUS_ERROR;
146 }
147 }
148
149 bt_message_iterator_class_seek_beginning_method_status
150 ctf_fs_iterator_seek_beginning(bt_self_message_iterator *it)
151 {
152 try {
153 struct ctf_fs_msg_iter_data *msg_iter_data =
154 (struct ctf_fs_msg_iter_data *) bt_self_message_iterator_get_data(it);
155
156 BT_ASSERT(msg_iter_data);
157
158 ctf_msg_iter_reset(msg_iter_data->msg_iter.get());
159 ctf_fs_ds_group_medops_data_reset(msg_iter_data->msg_iter_medops_data.get());
160
161 return BT_MESSAGE_ITERATOR_CLASS_SEEK_BEGINNING_METHOD_STATUS_OK;
162 } catch (const std::bad_alloc&) {
163 return BT_MESSAGE_ITERATOR_CLASS_SEEK_BEGINNING_METHOD_STATUS_MEMORY_ERROR;
164 } catch (const bt2::Error&) {
165 return BT_MESSAGE_ITERATOR_CLASS_SEEK_BEGINNING_METHOD_STATUS_ERROR;
166 }
167 }
168
169 void ctf_fs_iterator_finalize(bt_self_message_iterator *it)
170 {
/* Take ownership of the iterator data in a temporary unique pointer so that it is destroyed here. */
171 ctf_fs_msg_iter_data::UP {
172 (static_cast<ctf_fs_msg_iter_data *>(bt_self_message_iterator_get_data(it)))};
173 }
174
175 static bt_message_iterator_class_initialize_method_status
176 ctf_msg_iter_medium_status_to_msg_iter_initialize_status(enum ctf_msg_iter_medium_status status)
177 {
178 switch (status) {
179 case CTF_MSG_ITER_MEDIUM_STATUS_EOF:
180 case CTF_MSG_ITER_MEDIUM_STATUS_AGAIN:
181 case CTF_MSG_ITER_MEDIUM_STATUS_ERROR:
182 return BT_MESSAGE_ITERATOR_CLASS_INITIALIZE_METHOD_STATUS_ERROR;
183 case CTF_MSG_ITER_MEDIUM_STATUS_MEMORY_ERROR:
184 return BT_MESSAGE_ITERATOR_CLASS_INITIALIZE_METHOD_STATUS_MEMORY_ERROR;
185 case CTF_MSG_ITER_MEDIUM_STATUS_OK:
186 return BT_MESSAGE_ITERATOR_CLASS_INITIALIZE_METHOD_STATUS_OK;
187 }
188
189 bt_common_abort();
190 }
191
192 bt_message_iterator_class_initialize_method_status
193 ctf_fs_iterator_init(bt_self_message_iterator *self_msg_iter,
194 bt_self_message_iterator_configuration *config,
195 bt_self_component_port_output *self_port)
196 {
197 try {
198 ctf_fs_port_data *port_data = (struct ctf_fs_port_data *) bt_self_component_port_get_data(
199 bt_self_component_port_output_as_self_component_port(self_port));
200 BT_ASSERT(port_data);
201
202 auto msg_iter_data = bt2s::make_unique<ctf_fs_msg_iter_data>(self_msg_iter);
203 msg_iter_data->ds_file_group = port_data->ds_file_group;
204
205 ctf_msg_iter_medium_status medium_status = ctf_fs_ds_group_medops_data_create(
206 msg_iter_data->ds_file_group, self_msg_iter, msg_iter_data->logger,
207 msg_iter_data->msg_iter_medops_data);
208 BT_ASSERT(medium_status == CTF_MSG_ITER_MEDIUM_STATUS_OK ||
209 medium_status == CTF_MSG_ITER_MEDIUM_STATUS_ERROR ||
210 medium_status == CTF_MSG_ITER_MEDIUM_STATUS_MEMORY_ERROR);
211 if (medium_status != CTF_MSG_ITER_MEDIUM_STATUS_OK) {
212 BT_CPPLOGE_APPEND_CAUSE_SPEC(msg_iter_data->logger,
213 "Failed to create ctf_fs_ds_group_medops");
214 return ctf_msg_iter_medium_status_to_msg_iter_initialize_status(medium_status);
215 }
216
217 msg_iter_data->msg_iter = ctf_msg_iter_create(
218 msg_iter_data->ds_file_group->ctf_fs_trace->metadata->tc,
219 bt_common_get_page_size(static_cast<int>(msg_iter_data->logger.level())) * 8,
220 ctf_fs_ds_group_medops, msg_iter_data->msg_iter_medops_data.get(), self_msg_iter,
221 msg_iter_data->logger);
222 if (!msg_iter_data->msg_iter) {
223 BT_CPPLOGE_APPEND_CAUSE_SPEC(msg_iter_data->logger,
224 "Cannot create a CTF message iterator.");
225 return BT_MESSAGE_ITERATOR_CLASS_INITIALIZE_METHOD_STATUS_MEMORY_ERROR;
226 }
227
228 /*
229 * This iterator can seek forward if its stream class has a default
230 * clock class.
231 */
232 if (msg_iter_data->ds_file_group->sc->default_clock_class) {
233 bt_self_message_iterator_configuration_set_can_seek_forward(config, true);
234 }
235
236 bt_self_message_iterator_set_data(self_msg_iter, msg_iter_data.release());
237
238 return BT_MESSAGE_ITERATOR_CLASS_INITIALIZE_METHOD_STATUS_OK;
239 } catch (const std::bad_alloc&) {
240 return BT_MESSAGE_ITERATOR_CLASS_INITIALIZE_METHOD_STATUS_MEMORY_ERROR;
241 } catch (const bt2::Error&) {
242 return BT_MESSAGE_ITERATOR_CLASS_INITIALIZE_METHOD_STATUS_ERROR;
243 }
244 }
245
246 void ctf_fs_finalize(bt_self_component_source *component)
247 {
/* Take back ownership of the component data so that it is destroyed here. */
248 ctf_fs_component::UP {static_cast<ctf_fs_component *>(
249 bt_self_component_get_data(bt_self_component_source_as_self_component(component)))};
250 }
251
252 std::string ctf_fs_make_port_name(ctf_fs_ds_file_group *ds_file_group)
253 {
254 std::stringstream name;
255
256 /*
257 * The unique port name is generated by concatenating unique identifiers
258 * for:
259 *
260 * - the trace
261 * - the stream class
262 * - the stream
263 */
264
265 /* For the trace, use the uuid if present, else the path. */
266 if (ds_file_group->ctf_fs_trace->metadata->tc->is_uuid_set) {
267 char uuid_str[BT_UUID_STR_LEN + 1];
268
269 bt_uuid_to_str(ds_file_group->ctf_fs_trace->metadata->tc->uuid, uuid_str);
270 name << uuid_str;
271 } else {
272 name << ds_file_group->ctf_fs_trace->path;
273 }
274
275 /*
276 * For the stream class, use the id if present. We can omit this field
277 * otherwise, as there will only be a single stream class.
278 */
279 if (ds_file_group->sc->id != UINT64_C(-1)) {
280 name << " | " << ds_file_group->sc->id;
281 }
282
283 /* For the stream, use the id if present, else, use the path. */
284 if (ds_file_group->stream_id != UINT64_C(-1)) {
285 name << " | " << ds_file_group->stream_id;
286 } else {
287 BT_ASSERT(ds_file_group->ds_file_infos.size() == 1);
288 const auto& ds_file_info = *ds_file_group->ds_file_infos[0];
289 name << " | " << ds_file_info.path;
290 }
291
292 return name.str();
293 }
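/*
 * For illustration (hypothetical values): for a trace with UUID
 * 12345678-9abc-def0-1234-56789abcdef0, a stream class with ID 0 and a
 * stream with ID 2, the port name built above would be:
 *
 *     12345678-9abc-def0-1234-56789abcdef0 | 0 | 2
 */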
294
295 static int create_one_port_for_trace(struct ctf_fs_component *ctf_fs,
296 struct ctf_fs_ds_file_group *ds_file_group,
297 bt_self_component_source *self_comp_src)
298 {
299 const auto port_name = ctf_fs_make_port_name(ds_file_group);
300 auto port_data = bt2s::make_unique<ctf_fs_port_data>();
301
302 BT_CPPLOGI_SPEC(ctf_fs->logger, "Creating one port named `{}`", port_name);
303
304 port_data->ctf_fs = ctf_fs;
305 port_data->ds_file_group = ds_file_group;
306
307 int ret = bt_self_component_source_add_output_port(self_comp_src, port_name.c_str(),
308 port_data.get(), NULL);
309 if (ret) {
310 return ret;
311 }
312
313 ctf_fs->port_data.emplace_back(std::move(port_data));
314 return 0;
315 }
316
317 static int create_ports_for_trace(struct ctf_fs_component *ctf_fs,
318 struct ctf_fs_trace *ctf_fs_trace,
319 bt_self_component_source *self_comp_src)
320 {
321 /* Create one output port for each stream file group */
322 for (const auto& ds_file_group : ctf_fs_trace->ds_file_groups) {
323 int ret = create_one_port_for_trace(ctf_fs, ds_file_group.get(), self_comp_src);
324 if (ret) {
325 BT_CPPLOGE_APPEND_CAUSE_SPEC(ctf_fs->logger, "Cannot create output port.");
326 return ret;
327 }
328 }
329
330 return 0;
331 }
332
333 static bool ds_index_entries_equal(const ctf_fs_ds_index_entry& left,
334 const ctf_fs_ds_index_entry& right)
335 {
336 if (left.packetSize != right.packetSize) {
337 return false;
338 }
339
340 if (left.timestamp_begin != right.timestamp_begin) {
341 return false;
342 }
343
344 if (left.timestamp_end != right.timestamp_end) {
345 return false;
346 }
347
348 if (left.packet_seq_num != right.packet_seq_num) {
349 return false;
350 }
351
352 return true;
353 }
354
355 /*
356 * Insert `entry` into `index`, without duplication.
357 *
358 * The entry is inserted only if there isn't an identical entry already.
359 */
360
361 static void ds_index_insert_ds_index_entry_sorted(struct ctf_fs_ds_index *index,
362 const ctf_fs_ds_index_entry& entry)
363 {
364 /* Find the spot where to insert this index entry. */
365 auto otherEntry = index->entries.begin();
366 for (; otherEntry != index->entries.end(); ++otherEntry) {
367 if (entry.timestamp_begin_ns <= otherEntry->timestamp_begin_ns) {
368 break;
369 }
370 }
371
372 /*
373 * Insert the entry only if a duplicate doesn't already exist.
374 *
375 * There can be duplicate packets if reading multiple overlapping
376 * snapshots of the same trace. We then want the index to contain
377 * a reference to only one copy of that packet.
378 */
379 if (otherEntry == index->entries.end() || !ds_index_entries_equal(entry, *otherEntry)) {
380 index->entries.emplace(otherEntry, entry);
381 }
382 }
383
384 static void merge_ctf_fs_ds_indexes(struct ctf_fs_ds_index *dest, const ctf_fs_ds_index& src)
385 {
386 for (const auto& entry : src.entries) {
387 ds_index_insert_ds_index_entry_sorted(dest, entry);
388 }
389 }
390
391 static int add_ds_file_to_ds_file_group(struct ctf_fs_trace *ctf_fs_trace, const char *path)
392 {
393 /*
394 * Create a temporary ds_file to read some properties about the data
395 * stream file.
396 */
397 const auto ds_file =
398 ctf_fs_ds_file_create(ctf_fs_trace, bt2::Stream::Shared {}, path, ctf_fs_trace->logger);
399 if (!ds_file) {
400 return -1;
401 }
402
403 /* Create a temporary iterator to read the ds_file. */
404 ctf_msg_iter_up msg_iter = ctf_msg_iter_create(
405 ctf_fs_trace->metadata->tc,
406 bt_common_get_page_size(static_cast<int>(ctf_fs_trace->logger.level())) * 8,
407 ctf_fs_ds_file_medops, ds_file.get(), nullptr, ctf_fs_trace->logger);
408 if (!msg_iter) {
409 BT_CPPLOGE_STR_SPEC(ctf_fs_trace->logger, "Cannot create a CTF message iterator.");
410 return -1;
411 }
412
413 ctf_msg_iter_set_dry_run(msg_iter.get(), true);
414
415 ctf_msg_iter_packet_properties props;
416 int ret = ctf_msg_iter_get_packet_properties(msg_iter.get(), &props);
417 if (ret) {
418 BT_CPPLOGE_APPEND_CAUSE_SPEC(
419 ctf_fs_trace->logger,
420 "Cannot get stream file's first packet's header and context fields (`{}`).", path);
421 return ret;
422 }
423
424 ctf_stream_class *sc =
425 ctf_trace_class_borrow_stream_class_by_id(ds_file->metadata->tc, props.stream_class_id);
426 BT_ASSERT(sc);
427 int64_t stream_instance_id = props.data_stream_id;
428 int64_t begin_ns = -1;
429
430 if (props.snapshots.beginning_clock != UINT64_C(-1)) {
431 BT_ASSERT(sc->default_clock_class);
432 ret = bt_util_clock_cycles_to_ns_from_origin(
433 props.snapshots.beginning_clock, sc->default_clock_class->frequency,
434 sc->default_clock_class->offset_seconds, sc->default_clock_class->offset_cycles,
435 &begin_ns);
436 if (ret) {
437 BT_CPPLOGE_APPEND_CAUSE_SPEC(
438 ctf_fs_trace->logger,
439 "Cannot convert clock cycles to nanoseconds from origin (`{}`).", path);
440 return ret;
441 }
442 }
443
444 ctf_fs_ds_file_info::UP ds_file_info = ctf_fs_ds_file_info_create(path, begin_ns);
445 if (!ds_file_info) {
446 return -1;
447 }
448
449 auto index = ctf_fs_ds_file_build_index(ds_file.get(), ds_file_info.get(), msg_iter.get());
450 if (!index) {
451 BT_CPPLOGE_APPEND_CAUSE_SPEC(ctf_fs_trace->logger, "Failed to index CTF stream file \'{}\'",
452 ds_file->file->path);
453 return -1;
454 }
455
456 if (begin_ns == -1) {
457 /*
458 * No beginning timestamp to sort the stream files
459 * within a stream file group, so consider that this
460 * file must be the only one within its group.
461 */
462 stream_instance_id = -1;
463 }
464
465 if (stream_instance_id == -1) {
466 /*
467 * No stream instance ID or no beginning timestamp:
468 * create a unique stream file group for this stream
469 * file because, even if there's a stream instance ID,
470 * there's no timestamp to order the file within its
471 * group.
472 */
473 auto new_ds_file_group =
474 ctf_fs_ds_file_group_create(ctf_fs_trace, sc, UINT64_C(-1), std::move(*index));
475
476 if (!new_ds_file_group) {
477 return -1;
478 }
479
480 new_ds_file_group->insert_ds_file_info_sorted(std::move(ds_file_info));
481 ctf_fs_trace->ds_file_groups.emplace_back(std::move(new_ds_file_group));
482 return 0;
483 }
484
485 BT_ASSERT(stream_instance_id != -1);
486 BT_ASSERT(begin_ns != -1);
487
488 /* Find an existing stream file group with this ID */
489 ctf_fs_ds_file_group *ds_file_group = NULL;
490 for (const auto& candidate : ctf_fs_trace->ds_file_groups) {
491 if (candidate->sc == sc && candidate->stream_id == stream_instance_id) {
492 ds_file_group = candidate.get();
493 break;
494 }
495 }
496
497 ctf_fs_ds_file_group::UP new_ds_file_group;
498
499 if (!ds_file_group) {
500 new_ds_file_group =
501 ctf_fs_ds_file_group_create(ctf_fs_trace, sc, stream_instance_id, std::move(*index));
502 if (!new_ds_file_group) {
503 return -1;
504 }
505
506 ds_file_group = new_ds_file_group.get();
507 ctf_fs_trace->ds_file_groups.emplace_back(std::move(new_ds_file_group));
508 } else {
509 merge_ctf_fs_ds_indexes(&ds_file_group->index, *index);
510 }
511
512 ds_file_group->insert_ds_file_info_sorted(std::move(ds_file_info));
513
514 return 0;
515 }
516
517 static int create_ds_file_groups(struct ctf_fs_trace *ctf_fs_trace)
518 {
519 /* Check each file in the path directory, except specific ones */
520 GError *error = NULL;
521 const bt2c::GDirUP dir {g_dir_open(ctf_fs_trace->path.c_str(), 0, &error)};
522 if (!dir) {
523 BT_CPPLOGE_APPEND_CAUSE_SPEC(ctf_fs_trace->logger,
524 "Cannot open directory `{}`: {} (code {})", ctf_fs_trace->path,
525 error->message, error->code);
526 if (error) {
527 g_error_free(error);
528 }
529 return -1;
530 }
531
532 while (const char *basename = g_dir_read_name(dir.get())) {
533 if (strcmp(basename, CTF_FS_METADATA_FILENAME) == 0) {
534 /* Ignore the metadata stream. */
535 BT_CPPLOGI_SPEC(ctf_fs_trace->logger,
536 "Ignoring metadata file `{}" G_DIR_SEPARATOR_S "{}`",
537 ctf_fs_trace->path, basename);
538 continue;
539 }
540
541 if (basename[0] == '.') {
542 BT_CPPLOGI_SPEC(ctf_fs_trace->logger,
543 "Ignoring hidden file `{}" G_DIR_SEPARATOR_S "{}`", ctf_fs_trace->path,
544 basename);
545 continue;
546 }
547
548 /* Create the file. */
549 ctf_fs_file file {ctf_fs_trace->logger};
550
551 /* Create full path string. */
552 file.path = fmt::format("{}" G_DIR_SEPARATOR_S "{}", ctf_fs_trace->path, basename);
553
554 if (!g_file_test(file.path.c_str(), G_FILE_TEST_IS_REGULAR)) {
555 BT_CPPLOGI_SPEC(ctf_fs_trace->logger, "Ignoring non-regular file `{}`", file.path);
556 continue;
557 }
558
559 int ret = ctf_fs_file_open(&file, "rb");
560 if (ret) {
561 BT_CPPLOGE_APPEND_CAUSE_SPEC(ctf_fs_trace->logger, "Cannot open stream file `{}`",
562 file.path);
563 return ret;
564 }
565
566 if (file.size == 0) {
567 /* Skip empty stream. */
568 BT_CPPLOGI_SPEC(ctf_fs_trace->logger, "Ignoring empty file `{}`", file.path);
569 continue;
570 }
571
572 ret = add_ds_file_to_ds_file_group(ctf_fs_trace, file.path.c_str());
573 if (ret) {
574 BT_CPPLOGE_APPEND_CAUSE_SPEC(ctf_fs_trace->logger,
575 "Cannot add stream file `{}` to stream file group",
576 file.path);
577 return ret;
578 }
579 }
580
581 return 0;
582 }
583
584 static int set_trace_name(bt_trace *trace, const char *name_suffix)
585 {
586 std::string name;
587
588 /*
589 * Check if we have a trace environment string value named `hostname`.
590 * If so, use it as the trace name's prefix.
591 */
592 const bt_value *val = bt_trace_borrow_environment_entry_value_by_name_const(trace, "hostname");
593 if (val && bt_value_is_string(val)) {
594 name += bt_value_string_get(val);
595
596 if (name_suffix) {
597 name += G_DIR_SEPARATOR;
598 }
599 }
600
601 if (name_suffix) {
602 name += name_suffix;
603 }
604
605 return bt_trace_set_name(trace, name.c_str());
606 }
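/*
 * For illustration (hypothetical values): with a `hostname` environment
 * entry of "myhost" and a name suffix of "my-session", the trace name
 * becomes "myhost/my-session" (with '/' standing for G_DIR_SEPARATOR);
 * without a `hostname` entry, it is simply "my-session".
 */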
607
608 static ctf_fs_trace::UP ctf_fs_trace_create(const char *path, const char *name,
609 const ctf::src::ClkClsCfg& clkClsCfg,
610 bt_self_component *selfComp,
611 const bt2c::Logger& parentLogger)
612 {
613 ctf_fs_trace::UP ctf_fs_trace = bt2s::make_unique<struct ctf_fs_trace>(parentLogger);
614 ctf_fs_trace->path = path;
615 ctf_fs_trace->metadata = bt2s::make_unique<ctf_fs_metadata>();
616
617 int ret = ctf_fs_metadata_set_trace_class(selfComp, ctf_fs_trace.get(), clkClsCfg);
618 if (ret) {
619 return nullptr;
620 }
621
622 if (ctf_fs_trace->metadata->trace_class) {
623 bt_trace *trace = bt_trace_create(ctf_fs_trace->metadata->trace_class->libObjPtr());
624 if (!trace) {
625 return nullptr;
626 }
627
628 ctf_fs_trace->trace = bt2::Trace::Shared::createWithoutRef(trace);
629 }
630
631 if (ctf_fs_trace->trace) {
632 ret = ctf_trace_class_configure_ir_trace(ctf_fs_trace->metadata->tc,
633 ctf_fs_trace->trace->libObjPtr());
634 if (ret) {
635 return nullptr;
636 }
637
638 ret = set_trace_name(ctf_fs_trace->trace->libObjPtr(), name);
639 if (ret) {
640 return nullptr;
641 }
642 }
643
644 ret = create_ds_file_groups(ctf_fs_trace.get());
645 if (ret) {
646 return nullptr;
647 }
648
649 return ctf_fs_trace;
650 }
651
652 static int path_is_ctf_trace(const char *path)
653 {
654 return g_file_test(fmt::format("{}" G_DIR_SEPARATOR_S CTF_FS_METADATA_FILENAME, path).c_str(),
655 G_FILE_TEST_IS_REGULAR);
656 }
657
658 /* Helper for ctf_fs_component_create_ctf_fs_trace, to handle a single path. */
659
660 static int ctf_fs_component_create_ctf_fs_trace_one_path(struct ctf_fs_component *ctf_fs,
661 const char *path_param,
662 const char *trace_name,
663 std::vector<ctf_fs_trace::UP>& traces,
664 bt_self_component *selfComp)
665 {
666 bt2c::GStringUP norm_path {bt_common_normalize_path(path_param, NULL)};
667 if (!norm_path) {
668 BT_CPPLOGE_APPEND_CAUSE_SPEC(ctf_fs->logger, "Failed to normalize path: `{}`.", path_param);
669 return -1;
670 }
671
672 int ret = path_is_ctf_trace(norm_path->str);
673 if (ret < 0) {
674 BT_CPPLOGE_APPEND_CAUSE_SPEC(
675 ctf_fs->logger, "Failed to check if path is a CTF trace: path={}", norm_path->str);
676 return ret;
677 } else if (ret == 0) {
678 BT_CPPLOGE_APPEND_CAUSE_SPEC(
679 ctf_fs->logger, "Path is not a CTF trace (does not contain a metadata file): `{}`.",
680 norm_path->str);
681 return -1;
682 }
683
684 // FIXME: Remove or ifdef for __MINGW32__
685 if (strcmp(norm_path->str, "/") == 0) {
686 BT_CPPLOGE_APPEND_CAUSE_SPEC(ctf_fs->logger, "Opening a trace in `/` is not supported.");
687 return -1;
688 }
689
690 ctf_fs_trace::UP ctf_fs_trace = ctf_fs_trace_create(
691 norm_path->str, trace_name, ctf_fs->clkClsCfg, selfComp, ctf_fs->logger);
692 if (!ctf_fs_trace) {
693 BT_CPPLOGE_APPEND_CAUSE_SPEC(ctf_fs->logger, "Cannot create trace for `{}`.",
694 norm_path->str);
695 return -1;
696 }
697
698 traces.emplace_back(std::move(ctf_fs_trace));
699
700 return 0;
701 }
702
703 /*
704 * Count the number of stream and event classes defined by this trace's metadata.
705 *
706 * This is used to determine which metadata is the "latest", out of multiple
707 * traces sharing the same UUID. It is assumed that, among all these metadata,
708 * a larger metadata is a superset of a smaller one. Therefore, it is
709 * enough to just count the classes.
710 */
711
712 static unsigned int metadata_count_stream_and_event_classes(struct ctf_fs_trace *trace)
713 {
714 unsigned int num = trace->metadata->tc->stream_classes->len;
715
716 for (guint i = 0; i < trace->metadata->tc->stream_classes->len; i++) {
717 struct ctf_stream_class *sc =
718 (struct ctf_stream_class *) trace->metadata->tc->stream_classes->pdata[i];
719 num += sc->event_classes->len;
720 }
721
722 return num;
723 }
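/*
 * For example (hypothetical counts): a trace class with two stream
 * classes defining 10 and 15 event classes respectively yields
 * 2 + 10 + 15 = 27.
 */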
724
725 /*
726 * Merge the src ds_file_group into dest. This consists of merging their
727 * ds_file_infos, making sure to keep the result sorted.
728 */
729
730 static void merge_ctf_fs_ds_file_groups(struct ctf_fs_ds_file_group *dest,
731 ctf_fs_ds_file_group::UP src)
732 {
733 for (auto& ds_file_info : src->ds_file_infos) {
734 dest->insert_ds_file_info_sorted(std::move(ds_file_info));
735 }
736
737 /* Merge both indexes. */
738 merge_ctf_fs_ds_indexes(&dest->index, src->index);
739 }
740
741 /* Merge src_trace's data stream file groups into dest_trace's. */
742
743 static int merge_matching_ctf_fs_ds_file_groups(struct ctf_fs_trace *dest_trace,
744 ctf_fs_trace::UP src_trace)
745 {
746 std::vector<ctf_fs_ds_file_group::UP>& dest = dest_trace->ds_file_groups;
747 std::vector<ctf_fs_ds_file_group::UP>& src = src_trace->ds_file_groups;
748
749 /*
750 * Save the initial length of dest: we only want to check against the
751 * original elements in the inner loop.
752 */
753 size_t dest_len = dest.size();
754
755 for (auto& src_group : src) {
756 struct ctf_fs_ds_file_group *dest_group = NULL;
757
758 /* A stream instance without ID can't match a stream in the other trace. */
759 if (src_group->stream_id != -1) {
760 /* Let's search for a matching ds_file_group in the destination. */
761 for (size_t d_i = 0; d_i < dest_len; ++d_i) {
762 ctf_fs_ds_file_group *candidate_dest = dest[d_i].get();
763
764 /* Can't match a stream instance without ID. */
765 if (candidate_dest->stream_id == -1) {
766 continue;
767 }
768
769 /*
770 * If the two groups have the same stream instance id
771 * and belong to the same stream class (stream instance
772 * ids are per-stream class), they represent the same
773 * stream instance.
774 */
775 if (candidate_dest->stream_id != src_group->stream_id ||
776 candidate_dest->sc->id != src_group->sc->id) {
777 continue;
778 }
779
780 dest_group = candidate_dest;
781 break;
782 }
783 }
784
785 /*
786 * Didn't find a friend in dest to merge our src_group into?
787 * Create a new empty one. This can happen if a stream was
788 * active in the source trace chunk but not in the destination
789 * trace chunk.
790 */
791 if (!dest_group) {
792 ctf_stream_class *sc = ctf_trace_class_borrow_stream_class_by_id(
793 dest_trace->metadata->tc, src_group->sc->id);
794 BT_ASSERT(sc);
795
796 auto new_dest_group =
797 ctf_fs_ds_file_group_create(dest_trace, sc, src_group->stream_id, {});
798
799 if (!new_dest_group) {
800 return -1;
801 }
802
803 dest_group = new_dest_group.get();
804 dest_trace->ds_file_groups.emplace_back(std::move(new_dest_group));
805 }
806
807 BT_ASSERT(dest_group);
808 merge_ctf_fs_ds_file_groups(dest_group, std::move(src_group));
809 }
810
811 return 0;
812 }
813
814 /*
815 * Collapse the given traces, which must all share the same UUID, in a single
816 * one.
817 *
818 * The trace with the largest metadata is chosen and all other traces
819 * are merged into that one. On return, the elements of `traces` are nullptr
820 * and the merged trace is placed in `out_trace`.
821 */
822
823 static int merge_ctf_fs_traces(std::vector<ctf_fs_trace::UP> traces, ctf_fs_trace::UP& out_trace)
824 {
825 BT_ASSERT(traces.size() >= 2);
826
827 unsigned int winner_count = metadata_count_stream_and_event_classes(traces[0].get());
828 ctf_fs_trace *winner = traces[0].get();
829 guint winner_i = 0;
830
831 /* Find the trace with the largest metadata. */
832 for (guint i = 1; i < traces.size(); i++) {
833 ctf_fs_trace *candidate = traces[i].get();
834 unsigned int candidate_count;
835
836 /* A bit of sanity check. */
837 BT_ASSERT(bt_uuid_compare(winner->metadata->tc->uuid, candidate->metadata->tc->uuid) == 0);
838
839 candidate_count = metadata_count_stream_and_event_classes(candidate);
840
841 if (candidate_count > winner_count) {
842 winner_count = candidate_count;
843 winner = candidate;
844 winner_i = i;
845 }
846 }
847
848 /* Merge all the other traces in the winning trace. */
849 for (ctf_fs_trace::UP& trace : traces) {
850 /* Don't merge the winner into itself. */
851 if (trace.get() == winner) {
852 continue;
853 }
854
855 /* Merge trace's data stream file groups into winner's. */
856 int ret = merge_matching_ctf_fs_ds_file_groups(winner, std::move(trace));
857 if (ret) {
858 return ret;
859 }
860 }
861
862 /*
863 * Move the winner out of the array, into `*out_trace`.
864 */
865 out_trace = std::move(traces[winner_i]);
866
867 return 0;
868 }
869
870 enum target_event
871 {
872 FIRST_EVENT,
873 LAST_EVENT,
874 };
875
876 static int decode_clock_snapshot_after_event(struct ctf_fs_trace *ctf_fs_trace,
877 struct ctf_clock_class *default_cc,
878 const ctf_fs_ds_index_entry& index_entry,
879 enum target_event target_event, uint64_t *cs,
880 int64_t *ts_ns)
881 {
882 BT_ASSERT(ctf_fs_trace);
883 BT_ASSERT(index_entry.path);
884
885 const auto ds_file = ctf_fs_ds_file_create(ctf_fs_trace, bt2::Stream::Shared {},
886 index_entry.path, ctf_fs_trace->logger);
887 if (!ds_file) {
888 BT_CPPLOGE_APPEND_CAUSE_SPEC(ctf_fs_trace->logger, "Failed to create a ctf_fs_ds_file");
889 return -1;
890 }
891
892 BT_ASSERT(ctf_fs_trace->metadata);
893 BT_ASSERT(ctf_fs_trace->metadata->tc);
894
895 ctf_msg_iter_up msg_iter = ctf_msg_iter_create(
896 ctf_fs_trace->metadata->tc,
897 bt_common_get_page_size(static_cast<int>(ctf_fs_trace->logger.level())) * 8,
899 ctf_fs_ds_file_medops, ds_file.get(), NULL, ctf_fs_trace->logger);
900 if (!msg_iter) {
901 /* ctf_msg_iter_create() logs errors. */
902 return -1;
903 }
904
905 /*
906 * Turn on dry run mode to prevent the creation and usage of Babeltrace
907 * library objects (bt_field, bt_message_*, etc.).
908 */
909 ctf_msg_iter_set_dry_run(msg_iter.get(), true);
910
911 /* Seek to the beginning of the target packet. */
912 enum ctf_msg_iter_status iter_status =
913 ctf_msg_iter_seek(msg_iter.get(), index_entry.offset.bytes());
914 if (iter_status) {
915 /* ctf_msg_iter_seek() logs errors. */
916 return -1;
917 }
918
919 switch (target_event) {
920 case FIRST_EVENT:
921 /*
922 * Decode the packet until we reach the end of the
923 * first event, to extract the first event's clock
924 * snapshot.
925 */
926 iter_status = ctf_msg_iter_curr_packet_first_event_clock_snapshot(msg_iter.get(), cs);
927 break;
928 case LAST_EVENT:
929 /* Decode the packet to extract the last event's clock snapshot. */
930 iter_status = ctf_msg_iter_curr_packet_last_event_clock_snapshot(msg_iter.get(), cs);
931 break;
932 default:
933 bt_common_abort();
934 }
935 if (iter_status) {
936 return -1;
937 }
938
939 /* Convert clock snapshot to timestamp. */
940 int ret = bt_util_clock_cycles_to_ns_from_origin(
941 *cs, default_cc->frequency, default_cc->offset_seconds, default_cc->offset_cycles, ts_ns);
942 if (ret) {
943 BT_CPPLOGE_APPEND_CAUSE_SPEC(ctf_fs_trace->logger,
944 "Failed to convert clock snapshot to timestamp");
945 return ret;
946 }
947
948 return 0;
949 }
950
951 static int decode_packet_first_event_timestamp(struct ctf_fs_trace *ctf_fs_trace,
952 struct ctf_clock_class *default_cc,
953 const ctf_fs_ds_index_entry& index_entry,
954 uint64_t *cs, int64_t *ts_ns)
955 {
956 return decode_clock_snapshot_after_event(ctf_fs_trace, default_cc, index_entry, FIRST_EVENT, cs,
957 ts_ns);
958 }
959
960 static int decode_packet_last_event_timestamp(struct ctf_fs_trace *ctf_fs_trace,
961 struct ctf_clock_class *default_cc,
962 const ctf_fs_ds_index_entry& index_entry,
963 uint64_t *cs, int64_t *ts_ns)
964 {
965 return decode_clock_snapshot_after_event(ctf_fs_trace, default_cc, index_entry, LAST_EVENT, cs,
966 ts_ns);
967 }
968
969 /*
970 * Fix up packet index entries for lttng's "event-after-packet" bug.
971 * Some buggy lttng tracer versions may emit events with a timestamp that is
972 * larger (after) than the timestamp_end of their packets.
973 *
974 * To fix up this erroneous data we do the following:
975 * 1. If it's not the stream file's last packet: set the packet index entry's
976 * end time to the next packet's beginning time.
977 * 2. If it's the stream file's last packet, set the packet index entry's end
978 * time to the packet's last event's time, if any, or to the packet's
979 * beginning time otherwise.
980 *
981 * Known buggy tracer versions:
982 * - before lttng-ust 2.11.0
983 * - before lttng-module 2.11.0
984 * - before lttng-module 2.10.10
985 * - before lttng-module 2.9.13
986 */
987 static int fix_index_lttng_event_after_packet_bug(struct ctf_fs_trace *trace)
988 {
989 for (const auto& ds_file_group : trace->ds_file_groups) {
990 BT_ASSERT(ds_file_group);
991 auto& index = ds_file_group->index;
992
993 BT_ASSERT(!index.entries.empty());
994
995 /*
996 * Iterate over all entries but the last one. The last one is
997 * fixed differently after.
998 */
999 for (size_t entry_i = 0; entry_i < index.entries.size() - 1; ++entry_i) {
1000 auto& curr_entry = index.entries[entry_i];
1001 const auto& next_entry = index.entries[entry_i + 1];
1002
1003 /*
1004 * 1. Set the current index entry `end` timestamp to
1005 * the next index entry `begin` timestamp.
1006 */
1007 curr_entry.timestamp_end = next_entry.timestamp_begin;
1008 curr_entry.timestamp_end_ns = next_entry.timestamp_begin_ns;
1009 }
1010
1011 /*
1012 * 2. Fix the last entry by decoding the last event of the last
1013 * packet.
1014 */
1015 auto& last_entry = index.entries.back();
1016
1017 BT_ASSERT(ds_file_group->sc->default_clock_class);
1018 ctf_clock_class *default_cc = ds_file_group->sc->default_clock_class;
1019
1020 /*
1021 * Decode packet to read the timestamp of the last event of the
1022 * entry.
1023 */
1024 int ret = decode_packet_last_event_timestamp(
1025 trace, default_cc, last_entry, &last_entry.timestamp_end, &last_entry.timestamp_end_ns);
1026 if (ret) {
1027 BT_CPPLOGE_APPEND_CAUSE_SPEC(
1028 trace->logger,
1029 "Failed to decode stream's last packet to get its last event's clock snapshot.");
1030 return ret;
1031 }
1032 }
1033
1034 return 0;
1035 }
1036
1037 /*
1038 * Fix up packet index entries for barectf's "event-before-packet" bug.
1039 * Some buggy barectf tracer versions may emit events with a timestamp that is
1040 * less than the timestamp_begin of their packets.
1041 *
1042 * To fix up this erroneous data we do the following:
1043 * 1. Starting at the second index entry, set the timestamp_begin of the
1044 * current entry to the timestamp of the first event of the packet.
1045 * 2. Set the previous entry's timestamp_end to the timestamp_begin of the
1046 * current packet.
1047 *
1048 * Known buggy tracer versions:
1049 * - before barectf 2.3.1
1050 */
1051 static int fix_index_barectf_event_before_packet_bug(struct ctf_fs_trace *trace)
1052 {
1053 for (const auto& ds_file_group : trace->ds_file_groups) {
1054 auto& index = ds_file_group->index;
1055
1056 BT_ASSERT(!index.entries.empty());
1057
1058 BT_ASSERT(ds_file_group->sc->default_clock_class);
1059 ctf_clock_class *default_cc = ds_file_group->sc->default_clock_class;
1060
1061 /*
1062 * 1. Iterate over the index, starting from the second entry
1063 * (index = 1).
1064 */
1065 for (size_t entry_i = 1; entry_i < index.entries.size(); ++entry_i) {
1066 auto& prev_entry = index.entries[entry_i - 1];
1067 auto& curr_entry = index.entries[entry_i];
1068 /*
1069 * 2. Set the current entry `begin` timestamp to the
1070 * timestamp of the first event of the current packet.
1071 */
1072 int ret = decode_packet_first_event_timestamp(trace, default_cc, curr_entry,
1073 &curr_entry.timestamp_begin,
1074 &curr_entry.timestamp_begin_ns);
1075 if (ret) {
1076 BT_CPPLOGE_APPEND_CAUSE_SPEC(trace->logger,
1077 "Failed to decode first event's clock snapshot");
1078 return ret;
1079 }
1080
1081 /*
1082 * 3. Set the previous entry `end` timestamp to the
1083 * timestamp of the first event of the current packet.
1084 */
1085 prev_entry.timestamp_end = curr_entry.timestamp_begin;
1086 prev_entry.timestamp_end_ns = curr_entry.timestamp_begin_ns;
1087 }
1088 }
1089
1090 return 0;
1091 }
1092
1093 /*
1094 * When using the lttng-crash feature it's likely that the last packets of each
1095 * stream have their timestamp_end set to zero. This is caused by the fact that
1096 * the tracer crashed and was not able to properly close the packets.
1097 *
1098 * To fix up this erroneous data we do the following:
1099 * For each index entry, if the entry's timestamp_end is 0 and the
1100 * timestamp_begin is not 0:
1101 * - If it's the stream file's last packet: set the packet index entry's end
1102 * time to the packet's last event's time, if any, or to the packet's
1103 * beginning time otherwise.
1104 * - If it's not the stream file's last packet: set the packet index
1105 * entry's end time to the next packet's beginning time.
1106 *
1107 * Affected versions:
1108 * - All current and future lttng-ust and lttng-modules versions.
1109 */
1110 static int fix_index_lttng_crash_quirk(struct ctf_fs_trace *trace)
1111 {
1112 for (const auto& ds_file_group : trace->ds_file_groups) {
1113 struct ctf_clock_class *default_cc;
1114
1115 BT_ASSERT(ds_file_group);
1116 auto& index = ds_file_group->index;
1117
1118 BT_ASSERT(ds_file_group->sc->default_clock_class);
1119 default_cc = ds_file_group->sc->default_clock_class;
1120
1121 BT_ASSERT(!index.entries.empty());
1122
1123 auto& last_entry = index.entries.back();
1124
1125 /* 1. Fix the last entry first. */
1126 if (last_entry.timestamp_end == 0 && last_entry.timestamp_begin != 0) {
1127 /*
1128 * Decode packet to read the timestamp of the
1129 * last event of the stream file.
1130 */
1131 int ret = decode_packet_last_event_timestamp(trace, default_cc, last_entry,
1132 &last_entry.timestamp_end,
1133 &last_entry.timestamp_end_ns);
1134 if (ret) {
1135 BT_CPPLOGE_APPEND_CAUSE_SPEC(trace->logger,
1136 "Failed to decode last event's clock snapshot");
1137 return ret;
1138 }
1139 }
1140
1141 /* Iterate over all entries but the last one. */
1142 for (size_t entry_idx = 0; entry_idx < index.entries.size() - 1; ++entry_idx) {
1143 auto& curr_entry = index.entries[entry_idx];
1144 const auto& next_entry = index.entries[entry_idx + 1];
1145
1146 if (curr_entry.timestamp_end == 0 && curr_entry.timestamp_begin != 0) {
1147 /*
1148 * 2. Set the current index entry `end` timestamp to
1149 * the next index entry `begin` timestamp.
1150 */
1151 curr_entry.timestamp_end = next_entry.timestamp_begin;
1152 curr_entry.timestamp_end_ns = next_entry.timestamp_begin_ns;
1153 }
1154 }
1155 }
1156
1157 return 0;
1158 }
1159
1160 /*
1161 * Extract the tracer information necessary to compare versions.
1162 * Returns 0 on success, and -1 if the extraction is not successful because the
1163 * necessary fields are absent from the trace metadata.
1164 */
1165 static int extract_tracer_info(struct ctf_fs_trace *trace, struct tracer_info *current_tracer_info)
1166 {
1167 /* Clear the current_tracer_info struct */
1168 memset(current_tracer_info, 0, sizeof(*current_tracer_info));
1169
1170 /*
1171 * To compare two tracer versions, at least the tracer name and its
1172 * major version are needed. If one of these is missing, consider it an
1173 * extraction failure.
1174 */
1175 ctf_trace_class_env_entry *entry =
1176 ctf_trace_class_borrow_env_entry_by_name(trace->metadata->tc, "tracer_name");
1177 if (!entry || entry->type != CTF_TRACE_CLASS_ENV_ENTRY_TYPE_STR) {
1178 return -1;
1179 }
1180
1181 /* Set tracer name. */
1182 current_tracer_info->name = entry->value.str->str;
1183
1184 entry = ctf_trace_class_borrow_env_entry_by_name(trace->metadata->tc, "tracer_major");
1185 if (!entry || entry->type != CTF_TRACE_CLASS_ENV_ENTRY_TYPE_INT) {
1186 return -1;
1187 }
1188
1189 /* Set major version number. */
1190 current_tracer_info->major = entry->value.i;
1191
1192 entry = ctf_trace_class_borrow_env_entry_by_name(trace->metadata->tc, "tracer_minor");
1193 if (!entry || entry->type != CTF_TRACE_CLASS_ENV_ENTRY_TYPE_INT) {
1194 return 0;
1195 }
1196
1197 /* Set minor version number. */
1198 current_tracer_info->minor = entry->value.i;
1199
1200 entry = ctf_trace_class_borrow_env_entry_by_name(trace->metadata->tc, "tracer_patch");
1201 if (!entry) {
1202 /*
1203 * If `tracer_patch` doesn't exist `tracer_patchlevel` might.
1204 * For example, `lttng-modules` uses entry name
1205 * `tracer_patchlevel`.
1206 */
1207 entry = ctf_trace_class_borrow_env_entry_by_name(trace->metadata->tc, "tracer_patchlevel");
1208 }
1209
1210 if (!entry || entry->type != CTF_TRACE_CLASS_ENV_ENTRY_TYPE_INT) {
1211 return 0;
1212 }
1213
1214 /* Set patch version number. */
1215 current_tracer_info->patch = entry->value.i;
1216
1217 return 0;
1218 }
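/*
 * For illustration (hypothetical values): a metadata environment containing
 *
 *     tracer_name = "lttng-modules"
 *     tracer_major = 2
 *     tracer_minor = 10
 *     tracer_patchlevel = 9
 *
 * yields {name = "lttng-modules", major = 2, minor = 10, patch = 9},
 * which the checks below flag as affected by the event-after-packet bug
 * (fixed in lttng-modules 2.10.10).
 */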
1219
1220 static bool is_tracer_affected_by_lttng_event_after_packet_bug(struct tracer_info *curr_tracer_info)
1221 {
1222 bool is_affected = false;
1223
1224 if (strcmp(curr_tracer_info->name, "lttng-ust") == 0) {
1225 if (curr_tracer_info->major < 2) {
1226 is_affected = true;
1227 } else if (curr_tracer_info->major == 2) {
1228 /* fixed in lttng-ust 2.11.0 */
1229 if (curr_tracer_info->minor < 11) {
1230 is_affected = true;
1231 }
1232 }
1233 } else if (strcmp(curr_tracer_info->name, "lttng-modules") == 0) {
1234 if (curr_tracer_info->major < 2) {
1235 is_affected = true;
1236 } else if (curr_tracer_info->major == 2) {
1237 /* fixed in lttng-modules 2.11.0 */
1238 if (curr_tracer_info->minor == 10) {
1239 /* fixed in lttng-modules 2.10.10 */
1240 if (curr_tracer_info->patch < 10) {
1241 is_affected = true;
1242 }
1243 } else if (curr_tracer_info->minor == 9) {
1244 /* fixed in lttng-modules 2.9.13 */
1245 if (curr_tracer_info->patch < 13) {
1246 is_affected = true;
1247 }
1248 } else if (curr_tracer_info->minor < 9) {
1249 is_affected = true;
1250 }
1251 }
1252 }
1253
1254 return is_affected;
1255 }
1256
1257 static bool
1258 is_tracer_affected_by_barectf_event_before_packet_bug(struct tracer_info *curr_tracer_info)
1259 {
1260 bool is_affected = false;
1261
1262 if (strcmp(curr_tracer_info->name, "barectf") == 0) {
1263 if (curr_tracer_info->major < 2) {
1264 is_affected = true;
1265 } else if (curr_tracer_info->major == 2) {
1266 if (curr_tracer_info->minor < 3) {
1267 is_affected = true;
1268 } else if (curr_tracer_info->minor == 3) {
1269 /* fixed in barectf 2.3.1 */
1270 if (curr_tracer_info->patch < 1) {
1271 is_affected = true;
1272 }
1273 }
1274 }
1275 }
1276
1277 return is_affected;
1278 }
1279
1280 static bool is_tracer_affected_by_lttng_crash_quirk(struct tracer_info *curr_tracer_info)
1281 {
1282 bool is_affected = false;
1283
1284 /* All LTTng tracers may be affected by this lttng-crash quirk. */
1285 if (strcmp(curr_tracer_info->name, "lttng-ust") == 0) {
1286 is_affected = true;
1287 } else if (strcmp(curr_tracer_info->name, "lttng-modules") == 0) {
1288 is_affected = true;
1289 }
1290
1291 return is_affected;
1292 }
1293
1294 /*
1295 * Look for traces produced by known buggy tracers and fix up the indexes
1296 * built earlier.
1297 */
1298 static int fix_packet_index_tracer_bugs(ctf_fs_trace *trace)
1299 {
1300 struct tracer_info current_tracer_info;
1301
1302 int ret = extract_tracer_info(trace, &current_tracer_info);
1303 if (ret) {
1304 /*
1305 * A trace may not have all the necessary environment
1306 * entries to do the tracer version comparison.
1307 * At least, the tracer name and major version number
1308 * are needed. Failing to extract these entries is not
1309 * an error.
1310 */
1311 BT_CPPLOGI_STR_SPEC(
1312 trace->logger,
1313 "Cannot extract tracer information necessary to compare with buggy versions.");
1314 return 0;
1315 }
1316
1317 /* Check if the trace may be affected by old tracer bugs. */
1318 if (is_tracer_affected_by_lttng_event_after_packet_bug(&current_tracer_info)) {
1319 BT_CPPLOGI_STR_SPEC(
1320 trace->logger,
1321 "Trace may be affected by LTTng tracer packet timestamp bug. Fixing up.");
1322 ret = fix_index_lttng_event_after_packet_bug(trace);
1323 if (ret) {
1324 BT_CPPLOGE_APPEND_CAUSE_SPEC(trace->logger,
1325 "Failed to fix LTTng event-after-packet bug.");
1326 return ret;
1327 }
1328 trace->metadata->tc->quirks.lttng_event_after_packet = true;
1329 }
1330
1331 if (is_tracer_affected_by_barectf_event_before_packet_bug(&current_tracer_info)) {
1332 BT_CPPLOGI_STR_SPEC(
1333 trace->logger,
1334 "Trace may be affected by barectf tracer packet timestamp bug. Fixing up.");
1335 ret = fix_index_barectf_event_before_packet_bug(trace);
1336 if (ret) {
1337 BT_CPPLOGE_APPEND_CAUSE_SPEC(trace->logger,
1338 "Failed to fix barectf event-before-packet bug.");
1339 return ret;
1340 }
1341 trace->metadata->tc->quirks.barectf_event_before_packet = true;
1342 }
1343
1344 if (is_tracer_affected_by_lttng_crash_quirk(&current_tracer_info)) {
1345 ret = fix_index_lttng_crash_quirk(trace);
1346 if (ret) {
1347 BT_CPPLOGE_APPEND_CAUSE_SPEC(trace->logger,
1348 "Failed to fix lttng-crash timestamp quirks.");
1349 return ret;
1350 }
1351 trace->metadata->tc->quirks.lttng_crash = true;
1352 }
1353
1354 return 0;
1355 }
1356
1357 static bool compare_ds_file_groups_by_first_path(const ctf_fs_ds_file_group::UP& ds_file_group_a,
1358 const ctf_fs_ds_file_group::UP& ds_file_group_b)
1359 {
1360 BT_ASSERT(!ds_file_group_a->ds_file_infos.empty());
1361 BT_ASSERT(!ds_file_group_b->ds_file_infos.empty());
1362
1363 const auto& first_ds_file_info_a = *ds_file_group_a->ds_file_infos[0];
1364 const auto& first_ds_file_info_b = *ds_file_group_b->ds_file_infos[0];
1365
1366 return first_ds_file_info_a.path < first_ds_file_info_b.path;
1367 }
1368
1369 int ctf_fs_component_create_ctf_fs_trace(struct ctf_fs_component *ctf_fs,
1370 const bt_value *paths_value,
1371 const bt_value *trace_name_value,
1372 bt_self_component *selfComp)
1373 {
1374 std::vector<std::string> paths;
1375
1376 BT_ASSERT(bt_value_get_type(paths_value) == BT_VALUE_TYPE_ARRAY);
1377 BT_ASSERT(!bt_value_array_is_empty(paths_value));
1378
1379 const char *trace_name = trace_name_value ? bt_value_string_get(trace_name_value) : NULL;
1380
1381 /*
1382 * Create a sorted array of the paths, to make the execution of this
1383 * component deterministic.
1384 */
1385 for (std::uint64_t i = 0; i < bt_value_array_get_length(paths_value); i++) {
1386 const bt_value *path_value = bt_value_array_borrow_element_by_index_const(paths_value, i);
1387 const char *input = bt_value_string_get(path_value);
1388 paths.emplace_back(input);
1389 }
1390
1391 std::sort(paths.begin(), paths.end());
1392
1393 /* Create a separate ctf_fs_trace object for each path. */
1394 std::vector<ctf_fs_trace::UP> traces;
1395 for (const auto& path : paths) {
1396 int ret = ctf_fs_component_create_ctf_fs_trace_one_path(ctf_fs, path.c_str(), trace_name,
1397 traces, selfComp);
1398 if (ret) {
1399 return ret;
1400 }
1401 }
1402
1403 if (traces.size() > 1) {
1404 ctf_fs_trace *first_trace = traces[0].get();
1405 const uint8_t *first_trace_uuid = first_trace->metadata->tc->uuid;
1406
1407 /*
1408 * We have more than one trace, they must all share the same
1409 * UUID, verify that.
1410 */
1411 for (size_t i = 0; i < traces.size(); i++) {
1412 ctf_fs_trace *this_trace = traces[i].get();
1413 const uint8_t *this_trace_uuid = this_trace->metadata->tc->uuid;
1414
1415 if (!this_trace->metadata->tc->is_uuid_set) {
1416 BT_CPPLOGE_APPEND_CAUSE_SPEC(
1417 ctf_fs->logger,
1418 "Multiple traces given, but a trace does not have a UUID: path={}",
1419 this_trace->path);
1420 return -1;
1421 }
1422
1423 if (bt_uuid_compare(first_trace_uuid, this_trace_uuid) != 0) {
1424 char first_trace_uuid_str[BT_UUID_STR_LEN + 1];
1425 char this_trace_uuid_str[BT_UUID_STR_LEN + 1];
1426
1427 bt_uuid_to_str(first_trace_uuid, first_trace_uuid_str);
1428 bt_uuid_to_str(this_trace_uuid, this_trace_uuid_str);
1429
1430 BT_CPPLOGE_APPEND_CAUSE_SPEC(ctf_fs->logger,
1431 "Multiple traces given, but UUIDs don't match: "
1432 "first-trace-uuid={}, first-trace-path={}, "
1433 "trace-uuid={}, trace-path={}",
1434 first_trace_uuid_str, first_trace->path,
1435 this_trace_uuid_str, this_trace->path);
1436 return -1;
1437 }
1438 }
1439
1440 int ret = merge_ctf_fs_traces(std::move(traces), ctf_fs->trace);
1441 if (ret) {
1442 BT_CPPLOGE_APPEND_CAUSE_SPEC(ctf_fs->logger,
1443 "Failed to merge traces with the same UUID.");
1444 return ret;
1445 }
1446 } else {
1447 /* Just one trace, it may or may not have a UUID, both are fine. */
1448 ctf_fs->trace = std::move(traces[0]);
1449 }
1450
1451 int ret = fix_packet_index_tracer_bugs(ctf_fs->trace.get());
1452 if (ret) {
1453 BT_CPPLOGE_APPEND_CAUSE_SPEC(ctf_fs->logger, "Failed to fix packet index tracer bugs.");
1454 return ret;
1455 }
1456
1457 /*
1458 * Sort data stream file groups by first data stream file info
1459 * path to get a deterministic order. This order influences the
1460 * order of the output ports. It also influences the order of
1461 * the automatic stream IDs if the trace's packet headers do not
1462 * contain a `stream_instance_id` field, in which case the data
1463 * stream file to stream ID association is always the same,
1464 * whatever the build and the system.
1465 *
1466 * Having a deterministic order here can help debugging and
1467 * testing.
1468 */
1469 std::sort(ctf_fs->trace->ds_file_groups.begin(), ctf_fs->trace->ds_file_groups.end(),
1470 compare_ds_file_groups_by_first_path);
1471
1472 return 0;
1473 }
1474
1475 static const std::string&
1476 get_stream_instance_unique_name(struct ctf_fs_ds_file_group *ds_file_group)
1477 {
1478 /*
1479 * The first (earliest) stream file's path is used as the stream's unique
1480 * name.
1481 */
1482 BT_ASSERT(!ds_file_group->ds_file_infos.empty());
1483 return ds_file_group->ds_file_infos[0]->path;
1484 }
1485
1486 /* Create the IR stream objects for ctf_fs_trace. */
1487
1488 static int create_streams_for_trace(struct ctf_fs_trace *ctf_fs_trace)
1489 {
1490 for (const auto& ds_file_group : ctf_fs_trace->ds_file_groups) {
1491 const std::string& name = get_stream_instance_unique_name(ds_file_group.get());
1492
1493 BT_ASSERT(ds_file_group->sc->ir_sc);
1494 BT_ASSERT(ctf_fs_trace->trace);
1495
1496 bt_stream *stream;
1497
1498 if (ds_file_group->stream_id == UINT64_C(-1)) {
1499 /* No stream ID: use the trace's next automatic stream ID. */
1500 stream =
1501 bt_stream_create_with_id(ds_file_group->sc->ir_sc, ctf_fs_trace->trace->libObjPtr(),
1502 ctf_fs_trace->next_stream_id);
1503 ctf_fs_trace->next_stream_id++;
1504 } else {
1505 /* Specific stream ID */
1506 stream =
1507 bt_stream_create_with_id(ds_file_group->sc->ir_sc, ctf_fs_trace->trace->libObjPtr(),
1508 (uint64_t) ds_file_group->stream_id);
1509 }
1510
1511 if (!stream) {
1512 BT_CPPLOGE_APPEND_CAUSE_SPEC(ctf_fs_trace->logger,
1513 "Cannot create stream for DS file group: "
1514 "addr={}, stream-name=\"{}\"",
1515 fmt::ptr(ds_file_group), name);
1516 return -1;
1517 }
1518
1519 ds_file_group->stream = bt2::Stream::Shared::createWithoutRef(stream);
1520
1521 int ret = bt_stream_set_name(ds_file_group->stream->libObjPtr(), name.c_str());
1522 if (ret) {
1523 BT_CPPLOGE_APPEND_CAUSE_SPEC(ctf_fs_trace->logger,
1524 "Cannot set stream's name: "
1525 "addr={}, stream-name=\"{}\"",
1526 fmt::ptr(ds_file_group->stream->libObjPtr()), name);
1527 return ret;
1528 }
1529 }
1530
1531 return 0;
1532 }
1533
1534 static const bt_param_validation_value_descr inputs_elem_descr =
1535 bt_param_validation_value_descr::makeString();
1536
1537 static bt_param_validation_map_value_entry_descr fs_params_entries_descr[] = {
1538 {"inputs", BT_PARAM_VALIDATION_MAP_VALUE_ENTRY_MANDATORY,
1539 bt_param_validation_value_descr::makeArray(1, BT_PARAM_VALIDATION_INFINITE,
1540 inputs_elem_descr)},
1541 {"trace-name", BT_PARAM_VALIDATION_MAP_VALUE_ENTRY_OPTIONAL,
1542 bt_param_validation_value_descr::makeString()},
1543 {"clock-class-offset-s", BT_PARAM_VALIDATION_MAP_VALUE_ENTRY_OPTIONAL,
1544 bt_param_validation_value_descr::makeSignedInteger()},
1545 {"clock-class-offset-ns", BT_PARAM_VALIDATION_MAP_VALUE_ENTRY_OPTIONAL,
1546 bt_param_validation_value_descr::makeSignedInteger()},
1547 {"force-clock-class-origin-unix-epoch", BT_PARAM_VALIDATION_MAP_VALUE_ENTRY_OPTIONAL,
1548 bt_param_validation_value_descr::makeBool()},
1549 BT_PARAM_VALIDATION_MAP_VALUE_ENTRY_END};
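/*
 * Example of a parameter map accepted by the descriptor above
 * (hypothetical paths and values):
 *
 *     inputs = ["/path/to/trace/chunk-1", "/path/to/trace/chunk-2"]
 *     trace-name = "my-trace"
 *     clock-class-offset-s = 5
 *     clock-class-offset-ns = -250
 *     force-clock-class-origin-unix-epoch = true
 *
 * Only `inputs` is mandatory; all other entries are optional.
 */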
1550
1551 bool read_src_fs_parameters(const bt_value *params, const bt_value **inputs,
1552 const bt_value **trace_name, struct ctf_fs_component *ctf_fs)
1553 {
1554 gchar *error = NULL;
1555 bt_param_validation_status validate_value_status =
1556 bt_param_validation_validate(params, fs_params_entries_descr, &error);
1557 if (validate_value_status != BT_PARAM_VALIDATION_STATUS_OK) {
1558 BT_CPPLOGE_APPEND_CAUSE_SPEC(ctf_fs->logger, "{}", error);
1559 g_free(error);
1560 return false;
1561 }
1562
1563 /* inputs parameter */
1564 *inputs = bt_value_map_borrow_entry_value_const(params, "inputs");
1565
1566 /* clock-class-offset-s parameter */
1567 const bt_value *value = bt_value_map_borrow_entry_value_const(params, "clock-class-offset-s");
1568 if (value) {
1569 ctf_fs->clkClsCfg.offsetSec = bt_value_integer_signed_get(value);
1570 }
1571
1572 /* clock-class-offset-ns parameter */
1573 value = bt_value_map_borrow_entry_value_const(params, "clock-class-offset-ns");
1574 if (value) {
1575 ctf_fs->clkClsCfg.offsetNanoSec = bt_value_integer_signed_get(value);
1576 }
1577
1578 /* force-clock-class-origin-unix-epoch parameter */
1579 value = bt_value_map_borrow_entry_value_const(params, "force-clock-class-origin-unix-epoch");
1580 if (value) {
1581 ctf_fs->clkClsCfg.forceOriginIsUnixEpoch = bt_value_bool_get(value);
1582 }
1583
1584 /* trace-name parameter */
1585 *trace_name = bt_value_map_borrow_entry_value_const(params, "trace-name");
1586
1587 return true;
1588 }
1589
1590 static ctf_fs_component::UP ctf_fs_create(const bt_value *params,
1591 bt_self_component_source *self_comp_src)
1592 {
1593 const bt_value *inputs_value;
1594 const bt_value *trace_name_value;
1595 bt_self_component *self_comp = bt_self_component_source_as_self_component(self_comp_src);
1596
1597 ctf_fs_component::UP ctf_fs = bt2s::make_unique<ctf_fs_component>(
1598 bt2c::Logger {bt2::SelfSourceComponent {self_comp_src}, "PLUGIN/SRC.CTF.FS/COMP"});
1599
1600 if (!read_src_fs_parameters(params, &inputs_value, &trace_name_value, ctf_fs.get())) {
1601 return nullptr;
1602 }
1603
1604 if (ctf_fs_component_create_ctf_fs_trace(ctf_fs.get(), inputs_value, trace_name_value,
1605 self_comp)) {
1606 return nullptr;
1607 }
1608
1609 if (create_streams_for_trace(ctf_fs->trace.get())) {
1610 return nullptr;
1611 }
1612
1613 if (create_ports_for_trace(ctf_fs.get(), ctf_fs->trace.get(), self_comp_src)) {
1614 return nullptr;
1615 }
1616
1617 return ctf_fs;
1618 }
1619
1620 bt_component_class_initialize_method_status ctf_fs_init(bt_self_component_source *self_comp_src,
1621 bt_self_component_source_configuration *,
1622 const bt_value *params, void *)
1623 {
1624 try {
1625 bt_component_class_initialize_method_status ret =
1626 BT_COMPONENT_CLASS_INITIALIZE_METHOD_STATUS_OK;
1627
1628 ctf_fs_component::UP ctf_fs = ctf_fs_create(params, self_comp_src);
1629 if (!ctf_fs) {
1630 ret = BT_COMPONENT_CLASS_INITIALIZE_METHOD_STATUS_ERROR;
1631 }
1632
1633 bt_self_component_set_data(bt_self_component_source_as_self_component(self_comp_src),
1634 ctf_fs.release());
1635 return ret;
1636 } catch (const std::bad_alloc&) {
1637 return BT_COMPONENT_CLASS_INITIALIZE_METHOD_STATUS_MEMORY_ERROR;
1638 } catch (const bt2::Error&) {
1639 return BT_COMPONENT_CLASS_INITIALIZE_METHOD_STATUS_ERROR;
1640 }
1641 }
1642
1643 bt_component_class_query_method_status ctf_fs_query(bt_self_component_class_source *comp_class_src,
1644 bt_private_query_executor *priv_query_exec,
1645 const char *object, const bt_value *params,
1646 __attribute__((unused)) void *method_data,
1647 const bt_value **result)
1648 {
1649 try {
1650 bt2c::Logger logger {bt2::SelfComponentClass {comp_class_src},
1651 bt2::PrivateQueryExecutor {priv_query_exec},
1652 "PLUGIN/SRC.CTF.FS/QUERY"};
1653 bt2::ConstMapValue paramsObj(params);
1654 bt2::Value::Shared resultObj;
1655
1656 if (strcmp(object, "metadata-info") == 0) {
1657 resultObj = metadata_info_query(paramsObj, logger);
1658 } else if (strcmp(object, "babeltrace.trace-infos") == 0) {
1659 resultObj = trace_infos_query(paramsObj, logger);
1660 } else if (!strcmp(object, "babeltrace.support-info")) {
1661 resultObj = support_info_query(paramsObj, logger);
1662 } else {
1663 BT_CPPLOGE_SPEC(logger, "Unknown query object `{}`", object);
1664 return BT_COMPONENT_CLASS_QUERY_METHOD_STATUS_UNKNOWN_OBJECT;
1665 }
1666
1667 *result = resultObj.release().libObjPtr();
1668
1669 return BT_COMPONENT_CLASS_QUERY_METHOD_STATUS_OK;
1670 } catch (const std::bad_alloc&) {
1671 return BT_COMPONENT_CLASS_QUERY_METHOD_STATUS_MEMORY_ERROR;
1672 } catch (const bt2::Error&) {
1673 return BT_COMPONENT_CLASS_QUERY_METHOD_STATUS_ERROR;
1674 }
1675 }