+
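+/*
+ * Visit the session's clock class with the given trace class visitor. The
+ * registry lock must be held by the caller.
+ */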
+void ust_registry_session::_accept_on_clock_classes(lst::trace_class_visitor& visitor) const
+{
+ ASSERT_LOCKED(_lock);
+ _clock.accept(visitor);
+}
+
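+/*
+ * Visit the session's stream classes (channels), ordered by channel id, with
+ * the given trace class visitor. The registry lock must be held by the caller.
+ */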
+void ust_registry_session::_accept_on_stream_classes(lst::trace_class_visitor& visitor) const
+{
+ ASSERT_LOCKED(_lock);
+
+ std::vector<const lttng::sessiond::ust::registry_channel *> sorted_stream_classes;
+
+ {
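+ /* Snapshot the channels while holding the RCU read-side lock. */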
+ lttng::urcu::read_lock_guard rcu_lock_guard;
+ const lsu::registry_channel *channel;
+ lttng_ht_iter channel_it;
+
+ DIAGNOSTIC_PUSH
+ DIAGNOSTIC_IGNORE_INVALID_OFFSETOF
+ cds_lfht_for_each_entry(_channels->ht, &channel_it.iter, channel, _node.node) {
+ sorted_stream_classes.emplace_back(channel);
+ }
+ DIAGNOSTIC_POP
+ }
+
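+ /* Sort stream classes by channel id so they are visited in a deterministic order. */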
+ std::sort(sorted_stream_classes.begin(), sorted_stream_classes.end(),
+ [](const lttng::sessiond::ust::registry_channel *a,
+ const lttng::sessiond::ust::registry_channel *b) {
+ return a->id < b->id;
+ });
+
+ for (const auto stream_class : sorted_stream_classes) {
+ stream_class->accept(visitor);
+ }
+}
+
+/*
+ * Return the next available channel id and increment the used counter. The
+ * is_max_channel_id function MUST be called beforehand to validate that the
+ * maximum number of channel IDs has not been reached. If it has not, it is
+ * safe to call this function.
+ *
+ * Return a unique channel ID. If the maximum is reached, the _used_channel_id
+ * counter is returned.
+ */
+uint32_t ust_registry_session::_get_next_channel_id()
+{
+ if (is_max_channel_id(_used_channel_id)) {
+ return _used_channel_id;
+ }
+
+ _used_channel_id++;
+ return _next_channel_id++;
+}
+
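+/*
+ * Grow the in-memory metadata buffer to accommodate 'reservation_length'
+ * additional bytes. When needed, the allocation is grown to the next power of
+ * two (at least doubling it) and the newly allocated memory is zeroed. Throws
+ * if the resulting size would exceed the allowed maximum.
+ */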
+void ust_registry_session::_increase_metadata_size(size_t reservation_length)
+{
+ const auto new_len = _metadata_len + reservation_length;
+ auto new_alloc_len = new_len;
+ const auto old_alloc_len = _metadata_alloc_len;
+
+ /* Rounding the new allocation length to the next power of 2 would overflow. */
+ if (new_alloc_len > (UINT32_MAX >> 1)) {
+ LTTNG_THROW_ERROR("Failed to reserve trace metadata storage as the new size would overflow");
+ }
+
+ /* The current allocation length is already the largest we can afford. */
+ if ((old_alloc_len << 1) > (UINT32_MAX >> 1)) {
+ LTTNG_THROW_ERROR("Failed to reserve trace metadata storage as the max size was already reached");
+ }
+
+ if (new_alloc_len > old_alloc_len) {
+ new_alloc_len = std::max<size_t>(
+ 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
+
+ auto newptr = (char *) realloc(_metadata, new_alloc_len);
+ if (!newptr) {
+ LTTNG_THROW_POSIX("Failed to allocate trace metadata storage", errno);
+ }
+
+ _metadata = newptr;
+
+ /* Zero the newly allocated memory, from the end of the previous allocation. */
+ memset(&_metadata[old_alloc_len], 0, new_alloc_len - old_alloc_len);
+ _metadata_alloc_len = new_alloc_len;
+ }
+
+ _metadata_len += reservation_length;
+}
+
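+/*
+ * Append a metadata fragment to the in-memory metadata buffer and, when a
+ * metadata file descriptor is set, write it through to the file as well.
+ */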
+void ust_registry_session::_append_metadata_fragment(const std::string& fragment)
+{
+ const auto offset = _metadata_len;
+
+ _increase_metadata_size(fragment.size());
+ memcpy(&_metadata[offset], fragment.c_str(), fragment.size());
+
+ if (_metadata_fd >= 0) {
+ const auto bytes_written =
+ lttng_write(_metadata_fd, fragment.c_str(), fragment.size());
+
+ if (bytes_written != fragment.size()) {
+ LTTNG_THROW_POSIX("Failed to write trace metadata fragment to file",
+ errno);
+ }
+ }
+}
+
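+/*
+ * Discard the currently-generated metadata: zero the in-memory buffer, reset
+ * the length counters and clear the metadata file's content, if any.
+ */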
+void ust_registry_session::_reset_metadata()
+{
+ _metadata_len_sent = 0;
+ memset(_metadata, 0, _metadata_alloc_len);
+ _metadata_len = 0;
+
+ if (_metadata_fd >= 0) {
+ /* Clear the metadata file's content. */
+ clear_metadata_file(_metadata_fd);
+ }
+}
+
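+/* Generate the metadata by visiting the registry with the metadata-generating visitor. */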
+void ust_registry_session::_generate_metadata()
+{
+ accept(*_metadata_generating_visitor);
+}
+
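+/*
+ * Regenerate the session's metadata from scratch: bump the metadata version,
+ * discard the current metadata and produce it anew. Takes the registry lock.
+ */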
+void ust_registry_session::regenerate_metadata()
+{
+ lttng::pthread::lock_guard registry_lock(_lock);
+
+ _metadata_version++;
+ _reset_metadata();
+ _generate_metadata();
+}