Fix: relayd: live connection fails to open file during clear
[lttng-tools.git] / src / bin / lttng-relayd / live.c
index 562a7fa522017ae4126d2d7b76847fbed9f81dc6..7d6dc1bc20a03a5af6cafe1e5ef170be1b7f5cb8 100644
@@ -1,25 +1,17 @@
 /*
- * Copyright (C) 2013 - Julien Desfossez <jdesfossez@efficios.com>
- *                      David Goulet <dgoulet@efficios.com>
+ * Copyright (C) 2013 Julien Desfossez <jdesfossez@efficios.com>
+ * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
+ * Copyright (C) 2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2 only,
- * as published by the Free Software Foundation.
+ * SPDX-License-Identifier: GPL-2.0-only
  *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
  */
 
-#define _GNU_SOURCE
 #define _LGPL_SOURCE
+#include <fcntl.h>
 #include <getopt.h>
 #include <grp.h>
+#include <inttypes.h>
 #include <limits.h>
 #include <pthread.h>
 #include <signal.h>
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <sys/wait.h>
-#include <inttypes.h>
+#include <unistd.h>
 #include <urcu/futex.h>
+#include <urcu/rculist.h>
 #include <urcu/uatomic.h>
-#include <unistd.h>
-#include <fcntl.h>
-#include <config.h>
 
-#include <lttng/lttng.h>
 #include <common/common.h>
+#include <common/compat/endian.h>
 #include <common/compat/poll.h>
 #include <common/compat/socket.h>
-#include <common/compat/endian.h>
 #include <common/defaults.h>
+#include <common/fd-tracker/utils.h>
+#include <common/fs-handle.h>
 #include <common/futex.h>
 #include <common/index/index.h>
-#include <common/sessiond-comm/sessiond-comm.h>
 #include <common/sessiond-comm/inet.h>
 #include <common/sessiond-comm/relayd.h>
+#include <common/sessiond-comm/sessiond-comm.h>
 #include <common/uri.h>
 #include <common/utils.h>
+#include <lttng/lttng.h>
 
 #include "cmd.h"
+#include "connection.h"
+#include "ctf-trace.h"
+#include "health-relayd.h"
 #include "live.h"
 #include "lttng-relayd.h"
-#include "utils.h"
-#include "health-relayd.h"
+#include "session.h"
+#include "stream.h"
 #include "testpoint.h"
+#include "utils.h"
+#include "viewer-session.h"
 #include "viewer-stream.h"
-#include "stream.h"
-#include "session.h"
-#include "ctf-trace.h"
-#include "connection.h"
+
+#define SESSION_BUF_DEFAULT_COUNT      16
 
 static struct lttng_uri *live_uri;
 
@@ -90,6 +85,8 @@ static pthread_t live_worker_thread;
 static struct relay_conn_queue viewer_conn_queue;
 
 static uint64_t last_relay_viewer_session_id;
+static pthread_mutex_t last_relay_viewer_session_id_lock =
+               PTHREAD_MUTEX_INITIALIZER;
 
 /*
  * Cleanup the daemon
@@ -114,9 +111,6 @@ ssize_t recv_request(struct lttcomm_sock *sock, void *buf, size_t size)
 {
        ssize_t ret;
 
-       assert(sock);
-       assert(buf);
-
        ret = sock->ops->recvmsg(sock, buf, size, 0);
        if (ret < 0 || ret != size) {
                if (ret == 0) {
@@ -143,9 +137,6 @@ ssize_t send_response(struct lttcomm_sock *sock, void *buf, size_t size)
 {
        ssize_t ret;
 
-       assert(sock);
-       assert(buf);
-
        ret = sock->ops->sendmsg(sock, buf, size, 0);
        if (ret < 0) {
                ERR("Relayd failed to send response.");
@@ -171,17 +162,22 @@ int check_new_streams(struct relay_connection *conn)
        if (!conn->viewer_session) {
                goto end;
        }
-       cds_list_for_each_entry(session,
-                       &conn->viewer_session->sessions_head,
-                       viewer_session_list) {
+       rcu_read_lock();
+       cds_list_for_each_entry_rcu(session,
+                       &conn->viewer_session->session_list,
+                       viewer_session_node) {
+               if (!session_get(session)) {
+                       continue;
+               }
                current_val = uatomic_cmpxchg(&session->new_streams, 1, 0);
                ret = current_val;
+               session_put(session);
                if (ret == 1) {
                        goto end;
                }
        }
-
 end:
+       rcu_read_unlock();
        return ret;
 }
 
@@ -193,47 +189,65 @@ end:
  */
 static
 ssize_t send_viewer_streams(struct lttcomm_sock *sock,
-               struct relay_session *session, unsigned int ignore_sent_flag)
+               uint64_t session_id, unsigned int ignore_sent_flag)
 {
        ssize_t ret;
-       struct lttng_viewer_stream send_stream;
        struct lttng_ht_iter iter;
        struct relay_viewer_stream *vstream;
 
-       assert(session);
-
        rcu_read_lock();
 
        cds_lfht_for_each_entry(viewer_streams_ht->ht, &iter.iter, vstream,
                        stream_n.node) {
                struct ctf_trace *ctf_trace;
+               struct lttng_viewer_stream send_stream = {};
 
                health_code_update();
 
+               if (!viewer_stream_get(vstream)) {
+                       continue;
+               }
+
+               pthread_mutex_lock(&vstream->stream->lock);
                /* Ignore if not the same session. */
-               if (vstream->session_id != session->id ||
+               if (vstream->stream->trace->session->id != session_id ||
                                (!ignore_sent_flag && vstream->sent_flag)) {
+                       pthread_mutex_unlock(&vstream->stream->lock);
+                       viewer_stream_put(vstream);
                        continue;
                }
 
-               ctf_trace = ctf_trace_find_by_path(session->ctf_traces_ht,
-                               vstream->path_name);
-               assert(ctf_trace);
-
-               send_stream.id = htobe64(vstream->stream_handle);
+               ctf_trace = vstream->stream->trace;
+               send_stream.id = htobe64(vstream->stream->stream_handle);
                send_stream.ctf_trace_id = htobe64(ctf_trace->id);
-               send_stream.metadata_flag = htobe32(vstream->metadata_flag);
-               strncpy(send_stream.path_name, vstream->path_name,
-                               sizeof(send_stream.path_name));
-               strncpy(send_stream.channel_name, vstream->channel_name,
-                               sizeof(send_stream.channel_name));
+               send_stream.metadata_flag = htobe32(
+                               vstream->stream->is_metadata);
+               if (lttng_strncpy(send_stream.path_name, vstream->path_name,
+                               sizeof(send_stream.path_name))) {
+                       pthread_mutex_unlock(&vstream->stream->lock);
+                       viewer_stream_put(vstream);
+                       ret = -1;       /* Error. */
+                       goto end_unlock;
+               }
+               if (lttng_strncpy(send_stream.channel_name,
+                               vstream->channel_name,
+                               sizeof(send_stream.channel_name))) {
+                       pthread_mutex_unlock(&vstream->stream->lock);
+                       viewer_stream_put(vstream);
+                       ret = -1;       /* Error. */
+                       goto end_unlock;
+               }
+
+               DBG("Sending stream %" PRIu64 " to viewer",
+                               vstream->stream->stream_handle);
+               vstream->sent_flag = 1;
+               pthread_mutex_unlock(&vstream->stream->lock);
 
-               DBG("Sending stream %" PRIu64 " to viewer", vstream->stream_handle);
                ret = send_response(sock, &send_stream, sizeof(send_stream));
+               viewer_stream_put(vstream);
                if (ret < 0) {
                        goto end_unlock;
                }
-               vstream->sent_flag = 1;
        }
 
        ret = 0;
@@ -249,80 +263,209 @@ end_unlock:
  * viewer stream of the session, the number of unsent stream and the number of
  * stream created. Those counters can be NULL and thus will be ignored.
  *
+ * session must be locked to ensure that we see either none or all initial
+ * streams for a session, but no intermediate state.
+ *
  * Return 0 on success or else a negative value.
  */
-static
-int make_viewer_streams(struct relay_session *session,
-               enum lttng_viewer_seek seek_t, uint32_t *nb_total, uint32_t *nb_unsent,
-               uint32_t *nb_created)
+static int make_viewer_streams(struct relay_session *relay_session,
+               struct relay_viewer_session *viewer_session,
+               enum lttng_viewer_seek seek_t,
+               uint32_t *nb_total,
+               uint32_t *nb_unsent,
+               uint32_t *nb_created,
+               bool *closed)
 {
        int ret;
        struct lttng_ht_iter iter;
        struct ctf_trace *ctf_trace;
+       struct relay_stream *relay_stream = NULL;
 
-       assert(session);
+       assert(relay_session);
+       ASSERT_LOCKED(relay_session->lock);
 
-       /*
-        * This is to make sure we create viewer streams for a full received
-        * channel. For instance, if we have 8 streams for a channel that are
-        * concurrently being flagged ready, we can end up creating just a subset
-        * of the 8 streams (the ones that are flagged). This lock avoids this
-        * limbo state.
-        */
-       pthread_mutex_lock(&session->viewer_ready_lock);
+       if (!viewer_session->current_trace_chunk) {
+               ERR("Internal error: viewer session associated with session \"%s\" has a NULL trace chunk",
+                               relay_session->session_name);
+               ret = -1;
+               goto error;
+       }
+
+       if (relay_session->connection_closed) {
+               *closed = true;
+       }
 
        /*
-        * Create viewer streams for relay streams that are ready to be used for a
-        * the given session id only.
+        * Create viewer streams for relay streams that are ready to be
+        * used for the given session id only.
         */
        rcu_read_lock();
-       cds_lfht_for_each_entry(session->ctf_traces_ht->ht, &iter.iter, ctf_trace,
-                       node.node) {
-               struct relay_stream *stream;
+       cds_lfht_for_each_entry (relay_session->ctf_traces_ht->ht, &iter.iter,
+                       ctf_trace, node.node) {
+               bool trace_has_metadata_stream = false;
 
                health_code_update();
 
-               if (ctf_trace->invalid_flag) {
+               if (!ctf_trace_get(ctf_trace)) {
+                       continue;
+               }
+
+               /*
+                * Iterate over all the streams of the trace to see if we have a
+                * metadata stream.
+                */
+               cds_list_for_each_entry_rcu(relay_stream,
+                               &ctf_trace->stream_list, stream_node)
+               {
+                       bool is_metadata_stream;
+
+                       pthread_mutex_lock(&relay_stream->lock);
+                       is_metadata_stream = relay_stream->is_metadata;
+                       pthread_mutex_unlock(&relay_stream->lock);
+
+                       if (is_metadata_stream) {
+                               trace_has_metadata_stream = true;
+                               break;
+                       }
+               }
+
+               relay_stream = NULL;
+
+               /*
+                * If there is no metadata stream in this trace at the moment
+                * and we never sent one to the viewer, skip the trace. We
+                * accept that the viewer will not see this trace at all.
+                */
+               if (!trace_has_metadata_stream &&
+                               !ctf_trace->metadata_stream_sent_to_viewer) {
+                       ctf_trace_put(ctf_trace);
                        continue;
                }
 
-               cds_list_for_each_entry(stream, &ctf_trace->stream_list, trace_list) {
-                       struct relay_viewer_stream *vstream;
+               cds_list_for_each_entry_rcu(relay_stream,
+                               &ctf_trace->stream_list, stream_node)
+               {
+                       struct relay_viewer_stream *viewer_stream;
 
-                       if (!stream->viewer_ready) {
+                       if (!stream_get(relay_stream)) {
                                continue;
                        }
 
-                       vstream = viewer_stream_find_by_id(stream->stream_handle);
-                       if (!vstream) {
-                               vstream = viewer_stream_create(stream, seek_t, ctf_trace);
-                               if (!vstream) {
+                       pthread_mutex_lock(&relay_stream->lock);
+                       /*
+                        * The stream's 'published' flag is protected by
+                        * the session lock.
+                        */
+                       if (!relay_stream->published) {
+                               goto next;
+                       }
+                       viewer_stream = viewer_stream_get_by_id(
+                                       relay_stream->stream_handle);
+                       if (!viewer_stream) {
+                               struct lttng_trace_chunk *viewer_stream_trace_chunk;
+
+                               /*
+                                * Record that we sent the metadata stream to the
+                                * viewer, so that we know which trace the viewer
+                                * is aware of.
+                                */
+                               if (relay_stream->is_metadata) {
+                                       ctf_trace->metadata_stream_sent_to_viewer = true;
+                               }
+
+                               /*
+                                * If a rotation is ongoing, use a copy of the
+                                * relay stream's chunk to ensure the stream
+                                * files exist.
+                                *
+                                * Otherwise, the viewer session's current trace
+                                * chunk can be used safely.
+                                */
+                               if ((relay_stream->ongoing_rotation.is_set ||
+                                                   relay_session->ongoing_rotation) &&
+                                               relay_stream->trace_chunk) {
+                                       viewer_stream_trace_chunk = lttng_trace_chunk_copy(
+                                                       relay_stream->trace_chunk);
+                                       if (!viewer_stream_trace_chunk) {
+                                               ret = -1;
+                                               ctf_trace_put(ctf_trace);
+                                               goto error_unlock;
+                                       }
+                               } else {
+                                       const bool reference_acquired = lttng_trace_chunk_get(
+                                                       viewer_session->current_trace_chunk);
+
+                                       assert(reference_acquired);
+                                       viewer_stream_trace_chunk =
+                                                       viewer_session->current_trace_chunk;
+                               }
+
+                               viewer_stream = viewer_stream_create(
+                                               relay_stream,
+                                               viewer_stream_trace_chunk,
+                                               seek_t);
+                               lttng_trace_chunk_put(viewer_stream_trace_chunk);
+                               viewer_stream_trace_chunk = NULL;
+                               if (!viewer_stream) {
                                        ret = -1;
+                                       ctf_trace_put(ctf_trace);
                                        goto error_unlock;
                                }
-                               /* Acquire reference to ctf_trace. */
-                               ctf_trace_get_ref(ctf_trace);
 
                                if (nb_created) {
                                        /* Update number of created stream counter. */
                                        (*nb_created)++;
                                }
-                       } else if (!vstream->sent_flag && nb_unsent) {
-                               /* Update number of unsent stream counter. */
-                               (*nb_unsent)++;
+                               /*
+                                * Ensure a self-reference is preserved even
+                                * after we have put our local reference.
+                                */
+                               if (!viewer_stream_get(viewer_stream)) {
+                                       ERR("Unable to get self-reference on viewer stream, logic error.");
+                                       abort();
+                               }
+                       } else {
+                               if (!viewer_stream->sent_flag && nb_unsent) {
+                                       /* Update number of unsent stream counter. */
+                                       (*nb_unsent)++;
+                               }
                        }
                        /* Update number of total stream counter. */
                        if (nb_total) {
-                               (*nb_total)++;
+                               if (relay_stream->is_metadata) {
+                                       if (!relay_stream->closed ||
+                                                       relay_stream->metadata_received >
+                                                                       viewer_stream->metadata_sent) {
+                                               (*nb_total)++;
+                                       }
+                               } else {
+                                       if (!relay_stream->closed ||
+                                                       !(((int64_t)(relay_stream->prev_data_seq -
+                                                                         relay_stream->last_net_seq_num)) >=
+                                                                       0)) {
+                                               (*nb_total)++;
+                                       }
+                               }
                        }
+                       /* Put local reference. */
+                       viewer_stream_put(viewer_stream);
+               next:
+                       pthread_mutex_unlock(&relay_stream->lock);
+                       stream_put(relay_stream);
                }
+               relay_stream = NULL;
+               ctf_trace_put(ctf_trace);
        }
 
        ret = 0;
 
 error_unlock:
        rcu_read_unlock();
-       pthread_mutex_unlock(&session->viewer_ready_lock);
+error:
+       if (relay_stream) {
+               pthread_mutex_unlock(&relay_stream->lock);
+               stream_put(relay_stream);
+       }
+
        return ret;
 }
 
@@ -338,7 +481,8 @@ int relayd_live_stop(void)
  * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
  */
 static
-int create_thread_poll_set(struct lttng_poll_event *events, int size)
+int create_named_thread_poll_set(struct lttng_poll_event *events,
+               int size, const char *name)
 {
        int ret;
 
@@ -347,8 +491,10 @@ int create_thread_poll_set(struct lttng_poll_event *events, int size)
                goto error;
        }
 
-       ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
-       if (ret < 0) {
+       ret = fd_tracker_util_poll_create(the_fd_tracker,
+                       name, events, 1, LTTNG_CLOEXEC);
+       if (ret) {
+               PERROR("Failed to create \"%s\" poll file descriptor", name);
                goto error;
        }
 
@@ -379,14 +525,76 @@ int check_thread_quit_pipe(int fd, uint32_t events)
        return 0;
 }
 
+static
+int create_sock(void *data, int *out_fd)
+{
+       int ret;
+       struct lttcomm_sock *sock = data;
+
+       ret = lttcomm_create_sock(sock);
+       if (ret < 0) {
+               goto end;
+       }
+
+       *out_fd = sock->fd;
+end:
+       return ret;
+}
+
+static
+int close_sock(void *data, int *in_fd)
+{
+       struct lttcomm_sock *sock = data;
+
+       return sock->ops->close(sock);
+}
+
+static int accept_sock(void *data, int *out_fd)
+{
+       int ret = 0;
+       /* Socks is an array of in_sock, out_sock. */
+       struct lttcomm_sock **socks = data;
+       struct lttcomm_sock *in_sock = socks[0];
+
+       socks[1] = in_sock->ops->accept(in_sock);
+       if (!socks[1]) {
+               ret = -1;
+               goto end;
+       }
+       *out_fd = socks[1]->fd;
+end:
+       return ret;
+}
+
+static
+struct lttcomm_sock *accept_live_sock(struct lttcomm_sock *listening_sock,
+               const char *name)
+{
+       int out_fd, ret;
+       struct lttcomm_sock *socks[2] = { listening_sock, NULL };
+       struct lttcomm_sock *new_sock = NULL;
+
+       ret = fd_tracker_open_unsuspendable_fd(the_fd_tracker, &out_fd,
+                       (const char **) &name, 1, accept_sock, &socks);
+       if (ret) {
+               goto end;
+       }
+       new_sock = socks[1];
+       DBG("%s accepted, socket %d", name, new_sock->fd);
+end:
+       return new_sock;
+}
+
 /*
  * Create and init socket from uri.
  */
 static
-struct lttcomm_sock *init_socket(struct lttng_uri *uri)
+struct lttcomm_sock *init_socket(struct lttng_uri *uri, const char *name)
 {
-       int ret;
+       int ret, sock_fd;
        struct lttcomm_sock *sock = NULL;
+       char uri_str[LTTNG_PATH_MAX];
+       char *formated_name = NULL;
 
        sock = lttcomm_alloc_sock_from_uri(uri);
        if (sock == NULL) {
@@ -394,14 +602,33 @@ struct lttcomm_sock *init_socket(struct lttng_uri *uri)
                goto error;
        }
 
-       ret = lttcomm_create_sock(sock);
-       if (ret < 0) {
+       /*
+        * Don't fail to create the socket if the name can't be built as it is
+        * only used for debugging purposes.
+        */
+       ret = uri_to_str_url(uri, uri_str, sizeof(uri_str));
+       uri_str[sizeof(uri_str) - 1] = '\0';
+       if (ret >= 0) {
+               ret = asprintf(&formated_name, "%s socket @ %s", name,
+                               uri_str);
+               if (ret < 0) {
+                       formated_name = NULL;
+               }
+       }
+
+       ret = fd_tracker_open_unsuspendable_fd(the_fd_tracker, &sock_fd,
+                       (const char **) (formated_name ? &formated_name : NULL),
+                       1, create_sock, sock);
+       if (ret) {
+               PERROR("Failed to create \"%s\" socket",
+                               formated_name ?: "Unknown");
                goto error;
        }
-       DBG("Listening on sock %d for live", sock->fd);
+       DBG("Listening on %s socket %d", name, sock->fd);
 
        ret = sock->ops->bind(sock);
        if (ret < 0) {
+               PERROR("Failed to bind lttng-live socket");
                goto error;
        }
 
@@ -411,12 +638,14 @@ struct lttcomm_sock *init_socket(struct lttng_uri *uri)
 
        }
 
+       free(formated_name);
        return sock;
 
 error:
        if (sock) {
                lttcomm_destroy_sock(sock);
        }
+       free(formated_name);
        return NULL;
 }
 
@@ -433,17 +662,19 @@ void *thread_listener(void *data)
 
        DBG("[thread] Relay live listener started");
 
+       rcu_register_thread();
        health_register(health_relayd, HEALTH_RELAYD_TYPE_LIVE_LISTENER);
 
        health_code_update();
 
-       live_control_sock = init_socket(live_uri);
+       live_control_sock = init_socket(live_uri, "Live listener");
        if (!live_control_sock) {
                goto error_sock_control;
        }
 
        /* Pass 2 as size here for the thread quit pipe and control sockets. */
-       ret = create_thread_poll_set(&events, 2);
+       ret = create_named_thread_poll_set(&events, 2,
+                       "Live listener thread epoll");
        if (ret < 0) {
                goto error_create_poll;
        }
@@ -488,11 +719,6 @@ restart:
                        revents = LTTNG_POLL_GETEV(&events, i);
                        pollfd = LTTNG_POLL_GETFD(&events, i);
 
-                       if (!revents) {
-                               /* No activity for this FD (poll implementation). */
-                               continue;
-                       }
-
                        /* Thread quit pipe has been closed. Killing thread. */
                        ret = check_thread_quit_pipe(pollfd, revents);
                        if (ret) {
@@ -500,27 +726,21 @@ restart:
                                goto exit;
                        }
 
-                       if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
-                               ERR("socket poll error");
-                               goto error;
-                       } else if (revents & LPOLLIN) {
+                       if (revents & LPOLLIN) {
                                /*
-                                * Get allocated in this thread, enqueued to a global queue,
-                                * dequeued and freed in the worker thread.
+                                * A new connection is requested, therefore a
+                                * viewer connection is allocated in this
+                                * thread, enqueued to a global queue and
+                                * dequeued (and freed) in the worker thread.
                                 */
                                int val = 1;
                                struct relay_connection *new_conn;
                                struct lttcomm_sock *newsock;
 
-                               new_conn = connection_create();
-                               if (!new_conn) {
-                                       goto error;
-                               }
-
-                               newsock = live_control_sock->ops->accept(live_control_sock);
+                               newsock = accept_live_sock(live_control_sock,
+                                               "Live socket to client");
                                if (!newsock) {
                                        PERROR("accepting control sock");
-                                       connection_free(new_conn);
                                        goto error;
                                }
                                DBG("Relay viewer connection accepted socket %d", newsock->fd);
@@ -530,20 +750,32 @@ restart:
                                if (ret < 0) {
                                        PERROR("setsockopt inet");
                                        lttcomm_destroy_sock(newsock);
-                                       connection_free(new_conn);
                                        goto error;
                                }
-                               new_conn->sock = newsock;
+                               new_conn = connection_create(newsock, RELAY_CONNECTION_UNKNOWN);
+                               if (!new_conn) {
+                                       lttcomm_destroy_sock(newsock);
+                                       goto error;
+                               }
+                               /* Ownership assumed by the connection. */
+                               newsock = NULL;
 
                                /* Enqueue request for the dispatcher thread. */
                                cds_wfcq_enqueue(&viewer_conn_queue.head, &viewer_conn_queue.tail,
                                                 &new_conn->qnode);
 
                                /*
-                                * Wake the dispatch queue futex. Implicit memory barrier with
-                                * the exchange in cds_wfcq_enqueue.
+                                * Wake the dispatch queue futex.
+                                * Implicit memory barrier with the
+                                * exchange in cds_wfcq_enqueue.
                                 */
                                futex_nto1_wake(&viewer_conn_queue.futex);
+                       } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+                               ERR("socket poll error");
+                               goto error;
+                       } else {
+                               ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+                               goto error;
                        }
                }
        }
@@ -552,13 +784,18 @@ exit:
 error:
 error_poll_add:
 error_testpoint:
-       lttng_poll_clean(&events);
+       (void) fd_tracker_util_poll_clean(the_fd_tracker, &events);
 error_create_poll:
        if (live_control_sock->fd >= 0) {
-               ret = live_control_sock->ops->close(live_control_sock);
+               int sock_fd = live_control_sock->fd;
+
+               ret = fd_tracker_close_unsuspendable_fd(the_fd_tracker,
+                               &sock_fd, 1, close_sock,
+                               live_control_sock);
                if (ret) {
                        PERROR("close");
                }
+               live_control_sock->fd = -1;
        }
        lttcomm_destroy_sock(live_control_sock);
 error_sock_control:
@@ -567,6 +804,7 @@ error_sock_control:
                DBG("Live viewer listener thread exited with error");
        }
        health_unregister(health_relayd);
+       rcu_unregister_thread();
        DBG("Live viewer listener thread cleanup complete");
        if (lttng_relay_stop_threads()) {
                ERR("Error stopping threads");
@@ -595,12 +833,16 @@ void *thread_dispatcher(void *data)
 
        health_code_update();
 
-       while (!CMM_LOAD_SHARED(live_dispatch_thread_exit)) {
+       for (;;) {
                health_code_update();
 
                /* Atomically prepare the queue futex */
                futex_nto1_prepare(&viewer_conn_queue.futex);
 
+               if (CMM_LOAD_SHARED(live_dispatch_thread_exit)) {
+                       break;
+               }
+
                do {
                        health_code_update();
 
@@ -618,14 +860,15 @@ void *thread_dispatcher(void *data)
                                        conn->sock->fd);
 
                        /*
-                        * Inform worker thread of the new request. This call is blocking
-                        * so we can be assured that the data will be read at some point in
-                        * time or wait to the end of the world :)
+                        * Inform worker thread of the new request. This
+                        * call is blocking so we can be assured that
+                        * the data will be read at some point in time
+                        * or wait to the end of the world :)
                         */
                        ret = lttng_write(live_conn_pipe[1], &conn, sizeof(conn));
                        if (ret < 0) {
                                PERROR("write conn pipe");
-                               connection_destroy(conn);
+                               connection_put(conn);
                                goto error;
                        }
                } while (node != NULL);
@@ -664,8 +907,6 @@ int viewer_connect(struct relay_connection *conn)
        int ret;
        struct lttng_viewer_connect reply, msg;
 
-       assert(conn);
-
        conn->version_check_done = 1;
 
        health_code_update();
@@ -713,10 +954,13 @@ int viewer_connect(struct relay_connection *conn)
        reply.minor = htobe32(reply.minor);
        if (conn->type == RELAY_VIEWER_COMMAND) {
                /*
-                * Increment outside of htobe64 macro, because can be used more than once
-                * within the macro, and thus the operation may be undefined.
+                * Increment outside of htobe64 macro, because the argument can
+                * be used more than once within the macro, and thus the
+                * operation may be undefined.
                 */
+               pthread_mutex_lock(&last_relay_viewer_session_id_lock);
                last_relay_viewer_session_id++;
+               pthread_mutex_unlock(&last_relay_viewer_session_id_lock);
                reply.viewer_session_id = htobe64(last_relay_viewer_session_id);
        }
 
@@ -738,168 +982,134 @@ end:
 
 /*
  * Send the viewer the list of current sessions.
+ * We need to create a copy of the hash table content because otherwise
+ * we cannot assume the number of entries stays the same between getting
+ * the number of HT elements and iterating over the HT.
  *
  * Return 0 on success or else a negative value.
  */
 static
 int viewer_list_sessions(struct relay_connection *conn)
 {
-       int ret;
+       int ret = 0;
        struct lttng_viewer_list_sessions session_list;
-       unsigned long count;
-       long approx_before, approx_after;
        struct lttng_ht_iter iter;
-       struct lttng_viewer_session send_session;
        struct relay_session *session;
+       struct lttng_viewer_session *send_session_buf = NULL;
+       uint32_t buf_count = SESSION_BUF_DEFAULT_COUNT;
+       uint32_t count = 0;
 
        DBG("List sessions received");
 
-       rcu_read_lock();
-       cds_lfht_count_nodes(conn->sessions_ht->ht, &approx_before, &count,
-                       &approx_after);
-       session_list.sessions_count = htobe32(count);
-
-       health_code_update();
-
-       ret = send_response(conn->sock, &session_list, sizeof(session_list));
-       if (ret < 0) {
-               goto end_unlock;
+       send_session_buf = zmalloc(SESSION_BUF_DEFAULT_COUNT * sizeof(*send_session_buf));
+       if (!send_session_buf) {
+               return -1;
        }
 
-       health_code_update();
-
-       cds_lfht_for_each_entry(conn->sessions_ht->ht, &iter.iter, session,
+       rcu_read_lock();
+       cds_lfht_for_each_entry(sessions_ht->ht, &iter.iter, session,
                        session_n.node) {
-               health_code_update();
-
-               strncpy(send_session.session_name, session->session_name,
-                               sizeof(send_session.session_name));
-               strncpy(send_session.hostname, session->hostname,
-                               sizeof(send_session.hostname));
-               send_session.id = htobe64(session->id);
-               send_session.live_timer = htobe32(session->live_timer);
-               send_session.clients = htobe32(session->viewer_refcount);
-               send_session.streams = htobe32(session->stream_count);
+               struct lttng_viewer_session *send_session;
 
                health_code_update();
 
-               ret = send_response(conn->sock, &send_session, sizeof(send_session));
-               if (ret < 0) {
-                       goto end_unlock;
+               pthread_mutex_lock(&session->lock);
+               if (session->connection_closed) {
+                       /* Skip closed session */
+                       goto next_session;
                }
-       }
-       health_code_update();
-
-       ret = 0;
-end_unlock:
-       rcu_read_unlock();
-       return ret;
-}
-
-/*
- * Check if a connection is attached to a session.
- * Return 1 if attached, 0 if not attached, a negative value on error.
- */
-static
-int session_attached(struct relay_connection *conn, uint64_t session_id)
-{
-       struct relay_session *session;
-       int found = 0;
-
-       if (!conn->viewer_session) {
-               goto end;
-       }
-       cds_list_for_each_entry(session,
-                       &conn->viewer_session->sessions_head,
-                       viewer_session_list) {
-               if (session->id == session_id) {
-                       found = 1;
-                       goto end;
+               if (!session->current_trace_chunk) {
+                       /*
+                        * Skip un-attachable session. It is either
+                        * being destroyed or has not had a trace
+                        * chunk created against it yet.
+                        */
+                       goto next_session;
                }
-       }
-
-end:
-       return found;
-}
 
-/*
- * Delete all streams for a specific session ID.
- */
-static void destroy_viewer_streams_by_session(struct relay_session *session)
-{
-       struct relay_viewer_stream *stream;
-       struct lttng_ht_iter iter;
-
-       assert(session);
-
-       rcu_read_lock();
-       cds_lfht_for_each_entry(viewer_streams_ht->ht, &iter.iter, stream,
-                       stream_n.node) {
-               struct ctf_trace *ctf_trace;
+               if (count >= buf_count) {
+                       struct lttng_viewer_session *newbuf;
+                       uint32_t new_buf_count = buf_count << 1;
 
-               health_code_update();
-               if (stream->session_id != session->id) {
-                       continue;
+                       newbuf = realloc(send_session_buf,
+                               new_buf_count * sizeof(*send_session_buf));
+                       if (!newbuf) {
+                               ret = -1;
+                               goto break_loop;
+                       }
+                       send_session_buf = newbuf;
+                       buf_count = new_buf_count;
                }
-
-               ctf_trace = ctf_trace_find_by_path(session->ctf_traces_ht,
-                               stream->path_name);
-               assert(ctf_trace);
-
-               viewer_stream_delete(stream);
-
-               if (stream->metadata_flag) {
-                       ctf_trace->metadata_sent = 0;
-                       ctf_trace->viewer_metadata_stream = NULL;
+               send_session = &send_session_buf[count];
+               if (lttng_strncpy(send_session->session_name,
+                               session->session_name,
+                               sizeof(send_session->session_name))) {
+                       ret = -1;
+                       goto break_loop;
                }
-
-               viewer_stream_destroy(ctf_trace, stream);
+               if (lttng_strncpy(send_session->hostname, session->hostname,
+                               sizeof(send_session->hostname))) {
+                       ret = -1;
+                       goto break_loop;
+               }
+               send_session->id = htobe64(session->id);
+               send_session->live_timer = htobe32(session->live_timer);
+               if (session->viewer_attached) {
+                       send_session->clients = htobe32(1);
+               } else {
+                       send_session->clients = htobe32(0);
+               }
+               send_session->streams = htobe32(session->stream_count);
+               count++;
+       next_session:
+               pthread_mutex_unlock(&session->lock);
+               continue;
+       break_loop:
+               pthread_mutex_unlock(&session->lock);
+               break;
        }
        rcu_read_unlock();
-}
+       if (ret < 0) {
+               goto end_free;
+       }
 
-static void try_destroy_streams(struct relay_session *session)
-{
-       struct ctf_trace *ctf_trace;
-       struct lttng_ht_iter iter;
+       session_list.sessions_count = htobe32(count);
 
-       assert(session);
+       health_code_update();
 
-       cds_lfht_for_each_entry(session->ctf_traces_ht->ht, &iter.iter, ctf_trace,
-                       node.node) {
-               /* Attempt to destroy the ctf trace of that session. */
-               ctf_trace_try_destroy(session, ctf_trace);
+       ret = send_response(conn->sock, &session_list, sizeof(session_list));
+       if (ret < 0) {
+               goto end_free;
        }
-}
 
-/*
- * Cleanup a session.
- */
-static void cleanup_session(struct relay_connection *conn,
-               struct relay_session *session)
-{
-       /*
-        * Very important that this is done before destroying the session so we
-        * can put back every viewer stream reference from the ctf_trace.
-        */
-       destroy_viewer_streams_by_session(session);
-       try_destroy_streams(session);
-       cds_list_del(&session->viewer_session_list);
-       session_viewer_try_destroy(conn->sessions_ht, session);
+       health_code_update();
+
+       ret = send_response(conn->sock, send_session_buf,
+                       count * sizeof(*send_session_buf));
+       if (ret < 0) {
+               goto end_free;
+       }
+       health_code_update();
+
+       ret = 0;
+end_free:
+       free(send_session_buf);
+       return ret;
 }
 
 /*
- * Send the viewer the list of current sessions.
+ * Send the viewer the list of current streams.
  */
 static
 int viewer_get_new_streams(struct relay_connection *conn)
 {
        int ret, send_streams = 0;
-       uint32_t nb_created = 0, nb_unsent = 0, nb_streams = 0;
+       uint32_t nb_created = 0, nb_unsent = 0, nb_streams = 0, nb_total = 0;
        struct lttng_viewer_new_streams_request request;
        struct lttng_viewer_new_streams_response response;
-       struct relay_session *session;
+       struct relay_session *session = NULL;
        uint64_t session_id;
+       bool closed = false;
 
        assert(conn);
 
@@ -918,79 +1128,83 @@ int viewer_get_new_streams(struct relay_connection *conn)
 
        memset(&response, 0, sizeof(response));
 
-       rcu_read_lock();
-       session = session_find_by_id(conn->sessions_ht, session_id);
+       session = session_get_by_id(session_id);
        if (!session) {
                DBG("Relay session %" PRIu64 " not found", session_id);
                response.status = htobe32(LTTNG_VIEWER_NEW_STREAMS_ERR);
                goto send_reply;
        }
 
-       if (!session_attached(conn, session_id)) {
-               send_streams = 0;
+       if (!viewer_session_is_attached(conn->viewer_session, session)) {
                response.status = htobe32(LTTNG_VIEWER_NEW_STREAMS_ERR);
                goto send_reply;
        }
 
+       pthread_mutex_lock(&session->lock);
+       ret = make_viewer_streams(session,
+                       conn->viewer_session,
+                       LTTNG_VIEWER_SEEK_LAST, &nb_total, &nb_unsent,
+                       &nb_created, &closed);
+       if (ret < 0) {
+               goto error_unlock_session;
+       }
        send_streams = 1;
        response.status = htobe32(LTTNG_VIEWER_NEW_STREAMS_OK);
 
-       ret = make_viewer_streams(session, LTTNG_VIEWER_SEEK_LAST, NULL, &nb_unsent,
-                       &nb_created);
-       if (ret < 0) {
-               goto end_unlock;
-       }
        /* Only send back the newly created streams with the unsent ones. */
        nb_streams = nb_created + nb_unsent;
        response.streams_count = htobe32(nb_streams);
 
        /*
-        * If the session is closed and we have no new streams to send,
-        * it means that the viewer has already received the whole trace
-        * for this session and should now close it.
+        * If the session is closed, HUP when there are no more streams
+        * with data.
         */
-       if (nb_streams == 0 && session->close_flag) {
+       if (closed && nb_total == 0) {
                send_streams = 0;
+               response.streams_count = 0;
                response.status = htobe32(LTTNG_VIEWER_NEW_STREAMS_HUP);
-               /*
-                * Remove the session from the attached list of the connection
-                * and try to destroy it.
-                */
-               cds_list_del(&session->viewer_session_list);
-               cleanup_session(conn, session);
-               goto send_reply;
+               goto send_reply_unlock;
        }
+send_reply_unlock:
+       pthread_mutex_unlock(&session->lock);
 
 send_reply:
        health_code_update();
        ret = send_response(conn->sock, &response, sizeof(response));
        if (ret < 0) {
-               goto end_unlock;
+               goto end_put_session;
        }
        health_code_update();
 
        /*
-        * Unknown or empty session, just return gracefully, the viewer knows what
-        * is happening.
+        * Unknown or empty session, just return gracefully, the viewer
+        * knows what is happening.
         */
        if (!send_streams || !nb_streams) {
                ret = 0;
-               goto end_unlock;
+               goto end_put_session;
        }
 
        /*
-        * Send stream and *DON'T* ignore the sent flag so every viewer streams
-        * that were not sent from that point will be sent to the viewer.
+        * Send stream and *DON'T* ignore the sent flag so every viewer
+        * streams that were not sent from that point will be sent to
+        * the viewer.
         */
-       ret = send_viewer_streams(conn->sock, session, 0);
+       ret = send_viewer_streams(conn->sock, session_id, 0);
        if (ret < 0) {
-               goto end_unlock;
+               goto end_put_session;
        }
 
-end_unlock:
-       rcu_read_unlock();
+end_put_session:
+       if (session) {
+               session_put(session);
+       }
 error:
        return ret;
+error_unlock_session:
+       pthread_mutex_unlock(&session->lock);
+       session_put(session);
+       return ret;
 }
 
 /*
@@ -1005,7 +1219,10 @@ int viewer_attach_session(struct relay_connection *conn)
        enum lttng_viewer_seek seek_type;
        struct lttng_viewer_attach_session_request request;
        struct lttng_viewer_attach_session_response response;
-       struct relay_session *session;
+       struct relay_session *session = NULL;
+       enum lttng_viewer_attach_return_code viewer_attach_status;
+       bool closed = false;
+       uint64_t session_id;
 
        assert(conn);
 
@@ -1017,6 +1234,7 @@ int viewer_attach_session(struct relay_connection *conn)
                goto error;
        }
 
+       session_id = be64toh(request.session_id);
        health_code_update();
 
        memset(&response, 0, sizeof(response));
@@ -1027,37 +1245,42 @@ int viewer_attach_session(struct relay_connection *conn)
                goto send_reply;
        }
 
-       rcu_read_lock();
-       session = session_find_by_id(conn->sessions_ht,
-                       be64toh(request.session_id));
+       session = session_get_by_id(session_id);
        if (!session) {
-               DBG("Relay session %" PRIu64 " not found",
-                               be64toh(request.session_id));
+               DBG("Relay session %" PRIu64 " not found", session_id);
                response.status = htobe32(LTTNG_VIEWER_ATTACH_UNK);
                goto send_reply;
        }
-       session_viewer_attach(session);
-       DBG("Attach session ID %" PRIu64 " received", be64toh(request.session_id));
+       DBG("Attach session ID %" PRIu64 " received", session_id);
 
-       if (uatomic_read(&session->viewer_refcount) > 1) {
-               DBG("Already a viewer attached");
-               response.status = htobe32(LTTNG_VIEWER_ATTACH_ALREADY);
-               session_viewer_detach(session);
+       pthread_mutex_lock(&session->lock);
+       if (!session->current_trace_chunk) {
+               /*
+                * Session is either being destroyed or it never had a trace
+                * chunk created against it.
+                */
+               DBG("Session requested by live client has no current trace chunk, returning unknown session");
+               response.status = htobe32(LTTNG_VIEWER_ATTACH_UNK);
                goto send_reply;
-       } else if (session->live_timer == 0) {
+       }
+       if (session->live_timer == 0) {
                DBG("Not live session");
                response.status = htobe32(LTTNG_VIEWER_ATTACH_NOT_LIVE);
                goto send_reply;
-       } else {
-               send_streams = 1;
-               response.status = htobe32(LTTNG_VIEWER_ATTACH_OK);
-               cds_list_add(&session->viewer_session_list,
-                               &conn->viewer_session->sessions_head);
+       }
+
+       send_streams = 1;
+       viewer_attach_status = viewer_session_attach(conn->viewer_session,
+                       session);
+       if (viewer_attach_status != LTTNG_VIEWER_ATTACH_OK) {
+               response.status = htobe32(viewer_attach_status);
+               goto send_reply;
        }
 
        switch (be32toh(request.seek)) {
        case LTTNG_VIEWER_SEEK_BEGINNING:
        case LTTNG_VIEWER_SEEK_LAST:
+               response.status = htobe32(LTTNG_VIEWER_ATTACH_OK);
                seek_type = be32toh(request.seek);
                break;
        default:
@@ -1067,37 +1290,58 @@ int viewer_attach_session(struct relay_connection *conn)
                goto send_reply;
        }
 
-       ret = make_viewer_streams(session, seek_type, &nb_streams, NULL, NULL);
+       ret = make_viewer_streams(session,
+                       conn->viewer_session, seek_type,
+                       &nb_streams, NULL, NULL, &closed);
        if (ret < 0) {
-               goto end_unlock;
+               goto end_put_session;
        }
+       pthread_mutex_unlock(&session->lock);
+       session_put(session);
+       session = NULL;
+
        response.streams_count = htobe32(nb_streams);
+       /*
+        * If the session is closed when the viewer is attaching, it
+        * means some of the streams may have been concurrently removed,
+        * so we don't allow the viewer to attach, even if there are
+        * streams available.
+        */
+       if (closed) {
+               send_streams = 0;
+               response.streams_count = 0;
+               response.status = htobe32(LTTNG_VIEWER_ATTACH_UNK);
+               goto send_reply;
+       }
 
 send_reply:
        health_code_update();
        ret = send_response(conn->sock, &response, sizeof(response));
        if (ret < 0) {
-               goto end_unlock;
+               goto end_put_session;
        }
        health_code_update();
 
        /*
-        * Unknown or empty session, just return gracefully, the viewer knows what
-        * is happening.
+        * Unknown or empty session, just return gracefully, the viewer
+        * knows what is happening.
         */
        if (!send_streams || !nb_streams) {
                ret = 0;
-               goto end_unlock;
+               goto end_put_session;
        }
 
        /* Send stream and ignore the sent flag. */
-       ret = send_viewer_streams(conn->sock, session, 1);
+       ret = send_viewer_streams(conn->sock, session_id, 1);
        if (ret < 0) {
-               goto end_unlock;
+               goto end_put_session;
        }
 
-end_unlock:
-       rcu_read_unlock();
+end_put_session:
+       if (session) {
+               pthread_mutex_unlock(&session->lock);
+               session_put(session);
+       }
 error:
        return ret;
 }
@@ -1105,39 +1349,45 @@ error:
 /*
  * Open the index file if needed for the given vstream.
  *
- * If an index file is successfully opened, the index_read_fd of the stream is
- * set with it.
+ * If an index file is successfully opened, the vstream will set it as its
+ * current index file.
  *
  * Return 0 on success, a negative value on error (-ENOENT if not ready yet).
+ *
+ * Called with rstream lock held.
  */
 static int try_open_index(struct relay_viewer_stream *vstream,
                struct relay_stream *rstream)
 {
        int ret = 0;
+       const uint32_t connection_major = rstream->trace->session->major;
+       const uint32_t connection_minor = rstream->trace->session->minor;
+       enum lttng_trace_chunk_status chunk_status;
 
-       assert(vstream);
-       assert(rstream);
-
-       if (vstream->index_read_fd >= 0) {
+       if (vstream->index_file) {
                goto end;
        }
 
        /*
-        * First time, we open the index file and at least one index is ready.  The
-        * race between the read and write of the total_index_received is
-        * acceptable here since the client will be notified to simply come back
-        * and get the next index.
+        * First time, we open the index file and at least one index is ready.
         */
-       if (rstream->total_index_received <= 0) {
+       if (rstream->index_received_seqcount == 0) {
                ret = -ENOENT;
                goto end;
        }
-       ret = index_open(vstream->path_name, vstream->channel_name,
-                       vstream->tracefile_count, vstream->tracefile_count_current);
-       if (ret >= 0) {
-               vstream->index_read_fd = ret;
-               ret = 0;
-               goto end;
+       chunk_status = lttng_index_file_create_from_trace_chunk_read_only(
+                       vstream->stream_file.trace_chunk, rstream->path_name,
+                       rstream->channel_name, rstream->tracefile_size,
+                       vstream->current_tracefile_id,
+                       lttng_to_index_major(connection_major, connection_minor),
+                       lttng_to_index_minor(connection_major, connection_minor),
+                       true, &vstream->index_file);
+       if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
+               if (chunk_status == LTTNG_TRACE_CHUNK_STATUS_NO_FILE) {
+                       ret = -ENOENT;
+               } else {
+                       ret = -1;
+               }
        }
 
 end:
@@ -1145,13 +1395,15 @@ end:
 }
 
 /*
- * Check the status of the index for the given stream. This function updates
- * the index structure if needed and can destroy the vstream also for the HUP
- * situation.
+ * Check the status of the index for the given stream. This function
+ * updates the index structure if needed and can put (close) the vstream
+ * in the HUP situation.
  *
- * Return 0 means that we can proceed with the index. A value of 1 means that
- * the index has been updated and is ready to be send to the client. A negative
- * value indicates an error that can't be handled.
+ * Return 0 means that we can proceed with the index. A value of 1 means
+ * that the index has been updated and is ready to be sent to the
+ * client. A negative value indicates an error that can't be handled.
+ *
+ * Called with rstream lock held.
  */
 static int check_index_status(struct relay_viewer_stream *vstream,
                struct relay_stream *rstream, struct ctf_trace *trace,
@@ -1159,68 +1411,119 @@ static int check_index_status(struct relay_viewer_stream *vstream,
 {
        int ret;
 
-       assert(vstream);
-       assert(rstream);
-       assert(index);
-       assert(trace);
-
-       if (!rstream->close_flag) {
-               /* Rotate on abort (overwrite). */
-               if (vstream->abort_flag) {
-                       DBG("Viewer stream %" PRIu64 " rotate because of overwrite",
-                                       vstream->stream_handle);
-                       ret = viewer_stream_rotate(vstream, rstream);
-                       if (ret < 0) {
-                               goto error;
-                       } else if (ret == 1) {
-                               /* EOF */
-                               index->status = htobe32(LTTNG_VIEWER_INDEX_HUP);
-                               goto hup;
-                       }
-                       /* ret == 0 means successful so we continue. */
-               }
-
-               /* Check if we are in the same trace file at this point. */
-               if (rstream->tracefile_count_current == vstream->tracefile_count_current) {
-                       if (rstream->beacon_ts_end != -1ULL &&
-                                       vstream->last_sent_index == rstream->total_index_received) {
-                               /*
-                                * We've received a synchronization beacon and the last index
-                                * available has been sent, the index for now is inactive.
-                                */
-                               index->status = htobe32(LTTNG_VIEWER_INDEX_INACTIVE);
-                               index->timestamp_end = htobe64(rstream->beacon_ts_end);
-                               index->stream_id = htobe64(rstream->ctf_stream_id);
-                               goto index_ready;
-                       } else if (rstream->total_index_received <= vstream->last_sent_index
-                                       && !vstream->close_write_flag) {
-                               /*
-                                * Reader and writer are working in the same tracefile, so we care
-                                * about the number of index received and sent. Otherwise, we read
-                                * up to EOF.
-                                */
-                               index->status = htobe32(LTTNG_VIEWER_INDEX_RETRY);
-                               goto index_ready;
-                       }
-               }
-               /* Nothing to do with the index, continue with it. */
-               ret = 0;
-       } else if (rstream->close_flag && vstream->close_write_flag &&
-                       vstream->total_index_received == vstream->last_sent_index) {
-               /* Last index sent and current tracefile closed in write */
+       DBG("Check index status: index_received_seqcount %" PRIu64 " "
+                               "index_sent_seqcount %" PRIu64 " "
+                               "for stream %" PRIu64,
+                               rstream->index_received_seqcount,
+                               vstream->index_sent_seqcount,
+                               vstream->stream->stream_handle);
+       if ((trace->session->connection_closed || rstream->closed)
+                       && rstream->index_received_seqcount
+                               == vstream->index_sent_seqcount) {
+               /*
+                * Last index sent and session connection or relay
+                * stream are closed.
+                */
                index->status = htobe32(LTTNG_VIEWER_INDEX_HUP);
                goto hup;
-       } else {
-               vstream->close_write_flag = 1;
-               ret = 0;
+       } else if (rstream->beacon_ts_end != -1ULL &&
+                       (rstream->index_received_seqcount == 0 ||
+                       (vstream->index_sent_seqcount != 0 &&
+                       rstream->index_received_seqcount
+                               <= vstream->index_sent_seqcount))) {
+               /*
+                * We've received a synchronization beacon and the last index
+                * available has been sent, the index for now is inactive.
+                *
+                * In this case, we have received a beacon which allows us to
+                * inform the client of a time interval during which we can
+                * guarantee that there are no events to read (and never will
+                * be).
+                *
+                * The sent seqcount can grow higher than the received
+                * seqcount after a clear because the rotation performed
+                * by the clear pushes index_sent_seqcount ahead (see
+                * viewer_stream_sync_tracefile_array_tail) and skips
+                * over packet sequence numbers.
+                */
+               index->status = htobe32(LTTNG_VIEWER_INDEX_INACTIVE);
+               index->timestamp_end = htobe64(rstream->beacon_ts_end);
+               index->stream_id = htobe64(rstream->ctf_stream_id);
+               DBG("Check index status: inactive with beacon, for stream %" PRIu64,
+                               vstream->stream->stream_handle);
+               goto index_ready;
+       } else if (rstream->index_received_seqcount == 0 ||
+                       (vstream->index_sent_seqcount != 0 &&
+                       rstream->index_received_seqcount
+                               <= vstream->index_sent_seqcount)) {
+               /*
+                * This checks whether received <= sent seqcount. In
+                * this case, we have not received a beacon. Therefore,
+                * we can only ask the client to retry later.
+                *
+                * The sent seqcount can grow higher than the received
+                * seqcount after a clear because the rotation performed
+                * by the clear pushes index_sent_seqcount ahead (see
+                * viewer_stream_sync_tracefile_array_tail) and skips
+                * over packet sequence numbers.
+                */
+               index->status = htobe32(LTTNG_VIEWER_INDEX_RETRY);
+               DBG("Check index status: retry for stream %" PRIu64,
+                               vstream->stream->stream_handle);
+               goto index_ready;
+       } else if (!tracefile_array_seq_in_file(rstream->tfa,
+                       vstream->current_tracefile_id,
+                       vstream->index_sent_seqcount)) {
+               /*
+                * The next index we want to send cannot be read either
+                * because we need to perform a rotation, or due to
+                * the producer having overwritten its trace file.
+                */
+               DBG("Viewer stream %" PRIu64 " rotation",
+                               vstream->stream->stream_handle);
+               ret = viewer_stream_rotate(vstream);
+               if (ret == 1) {
+                       /* EOF across entire stream. */
+                       index->status = htobe32(LTTNG_VIEWER_INDEX_HUP);
+                       goto hup;
+               }
+               /*
+                * If we have been pushed due to overwrite, it
+                * necessarily means there is data that can be read in
+                * the stream. If we rotated because we reached the end
+                * of a tracefile, it means the following tracefile
+                * needs to contain at least one index, else we would
+                * have already returned LTTNG_VIEWER_INDEX_RETRY to the
+                * viewer. The updated index_sent_seqcount needs to
+                * point to a readable index entry now.
+                *
+                * In the case where we "rotate" on a single file, the
+                * requested index may still be unavailable.
+                */
+               if (rstream->tracefile_count == 1 &&
+                               !tracefile_array_seq_in_file(
+                                       rstream->tfa,
+                                       vstream->current_tracefile_id,
+                                       vstream->index_sent_seqcount)) {
+                       index->status = htobe32(LTTNG_VIEWER_INDEX_RETRY);
+                       DBG("Check index status: retry: "
+                               "tracefile array sequence number %" PRIu64
+                               " not in file for stream %" PRIu64,
+                               vstream->index_sent_seqcount,
+                               vstream->stream->stream_handle);
+                       goto index_ready;
+               }
+               assert(tracefile_array_seq_in_file(rstream->tfa,
+                               vstream->current_tracefile_id,
+                               vstream->index_sent_seqcount));
        }
-
-error:
+       /* ret == 0 means successful so we continue. */
+       ret = 0;
        return ret;
 
 hup:
-       viewer_stream_delete(vstream);
-       viewer_stream_destroy(trace, vstream);
+       viewer_stream_put(vstream);
 index_ready:
        return 1;
 }
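
For reference, the "nothing new to send" test used twice above reduces to a comparison between the relay's received index seqcount and the viewer's sent seqcount. Below is a minimal, self-contained sketch under that reading; the helper name no_new_index_available and the standalone form are illustrative, not part of relayd:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * Hypothetical helper (not in relayd): true when every index received
 * by the relay for this stream has already been sent to the viewer.
 * Seqcounts start at 0.  After a clear, the sent seqcount may run
 * ahead of the received one; that case also means "nothing new".
 */
static bool no_new_index_available(uint64_t index_received_seqcount,
		uint64_t index_sent_seqcount)
{
	return index_received_seqcount == 0 ||
			(index_sent_seqcount != 0 &&
			index_received_seqcount <= index_sent_seqcount);
}

int main(void)
{
	/* Nothing received yet: nothing can be sent. */
	assert(no_new_index_available(0, 0));
	/* Received 5 indexes, sent 3: the next index can be served. */
	assert(!no_new_index_available(5, 3));
	/* After a clear, sent (8) may exceed received (6): nothing new. */
	assert(no_new_index_available(6, 8));
	return 0;
}

The asserts mirror the three situations handled above: nothing received yet, new indexes available, and a post-clear state where the sent seqcount runs ahead.
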
@@ -1234,19 +1537,19 @@ static
 int viewer_get_next_index(struct relay_connection *conn)
 {
        int ret;
-       ssize_t read_ret;
        struct lttng_viewer_get_next_index request_index;
        struct lttng_viewer_index viewer_index;
        struct ctf_packet_index packet_index;
-       struct relay_viewer_stream *vstream;
-       struct relay_stream *rstream;
-       struct ctf_trace *ctf_trace;
-       struct relay_session *session;
+       struct relay_viewer_stream *vstream = NULL;
+       struct relay_stream *rstream = NULL;
+       struct ctf_trace *ctf_trace = NULL;
+       struct relay_viewer_stream *metadata_viewer_stream = NULL;
 
        assert(conn);
 
        DBG("Viewer get next index");
 
+       memset(&viewer_index, 0, sizeof(viewer_index));
        health_code_update();
 
        ret = recv_request(conn->sock, &request_index, sizeof(request_index));
@@ -1255,134 +1558,167 @@ int viewer_get_next_index(struct relay_connection *conn)
        }
        health_code_update();
 
-       rcu_read_lock();
-       vstream = viewer_stream_find_by_id(be64toh(request_index.stream_id));
+       vstream = viewer_stream_get_by_id(be64toh(request_index.stream_id));
        if (!vstream) {
-               ret = -1;
-               goto end_unlock;
+               DBG("Client requested index of unknown stream id %" PRIu64,
+                               (uint64_t) be64toh(request_index.stream_id));
+               viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_ERR);
+               goto send_reply;
        }
 
-       session = session_find_by_id(conn->sessions_ht, vstream->session_id);
-       if (!session) {
-               ret = -1;
-               goto end_unlock;
-       }
+       /* Use the back-reference; its lifetime is protected by refcounts. */
+       rstream = vstream->stream;
+       ctf_trace = rstream->trace;
 
-       ctf_trace = ctf_trace_find_by_path(session->ctf_traces_ht, vstream->path_name);
-       assert(ctf_trace);
+       /* metadata_viewer_stream may be NULL. */
+       metadata_viewer_stream =
+                       ctf_trace_get_viewer_metadata_stream(ctf_trace);
 
-       memset(&viewer_index, 0, sizeof(viewer_index));
+       pthread_mutex_lock(&rstream->lock);
 
        /*
         * The viewer should not ask for index on metadata stream.
         */
-       if (vstream->metadata_flag) {
+       if (rstream->is_metadata) {
                viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_HUP);
                goto send_reply;
        }
 
-       rstream = stream_find_by_id(relay_streams_ht, vstream->stream_handle);
-       assert(rstream);
+       if (rstream->ongoing_rotation.is_set) {
+               /* Rotation is ongoing, try again later. */
+               viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_RETRY);
+               goto send_reply;
+       }
 
-       /* Try to open an index if one is needed for that stream. */
-       ret = try_open_index(vstream, rstream);
-       if (ret < 0) {
-               if (ret == -ENOENT) {
-                       /*
-                        * The index is created only when the first data packet arrives, it
-                        * might not be ready at the beginning of the session
-                        */
-                       viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_RETRY);
-               } else {
-                       /* Unhandled error. */
+       if (rstream->trace->session->ongoing_rotation) {
+               /* Rotation is ongoing, try again later. */
+               viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_RETRY);
+               goto send_reply;
+       }
+
+       if (rstream->trace_chunk && !lttng_trace_chunk_ids_equal(
+                       conn->viewer_session->current_trace_chunk,
+                       rstream->trace_chunk)) {
+               DBG("Relay stream and viewer chunk ids differ");
+
+               ret = viewer_session_set_trace_chunk_copy(
+                               conn->viewer_session,
+                               rstream->trace_chunk);
+               if (ret) {
                        viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_ERR);
+                       goto send_reply;
                }
-               goto send_reply;
+       }
+       if (conn->viewer_session->current_trace_chunk !=
+                       vstream->stream_file.trace_chunk) {
+               bool acquired_reference;
+
+               DBG("Viewer session and viewer stream chunk differ: "
+                               "vsession chunk %p vstream chunk %p",
+                               conn->viewer_session->current_trace_chunk,
+                               vstream->stream_file.trace_chunk);
+               lttng_trace_chunk_put(vstream->stream_file.trace_chunk);
+               acquired_reference = lttng_trace_chunk_get(conn->viewer_session->current_trace_chunk);
+               assert(acquired_reference);
+               vstream->stream_file.trace_chunk =
+                       conn->viewer_session->current_trace_chunk;
+               viewer_stream_sync_tracefile_array_tail(vstream);
+               viewer_stream_close_files(vstream);
        }
 
-       pthread_mutex_lock(&rstream->viewer_stream_rotation_lock);
        ret = check_index_status(vstream, rstream, ctf_trace, &viewer_index);
-       pthread_mutex_unlock(&rstream->viewer_stream_rotation_lock);
        if (ret < 0) {
-               goto end_unlock;
+               goto error_put;
        } else if (ret == 1) {
                /*
-                * This means the viewer index data structure has been populated by the
-                * check call thus we now send back the reply to the client.
+                * We have no index to send and check_index_status has populated
+                * viewer_index's status.
                 */
                goto send_reply;
        }
-       /* At this point, ret MUST be 0 thus we continue with the get. */
+       /* At this point, ret is 0 thus we will be able to read the index. */
        assert(!ret);
 
-       if (!ctf_trace->metadata_received ||
-                       ctf_trace->metadata_received > ctf_trace->metadata_sent) {
-               viewer_index.flags |= LTTNG_VIEWER_FLAG_NEW_METADATA;
+       /* Try to open an index if one is needed for that stream. */
+       ret = try_open_index(vstream, rstream);
+       if (ret == -ENOENT) {
+               if (rstream->closed) {
+                       viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_HUP);
+                       goto send_reply;
+               } else {
+                       viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_RETRY);
+                       goto send_reply;
+               }
        }
-
-       ret = check_new_streams(conn);
        if (ret < 0) {
-               goto end_unlock;
-       } else if (ret == 1) {
-               viewer_index.flags |= LTTNG_VIEWER_FLAG_NEW_STREAM;
+               viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_ERR);
+               goto send_reply;
        }
 
-       pthread_mutex_lock(&rstream->viewer_stream_rotation_lock);
-       pthread_mutex_lock(&vstream->overwrite_lock);
-       if (vstream->abort_flag) {
-               /* The file is being overwritten by the writer, we cannot use it. */
-               pthread_mutex_unlock(&vstream->overwrite_lock);
-               ret = viewer_stream_rotate(vstream, rstream);
-               pthread_mutex_unlock(&rstream->viewer_stream_rotation_lock);
+       /*
+        * vstream->stream_file.handle may be NULL if it has been closed
+        * by tracefile rotation, or if we are at the beginning of the
+        * stream. We open the data stream file here to protect against
+        * overwrite caused by tracefile rotation (in association with
+        * unlink performed before overwrite).
+        */
+       if (!vstream->stream_file.handle) {
+               char file_path[LTTNG_PATH_MAX];
+               enum lttng_trace_chunk_status status;
+               struct fs_handle *fs_handle;
+
+               ret = utils_stream_file_path(rstream->path_name,
+                               rstream->channel_name, rstream->tracefile_size,
+                               vstream->current_tracefile_id, NULL, file_path,
+                               sizeof(file_path));
                if (ret < 0) {
-                       goto end_unlock;
-               } else if (ret == 1) {
-                       viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_HUP);
-                       viewer_stream_delete(vstream);
-                       viewer_stream_destroy(ctf_trace, vstream);
-               } else {
-                       viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_RETRY);
+                       goto error_put;
                }
-               goto send_reply;
-       }
 
-       read_ret = lttng_read(vstream->index_read_fd, &packet_index,
-                       sizeof(packet_index));
-       pthread_mutex_unlock(&vstream->overwrite_lock);
-       pthread_mutex_unlock(&rstream->viewer_stream_rotation_lock);
-       if (read_ret < 0) {
-               viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_HUP);
-               viewer_stream_delete(vstream);
-               viewer_stream_destroy(ctf_trace, vstream);
-               goto send_reply;
-       } else if (read_ret < sizeof(packet_index)) {
-               pthread_mutex_lock(&rstream->viewer_stream_rotation_lock);
-               if (vstream->close_write_flag) {
-                       ret = viewer_stream_rotate(vstream, rstream);
-                       if (ret < 0) {
-                               pthread_mutex_unlock(&rstream->viewer_stream_rotation_lock);
-                               goto end_unlock;
-                       } else if (ret == 1) {
+               /*
+                * It is possible that the file we are trying to open is
+                * missing if the stream has been closed (application exits with
+                * per-pid buffers) and a clear command has been performed.
+                */
+               status = lttng_trace_chunk_open_fs_handle(
+                               vstream->stream_file.trace_chunk,
+                               file_path, O_RDONLY, 0, &fs_handle, true);
+               if (status != LTTNG_TRACE_CHUNK_STATUS_OK) {
+                       if (status == LTTNG_TRACE_CHUNK_STATUS_NO_FILE &&
+                                       rstream->closed) {
                                viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_HUP);
-                               viewer_stream_delete(vstream);
-                               viewer_stream_destroy(ctf_trace, vstream);
-                       } else {
-                               viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_RETRY);
+                               goto send_reply;
                        }
-               } else {
-                       ERR("Relay reading index file %d", vstream->index_read_fd);
-                       viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_ERR);
+                       PERROR("Failed to open trace file for viewer stream");
+                       goto error_put;
                }
-               pthread_mutex_unlock(&rstream->viewer_stream_rotation_lock);
+               vstream->stream_file.handle = fs_handle;
+       }
+
+       ret = check_new_streams(conn);
+       if (ret < 0) {
+               viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_ERR);
+               goto send_reply;
+       } else if (ret == 1) {
+               viewer_index.flags |= LTTNG_VIEWER_FLAG_NEW_STREAM;
+       }
+
+       ret = lttng_index_file_read(vstream->index_file, &packet_index);
+       if (ret) {
+               ERR("Relay error reading index file");
+               viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_ERR);
                goto send_reply;
        } else {
                viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_OK);
-               vstream->last_sent_index++;
+               vstream->index_sent_seqcount++;
        }
 
        /*
         * Indexes are stored in big endian, no need to switch before sending.
         */
+       DBG("Sending viewer index for stream %" PRIu64 " offset %" PRIu64,
+               rstream->stream_handle,
+               (uint64_t) be64toh(packet_index.offset));
        viewer_index.offset = packet_index.offset;
        viewer_index.packet_size = packet_index.packet_size;
        viewer_index.content_size = packet_index.content_size;
@@ -1392,22 +1728,53 @@ int viewer_get_next_index(struct relay_connection *conn)
        viewer_index.stream_id = packet_index.stream_id;
 
 send_reply:
+       if (rstream) {
+               pthread_mutex_unlock(&rstream->lock);
+       }
+
+       if (metadata_viewer_stream) {
+               pthread_mutex_lock(&metadata_viewer_stream->stream->lock);
+               DBG("get next index metadata check: recv %" PRIu64
+                               " sent %" PRIu64,
+                       metadata_viewer_stream->stream->metadata_received,
+                       metadata_viewer_stream->metadata_sent);
+               if (!metadata_viewer_stream->stream->metadata_received ||
+                               metadata_viewer_stream->stream->metadata_received >
+                                       metadata_viewer_stream->metadata_sent) {
+                       viewer_index.flags |= LTTNG_VIEWER_FLAG_NEW_METADATA;
+               }
+               pthread_mutex_unlock(&metadata_viewer_stream->stream->lock);
+       }
+
        viewer_index.flags = htobe32(viewer_index.flags);
        health_code_update();
 
        ret = send_response(conn->sock, &viewer_index, sizeof(viewer_index));
        if (ret < 0) {
-               goto end_unlock;
+               goto end;
        }
        health_code_update();
 
-       DBG("Index %" PRIu64 " for stream %" PRIu64 " sent",
-                       vstream->last_sent_index, vstream->stream_handle);
-
-end_unlock:
-       rcu_read_unlock();
-
+       if (vstream) {
+               DBG("Index %" PRIu64 " for stream %" PRIu64 " sent",
+                               vstream->index_sent_seqcount,
+                               vstream->stream->stream_handle);
+       }
 end:
+       if (metadata_viewer_stream) {
+               viewer_stream_put(metadata_viewer_stream);
+       }
+       if (vstream) {
+               viewer_stream_put(vstream);
+       }
+       return ret;
+
+error_put:
+       pthread_mutex_unlock(&rstream->lock);
+       if (metadata_viewer_stream) {
+               viewer_stream_put(metadata_viewer_stream);
+       }
+       viewer_stream_put(vstream);
        return ret;
 }
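
Both the index-file and data-file open paths above map a missing file to either HUP or RETRY depending only on whether the relay stream is closed (for example when its files have been removed by a clear, or the per-pid application has exited). Below is a rough sketch of that decision; the enum and classify_missing_file are hypothetical stand-ins for the LTTNG_VIEWER_INDEX_* constants and the inline checks:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the LTTNG_VIEWER_INDEX_* protocol values. */
enum sketch_index_status {
	SKETCH_INDEX_RETRY,
	SKETCH_INDEX_HUP,
};

/*
 * A closed relay stream with no file left will never produce more
 * data, so the viewer should hang up on it; a live stream simply has
 * not created the file yet, so the viewer should retry later.
 */
static enum sketch_index_status classify_missing_file(bool relay_stream_closed)
{
	return relay_stream_closed ? SKETCH_INDEX_HUP : SKETCH_INDEX_RETRY;
}

int main(void)
{
	printf("closed stream -> %s\n",
			classify_missing_file(true) == SKETCH_INDEX_HUP ?
					"HUP" : "RETRY");
	printf("live stream   -> %s\n",
			classify_missing_file(false) == SKETCH_INDEX_HUP ?
					"HUP" : "RETRY");
	return 0;
}
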
 
@@ -1419,162 +1786,107 @@ end:
 static
 int viewer_get_packet(struct relay_connection *conn)
 {
-       int ret, send_data = 0;
-       char *data = NULL;
-       uint32_t len = 0;
-       ssize_t read_len;
+       int ret;
+       off_t lseek_ret;
+       char *reply = NULL;
        struct lttng_viewer_get_packet get_packet_info;
-       struct lttng_viewer_trace_packet reply;
-       struct relay_viewer_stream *stream;
-       struct relay_session *session;
-       struct ctf_trace *ctf_trace;
-
-       assert(conn);
+       struct lttng_viewer_trace_packet reply_header;
+       struct relay_viewer_stream *vstream = NULL;
+       uint32_t reply_size = sizeof(reply_header);
+       uint32_t packet_data_len = 0;
+       ssize_t read_len;
+       uint64_t stream_id;
 
        DBG2("Relay get data packet");
 
        health_code_update();
 
-       ret = recv_request(conn->sock, &get_packet_info, sizeof(get_packet_info));
+       ret = recv_request(conn->sock, &get_packet_info,
+                       sizeof(get_packet_info));
        if (ret < 0) {
                goto end;
        }
        health_code_update();
 
        /* From this point on, the error label can be reached. */
-       memset(&reply, 0, sizeof(reply));
+       memset(&reply_header, 0, sizeof(reply_header));
+       stream_id = (uint64_t) be64toh(get_packet_info.stream_id);
 
-       rcu_read_lock();
-       stream = viewer_stream_find_by_id(be64toh(get_packet_info.stream_id));
-       if (!stream) {
-               goto error;
+       vstream = viewer_stream_get_by_id(stream_id);
+       if (!vstream) {
+               DBG("Client requested packet of unknown stream id %" PRIu64,
+                               stream_id);
+               reply_header.status = htobe32(LTTNG_VIEWER_GET_PACKET_ERR);
+               goto send_reply_nolock;
+       } else {
+               packet_data_len = be32toh(get_packet_info.len);
+               reply_size += packet_data_len;
        }
 
-       session = session_find_by_id(conn->sessions_ht, stream->session_id);
-       if (!session) {
-               ret = -1;
+       reply = zmalloc(reply_size);
+       if (!reply) {
+               PERROR("packet reply zmalloc");
+               reply_size = sizeof(reply_header);
                goto error;
        }
 
-       ctf_trace = ctf_trace_find_by_path(session->ctf_traces_ht,
-                       stream->path_name);
-       assert(ctf_trace);
-
-       /*
-        * First time we read this stream, we need open the tracefile, we should
-        * only arrive here if an index has already been sent to the viewer, so the
-        * tracefile must exist, if it does not it is a fatal error.
-        */
-       if (stream->read_fd < 0) {
-               char fullpath[PATH_MAX];
-
-               if (stream->tracefile_count > 0) {
-                       ret = snprintf(fullpath, PATH_MAX, "%s/%s_%" PRIu64, stream->path_name,
-                                       stream->channel_name,
-                                       stream->tracefile_count_current);
-               } else {
-                       ret = snprintf(fullpath, PATH_MAX, "%s/%s", stream->path_name,
-                                       stream->channel_name);
-               }
-               if (ret < 0) {
-                       goto error;
-               }
-               ret = open(fullpath, O_RDONLY);
-               if (ret < 0) {
-                       PERROR("Relay opening trace file");
-                       goto error;
-               }
-               stream->read_fd = ret;
-       }
-
-       if (!ctf_trace->metadata_received ||
-                       ctf_trace->metadata_received > ctf_trace->metadata_sent) {
-               reply.status = htobe32(LTTNG_VIEWER_GET_PACKET_ERR);
-               reply.flags |= LTTNG_VIEWER_FLAG_NEW_METADATA;
-               goto send_reply;
-       }
-
-       ret = check_new_streams(conn);
-       if (ret < 0) {
-               goto end_unlock;
-       } else if (ret == 1) {
-               reply.status = htobe32(LTTNG_VIEWER_GET_PACKET_ERR);
-               reply.flags |= LTTNG_VIEWER_FLAG_NEW_STREAM;
-               goto send_reply;
-       }
-
-       len = be32toh(get_packet_info.len);
-       data = zmalloc(len);
-       if (!data) {
-               PERROR("relay data zmalloc");
+       pthread_mutex_lock(&vstream->stream->lock);
+       lseek_ret = fs_handle_seek(vstream->stream_file.handle,
+                       be64toh(get_packet_info.offset), SEEK_SET);
+       if (lseek_ret < 0) {
+               PERROR("Failed to seek file system handle of viewer stream %" PRIu64
+                      " to offset %" PRIu64,
+                               stream_id,
+                               (uint64_t) be64toh(get_packet_info.offset));
                goto error;
        }
-
-       ret = lseek(stream->read_fd, be64toh(get_packet_info.offset), SEEK_SET);
-       if (ret < 0) {
-               /*
-                * If the read fd was closed by the streaming side, the
-                * abort_flag will be set to 1, otherwise it is an error.
-                */
-               if (stream->abort_flag == 0) {
-                       PERROR("lseek");
-                       goto error;
-               }
-               reply.status = htobe32(LTTNG_VIEWER_GET_PACKET_EOF);
-               goto send_reply;
-       }
-       read_len = lttng_read(stream->read_fd, data, len);
-       if (read_len < len) {
-               /*
-                * If the read fd was closed by the streaming side, the
-                * abort_flag will be set to 1, otherwise it is an error.
-                */
-               if (stream->abort_flag == 0) {
-                       PERROR("Relay reading trace file, fd: %d, offset: %" PRIu64,
-                                       stream->read_fd,
-                                       be64toh(get_packet_info.offset));
-                       goto error;
-               } else {
-                       reply.status = htobe32(LTTNG_VIEWER_GET_PACKET_EOF);
-                       goto send_reply;
-               }
+       read_len = fs_handle_read(vstream->stream_file.handle,
+                       reply + sizeof(reply_header), packet_data_len);
+       if (read_len < packet_data_len) {
+               PERROR("Failed to read from file system handle of viewer stream id %" PRIu64
+                      ", offset: %" PRIu64,
+                               stream_id,
+                               (uint64_t) be64toh(get_packet_info.offset));
+               goto error;
        }
-       reply.status = htobe32(LTTNG_VIEWER_GET_PACKET_OK);
-       reply.len = htobe32(len);
-       send_data = 1;
+       reply_header.status = htobe32(LTTNG_VIEWER_GET_PACKET_OK);
+       reply_header.len = htobe32(packet_data_len);
        goto send_reply;
 
 error:
-       reply.status = htobe32(LTTNG_VIEWER_GET_PACKET_ERR);
+       reply_header.status = htobe32(LTTNG_VIEWER_GET_PACKET_ERR);
 
 send_reply:
-       reply.flags = htobe32(reply.flags);
+       if (vstream) {
+               pthread_mutex_unlock(&vstream->stream->lock);
+       }
+send_reply_nolock:
 
        health_code_update();
 
-       ret = send_response(conn->sock, &reply, sizeof(reply));
-       if (ret < 0) {
-               goto end_unlock;
+       if (reply) {
+               memcpy(reply, &reply_header, sizeof(reply_header));
+               ret = send_response(conn->sock, reply, reply_size);
+       } else {
+               /* No payload was allocated; send the reply header only. */
+               ret = send_response(conn->sock, &reply_header,
+                               reply_size);
        }
-       health_code_update();
 
-       if (send_data) {
-               health_code_update();
-               ret = send_response(conn->sock, data, len);
-               if (ret < 0) {
-                       goto end_unlock;
-               }
-               health_code_update();
+       health_code_update();
+       if (ret < 0) {
+               PERROR("sendmsg of packet data failed");
+               goto end_free;
        }
 
-       DBG("Sent %u bytes for stream %" PRIu64, len,
-                       be64toh(get_packet_info.stream_id));
-
-end_unlock:
-       free(data);
-       rcu_read_unlock();
+       DBG("Sent %u bytes for stream %" PRIu64, reply_size, stream_id);
 
+end_free:
+       free(reply);
 end:
+       if (vstream) {
+               viewer_stream_put(vstream);
+       }
        return ret;
 }
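
The rewritten viewer_get_packet assembles its header and payload in one allocation and transmits them with a single send. Below is a simplified, self-contained sketch of that layout; sketch_packet_reply_header and build_packet_reply are hypothetical stand-ins, and endianness conversion is left out:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for the wire header (host byte order here). */
struct sketch_packet_reply_header {
	uint32_t status;
	uint32_t len;
	uint32_t flags;
};

/*
 * Build one contiguous reply buffer: header first, payload right
 * after, so a single send covers both and the client never sees a
 * header without its payload.
 */
static char *build_packet_reply(const struct sketch_packet_reply_header *header,
		const char *payload, uint32_t payload_len, size_t *reply_size)
{
	char *reply;

	*reply_size = sizeof(*header) + payload_len;
	reply = calloc(1, *reply_size);
	if (!reply) {
		return NULL;
	}
	memcpy(reply, header, sizeof(*header));
	memcpy(reply + sizeof(*header), payload, payload_len);
	return reply;
}

int main(void)
{
	const char payload[] = "packet bytes";
	const struct sketch_packet_reply_header header = {
		.status = 1, .len = sizeof(payload), .flags = 0,
	};
	size_t reply_size = 0;
	char *reply = build_packet_reply(&header, payload, sizeof(payload),
			&reply_size);
	int ok = reply != NULL;

	free(reply);
	return ok ? 0 : 1;
}

The function above goes one step further and reads the trace file directly into reply + sizeof(reply_header), avoiding the extra payload copy shown here.
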
 
@@ -1587,14 +1899,13 @@ static
 int viewer_get_metadata(struct relay_connection *conn)
 {
        int ret = 0;
+       int fd = -1;
        ssize_t read_len;
        uint64_t len = 0;
        char *data = NULL;
        struct lttng_viewer_get_metadata request;
        struct lttng_viewer_metadata_packet reply;
-       struct relay_viewer_stream *stream;
-       struct ctf_trace *ctf_trace;
-       struct relay_session *session;
+       struct relay_viewer_stream *vstream = NULL;
 
        assert(conn);
 
@@ -1610,45 +1921,151 @@ int viewer_get_metadata(struct relay_connection *conn)
 
        memset(&reply, 0, sizeof(reply));
 
-       rcu_read_lock();
-       stream = viewer_stream_find_by_id(be64toh(request.stream_id));
-       if (!stream || !stream->metadata_flag) {
+       vstream = viewer_stream_get_by_id(be64toh(request.stream_id));
+       if (!vstream) {
+               /*
+                * The metadata stream can be closed by a CLOSE command
+                * just before we attach. It can also be closed while
+                * tracing is still active when per-pid buffers are used
+                * and the application exits. Therefore, it is possible
+                * that we cannot find this viewer stream. Reply back to
+                * the client with an error if we cannot find it.
+                */
+               DBG("Client requested metadata of unknown stream id %" PRIu64,
+                               (uint64_t) be64toh(request.stream_id));
+               reply.status = htobe32(LTTNG_VIEWER_METADATA_ERR);
+               goto send_reply;
+       }
+       pthread_mutex_lock(&vstream->stream->lock);
+       if (!vstream->stream->is_metadata) {
                ERR("Invalid metadata stream");
                goto error;
        }
 
-       session = session_find_by_id(conn->sessions_ht, stream->session_id);
-       if (!session) {
-               ret = -1;
-               goto error;
+       if (vstream->metadata_sent >= vstream->stream->metadata_received) {
+               /*
+                * The live viewers expect to receive a NO_NEW_METADATA
+                * status before a stream disappears, otherwise they abort the
+                * entire live connection when receiving an error status.
+                *
+                * The clear feature resets metadata_sent to 0 until the
+                * same metadata is received again.
+                */
+               reply.status = htobe32(LTTNG_VIEWER_NO_NEW_METADATA);
+               /*
+                * The live viewer considers a closed, 0-byte metadata
+                * stream an error.
+                */
+               if (vstream->metadata_sent > 0) {
+                       vstream->stream->no_new_metadata_notified = true;
+                       if (vstream->stream->closed) {
+                               /* Release ownership for the viewer metadata stream. */
+                               viewer_stream_put(vstream);
+                       }
+               }
+               goto send_reply;
        }
 
-       ctf_trace = ctf_trace_find_by_path(session->ctf_traces_ht,
-                       stream->path_name);
-       assert(ctf_trace);
-       assert(ctf_trace->metadata_sent <= ctf_trace->metadata_received);
+       if (vstream->stream->trace_chunk &&
+                       !lttng_trace_chunk_ids_equal(
+                               conn->viewer_session->current_trace_chunk,
+                               vstream->stream->trace_chunk)) {
+               /* A rotation has occurred on the relay stream. */
+               DBG("Metadata relay stream and viewer chunk ids differ");
 
-       len = ctf_trace->metadata_received - ctf_trace->metadata_sent;
-       if (len == 0) {
-               reply.status = htobe32(LTTNG_VIEWER_NO_NEW_METADATA);
-               goto send_reply;
+               ret = viewer_session_set_trace_chunk_copy(
+                               conn->viewer_session,
+                               vstream->stream->trace_chunk);
+               if (ret) {
+                       reply.status = htobe32(LTTNG_VIEWER_METADATA_ERR);
+                       goto send_reply;
+               }
+       }
+
+       if (conn->viewer_session->current_trace_chunk !=
+                       vstream->stream_file.trace_chunk) {
+               bool acquired_reference;
+
+               DBG("Viewer session and viewer stream chunk differ: "
+                               "vsession chunk %p vstream chunk %p",
+                               conn->viewer_session->current_trace_chunk,
+                               vstream->stream_file.trace_chunk);
+               lttng_trace_chunk_put(vstream->stream_file.trace_chunk);
+               acquired_reference = lttng_trace_chunk_get(conn->viewer_session->current_trace_chunk);
+               assert(acquired_reference);
+               vstream->stream_file.trace_chunk =
+                       conn->viewer_session->current_trace_chunk;
+               viewer_stream_close_files(vstream);
        }
 
-       /* first time, we open the metadata file */
-       if (stream->read_fd < 0) {
-               char fullpath[PATH_MAX];
+       len = vstream->stream->metadata_received - vstream->metadata_sent;
 
-               ret = snprintf(fullpath, PATH_MAX, "%s/%s", stream->path_name,
-                               stream->channel_name);
+       /*
+        * Either this is the first time the metadata file is read, or a
+        * rotation of the corresponding relay stream has occurred.
+        */
+       if (!vstream->stream_file.handle && len > 0) {
+               struct fs_handle *fs_handle;
+               char file_path[LTTNG_PATH_MAX];
+               enum lttng_trace_chunk_status status;
+               struct relay_stream *rstream = vstream->stream;
+
+               ret = utils_stream_file_path(rstream->path_name,
+                               rstream->channel_name, rstream->tracefile_size,
+                               vstream->current_tracefile_id, NULL, file_path,
+                               sizeof(file_path));
                if (ret < 0) {
                        goto error;
                }
-               ret = open(fullpath, O_RDONLY);
-               if (ret < 0) {
-                       PERROR("Relay opening metadata file");
+
+               /*
+                * It is possible that the metadata file we are trying to open is
+                * missing if the stream has been closed (application exits with
+                * per-pid buffers) and a clear command has been performed.
+                */
+               status = lttng_trace_chunk_open_fs_handle(
+                               vstream->stream_file.trace_chunk,
+                               file_path, O_RDONLY, 0, &fs_handle, true);
+               if (status != LTTNG_TRACE_CHUNK_STATUS_OK) {
+                       if (status == LTTNG_TRACE_CHUNK_STATUS_NO_FILE) {
+                               reply.status = htobe32(LTTNG_VIEWER_NO_NEW_METADATA);
+                               len = 0;
+                               if (vstream->stream->closed) {
+                                       viewer_stream_put(vstream);
+                               }
+                               goto send_reply;
+                       }
+                       PERROR("Failed to open metadata file for viewer stream");
                        goto error;
                }
-               stream->read_fd = ret;
+               vstream->stream_file.handle = fs_handle;
+
+               if (vstream->metadata_sent != 0) {
+                       /*
+                        * The client does not expect to receive metadata
+                        * it has already received, and metadata files in
+                        * successive chunks must be strict supersets of
+                        * one another.
+                        *
+                        * Skip the first `metadata_sent` bytes to ensure
+                        * they are not sent a second time to the client.
+                        *
+                        * Barring a block layer error or an internal error,
+                        * this seek should not fail as
+                        * `vstream->stream->metadata_received` is reset when
+                        * a relay stream is rotated. If this is reached, it is
+                        * safe to assume that
+                        * `metadata_received` > `metadata_sent`.
+                        */
+                       const off_t seek_ret = fs_handle_seek(fs_handle,
+                                       vstream->metadata_sent, SEEK_SET);
+
+                       if (seek_ret < 0) {
+                               PERROR("Failed to seek metadata viewer stream file to `sent` position: pos = %" PRId64,
+                                               vstream->metadata_sent);
+                               reply.status = htobe32(LTTNG_VIEWER_METADATA_ERR);
+                               goto send_reply;
+                       }
+               }
        }
 
        reply.len = htobe64(len);
@@ -1658,13 +2075,47 @@ int viewer_get_metadata(struct relay_connection *conn)
                goto error;
        }
 
-       read_len = lttng_read(stream->read_fd, data, len);
-       if (read_len < len) {
-               PERROR("Relay reading metadata file");
+       fd = fs_handle_get_fd(vstream->stream_file.handle);
+       if (fd < 0) {
+               ERR("Failed to restore viewer stream file system handle");
                goto error;
        }
-       ctf_trace->metadata_sent += read_len;
+       read_len = lttng_read(fd, data, len);
+       fs_handle_put_fd(vstream->stream_file.handle);
+       fd = -1;
+       if (read_len < len) {
+               if (read_len < 0) {
+                       PERROR("Failed to read metadata file");
+                       goto error;
+               } else {
+                       /*
+                        * A clear has been performed which prevents the relay
+                        * from sending `len` bytes of metadata.
+                        *
+                        * It is important not to send any metadata if we
+                        * couldn't read all the available metadata in one shot:
+                        * sending partial metadata can cause the client to
+                        * attempt to parse an incomplete (incoherent) metadata
+                        * stream, which would result in an error.
+                        */
+                       const off_t seek_ret = fs_handle_seek(
+                                       vstream->stream_file.handle, -read_len,
+                                       SEEK_CUR);
+
+                       DBG("Failed to read metadata: requested = %" PRIu64 ", got = %zd",
+                                       len, read_len);
+                       read_len = 0;
+                       len = 0;
+                       if (seek_ret < 0) {
+                               PERROR("Failed to restore metadata file position after partial read");
+                               ret = -1;
+                               goto error;
+                       }
+               }
+       }
+       vstream->metadata_sent += read_len;
        reply.status = htobe32(LTTNG_VIEWER_METADATA_OK);
+
        goto send_reply;
 
 error:
@@ -1672,28 +2123,33 @@ error:
 
 send_reply:
        health_code_update();
+       if (vstream) {
+               pthread_mutex_unlock(&vstream->stream->lock);
+       }
        ret = send_response(conn->sock, &reply, sizeof(reply));
        if (ret < 0) {
-               goto end_unlock;
+               goto end_free;
        }
        health_code_update();
 
        if (len > 0) {
                ret = send_response(conn->sock, data, len);
                if (ret < 0) {
-                       goto end_unlock;
+                       goto end_free;
                }
        }
 
        DBG("Sent %" PRIu64 " bytes of metadata for stream %" PRIu64, len,
-                       be64toh(request.stream_id));
+                       (uint64_t) be64toh(request.stream_id));
 
        DBG("Metadata sent");
 
-end_unlock:
+end_free:
        free(data);
-       rcu_read_unlock();
 end:
+       if (vstream) {
+               viewer_stream_put(vstream);
+       }
        return ret;
 }
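
The partial-read handling above amounts to "send all of the requested metadata or none of it, and put the file offset back where the attempt started". Below is a rough sketch on a plain file descriptor; read_all_or_nothing is a hypothetical name, and the relayd itself goes through its fs_handle wrappers and lttng_read(), which retries short reads:

#include <sys/types.h>
#include <unistd.h>

/*
 * Illustrative all-or-nothing read on a plain file descriptor.  A bare
 * read() is used only to keep the sketch self-contained; it may return
 * fewer bytes than requested without any error.
 */
ssize_t read_all_or_nothing(int fd, char *buf, size_t len)
{
	ssize_t read_len = read(fd, buf, len);

	if (read_len < 0) {
		return -1; /* Hard error. */
	}
	if ((size_t) read_len < len) {
		/* Rewind to where this read started. */
		if (lseek(fd, -read_len, SEEK_CUR) < 0) {
			return -1;
		}
		return 0; /* Nothing coherent to send yet. */
	}
	return read_len;
}

Returning zero bytes instead of a truncated range keeps the client from parsing an incoherent metadata stream, which is why the code above also resets len and read_len to 0 before replying.
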
 
@@ -1712,13 +2168,12 @@ int viewer_create_session(struct relay_connection *conn)
 
        memset(&resp, 0, sizeof(resp));
        resp.status = htobe32(LTTNG_VIEWER_CREATE_SESSION_OK);
-       conn->viewer_session = zmalloc(sizeof(*conn->viewer_session));
+       conn->viewer_session = viewer_session_create();
        if (!conn->viewer_session) {
                ERR("Allocation viewer session");
                resp.status = htobe32(LTTNG_VIEWER_CREATE_SESSION_ERR);
                goto send_reply;
        }
-       CDS_INIT_LIST_HEAD(&conn->viewer_session->sessions_head);
 
 send_reply:
        health_code_update();
@@ -1733,6 +2188,78 @@ end:
        return ret;
 }
 
+/*
+ * Detach a viewer session.
+ *
+ * Return 0 on success or else a negative value.
+ */
+static
+int viewer_detach_session(struct relay_connection *conn)
+{
+       int ret;
+       struct lttng_viewer_detach_session_response response;
+       struct lttng_viewer_detach_session_request request;
+       struct relay_session *session = NULL;
+       uint64_t viewer_session_to_close;
+
+       DBG("Viewer detach session received");
+
+       assert(conn);
+
+       health_code_update();
+
+       /* Receive the request from the connected client. */
+       ret = recv_request(conn->sock, &request, sizeof(request));
+       if (ret < 0) {
+               goto end;
+       }
+       viewer_session_to_close = be64toh(request.session_id);
+
+       memset(&response, 0, sizeof(response));
+
+       if (!conn->viewer_session) {
+               DBG("Client trying to detach before creating a live viewer session");
+               response.status = htobe32(LTTNG_VIEWER_DETACH_SESSION_ERR);
+               goto send_reply;
+       }
+
+       health_code_update();
+
+       DBG("Detaching from session ID %" PRIu64, viewer_session_to_close);
+
+       session = session_get_by_id(viewer_session_to_close);
+       if (!session) {
+               DBG("Relay session %" PRIu64 " not found",
+                               (uint64_t) be64toh(request.session_id));
+               response.status = htobe32(LTTNG_VIEWER_DETACH_SESSION_UNK);
+               goto send_reply;
+       }
+
+       ret = viewer_session_is_attached(conn->viewer_session, session);
+       if (ret != 1) {
+               DBG("Not attached to this session");
+               response.status = htobe32(LTTNG_VIEWER_DETACH_SESSION_ERR);
+               goto send_reply_put;
+       }
+
+       viewer_session_close_one_session(conn->viewer_session, session);
+       response.status = htobe32(LTTNG_VIEWER_DETACH_SESSION_OK);
+       DBG("Session %" PRIu64 " detached.", viewer_session_to_close);
+
+send_reply_put:
+       session_put(session);
+
+send_reply:
+       health_code_update();
+       ret = send_response(conn->sock, &response, sizeof(response));
+       if (ret < 0) {
+               goto end;
+       }
+       health_code_update();
+       ret = 0;
+
+end:
+       return ret;
+}
 
 /*
  * live_relay_unknown_command: send -1 if received unknown command
@@ -1757,9 +2284,6 @@ int process_control(struct lttng_viewer_cmd *recv_hdr,
        int ret = 0;
        uint32_t msg_value;
 
-       assert(recv_hdr);
-       assert(conn);
-
        msg_value = be32toh(recv_hdr->cmd);
 
        /*
@@ -1797,8 +2321,12 @@ int process_control(struct lttng_viewer_cmd *recv_hdr,
        case LTTNG_VIEWER_CREATE_SESSION:
                ret = viewer_create_session(conn);
                break;
+       case LTTNG_VIEWER_DETACH_SESSION:
+               ret = viewer_detach_session(conn);
+               break;
        default:
-               ERR("Received unknown viewer command (%u)", be32toh(recv_hdr->cmd));
+               ERR("Received unknown viewer command (%u)",
+                               be32toh(recv_hdr->cmd));
                live_relay_unknown_command(conn);
                ret = -1;
                goto end;
@@ -1813,48 +2341,15 @@ void cleanup_connection_pollfd(struct lttng_poll_event *events, int pollfd)
 {
        int ret;
 
-       assert(events);
-
        (void) lttng_poll_del(events, pollfd);
 
-       ret = close(pollfd);
+       ret = fd_tracker_close_unsuspendable_fd(the_fd_tracker, &pollfd, 1,
+                       fd_tracker_util_close_fd, NULL);
        if (ret < 0) {
                ERR("Closing pollfd %d", pollfd);
        }
 }
 
-/*
- * Delete and destroy a connection.
- *
- * RCU read side lock MUST be acquired.
- */
-static void destroy_connection(struct lttng_ht *relay_connections_ht,
-               struct relay_connection *conn)
-{
-       struct relay_session *session, *tmp_session;
-
-       assert(relay_connections_ht);
-       assert(conn);
-
-       connection_delete(relay_connections_ht, conn);
-
-       if (!conn->viewer_session) {
-               goto end;
-       }
-
-       rcu_read_lock();
-       cds_list_for_each_entry_safe(session, tmp_session,
-                       &conn->viewer_session->sessions_head,
-                       viewer_session_list) {
-               DBG("Cleaning connection of session ID %" PRIu64, session->id);
-               cleanup_session(conn, session);
-       }
-       rcu_read_unlock();
-
-end:
-       connection_destroy(conn);
-}
-
 /*
  * This thread does the actual work
  */
@@ -1864,11 +2359,9 @@ void *thread_worker(void *data)
        int ret, err = -1;
        uint32_t nb_fd;
        struct lttng_poll_event events;
-       struct lttng_ht *relay_connections_ht;
+       struct lttng_ht *viewer_connections_ht;
        struct lttng_ht_iter iter;
        struct lttng_viewer_cmd recv_hdr;
-       struct relay_local_data *relay_ctx = (struct relay_local_data *) data;
-       struct lttng_ht *sessions_ht = relay_ctx->sessions_ht;
        struct relay_connection *destroy_conn;
 
        DBG("[thread] Live viewer relay worker started");
@@ -1882,12 +2375,13 @@ void *thread_worker(void *data)
        }
 
        /* table of connections indexed on socket */
-       relay_connections_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
-       if (!relay_connections_ht) {
-               goto relay_connections_ht_error;
+       viewer_connections_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
+       if (!viewer_connections_ht) {
+               goto viewer_connections_ht_error;
        }
 
-       ret = create_thread_poll_set(&events, 2);
+       ret = create_named_thread_poll_set(&events, 2,
+                       "Live viewer worker thread epoll");
        if (ret < 0) {
                goto error_poll_create;
        }
@@ -1932,11 +2426,6 @@ restart:
 
                        health_code_update();
 
-                       if (!revents) {
-                               /* No activity for this FD (poll implementation). */
-                               continue;
-                       }
-
                        /* Thread quit pipe has been closed. Killing thread. */
                        ret = check_thread_quit_pipe(pollfd, revents);
                        if (ret) {
@@ -1944,80 +2433,93 @@ restart:
                                goto exit;
                        }
 
-                       /* Inspect the relay conn pipe for new connection */
+                       /* Inspect the relay conn pipe for new connection. */
                        if (pollfd == live_conn_pipe[0]) {
-                               if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
-                                       ERR("Relay live pipe error");
-                                       goto error;
-                               } else if (revents & LPOLLIN) {
+                               if (revents & LPOLLIN) {
                                        struct relay_connection *conn;
 
-                                       ret = lttng_read(live_conn_pipe[0], &conn, sizeof(conn));
+                                       ret = lttng_read(live_conn_pipe[0],
+                                                       &conn, sizeof(conn));
                                        if (ret < 0) {
                                                goto error;
                                        }
-                                       conn->sessions_ht = sessions_ht;
-                                       connection_init(conn);
-                                       lttng_poll_add(&events, conn->sock->fd,
+                                       ret = lttng_poll_add(&events,
+                                                       conn->sock->fd,
                                                        LPOLLIN | LPOLLRDHUP);
-                                       rcu_read_lock();
-                                       lttng_ht_add_unique_ulong(relay_connections_ht,
-                                                       &conn->sock_n);
-                                       rcu_read_unlock();
-                                       DBG("Connection socket %d added", conn->sock->fd);
+                                       if (ret) {
+                                               ERR("Failed to add new live connection file descriptor to poll set");
+                                               goto error;
+                                       }
+                                       connection_ht_add(viewer_connections_ht, conn);
+                                       DBG("Connection socket %d added to poll", conn->sock->fd);
+                               } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+                                       ERR("Relay live pipe error");
+                                       goto error;
+                               } else {
+                                       ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+                                       goto error;
                                }
                        } else {
+                               /* Connection activity. */
                                struct relay_connection *conn;
 
-                               rcu_read_lock();
-                               conn = connection_find_by_sock(relay_connections_ht, pollfd);
-                               /* If not found, there is a synchronization issue. */
-                               assert(conn);
+                               conn = connection_get_by_sock(viewer_connections_ht, pollfd);
+                               if (!conn) {
+                                       continue;
+                               }
 
-                               if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
-                                       cleanup_connection_pollfd(&events, pollfd);
-                                       destroy_connection(relay_connections_ht, conn);
-                               } else if (revents & LPOLLIN) {
+                               if (revents & LPOLLIN) {
                                        ret = conn->sock->ops->recvmsg(conn->sock, &recv_hdr,
                                                        sizeof(recv_hdr), 0);
                                        if (ret <= 0) {
-                                               /* Connection closed */
+                                               /* Connection closed. */
                                                cleanup_connection_pollfd(&events, pollfd);
-                                               destroy_connection(relay_connections_ht, conn);
+                                               /* Put "create" ownership reference. */
+                                               connection_put(conn);
                                                DBG("Viewer control conn closed with %d", pollfd);
                                        } else {
                                                ret = process_control(&recv_hdr, conn);
                                                if (ret < 0) {
                                                        /* Clear the session on error. */
                                                        cleanup_connection_pollfd(&events, pollfd);
-                                                       destroy_connection(relay_connections_ht, conn);
+                                                       /* Put "create" ownership reference. */
+                                                       connection_put(conn);
                                                        DBG("Viewer connection closed with %d", pollfd);
                                                }
                                        }
+                               } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+                                       cleanup_connection_pollfd(&events, pollfd);
+                                       /* Put "create" ownership reference. */
+                                       connection_put(conn);
+                               } else {
+                                       ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+                                       connection_put(conn);
+                                       goto error;
                                }
-                               rcu_read_unlock();
+                               /* Put local "get_by_sock" reference. */
+                               connection_put(conn);
                        }
                }
        }
 
 exit:
 error:
-       lttng_poll_clean(&events);
+       (void) fd_tracker_util_poll_clean(the_fd_tracker, &events);
 
-       /* Cleanup reamaining connection object. */
+       /* Clean up the remaining connection objects. */
        rcu_read_lock();
-       cds_lfht_for_each_entry(relay_connections_ht->ht, &iter.iter,
+       cds_lfht_for_each_entry(viewer_connections_ht->ht, &iter.iter,
                        destroy_conn,
                        sock_n.node) {
                health_code_update();
-               destroy_connection(relay_connections_ht, destroy_conn);
+               connection_put(destroy_conn);
        }
        rcu_read_unlock();
 error_poll_create:
-       lttng_ht_destroy(relay_connections_ht);
-relay_connections_ht_error:
+       lttng_ht_destroy(viewer_connections_ht);
+viewer_connections_ht_error:
        /* Close relay conn pipes */
-       utils_close_pipe(live_conn_pipe);
+       (void) fd_tracker_util_pipe_close(the_fd_tracker, live_conn_pipe);
        if (err) {
                DBG("Viewer worker thread exited with error");
        }
@@ -2041,7 +2543,8 @@ error_testpoint:
  */
 static int create_conn_pipe(void)
 {
-       return utils_create_pipe_cloexec(live_conn_pipe);
+       return fd_tracker_util_pipe_open_cloexec(the_fd_tracker,
+                       "Live connection pipe", live_conn_pipe);
 }
 
 int relayd_live_join(void)
@@ -2078,8 +2581,7 @@ int relayd_live_join(void)
 /*
  * main
  */
-int relayd_live_create(struct lttng_uri *uri,
-               struct relay_local_data *relay_ctx)
+int relayd_live_create(struct lttng_uri *uri)
 {
        int ret = 0, retval = 0;
        void *status;
@@ -2118,7 +2620,7 @@ int relayd_live_create(struct lttng_uri *uri,
        }
 
        /* Setup the dispatcher thread */
-       ret = pthread_create(&live_dispatcher_thread, NULL,
+       ret = pthread_create(&live_dispatcher_thread, default_pthread_attr(),
                        thread_dispatcher, (void *) NULL);
        if (ret) {
                errno = ret;
@@ -2128,8 +2630,8 @@ int relayd_live_create(struct lttng_uri *uri,
        }
 
        /* Setup the worker thread */
-       ret = pthread_create(&live_worker_thread, NULL,
-                       thread_worker, relay_ctx);
+       ret = pthread_create(&live_worker_thread, default_pthread_attr(),
+                       thread_worker, NULL);
        if (ret) {
                errno = ret;
                PERROR("pthread_create viewer worker");
@@ -2138,7 +2640,7 @@ int relayd_live_create(struct lttng_uri *uri,
        }
 
        /* Setup the listener thread */
-       ret = pthread_create(&live_listener_thread, NULL,
+       ret = pthread_create(&live_listener_thread, default_pthread_attr(),
                        thread_listener, (void *) NULL);
        if (ret) {
                errno = ret;