* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-#define _GNU_SOURCE
#define _LGPL_SOURCE
#include <getopt.h>
#include <grp.h>
#include <urcu/rculist.h>
#include <unistd.h>
#include <fcntl.h>
-#include <config.h>
#include <lttng/lttng.h>
#include <common/common.h>
#include <common/sessiond-comm/relayd.h>
#include <common/uri.h>
#include <common/utils.h>
+#include <common/fd-tracker/utils.h>
#include "cmd.h"
#include "live.h"
send_stream.ctf_trace_id = htobe64(ctf_trace->id);
send_stream.metadata_flag = htobe32(
vstream->stream->is_metadata);
- strncpy(send_stream.path_name, vstream->path_name,
- sizeof(send_stream.path_name));
- strncpy(send_stream.channel_name, vstream->channel_name,
- sizeof(send_stream.channel_name));
+ if (lttng_strncpy(send_stream.path_name, vstream->path_name,
+ sizeof(send_stream.path_name))) {
+ pthread_mutex_unlock(&vstream->stream->lock);
+ viewer_stream_put(vstream);
+ ret = -1; /* Error. */
+ goto end_unlock;
+ }
+ if (lttng_strncpy(send_stream.channel_name,
+ vstream->channel_name,
+ sizeof(send_stream.channel_name))) {
+ pthread_mutex_unlock(&vstream->stream->lock);
+ viewer_stream_put(vstream);
+ ret = -1; /* Error. */
+ goto end_unlock;
+ }
DBG("Sending stream %" PRIu64 " to viewer",
vstream->stream->stream_handle);
continue;
}
/*
- * stream published is protected by the session
- * lock.
+ * stream published is protected by the session lock.
*/
if (!stream->published) {
goto next;
/* Update number of created stream counter. */
(*nb_created)++;
}
+ /*
+ * Ensure a self-reference is preserved even
+ * after we have put our local reference.
+ */
+ if (!viewer_stream_get(vstream)) {
+ ERR("Unable to get self-reference on viewer stream, logic error.");
+ abort();
+ }
} else {
if (!vstream->sent_flag && nb_unsent) {
/* Update number of unsent stream counter. */
(*nb_unsent)++;
}
- viewer_stream_put(vstream);
}
/* Update number of total stream counter. */
if (nb_total) {
- (*nb_total)++;
+ if (stream->is_metadata) {
+ if (!stream->closed ||
+ stream->metadata_received > vstream->metadata_sent) {
+ (*nb_total)++;
+ }
+ } else {
+ if (!stream->closed ||
+ !(((int64_t) (stream->prev_seq - stream->last_net_seq_num)) >= 0)) {
+
+ (*nb_total)++;
+ }
+ }
}
+ /* Put local reference. */
+ viewer_stream_put(vstream);
next:
stream_put(stream);
}
* Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
*/
static
-int create_thread_poll_set(struct lttng_poll_event *events, int size)
+int create_named_thread_poll_set(struct lttng_poll_event *events,
+ int size, const char *name)
{
int ret;
goto error;
}
- ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
- if (ret < 0) {
- goto error;
- }
+ ret = fd_tracker_util_poll_create(the_fd_tracker,
+ name, events, 1, LTTNG_CLOEXEC);
/* Add quit pipe */
ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
return 0;
}
+/*
+ * fd_tracker "open" callback used when creating a tracked listening
+ * socket. 'data' is the lttcomm_sock to create; on success its fd is
+ * reported back to the tracker through 'out_fd'.
+ *
+ * Propagates the return value of lttcomm_create_sock() (negative on
+ * error); *out_fd is left untouched on error.
+ */
+static
+int create_sock(void *data, int *out_fd)
+{
+	int ret;
+	struct lttcomm_sock *sock = data;
+
+	ret = lttcomm_create_sock(sock);
+	if (ret < 0) {
+		goto end;
+	}
+
+	*out_fd = sock->fd;
+end:
+	return ret;
+}
+
+/*
+ * fd_tracker "close" callback: close the lttcomm_sock passed as 'data'
+ * using the socket's own close operation.
+ *
+ * 'in_fd' is imposed by the fd_tracker callback signature and is unused
+ * here; the fd is owned by the lttcomm_sock being closed.
+ */
+static
+int close_sock(void *data, int *in_fd)
+{
+	struct lttcomm_sock *sock = data;
+
+	return sock->ops->close(sock);
+}
+
+/*
+ * fd_tracker "open" callback: accept a connection on the listening
+ * socket socks[0] and store the newly-created socket in socks[1],
+ * exposing its fd to the tracker through 'out_fd'.
+ *
+ * Returns 0 on success, -1 if accept fails (socks[1] is then NULL).
+ */
+static int accept_sock(void *data, int *out_fd)
+{
+	int ret = 0;
+	/* Socks is an array of in_sock, out_sock. */
+	struct lttcomm_sock **socks = data;
+	struct lttcomm_sock *in_sock = socks[0];
+
+	socks[1] = in_sock->ops->accept(in_sock);
+	if (!socks[1]) {
+		ret = -1;
+		goto end;
+	}
+	*out_fd = socks[1]->fd;
+end:
+	return ret;
+}
+
+/*
+ * Accept a new client connection on 'listening_sock', going through the
+ * fd tracker so the accepted fd is accounted for. 'name' is only used
+ * for fd-tracker bookkeeping/debug output.
+ *
+ * Returns the newly-accepted socket on success, NULL on error.
+ */
+static
+struct lttcomm_sock *accept_live_sock(struct lttcomm_sock *listening_sock,
+		const char *name)
+{
+	int out_fd, ret;
+	struct lttcomm_sock *socks[2] = { listening_sock, NULL };
+	struct lttcomm_sock *new_sock = NULL;
+
+	/*
+	 * NOTE(review): '&socks' is a pointer-to-array; accept_sock()
+	 * reads it as 'struct lttcomm_sock **'. The addresses coincide so
+	 * this works, but passing 'socks' (array decay) would match the
+	 * callback's expected type directly — confirm against the
+	 * fd_tracker callback contract.
+	 */
+	ret = fd_tracker_open_unsuspendable_fd(
+			the_fd_tracker, &out_fd,
+			(const char **) &name,
+			1, accept_sock, &socks);
+	if (ret) {
+		goto end;
+	}
+	new_sock = socks[1];
+	DBG("%s accepted, socket %d", name, new_sock->fd);
+end:
+	return new_sock;
+}
+
/*
* Create and init socket from uri.
*/
static
-struct lttcomm_sock *init_socket(struct lttng_uri *uri)
+struct lttcomm_sock *init_socket(struct lttng_uri *uri, const char *name)
{
- int ret;
+ int ret, sock_fd;
struct lttcomm_sock *sock = NULL;
+ char uri_str[PATH_MAX];
+ char *formated_name = NULL;
sock = lttcomm_alloc_sock_from_uri(uri);
if (sock == NULL) {
goto error;
}
- ret = lttcomm_create_sock(sock);
- if (ret < 0) {
- goto error;
+ /*
+ * Don't fail to create the socket if the name can't be built as it is
+ * only used for debugging purposes.
+ */
+ ret = uri_to_str_url(uri, uri_str, sizeof(uri_str));
+ uri_str[sizeof(uri_str) - 1] = '\0';
+ if (ret >= 0) {
+ ret = asprintf(&formated_name, "%s socket @ %s", name,
+ uri_str);
+ if (ret < 0) {
+ formated_name = NULL;
+ }
}
- DBG("Listening on sock %d for live", sock->fd);
+
+ ret = fd_tracker_open_unsuspendable_fd(the_fd_tracker, &sock_fd,
+ (const char **) (formated_name ? &formated_name : NULL),
+ 1, create_sock, sock);
+ free(formated_name);
+ DBG("Listening on %s socket %d", name, sock->fd);
ret = sock->ops->bind(sock);
if (ret < 0) {
health_code_update();
- live_control_sock = init_socket(live_uri);
+ live_control_sock = init_socket(live_uri, "Live listener");
if (!live_control_sock) {
goto error_sock_control;
}
/* Pass 2 as size here for the thread quit pipe and control sockets. */
- ret = create_thread_poll_set(&events, 2);
+ ret = create_named_thread_poll_set(&events, 2,
+ "Live listener thread epoll");
if (ret < 0) {
goto error_create_poll;
}
goto exit;
}
- if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
- ERR("socket poll error");
- goto error;
- } else if (revents & LPOLLIN) {
+ if (revents & LPOLLIN) {
/*
* A new connection is requested, therefore a
* viewer connection is allocated in this
struct relay_connection *new_conn;
struct lttcomm_sock *newsock;
- newsock = live_control_sock->ops->accept(live_control_sock);
+ newsock = accept_live_sock(live_control_sock,
+ "Live socket to client");
if (!newsock) {
PERROR("accepting control sock");
goto error;
* exchange in cds_wfcq_enqueue.
*/
futex_nto1_wake(&viewer_conn_queue.futex);
+ } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+ ERR("socket poll error");
+ goto error;
+ } else {
+ ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+ goto error;
}
}
}
error:
error_poll_add:
error_testpoint:
- lttng_poll_clean(&events);
+ (void) fd_tracker_util_poll_clean(the_fd_tracker, &events);
error_create_poll:
if (live_control_sock->fd >= 0) {
- ret = live_control_sock->ops->close(live_control_sock);
+ ret = fd_tracker_close_unsuspendable_fd(the_fd_tracker,
+ &live_control_sock->fd, 1, close_sock,
+ live_control_sock);
if (ret) {
PERROR("close");
}
health_code_update();
- while (!CMM_LOAD_SHARED(live_dispatch_thread_exit)) {
+ for (;;) {
health_code_update();
/* Atomically prepare the queue futex */
futex_nto1_prepare(&viewer_conn_queue.futex);
+ if (CMM_LOAD_SHARED(live_dispatch_thread_exit)) {
+ break;
+ }
+
do {
health_code_update();
static
int viewer_list_sessions(struct relay_connection *conn)
{
- int ret;
+ int ret = 0;
struct lttng_viewer_list_sessions session_list;
struct lttng_ht_iter iter;
struct relay_session *session;
new_buf_count * sizeof(*send_session_buf));
if (!newbuf) {
ret = -1;
- rcu_read_unlock();
- goto end_free;
+ break;
}
send_session_buf = newbuf;
buf_count = new_buf_count;
}
send_session = &send_session_buf[count];
- strncpy(send_session->session_name, session->session_name,
- sizeof(send_session->session_name));
- strncpy(send_session->hostname, session->hostname,
- sizeof(send_session->hostname));
+ if (lttng_strncpy(send_session->session_name,
+ session->session_name,
+ sizeof(send_session->session_name))) {
+ ret = -1;
+ break;
+ }
+ if (lttng_strncpy(send_session->hostname, session->hostname,
+ sizeof(send_session->hostname))) {
+ ret = -1;
+ break;
+ }
send_session->id = htobe64(session->id);
send_session->live_timer = htobe32(session->live_timer);
if (session->viewer_attached) {
count++;
}
rcu_read_unlock();
+ if (ret < 0) {
+ goto end_free;
+ }
session_list.sessions_count = htobe32(count);
response.streams_count = htobe32(nb_streams);
/*
- * If the session is closed, HUP when there are no more streams.
+ * If the session is closed, HUP when there are no more streams
+ * with data.
*/
if (closed && nb_total == 0) {
send_streams = 0;
session = session_get_by_id(be64toh(request.session_id));
if (!session) {
DBG("Relay session %" PRIu64 " not found",
- be64toh(request.session_id));
+ (uint64_t) be64toh(request.session_id));
response.status = htobe32(LTTNG_VIEWER_ATTACH_UNK);
goto send_reply;
}
DBG("Attach session ID %" PRIu64 " received",
- be64toh(request.session_id));
+ (uint64_t) be64toh(request.session_id));
if (session->live_timer == 0) {
DBG("Not live session");
/*
* Open the index file if needed for the given vstream.
*
- * If an index file is successfully opened, the vstream index_fd set with
- * it.
+ * If an index file is successfully opened, the vstream will set it as its
+ * current index file.
*
* Return 0 on success, a negative value on error (-ENOENT if not ready yet).
*
{
int ret = 0;
- if (vstream->index_fd) {
+ if (vstream->index_file) {
goto end;
}
/*
* First time, we open the index file and at least one index is ready.
*/
- if (rstream->total_index_received == 0) {
+ if (rstream->index_received_seqcount == 0) {
ret = -ENOENT;
goto end;
}
- ret = index_open(vstream->path_name, vstream->channel_name,
+ vstream->index_file = lttng_index_file_open(vstream->path_name,
+ vstream->channel_name,
vstream->stream->tracefile_count,
vstream->current_tracefile_id);
- if (ret >= 0) {
- vstream->index_fd = stream_fd_create(ret);
- if (!vstream->index_fd) {
- if (close(ret)) {
- PERROR("close");
- }
- ret = -1;
- } else {
- ret = 0;
- }
- goto end;
+ if (!vstream->index_file) {
+ ret = -1;
}
end:
int ret;
if (trace->session->connection_closed
- && rstream->total_index_received
- == vstream->last_sent_index) {
+ && rstream->index_received_seqcount
+ == vstream->index_sent_seqcount) {
/* Last index sent and session connection is closed. */
index->status = htobe32(LTTNG_VIEWER_INDEX_HUP);
goto hup;
} else if (rstream->beacon_ts_end != -1ULL &&
- rstream->total_index_received
- == vstream->last_sent_index) {
+ rstream->index_received_seqcount
+ == vstream->index_sent_seqcount) {
/*
* We've received a synchronization beacon and the last index
* available has been sent, the index for now is inactive.
index->timestamp_end = htobe64(rstream->beacon_ts_end);
index->stream_id = htobe64(rstream->ctf_stream_id);
goto index_ready;
- } else if (rstream->total_index_received <= vstream->last_sent_index) {
+ } else if (rstream->index_received_seqcount
+ == vstream->index_sent_seqcount) {
/*
- * This actually checks the case where recv == last_sent.
- * In this case, we have not received a beacon. Therefore, we
- * can only ask the client to retry later.
+ * This checks whether received == sent seqcount. In
+ * this case, we have not received a beacon. Therefore,
+ * we can only ask the client to retry later.
*/
index->status = htobe32(LTTNG_VIEWER_INDEX_RETRY);
goto index_ready;
- } else if (!viewer_stream_is_tracefile_seq_readable(vstream,
- vstream->current_tracefile_seq)) {
+ } else if (!tracefile_array_seq_in_file(rstream->tfa,
+ vstream->current_tracefile_id,
+ vstream->index_sent_seqcount)) {
/*
- * The producer has overwritten our current file. We
- * need to rotate.
+ * The next index we want to send cannot be read either
+ * because we need to perform a rotation, or due to
+ * the producer having overwritten its trace file.
*/
- DBG("Viewer stream %" PRIu64 " rotation due to overwrite",
+ DBG("Viewer stream %" PRIu64 " rotation",
vstream->stream->stream_handle);
ret = viewer_stream_rotate(vstream);
if (ret < 0) {
index->status = htobe32(LTTNG_VIEWER_INDEX_HUP);
goto hup;
}
- assert(viewer_stream_is_tracefile_seq_readable(vstream,
- vstream->current_tracefile_seq));
- /* ret == 0 means successful so we continue. */
- ret = 0;
- } else {
- ssize_t read_ret;
- char tmp[1];
-
/*
- * Use EOF on current index file to find out when we
- * need to rotate.
+ * If we have been pushed due to overwrite, it
+ * necessarily means there is data that can be read in
+ * the stream. If we rotated because we reached the end
+ * of a tracefile, it means the following tracefile
+ * needs to contain at least one index, else we would
+ * have already returned LTTNG_VIEWER_INDEX_RETRY to the
+ * viewer. The updated index_sent_seqcount needs to
+ * point to a readable index entry now.
+ *
+ * In the case where we "rotate" on a single file, we
+ * can end up in a case where the requested index is
+ * still unavailable.
*/
- read_ret = lttng_read(vstream->index_fd->fd, tmp, 1);
- if (read_ret == 1) {
- off_t seek_ret;
-
- /* There is still data to read. Rewind position. */
- seek_ret = lseek(vstream->index_fd->fd, -1, SEEK_CUR);
- if (seek_ret < 0) {
- ret = -1;
- goto end;
- }
- ret = 0;
- } else if (read_ret == 0) {
- /* EOF. We need to rotate. */
- DBG("Viewer stream %" PRIu64 " rotation due to EOF",
- vstream->stream->stream_handle);
- ret = viewer_stream_rotate(vstream);
- if (ret < 0) {
- goto end;
- } else if (ret == 1) {
- /* EOF across entire stream. */
- index->status = htobe32(LTTNG_VIEWER_INDEX_HUP);
- goto hup;
- }
- assert(viewer_stream_is_tracefile_seq_readable(vstream,
- vstream->current_tracefile_seq));
- /* ret == 0 means successful so we continue. */
- ret = 0;
- } else {
- /* Error reading index. */
- ret = -1;
+ if (rstream->tracefile_count == 1 &&
+ !tracefile_array_seq_in_file(
+ rstream->tfa,
+ vstream->current_tracefile_id,
+ vstream->index_sent_seqcount)) {
+ index->status = htobe32(LTTNG_VIEWER_INDEX_RETRY);
+ goto index_ready;
}
+ assert(tracefile_array_seq_in_file(rstream->tfa,
+ vstream->current_tracefile_id,
+ vstream->index_sent_seqcount));
}
+ /* ret == 0 means successful so we continue. */
+ ret = 0;
end:
return ret;
int viewer_get_next_index(struct relay_connection *conn)
{
int ret;
- ssize_t read_ret;
struct lttng_viewer_get_next_index request_index;
struct lttng_viewer_index viewer_index;
struct ctf_packet_index packet_index;
vstream = viewer_stream_get_by_id(be64toh(request_index.stream_id));
if (!vstream) {
+ DBG("Client requested index of unknown stream id %" PRIu64,
+ (uint64_t) be64toh(request_index.stream_id));
viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_ERR);
goto send_reply;
}
viewer_index.flags |= LTTNG_VIEWER_FLAG_NEW_STREAM;
}
- read_ret = lttng_read(vstream->index_fd->fd, &packet_index,
- sizeof(packet_index));
- if (read_ret < sizeof(packet_index)) {
- ERR("Relay reading index file %d returned %zd",
- vstream->index_fd->fd, read_ret);
+ ret = lttng_index_file_read(vstream->index_file, &packet_index);
+ if (ret) {
+ ERR("Relay error reading index file %d",
+ vstream->index_file->fd);
viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_ERR);
goto send_reply;
} else {
viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_OK);
- vstream->last_sent_index++;
+ vstream->index_sent_seqcount++;
}
/*
*/
DBG("Sending viewer index for stream %" PRIu64 " offset %" PRIu64,
rstream->stream_handle,
- be64toh(packet_index.offset));
+ (uint64_t) be64toh(packet_index.offset));
viewer_index.offset = packet_index.offset;
viewer_index.packet_size = packet_index.packet_size;
viewer_index.content_size = packet_index.content_size;
}
health_code_update();
- DBG("Index %" PRIu64 " for stream %" PRIu64 " sent",
- vstream->last_sent_index,
- vstream->stream->stream_handle);
+ if (vstream) {
+ DBG("Index %" PRIu64 " for stream %" PRIu64 " sent",
+ vstream->index_sent_seqcount,
+ vstream->stream->stream_handle);
+ }
end:
if (metadata_viewer_stream) {
viewer_stream_put(metadata_viewer_stream);
static
int viewer_get_packet(struct relay_connection *conn)
{
- int ret, send_data = 0;
- char *data = NULL;
- uint32_t len = 0;
- ssize_t read_len;
+ int ret;
+ off_t lseek_ret;
+ char *reply = NULL;
struct lttng_viewer_get_packet get_packet_info;
- struct lttng_viewer_trace_packet reply;
+ struct lttng_viewer_trace_packet reply_header;
struct relay_viewer_stream *vstream = NULL;
- struct ctf_trace *ctf_trace;
- struct relay_viewer_stream *metadata_viewer_stream = NULL;
+ uint32_t reply_size = sizeof(reply_header);
+ uint32_t packet_data_len = 0;
+ ssize_t read_len;
DBG2("Relay get data packet");
health_code_update();
/* From this point on, the error label can be reached. */
- memset(&reply, 0, sizeof(reply));
+ memset(&reply_header, 0, sizeof(reply_header));
vstream = viewer_stream_get_by_id(be64toh(get_packet_info.stream_id));
if (!vstream) {
- reply.status = htobe32(LTTNG_VIEWER_GET_PACKET_ERR);
+ DBG("Client requested packet of unknown stream id %" PRIu64,
+ (uint64_t) be64toh(get_packet_info.stream_id));
+ reply_header.status = htobe32(LTTNG_VIEWER_GET_PACKET_ERR);
goto send_reply_nolock;
+ } else {
+ packet_data_len = be32toh(get_packet_info.len);
+ reply_size += packet_data_len;
}
- ctf_trace = vstream->stream->trace;
-
- /* metadata_viewer_stream may be NULL. */
- metadata_viewer_stream =
- ctf_trace_get_viewer_metadata_stream(ctf_trace);
-
- if (metadata_viewer_stream) {
- bool get_packet_err = false;
-
- pthread_mutex_lock(&metadata_viewer_stream->stream->lock);
- DBG("get packet metadata check: recv %" PRIu64 " sent %" PRIu64,
- metadata_viewer_stream->stream->metadata_received,
- metadata_viewer_stream->metadata_sent);
- if (!metadata_viewer_stream->stream->metadata_received ||
- metadata_viewer_stream->stream->metadata_received >
- metadata_viewer_stream->metadata_sent) {
- /*
- * We prevent the client from reading a data stream as
- * long as there is metadata left to consume. This
- * ensures that the client won't receive data of which
- * it can't make sense.
- */
- get_packet_err = true;
- }
- pthread_mutex_unlock(&metadata_viewer_stream->stream->lock);
- viewer_stream_put(metadata_viewer_stream);
- if (get_packet_err) {
- reply.status = htobe32(LTTNG_VIEWER_GET_PACKET_ERR);
- reply.flags |= LTTNG_VIEWER_FLAG_NEW_METADATA;
- goto send_reply_nolock;
- }
- }
-
- pthread_mutex_lock(&vstream->stream->lock);
- /*
- * The vstream->stream_fd used here has been opened by
- * get_next_index. It is opened there because this is what
- * allows us to grab a reference to the file with stream lock
- * held, thus protecting us against overwrite caused by
- * tracefile rotation. Since tracefile rotation unlinks the old
- * data file, we are ensured that we won't have our data
- * overwritten under us.
- */
- ret = check_new_streams(conn);
- if (ret < 0) {
- goto end_free;
- } else if (ret == 1) {
- reply.status = htobe32(LTTNG_VIEWER_GET_PACKET_ERR);
- reply.flags |= LTTNG_VIEWER_FLAG_NEW_STREAM;
- goto send_reply;
- }
-
- len = be32toh(get_packet_info.len);
- data = zmalloc(len);
- if (!data) {
- PERROR("relay data zmalloc");
+ reply = zmalloc(reply_size);
+ if (!reply) {
+ PERROR("packet reply zmalloc");
+ reply_size = sizeof(reply_header);
goto error;
}
- ret = lseek(vstream->stream_fd->fd, be64toh(get_packet_info.offset),
+ pthread_mutex_lock(&vstream->stream->lock);
+ lseek_ret = lseek(vstream->stream_fd->fd, be64toh(get_packet_info.offset),
SEEK_SET);
- if (ret < 0) {
+ if (lseek_ret < 0) {
PERROR("lseek fd %d to offset %" PRIu64, vstream->stream_fd->fd,
- be64toh(get_packet_info.offset));
+ (uint64_t) be64toh(get_packet_info.offset));
goto error;
}
- read_len = lttng_read(vstream->stream_fd->fd, data, len);
- if (read_len < len) {
+ read_len = lttng_read(vstream->stream_fd->fd,
+ reply + sizeof(reply_header),
+ packet_data_len);
+ if (read_len < packet_data_len) {
PERROR("Relay reading trace file, fd: %d, offset: %" PRIu64,
vstream->stream_fd->fd,
- be64toh(get_packet_info.offset));
+ (uint64_t) be64toh(get_packet_info.offset));
goto error;
}
- reply.status = htobe32(LTTNG_VIEWER_GET_PACKET_OK);
- reply.len = htobe32(len);
- send_data = 1;
+ reply_header.status = htobe32(LTTNG_VIEWER_GET_PACKET_OK);
+ reply_header.len = htobe32(packet_data_len);
goto send_reply;
error:
- reply.status = htobe32(LTTNG_VIEWER_GET_PACKET_ERR);
+ reply_header.status = htobe32(LTTNG_VIEWER_GET_PACKET_ERR);
send_reply:
if (vstream) {
pthread_mutex_unlock(&vstream->stream->lock);
}
send_reply_nolock:
- reply.flags = htobe32(reply.flags);
health_code_update();
- ret = send_response(conn->sock, &reply, sizeof(reply));
- if (ret < 0) {
- goto end_free;
+ if (reply) {
+ memcpy(reply, &reply_header, sizeof(reply_header));
+ ret = send_response(conn->sock, reply, reply_size);
+ } else {
+ /* No reply to send. */
+ ret = send_response(conn->sock, &reply_header,
+ reply_size);
}
- health_code_update();
- if (send_data) {
- health_code_update();
- ret = send_response(conn->sock, data, len);
- if (ret < 0) {
- goto end_free;
- }
- health_code_update();
+ health_code_update();
+ if (ret < 0) {
+ PERROR("sendmsg of packet data failed");
+ goto end_free;
}
- DBG("Sent %u bytes for stream %" PRIu64, len,
- be64toh(get_packet_info.stream_id));
+ DBG("Sent %u bytes for stream %" PRIu64, reply_size,
+ (uint64_t) be64toh(get_packet_info.stream_id));
end_free:
- free(data);
+ free(reply);
end:
if (vstream) {
viewer_stream_put(vstream);
* Reply back to the client with an error if we cannot
* find it.
*/
+ DBG("Client requested metadata of unknown stream id %" PRIu64,
+ (uint64_t) be64toh(request.stream_id));
reply.status = htobe32(LTTNG_VIEWER_METADATA_ERR);
goto send_reply;
}
}
DBG("Sent %" PRIu64 " bytes of metadata for stream %" PRIu64, len,
- be64toh(request.stream_id));
+ (uint64_t) be64toh(request.stream_id));
DBG("Metadata sent");
return ret;
}
+/*
+ * Detach a viewer session.
+ *
+ * The client sends the id of the relay session it wants to detach from;
+ * the reply carries one of the LTTNG_VIEWER_DETACH_SESSION_* status
+ * codes (ERR if no viewer session exists or the viewer is not attached,
+ * UNK if the relay session id is unknown, OK on success).
+ *
+ * Return 0 on success or else a negative value.
+ */
+static
+int viewer_detach_session(struct relay_connection *conn)
+{
+	int ret;
+	struct lttng_viewer_detach_session_response response;
+	struct lttng_viewer_detach_session_request request;
+	struct relay_session *session = NULL;
+	uint64_t viewer_session_to_close;
+
+	DBG("Viewer detach session received");
+
+	assert(conn);
+
+	health_code_update();
+
+	/* Receive the request from the connected client. */
+	ret = recv_request(conn->sock, &request, sizeof(request));
+	if (ret < 0) {
+		goto end;
+	}
+	viewer_session_to_close = be64toh(request.session_id);
+
+	/*
+	 * Zero the whole response before any early-exit path can reach
+	 * send_reply; otherwise uninitialized stack bytes would be sent
+	 * to the client when only 'status' has been assigned.
+	 */
+	memset(&response, 0, sizeof(response));
+
+	if (!conn->viewer_session) {
+		DBG("Client trying to detach before creating a live viewer session");
+		response.status = htobe32(LTTNG_VIEWER_DETACH_SESSION_ERR);
+		goto send_reply;
+	}
+
+	health_code_update();
+
+	DBG("Detaching from session ID %" PRIu64, viewer_session_to_close);
+
+	session = session_get_by_id(viewer_session_to_close);
+	if (!session) {
+		DBG("Relay session %" PRIu64 " not found",
+				viewer_session_to_close);
+		response.status = htobe32(LTTNG_VIEWER_DETACH_SESSION_UNK);
+		goto send_reply;
+	}
+
+	/* Refuse to detach from a session this viewer is not attached to. */
+	ret = viewer_session_is_attached(conn->viewer_session, session);
+	if (ret != 1) {
+		DBG("Not attached to this session");
+		response.status = htobe32(LTTNG_VIEWER_DETACH_SESSION_ERR);
+		goto send_reply_put;
+	}
+
+	viewer_session_close_one_session(conn->viewer_session, session);
+	response.status = htobe32(LTTNG_VIEWER_DETACH_SESSION_OK);
+	DBG("Session %" PRIu64 " detached.", viewer_session_to_close);
+
+send_reply_put:
+	/* Put the "get_by_id" reference. */
+	session_put(session);
+
+send_reply:
+	health_code_update();
+	ret = send_response(conn->sock, &response, sizeof(response));
+	if (ret < 0) {
+		goto end;
+	}
+	health_code_update();
+	ret = 0;
+
+end:
+	return ret;
+}
/*
* live_relay_unknown_command: send -1 if received unknown command
case LTTNG_VIEWER_CREATE_SESSION:
ret = viewer_create_session(conn);
break;
+ case LTTNG_VIEWER_DETACH_SESSION:
+ ret = viewer_detach_session(conn);
+ break;
default:
ERR("Received unknown viewer command (%u)",
be32toh(recv_hdr->cmd));
(void) lttng_poll_del(events, pollfd);
- ret = close(pollfd);
+ ret = fd_tracker_close_unsuspendable_fd(the_fd_tracker, &pollfd, 1,
+ fd_tracker_util_close_fd, NULL);
if (ret < 0) {
ERR("Closing pollfd %d", pollfd);
}
goto viewer_connections_ht_error;
}
- ret = create_thread_poll_set(&events, 2);
+ ret = create_named_thread_poll_set(&events, 2,
+ "Live viewer worker thread epoll");
if (ret < 0) {
goto error_poll_create;
}
/* Inspect the relay conn pipe for new connection. */
if (pollfd == live_conn_pipe[0]) {
- if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
- ERR("Relay live pipe error");
- goto error;
- } else if (revents & LPOLLIN) {
+ if (revents & LPOLLIN) {
struct relay_connection *conn;
ret = lttng_read(live_conn_pipe[0],
LPOLLIN | LPOLLRDHUP);
connection_ht_add(viewer_connections_ht, conn);
DBG("Connection socket %d added to poll", conn->sock->fd);
+ } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+ ERR("Relay live pipe error");
+ goto error;
+ } else {
+ ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+ goto error;
}
} else {
/* Connection activity. */
continue;
}
- if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
- cleanup_connection_pollfd(&events, pollfd);
- /* Put "create" ownership reference. */
- connection_put(conn);
- } else if (revents & LPOLLIN) {
+ if (revents & LPOLLIN) {
ret = conn->sock->ops->recvmsg(conn->sock, &recv_hdr,
sizeof(recv_hdr), 0);
if (ret <= 0) {
DBG("Viewer connection closed with %d", pollfd);
}
}
+ } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+ cleanup_connection_pollfd(&events, pollfd);
+ /* Put "create" ownership reference. */
+ connection_put(conn);
+ } else {
+ ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+ connection_put(conn);
+ goto error;
}
/* Put local "get_by_sock" reference. */
connection_put(conn);
exit:
error:
- lttng_poll_clean(&events);
+ (void) fd_tracker_util_poll_clean(the_fd_tracker, &events);
/* Cleanup reamaining connection object. */
rcu_read_lock();
lttng_ht_destroy(viewer_connections_ht);
viewer_connections_ht_error:
/* Close relay conn pipes */
- utils_close_pipe(live_conn_pipe);
+ (void) fd_tracker_util_pipe_close(the_fd_tracker, live_conn_pipe);
if (err) {
DBG("Viewer worker thread exited with error");
}
*/
static int create_conn_pipe(void)
{
-	return utils_create_pipe_cloexec(live_conn_pipe);
+	/*
+	 * Open the connection pipe through the fd tracker so both fds are
+	 * accounted for and carry a name for debugging.
+	 */
+	return fd_tracker_util_pipe_open_cloexec(the_fd_tracker,
+			"Live connection pipe", live_conn_pipe);
}
int relayd_live_join(void)
}
/* Setup the dispatcher thread */
- ret = pthread_create(&live_dispatcher_thread, NULL,
+ ret = pthread_create(&live_dispatcher_thread, default_pthread_attr(),
thread_dispatcher, (void *) NULL);
if (ret) {
errno = ret;
}
/* Setup the worker thread */
- ret = pthread_create(&live_worker_thread, NULL,
+ ret = pthread_create(&live_worker_thread, default_pthread_attr(),
thread_worker, NULL);
if (ret) {
errno = ret;
}
/* Setup the listener thread */
- ret = pthread_create(&live_listener_thread, NULL,
+ ret = pthread_create(&live_listener_thread, default_pthread_attr(),
thread_listener, (void *) NULL);
if (ret) {
errno = ret;