consumer: Rename net_seq_idx to relayd_id
diff --git a/src/common/ust-consumer/ust-consumer.c b/src/common/ust-consumer/ust-consumer.c
index 7fbba8a9080f066ed001996c8081a42befa96fcb..4d0b530a7ce798e5fe0295dbe6a1caf1a5ddabf0 100644
--- a/src/common/ust-consumer/ust-consumer.c
+++ b/src/common/ust-consumer/ust-consumer.c
@@ -553,7 +553,7 @@ static int send_sessiond_channel(int sock,
 {
        int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS;
        struct lttng_consumer_stream *stream;
-       uint64_t net_seq_idx = -1ULL;
+       uint64_t relayd_id = -1ULL;
 
        assert(channel);
        assert(ctx);
@@ -578,8 +578,8 @@ static int send_sessiond_channel(int sock,
                                }
                                ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
                        }
-                       if (net_seq_idx == -1ULL) {
-                               net_seq_idx = stream->net_seq_idx;
+                       if (relayd_id == -1ULL) {
+                               relayd_id = stream->relayd_id;
                        }
                }
        }
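
The rename makes explicit that this field identifies a relay daemon rather
than a "network sequence index"; the sentinel (uint64_t) -1ULL still means
"no relayd, trace locally". A minimal illustration of the convention (the
helper name is hypothetical, not part of the patch):

    #include <stdbool.h>
    #include <stdint.h>

    /* True when the stream is bound to a relay daemon. */
    static bool stream_uses_relayd(uint64_t relayd_id)
    {
            return relayd_id != (uint64_t) -1ULL;
    }
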
@@ -831,6 +831,7 @@ static int close_metadata(uint64_t chan_key)
 {
        int ret = 0;
        struct lttng_consumer_channel *channel;
+       unsigned int channel_monitor;
 
        DBG("UST consumer close metadata key %" PRIu64, chan_key);
 
@@ -849,13 +850,48 @@ static int close_metadata(uint64_t chan_key)
 
        pthread_mutex_lock(&consumer_data.lock);
        pthread_mutex_lock(&channel->lock);
-
+       channel_monitor = channel->monitor;
        if (cds_lfht_is_node_deleted(&channel->node.node)) {
                goto error_unlock;
        }
 
        lttng_ustconsumer_close_metadata(channel);
+       pthread_mutex_unlock(&channel->lock);
+       pthread_mutex_unlock(&consumer_data.lock);
 
+       /*
+        * The ownership of a metadata channel depends on the type of
+        * session to which it belongs. In effect, the monitor flag is checked
+        * to determine if this metadata channel is in "snapshot" mode or not.
+        *
+        * In the non-snapshot case, the metadata channel is created along with
+        * a single stream which will remain present until the metadata channel
+        * is destroyed (on the destruction of its session). In this case, the
+        * metadata stream is "monitored" by the metadata poll thread and holds
+        * the ownership of its channel.
+        *
+        * Closing the metadata will cause the metadata stream's "metadata poll
+        * pipe" to be closed. Closing this pipe will wake up the metadata poll
+        * thread, which will tear down the metadata stream and, in turn,
+        * delete the metadata channel.
+        *
+        * In the snapshot case, the metadata stream is created and destroyed
+        * on every snapshot record. Since the channel doesn't have an owner
+        * other than the session daemon, it is safe to destroy it immediately
+        * on reception of the CLOSE_METADATA command.
+        */
+       if (!channel_monitor) {
+               /*
+                * The channel and consumer_data locks must be
+                * released before this call since consumer_del_channel
+                * re-acquires the channel and consumer_data locks to tear down
+                * the channel and queue its reclamation by the "call_rcu"
+                * worker thread.
+                */
+               consumer_del_channel(channel);
+       }
+
+       return ret;
 error_unlock:
        pthread_mutex_unlock(&channel->lock);
        pthread_mutex_unlock(&consumer_data.lock);
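
The comment pins down a lock-ordering constraint: consumer_del_channel()
acquires consumer_data.lock and channel->lock itself, so calling it with
either mutex still held would self-deadlock on these non-recursive pthread
mutexes. Condensed, the resulting pattern is (a sketch of the patched code
above, not a verbatim excerpt):

    pthread_mutex_lock(&consumer_data.lock);
    pthread_mutex_lock(&channel->lock);
    channel_monitor = channel->monitor; /* sampled under the locks */
    lttng_ustconsumer_close_metadata(channel);
    pthread_mutex_unlock(&channel->lock);
    pthread_mutex_unlock(&consumer_data.lock);

    if (!channel_monitor) {
            /* Safe: consumer_del_channel() re-acquires both locks. */
            consumer_del_channel(channel);
    }
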
@@ -903,7 +939,7 @@ static int setup_metadata(struct lttng_consumer_local_data *ctx, uint64_t key)
        }
 
        /* Send metadata stream to relayd if needed. */
-       if (metadata->metadata_stream->net_seq_idx != (uint64_t) -1ULL) {
+       if (metadata->metadata_stream->relayd_id != (uint64_t) -1ULL) {
                ret = consumer_send_relayd_stream(metadata->metadata_stream,
                                metadata->pathname);
                if (ret < 0) {
@@ -911,7 +947,7 @@ static int setup_metadata(struct lttng_consumer_local_data *ctx, uint64_t key)
                        goto error;
                }
                ret = consumer_send_relayd_streams_sent(
-                               metadata->metadata_stream->net_seq_idx);
+                               metadata->metadata_stream->relayd_id);
                if (ret < 0) {
                        ret = LTTCOMM_CONSUMERD_RELAYD_FAIL;
                        goto error;
@@ -1003,7 +1039,7 @@ static int snapshot_metadata(uint64_t key, char *path, uint64_t relayd_id,
        assert(metadata_stream);
 
        if (relayd_id != (uint64_t) -1ULL) {
-               metadata_stream->net_seq_idx = relayd_id;
+               metadata_stream->relayd_id = relayd_id;
                ret = consumer_send_relayd_stream(metadata_stream, path);
                if (ret < 0) {
                        goto error_stream;
@@ -1076,14 +1112,11 @@ static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
        DBG("UST consumer snapshot channel %" PRIu64, key);
 
        cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
-               /* Are we at a position _before_ the first available packet ? */
-               bool before_first_packet = true;
-
                health_code_update();
 
                /* Lock stream because we are about to change its state. */
                pthread_mutex_lock(&stream->lock);
-               stream->net_seq_idx = relayd_id;
+               stream->relayd_id = relayd_id;
 
                if (use_relayd) {
                        ret = consumer_send_relayd_stream(stream, path);
@@ -1150,7 +1183,6 @@ static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
                while (consumed_pos < produced_pos) {
                        ssize_t read_len;
                        unsigned long len, padded_len;
-                       int lost_packet = 0;
 
                        health_code_update();
 
@@ -1164,15 +1196,7 @@ static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
                                }
                                DBG("UST consumer get subbuf failed. Skipping it.");
                                consumed_pos += stream->max_sb_size;
-
-                               /*
-                                * Start accounting lost packets only when we
-                                * already have extracted packets (to match the
-                                * content of the final snapshot).
-                                */
-                               if (!before_first_packet) {
-                                       lost_packet = 1;
-                               }
+                               stream->chan->lost_packets++;
                                continue;
                        }
 
@@ -1208,16 +1232,6 @@ static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
                                goto error_close_stream;
                        }
                        consumed_pos += stream->max_sb_size;
-
-                       /*
-                        * Only account lost packets located between
-                        * succesfully extracted packets (do not account before
-                        * and after since they are not visible in the
-                        * resulting snapshot).
-                        */
-                       stream->chan->lost_packets += lost_packet;
-                       lost_packet = 0;
-                       before_first_packet = false;
                }
 
                /* Simply close the stream so we can use it on the next snapshot. */
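
With the before_first_packet bookkeeping gone, the snapshot loop counts every
unreadable sub-buffer as lost, whether or not it precedes the first extracted
packet. A simplified skeleton of the loop (get_next_subbuf/put_next_subbuf
stand in for the lttng-ust calls used by the real function):

    while (consumed_pos < produced_pos) {
            if (get_next_subbuf(stream) < 0) {
                    /* Sub-buffer already overwritten: skip and account it. */
                    consumed_pos += stream->max_sb_size;
                    stream->chan->lost_packets++;
                    continue;
            }
            /* ... read the sub-buffer, write it to disk or the relayd ... */
            put_next_subbuf(stream);
            consumed_pos += stream->max_sb_size;
    }
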
@@ -1354,7 +1368,7 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
        case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
        {
                /* Session daemon status message are handled in the following call. */
-               ret = consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
+               consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
                                msg.u.relayd_sock.type, ctx, sock, consumer_sockpoll,
                                &msg.u.relayd_sock.sock, msg.u.relayd_sock.session_id,
                                msg.u.relayd_sock.relayd_session_id);
@@ -2531,8 +2545,8 @@ retry:
         * The mmap operation should write subbuf_size amount of data when network
         * streaming or the full padding (len) size when we are _not_ streaming.
         */
-       if ((ret != subbuf_size && stream->net_seq_idx != (uint64_t) -1ULL) ||
-                       (ret != len && stream->net_seq_idx == (uint64_t) -1ULL)) {
+       if ((ret != subbuf_size && stream->relayd_id != (uint64_t) -1ULL) ||
+                       (ret != len && stream->relayd_id == (uint64_t) -1ULL)) {
                /*
                 * Display the error but continue processing to try to release the
                 * subbuffer. This is a DBG statement since any unexpected kill or
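
The check reads more naturally once the expected size is named: a stream bound
to a relayd sends only the sub-buffer payload, while a local stream writes the
full padded length so the trace file stays mmap-friendly. Restated for
illustration (not part of the patch):

    unsigned long expected_len =
                    stream->relayd_id != (uint64_t) -1ULL ? subbuf_size : len;
    if (ret != expected_len) {
            DBG("Error writing to tracefile (expected %lu, wrote %zd)",
                            expected_len, (ssize_t) ret);
    }
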
@@ -2613,7 +2627,7 @@ int lttng_ustconsumer_on_recv_stream(struct lttng_consumer_stream *stream)
        assert(stream);
 
        /* Don't create anything if this is set for streaming. */
-       if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor) {
+       if (stream->relayd_id == (uint64_t) -1ULL && stream->chan->monitor) {
                ret = utils_create_stream_file(stream->chan->pathname, stream->name,
                                stream->chan->tracefile_size, stream->tracefile_count_current,
                                stream->uid, stream->gid, NULL);
@@ -2841,7 +2855,7 @@ int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
        request.key = channel->key;
 
        DBG("Sending metadata request to sessiond, session id %" PRIu64
-                       ", per-pid %" PRIu64 ", app UID %u and channek key %" PRIu64,
+                       ", per-pid %" PRIu64 ", app UID %u and channel key %" PRIu64,
                        request.session_id, request.session_id_per_pid, request.uid,
                        request.key);
 