/*
 * Copyright (C) 2011 EfficiOS Inc.
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
 */
#include <unistd.h>
#include <common/common.h>
+#include <common/consumer/consumer-timer.h>
+#include <common/consumer/consumer.h>
+#include <common/consumer/metadata-bucket.h>
#include <common/index/index.h>
#include <common/kernel-consumer/kernel-consumer.h>
+#include <common/macros.h>
#include <common/relayd/relayd.h>
#include <common/ust-consumer/ust-consumer.h>
#include <common/utils.h>
-#include <common/consumer/consumer.h>
-#include <common/consumer/consumer-timer.h>
-#include <common/consumer/metadata-bucket.h>
#include "consumer-stream.h"
pthread_mutex_unlock(&stream->chan->lock);
}
+static void consumer_stream_data_assert_locked_all(struct lttng_consumer_stream *stream)
+{
+ ASSERT_LOCKED(stream->lock);
+ ASSERT_LOCKED(stream->chan->lock);
+}
+
static void consumer_stream_metadata_lock_all(struct lttng_consumer_stream *stream)
{
consumer_stream_data_lock_all(stream);
consumer_stream_data_unlock_all(stream);
}
+static void consumer_stream_metadata_assert_locked_all(struct lttng_consumer_stream *stream)
+{
+ ASSERT_LOCKED(stream->metadata_rdv_lock);
+ consumer_stream_data_assert_locked_all(stream);
+}
+
/* Only used for data streams. */
static int consumer_stream_update_stats(struct lttng_consumer_stream *stream,
const struct stream_subbuffer *subbuf)
{
int ret = 0;
uint64_t sequence_number;
- const uint64_t discarded_events =
- LTTNG_OPTIONAL_GET(subbuf->info.data.sequence_number);
+ const uint64_t discarded_events = subbuf->info.data.events_discarded;
if (!subbuf->info.data.sequence_number.is_set) {
/* Command not supported by the tracer. */
const unsigned long padding_size =
subbuffer->info.data.padded_subbuf_size -
subbuffer->info.data.subbuf_size;
-
- return lttng_consumer_on_read_subbuffer_mmap(
+ const ssize_t written_bytes = lttng_consumer_on_read_subbuffer_mmap(
stream, &subbuffer->buffer.buffer, padding_size);
+
+ if (stream->net_seq_idx == -1ULL) {
+ /*
+ * When writing on disk, check that only the subbuffer (no
+ * padding) was written to disk.
+ */
+ if (written_bytes != subbuffer->info.data.padded_subbuf_size) {
+ DBG("Failed to write the entire padded subbuffer on disk (written_bytes: %zd, padded subbuffer size %lu)",
+ written_bytes,
+ subbuffer->info.data.padded_subbuf_size);
+ }
+ } else {
+ /*
+ * When streaming over the network, check that the entire
+ * subbuffer including padding was successfully written.
+ */
+ if (written_bytes != subbuffer->info.data.subbuf_size) {
+ DBG("Failed to write only the subbuffer over the network (written_bytes: %zd, subbuffer size %lu)",
+ written_bytes,
+ subbuffer->info.data.subbuf_size);
+ }
+ }
+
+ /*
+ * If `lttng_consumer_on_read_subbuffer_mmap()` returned an error, pass
+ * it along to the caller, else return zero.
+ */
+ if (written_bytes < 0) {
+ ERR("Error reading mmap subbuffer: %zd", written_bytes);
+ }
+
+ return written_bytes;
}
static ssize_t consumer_stream_consume_splice(
struct lttng_consumer_stream *stream,
const struct stream_subbuffer *subbuffer)
{
- return lttng_consumer_on_read_subbuffer_splice(ctx, stream,
- subbuffer->info.data.padded_subbuf_size, 0);
+ const ssize_t written_bytes = lttng_consumer_on_read_subbuffer_splice(
+ ctx, stream, subbuffer->info.data.padded_subbuf_size, 0);
+
+ if (written_bytes != subbuffer->info.data.padded_subbuf_size) {
+ DBG("Failed to write the entire padded subbuffer (written_bytes: %zd, padded subbuffer size %lu)",
+ written_bytes,
+ subbuffer->info.data.padded_subbuf_size);
+ }
+
+ /*
+ * If `lttng_consumer_on_read_subbuffer_splice()` returned an error,
+ * pass it along to the caller, else return zero.
+ */
+ if (written_bytes < 0) {
+ ERR("Error reading splice subbuffer: %zd", written_bytes);
+ }
+
+ return written_bytes;
}
static int consumer_stream_send_index(
struct lttng_consumer_local_data *ctx)
{
int ret;
+ enum sync_metadata_status status;
assert(metadata);
assert(metadata->metadata_flag);
/*
* Empty the metadata cache and flush the current stream.
*/
- ret = lttng_kconsumer_sync_metadata(metadata);
+ status = lttng_kconsumer_sync_metadata(metadata);
break;
case LTTNG_CONSUMER32_UST:
case LTTNG_CONSUMER64_UST:
* Ask the sessiond if we have new metadata waiting and update the
* consumer metadata cache.
*/
- ret = lttng_ustconsumer_sync_metadata(ctx, metadata);
+ status = lttng_ustconsumer_sync_metadata(ctx, metadata);
break;
default:
- assert(0);
- ret = -1;
- break;
+ abort();
}
- /*
- * Error or no new metadata, we exit here.
- */
- if (ret <= 0 || ret == ENODATA) {
+
+ switch (status) {
+ case SYNC_METADATA_STATUS_NEW_DATA:
+ break;
+ case SYNC_METADATA_STATUS_NO_DATA:
+ ret = 0;
+ goto end_unlock_mutex;
+ case SYNC_METADATA_STATUS_ERROR:
+ ret = -1;
goto end_unlock_mutex;
+ default:
+ abort();
}
/*
*/
pthread_cond_wait(&metadata->metadata_rdv, &metadata->metadata_rdv_lock);
pthread_mutex_unlock(&metadata->metadata_rdv_lock);
- } while (ret == EAGAIN);
+ } while (status == SYNC_METADATA_STATUS_NEW_DATA);
/* Success */
return 0;
const struct stream_subbuffer *subbuffer,
struct lttng_consumer_local_data *ctx)
{
+ bool missed_metadata_flush;
int ret;
/* Block until all the metadata is sent. */
pthread_mutex_lock(&stream->metadata_timer_lock);
stream->waiting_on_metadata = false;
- if (stream->missed_metadata_flush) {
+ missed_metadata_flush = stream->missed_metadata_flush;
+ if (missed_metadata_flush) {
stream->missed_metadata_flush = false;
- pthread_mutex_unlock(&stream->metadata_timer_lock);
- (void) stream->read_subbuffer_ops.send_live_beacon(stream);
- } else {
- pthread_mutex_unlock(&stream->metadata_timer_lock);
}
+ pthread_mutex_unlock(&stream->metadata_timer_lock);
if (ret < 0) {
goto end;
}
ret = consumer_stream_send_index(stream, subbuffer, ctx);
+ /*
+ * Send the live inactivity beacon to handle the situation where
+ * the live timer is prevented from sampling this stream
+ * because the stream lock was being held while this stream is
+ * waiting on metadata. This ensures live viewer progress in the
+ * unlikely scenario where a live timer would be prevented from
+ * locking a stream lock repeatedly due to a steady flow of
+ * incoming metadata, for a stream which is mostly inactive.
+ *
+ * It is important to send the inactivity beacon packet to
+ * relayd _after_ sending the index associated with the data
+ * that was just sent, otherwise this can cause live viewers to
+ * observe timestamps going backwards between an inactivity
+ * beacon and a following trace packet.
+ */
+ if (missed_metadata_flush) {
+ (void) stream->read_subbuffer_ops.send_live_beacon(stream);
+ }
end:
return ret;
}
goto end;
}
+ rcu_read_lock();
+
if (trace_chunk && !lttng_trace_chunk_get(trace_chunk)) {
ERR("Failed to acquire trace chunk reference during the creation of a stream");
ret = -1;
goto error;
}
- rcu_read_lock();
+ stream->send_node = (typeof(stream->send_node))
+ CDS_LIST_HEAD_INIT(stream->send_node);
stream->chan = channel;
stream->key = stream_key;
stream->trace_chunk = trace_chunk;
stream->index_file = NULL;
stream->last_sequence_number = -1ULL;
stream->rotate_position = -1ULL;
+ /* Buffer is created with an open packet. */
+ stream->opened_packet_in_current_trace_chunk = true;
pthread_mutex_init(&stream->lock, NULL);
pthread_mutex_init(&stream->metadata_timer_lock, NULL);
consumer_stream_metadata_lock_all;
stream->read_subbuffer_ops.unlock =
consumer_stream_metadata_unlock_all;
+ stream->read_subbuffer_ops.assert_locked =
+ consumer_stream_metadata_assert_locked_all;
stream->read_subbuffer_ops.pre_consume_subbuffer =
metadata_stream_check_version;
} else {
stream->read_subbuffer_ops.lock = consumer_stream_data_lock_all;
stream->read_subbuffer_ops.unlock =
consumer_stream_data_unlock_all;
+ stream->read_subbuffer_ops.assert_locked =
+ consumer_stream_data_assert_locked_all;
stream->read_subbuffer_ops.pre_consume_subbuffer =
consumer_stream_update_stats;
if (channel->is_live) {
{
assert(stream);
+ cds_list_del_init(&stream->send_node);
+
/* Stream is in monitor mode. */
if (stream->monitor) {
struct lttng_consumer_channel *free_chan = NULL;
* If the stream is not visible globally, this needs to be done
* outside of the consumer data lock section.
*/
+ destroy_close_stream(stream);
free_chan = unref_channel(stream);
}