X-Git-Url: http://git.efficios.com/?p=lttng-tools.git;a=blobdiff_plain;f=src%2Fcommon%2Fconsumer-metadata-cache.c;h=cbd3ef3945a5065017dbe1dc46c354fa10446aa5;hp=0c20e7fc7cbba6850a3337fc4ebb788d3e5d5f84;hb=ea26306076d26c40ed31e4f9170dc2852bda502f;hpb=fe81e5c901c1dd23495620c8efd9e1ed6df86c8b

diff --git a/src/common/consumer-metadata-cache.c b/src/common/consumer-metadata-cache.c
index 0c20e7fc7..cbd3ef394 100644
--- a/src/common/consumer-metadata-cache.c
+++ b/src/common/consumer-metadata-cache.c
@@ -16,7 +16,7 @@
  * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
  */
 
-#define _GNU_SOURCE
+#define _LGPL_SOURCE
 #include
 #include
 #include
@@ -46,14 +46,13 @@ static int extend_metadata_cache(struct lttng_consumer_channel *channel,
 {
	int ret = 0;
	char *tmp_data_ptr;
-	unsigned int new_size;
+	unsigned int new_size, old_size;
 
	assert(channel);
	assert(channel->metadata_cache);
 
-	new_size = max_t(unsigned int,
-			channel->metadata_cache->cache_alloc_size + size,
-			channel->metadata_cache->cache_alloc_size << 1);
+	old_size = channel->metadata_cache->cache_alloc_size;
+	new_size = max_t(unsigned int, old_size + size, old_size << 1);
	DBG("Extending metadata cache to %u", new_size);
	tmp_data_ptr = realloc(channel->metadata_cache->data, new_size);
	if (!tmp_data_ptr) {
@@ -62,6 +61,8 @@ static int extend_metadata_cache(struct lttng_consumer_channel *channel,
		ret = -1;
		goto end;
	}
+	/* Zero newly allocated memory */
+	memset(tmp_data_ptr + old_size, 0, new_size - old_size);
	channel->metadata_cache->data = tmp_data_ptr;
	channel->metadata_cache->cache_alloc_size = new_size;
 
@@ -71,8 +72,8 @@ end:
 
 /*
  * Write metadata to the cache, extend the cache if necessary. We support
- * non-contiguous updates but not overlapping ones. If there is contiguous
- * metadata in the cache, we send it to the ring buffer. The metadata cache
+ * overlapping updates, but they need to be contiguous. Send the
+ * contiguous metadata in cache to the ring buffer. The metadata cache
  * lock MUST be acquired to write in the cache.
  *
  * Return 0 on success, a negative value on error.
@@ -81,6 +82,7 @@ int consumer_metadata_cache_write(struct lttng_consumer_channel *channel,
		unsigned int offset, unsigned int len, char *data)
 {
	int ret = 0;
+	int size_ret;
	struct consumer_metadata_cache *cache;
 
	assert(channel);
@@ -99,21 +101,19 @@ int consumer_metadata_cache_write(struct lttng_consumer_channel *channel,
	}
 
	memcpy(cache->data + offset, data, len);
-	cache->total_bytes_written += len;
	if (offset + len > cache->max_offset) {
-		cache->max_offset = offset + len;
-	}
+		char dummy = 'c';
 
-	if (cache->max_offset == cache->total_bytes_written) {
-		offset = cache->rb_pushed;
-		len = cache->total_bytes_written - cache->rb_pushed;
-		ret = lttng_ustconsumer_push_metadata(channel, cache->data, offset,
-				len);
-		if (ret < 0) {
-			ERR("Pushing metadata");
-			goto end;
+		cache->max_offset = offset + len;
+		if (channel->monitor) {
+			size_ret = lttng_write(channel->metadata_stream->ust_metadata_poll_pipe[1],
+					&dummy, 1);
+			if (size_ret < 1) {
+				ERR("Wakeup UST metadata pipe");
+				ret = -1;
+				goto end;
+			}
		}
-		cache->rb_pushed += len;
	}
 
 end:
@@ -177,11 +177,6 @@ void consumer_metadata_cache_destroy(struct lttng_consumer_channel *channel)
 
	DBG("Destroying metadata cache");
 
-	if (channel->metadata_cache->max_offset >
-			channel->metadata_cache->rb_pushed) {
-		ERR("Destroying a cache not entirely commited");
-	}
-
	pthread_mutex_destroy(&channel->metadata_cache->lock);
	free(channel->metadata_cache->data);
	free(channel->metadata_cache);
@@ -193,27 +188,37 @@ void consumer_metadata_cache_destroy(struct lttng_consumer_channel *channel)
  * Return 0 if everything has been flushed, 1 if there is data not flushed.
  */
 int consumer_metadata_cache_flushed(struct lttng_consumer_channel *channel,
-		uint64_t offset)
+		uint64_t offset, int timer)
 {
-	int ret;
-	struct consumer_metadata_cache *cache;
+	int ret = 0;
+	struct lttng_consumer_stream *metadata_stream;
 
	assert(channel);
	assert(channel->metadata_cache);
 
-	cache = channel->metadata_cache;
-
-	pthread_mutex_lock(&consumer_data.lock);
+	/*
+	 * If not called from a timer handler, we have to take the
+	 * channel lock to be mutually exclusive with channel teardown.
+	 * Timer handler does not need to take this lock because it is
+	 * already synchronized by timer stop (and, more importantly,
+	 * taking this lock in a timer handler would cause a deadlock).
+	 */
+	if (!timer) {
+		pthread_mutex_lock(&channel->lock);
+	}
+	pthread_mutex_lock(&channel->timer_lock);
	pthread_mutex_lock(&channel->metadata_cache->lock);
 
-	if (cache->rb_pushed >= offset) {
-		ret = 0;
-	} else if (!channel->metadata_stream) {
+	metadata_stream = channel->metadata_stream;
+
+	if (!metadata_stream) {
		/*
		 * Having no metadata stream means the channel is being destroyed so there
		 * is no cache to flush anymore.
		 */
		ret = 0;
+	} else if (metadata_stream->ust_metadata_pushed >= offset) {
+		ret = 0;
	} else if (channel->metadata_stream->endpoint_status !=
			CONSUMER_ENDPOINT_ACTIVE) {
		/* An inactive endpoint means we don't have to flush anymore. */
@@ -224,7 +229,10 @@ int consumer_metadata_cache_flushed(struct lttng_consumer_channel *channel,
	}
 
	pthread_mutex_unlock(&channel->metadata_cache->lock);
-	pthread_mutex_unlock(&consumer_data.lock);
+	pthread_mutex_unlock(&channel->timer_lock);
+	if (!timer) {
+		pthread_mutex_unlock(&channel->lock);
+	}
 
	return ret;
 }
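
For reference, the growth policy that extend_metadata_cache() keeps in this patch (grow to at least the requested size, but never less than double, and zero the newly allocated region as the added memset does) can be summarized by the following standalone sketch. The struct and function names here are simplified placeholders for illustration, not the lttng-tools API:

/* Sketch of the cache growth policy: new_size = max(old + size, old * 2),
 * with the freshly allocated tail zeroed, mirroring the memset added above. */
#include <stdlib.h>
#include <string.h>

struct cache_sketch {
	char *data;
	unsigned int alloc_size;
};

static int cache_extend_sketch(struct cache_sketch *cache, unsigned int size)
{
	unsigned int old_size = cache->alloc_size;
	unsigned int new_size = old_size + size;
	char *tmp;

	if (new_size < old_size << 1) {
		new_size = old_size << 1;	/* grow by at least 2x */
	}
	tmp = realloc(cache->data, new_size);
	if (!tmp) {
		return -1;	/* original buffer left untouched on failure */
	}
	memset(tmp + old_size, 0, new_size - old_size);
	cache->data = tmp;
	cache->alloc_size = new_size;
	return 0;
}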
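
The new timer argument of consumer_metadata_cache_flushed() only changes which locks the function takes, as the comment added in the hunk above explains. A minimal sketch of the intended call pattern, assuming hypothetical caller names (the real call sites live elsewhere in the consumer code and may differ):

/* Hypothetical callers, for illustration only; they assume the lttng-tools
 * consumer headers are included. */

/* From the metadata timer handler: pass timer = 1 so channel->lock is not
 * taken (the handler is already synchronized by timer stop, and taking the
 * lock there could deadlock). */
static int flushed_check_from_timer(struct lttng_consumer_channel *channel,
		uint64_t offset)
{
	return consumer_metadata_cache_flushed(channel, offset, 1);
}

/* From a regular consumer thread: pass timer = 0 so the function takes
 * channel->lock first (mutual exclusion with channel teardown), then
 * channel->timer_lock, then the metadata cache lock. */
static int flushed_check_from_thread(struct lttng_consumer_channel *channel,
		uint64_t offset)
{
	return consumer_metadata_cache_flushed(channel, offset, 0);
}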