struct lttng_ust_shm_handle *handle)
{
unsigned long offset, idx, commit_count;
+ struct commit_counters_hot *cc_hot = shmp_index(handle, buf->commit_hot, idx);
CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
CHAN_WARN_ON(chan, config->sync != RING_BUFFER_SYNC_PER_CPU);
+ if (caa_unlikely(!cc_hot))
+ return 0;
+
/*
* Read offset and commit count in a loop so they are both read
* atomically wrt interrupts. We deal with interrupt concurrency by
do {
offset = v_read(config, &buf->offset);
idx = subbuf_index(offset, chan);
- commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, idx)->cc);
+ commit_count = v_read(config, &cc_hot->cc);
} while (offset != v_read(config, &buf->offset));
return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
void lib_ring_buffer_write_commit_counter(const struct lttng_ust_lib_ring_buffer_config *config,
struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
- unsigned long idx,
unsigned long buf_offset,
unsigned long commit_count,
- struct lttng_ust_shm_handle *handle)
+ struct lttng_ust_shm_handle *handle,
+ struct commit_counters_hot *cc_hot)
{
unsigned long commit_seq_old;
if (caa_unlikely(subbuf_offset(buf_offset - commit_count, chan)))
return;
- commit_seq_old = v_read(config, &shmp_index(handle, buf->commit_hot, idx)->seq);
- while ((long) (commit_seq_old - commit_count) < 0)
- commit_seq_old = v_cmpxchg(config, &shmp_index(handle, buf->commit_hot, idx)->seq,
- commit_seq_old, commit_count);
+ commit_seq_old = v_read(config, &cc_hot->seq);
+ if (caa_likely((long) (commit_seq_old - commit_count) < 0))
+ v_set(config, &cc_hot->seq, commit_count);
}
extern int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,