* See linux/ringbuffer/frontend.h for channel allocation and read-side API.
*/
-#include "../../wrapper/ringbuffer/frontend.h"
+#include <wrapper/ringbuffer/frontend.h>
+#include <wrapper/percpu-defs.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
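__get_cpu_var() was removed from mainline kernels, so the new wrapper header hides that API break behind lttng_this_cpu_ptr(). The header itself is outside this hunk; a minimal sketch of the shim it would have to provide, with the exact version cutoff being an assumption, is:

/* Sketch of wrapper/percpu-defs.h; the 3.15 cutoff is an assumption. */
#include <linux/version.h>

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,15,0))
#include <linux/percpu-defs.h>
#define lttng_this_cpu_ptr(ptr)		this_cpu_ptr(ptr)
#else
#include <linux/percpu.h>
#define lttng_this_cpu_ptr(ptr)		(&__get_cpu_var(*(ptr)))
#endif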
void lib_ring_buffer_put_cpu(const struct lib_ring_buffer_config *config)
{
barrier();
- __get_cpu_var(lib_ring_buffer_nesting)--;
+ (*lttng_this_cpu_ptr(&lib_ring_buffer_nesting))--;
rcu_read_unlock_sched_notrace();
}
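For context, put_cpu() is the tail half of a get/put pair bracketing a record reservation: the get side takes rcu_read_lock_sched_notrace() and bumps the same per-CPU nesting counter before returning the CPU number. A hedged pairing sketch, where probe_record() and write_payload() are hypothetical and only the two *_cpu() calls are taken from this file:

/* Hedged usage sketch; caller and payload helper are hypothetical. */
static int probe_record(const struct lib_ring_buffer_config *config,
			struct lib_ring_buffer_ctx *ctx)
{
	int cpu, ret;

	cpu = lib_ring_buffer_get_cpu(config);	/* rcu sched lock + nesting++ */
	if (unlikely(cpu < 0))
		return -EPERM;			/* nesting too deep */
	ctx->cpu = cpu;
	ret = write_payload(ctx);		/* hypothetical record write */
	lib_ring_buffer_put_cpu(config);	/* barrier(), then nesting-- */
	return ret;
}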
unsigned long o_begin, o_end, o_old;
size_t before_hdr_pad = 0;
- if (atomic_read(&chan->record_disabled))
+ if (unlikely(atomic_read(&chan->record_disabled)))
return -EAGAIN;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
buf = per_cpu_ptr(chan->backend.buf, ctx->cpu);
else
buf = chan->backend.buf;
- if (atomic_read(&buf->record_disabled))
+ if (unlikely(atomic_read(&buf->record_disabled)))
return -EAGAIN;
ctx->buf = buf;
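The two record_disabled counters are only ever flipped from slow control paths (a per-channel and a per-buffer kill switch), which is what makes the unlikely() hints free accuracy for the reserve fast path. A hedged sketch of the writer side; the function name and the grace-period pairing are assumptions, not LTTng API:

/* Hedged sketch: channel-wide disable from a control path. */
static void tracer_stop_channel(struct channel *chan)
{
	atomic_inc(&chan->record_disabled);	/* reservers now see -EAGAIN */
	/* Assumed pairing with rcu_read_lock_sched_notrace() in reservers. */
	synchronize_sched();
}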
unsigned long offset_end = ctx->buf_offset;
unsigned long endidx = subbuf_index(offset_end - 1, chan);
unsigned long commit_count;
+ struct commit_counters_hot *cc_hot = &buf->commit_hot[endidx];
/*
* Must count record before incrementing the commit count.
} else
smp_wmb();
- v_add(config, ctx->slot_size, &buf->commit_hot[endidx].cc);
+ v_add(config, ctx->slot_size, &cc_hot->cc);
/*
* commit count read can race with concurrent OOO commit count updates.
* count reaches back the reserve offset for a specific sub-buffer,
* which is completely independent of the order.
*/
- commit_count = v_read(config, &buf->commit_hot[endidx].cc);
+ commit_count = v_read(config, &cc_hot->cc);
lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
commit_count, endidx, ctx->tsc);
* Update used size at each commit. It's needed only for extracting
* ring_buffer buffers from vmcore, after crash.
*/
- lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
- offset_end, commit_count);
+ lib_ring_buffer_write_commit_counter(config, buf, chan,
+ offset_end, commit_count, cc_hot);
}
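Hoisting &buf->commit_hot[endidx] into cc_hot computes the indexed address once; v_add(), v_read() and the write_commit_counter callee then reuse the same pointer, which is also why the callee's new argument list can drop endidx. A sketch of the structure being cached; the cc field is inferred from the v_add()/v_read() usage above, while seq is an assumed second counter backing the vmcore used-size update:

/* Sketch only; field set partly inferred, partly assumed. */
struct commit_counters_hot {
	union v_atomic cc;	/* bytes committed in this sub-buffer */
	union v_atomic seq;	/* assumed counter for crash/vmcore extraction */
};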
/**