#ifndef _LTTNG_RING_BUFFER_FRONTEND_API_H
#define _LTTNG_RING_BUFFER_FRONTEND_API_H

/*
 * libringbuffer/frontend_api.h
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
 *
 * Ring Buffer Library Synchronization Header (buffer write API).
 *
 * Author:
 *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free
 * algorithms.
 * See frontend.h for channel allocation and read-side API.
 */

#include "frontend.h"
#include <urcu-bp.h>
#include <urcu/compiler.h>
static inline
int lib_ring_buffer_get_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
{
	return lttng_ust_get_cpu();
}
/**
 * lib_ring_buffer_begin - Precedes ring buffer reserve/commit.
 *
 * Keeps a ring buffer nesting count as supplementary safety net to
 * ensure tracer client code will never trigger an endless recursion.
 * Returns 0 on success, -EPERM on failure (nesting count too high).
 *
 * asm volatile and "memory" clobber prevent the compiler from moving
 * instructions out of the ring buffer nesting count. This is required to ensure
 * that probe side-effects which can cause recursion (e.g. unforeseen traps,
 * divisions by 0, ...) are triggered within the incremented nesting count
 * section.
 */
static inline
int lib_ring_buffer_begin(const struct lttng_ust_lib_ring_buffer_config *config)
{
	int nesting;

	nesting = ++URCU_TLS(lib_ring_buffer_nesting);
	cmm_barrier();

	if (caa_unlikely(nesting > 4)) {
		WARN_ON_ONCE(1);
		URCU_TLS(lib_ring_buffer_nesting)--;
		return -EPERM;
	} else
		return 0;
}
/**
 * lib_ring_buffer_end - Follows ring buffer reserve/commit.
 */
static inline
void lib_ring_buffer_end(const struct lttng_ust_lib_ring_buffer_config *config)
{
	cmm_barrier();
	URCU_TLS(lib_ring_buffer_nesting)--;	/* TLS */
}
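
/*
 * Usage sketch (illustrative only, compiled out): tracer client code is
 * expected to bracket the reserve/commit sequence with
 * lib_ring_buffer_begin()/lib_ring_buffer_end(), so that unexpected
 * recursion into the tracer (e.g. a trap triggered from within a probe)
 * bails out once the nesting count exceeds 4. The "my_probe_example"
 * function below is a hypothetical placeholder.
 */
#if 0
static
void my_probe_example(const struct lttng_ust_lib_ring_buffer_config *config)
{
	if (lib_ring_buffer_begin(config))
		return;	/* Nesting count too high: drop the event. */
	/* ... reserve, write payload, commit ... */
	lib_ring_buffer_end(config);
}
#endif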
/*
 * lib_ring_buffer_try_reserve is called by lib_ring_buffer_reserve(). It is not
 * part of the API per se.
 *
 * returns 0 if reserve ok, or 1 if the slow path must be taken.
 */
static inline
int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer_ctx *ctx,
				unsigned long *o_begin, unsigned long *o_end,
				unsigned long *o_old, size_t *before_hdr_pad)
{
	struct channel *chan = ctx->chan;
	struct lttng_ust_lib_ring_buffer *buf = ctx->buf;

	*o_begin = v_read(config, &buf->offset);
	*o_old = *o_begin;

	ctx->tsc = lib_ring_buffer_clock_read(chan);
	if ((int64_t) ctx->tsc == -EIO)
		return 1;

	/*
	 * Prefetch cacheline for read because we have to read the previous
	 * commit counter to increment it and commit seq value to compare it to
	 * the commit counter.
	 */
	//prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);

	if (last_tsc_overflow(config, buf, ctx->tsc))
		ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;

	if (caa_unlikely(subbuf_offset(*o_begin, chan) == 0))
		return 1;

	ctx->slot_size = record_header_size(config, chan, *o_begin,
					    before_hdr_pad, ctx);
	ctx->slot_size +=
		lib_ring_buffer_align(*o_begin + ctx->slot_size,
				      ctx->largest_align) + ctx->data_size;
	if (caa_unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
			 > chan->backend.subbuf_size))
		return 1;

	/*
	 * Record fits in the current buffer and we are not on a switch
	 * boundary. It's safe to write.
	 */
	*o_end = *o_begin + ctx->slot_size;

	if (caa_unlikely((subbuf_offset(*o_end, chan)) == 0))
		/*
		 * The offset_end will fall at the very beginning of the next
		 * subbuffer.
		 */
		return 1;

	return 0;
}
/**
 * lib_ring_buffer_reserve - Reserve space in a ring buffer.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context. (input and output) Must be already initialized.
 *
 * Atomic wait-free slot reservation. The reserved space starts at the context
 * "pre_offset". Its length is "slot_size". The associated time-stamp is "tsc".
 *
 * Return :
 *  0 on success.
 * -EAGAIN if channel is disabled.
 * -ENOSPC if event size is too large for packet.
 * -ENOBUFS if there is currently not enough space in buffer for the event.
 * -EIO if data cannot be written into the buffer for any other reason.
 */
static inline
int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
			    struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	struct channel *chan = ctx->chan;
	struct lttng_ust_shm_handle *handle = ctx->handle;
	struct lttng_ust_lib_ring_buffer *buf;
	unsigned long o_begin, o_end, o_old;
	size_t before_hdr_pad = 0;

	if (caa_unlikely(uatomic_read(&chan->record_disabled)))
		return -EAGAIN;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp);
	else
		buf = shmp(handle, chan->backend.buf[0].shmp);
	if (caa_unlikely(!buf))
		return -EIO;
	if (caa_unlikely(uatomic_read(&buf->record_disabled)))
		return -EAGAIN;
	ctx->buf = buf;

	/*
	 * Perform retryable operations.
	 */
	if (caa_unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
						     &o_end, &o_old, &before_hdr_pad)))
		goto slow_path;

	if (caa_unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
			 != o_old))
		goto slow_path;

	/*
	 * Atomically update last_tsc. This update races against concurrent
	 * atomic updates, but the race will always cause supplementary full TSC
	 * record headers, never the opposite (missing a full TSC record header
	 * when it would be needed).
	 */
	save_last_tsc(config, ctx->buf, ctx->tsc);

	/*
	 * Push the reader if necessary
	 */
	lib_ring_buffer_reserve_push_reader(ctx->buf, chan, o_end - 1);

	/*
	 * Clear noref flag for this subbuffer.
	 */
	lib_ring_buffer_clear_noref(config, &ctx->buf->backend,
				    subbuf_index(o_end - 1, chan), handle);

	ctx->pre_offset = o_begin;
	ctx->buf_offset = o_begin + before_hdr_pad;
	return 0;

slow_path:
	return lib_ring_buffer_reserve_slow(ctx);
}
/**
 * lib_ring_buffer_switch - Perform a sub-buffer switch for a per-cpu buffer.
 * @config: ring buffer instance configuration.
 * @buf: ring buffer.
 * @mode: buffer switch mode (SWITCH_ACTIVE or SWITCH_FLUSH)
 *
 * This operation is completely reentrant: it can be called while tracing is
 * active with absolutely no lock held.
 *
 * Note, however, that as a v_cmpxchg is used for some atomic operations and
 * requires to be executed locally for per-CPU buffers, this function must be
 * called from the CPU which owns the buffer for an ACTIVE flush, with preemption
 * disabled, for RING_BUFFER_SYNC_PER_CPU configuration.
 */
static inline
void lib_ring_buffer_switch(const struct lttng_ust_lib_ring_buffer_config *config,
			    struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
			    struct lttng_ust_shm_handle *handle)
{
	lib_ring_buffer_switch_slow(buf, mode, handle);
}
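
/*
 * Usage sketch (illustrative only, compiled out): pushing the data
 * committed so far out to readers without waiting for the sub-buffer to
 * fill. SWITCH_FLUSH also switches partially filled sub-buffers; per the
 * note above, a SWITCH_ACTIVE on a RING_BUFFER_SYNC_PER_CPU configuration
 * must run on the CPU owning the buffer.
 */
#if 0
static
void flush_buffer_example(const struct lttng_ust_lib_ring_buffer_config *config,
			  struct lttng_ust_lib_ring_buffer *buf,
			  struct lttng_ust_shm_handle *handle)
{
	lib_ring_buffer_switch(config, buf, SWITCH_FLUSH, handle);
}
#endif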
/* See ring_buffer_frontend_api.h for lib_ring_buffer_reserve(). */
/**
 * lib_ring_buffer_commit - Commit a record.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context. (input arguments only)
 *
 * Atomic unordered slot commit. Increments the commit count in the
 * specified sub-buffer, and delivers it if necessary.
 */
static inline
void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *config,
			    const struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	struct channel *chan = ctx->chan;
	struct lttng_ust_shm_handle *handle = ctx->handle;
	struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
	unsigned long offset_end = ctx->buf_offset;
	unsigned long endidx = subbuf_index(offset_end - 1, chan);
	unsigned long commit_count;
	struct commit_counters_hot *cc_hot = shmp_index(handle,
						buf->commit_hot, endidx);
	struct lttng_rseq_state rseq_state;

	if (caa_likely(ctx->ctx_len
			>= sizeof(struct lttng_ust_lib_ring_buffer_ctx))) {
		rseq_state = ctx->rseq_state;
	} else {
		rseq_state.cpu_id = -2;
	}

	if (caa_unlikely(!cc_hot))
		return;

	/*
	 * Must count record before incrementing the commit count.
	 */
	subbuffer_count_record(config, ctx, &buf->backend, endidx, handle);

	/*
	 * Order all writes to buffer before the commit count update that will
	 * determine that the subbuffer is full.
	 */
	cmm_smp_wmb();

	if (caa_likely(rseq_state.cpu_id >= 0)) {
		unsigned long newv;

		newv = cc_hot->cc_rseq + ctx->slot_size;
		if (caa_likely(__rseq_finish(NULL, 0, NULL, NULL, 0,
				(intptr_t *)&cc_hot->cc_rseq,
				(intptr_t)newv,
				rseq_state, RSEQ_FINISH_SINGLE, false)))
			goto add_done;
	}
	v_add(config, ctx->slot_size, &cc_hot->cc);
add_done:
	/*
	 * commit count read can race with concurrent OOO commit count updates.
	 * This is only needed for lib_ring_buffer_check_deliver (for
	 * non-polling delivery only) and for
	 * lib_ring_buffer_write_commit_counter. The race can only cause the
	 * counter to be read with the same value more than once, which could
	 * cause:
	 * - Multiple delivery for the same sub-buffer (which is handled
	 *   gracefully by the reader code) if the value is for a full
	 *   sub-buffer. It's important that we can never miss a sub-buffer
	 *   delivery. Re-reading the value after the v_add ensures this.
	 * - Reading a commit_count with a higher value than what was actually
	 *   added to it for the lib_ring_buffer_write_commit_counter call
	 *   (again caused by a concurrent committer). It does not matter,
	 *   because this function is interested in the fact that the commit
	 *   count reaches back the reserve offset for a specific sub-buffer,
	 *   which is completely independent of the order.
	 */
	commit_count = v_read(config, &cc_hot->cc);
	commit_count += cc_hot->cc_rseq;

	lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
				      commit_count, endidx, handle, ctx->tsc);
	/*
	 * Update used size at each commit. It's needed only for extracting
	 * ring_buffer buffers from vmcore, after crash.
	 */
	lib_ring_buffer_write_commit_counter(config, buf, chan,
			offset_end, commit_count, handle, cc_hot);
}
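
/*
 * Usage sketch (illustrative only, compiled out): the typical fast-path
 * write sequence pairs lib_ring_buffer_reserve() with
 * lib_ring_buffer_commit(), writing the payload in between. This assumes
 * the lib_ring_buffer_ctx_init() helper from ringbuffer-config.h and the
 * lib_ring_buffer_write() helper from backend.h with their usual
 * signatures; "write_event_example" and its parameters are hypothetical.
 */
#if 0
static
int write_event_example(const struct lttng_ust_lib_ring_buffer_config *config,
			struct channel *chan, struct lttng_ust_shm_handle *handle,
			const void *payload, size_t len)
{
	struct lttng_ust_lib_ring_buffer_ctx ctx;
	int ret;

	/* Byte-aligned payload: data_size = len, largest_align = 1. */
	lib_ring_buffer_ctx_init(&ctx, chan, NULL, len, 1,
				 lib_ring_buffer_get_cpu(config), handle);
	ret = lib_ring_buffer_reserve(config, &ctx);
	if (ret)
		return ret;	/* -EAGAIN, -ENOSPC, -ENOBUFS or -EIO. */
	lib_ring_buffer_write(config, &ctx, payload, len);
	lib_ring_buffer_commit(config, &ctx);
	return 0;
}
#endif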
/**
 * lib_ring_buffer_try_discard_reserve - Try discarding a record.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context. (input arguments only)
 *
 * Only succeeds if no other record has been written after the record to
 * discard. If discard fails, the record must be committed to the buffer.
 *
 * Returns 0 upon success, -EPERM if the record cannot be discarded.
 */
static inline
int lib_ring_buffer_try_discard_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
					const struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
	unsigned long end_offset = ctx->pre_offset + ctx->slot_size;

	/*
	 * We need to ensure that if the cmpxchg succeeds and discards the
	 * record, the next record will record a full TSC, because it cannot
	 * rely on the last_tsc associated with the discarded record to detect
	 * overflows. The only way to ensure this is to set the last_tsc to 0
	 * (assuming no 64-bit TSC overflow), which forces to write a 64-bit
	 * timestamp in the next record.
	 *
	 * Note: if discard fails, we must leave the TSC in the record header.
	 * It is needed to keep track of TSC overflows for the following
	 * records.
	 */
	save_last_tsc(config, buf, 0ULL);

	if (caa_likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
		       != end_offset))
		return -EPERM;
	else
		return 0;
}
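
/*
 * Usage sketch (illustrative only, compiled out): a writer that decides,
 * after reservation, that a record should be dropped (e.g. rejected by a
 * filter) first tries to hand the reserved space back; if another record
 * was reserved after it, the slot must be committed anyway. The
 * "should_keep" predicate below is hypothetical.
 */
#if 0
static
void discard_or_commit_example(const struct lttng_ust_lib_ring_buffer_config *config,
			       struct lttng_ust_lib_ring_buffer_ctx *ctx,
			       int should_keep)
{
	if (!should_keep && !lib_ring_buffer_try_discard_reserve(config, ctx))
		return;	/* Record successfully discarded. */
	lib_ring_buffer_commit(config, ctx);
}
#endif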
static inline
void channel_record_disable(const struct lttng_ust_lib_ring_buffer_config *config,
			    struct channel *chan)
{
	uatomic_inc(&chan->record_disabled);
}

static inline
void channel_record_enable(const struct lttng_ust_lib_ring_buffer_config *config,
			   struct channel *chan)
{
	uatomic_dec(&chan->record_disabled);
}

static inline
void lib_ring_buffer_record_disable(const struct lttng_ust_lib_ring_buffer_config *config,
				    struct lttng_ust_lib_ring_buffer *buf)
{
	uatomic_inc(&buf->record_disabled);
}

static inline
void lib_ring_buffer_record_enable(const struct lttng_ust_lib_ring_buffer_config *config,
				   struct lttng_ust_lib_ring_buffer *buf)
{
	uatomic_dec(&buf->record_disabled);
}
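
/*
 * Usage sketch (illustrative only, compiled out): record_disabled is a
 * counter rather than a flag, so disable/enable calls nest and must stay
 * balanced; reservation fails with -EAGAIN while the count is non-zero.
 */
#if 0
static
void blackout_example(const struct lttng_ust_lib_ring_buffer_config *config,
		      struct channel *chan)
{
	channel_record_disable(config, chan);	/* reserve returns -EAGAIN */
	/* ... section during which no record may be written ... */
	channel_record_enable(config, chan);	/* tracing resumes */
}
#endif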
#endif /* _LTTNG_RING_BUFFER_FRONTEND_API_H */