/*
 * SPDX-License-Identifier: (LGPL-2.1-only or GPL-2.0-only)
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring Buffer Library Synchronization Header (internal helpers).
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 */

#ifndef _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H
#define _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H

#include <stdint.h>

#include <urcu/compiler.h>
#include <urcu/tls-compat.h>

#include <lttng/ringbuffer-config.h>
#include "backend_types.h"
#include "frontend_types.h"
#include "shm.h"
#include "ust-helper.h"
/* Buffer offset macros */

/* buf_trunc mask selects only the buffer number. */
static inline
unsigned long buf_trunc(unsigned long offset, struct channel *chan)
{
	return offset & ~(chan->backend.buf_size - 1);
}
/* Select the buffer number value (counter). */
static inline
unsigned long buf_trunc_val(unsigned long offset, struct channel *chan)
{
	return buf_trunc(offset, chan) >> chan->backend.buf_size_order;
}
/* buf_offset mask selects only the offset within the current buffer. */
static inline
unsigned long buf_offset(unsigned long offset, struct channel *chan)
{
	return offset & (chan->backend.buf_size - 1);
}
/* subbuf_offset mask selects the offset within the current subbuffer. */
static inline
unsigned long subbuf_offset(unsigned long offset, struct channel *chan)
{
	return offset & (chan->backend.subbuf_size - 1);
}
/* subbuf_trunc mask selects the subbuffer number. */
static inline
unsigned long subbuf_trunc(unsigned long offset, struct channel *chan)
{
	return offset & ~(chan->backend.subbuf_size - 1);
}
/* subbuf_align aligns the offset to the next subbuffer. */
static inline
unsigned long subbuf_align(unsigned long offset, struct channel *chan)
{
	return (offset + chan->backend.subbuf_size)
	       & ~(chan->backend.subbuf_size - 1);
}
/* subbuf_index returns the index of the current subbuffer within the buffer. */
static inline
unsigned long subbuf_index(unsigned long offset, struct channel *chan)
{
	return buf_offset(offset, chan) >> chan->backend.subbuf_size_order;
}
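
/*
 * Illustrative sketch of the offset decomposition (hypothetical sizes,
 * not part of this API): with buf_size = 64k (buf_size_order = 16) and
 * subbuf_size = 16k (subbuf_size_order = 14), an offset of 0x14321
 * decomposes as:
 *
 *	buf_trunc(0x14321, chan)     == 0x10000  (buffer-number bits)
 *	buf_offset(0x14321, chan)    == 0x4321   (offset within buffer)
 *	subbuf_trunc(0x14321, chan)  == 0x14000  (subbuffer-number bits)
 *	subbuf_index(0x14321, chan)  == 1        (0x4321 >> 14)
 *	subbuf_offset(0x14321, chan) == 0x0321   (offset within subbuffer)
 *	subbuf_align(0x14321, chan)  == 0x18000  (start of next subbuffer)
 */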
/*
 * Last TSC comparison functions. Check if the current TSC overflows tsc_bits
 * bits from the last TSC read. When overflows are detected, the full 64-bit
 * timestamp counter should be written in the record header. Reads and writes
 * last_tsc atomically.
 */

#if (CAA_BITS_PER_LONG == 32)
static inline
void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
		   struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return;

	/*
	 * Ensure the compiler performs this update in a single instruction.
	 */
	v_set(config, &buf->last_tsc, (unsigned long)(tsc >> config->tsc_bits));
}
static inline
int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
		      struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	unsigned long tsc_shifted;

	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return 0;

	tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
	if (caa_unlikely(tsc_shifted
			 - (unsigned long)v_read(config, &buf->last_tsc)))
		return 1;
	else
		return 0;
}
#else
static inline
void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
		   struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return;

	v_set(config, &buf->last_tsc, (unsigned long)tsc);
}
static inline
int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
		      struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return 0;

	if (caa_unlikely((tsc - v_read(config, &buf->last_tsc))
			 >> config->tsc_bits))
		return 1;
	else
		return 0;
}
#endif
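
/*
 * Illustrative sketch of the intended call pattern (hypothetical caller
 * code, not part of this API): during space reservation, a writer checks
 * for TSC overflow before saving the new TSC, and requests the full
 * 64-bit timestamp in the record header when an overflow is detected:
 *
 *	if (last_tsc_overflow(config, buf, tsc))
 *		ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
 *	save_last_tsc(config, buf, tsc);
 *
 * With tsc_bits = 27, any two events more than 2^27 cycles apart are
 * guaranteed to take the full-timestamp path.
 */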
int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx,
				 void *client_ctx);

void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf,
				 enum switch_mode mode,
				 struct lttng_ust_shm_handle *handle);
void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_lib_ring_buffer_config *config,
					struct lttng_ust_lib_ring_buffer *buf,
					struct channel *chan,
					unsigned long offset,
					unsigned long commit_count,
					unsigned long idx,
					struct lttng_ust_shm_handle *handle,
					uint64_t tsc);
/* Buffer write helpers */
static inline
void lib_ring_buffer_reserve_push_reader(struct lttng_ust_lib_ring_buffer *buf,
					 struct channel *chan,
					 unsigned long offset)
{
	unsigned long consumed_old, consumed_new;

	do {
		consumed_old = uatomic_read(&buf->consumed);
		/*
		 * If buffer is in overwrite mode, push the reader consumed
		 * count if the write position has reached it and we are not
		 * at the first iteration (don't push the reader farther than
		 * the writer). This operation can be done concurrently by many
		 * writers in the same buffer, the writer being at the farthest
		 * write position sub-buffer index in the buffer being the one
		 * which will win this loop.
		 */
		if (caa_unlikely(subbuf_trunc(offset, chan)
				 - subbuf_trunc(consumed_old, chan)
				>= chan->backend.buf_size))
			consumed_new = subbuf_align(consumed_old, chan);
		else
			return;
	} while (caa_unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
					      consumed_new) != consumed_old));
}
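
/*
 * Illustrative walk-through (hypothetical sizes, not part of this API):
 * with 4 subbuffers of 4k each (buf_size = 16k), a writer whose reserve
 * offset has wrapped a full buffer past the reader, i.e.
 *
 *	subbuf_trunc(offset, chan) - subbuf_trunc(consumed, chan) >= 16k,
 *
 * pushes consumed forward one subbuffer with subbuf_align(), retrying
 * the cmpxchg until it either wins or observes that a concurrent writer
 * has already pushed the reader at least that far.
 */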
/*
 * Move consumed position to the beginning of the subbuffer in which the
 * write offset is. Should only be used on ring buffers that are not
 * actively being written into, because clear_reader does not take into
 * account the commit counters when moving the consumed position, which
 * can make concurrent trace producers or consumers observe a consumed
 * position further than the write offset, which breaks ring buffer
 * algorithm guarantees.
 */
static inline
void lib_ring_buffer_clear_reader(struct lttng_ust_lib_ring_buffer *buf,
				  struct lttng_ust_shm_handle *handle)
{
	struct channel *chan;
	const struct lttng_ust_lib_ring_buffer_config *config;
	unsigned long offset, consumed_old, consumed_new;

	chan = shmp(handle, buf->backend.chan);
	if (!chan)
		return;
	config = &chan->backend.config;

	do {
		offset = v_read(config, &buf->offset);
		consumed_old = uatomic_read(&buf->consumed);
		CHAN_WARN_ON(chan, (long) (subbuf_trunc(offset, chan)
					   - subbuf_trunc(consumed_old, chan))
				< 0);
		consumed_new = subbuf_trunc(offset, chan);
	} while (caa_unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
					      consumed_new) != consumed_old));
}
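
/*
 * Illustrative use (hypothetical scenario, not part of this API): once
 * tracing on this buffer is quiescent, a "clear" operation can drop all
 * unread data by snapping the consumed position to the subbuffer
 * holding the current write offset:
 *
 *	lib_ring_buffer_clear_reader(buf, handle);
 *
 * Calling this while producers are still writing could let consumed
 * overtake positions that are not fully committed yet, which is why the
 * buffer must be inactive.
 */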
static inline
int lib_ring_buffer_pending_data(const struct lttng_ust_lib_ring_buffer_config *config,
				 struct lttng_ust_lib_ring_buffer *buf,
				 struct channel *chan)
{
	return !!subbuf_offset(v_read(config, &buf->offset), chan);
}
static inline
unsigned long lib_ring_buffer_get_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
					    struct lttng_ust_lib_ring_buffer *buf,
					    unsigned long idx,
					    struct lttng_ust_shm_handle *handle)
{
	return subbuffer_get_data_size(config, &buf->backend, idx, handle);
}
/*
 * Check if all space reservations in a buffer have been committed. This helps
 * knowing if an execution context is nested (for per-cpu buffers only).
 * This is a very specific ftrace use-case, so we keep this as "internal" API.
 */
static inline
int lib_ring_buffer_reserve_committed(const struct lttng_ust_lib_ring_buffer_config *config,
				      struct lttng_ust_lib_ring_buffer *buf,
				      struct channel *chan,
				      struct lttng_ust_shm_handle *handle)
{
	unsigned long offset, idx, commit_count;
	struct commit_counters_hot *cc_hot;

	CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
	CHAN_WARN_ON(chan, config->sync != RING_BUFFER_SYNC_PER_CPU);

	/*
	 * Read offset and commit count in a loop so they are both read
	 * atomically wrt interrupts. We deal with interrupt concurrency by
	 * restarting both reads if the offset has been pushed. Note that given
	 * we only have to deal with interrupt concurrency here, an interrupt
	 * modifying the commit count will also modify "offset", so it is safe
	 * to only check for offset modifications.
	 */
	do {
		offset = v_read(config, &buf->offset);
		idx = subbuf_index(offset, chan);
		cc_hot = shmp_index(handle, buf->commit_hot, idx);
		if (caa_unlikely(!cc_hot))
			return 0;
		commit_count = v_read(config, &cc_hot->cc);
	} while (offset != v_read(config, &buf->offset));

	return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
		- (commit_count & chan->commit_count_mask) == 0);
}
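
/*
 * Illustrative sketch (hypothetical caller code, not part of this API):
 * a per-cpu probe can detect that it interrupted another reservation on
 * the same buffer, because the interrupted context has reserved space
 * it has not committed yet:
 *
 *	if (!lib_ring_buffer_reserve_committed(config, buf, chan, handle)) {
 *		// Nested over an in-flight reservation.
 *	}
 */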
/*
 * Receive end of subbuffer TSC as parameter. It has been read in the
 * space reservation loop of either reserve or switch, which ensures it
 * progresses monotonically with event records in the buffer. Therefore,
 * it ensures that the end timestamp of a subbuffer is <= begin
 * timestamp of the following subbuffers.
 */
static inline
void lib_ring_buffer_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
				   struct lttng_ust_lib_ring_buffer *buf,
				   struct channel *chan,
				   unsigned long offset,
				   unsigned long commit_count,
				   unsigned long idx,
				   struct lttng_ust_shm_handle *handle,
				   uint64_t tsc)
{
	unsigned long old_commit_count = commit_count
					 - chan->backend.subbuf_size;

	/* Check if all commits have been done */
	if (caa_unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
			 - (old_commit_count & chan->commit_count_mask) == 0))
		lib_ring_buffer_check_deliver_slow(config, buf, chan, offset,
						   commit_count, idx, handle, tsc);
}
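
/*
 * Illustrative arithmetic (hypothetical sizes, not part of this API):
 * with 4 subbuffers of 4k each, a subbuffer's hot commit counter grows
 * by 4k on each wrap of the buffer once the subbuffer is fully
 * committed. Meanwhile buf_trunc(offset, chan) >> num_subbuf_order
 * equals (number of complete buffer wraps) * 4k. When the two agree
 * (modulo commit_count_mask), every byte reserved in this subbuffer up
 * to the current wrap has been committed, so delivery is delegated to
 * the slow path.
 */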
/*
 * lib_ring_buffer_write_commit_counter
 *
 * For flight recording. Must be called after commit.
 * This function increments the subbuffer's commit_seq counter each time the
 * commit count reaches back the reserve offset (modulo subbuffer size). It is
 * useful for crash dump.
 */
static inline
void lib_ring_buffer_write_commit_counter(const struct lttng_ust_lib_ring_buffer_config *config,
					  struct lttng_ust_lib_ring_buffer *buf,
					  struct channel *chan,
					  unsigned long buf_offset,
					  unsigned long commit_count,
					  struct lttng_ust_shm_handle *handle,
					  struct commit_counters_hot *cc_hot)
{
	unsigned long commit_seq_old;

	if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
		return;

	/*
	 * subbuf_offset includes commit_count_mask. We can simply
	 * compare the offsets within the subbuffer without caring about
	 * buffer full/empty mismatch because offset is never zero here
	 * (subbuffer header and record headers have non-zero length).
	 */
	if (caa_unlikely(subbuf_offset(buf_offset - commit_count, chan)))
		return;

	commit_seq_old = v_read(config, &cc_hot->seq);
	if (caa_likely((long) (commit_seq_old - commit_count) < 0))
		v_set(config, &cc_hot->seq, commit_count);
}
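
/*
 * Illustrative sketch (hypothetical values, not part of this API): in
 * OOPS-consistency mode, once the commit count catches back up with the
 * reserve offset within the subbuffer, commit_seq advances
 * monotonically:
 *
 *	subbuf_offset(buf_offset - commit_count, chan) == 0
 *	  -> cc_hot->seq is bumped to commit_count (if it is behind)
 *
 * A crash-dump reader can then trust cc_hot->seq to tell how much of
 * the subbuffer was fully committed at the time of the crash.
 */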
extern int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
				  struct channel_backend *chanb, int cpu,
				  struct lttng_ust_shm_handle *handle,
				  struct shm_object *shmobj);

extern void lib_ring_buffer_free(struct lttng_ust_lib_ring_buffer *buf,
				 struct lttng_ust_shm_handle *handle);
/* Keep track of trap nesting inside ring buffer code */
extern DECLARE_URCU_TLS(unsigned int, lib_ring_buffer_nesting);

#endif /* _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H */