Implement ring buffer clear

lib/ringbuffer/frontend_internal.h
#ifndef _LIB_RING_BUFFER_FRONTEND_INTERNAL_H
#define _LIB_RING_BUFFER_FRONTEND_INTERNAL_H

/*
 * lib/ringbuffer/frontend_internal.h
 *
 * Ring Buffer Library Synchronization Header (internal helpers).
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Author:
 *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 */

#include <wrapper/ringbuffer/config.h>
#include <wrapper/ringbuffer/backend_types.h>
#include <wrapper/ringbuffer/frontend_types.h>
#include <lib/prio_heap/lttng_prio_heap.h>	/* For per-CPU read-side iterator */

/* Buffer offset macros */

/* buf_trunc mask selects only the buffer number. */
static inline
unsigned long buf_trunc(unsigned long offset, struct channel *chan)
{
	return offset & ~(chan->backend.buf_size - 1);
}

/* Select the buffer number value (counter). */
static inline
unsigned long buf_trunc_val(unsigned long offset, struct channel *chan)
{
	return buf_trunc(offset, chan) >> chan->backend.buf_size_order;
}

/* buf_offset mask selects only the offset within the current buffer. */
static inline
unsigned long buf_offset(unsigned long offset, struct channel *chan)
{
	return offset & (chan->backend.buf_size - 1);
}

/* subbuf_offset mask selects the offset within the current subbuffer. */
static inline
unsigned long subbuf_offset(unsigned long offset, struct channel *chan)
{
	return offset & (chan->backend.subbuf_size - 1);
}

/* subbuf_trunc mask selects the subbuffer number. */
static inline
unsigned long subbuf_trunc(unsigned long offset, struct channel *chan)
{
	return offset & ~(chan->backend.subbuf_size - 1);
}

/* subbuf_align aligns the offset to the next subbuffer. */
static inline
unsigned long subbuf_align(unsigned long offset, struct channel *chan)
{
	return (offset + chan->backend.subbuf_size)
	       & ~(chan->backend.subbuf_size - 1);
}

/* subbuf_index returns the index of the current subbuffer within the buffer. */
static inline
unsigned long subbuf_index(unsigned long offset, struct channel *chan)
{
	return buf_offset(offset, chan) >> chan->backend.subbuf_size_order;
}

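/*
 * Worked example for the helpers above (illustrative only, assuming a
 * hypothetical channel configured with 4 sub-buffers of 4096 bytes,
 * i.e. subbuf_size_order = 12, buf_size = 16384): for offset = 9300,
 *
 *	buf_offset(9300)    = 9300 & 16383          = 9300
 *	subbuf_index(9300)  = 9300 >> 12            = 2
 *	subbuf_offset(9300) = 9300 & 4095           = 1108
 *	subbuf_trunc(9300)  = 9300 & ~4095          = 8192
 *	subbuf_align(9300)  = (9300 + 4096) & ~4095 = 12288
 *
 * The masks work because buf_size and subbuf_size are powers of two.
 */
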
/*
 * Last TSC comparison functions. Check if the current TSC overflows tsc_bits
 * bits from the last TSC read. When overflows are detected, the full 64-bit
 * timestamp counter should be written in the record header. Reads and writes
 * last_tsc atomically.
 */

#if (BITS_PER_LONG == 32)
static inline
void save_last_tsc(const struct lib_ring_buffer_config *config,
		   struct lib_ring_buffer *buf, u64 tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return;

	/*
	 * Ensure the compiler performs this update in a single instruction.
	 */
	v_set(config, &buf->last_tsc, (unsigned long)(tsc >> config->tsc_bits));
}

static inline
int last_tsc_overflow(const struct lib_ring_buffer_config *config,
		      struct lib_ring_buffer *buf, u64 tsc)
{
	unsigned long tsc_shifted;

	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return 0;

	tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
	if (unlikely(tsc_shifted
		     - (unsigned long)v_read(config, &buf->last_tsc)))
		return 1;
	else
		return 0;
}
#else
static inline
void save_last_tsc(const struct lib_ring_buffer_config *config,
		   struct lib_ring_buffer *buf, u64 tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return;

	v_set(config, &buf->last_tsc, (unsigned long)tsc);
}

static inline
int last_tsc_overflow(const struct lib_ring_buffer_config *config,
		      struct lib_ring_buffer *buf, u64 tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return 0;

	if (unlikely((tsc - v_read(config, &buf->last_tsc))
		     >> config->tsc_bits))
		return 1;
	else
		return 0;
}
#endif

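/*
 * Usage sketch (not a definitive call sequence; the exact names mirror
 * the reserve fast path and are assumptions here): the writer checks
 * for overflow before reserving and saves the timestamp once the
 * reservation succeeds, so a record gets a full 64-bit timestamp
 * whenever the tsc_bits-wide compressed timestamp would be ambiguous:
 *
 *	ctx->tsc = lib_ring_buffer_clock_read(chan);
 *	if (last_tsc_overflow(config, buf, ctx->tsc))
 *		ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
 *	...
 *	save_last_tsc(config, buf, ctx->tsc);
 */
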
extern
int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx);

extern
void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf,
				 enum switch_mode mode);

extern
void lib_ring_buffer_check_deliver_slow(const struct lib_ring_buffer_config *config,
					struct lib_ring_buffer *buf,
					struct channel *chan,
					unsigned long offset,
					unsigned long commit_count,
					unsigned long idx,
					u64 tsc);

extern
void lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf);
extern
void lib_ring_buffer_switch_remote_empty(struct lib_ring_buffer *buf);
extern
void lib_ring_buffer_clear(struct lib_ring_buffer *buf);

/* Buffer write helpers */

static inline
void lib_ring_buffer_reserve_push_reader(struct lib_ring_buffer *buf,
					 struct channel *chan,
					 unsigned long offset)
{
	unsigned long consumed_old, consumed_new;

	do {
		consumed_old = atomic_long_read(&buf->consumed);
		/*
		 * If buffer is in overwrite mode, push the reader consumed
		 * count if the write position has reached it and we are not
		 * at the first iteration (don't push the reader farther than
		 * the writer). This operation can be done concurrently by
		 * many writers in the same buffer; the writer at the
		 * farthest write position sub-buffer index is the one which
		 * wins this loop.
		 */
		if (unlikely(subbuf_trunc(offset, chan)
			     - subbuf_trunc(consumed_old, chan)
			     >= chan->backend.buf_size))
			consumed_new = subbuf_align(consumed_old, chan);
		else
			return;
	} while (unlikely(atomic_long_cmpxchg(&buf->consumed, consumed_old,
					      consumed_new) != consumed_old));
}

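/*
 * The loop above is the classic lock-free "advance a lagging counter"
 * pattern: read the counter, compute the pushed value, and retry the
 * cmpxchg until either the update lands or another writer has already
 * pushed the reader past us. A minimal standalone sketch of the same
 * pattern (illustrative only, using plain C11 atomics rather than the
 * kernel's atomic_long_t):
 *
 *	#include <stdatomic.h>
 *
 *	static void push_forward(atomic_ulong *counter, unsigned long target)
 *	{
 *		unsigned long old = atomic_load(counter);
 *
 *		while (old < target) {
 *			// On failure, old is reloaded for the retry.
 *			if (atomic_compare_exchange_weak(counter, &old, target))
 *				break;
 *		}
 *	}
 */
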
/*
 * Move the consumed position to the beginning of the sub-buffer in which
 * the write offset is.
 */
static inline
void lib_ring_buffer_clear_reader(struct lib_ring_buffer *buf,
				  struct channel *chan)
{
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	unsigned long offset, consumed_old, consumed_new;

	do {
		offset = v_read(config, &buf->offset);
		consumed_old = atomic_long_read(&buf->consumed);
		if (unlikely(subbuf_trunc(offset, chan)
			     - subbuf_trunc(consumed_old, chan)
			     > 0))
			consumed_new = subbuf_trunc(offset, chan);
		else
			return;
	} while (unlikely(atomic_long_cmpxchg(&buf->consumed, consumed_old,
					      consumed_new) != consumed_old));
}

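/*
 * Sketch of how a full buffer clear could combine the declarations
 * above (an assumption about lib_ring_buffer_clear's implementation,
 * not a verbatim copy; see ring_buffer_frontend.c for the real thing):
 *
 *	void lib_ring_buffer_clear(struct lib_ring_buffer *buf)
 *	{
 *		struct channel *chan = buf->backend.chan;
 *
 *		// Flush any sub-buffer the writer left half-filled...
 *		lib_ring_buffer_switch_remote_empty(buf);
 *		// ...then drop everything unread up to the write offset.
 *		lib_ring_buffer_clear_reader(buf, chan);
 *	}
 */
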
static inline
int lib_ring_buffer_pending_data(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer *buf,
				 struct channel *chan)
{
	return !!subbuf_offset(v_read(config, &buf->offset), chan);
}

static inline
unsigned long lib_ring_buffer_get_data_size(const struct lib_ring_buffer_config *config,
					    struct lib_ring_buffer *buf,
					    unsigned long idx)
{
	return subbuffer_get_data_size(config, &buf->backend, idx);
}

/*
 * Check if all space reservations in a buffer have been committed. This helps
 * determine if an execution context is nested (for per-cpu buffers only).
 * This is a very specific ftrace use-case, so we keep this as "internal" API.
 */
static inline
int lib_ring_buffer_reserve_committed(const struct lib_ring_buffer_config *config,
				      struct lib_ring_buffer *buf,
				      struct channel *chan)
{
	unsigned long offset, idx, commit_count;

	CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
	CHAN_WARN_ON(chan, config->sync != RING_BUFFER_SYNC_PER_CPU);

	/*
	 * Read offset and commit count in a loop so they are both read
	 * atomically wrt interrupts. We deal with interrupt concurrency by
	 * restarting both reads if the offset has been pushed. Note that given
	 * we only have to deal with interrupt concurrency here, an interrupt
	 * modifying the commit count will also modify "offset", so it is safe
	 * to only check for offset modifications.
	 */
	do {
		offset = v_read(config, &buf->offset);
		idx = subbuf_index(offset, chan);
		commit_count = v_read(config, &buf->commit_hot[idx].cc);
	} while (offset != v_read(config, &buf->offset));

	return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
		- (commit_count & chan->commit_count_mask) == 0);
}

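/*
 * The do/while above is a minimal interrupt-safe double-read: because
 * any interrupt that changes the commit count also moves "offset", a
 * stable offset across the loop proves both reads are consistent. A
 * generic sketch of the same idiom (illustrative only):
 *
 *	unsigned long a, b;
 *
 *	do {
 *		a = READ_ONCE(x);	// x changes on every update
 *		b = READ_ONCE(y);	// y only changes together with x
 *	} while (a != READ_ONCE(x));
 *	// Here (a, b) is a consistent snapshot wrt local interrupts.
 */
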

/*
 * Receive end of subbuffer TSC as parameter. It has been read in the
 * space reservation loop of either reserve or switch, which ensures it
 * progresses monotonically with event records in the buffer. Therefore,
 * it ensures that the end timestamp of a subbuffer is <= begin
 * timestamp of the following subbuffers.
 */
static inline
void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
				   struct lib_ring_buffer *buf,
				   struct channel *chan,
				   unsigned long offset,
				   unsigned long commit_count,
				   unsigned long idx,
				   u64 tsc)
{
	unsigned long old_commit_count = commit_count
					 - chan->backend.subbuf_size;

	/* Check if all commits have been done */
	if (unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
		     - (old_commit_count & chan->commit_count_mask) == 0))
		lib_ring_buffer_check_deliver_slow(config, buf, chan, offset,
						   commit_count, idx, tsc);
}

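/*
 * Informal reading of the test above: commit counts grow by
 * subbuf_size each time a sub-buffer is completely filled, while
 * buf_trunc(offset) >> num_subbuf_order grows by subbuf_size at each
 * complete buffer wrap-around. Worked example (illustrative numbers):
 * with subbuf_size = 4096 and num_subbuf = 4 (num_subbuf_order = 2),
 * suppose offset sits in the first sub-buffer of the second pass over
 * the buffer, so buf_trunc(offset) = 16384 and 16384 >> 2 = 4096. The
 * final commit of that sub-buffer brings commit_count to 2 * 4096 =
 * 8192, hence old_commit_count = 4096, the difference is 0, and the
 * slow delivery path is taken exactly once per filled sub-buffer.
 */
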
/*
 * lib_ring_buffer_write_commit_counter
 *
 * For flight recording: must be called after commit.
 * This function increments the subbuffer's commit_seq counter each time the
 * commit count catches up with the reserve offset (modulo subbuffer size). It
 * is useful for crash dump.
 */
static inline
void lib_ring_buffer_write_commit_counter(const struct lib_ring_buffer_config *config,
					  struct lib_ring_buffer *buf,
					  struct channel *chan,
					  unsigned long buf_offset,
					  unsigned long commit_count,
					  struct commit_counters_hot *cc_hot)
{
	unsigned long commit_seq_old;

	if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
		return;

	/*
	 * subbuf_offset includes commit_count_mask. We can simply
	 * compare the offsets within the subbuffer without caring about
	 * buffer full/empty mismatch because offset is never zero here
	 * (subbuffer header and record headers have non-zero length).
	 */
	if (unlikely(subbuf_offset(buf_offset - commit_count, chan)))
		return;

	commit_seq_old = v_read(config, &cc_hot->seq);
	if (likely((long) (commit_seq_old - commit_count) < 0))
		v_set(config, &cc_hot->seq, commit_count);
}

338
339extern int lib_ring_buffer_create(struct lib_ring_buffer *buf,
340 struct channel_backend *chanb, int cpu);
341extern void lib_ring_buffer_free(struct lib_ring_buffer *buf);
342
343/* Keep track of trap nesting inside ring buffer code */
344DECLARE_PER_CPU(unsigned int, lib_ring_buffer_nesting);
345
886d51a3 346#endif /* _LIB_RING_BUFFER_FRONTEND_INTERNAL_H */