#ifndef _LTTNG_RING_BUFFER_FRONTEND_API_H
#define _LTTNG_RING_BUFFER_FRONTEND_API_H

/*
 * libringbuffer/frontend_api.h
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Ring Buffer Library Synchronization Header (buffer write API).
 *
 * Author:
 *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free
 * algorithms.
 * See frontend.h for channel allocation and read-side API.
 */

#include "frontend.h"
#include <urcu-bp.h>
#include <urcu/compiler.h>
#include "rseq.h"

static inline
int lib_ring_buffer_get_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
{
	return lttng_ust_get_cpu();
}

/**
 * lib_ring_buffer_begin - Precedes ring buffer reserve/commit.
 *
 * Keeps a ring buffer nesting count as a supplementary safety net to
 * ensure tracer client code will never trigger an endless recursion.
 * Returns 0 on success, -EPERM on failure (nesting count too high).
 *
 * asm volatile and "memory" clobber prevent the compiler from moving
 * instructions out of the ring buffer nesting count section. This is
 * required to ensure that probe side-effects which can cause recursion
 * (e.g. unforeseen traps, divisions by 0, ...) are triggered within the
 * incremented nesting count section.
 */
static inline
int lib_ring_buffer_begin(const struct lttng_ust_lib_ring_buffer_config *config)
{
	int nesting;

	nesting = ++URCU_TLS(lib_ring_buffer_nesting);
	cmm_barrier();

	if (caa_unlikely(nesting > 4)) {
		WARN_ON_ONCE(1);
		URCU_TLS(lib_ring_buffer_nesting)--;
		return -EPERM;
	}
	return 0;
}

/**
 * lib_ring_buffer_end - Follows ring buffer reserve/commit.
 */
static inline
void lib_ring_buffer_end(const struct lttng_ust_lib_ring_buffer_config *config)
{
	cmm_barrier();
	URCU_TLS(lib_ring_buffer_nesting)--;	/* TLS */
}

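/*
 * Usage sketch (not part of the API): a tracer probe brackets the
 * reserve/commit pair between lib_ring_buffer_begin() and
 * lib_ring_buffer_end(). "my_client_config" and the payload write are
 * hypothetical placeholders for a client's own definitions.
 *
 *	static void probe_sketch(struct lttng_ust_lib_ring_buffer_ctx *ctx)
 *	{
 *		if (lib_ring_buffer_begin(&my_client_config))
 *			return;		// nesting count too high: drop event
 *		if (!lib_ring_buffer_reserve(&my_client_config, ctx)) {
 *			// ... write event payload at ctx->buf_offset ...
 *			lib_ring_buffer_commit(&my_client_config, ctx);
 *		}
 *		lib_ring_buffer_end(&my_client_config);
 *	}
 */
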
/*
 * lib_ring_buffer_try_reserve is called by lib_ring_buffer_reserve(). It is not
 * part of the API per se.
 *
 * returns 0 if reserve ok, or 1 if the slow path must be taken.
 */
static inline
int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer_ctx *ctx,
				unsigned long *o_begin, unsigned long *o_end,
				unsigned long *o_old, size_t *before_hdr_pad)
{
	struct channel *chan = ctx->chan;
	struct lttng_ust_lib_ring_buffer *buf = ctx->buf;

	*o_begin = v_read(config, &buf->offset);
	*o_old = *o_begin;

	ctx->tsc = lib_ring_buffer_clock_read(chan);
	if ((int64_t) ctx->tsc == -EIO)
		return 1;

	/*
	 * Prefetch cacheline for read because we have to read the previous
	 * commit counter to increment it and commit seq value to compare it to
	 * the commit counter.
	 */
	//prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);

	if (last_tsc_overflow(config, buf, ctx->tsc))
		ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;

	if (caa_unlikely(subbuf_offset(*o_begin, chan) == 0))
		return 1;

	ctx->slot_size = record_header_size(config, chan, *o_begin,
					    before_hdr_pad, ctx);
	ctx->slot_size +=
		lib_ring_buffer_align(*o_begin + ctx->slot_size,
				      ctx->largest_align) + ctx->data_size;
	if (caa_unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
			> chan->backend.subbuf_size))
		return 1;

	/*
	 * Record fits in the current buffer and we are not on a switch
	 * boundary. It's safe to write.
	 */
	*o_end = *o_begin + ctx->slot_size;

	if (caa_unlikely((subbuf_offset(*o_end, chan)) == 0))
		/*
		 * The offset_end will fall at the very beginning of the next
		 * subbuffer.
		 */
		return 1;

	return 0;
}

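/*
 * Worked example of the slot size arithmetic above (illustrative
 * numbers only): if the record header ends at sub-buffer offset 13 and
 * ctx->largest_align == 8, lib_ring_buffer_align() returns 3, the
 * padding needed to align the payload on an 8-byte boundary (offset
 * 16). With ctx->data_size == 24, the slot spans 13 + 3 + 24 = 40
 * bytes from *o_begin.
 */
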
/**
 * lib_ring_buffer_reserve - Reserve space in a ring buffer.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context. (input and output) Must be already initialized.
 *
 * Atomic wait-free slot reservation. The reserved space starts at the context
 * "pre_offset". Its length is "slot_size". The associated time-stamp is "tsc".
 *
 * Return:
 *	0 on success.
 *	-EPERM if channel is disabled.
 *	-ENOSPC if event size is too large for packet.
 *	-ENOBUFS if there is currently not enough space in buffer for the event.
 *	-EIO if data cannot be written into the buffer for any other reason.
 *	-EAGAIN if the reserve was aborted and should be attempted again.
 */
static inline
int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
			    struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	struct channel *chan = ctx->chan;
	struct lttng_ust_shm_handle *handle = ctx->handle;
	struct lttng_ust_lib_ring_buffer *buf;
	unsigned long o_begin, o_end, o_old;
	size_t before_hdr_pad = 0;
	struct lttng_rseq_state rseq_state;

	if (caa_likely(ctx->ctx_len
			>= sizeof(struct lttng_ust_lib_ring_buffer_ctx))) {
		rseq_state = ctx->rseq_state;
	} else {
		rseq_state.cpu_id = -2;
		rseq_state.event_counter = 0;
		rseq_state.rseqp = NULL;
	}

	if (caa_unlikely(uatomic_read(&chan->record_disabled)))
		return -EPERM;
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp);
	else
		buf = shmp(handle, chan->backend.buf[0].shmp);
	if (caa_unlikely(!buf))
		return -EIO;
	if (caa_unlikely(uatomic_read(&buf->record_disabled)))
		return -EPERM;
	ctx->buf = buf;

	/*
	 * Perform retryable operations.
	 */
	if (caa_unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
			&o_end, &o_old, &before_hdr_pad)))
		goto slow_path;

	if (caa_unlikely(config->sync == RING_BUFFER_SYNC_GLOBAL
			|| rseq_state.cpu_id < 0
			|| uatomic_read(&chan->u.reserve_fallback_ref))) {
		if (caa_unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old,
				o_end) != o_old))
			goto slow_path;
	} else {
		/*
		 * Load reserve_fallback_ref before offset. Matches the
		 * implicit memory barrier after v_cmpxchg of offset.
		 */
		cmm_smp_rmb();
		if (caa_unlikely(ctx->buf->offset.a != o_old))
			return -EAGAIN;
		if (caa_unlikely(!__rseq_finish(NULL, 0, NULL, NULL, 0,
				(intptr_t *) &ctx->buf->offset.a,
				(intptr_t) o_end,
				rseq_state, RSEQ_FINISH_SINGLE, false)))
			return -EAGAIN;
	}
	/*
	 * Atomically update last_tsc. This update races against concurrent
	 * atomic updates, but the race will always cause supplementary full TSC
	 * record headers, never the opposite (missing a full TSC record header
	 * when it would be needed).
	 */
	save_last_tsc(config, ctx->buf, ctx->tsc);

	/*
	 * Push the reader if necessary.
	 */
	lib_ring_buffer_reserve_push_reader(ctx->buf, chan, o_end - 1);

	/*
	 * Clear noref flag for this subbuffer.
	 */
	lib_ring_buffer_clear_noref(config, &ctx->buf->backend,
				    subbuf_index(o_end - 1, chan), handle);

	ctx->pre_offset = o_begin;
	ctx->buf_offset = o_begin + before_hdr_pad;
	return 0;
slow_path:
	return lib_ring_buffer_reserve_slow(ctx);
}
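
/*
 * Error-handling sketch (hypothetical caller code, not part of this
 * header): -EAGAIN only means the rseq fast path aborted, so the
 * caller may retry the reservation; the other error codes are final
 * for this event.
 *
 *	int ret;
 *
 *	do {
 *		ret = lib_ring_buffer_reserve(&my_client_config, ctx);
 *	} while (ret == -EAGAIN);
 *	if (ret)
 *		return;		// -EPERM/-ENOSPC/-ENOBUFS/-EIO: drop event
 */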

/**
 * lib_ring_buffer_switch - Perform a sub-buffer switch for a per-cpu buffer.
 * @config: ring buffer instance configuration.
 * @buf: buffer
 * @mode: buffer switch mode (SWITCH_ACTIVE or SWITCH_FLUSH)
 *
 * This operation is completely reentrant: it can be called while tracing is
 * active with absolutely no lock held.
 *
 * Note, however, that as a v_cmpxchg is used for some atomic operations and
 * must be executed locally for per-CPU buffers, this function must be called
 * from the CPU which owns the buffer for an ACTIVE flush, with preemption
 * disabled, for the RING_BUFFER_SYNC_PER_CPU configuration.
 */
static inline
void lib_ring_buffer_switch(const struct lttng_ust_lib_ring_buffer_config *config,
			    struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
			    struct lttng_ust_shm_handle *handle)
{
	lib_ring_buffer_switch_slow(buf, mode, handle);
}
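
/*
 * Flush sketch (hypothetical caller code): assuming "buf" and "handle"
 * were looked up from the channel beforehand, a final flush at tracing
 * teardown pushes the partially filled sub-buffer to the consumer:
 *
 *	lib_ring_buffer_switch(&my_client_config, buf, SWITCH_FLUSH, handle);
 */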

/* See lib_ring_buffer_reserve() above. */

/**
 * lib_ring_buffer_commit - Commit a record.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context. (input arguments only)
 *
 * Atomic unordered slot commit. Increments the commit count in the
 * specified sub-buffer, and delivers it if necessary.
 */
static inline
void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *config,
			    const struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	struct channel *chan = ctx->chan;
	struct lttng_ust_shm_handle *handle = ctx->handle;
	struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
	unsigned long offset_end = ctx->buf_offset;
	unsigned long endidx = subbuf_index(offset_end - 1, chan);
	unsigned long commit_count;
	struct commit_counters_hot *cc_hot = shmp_index(handle,
						buf->commit_hot, endidx);
	struct lttng_rseq_state rseq_state;

	if (caa_likely(ctx->ctx_len
			>= sizeof(struct lttng_ust_lib_ring_buffer_ctx))) {
		rseq_state = ctx->rseq_state;
	} else {
		rseq_state.cpu_id = -2;
		rseq_state.event_counter = 0;
		rseq_state.rseqp = NULL;
	}

	if (caa_unlikely(!cc_hot))
		return;

	/*
	 * Must count record before incrementing the commit count.
	 */
	subbuffer_count_record(config, ctx, &buf->backend, endidx, handle);

	/*
	 * Order all writes to buffer before the commit count update that will
	 * determine that the subbuffer is full.
	 */
	cmm_smp_wmb();

	if (caa_likely(config->sync == RING_BUFFER_SYNC_PER_CPU
			&& rseq_state.cpu_id >= 0)) {
		unsigned long newv;

		newv = cc_hot->cc_rseq + ctx->slot_size;
		if (caa_likely(__rseq_finish(NULL, 0, NULL, NULL, 0,
				(intptr_t *) &cc_hot->cc_rseq,
				(intptr_t) newv,
				rseq_state, RSEQ_FINISH_SINGLE, false)))
			goto add_done;
	}
	v_add(config, ctx->slot_size, &cc_hot->cc);
add_done:

	/*
	 * The commit count read can race with concurrent out-of-order commit
	 * count updates. This is only needed for lib_ring_buffer_check_deliver
	 * (for non-polling delivery only) and for
	 * lib_ring_buffer_write_commit_counter. The race can only cause the
	 * counter to be read with the same value more than once, which could
	 * cause:
	 * - Multiple delivery for the same sub-buffer (which is handled
	 *   gracefully by the reader code) if the value is for a full
	 *   sub-buffer. It's important that we can never miss a sub-buffer
	 *   delivery. Re-reading the value after the v_add ensures this.
	 * - Reading a commit_count with a higher value than what was actually
	 *   added to it for the lib_ring_buffer_write_commit_counter call
	 *   (again caused by a concurrent committer). It does not matter,
	 *   because this function is interested in the fact that the commit
	 *   count reaches back the reserve offset for a specific sub-buffer,
	 *   which is completely independent of the order.
	 */
	commit_count = v_read(config, &cc_hot->cc);
	commit_count += cc_hot->cc_rseq;

	lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
				      commit_count, endidx, handle, ctx->tsc);
	/*
	 * Update used size at each commit. It's needed only for extracting
	 * ring_buffer buffers from vmcore, after crash.
	 */
	lib_ring_buffer_write_commit_counter(config, buf, chan,
			offset_end, commit_count, handle, cc_hot);
}
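
/*
 * Note on the split commit counter above: with the rseq fast path, the
 * committed byte count for a sub-buffer is logically cc + cc_rseq,
 * since the rseq sequence adds slot_size to cc_rseq while the atomic
 * fallback adds it to cc. For instance (illustrative numbers), three
 * 16-byte slots committed through __rseq_finish() and one through the
 * v_add() fallback leave cc_rseq == 48 and cc == 16, so commit_count
 * == 64, the full amount reserved in that sub-buffer.
 */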

/**
 * lib_ring_buffer_try_discard_reserve - Try discarding a record.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context. (input arguments only)
 *
 * Only succeeds if no other record has been written after the record to
 * discard. If discard fails, the record must be committed to the buffer.
 *
 * Returns 0 upon success, -EPERM if the record cannot be discarded.
 */
static inline
int lib_ring_buffer_try_discard_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
					const struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
	unsigned long end_offset = ctx->pre_offset + ctx->slot_size;

	/*
	 * We need to ensure that if the cmpxchg succeeds and discards the
	 * record, the next record will record a full TSC, because it cannot
	 * rely on the last_tsc associated with the discarded record to detect
	 * overflows. The only way to ensure this is to set the last_tsc to 0
	 * (assuming no 64-bit TSC overflow), which forces writing a 64-bit
	 * timestamp in the next record.
	 *
	 * Note: if discard fails, we must leave the TSC in the record header.
	 * It is needed to keep track of TSC overflows for the following
	 * records.
	 */
	save_last_tsc(config, buf, 0ULL);

	if (caa_likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
			!= end_offset))
		return -EPERM;
	else
		return 0;
}
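
/*
 * Discard sketch (hypothetical caller code): a client that decides,
 * after reserving, that the record should not be kept (e.g. event
 * serialization failed partway) tries to discard it, and commits only
 * if another record was already reserved behind it:
 *
 *	if (lib_ring_buffer_try_discard_reserve(&my_client_config, ctx))
 *		lib_ring_buffer_commit(&my_client_config, ctx);
 */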

static inline
void channel_record_disable(const struct lttng_ust_lib_ring_buffer_config *config,
			    struct channel *chan)
{
	uatomic_inc(&chan->record_disabled);
}

static inline
void channel_record_enable(const struct lttng_ust_lib_ring_buffer_config *config,
			   struct channel *chan)
{
	uatomic_dec(&chan->record_disabled);
}

static inline
void lib_ring_buffer_record_disable(const struct lttng_ust_lib_ring_buffer_config *config,
				    struct lttng_ust_lib_ring_buffer *buf)
{
	uatomic_inc(&buf->record_disabled);
}

static inline
void lib_ring_buffer_record_enable(const struct lttng_ust_lib_ring_buffer_config *config,
				   struct lttng_ust_lib_ring_buffer *buf)
{
	uatomic_dec(&buf->record_disabled);
}

#endif /* _LTTNG_RING_BUFFER_FRONTEND_API_H */