#ifndef _LTTNG_RING_BUFFER_FRONTEND_API_H
#define _LTTNG_RING_BUFFER_FRONTEND_API_H

/*
 * libringbuffer/frontend_api.h
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Ring Buffer Library Synchronization Header (buffer write API).
 *
 * Author:
 *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free
 * algorithms.
 * See frontend.h for channel allocation and read-side API.
 */

#include "frontend.h"
#include <urcu-bp.h>
#include <urcu/compiler.h>
#include "rseq.h"

static inline
int lib_ring_buffer_get_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
{
	return lttng_ust_get_cpu();
}

/**
 * lib_ring_buffer_begin - Precedes ring buffer reserve/commit.
 *
 * Keeps a ring buffer nesting count as a supplementary safety net to
 * ensure tracer client code will never trigger an endless recursion.
 * Returns 0 on success, -EPERM on failure (nesting count too high).
 *
 * asm volatile and "memory" clobber prevent the compiler from moving
 * instructions out of the ring buffer nesting count. This is required to
 * ensure that probe side-effects which can cause recursion (e.g. unforeseen
 * traps, divisions by 0, ...) are triggered within the incremented nesting
 * count section.
 */
static inline
int lib_ring_buffer_begin(const struct lttng_ust_lib_ring_buffer_config *config)
{
	int nesting;

	nesting = ++URCU_TLS(lib_ring_buffer_nesting);
	cmm_barrier();

	if (caa_unlikely(nesting > 4)) {
		WARN_ON_ONCE(1);
		URCU_TLS(lib_ring_buffer_nesting)--;
		return -EPERM;
	}
	return 0;
}

/**
 * lib_ring_buffer_end - Follows ring buffer reserve/commit.
 */
static inline
void lib_ring_buffer_end(const struct lttng_ust_lib_ring_buffer_config *config)
{
	cmm_barrier();
	URCU_TLS(lib_ring_buffer_nesting)--;	/* TLS */
}

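/*
 * Usage sketch (illustrative only, not part of this header): every
 * write-side fast path is expected to be bracketed by the pair above, so
 * that recursion caused by probe side-effects trips the nesting count
 * instead of looping forever. "client_config" is a hypothetical
 * client-provided configuration.
 */
#if 0	/* example sketch only */
	if (lib_ring_buffer_begin(&client_config))
		return;			/* nesting too deep: drop the event */
	/* ... lib_ring_buffer_reserve()/commit() and payload writes ... */
	lib_ring_buffer_end(&client_config);
#endif
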
/*
 * lib_ring_buffer_try_reserve is called by lib_ring_buffer_reserve(). It is not
 * part of the API per se.
 *
 * Returns 0 if reserve ok, or 1 if the slow path must be taken.
 */
static inline
int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer_ctx *ctx,
				unsigned long *o_begin, unsigned long *o_end,
				unsigned long *o_old, size_t *before_hdr_pad)
{
	struct channel *chan = ctx->chan;
	struct lttng_ust_lib_ring_buffer *buf = ctx->buf;

	*o_begin = v_read(config, &buf->offset);
	*o_old = *o_begin;

	ctx->tsc = lib_ring_buffer_clock_read(chan);
	if ((int64_t) ctx->tsc == -EIO)
		return 1;

	/*
	 * Prefetch cacheline for read because we have to read the previous
	 * commit counter to increment it and commit seq value to compare it to
	 * the commit counter.
	 */
	//prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);

	if (last_tsc_overflow(config, buf, ctx->tsc))
		ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;

	if (caa_unlikely(subbuf_offset(*o_begin, chan) == 0))
		return 1;

	ctx->slot_size = record_header_size(config, chan, *o_begin,
					    before_hdr_pad, ctx);
	ctx->slot_size +=
		lib_ring_buffer_align(*o_begin + ctx->slot_size,
				      ctx->largest_align) + ctx->data_size;
	if (caa_unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
			 > chan->backend.subbuf_size))
		return 1;

	/*
	 * Record fits in the current buffer and we are not on a switch
	 * boundary. It's safe to write.
	 */
	*o_end = *o_begin + ctx->slot_size;

	if (caa_unlikely((subbuf_offset(*o_end, chan)) == 0))
		/*
		 * The offset_end will fall at the very beginning of the next
		 * subbuffer.
		 */
		return 1;

	return 0;
}

/**
 * lib_ring_buffer_reserve - Reserve space in a ring buffer.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context. (input and output) Must be already initialized.
 *
 * Atomic wait-free slot reservation. The reserved space starts at the context
 * "pre_offset". Its length is "slot_size". The associated time-stamp is "tsc".
 *
 * Return:
 *  0 on success.
 * -EAGAIN if channel is disabled.
 * -ENOSPC if event size is too large for packet.
 * -ENOBUFS if there is currently not enough space in buffer for the event.
 * -EIO if data cannot be written into the buffer for any other reason.
 */
static inline
int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
			    struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	struct channel *chan = ctx->chan;
	struct lttng_ust_shm_handle *handle = ctx->handle;
	struct lttng_ust_lib_ring_buffer *buf;
	unsigned long o_begin, o_end, o_old;
	size_t before_hdr_pad = 0;

	if (caa_unlikely(uatomic_read(&chan->record_disabled)))
		return -EAGAIN;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp);
	else
		buf = shmp(handle, chan->backend.buf[0].shmp);
	if (caa_unlikely(!buf))
		return -EIO;
	if (caa_unlikely(uatomic_read(&buf->record_disabled)))
		return -EAGAIN;
	ctx->buf = buf;

	/*
	 * Perform retryable operations.
	 */
	if (caa_unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
						     &o_end, &o_old, &before_hdr_pad)))
		goto slow_path;

	if (caa_unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
			 != o_old))
		goto slow_path;

	/*
	 * Atomically update last_tsc. This update races against concurrent
	 * atomic updates, but the race will always cause supplementary full TSC
	 * record headers, never the opposite (missing a full TSC record header
	 * when it would be needed).
	 */
	save_last_tsc(config, ctx->buf, ctx->tsc);

	/*
	 * Push the reader if necessary.
	 */
	lib_ring_buffer_reserve_push_reader(ctx->buf, chan, o_end - 1);

	/*
	 * Clear noref flag for this subbuffer.
	 */
	lib_ring_buffer_clear_noref(config, &ctx->buf->backend,
				    subbuf_index(o_end - 1, chan), handle);

	ctx->pre_offset = o_begin;
	ctx->buf_offset = o_begin + before_hdr_pad;
	return 0;
slow_path:
	return lib_ring_buffer_reserve_slow(ctx);
}

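/*
 * Illustrative sketch (not part of this header): mapping the
 * lib_ring_buffer_reserve() return codes documented above onto a drop
 * policy. "client_config" and "record_lost()" are hypothetical
 * client-side names.
 */
#if 0	/* example sketch only */
	ret = lib_ring_buffer_reserve(&client_config, &ctx);
	switch (ret) {
	case 0:
		break;			/* space reserved at ctx.buf_offset */
	case -EAGAIN:			/* channel or buffer disabled */
	case -ENOSPC:			/* event larger than a packet */
	case -ENOBUFS:			/* buffer currently full */
	case -EIO:			/* clock read or mapping error */
	default:
		record_lost(ret);	/* hypothetical accounting helper */
		return;
	}
#endif
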
/**
 * lib_ring_buffer_switch - Perform a sub-buffer switch for a per-cpu buffer.
 * @config: ring buffer instance configuration.
 * @buf: buffer
 * @mode: buffer switch mode (SWITCH_ACTIVE or SWITCH_FLUSH)
 *
 * This operation is completely reentrant: it can be called while tracing is
 * active with absolutely no lock held.
 *
 * Note, however, that since a v_cmpxchg is used for some atomic operations and
 * must be executed locally for per-CPU buffers, this function must be called
 * from the CPU which owns the buffer for an ACTIVE flush, with preemption
 * disabled, for the RING_BUFFER_SYNC_PER_CPU configuration.
 */
static inline
void lib_ring_buffer_switch(const struct lttng_ust_lib_ring_buffer_config *config,
			    struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
			    struct lttng_ust_shm_handle *handle)
{
	lib_ring_buffer_switch_slow(buf, mode, handle);
}

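/*
 * Illustrative sketch (not part of this header): forcing a SWITCH_FLUSH
 * so a consumer can read a partially filled sub-buffer, e.g. at stream
 * teardown. All names except lib_ring_buffer_switch() and SWITCH_FLUSH
 * are hypothetical.
 */
#if 0	/* example sketch only */
	lib_ring_buffer_switch(&client_config, buf, SWITCH_FLUSH, handle);
#endif
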
/**
 * lib_ring_buffer_commit - Commit a record.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context. (input arguments only)
 *
 * Atomic unordered slot commit. Increments the commit count in the
 * specified sub-buffer, and delivers it if necessary.
 */
static inline
void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *config,
			    const struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	struct channel *chan = ctx->chan;
	struct lttng_ust_shm_handle *handle = ctx->handle;
	struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
	unsigned long offset_end = ctx->buf_offset;
	unsigned long endidx = subbuf_index(offset_end - 1, chan);
	unsigned long commit_count;
	struct commit_counters_hot *cc_hot = shmp_index(handle,
						buf->commit_hot, endidx);
	struct lttng_rseq_state rseq_state;

	if (caa_likely(ctx->ctx_len
			>= sizeof(struct lttng_ust_lib_ring_buffer_ctx))) {
		rseq_state = ctx->rseq_state;
	} else {
		rseq_state.cpu_id = -2;
	}

	if (caa_unlikely(!cc_hot))
		return;

	/*
	 * Must count record before incrementing the commit count.
	 */
	subbuffer_count_record(config, ctx, &buf->backend, endidx, handle);

	/*
	 * Order all writes to buffer before the commit count update that will
	 * determine that the subbuffer is full.
	 */
	cmm_smp_wmb();

	if (caa_likely(rseq_state.cpu_id >= 0)) {
		unsigned long newv;

		newv = cc_hot->cc_rseq + ctx->slot_size;
		if (caa_likely(__rseq_finish(NULL, 0, NULL, NULL, 0,
				(intptr_t *)&cc_hot->cc_rseq,
				(intptr_t) newv,
				rseq_state, RSEQ_FINISH_SINGLE, false)))
			goto add_done;
	}
	v_add(config, ctx->slot_size, &cc_hot->cc);
add_done:

	/*
	 * The commit count read can race with concurrent out-of-order commit
	 * count updates. This is only needed for lib_ring_buffer_check_deliver
	 * (for non-polling delivery only) and for
	 * lib_ring_buffer_write_commit_counter. The race can only cause the
	 * counter to be read with the same value more than once, which could
	 * cause:
	 * - Multiple delivery for the same sub-buffer (which is handled
	 *   gracefully by the reader code) if the value is for a full
	 *   sub-buffer. It's important that we can never miss a sub-buffer
	 *   delivery. Re-reading the value after the v_add ensures this.
	 * - Reading a commit_count with a higher value than what was actually
	 *   added to it for the lib_ring_buffer_write_commit_counter call
	 *   (again caused by a concurrent committer). It does not matter,
	 *   because this function is interested in the fact that the commit
	 *   count reaches back the reserve offset for a specific sub-buffer,
	 *   which is completely independent of the order.
	 */
	commit_count = v_read(config, &cc_hot->cc);
	commit_count += cc_hot->cc_rseq;

	lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
				      commit_count, endidx, handle, ctx->tsc);
	/*
	 * Update used size at each commit. It's needed only for extracting
	 * ring_buffer buffers from vmcore, after crash.
	 */
	lib_ring_buffer_write_commit_counter(config, buf, chan,
					     offset_end, commit_count, handle, cc_hot);
}

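/*
 * End-to-end sketch (illustrative only, not part of this header): the
 * canonical reserve/write/commit sequence under the begin/end nesting
 * guard. It assumes lib_ring_buffer_ctx_init() and lib_ring_buffer_write()
 * are available from the frontend/backend headers; "client_config",
 * "chan", "handle" and the payload are hypothetical.
 */
#if 0	/* example sketch only */
static
void example_write_event(struct channel *chan,
			 struct lttng_ust_shm_handle *handle,
			 const char *payload, size_t len)
{
	struct lttng_ust_lib_ring_buffer_ctx ctx;
	int cpu;

	cpu = lib_ring_buffer_get_cpu(&client_config);
	if (lib_ring_buffer_begin(&client_config))
		return;				/* nesting too deep: drop */
	lib_ring_buffer_ctx_init(&ctx, chan, NULL, len,
				 lttng_alignof(char), cpu, handle);
	if (lib_ring_buffer_reserve(&client_config, &ctx))
		goto end;			/* see return codes above */
	lib_ring_buffer_write(&client_config, &ctx, payload, len);
	lib_ring_buffer_commit(&client_config, &ctx);
end:
	lib_ring_buffer_end(&client_config);
}
#endif
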
/**
 * lib_ring_buffer_try_discard_reserve - Try discarding a record.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context. (input arguments only)
 *
 * Only succeeds if no other record has been written after the record to
 * discard. If discard fails, the record must be committed to the buffer.
 *
 * Returns 0 upon success, -EPERM if the record cannot be discarded.
 */
static inline
int lib_ring_buffer_try_discard_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
					const struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
	unsigned long end_offset = ctx->pre_offset + ctx->slot_size;

	/*
	 * We need to ensure that if the cmpxchg succeeds and discards the
	 * record, the next record will record a full TSC, because it cannot
	 * rely on the last_tsc associated with the discarded record to detect
	 * overflows. The only way to ensure this is to set the last_tsc to 0
	 * (assuming no 64-bit TSC overflow), which forces writing a 64-bit
	 * timestamp in the next record.
	 *
	 * Note: if discard fails, we must leave the TSC in the record header.
	 * It is needed to keep track of TSC overflows for the following
	 * records.
	 */
	save_last_tsc(config, buf, 0ULL);

	if (caa_likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
			!= end_offset))
		return -EPERM;
	else
		return 0;
}

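/*
 * Illustrative sketch (not part of this header): discarding a reserved
 * slot when a post-reservation check rejects the record. If the discard
 * races with a later writer it fails, and the slot must be committed
 * anyway, as documented above. "event_filter_accept()" and
 * "client_config" are hypothetical.
 */
#if 0	/* example sketch only */
	if (!event_filter_accept(&ctx)) {	/* hypothetical filter */
		if (lib_ring_buffer_try_discard_reserve(&client_config, &ctx))
			lib_ring_buffer_commit(&client_config, &ctx);
		return;
	}
#endif
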
static inline
void channel_record_disable(const struct lttng_ust_lib_ring_buffer_config *config,
			    struct channel *chan)
{
	uatomic_inc(&chan->record_disabled);
}

static inline
void channel_record_enable(const struct lttng_ust_lib_ring_buffer_config *config,
			   struct channel *chan)
{
	uatomic_dec(&chan->record_disabled);
}

static inline
void lib_ring_buffer_record_disable(const struct lttng_ust_lib_ring_buffer_config *config,
				    struct lttng_ust_lib_ring_buffer *buf)
{
	uatomic_inc(&buf->record_disabled);
}

static inline
void lib_ring_buffer_record_enable(const struct lttng_ust_lib_ring_buffer_config *config,
				   struct lttng_ust_lib_ring_buffer *buf)
{
	uatomic_dec(&buf->record_disabled);
}

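/*
 * Illustrative sketch (not part of this header): the disable/enable pairs
 * above act as counting semaphores, so nested disables are allowed as long
 * as each is matched by an enable. "client_config" and "chan" are
 * hypothetical.
 */
#if 0	/* example sketch only */
	channel_record_disable(&client_config, chan);	/* drop new records */
	/* ... reconfigure, snapshot, etc. ... */
	channel_record_enable(&client_config, chan);	/* resume tracing */
#endif
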
#endif /* _LTTNG_RING_BUFFER_FRONTEND_API_H */