Use rseq for commit counter
[lttng-ust.git] / include / lttng / ringbuffer-config.h
1 #ifndef _LTTNG_RING_BUFFER_CONFIG_H
2 #define _LTTNG_RING_BUFFER_CONFIG_H
3
4 /*
5 * lttng/ringbuffer-config.h
6 *
7 * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Ring buffer configuration header. Note: after declaring the standard inline
10 * functions, clients should also include linux/ringbuffer/api.h.
11 *
12 * Permission is hereby granted, free of charge, to any person obtaining a copy
13 * of this software and associated documentation files (the "Software"), to deal
14 * in the Software without restriction, including without limitation the rights
15 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 * copies of the Software, and to permit persons to whom the Software is
17 * furnished to do so, subject to the following conditions:
18 *
19 * The above copyright notice and this permission notice shall be included in
20 * all copies or substantial portions of the Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
28 * SOFTWARE.
29 */
30
31 #include <errno.h>
32 #include "lttng/ust-tracer.h"
33 #include <stdint.h>
34 #include <stddef.h>
35 #include <urcu/arch.h>
36 #include <string.h>
37 #include "lttng/align.h"
38 #include <lttng/ust-compiler.h>
39
40 struct lttng_ust_lib_ring_buffer;
41 struct channel;
42 struct lttng_ust_lib_ring_buffer_config;
43 struct lttng_ust_lib_ring_buffer_ctx;
44 struct lttng_ust_shm_handle;
45
46 /*
47 * Ring buffer client callbacks. Only used by slow path, never on fast path.
48 * For the fast path, record_header_size(), ring_buffer_clock_read() should be
49 * provided as inline functions too. These may simply return 0 if not used by
50 * the client.
51 */
struct lttng_ust_lib_ring_buffer_client_cb {
	/* Mandatory callbacks */

	/* A static inline version is also required for fast path */
	uint64_t (*ring_buffer_clock_read) (struct channel *chan);
	/*
	 * Size of the record header for a record written at @offset;
	 * @pre_header_padding receives the alignment padding inserted
	 * before the header.
	 */
	size_t (*record_header_size) (const struct lttng_ust_lib_ring_buffer_config *config,
				      struct channel *chan, size_t offset,
				      size_t *pre_header_padding,
				      struct lttng_ust_lib_ring_buffer_ctx *ctx);

	/* Slow path only, at subbuffer switch */
	size_t (*subbuffer_header_size) (void);
	/* Invoked when sub-buffer @subbuf_idx is opened, with switch timestamp @tsc. */
	void (*buffer_begin) (struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
			      unsigned int subbuf_idx,
			      struct lttng_ust_shm_handle *handle);
	/* Invoked when sub-buffer @subbuf_idx is closed, with @data_size bytes of payload. */
	void (*buffer_end) (struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
			    unsigned int subbuf_idx, unsigned long data_size,
			    struct lttng_ust_shm_handle *handle);

	/* Optional callbacks (can be set to NULL) */

	/* Called at buffer creation/finalize */
	int (*buffer_create) (struct lttng_ust_lib_ring_buffer *buf, void *priv,
			      int cpu, const char *name,
			      struct lttng_ust_shm_handle *handle);
	/*
	 * Clients should guarantee that no new reader handle can be opened
	 * after finalize.
	 */
	void (*buffer_finalize) (struct lttng_ust_lib_ring_buffer *buf,
				 void *priv, int cpu,
				 struct lttng_ust_shm_handle *handle);

	/*
	 * Extract header length, payload length and timestamp from event
	 * record. Used by buffer iterators. Timestamp is only used by channel
	 * iterator.
	 */
	void (*record_get) (const struct lttng_ust_lib_ring_buffer_config *config,
			    struct channel *chan, struct lttng_ust_lib_ring_buffer *buf,
			    size_t offset, size_t *header_len,
			    size_t *payload_len, uint64_t *timestamp,
			    struct lttng_ust_shm_handle *handle);
	/*
	 * Offset and size of content size field in client.
	 */
	void (*content_size_field) (const struct lttng_ust_lib_ring_buffer_config *config,
				    size_t *offset, size_t *length);
	/* Offset and size of the packet size field in the client header. */
	void (*packet_size_field) (const struct lttng_ust_lib_ring_buffer_config *config,
				   size_t *offset, size_t *length);
};
103
104 /*
105 * Ring buffer instance configuration.
106 *
107 * Declare as "static const" within the client object to ensure the inline fast
108 * paths can be optimized.
109 *
110 * alloc/sync pairs:
111 *
112 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_PER_CPU :
113 * Per-cpu buffers with per-cpu synchronization. Tracing must be performed
114 * with preemption disabled (lib_ring_buffer_get_cpu() and
115 * lib_ring_buffer_put_cpu()).
116 *
117 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_GLOBAL :
118 * Per-cpu buffer with global synchronization. Tracing can be performed with
119 * preemption enabled, statistically stays on the local buffers.
120 *
121 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_PER_CPU :
122 * Should only be used for buffers belonging to a single thread or protected
123 * by mutual exclusion by the client. Note that periodical sub-buffer switch
124 * should be disabled in this kind of configuration.
125 *
126 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_GLOBAL :
127 * Global shared buffer with global synchronization.
128 *
129 * wakeup:
130 *
131 * RING_BUFFER_WAKEUP_BY_TIMER uses per-cpu deferrable timers to poll the
132 * buffers and wake up readers if data is ready. Mainly useful for tracers which
133 * don't want to call into the wakeup code on the tracing path. Use in
134 * combination with "read_timer_interval" channel_create() argument.
135 *
136 * RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a subbuffer is
137 * ready to read. Lower latencies before the reader is woken up. Mainly suitable
138 * for drivers.
139 *
 * RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
 * has the responsibility to perform wakeups. (Note: this mode is described
 * here but has no corresponding enumerator in
 * enum lttng_ust_lib_ring_buffer_wakeup_types below.)
142 */
143 #define LTTNG_UST_RING_BUFFER_CONFIG_PADDING 20
144
/* Buffer allocation policy (see alloc/sync pairing rules above). */
enum lttng_ust_lib_ring_buffer_alloc_types {
	RING_BUFFER_ALLOC_PER_CPU,	/* one buffer per cpu */
	RING_BUFFER_ALLOC_GLOBAL,	/* single buffer shared by all cpus */
};
149
/* Writer synchronization scope (see alloc/sync pairing rules above). */
enum lttng_ust_lib_ring_buffer_sync_types {
	RING_BUFFER_SYNC_PER_CPU,	/* Wait-free */
	RING_BUFFER_SYNC_GLOBAL,	/* Lock-free */
};
154
/* Behavior when the buffer is full. */
enum lttng_ust_lib_ring_buffer_mode_types {
	RING_BUFFER_OVERWRITE = 0,	/* Overwrite when buffer full */
	RING_BUFFER_DISCARD = 1,	/* Discard when buffer full */
};
159
/* How the consumer reads data out of the buffer. */
enum lttng_ust_lib_ring_buffer_output_types {
	RING_BUFFER_SPLICE,
	RING_BUFFER_MMAP,
	RING_BUFFER_READ,		/* TODO */
	RING_BUFFER_ITERATOR,
	RING_BUFFER_NONE,
};
167
/* Backing memory type for the buffer. */
enum lttng_ust_lib_ring_buffer_backend_types {
	RING_BUFFER_PAGE,
	RING_BUFFER_VMAP,		/* TODO */
	RING_BUFFER_STATIC,		/* TODO */
};
173
/* Whether buffer state must stay readable after a crash (oops). */
enum lttng_ust_lib_ring_buffer_oops_types {
	RING_BUFFER_NO_OOPS_CONSISTENCY,
	RING_BUFFER_OOPS_CONSISTENCY,
};
178
/* Whether an IPI barrier is used to synchronize with remote cpus. */
enum lttng_ust_lib_ring_buffer_ipi_types {
	RING_BUFFER_IPI_BARRIER,
	RING_BUFFER_NO_IPI_BARRIER,
};
183
/* Reader wakeup policy (see the "wakeup:" section of the comment above). */
enum lttng_ust_lib_ring_buffer_wakeup_types {
	RING_BUFFER_WAKEUP_BY_TIMER,	/* wake up performed by timer */
	RING_BUFFER_WAKEUP_BY_WRITER,	/*
					 * writer wakes up reader,
					 * not lock-free
					 * (takes spinlock).
					 */
	/*
	 * NOTE(review): the configuration comment above also describes
	 * RING_BUFFER_WAKEUP_NONE, but no such enumerator exists here —
	 * confirm whether it is unimplemented or the comment is stale.
	 */
};
192
/*
 * Per-client ring buffer configuration. Declared "static const" by clients
 * so the inline fast paths can specialize at compile time (see the block
 * comment above for the meaning of each alloc/sync/wakeup combination).
 */
struct lttng_ust_lib_ring_buffer_config {
	enum lttng_ust_lib_ring_buffer_alloc_types alloc;	/* buffer allocation policy */
	enum lttng_ust_lib_ring_buffer_sync_types sync;		/* writer synchronization scope */
	enum lttng_ust_lib_ring_buffer_mode_types mode;		/* overwrite vs. discard on full */
	enum lttng_ust_lib_ring_buffer_output_types output;	/* consumer output method */
	enum lttng_ust_lib_ring_buffer_backend_types backend;	/* backing memory type */
	enum lttng_ust_lib_ring_buffer_oops_types oops;		/* crash-consistency requirement */
	enum lttng_ust_lib_ring_buffer_ipi_types ipi;		/* IPI barrier usage */
	enum lttng_ust_lib_ring_buffer_wakeup_types wakeup;	/* reader wakeup policy */
	/*
	 * tsc_bits: timestamp bits saved at each record.
	 * 0 and 64 disable the timestamp compression scheme.
	 */
	unsigned int tsc_bits;
	struct lttng_ust_lib_ring_buffer_client_cb cb;		/* slow-path client callbacks */
	/*
	 * client_type is used by the consumer process (which is in a
	 * different address space) to lookup the appropriate client
	 * callbacks and update the cb pointers.
	 */
	int client_type;
	int _unused1;
	const struct lttng_ust_lib_ring_buffer_client_cb *cb_ptr;
	char padding[LTTNG_UST_RING_BUFFER_CONFIG_PADDING];	/* reserved for ABI extension */
};
218
219 /* State returned by rseq_start, passed as argument to rseq_finish. */
struct lttng_rseq_state {
	/* Thread's rseq area; NULL when not started (see lib_ring_buffer_ctx_init()). */
	volatile struct rseq *rseqp;
	int32_t cpu_id;		/* cpu_id at start; -1 when not started. */
	uint32_t event_counter;	/* event_counter at start. */
};
225
226 /*
227 * ring buffer context
228 *
229 * Context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
230 * lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
231 * lib_ring_buffer_write().
232 *
233 * IMPORTANT: this structure is part of the ABI between the probe and
234 * UST. Fields need to be only added at the end, never reordered, never
235 * removed.
236 */
237 #define LTTNG_UST_RING_BUFFER_CTX_PADDING \
238 (24 - sizeof(int) - sizeof(void *) - sizeof(void *))
struct lttng_ust_lib_ring_buffer_ctx {
	/* input received by lib_ring_buffer_reserve(), saved here. */
	struct channel *chan;		/* channel */
	void *priv;			/* client private data */
	struct lttng_ust_shm_handle *handle;	/* shared-memory handle */
	size_t data_size;		/* size of payload */
	int largest_align;		/*
					 * alignment of the largest element
					 * in the payload
					 */
	int cpu;			/* processor id */

	/* output from lib_ring_buffer_reserve() */
	struct lttng_ust_lib_ring_buffer *buf;	/*
					 * buffer corresponding to processor id
					 * for this channel
					 */
	size_t slot_size;		/* size of the reserved slot */
	unsigned long buf_offset;	/* offset following the record header */
	unsigned long pre_offset;	/*
					 * Initial offset position _before_
					 * the record is written. Positioned
					 * prior to record header alignment
					 * padding.
					 */
	uint64_t tsc;			/* time-stamp counter value */
	unsigned int rflags;		/* reservation flags */
	/*
	 * The field ctx_len is the length of struct
	 * lttng_ust_lib_ring_buffer_ctx as known by the user of
	 * lib_ring_buffer_ctx_init.
	 */
	unsigned int ctx_len;
	void *ip;			/* caller ip address */
	void *priv2;			/* 2nd priv data */
	char padding2[LTTNG_UST_RING_BUFFER_CTX_PADDING];	/* reserved, zeroed at init */
	/*
	 * This is the end of the initial fields expected by the original ABI
	 * between probes and UST. Only the fields above can be used if
	 * ctx_len is 0. Use the value of ctx_len to find out which of the
	 * following fields may be used.
	 */
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
	struct lttng_rseq_state rseq_state;	/* rseq snapshot (see lttng_rseq_state) */
};
284
285 /**
286 * lib_ring_buffer_ctx_init - initialize ring buffer context
287 * @ctx: ring buffer context to initialize
288 * @chan: channel
289 * @priv: client private data
290 * @data_size: size of record data payload
291 * @largest_align: largest alignment within data payload types
292 * @cpu: processor id
293 */
294 static inline lttng_ust_notrace
295 void lib_ring_buffer_ctx_init(struct lttng_ust_lib_ring_buffer_ctx *ctx,
296 struct channel *chan, void *priv,
297 size_t data_size, int largest_align,
298 int cpu, struct lttng_ust_shm_handle *handle,
299 void *priv2);
300 static inline
301 void lib_ring_buffer_ctx_init(struct lttng_ust_lib_ring_buffer_ctx *ctx,
302 struct channel *chan, void *priv,
303 size_t data_size, int largest_align,
304 int cpu, struct lttng_ust_shm_handle *handle,
305 void *priv2)
306 {
307 ctx->chan = chan;
308 ctx->priv = priv;
309 ctx->data_size = data_size;
310 ctx->largest_align = largest_align;
311 ctx->cpu = cpu;
312 ctx->rflags = 0;
313 ctx->handle = handle;
314 ctx->ctx_len = sizeof(struct lttng_ust_lib_ring_buffer_ctx);
315 ctx->ip = 0;
316 ctx->priv2 = priv2;
317 memset(ctx->padding2, 0, LTTNG_UST_RING_BUFFER_CTX_PADDING);
318 ctx->rseq_state.rseqp = NULL;
319 ctx->rseq_state.cpu_id = -1;
320 ctx->rseq_state.event_counter = 0;
321 }
322
323 /*
324 * Reservation flags.
325 *
326 * RING_BUFFER_RFLAG_FULL_TSC
327 *
328 * This flag is passed to record_header_size() and to the primitive used to
329 * write the record header. It indicates that the full 64-bit time value is
330 * needed in the record header. If this flag is not set, the record header needs
331 * only to contain "tsc_bits" bit of time value.
332 *
333 * Reservation flags can be added by the client, starting from
334 * "(RING_BUFFER_FLAGS_END << 0)". It can be used to pass information from
335 * record_header_size() to lib_ring_buffer_write_record_header().
336 */
337 #define RING_BUFFER_RFLAG_FULL_TSC (1U << 0)
338 #define RING_BUFFER_RFLAG_END (1U << 1)
339
340 /*
341 * We need to define RING_BUFFER_ALIGN_ATTR so it is known early at
342 * compile-time. We have to duplicate the "config->align" information and the
343 * definition here because config->align is used both in the slow and fast
344 * paths, but RING_BUFFER_ALIGN_ATTR is only available for the client code.
345 */
346 #ifdef RING_BUFFER_ALIGN
347
348 # define RING_BUFFER_ALIGN_ATTR /* Default arch alignment */
349
350 /*
351 * Calculate the offset needed to align the type.
352 * size_of_type must be non-zero.
353 */
static inline lttng_ust_notrace
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type);
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
	/*
	 * Natural-alignment build: padding is required. offset_align() is
	 * provided by lttng/align.h.
	 */
	return offset_align(align_drift, size_of_type);
}
361
362 #else
363
364 # define RING_BUFFER_ALIGN_ATTR __attribute__((packed))
365
366 /*
367 * Calculate the offset needed to align the type.
368 * size_of_type must be non-zero.
369 */
370 static inline lttng_ust_notrace
371 unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type);
372 static inline
373 unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
374 {
375 return 0;
376 }
377
378 #endif
379
380 /**
381 * lib_ring_buffer_align_ctx - Align context offset on "alignment"
382 * @ctx: ring buffer context.
383 */
384 static inline lttng_ust_notrace
385 void lib_ring_buffer_align_ctx(struct lttng_ust_lib_ring_buffer_ctx *ctx,
386 size_t alignment);
387 static inline
388 void lib_ring_buffer_align_ctx(struct lttng_ust_lib_ring_buffer_ctx *ctx,
389 size_t alignment)
390 {
391 ctx->buf_offset += lib_ring_buffer_align(ctx->buf_offset,
392 alignment);
393 }
394
395 /*
396 * lib_ring_buffer_check_config() returns 0 on success.
397 * Used internally to check for valid configurations at channel creation.
398 */
399 static inline lttng_ust_notrace
400 int lib_ring_buffer_check_config(const struct lttng_ust_lib_ring_buffer_config *config,
401 unsigned int switch_timer_interval,
402 unsigned int read_timer_interval);
403 static inline
404 int lib_ring_buffer_check_config(const struct lttng_ust_lib_ring_buffer_config *config,
405 unsigned int switch_timer_interval,
406 unsigned int read_timer_interval)
407 {
408 if (config->alloc == RING_BUFFER_ALLOC_GLOBAL
409 && config->sync == RING_BUFFER_SYNC_PER_CPU
410 && switch_timer_interval)
411 return -EINVAL;
412 return 0;
413 }
414
415 #endif /* _LTTNG_RING_BUFFER_CONFIG_H */
This page took 0.040074 seconds and 5 git commands to generate.