/*
 * SoW-2020-0003: Trace Hit Counters
 * deliverable/lttng-modules.git: include/lttng/tracepoint-event-impl.h
 */
1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng/tracepoint-event-impl.h
4 *
5 * Copyright (C) 2009 Steven Rostedt <rostedt@goodmis.org>
6 * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 */
8
9 #include <linux/uaccess.h>
10 #include <linux/debugfs.h>
11 #include <linux/rculist.h>
12 #include <asm/byteorder.h>
13 #include <linux/swab.h>
14
15 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
16 #include <ringbuffer/frontend_types.h>
17 #include <ringbuffer/backend.h>
18 #include <wrapper/rcu.h>
19 #include <wrapper/user_namespace.h>
20 #include <lttng/types.h>
21 #include <lttng/probe-user.h>
22 #include <lttng/events.h>
23 #include <lttng/tracer-core.h>
24 #include <lttng/tp-mempool.h>
25
/* Substituted for a NULL string pointer when serializing string fields. */
#define __LTTNG_NULL_STRING	"(null)"

/* PARAMS() protects comma-containing arguments when forwarding to other macros. */
#undef PARAMS
#define PARAMS(args...)	args
30
/*
 * Macro declarations used for all stages.
 */

/*
 * LTTng name mapping macros. LTTng remaps some of the kernel events to
 * enforce name-spacing.
 */
#undef LTTNG_TRACEPOINT_EVENT_MAP
#define LTTNG_TRACEPOINT_EVENT_MAP(name, map, proto, args, fields) \
	LTTNG_TRACEPOINT_EVENT_CLASS(map, \
			     PARAMS(proto), \
			     PARAMS(args), \
			     PARAMS(fields)) \
	LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(map, name, map, PARAMS(proto), PARAMS(args))

#undef LTTNG_TRACEPOINT_EVENT_MAP_NOARGS
#define LTTNG_TRACEPOINT_EVENT_MAP_NOARGS(name, map, fields) \
	LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(map, \
			     PARAMS(fields)) \
	LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(map, name, map)

/* Variant carrying local variables and pre/post field-capture code. */
#undef LTTNG_TRACEPOINT_EVENT_CODE_MAP
#define LTTNG_TRACEPOINT_EVENT_CODE_MAP(name, map, proto, args, _locvar, _code_pre, fields, _code_post) \
	LTTNG_TRACEPOINT_EVENT_CLASS_CODE(map, \
			     PARAMS(proto), \
			     PARAMS(args), \
			     PARAMS(_locvar), \
			     PARAMS(_code_pre), \
			     PARAMS(fields), \
			     PARAMS(_code_post)) \
	LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(map, name, map, PARAMS(proto), PARAMS(args))

#undef LTTNG_TRACEPOINT_EVENT_CODE
#define LTTNG_TRACEPOINT_EVENT_CODE(name, proto, args, _locvar, _code_pre, fields, _code_post) \
	LTTNG_TRACEPOINT_EVENT_CODE_MAP(name, name, \
			     PARAMS(proto), \
			     PARAMS(args), \
			     PARAMS(_locvar), \
			     PARAMS(_code_pre), \
			     PARAMS(fields), \
			     PARAMS(_code_post))

/*
 * LTTNG_TRACEPOINT_EVENT_CLASS can be used to add a generic function
 * handlers for events. That is, if all events have the same parameters
 * and just have distinct trace points. Each tracepoint can be defined
 * with LTTNG_TRACEPOINT_EVENT_INSTANCE and that will map the
 * LTTNG_TRACEPOINT_EVENT_CLASS to the tracepoint.
 *
 * LTTNG_TRACEPOINT_EVENT is a one to one mapping between tracepoint and
 * template.
 */

#undef LTTNG_TRACEPOINT_EVENT
#define LTTNG_TRACEPOINT_EVENT(name, proto, args, fields) \
	LTTNG_TRACEPOINT_EVENT_MAP(name, name, \
			PARAMS(proto), \
			PARAMS(args), \
			PARAMS(fields))

#undef LTTNG_TRACEPOINT_EVENT_NOARGS
#define LTTNG_TRACEPOINT_EVENT_NOARGS(name, fields) \
	LTTNG_TRACEPOINT_EVENT_MAP_NOARGS(name, name, PARAMS(fields))

#undef LTTNG_TRACEPOINT_EVENT_INSTANCE
#define LTTNG_TRACEPOINT_EVENT_INSTANCE(template, name, proto, args) \
	LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(template, name, name, PARAMS(proto), PARAMS(args))

#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS(template, name) \
	LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(template, name, name)

/* CLASS is CLASS_CODE with empty local variables and pre/post code. */
#undef LTTNG_TRACEPOINT_EVENT_CLASS
#define LTTNG_TRACEPOINT_EVENT_CLASS(_name, _proto, _args, _fields) \
	LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, PARAMS(_proto), PARAMS(_args), , , \
			PARAMS(_fields), )

#undef LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS
#define LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(_name, _fields) \
	LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, , , PARAMS(_fields), )
112
113
/*
 * Stage 1 of the trace events.
 *
 * Create dummy trace calls for each events, verifying that the LTTng module
 * instrumentation headers match the kernel arguments. Will be optimized
 * out by the compiler.
 */

/* Reset all macros within TRACEPOINT_EVENT */
#include <lttng/events-reset.h>

#undef TP_PROTO
#define TP_PROTO(...)	__VA_ARGS__

#undef TP_ARGS
#define TP_ARGS(...)	__VA_ARGS__

/*
 * Redeclare the kernel's trace_##name() entry point so the compiler
 * flags any prototype mismatch between probe and kernel tracepoint.
 */
#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
void trace_##_name(_proto);

#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
void trace_##_name(void);

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
140
/*
 * Stage 1.1 of the trace events.
 *
 * Create dummy trace prototypes for each event class, and for each used
 * template. This will allow checking whether the prototypes from the
 * class and the instance using the class actually match.
 */

#include <lttng/events-reset.h>	/* Reset all macros within TRACE_EVENT */

#undef TP_PROTO
#define TP_PROTO(...)	__VA_ARGS__

#undef TP_ARGS
#define TP_ARGS(...)	__VA_ARGS__

/*
 * Instance and class both declare the same
 * __event_template_proto___##_template symbol: a prototype mismatch
 * between them is a compile error.
 */
#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
void __event_template_proto___##_template(_proto);

#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
void __event_template_proto___##_template(void);

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
void __event_template_proto___##_name(_proto);

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
void __event_template_proto___##_name(void);

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
174
/*
 * Stage 1.2 of the trace event_notifier.
 *
 * Create dummy trace prototypes for each event class, and for each used
 * template. This will allow checking whether the prototypes from the
 * class and the instance using the class actually match.
 */

#include <lttng/events-reset.h>	/* Reset all macros within TRACE_EVENT */

#undef TP_PROTO
#define TP_PROTO(...)	__VA_ARGS__

#undef TP_ARGS
#define TP_ARGS(...)	__VA_ARGS__

/* Same cross-check as stage 1.1, for the event-notifier flavour. */
#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
void __event_notifier_template_proto___##_template(_proto);

#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
void __event_notifier_template_proto___##_template(void);

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
void __event_notifier_template_proto___##_name(_proto);

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
void __event_notifier_template_proto___##_name(void);

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
208
209
/*
 * Stage 1.3 of tracepoint event generation.
 * (Was labeled "Stage 1.2", duplicating the notifier stage above.)
 *
 * Unfolding the enums: emit one lttng_enum_entry table per
 * LTTNG_TRACEPOINT_ENUM declaration.
 */
#include <lttng/events-reset.h>	/* Reset all macros within TRACE_EVENT */

/* Enumeration entry (single value) */
#undef ctf_enum_value
#define ctf_enum_value(_string, _value)					\
	{								\
		.start = {						\
			.signedness = lttng_is_signed_type(__typeof__(_value)), \
			.value = lttng_is_signed_type(__typeof__(_value)) ? \
				(long long) (_value) : (_value),	\
		},							\
		.end = {						\
			.signedness = lttng_is_signed_type(__typeof__(_value)), \
			.value = lttng_is_signed_type(__typeof__(_value)) ? \
				(long long) (_value) : (_value),	\
		},							\
		.string = (_string),					\
	},

/* Enumeration entry (range) */
#undef ctf_enum_range
#define ctf_enum_range(_string, _range_start, _range_end)		\
	{								\
		.start = {						\
			.signedness = lttng_is_signed_type(__typeof__(_range_start)), \
			.value = lttng_is_signed_type(__typeof__(_range_start)) ? \
				(long long) (_range_start) : (_range_start), \
		},							\
		.end = {						\
			.signedness = lttng_is_signed_type(__typeof__(_range_end)), \
			.value = lttng_is_signed_type(__typeof__(_range_end)) ? \
				(long long) (_range_end) : (_range_end), \
		},							\
		.string = (_string),					\
	},

/* Enumeration entry (automatic value; follows the rules of CTF) */
#undef ctf_enum_auto
#define ctf_enum_auto(_string)						\
	{								\
		.start = {						\
			.signedness = -1,				\
			.value = -1,					\
		},							\
		.end = {						\
			.signedness = -1,				\
			.value = -1,					\
		},							\
		.string = (_string),					\
		.options = {						\
			.is_auto = 1,					\
		}							\
	},

#undef TP_ENUM_VALUES
#define TP_ENUM_VALUES(...)						\
	__VA_ARGS__

/*
 * NOTE(review): this table is "const" but not "static", so the symbol
 * has external linkage — verify no other translation unit depends on
 * that before making it static.
 */
#undef LTTNG_TRACEPOINT_ENUM
#define LTTNG_TRACEPOINT_ENUM(_name, _values)				\
	const struct lttng_enum_entry __enum_values__##_name[] = {	\
		_values							\
	};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
280
/*
 * Stage 2 of the trace events.
 *
 * Create event field type metadata section.
 * Each event produce an array of fields.
 */

/* Reset all macros within TRACEPOINT_EVENT */
#include <lttng/events-reset.h>
#include <lttng/events-write.h>
#include <lttng/events-nowrite.h>

/* Fixed-size integer field descriptor. */
#undef _ctf_integer_ext
#define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \
	{								\
		.name = #_item,						\
		.type = __type_integer(_type, 0, 0, -1, _byte_order, _base, none), \
		.nowrite = _nowrite,					\
		.user = _user,						\
		.nofilter = 0,						\
	},

/* Fixed-length array field descriptor; element type is an integer. */
#undef _ctf_array_encoded
#define _ctf_array_encoded(_type, _item, _src, _length,			\
		_encoding, _byte_order, _elem_type_base, _user, _nowrite) \
	{								\
		.name = #_item,						\
		.type =							\
		{							\
			.atype = atype_array_nestable,			\
			.u =						\
			{						\
				.array_nestable =			\
				{					\
					.elem_type = __LTTNG_COMPOUND_LITERAL(struct lttng_type, \
						__type_integer(_type, 0, 0, -1, _byte_order, _elem_type_base, _encoding)), \
					.length = _length,		\
					.alignment = 0,			\
				}					\
			}						\
		},							\
		.nowrite = _nowrite,					\
		.user = _user,						\
		.nofilter = 0,						\
	},

/* Bitfield array: described as a 1-bit-granularity little-endian array. */
#undef _ctf_array_bitfield
#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
	{								\
		.name = #_item,						\
		.type =							\
		{							\
			.atype = atype_array_nestable,			\
			.u =						\
			{						\
				.array_nestable =			\
				{					\
					.elem_type = __LTTNG_COMPOUND_LITERAL(struct lttng_type, \
						__type_integer(_type, 1, 1, 0, __LITTLE_ENDIAN, 10, none)), \
					.length = (_length) * sizeof(_type) * CHAR_BIT, \
					.alignment = lttng_alignof(_type), \
				}					\
			}						\
		},							\
		.nowrite = _nowrite,					\
		.user = _user,						\
		.nofilter = 0,						\
	},


/*
 * Sequence (variable-length array): emits a hidden "_<item>_length"
 * integer field followed by the payload field referencing it.
 */
#undef _ctf_sequence_encoded
#define _ctf_sequence_encoded(_type, _item, _src,			\
			_length_type, _src_length, _encoding,		\
			_byte_order, _elem_type_base, _user, _nowrite)	\
	{								\
		.name = "_" #_item "_length",				\
		.type = __type_integer(_length_type, 0, 0, -1, __BYTE_ORDER, 10, none), \
		.nowrite = _nowrite,					\
		.nofilter = 1,						\
	},								\
	{								\
		.name = #_item,						\
		.type =							\
		{							\
			.atype = atype_sequence_nestable,		\
			.u =						\
			{						\
				.sequence_nestable =			\
				{					\
					.length_name = "_" #_item "_length", \
					.elem_type = __LTTNG_COMPOUND_LITERAL(struct lttng_type, \
						__type_integer(_type, 0, 0, -1, _byte_order, _elem_type_base, _encoding)), \
					.alignment = 0,			\
				},					\
			},						\
		},							\
		.nowrite = _nowrite,					\
		.user = _user,						\
		.nofilter = 0,						\
	},

#undef _ctf_sequence_bitfield
#define _ctf_sequence_bitfield(_type, _item, _src,			\
			_length_type, _src_length,			\
			_user, _nowrite)				\
	{								\
		.name = "_" #_item "_length",				\
		.type = __type_integer(_length_type, 0, 0, -1, __BYTE_ORDER, 10, none), \
		.nowrite = _nowrite,					\
		.nofilter = 1,						\
	},								\
	{								\
		.name = #_item,						\
		.type =							\
		{							\
			.atype = atype_sequence_nestable,		\
			.u =						\
			{						\
				.sequence_nestable =			\
				{					\
					.length_name = "_" #_item "_length", \
					.elem_type = __LTTNG_COMPOUND_LITERAL(struct lttng_type, \
						__type_integer(_type, 1, 1, 0, __LITTLE_ENDIAN, 10, none)), \
					.alignment = lttng_alignof(_type), \
				},					\
			},						\
		},							\
		.nowrite = _nowrite,					\
		.user = _user,						\
		.nofilter = 0,						\
	},

/* NUL-terminated string field descriptor. */
#undef _ctf_string
#define _ctf_string(_item, _src, _user, _nowrite)			\
	{								\
		.name = #_item,						\
		.type =							\
		{							\
			.atype = atype_string,				\
			.u =						\
			{						\
				.string = { .encoding = lttng_encode_UTF8 }, \
			},						\
		},							\
		.nowrite = _nowrite,					\
		.user = _user,						\
		.nofilter = 0,						\
	},

/* Enum field: integer container plus a reference to the enum descriptor. */
#undef _ctf_enum
#define _ctf_enum(_name, _type, _item, _src, _user, _nowrite)		\
	{								\
		.name = #_item,						\
		.type = {						\
			.atype = atype_enum_nestable,			\
			.u = {						\
				.enum_nestable = {			\
					.desc = &__enum_##_name,	\
					.container_type = __LTTNG_COMPOUND_LITERAL(struct lttng_type, \
						__type_integer(_type, 0, 0, -1, __BYTE_ORDER, 10, none)), \
				},					\
			},						\
		},							\
		.nowrite = _nowrite,					\
		.user = _user,						\
		.nofilter = 0,						\
	},

/* Custom field: caller supplies the full lttng_type; excluded from filtering. */
#undef ctf_custom_field
#define ctf_custom_field(_type, _item, _code)				\
	{								\
		.name = #_item,						\
		.type = _type,						\
		.nowrite = 0,						\
		.user = 0,						\
		.nofilter = 1,						\
	},

#undef ctf_custom_type
#define ctf_custom_type(...)	__VA_ARGS__

#undef TP_FIELDS
#define TP_FIELDS(...)	__VA_ARGS__	/* Only one used in this phase */

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
	static const struct lttng_event_field __event_fields___##_name[] = { \
		_fields							\
	};

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
	LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, PARAMS(_fields), _code_post)

/* Enum descriptor referencing the entry table emitted in the enum stage. */
#undef LTTNG_TRACEPOINT_ENUM
#define LTTNG_TRACEPOINT_ENUM(_name, _values)				\
	static const struct lttng_enum_desc __enum_##_name = {		\
		.name = #_name,						\
		.entries = __enum_values__##_name,			\
		.nr_entries = ARRAY_SIZE(__enum_values__##_name),	\
	};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
484
/*
 * Stage 3 of the trace events.
 *
 * Create probe callback prototypes.
 */

/* Reset all macros within TRACEPOINT_EVENT */
#include <lttng/events-reset.h>

#undef TP_PROTO
#define TP_PROTO(...)	__VA_ARGS__

/*
 * Two probes per class: one that records into ring buffers
 * (__event_probe__) and one for event notifiers
 * (__event_notifier_probe__); bodies are generated in later stages.
 */
#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
static void __event_probe__##_name(void *__data, _proto); \
static void __event_notifier_probe__##_name(void *__data, _proto);

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
static void __event_probe__##_name(void *__data); \
static void __event_notifier_probe__##_name(void *__data);

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
508
/*
 * Stage 4 of the trace events.
 *
 * Create static inline function that calculates event size.
 */

/* Reset all macros within TRACEPOINT_EVENT */
#include <lttng/events-reset.h>
#include <lttng/events-write.h>

#undef _ctf_integer_ext
#define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \
	__event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
	__event_len += sizeof(_type);

#undef _ctf_array_encoded
#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _byte_order, _base, _user, _nowrite) \
	__event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
	__event_len += sizeof(_type) * (_length);

#undef _ctf_array_bitfield
#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
	_ctf_array_encoded(_type, _item, _src, _length, none, __LITTLE_ENDIAN, 0, _user, _nowrite)

/*
 * Sequences have a dynamic length: evaluate the length expression once
 * and push it on the per-cpu dynamic-length stack so the later write
 * stage reuses the exact same value.
 */
#undef _ctf_sequence_encoded
#define _ctf_sequence_encoded(_type, _item, _src, _length_type,		\
			_src_length, _encoding, _byte_order, _base, _user, _nowrite) \
	__event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_length_type)); \
	__event_len += sizeof(_length_type);				\
	__event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
	{								\
		size_t __seqlen = (_src_length);			\
									\
		if (unlikely(++this_cpu_ptr(&lttng_dynamic_len_stack)->offset >= LTTNG_DYNAMIC_LEN_STACK_SIZE)) \
			goto error;					\
		barrier();	/* reserve before use. */		\
		this_cpu_ptr(&lttng_dynamic_len_stack)->stack[this_cpu_ptr(&lttng_dynamic_len_stack)->offset - 1] = __seqlen; \
		__event_len += sizeof(_type) * __seqlen;		\
	}

#undef _ctf_sequence_bitfield
#define _ctf_sequence_bitfield(_type, _item, _src,			\
			_length_type, _src_length,			\
			_user, _nowrite)				\
	_ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \
		none, __LITTLE_ENDIAN, 10, _user, _nowrite)

/*
 * ctf_user_string includes \0. If returns 0, it faulted, so we set size to
 * 1 (\0 only).
 */
#undef _ctf_string
#define _ctf_string(_item, _src, _user, _nowrite)			\
	if (unlikely(++this_cpu_ptr(&lttng_dynamic_len_stack)->offset >= LTTNG_DYNAMIC_LEN_STACK_SIZE)) \
		goto error;						\
	barrier();	/* reserve before use. */			\
	if (_user) {							\
		__event_len += this_cpu_ptr(&lttng_dynamic_len_stack)->stack[this_cpu_ptr(&lttng_dynamic_len_stack)->offset - 1] = \
			max_t(size_t, lttng_strlen_user_inatomic(_src), 1); \
	} else {							\
		__event_len += this_cpu_ptr(&lttng_dynamic_len_stack)->stack[this_cpu_ptr(&lttng_dynamic_len_stack)->offset - 1] = \
			strlen((_src) ? (_src) : __LTTNG_NULL_STRING) + 1; \
	}

#undef _ctf_enum
#define _ctf_enum(_name, _type, _item, _src, _user, _nowrite)		\
	_ctf_integer_ext(_type, _item, _src, __BYTE_ORDER, 10, _user, _nowrite)

#undef ctf_align
#define ctf_align(_type)						\
	__event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type));

#undef ctf_custom_field
#define ctf_custom_field(_type, _item, _code)				\
	{								\
		_code							\
	}

#undef ctf_custom_code
#define ctf_custom_code(...)	__VA_ARGS__

#undef TP_PROTO
#define TP_PROTO(...)	__VA_ARGS__

#undef TP_FIELDS
#define TP_FIELDS(...)	__VA_ARGS__

#undef TP_locvar
#define TP_locvar(...)	__VA_ARGS__

/*
 * Returns the event payload size in bytes, or -1 when the dynamic-length
 * stack would overflow.
 */
#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
static inline ssize_t __event_get_size__##_name(void *__tp_locvar, _proto) \
{									\
	size_t __event_len = 0;						\
	unsigned int __dynamic_len_idx __attribute__((unused)) = 0;	\
	struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
									\
	_fields								\
	return __event_len;						\
									\
error:									\
	__attribute__((unused));					\
	return -1;							\
}

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
static inline ssize_t __event_get_size__##_name(void *__tp_locvar)	\
{									\
	size_t __event_len = 0;						\
	unsigned int __dynamic_len_idx __attribute__((unused)) = 0;	\
	struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
									\
	_fields								\
	return __event_len;						\
									\
error:									\
	__attribute__((unused));					\
	return -1;							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
632
633
/*
 * Stage 4.1 of tracepoint event generation.
 *
 * Create static inline function that layout the filter stack data.
 * We make both write and nowrite data available to the filter.
 */

/* Reset all macros within TRACEPOINT_EVENT */
#include <lttng/events-reset.h>
#include <lttng/events-write.h>
#include <lttng/events-nowrite.h>

/*
 * Normalize an already-fetched integer to a host-order 64-bit slot on
 * the interpreter stack: byteswap when the field byte order differs
 * from host order, then sign- or zero-extend according to the type.
 */
#undef _ctf_integer_ext_fetched
#define _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) \
	if (lttng_is_signed_type(_type)) {				\
		int64_t __ctf_tmp_int64;				\
		switch (sizeof(_type)) {				\
		case 1:							\
		{							\
			union { _type t; int8_t v; } __tmp = { (_type) (_src) }; \
			__ctf_tmp_int64 = (int64_t) __tmp.v;		\
			break;						\
		}							\
		case 2:							\
		{							\
			union { _type t; int16_t v; } __tmp = { (_type) (_src) }; \
			if (_byte_order != __BYTE_ORDER)		\
				__swab16s(&__tmp.v);			\
			__ctf_tmp_int64 = (int64_t) __tmp.v;		\
			break;						\
		}							\
		case 4:							\
		{							\
			union { _type t; int32_t v; } __tmp = { (_type) (_src) }; \
			if (_byte_order != __BYTE_ORDER)		\
				__swab32s(&__tmp.v);			\
			__ctf_tmp_int64 = (int64_t) __tmp.v;		\
			break;						\
		}							\
		case 8:							\
		{							\
			union { _type t; int64_t v; } __tmp = { (_type) (_src) }; \
			if (_byte_order != __BYTE_ORDER)		\
				__swab64s(&__tmp.v);			\
			__ctf_tmp_int64 = (int64_t) __tmp.v;		\
			break;						\
		}							\
		default:						\
			BUG_ON(1);					\
		};							\
		memcpy(__stack_data, &__ctf_tmp_int64, sizeof(int64_t)); \
	} else {							\
		uint64_t __ctf_tmp_uint64;				\
		switch (sizeof(_type)) {				\
		case 1:							\
		{							\
			union { _type t; uint8_t v; } __tmp = { (_type) (_src) }; \
			__ctf_tmp_uint64 = (uint64_t) __tmp.v;		\
			break;						\
		}							\
		case 2:							\
		{							\
			union { _type t; uint16_t v; } __tmp = { (_type) (_src) }; \
			if (_byte_order != __BYTE_ORDER)		\
				__swab16s(&__tmp.v);			\
			__ctf_tmp_uint64 = (uint64_t) __tmp.v;		\
			break;						\
		}							\
		case 4:							\
		{							\
			union { _type t; uint32_t v; } __tmp = { (_type) (_src) }; \
			if (_byte_order != __BYTE_ORDER)		\
				__swab32s(&__tmp.v);			\
			__ctf_tmp_uint64 = (uint64_t) __tmp.v;		\
			break;						\
		}							\
		case 8:							\
		{							\
			union { _type t; uint64_t v; } __tmp = { (_type) (_src) }; \
			if (_byte_order != __BYTE_ORDER)		\
				__swab64s(&__tmp.v);			\
			__ctf_tmp_uint64 = (uint64_t) __tmp.v;		\
			break;						\
		}							\
		default:						\
			BUG_ON(1);					\
		};							\
		memcpy(__stack_data, &__ctf_tmp_uint64, sizeof(uint64_t)); \
	}								\
	__stack_data += sizeof(int64_t);

#undef _ctf_integer_ext_isuser0
#define _ctf_integer_ext_isuser0(_type, _item, _src, _byte_order, _base, _nowrite) \
	_ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite)

/*
 * Userspace integer: copy the value into kernel space first (zeroing it
 * on fault), then normalize it like a fetched integer.
 */
#undef _ctf_integer_ext_isuser1
#define _ctf_integer_ext_isuser1(_type, _item, _user_src, _byte_order, _base, _nowrite) \
{									\
	union {								\
		char __array[sizeof(_user_src)];			\
		__typeof__(_user_src) __v;				\
	} __tmp_fetch;							\
	if (lib_ring_buffer_copy_from_user_check_nofault(__tmp_fetch.__array, \
				&(_user_src), sizeof(_user_src)))	\
		memset(__tmp_fetch.__array, 0, sizeof(__tmp_fetch.__array)); \
	_ctf_integer_ext_fetched(_type, _item, __tmp_fetch.__v, _byte_order, _base, _nowrite) \
}

/* Dispatch on the literal 0/1 _user argument via token pasting. */
#undef _ctf_integer_ext
#define _ctf_integer_ext(_type, _item, _user_src, _byte_order, _base, _user, _nowrite) \
	_ctf_integer_ext_isuser##_user(_type, _item, _user_src, _byte_order, _base, _nowrite)

/* Arrays/sequences are pushed as a (length, pointer) pair. */
#undef _ctf_array_encoded
#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _byte_order, _base, _user, _nowrite) \
	{								\
		unsigned long __ctf_tmp_ulong = (unsigned long) (_length); \
		const void *__ctf_tmp_ptr = (_src);			\
		memcpy(__stack_data, &__ctf_tmp_ulong, sizeof(unsigned long)); \
		__stack_data += sizeof(unsigned long);			\
		memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *));	\
		__stack_data += sizeof(void *);				\
	}

#undef _ctf_array_bitfield
#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
	_ctf_array_encoded(_type, _item, _src, _length, none, __LITTLE_ENDIAN, 0, _user, _nowrite)

#undef _ctf_sequence_encoded
#define _ctf_sequence_encoded(_type, _item, _src, _length_type,		\
			_src_length, _encoding, _byte_order, _base, _user, _nowrite) \
	{								\
		unsigned long __ctf_tmp_ulong = (unsigned long) (_src_length); \
		const void *__ctf_tmp_ptr = (_src);			\
		memcpy(__stack_data, &__ctf_tmp_ulong, sizeof(unsigned long)); \
		__stack_data += sizeof(unsigned long);			\
		memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *));	\
		__stack_data += sizeof(void *);				\
	}

#undef _ctf_sequence_bitfield
#define _ctf_sequence_bitfield(_type, _item, _src,			\
			_length_type, _src_length,			\
			_user, _nowrite)				\
	_ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \
		none, __LITTLE_ENDIAN, 10, _user, _nowrite)

/* Strings are pushed by pointer; NULL becomes the "(null)" sentinel. */
#undef _ctf_string
#define _ctf_string(_item, _src, _user, _nowrite)			\
	{								\
		const void *__ctf_tmp_ptr =				\
			((_src) ? (_src) : __LTTNG_NULL_STRING);	\
		memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *));	\
		__stack_data += sizeof(void *);				\
	}

#undef _ctf_enum
#define _ctf_enum(_name, _type, _item, _src, _user, _nowrite)		\
	_ctf_integer_ext(_type, _item, _src, __BYTE_ORDER, 10, _user, _nowrite)

#undef TP_PROTO
#define TP_PROTO(...)	__VA_ARGS__

#undef TP_FIELDS
#define TP_FIELDS(...)	__VA_ARGS__

#undef TP_locvar
#define TP_locvar(...)	__VA_ARGS__

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
static inline							\
void __event_prepare_interpreter_stack__##_name(char *__stack_data,	\
		void *__tp_locvar)					\
{									\
	struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
									\
	_fields								\
}

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
static inline							\
void __event_prepare_interpreter_stack__##_name(char *__stack_data,	\
		void *__tp_locvar, _proto)				\
{									\
	struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
									\
	_fields								\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
825
/*
 * Stage 5 of the trace events.
 *
 * Create static inline function that calculates event payload alignment.
 */

/* Reset all macros within TRACEPOINT_EVENT */
#include <lttng/events-reset.h>
#include <lttng/events-write.h>

#undef _ctf_integer_ext
#define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \
	__event_align = max_t(size_t, __event_align, lttng_alignof(_type));

#undef _ctf_array_encoded
#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _byte_order, _base, _user, _nowrite) \
	__event_align = max_t(size_t, __event_align, lttng_alignof(_type));

#undef _ctf_array_bitfield
#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
	_ctf_array_encoded(_type, _item, _src, _length, none, __LITTLE_ENDIAN, 0, _user, _nowrite)

/* A sequence contributes both its length-type and element-type alignment. */
#undef _ctf_sequence_encoded
#define _ctf_sequence_encoded(_type, _item, _src, _length_type,		\
			_src_length, _encoding, _byte_order, _base, _user, _nowrite) \
	__event_align = max_t(size_t, __event_align, lttng_alignof(_length_type)); \
	__event_align = max_t(size_t, __event_align, lttng_alignof(_type));

#undef _ctf_sequence_bitfield
#define _ctf_sequence_bitfield(_type, _item, _src,			\
			_length_type, _src_length,			\
			_user, _nowrite)				\
	_ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \
		none, __LITTLE_ENDIAN, 10, _user, _nowrite)

/* Strings are byte-aligned: no contribution. */
#undef _ctf_string
#define _ctf_string(_item, _src, _user, _nowrite)

#undef _ctf_enum
#define _ctf_enum(_name, _type, _item, _src, _user, _nowrite)		\
	_ctf_integer_ext(_type, _item, _src, __BYTE_ORDER, 10, _user, _nowrite)

#undef ctf_align
#define ctf_align(_type)						\
	__event_align = max_t(size_t, __event_align, lttng_alignof(_type));

#undef TP_PROTO
#define TP_PROTO(...)	__VA_ARGS__

#undef TP_FIELDS
#define TP_FIELDS(...)	__VA_ARGS__

#undef TP_locvar
#define TP_locvar(...)	__VA_ARGS__

#undef ctf_custom_field
#define ctf_custom_field(_type, _item, _code)	_code

#undef ctf_custom_code
#define ctf_custom_code(...)						\
	{								\
		__VA_ARGS__						\
	}

/* Returns the largest alignment (in bytes) required by any field. */
#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
static inline size_t __event_get_align__##_name(void *__tp_locvar, _proto) \
{									\
	size_t __event_align = 1;					\
	struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
									\
	_fields								\
	return __event_align;						\
}

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
static inline size_t __event_get_align__##_name(void *__tp_locvar)	\
{									\
	size_t __event_align = 1;					\
	struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
									\
	_fields								\
	return __event_align;						\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
913
/*
 * Stage 6 of tracepoint event generation.
 *
 * Create the probe function. This function calls event size calculation
 * and writes event data into the buffer.
 */

/* Reset all macros within TRACEPOINT_EVENT */
#include <lttng/events-reset.h>
#include <lttng/events-write.h>

/* Write an already-fetched integer value into the ring buffer. */
#undef _ctf_integer_ext_fetched
#define _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) \
	{								\
		_type __tmp = _src;					\
		lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(__tmp)); \
		__chan->ops->event_write(&__ctx, &__tmp, sizeof(__tmp)); \
	}

#undef _ctf_integer_ext_isuser0
#define _ctf_integer_ext_isuser0(_type, _item, _src, _byte_order, _base, _nowrite) \
	_ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite)

/*
 * Userspace integer: copy into kernel space first (zeroed on fault),
 * then write like a fetched value.
 */
#undef _ctf_integer_ext_isuser1
#define _ctf_integer_ext_isuser1(_type, _item, _user_src, _byte_order, _base, _nowrite) \
{									\
	union {								\
		char __array[sizeof(_user_src)];			\
		__typeof__(_user_src) __v;				\
	} __tmp_fetch;							\
	if (lib_ring_buffer_copy_from_user_check_nofault(__tmp_fetch.__array, \
				&(_user_src), sizeof(_user_src)))	\
		memset(__tmp_fetch.__array, 0, sizeof(__tmp_fetch.__array)); \
	_ctf_integer_ext_fetched(_type, _item, __tmp_fetch.__v, _byte_order, _base, _nowrite) \
}

/* Dispatch on the literal 0/1 _user argument via token pasting. */
#undef _ctf_integer_ext
#define _ctf_integer_ext(_type, _item, _user_src, _byte_order, _base, _user, _nowrite) \
	_ctf_integer_ext_isuser##_user(_type, _item, _user_src, _byte_order, _base, _nowrite)

#undef _ctf_array_encoded
#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _byte_order, _base, _user, _nowrite) \
	lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type));	\
	if (_user) {							\
		__chan->ops->event_write_from_user(&__ctx, _src, sizeof(_type) * (_length)); \
	} else {							\
		__chan->ops->event_write(&__ctx, _src, sizeof(_type) * (_length)); \
	}

#if (__BYTE_ORDER == __LITTLE_ENDIAN)
#undef _ctf_array_bitfield
#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
	lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type));	\
	if (_user) {							\
		__chan->ops->event_write_from_user(&__ctx, _src, sizeof(_type) * (_length)); \
	} else {							\
		__chan->ops->event_write(&__ctx, _src, sizeof(_type) * (_length)); \
	}
#else /* #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
/*
 * For big endian, we need to byteswap into little endian.
 */
#undef _ctf_array_bitfield
#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
	lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type));	\
	{								\
		size_t _i;						\
									\
		for (_i = 0; _i < (_length); _i++) {			\
			_type _tmp;					\
									\
			if (_user) {					\
				if (get_user(_tmp, (_type *) _src + _i)) \
					_tmp = 0;			\
			} else {					\
				_tmp = ((_type *) _src)[_i];		\
			}						\
			switch (sizeof(_type)) {			\
			case 1:						\
				break;					\
			case 2:						\
				_tmp = cpu_to_le16(_tmp);		\
				break;					\
			case 4:						\
				_tmp = cpu_to_le32(_tmp);		\
				break;					\
			case 8:						\
				_tmp = cpu_to_le64(_tmp);		\
				break;					\
			default:					\
				BUG_ON(1);				\
			}						\
			__chan->ops->event_write(&__ctx, &_tmp, sizeof(_type)); \
		}							\
	}
#endif /* #else #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
1010
#undef _ctf_sequence_encoded
/*
 * Write a dynamic-length sequence: first the length header (taken from
 * the per-cpu dynamic length stack filled during size computation),
 * then the sequence payload. __get_dynamic_len() consumes one stack slot.
 */
#define _ctf_sequence_encoded(_type, _item, _src, _length_type,		\
			_src_length, _encoding, _byte_order, _base, _user, _nowrite) \
	{								\
		_length_type __tmpl = this_cpu_ptr(&lttng_dynamic_len_stack)->stack[__dynamic_len_idx]; \
		lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\
		__chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\
	}								\
	lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type));	\
	if (_user) {							\
		__chan->ops->event_write_from_user(&__ctx, _src,	\
			sizeof(_type) * __get_dynamic_len(dest));	\
	} else {							\
		__chan->ops->event_write(&__ctx, _src,			\
			sizeof(_type) * __get_dynamic_len(dest));	\
	}
1027
#if (__BYTE_ORDER == __LITTLE_ENDIAN)
#undef _ctf_sequence_bitfield
/*
 * Dynamic-length bitfield sequence. The length header is expressed in
 * bits (element count * sizeof(_type) * CHAR_BIT). On little endian,
 * payload is copied as-is.
 */
#define _ctf_sequence_bitfield(_type, _item, _src,		\
			_length_type, _src_length,		\
			_user, _nowrite)			\
	{							\
		_length_type __tmpl = this_cpu_ptr(&lttng_dynamic_len_stack)->stack[__dynamic_len_idx] * sizeof(_type) * CHAR_BIT; \
		lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\
		__chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\
	}							\
	lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
	if (_user) {						\
		__chan->ops->event_write_from_user(&__ctx, _src, \
			sizeof(_type) * __get_dynamic_len(dest)); \
	} else {						\
		__chan->ops->event_write(&__ctx, _src,		\
			sizeof(_type) * __get_dynamic_len(dest)); \
	}
#else /* #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
/*
 * For big endian, we need to byteswap into little endian.
 */
#undef _ctf_sequence_bitfield
#define _ctf_sequence_bitfield(_type, _item, _src,		\
			_length_type, _src_length,		\
			_user, _nowrite)			\
	{							\
		_length_type __tmpl = this_cpu_ptr(&lttng_dynamic_len_stack)->stack[__dynamic_len_idx] * sizeof(_type) * CHAR_BIT; \
		lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\
		__chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\
	}							\
	lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
	{							\
		size_t _i, _length;				\
								\
		_length = __get_dynamic_len(dest);		\
		for (_i = 0; _i < _length; _i++) {		\
			_type _tmp;				\
								\
			if (_user) {				\
				if (get_user(_tmp, (_type *) _src + _i)) \
					_tmp = 0;		\
			} else {				\
				_tmp = ((_type *) _src)[_i];	\
			}					\
			switch (sizeof(_type)) {		\
			case 1:					\
				break;				\
			case 2:					\
				_tmp = cpu_to_le16(_tmp);	\
				break;				\
			case 4:					\
				_tmp = cpu_to_le32(_tmp);	\
				break;				\
			case 8:					\
				_tmp = cpu_to_le64(_tmp);	\
				break;				\
			default:				\
				BUG_ON(1);			\
			}					\
			__chan->ops->event_write(&__ctx, &_tmp, sizeof(_type)); \
		}						\
	}
#endif /* #else #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
1092
#undef _ctf_string
/*
 * Write a NUL-terminated string. User-space strings are copied with the
 * faulting-safe strcpy; NULL kernel strings are substituted with the
 * "(null)" placeholder. __get_dynamic_len() consumes the length slot
 * computed at size-calculation time.
 */
#define _ctf_string(_item, _src, _user, _nowrite)			\
	if (_user) {							\
		lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(*(_src))); \
		__chan->ops->event_strcpy_from_user(&__ctx, _src,	\
			__get_dynamic_len(dest));			\
	} else {							\
		const char *__ctf_tmp_string =				\
			((_src) ? (_src) : __LTTNG_NULL_STRING);	\
		lib_ring_buffer_align_ctx(&__ctx,			\
			lttng_alignof(*__ctf_tmp_string));		\
		__chan->ops->event_strcpy(&__ctx, __ctf_tmp_string,	\
			__get_dynamic_len(dest));			\
	}
1107
#undef _ctf_enum
/* An enum field is written as its underlying integer type, base 10. */
#define _ctf_enum(_name, _type, _item, _src, _user, _nowrite)		\
	_ctf_integer_ext(_type, _item, _src, __BYTE_ORDER, 10, _user, _nowrite)

#undef ctf_align
/* Align the ring buffer write position on _type's natural alignment. */
#define ctf_align(_type)						\
	lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type));

#undef ctf_custom_field
/* Custom field: emit the user-provided write code verbatim. */
#define ctf_custom_field(_type, _item, _code) _code

#undef ctf_custom_code
#define ctf_custom_code(...)						\
	{								\
		__VA_ARGS__						\
	}

/* Beware: this get len actually consumes the len value */
#undef __get_dynamic_len
#define __get_dynamic_len(field) this_cpu_ptr(&lttng_dynamic_len_stack)->stack[__dynamic_len_idx++]
1128
#undef TP_PROTO
#define TP_PROTO(...) __VA_ARGS__

#undef TP_ARGS
#define TP_ARGS(...) __VA_ARGS__

#undef TP_FIELDS
#define TP_FIELDS(...) __VA_ARGS__

#undef TP_locvar
#define TP_locvar(...) __VA_ARGS__

#undef TP_code_pre
#define TP_code_pre(...) __VA_ARGS__

#undef TP_code_post
#define TP_code_post(...) __VA_ARGS__

/*
 * For state dump, check that "session" argument (mandatory) matches the
 * session this event belongs to. Ensures that we write state dump data only
 * into the started session, not into all sessions.
 */
#ifdef TP_SESSION_CHECK
#define _TP_SESSION_CHECK(session, csession) (session == csession)
#else /* TP_SESSION_CHECK */
#define _TP_SESSION_CHECK(session, csession) 1
#endif /* TP_SESSION_CHECK */
1157
/*
 * Using twice size for filter stack data to hold size and pointer for
 * each field (worse case). For integers, max size required is 64-bit.
 * Same for double-precision floats. Those fit within
 * 2*sizeof(unsigned long) for all supported architectures.
 * Perform UNION (||) of filter runtime list.
 */
#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
/*
 * Generate the tracing probe: check session/container/event enablement
 * and id trackers, run filter bytecode, then either reserve+write a
 * ring buffer event (channel container) or increment the event's hit
 * counter (counter container).
 */
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
static void __event_probe__##_name(void *__data, _proto)		\
{									\
	struct probe_local_vars { _locvar };				\
	struct lttng_event *__event = __data;				\
	struct lttng_probe_ctx __lttng_probe_ctx = {			\
		.event = __event,					\
		.event_notifier = NULL,					\
		.interruptible = !irqs_disabled(),			\
	};								\
	struct lttng_event_container *__container = __event->container;	\
	struct lttng_session *__session = __container->session;		\
	struct lib_ring_buffer_ctx __ctx;				\
	size_t __orig_dynamic_len_offset, __dynamic_len_idx __attribute__((unused)); \
	union {								\
		size_t __dynamic_len_removed[ARRAY_SIZE(__event_fields___##_name)]; \
		char __filter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
	} __stackvar;							\
	int __ret;							\
	struct probe_local_vars __tp_locvar;				\
	struct probe_local_vars *tp_locvar __attribute__((unused)) =	\
			&__tp_locvar;					\
	struct lttng_id_tracker_rcu *__lf;				\
									\
	if (!_TP_SESSION_CHECK(session, __session))			\
		return;							\
	if (unlikely(!LTTNG_READ_ONCE(__session->active)))		\
		return;							\
	if (unlikely(!LTTNG_READ_ONCE(__container->enabled)))		\
		return;							\
	if (unlikely(!LTTNG_READ_ONCE(__event->enabled)))		\
		return;							\
	__lf = lttng_rcu_dereference(__session->pid_tracker.p);		\
	if (__lf && likely(!lttng_id_tracker_lookup(__lf, current->tgid))) \
		return;							\
	__lf = lttng_rcu_dereference(__session->vpid_tracker.p);	\
	if (__lf && likely(!lttng_id_tracker_lookup(__lf, task_tgid_vnr(current)))) \
		return;							\
	__lf = lttng_rcu_dereference(__session->uid_tracker.p);		\
	if (__lf && likely(!lttng_id_tracker_lookup(__lf,		\
			lttng_current_uid())))				\
		return;							\
	__lf = lttng_rcu_dereference(__session->vuid_tracker.p);	\
	if (__lf && likely(!lttng_id_tracker_lookup(__lf,		\
			lttng_current_vuid())))				\
		return;							\
	__lf = lttng_rcu_dereference(__session->gid_tracker.p);		\
	if (__lf && likely(!lttng_id_tracker_lookup(__lf,		\
			lttng_current_gid())))				\
		return;							\
	__lf = lttng_rcu_dereference(__session->vgid_tracker.p);	\
	if (__lf && likely(!lttng_id_tracker_lookup(__lf,		\
			lttng_current_vgid())))				\
		return;							\
	__orig_dynamic_len_offset = this_cpu_ptr(&lttng_dynamic_len_stack)->offset; \
	__dynamic_len_idx = __orig_dynamic_len_offset;			\
	_code_pre							\
	if (unlikely(!list_empty(&__event->filter_bytecode_runtime_head))) { \
		struct lttng_bytecode_runtime *bc_runtime;		\
		int __filter_record = __event->has_enablers_without_bytecode; \
									\
		__event_prepare_interpreter_stack__##_name(__stackvar.__filter_stack_data, \
				tp_locvar, _args);			\
		lttng_list_for_each_entry_rcu(bc_runtime, &__event->filter_bytecode_runtime_head, node) { \
			if (unlikely(bc_runtime->interpreter_funcs.filter(bc_runtime, &__lttng_probe_ctx, \
					__stackvar.__filter_stack_data) & LTTNG_INTERPRETER_RECORD_FLAG)) { \
				__filter_record = 1;			\
				break;					\
			}						\
		}							\
		if (likely(!__filter_record))				\
			goto __post;					\
	}								\
	switch (__container->type) {					\
	case LTTNG_EVENT_CONTAINER_CHANNEL:				\
	{								\
		struct lttng_channel *__chan = lttng_event_container_get_channel(__container); \
		ssize_t __event_len;					\
		size_t __event_align;					\
									\
		__event_len = __event_get_size__##_name(tp_locvar, _args); \
		if (unlikely(__event_len < 0)) {			\
			lib_ring_buffer_lost_event_too_big(__chan->chan); \
			goto __post;					\
		}							\
		__event_align = __event_get_align__##_name(tp_locvar, _args); \
		lib_ring_buffer_ctx_init(&__ctx, __chan->chan, &__lttng_probe_ctx, __event_len, \
					 __event_align, -1);		\
		__ret = __chan->ops->event_reserve(&__ctx, __event->id); \
		if (__ret < 0)						\
			goto __post;					\
		_fields							\
		__chan->ops->event_commit(&__ctx);			\
		break;							\
	}								\
	case LTTNG_EVENT_CONTAINER_COUNTER:				\
	{								\
		struct lttng_counter *__counter = lttng_event_container_get_counter(__container); \
		size_t __index = __event->id;				\
									\
		(void) __counter->ops->counter_add(__counter->counter, &__index, 1); \
		break;							\
	}								\
	}								\
__post:									\
	_code_post							\
	barrier();	/* use before un-reserve. */			\
	this_cpu_ptr(&lttng_dynamic_len_stack)->offset = __orig_dynamic_len_offset; \
	return;								\
}
1276
#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
/*
 * Same as the proto/args variant, for tracepoints without arguments.
 * The ring buffer context is scoped to the channel case since the
 * counter container does not need it.
 */
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
static void __event_probe__##_name(void *__data)			\
{									\
	struct probe_local_vars { _locvar };				\
	struct lttng_event *__event = __data;				\
	struct lttng_probe_ctx __lttng_probe_ctx = {			\
		.event = __event,					\
		.event_notifier = NULL,					\
		.interruptible = !irqs_disabled(),			\
	};								\
	struct lttng_event_container *__container = __event->container;	\
	struct lttng_session *__session = __container->session;		\
	size_t __orig_dynamic_len_offset, __dynamic_len_idx __attribute__((unused)); \
	union {								\
		size_t __dynamic_len_removed[ARRAY_SIZE(__event_fields___##_name)]; \
		char __filter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
	} __stackvar;							\
	int __ret;							\
	struct probe_local_vars __tp_locvar;				\
	struct probe_local_vars *tp_locvar __attribute__((unused)) =	\
			&__tp_locvar;					\
	struct lttng_id_tracker_rcu *__lf;				\
									\
	if (!_TP_SESSION_CHECK(session, __session))			\
		return;							\
	if (unlikely(!LTTNG_READ_ONCE(__session->active)))		\
		return;							\
	if (unlikely(!LTTNG_READ_ONCE(__container->enabled)))		\
		return;							\
	if (unlikely(!LTTNG_READ_ONCE(__event->enabled)))		\
		return;							\
	__lf = lttng_rcu_dereference(__session->pid_tracker.p);		\
	if (__lf && likely(!lttng_id_tracker_lookup(__lf, current->tgid))) \
		return;							\
	__lf = lttng_rcu_dereference(__session->vpid_tracker.p);	\
	if (__lf && likely(!lttng_id_tracker_lookup(__lf, task_tgid_vnr(current)))) \
		return;							\
	__lf = lttng_rcu_dereference(__session->uid_tracker.p);		\
	if (__lf && likely(!lttng_id_tracker_lookup(__lf,		\
			lttng_current_uid())))				\
		return;							\
	__lf = lttng_rcu_dereference(__session->vuid_tracker.p);	\
	if (__lf && likely(!lttng_id_tracker_lookup(__lf,		\
			lttng_current_vuid())))				\
		return;							\
	__lf = lttng_rcu_dereference(__session->gid_tracker.p);		\
	if (__lf && likely(!lttng_id_tracker_lookup(__lf,		\
			lttng_current_gid())))				\
		return;							\
	__lf = lttng_rcu_dereference(__session->vgid_tracker.p);	\
	if (__lf && likely(!lttng_id_tracker_lookup(__lf,		\
			lttng_current_vgid())))				\
		return;							\
	__orig_dynamic_len_offset = this_cpu_ptr(&lttng_dynamic_len_stack)->offset; \
	__dynamic_len_idx = __orig_dynamic_len_offset;			\
	_code_pre							\
	if (unlikely(!list_empty(&__event->filter_bytecode_runtime_head))) { \
		struct lttng_bytecode_runtime *bc_runtime;		\
		int __filter_record = __event->has_enablers_without_bytecode; \
									\
		__event_prepare_interpreter_stack__##_name(__stackvar.__filter_stack_data, \
				tp_locvar);				\
		lttng_list_for_each_entry_rcu(bc_runtime, &__event->filter_bytecode_runtime_head, node) { \
			if (unlikely(bc_runtime->interpreter_funcs.filter(bc_runtime, &__lttng_probe_ctx, \
					__stackvar.__filter_stack_data) & LTTNG_INTERPRETER_RECORD_FLAG)) { \
				__filter_record = 1;			\
				break;					\
			}						\
		}							\
		if (likely(!__filter_record))				\
			goto __post;					\
	}								\
	switch (__container->type) {					\
	case LTTNG_EVENT_CONTAINER_CHANNEL:				\
	{								\
		struct lttng_channel *__chan = lttng_event_container_get_channel(__container); \
		struct lib_ring_buffer_ctx __ctx;			\
		ssize_t __event_len;					\
		size_t __event_align;					\
									\
		__event_len = __event_get_size__##_name(tp_locvar);	\
		if (unlikely(__event_len < 0)) {			\
			lib_ring_buffer_lost_event_too_big(__chan->chan); \
			goto __post;					\
		}							\
		__event_align = __event_get_align__##_name(tp_locvar);	\
		lib_ring_buffer_ctx_init(&__ctx, __chan->chan, &__lttng_probe_ctx, __event_len, \
					 __event_align, -1);		\
		__ret = __chan->ops->event_reserve(&__ctx, __event->id); \
		if (__ret < 0)						\
			goto __post;					\
		_fields							\
		__chan->ops->event_commit(&__ctx);			\
		break;							\
	}								\
	case LTTNG_EVENT_CONTAINER_COUNTER:				\
	{								\
		struct lttng_counter *__counter = lttng_event_container_get_counter(__container); \
		size_t __index = __event->id;				\
									\
		(void) __counter->ops->counter_add(__counter->counter, &__index, 1); \
		break;							\
	}								\
	}								\
__post:									\
	_code_post							\
	barrier();	/* use before un-reserve. */			\
	this_cpu_ptr(&lttng_dynamic_len_stack)->offset = __orig_dynamic_len_offset; \
	return;								\
}
1388
1389 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
1390
1391 #undef __get_dynamic_len
1392
1393
1394 /*
1395 * Stage 6.1 of tracepoint generation: generate event notifier probes
1396 *
1397 * Create the probe function. This function evaluates the filter bytecode and
1398 * queue a notification to be sent to userspace.
1399 */
1400
1401 #include <lttng/events-reset.h> /* Reset all macros within LTTNG_TRACEPOINT_EVENT */
1402
1403 #undef TP_PROTO
1404 #define TP_PROTO(...) __VA_ARGS__
1405
1406 #undef TP_ARGS
1407 #define TP_ARGS(...) __VA_ARGS__
1408
1409 #undef TP_FIELDS
1410 #define TP_FIELDS(...) __VA_ARGS__
1411
1412 #undef TP_locvar
1413 #define TP_locvar(...) __VA_ARGS__
1414
1415 #undef TP_code_pre
1416 #define TP_code_pre(...) __VA_ARGS__
1417
1418 #undef TP_code_post
1419 #define TP_code_post(...) __VA_ARGS__
1420
/*
 * Using twice size for filter stack data to hold size and pointer for
 * each field (worse case). For integers, max size required is 64-bit.
 * Same for double-precision floats. Those fit within
 * 2*sizeof(unsigned long) for all supported architectures.
 * Perform UNION (||) of filter runtime list.
 */
#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
/*
 * Generate the event notifier probe: evaluate filter bytecode, prepare
 * the interpreter stack lazily (only when a filter ran or capture is
 * enabled), then send a notification to userspace.
 */
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
static void __event_notifier_probe__##_name(void *__data, _proto)	\
{									\
	struct probe_local_vars { _locvar };				\
	struct lttng_event_notifier *__event_notifier = __data;		\
	struct lttng_probe_ctx __lttng_probe_ctx = {			\
		.event = NULL,						\
		.event_notifier = __event_notifier,			\
		.interruptible = !irqs_disabled(),			\
	};								\
	union {								\
		size_t __dynamic_len_removed[ARRAY_SIZE(__event_fields___##_name)]; \
		char __interpreter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
	} __stackvar;							\
	struct probe_local_vars __tp_locvar;				\
	struct probe_local_vars *tp_locvar __attribute__((unused)) =	\
			&__tp_locvar;					\
	struct lttng_kernel_notifier_ctx __notif_ctx;			\
	bool __interpreter_stack_prepared = false;			\
									\
	if (unlikely(!READ_ONCE(__event_notifier->enabled)))		\
		return;							\
	_code_pre							\
	if (unlikely(!list_empty(&__event_notifier->filter_bytecode_runtime_head))) { \
		struct lttng_bytecode_runtime *bc_runtime;		\
		int __filter_record = __event_notifier->has_enablers_without_bytecode; \
									\
		__event_prepare_interpreter_stack__##_name(__stackvar.__interpreter_stack_data, \
				tp_locvar, _args);			\
		__interpreter_stack_prepared = true;			\
		lttng_list_for_each_entry_rcu(bc_runtime, &__event_notifier->filter_bytecode_runtime_head, node) { \
			if (unlikely(bc_runtime->interpreter_funcs.filter(bc_runtime, &__lttng_probe_ctx, \
					__stackvar.__interpreter_stack_data) & LTTNG_INTERPRETER_RECORD_FLAG)) \
				__filter_record = 1;			\
		}							\
		if (likely(!__filter_record))				\
			goto __post;					\
	}								\
									\
	__notif_ctx.eval_capture = LTTNG_READ_ONCE(__event_notifier->eval_capture); \
	if (unlikely(!__interpreter_stack_prepared && __notif_ctx.eval_capture)) \
		__event_prepare_interpreter_stack__##_name(		\
				__stackvar.__interpreter_stack_data,	\
				tp_locvar, _args);			\
									\
	__event_notifier->send_notification(__event_notifier,		\
			&__lttng_probe_ctx,				\
			__stackvar.__interpreter_stack_data,		\
			&__notif_ctx);					\
									\
__post:									\
	_code_post							\
	return;								\
}
1483
1484 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
1485 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
1486 static void __event_notifier_probe__##_name(void *__data) \
1487 { \
1488 struct probe_local_vars { _locvar }; \
1489 struct lttng_event_notifier *__event_notifier = __data; \
1490 struct lttng_probe_ctx __lttng_probe_ctx = { \
1491 .event = NULL, \
1492 .event_notifier = __event_notifier, \
1493 .interruptible = !irqs_disabled(), \
1494 }; \
1495 union { \
1496 size_t __dynamic_len_removed[ARRAY_SIZE(__event_fields___##_name)]; \
1497 char __interpreter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
1498 } __stackvar; \
1499 struct probe_local_vars __tp_locvar; \
1500 struct probe_local_vars *tp_locvar __attribute__((unused)) = \
1501 &__tp_locvar; \
1502 struct lttng_kernel_notifier_ctx __notif_ctx; \
1503 bool __interpreter_stack_prepared = false; \
1504 \
1505 if (unlikely(!READ_ONCE(__event_notifier->enabled))) \
1506 return; \
1507 _code_pre \
1508 if (unlikely(!list_empty(&__event_notifier->filter_bytecode_runtime_head))) { \
1509 struct lttng_bytecode_runtime *bc_runtime; \
1510 int __filter_record = __event_notifier->has_enablers_without_bytecode; \
1511 \
1512 __event_prepare_interpreter_stack__##_name(__stackvar.__interpreter_stack_data, \
1513 tp_locvar); \
1514 __interpreter_stack_prepared = true; \
1515 lttng_list_for_each_entry_rcu(bc_runtime, &__event_notifier->filter_bytecode_runtime_head, node) { \
1516 if (unlikely(bc_runtime->interpreter_funcs.filter(bc_runtime, &__lttng_probe_ctx, \
1517 __stackvar.__interpreter_stack_data) & LTTNG_INTERPRETER_RECORD_FLAG)) \
1518 __filter_record = 1; \
1519 } \
1520 if (likely(!__filter_record)) \
1521 goto __post; \
1522 } \
1523 \
1524 __notif_ctx.eval_capture = LTTNG_READ_ONCE(__event_notifier->eval_capture); \
1525 if (unlikely(!__interpreter_stack_prepared && __notif_ctx.eval_capture)) \
1526 __event_prepare_interpreter_stack__##_name( \
1527 __stackvar.__interpreter_stack_data, \
1528 tp_locvar); \
1529 \
1530 __event_notifier->send_notification(__event_notifier, \
1531 &__lttng_probe_ctx, \
1532 __stackvar.__interpreter_stack_data, \
1533 &__notif_ctx); \
1534 __post: \
1535 _code_post \
1536 return; \
1537 }
1538
1539 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
1540 /*
1541 * Stage 7 of the trace events.
1542 *
1543 * Create event descriptions.
1544 */
1545
1546 /* Named field types must be defined in lttng-types.h */
1547
1548 #include <lttng/events-reset.h> /* Reset all macros within LTTNG_TRACEPOINT_EVENT */
1549
1550 #ifndef TP_PROBE_CB
1551 #define TP_PROBE_CB(_template) &__event_probe__##_template
1552 #endif
1553
1554 #ifndef TP_EVENT_NOTIFIER_PROBE_CB
1555 #define TP_EVENT_NOTIFIER_PROBE_CB(_template) &__event_notifier_probe__##_template
1556 #endif
1557
1558 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
1559 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
1560 static const struct lttng_event_desc __event_desc___##_map = { \
1561 .fields = __event_fields___##_template, \
1562 .name = #_map, \
1563 .kname = #_name, \
1564 .probe_callback = (void *) TP_PROBE_CB(_template), \
1565 .nr_fields = ARRAY_SIZE(__event_fields___##_template), \
1566 .owner = THIS_MODULE, \
1567 .event_notifier_callback = (void *) TP_EVENT_NOTIFIER_PROBE_CB(_template), \
1568 };
1569
1570 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
1571 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
1572 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map)
1573
1574 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
1575
1576 /*
1577 * Stage 8 of the trace events.
1578 *
1579 * Create an array of event description pointers.
1580 */
1581
1582 #include <lttng/events-reset.h> /* Reset all macros within LTTNG_TRACEPOINT_EVENT */
1583
1584 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
1585 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
1586 &__event_desc___##_map,
1587
1588 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
1589 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
1590 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map)
1591
1592 #define TP_ID1(_token, _system) _token##_system
1593 #define TP_ID(_token, _system) TP_ID1(_token, _system)
1594
1595 static const struct lttng_event_desc *TP_ID(__event_desc___, TRACE_SYSTEM)[] = {
1596 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
1597 };
1598
1599 #undef TP_ID1
1600 #undef TP_ID
1601
1602 /*
1603 * Stage 9 of the trace events.
1604 *
1605 * Create a toplevel descriptor for the whole probe.
1606 */
1607
1608 #define TP_ID1(_token, _system) _token##_system
1609 #define TP_ID(_token, _system) TP_ID1(_token, _system)
1610
1611 /* non-const because list head will be modified when registered. */
1612 static __used struct lttng_probe_desc TP_ID(__probe_desc___, TRACE_SYSTEM) = {
1613 .provider = __stringify(TRACE_SYSTEM),
1614 .event_desc = TP_ID(__event_desc___, TRACE_SYSTEM),
1615 .nr_events = ARRAY_SIZE(TP_ID(__event_desc___, TRACE_SYSTEM)),
1616 .head = { NULL, NULL },
1617 .lazy_init_head = { NULL, NULL },
1618 .lazy = 0,
1619 };
1620
1621 #undef TP_ID1
1622 #undef TP_ID
1623
1624 /*
1625 * Stage 10 of the trace events.
1626 *
1627 * Register/unregister probes at module load/unload.
1628 */
1629
1630 #include <lttng/events-reset.h> /* Reset all macros within LTTNG_TRACEPOINT_EVENT */
1631
1632 #define TP_ID1(_token, _system) _token##_system
1633 #define TP_ID(_token, _system) TP_ID1(_token, _system)
1634 #define module_init_eval1(_token, _system) module_init(_token##_system)
1635 #define module_init_eval(_token, _system) module_init_eval1(_token, _system)
1636 #define module_exit_eval1(_token, _system) module_exit(_token##_system)
1637 #define module_exit_eval(_token, _system) module_exit_eval1(_token, _system)
1638
1639 #ifndef TP_MODULE_NOINIT
1640 static int TP_ID(__lttng_events_init__, TRACE_SYSTEM)(void)
1641 {
1642 wrapper_vmalloc_sync_mappings();
1643 return lttng_probe_register(&TP_ID(__probe_desc___, TRACE_SYSTEM));
1644 }
1645
1646 static void TP_ID(__lttng_events_exit__, TRACE_SYSTEM)(void)
1647 {
1648 lttng_probe_unregister(&TP_ID(__probe_desc___, TRACE_SYSTEM));
1649 }
1650
1651 #ifndef TP_MODULE_NOAUTOLOAD
1652 module_init_eval(__lttng_events_init__, TRACE_SYSTEM);
1653 module_exit_eval(__lttng_events_exit__, TRACE_SYSTEM);
1654 #endif
1655
1656 #endif
1657
1658 #undef module_init_eval
1659 #undef module_exit_eval
1660 #undef TP_ID1
1661 #undef TP_ID
1662
1663 #undef TP_PROTO
1664 #undef TP_ARGS