1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
3 * lttng/tracepoint-event-impl.h
5 * Copyright (C) 2009 Steven Rostedt <rostedt@goodmis.org>
6 * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
9 #include <linux/uaccess.h>
10 #include <linux/debugfs.h>
11 #include <linux/rculist.h>
12 #include <asm/byteorder.h>
13 #include <linux/swab.h>
15 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
16 #include <ringbuffer/frontend_types.h>
17 #include <ringbuffer/backend.h>
18 #include <wrapper/rcu.h>
19 #include <wrapper/user_namespace.h>
20 #include <lttng/types.h>
21 #include <lttng/probe-user.h>
22 #include <lttng/events.h>
23 #include <lttng/tracer-core.h>
24 #include <lttng/tp-mempool.h>
/* Substituted for a NULL ctf_string source pointer when serializing. */
#define __LTTNG_NULL_STRING	"(null)"
/*
 * Forward a comma-containing argument list through a single macro
 * parameter (GNU named-variadic form).
 */
#define PARAMS(args...) args
32 * Macro declarations used for all stages.
36 * LTTng name mapping macros. LTTng remaps some of the kernel events to
37 * enforce name-spacing.
39 #undef LTTNG_TRACEPOINT_EVENT_MAP
40 #define LTTNG_TRACEPOINT_EVENT_MAP(name, map, proto, args, fields) \
41 LTTNG_TRACEPOINT_EVENT_CLASS(map, \
45 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(map, name, map, PARAMS(proto), PARAMS(args))
47 #undef LTTNG_TRACEPOINT_EVENT_MAP_NOARGS
48 #define LTTNG_TRACEPOINT_EVENT_MAP_NOARGS(name, map, fields) \
49 LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(map, \
51 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(map, name, map)
53 #undef LTTNG_TRACEPOINT_EVENT_CODE_MAP
54 #define LTTNG_TRACEPOINT_EVENT_CODE_MAP(name, map, proto, args, _locvar, _code_pre, fields, _code_post) \
55 LTTNG_TRACEPOINT_EVENT_CLASS_CODE(map, \
62 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(map, name, map, PARAMS(proto), PARAMS(args))
64 #undef LTTNG_TRACEPOINT_EVENT_CODE
65 #define LTTNG_TRACEPOINT_EVENT_CODE(name, proto, args, _locvar, _code_pre, fields, _code_post) \
66 LTTNG_TRACEPOINT_EVENT_CODE_MAP(name, name, \
75 * LTTNG_TRACEPOINT_EVENT_CLASS can be used to add a generic function
76 * handlers for events. That is, if all events have the same parameters
77 * and just have distinct trace points. Each tracepoint can be defined
78 * with LTTNG_TRACEPOINT_EVENT_INSTANCE and that will map the
79 * LTTNG_TRACEPOINT_EVENT_CLASS to the tracepoint.
81 * LTTNG_TRACEPOINT_EVENT is a one to one mapping between tracepoint and
85 #undef LTTNG_TRACEPOINT_EVENT
86 #define LTTNG_TRACEPOINT_EVENT(name, proto, args, fields) \
87 LTTNG_TRACEPOINT_EVENT_MAP(name, name, \
92 #undef LTTNG_TRACEPOINT_EVENT_NOARGS
93 #define LTTNG_TRACEPOINT_EVENT_NOARGS(name, fields) \
94 LTTNG_TRACEPOINT_EVENT_MAP_NOARGS(name, name, PARAMS(fields))
96 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE
97 #define LTTNG_TRACEPOINT_EVENT_INSTANCE(template, name, proto, args) \
98 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(template, name, name, PARAMS(proto), PARAMS(args))
100 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS
101 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS(template, name) \
102 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(template, name, name)
104 #undef LTTNG_TRACEPOINT_EVENT_CLASS
105 #define LTTNG_TRACEPOINT_EVENT_CLASS(_name, _proto, _args, _fields) \
106 LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, PARAMS(_proto), PARAMS(_args), , , \
109 #undef LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS
110 #define LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(_name, _fields) \
111 LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, , , PARAMS(_fields), )
115 * Stage 1 of the trace events.
117 * Create dummy trace calls for each events, verifying that the LTTng module
118 * instrumentation headers match the kernel arguments. Will be optimized
119 * out by the compiler.
122 /* Reset all macros within TRACEPOINT_EVENT */
123 #include <lttng/events-reset.h>
/* Expand the tracepoint prototype list verbatim for this pass. */
#define TP_PROTO(...)	__VA_ARGS__

/* Expand the tracepoint argument list verbatim for this pass. */
#define TP_ARGS(...)	__VA_ARGS__
#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
/*
 * Emit a dummy prototype of the kernel's trace_##name() per instance so
 * the compiler verifies the probe "proto" matches the kernel tracepoint
 * arguments (see stage-1 banner above); optimized out by the compiler.
 */
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
void trace_##_name(_proto);

#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
/* Same compile-time check for events taking no arguments. */
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
void trace_##_name(void);
139 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
142 * Stage 1.1 of the trace events.
144 * Create dummy trace prototypes for each event class, and for each used
145 * template. This will allow checking whether the prototypes from the
146 * class and the instance using the class actually match.
149 #include <lttng/events-reset.h> /* Reset all macros within TRACE_EVENT */
/* Expand the tracepoint prototype list verbatim for this pass. */
#define TP_PROTO(...)	__VA_ARGS__

/* Expand the tracepoint argument list verbatim for this pass. */
#define TP_ARGS(...)	__VA_ARGS__

#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
/*
 * Re-declare the class prototype once per instance using the class: any
 * mismatch between instance and class prototypes becomes a compile error.
 */
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
void __event_template_proto___##_template(_proto);

#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
void __event_template_proto___##_template(void);

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
/* Declare the reference prototype for the event class itself. */
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
void __event_template_proto___##_name(_proto);

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
void __event_template_proto___##_name(void);
173 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
176 * Stage 1.2 of the trace event_notifier.
178 * Create dummy trace prototypes for each event class, and for each used
179 * template. This will allow checking whether the prototypes from the
180 * class and the instance using the class actually match.
183 #include <lttng/events-reset.h> /* Reset all macros within TRACE_EVENT */
/* Expand the tracepoint prototype list verbatim for this pass. */
#define TP_PROTO(...)	__VA_ARGS__

/* Expand the tracepoint argument list verbatim for this pass. */
#define TP_ARGS(...)	__VA_ARGS__

#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
/*
 * Same class/instance prototype cross-check as stage 1.1, but for the
 * event-notifier flavor of the generated probes.
 */
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
void __event_notifier_template_proto___##_template(_proto);

#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
void __event_notifier_template_proto___##_template(void);

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
void __event_notifier_template_proto___##_name(_proto);

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
void __event_notifier_template_proto___##_name(void);
207 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
211 * Stage 1.2 of tracepoint event generation
213 * Unfolding the enums
215 #include <lttng/events-reset.h> /* Reset all macros within TRACE_EVENT */
217 /* Enumeration entry (single value) */
218 #undef ctf_enum_value
219 #define ctf_enum_value(_string, _value) \
222 .signedness = lttng_is_signed_type(__typeof__(_value)), \
223 .value = lttng_is_signed_type(__typeof__(_value)) ? \
224 (long long) (_value) : (_value), \
227 .signedness = lttng_is_signed_type(__typeof__(_value)), \
228 .value = lttng_is_signed_type(__typeof__(_value)) ? \
229 (long long) (_value) : (_value), \
231 .string = (_string), \
234 /* Enumeration entry (range) */
235 #undef ctf_enum_range
236 #define ctf_enum_range(_string, _range_start, _range_end) \
239 .signedness = lttng_is_signed_type(__typeof__(_range_start)), \
240 .value = lttng_is_signed_type(__typeof__(_range_start)) ? \
241 (long long) (_range_start) : (_range_start), \
244 .signedness = lttng_is_signed_type(__typeof__(_range_end)), \
245 .value = lttng_is_signed_type(__typeof__(_range_end)) ? \
246 (long long) (_range_end) : (_range_end), \
248 .string = (_string), \
251 /* Enumeration entry (automatic value; follows the rules of CTF) */
253 #define ctf_enum_auto(_string) \
263 .string = (_string), \
269 #undef TP_ENUM_VALUES
270 #define TP_ENUM_VALUES(...) \
273 #undef LTTNG_TRACEPOINT_ENUM
274 #define LTTNG_TRACEPOINT_ENUM(_name, _values) \
275 const struct lttng_enum_entry __enum_values__##_name[] = { \
279 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
282 * Stage 2 of the trace events.
284 * Create event field type metadata section.
285 * Each event produce an array of fields.
288 /* Reset all macros within TRACEPOINT_EVENT */
289 #include <lttng/events-reset.h>
290 #include <lttng/events-write.h>
291 #include <lttng/events-nowrite.h>
293 #undef _ctf_integer_ext
294 #define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \
297 .type = __type_integer(_type, 0, 0, -1, _byte_order, _base, none), \
298 .nowrite = _nowrite, \
303 #undef _ctf_array_encoded
304 #define _ctf_array_encoded(_type, _item, _src, _length, \
305 _encoding, _byte_order, _elem_type_base, _user, _nowrite) \
310 .atype = atype_array_nestable, \
315 .elem_type = __LTTNG_COMPOUND_LITERAL(struct lttng_type, \
316 __type_integer(_type, 0, 0, -1, _byte_order, _elem_type_base, _encoding)), \
322 .nowrite = _nowrite, \
327 #undef _ctf_array_bitfield
328 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
333 .atype = atype_array_nestable, \
338 .elem_type = __LTTNG_COMPOUND_LITERAL(struct lttng_type, \
339 __type_integer(_type, 1, 1, 0, __LITTLE_ENDIAN, 10, none)), \
340 .length = (_length) * sizeof(_type) * CHAR_BIT, \
341 .alignment = lttng_alignof(_type), \
345 .nowrite = _nowrite, \
351 #undef _ctf_sequence_encoded
352 #define _ctf_sequence_encoded(_type, _item, _src, \
353 _length_type, _src_length, _encoding, \
354 _byte_order, _elem_type_base, _user, _nowrite) \
356 .name = "_" #_item "_length", \
357 .type = __type_integer(_length_type, 0, 0, -1, __BYTE_ORDER, 10, none), \
358 .nowrite = _nowrite, \
365 .atype = atype_sequence_nestable, \
368 .sequence_nestable = \
370 .length_name = "_" #_item "_length", \
371 .elem_type = __LTTNG_COMPOUND_LITERAL(struct lttng_type, \
372 __type_integer(_type, 0, 0, -1, _byte_order, _elem_type_base, _encoding)), \
377 .nowrite = _nowrite, \
382 #undef _ctf_sequence_bitfield
383 #define _ctf_sequence_bitfield(_type, _item, _src, \
384 _length_type, _src_length, \
387 .name = "_" #_item "_length", \
388 .type = __type_integer(_length_type, 0, 0, -1, __BYTE_ORDER, 10, none), \
389 .nowrite = _nowrite, \
396 .atype = atype_sequence_nestable, \
399 .sequence_nestable = \
401 .length_name = "_" #_item "_length", \
402 .elem_type = __LTTNG_COMPOUND_LITERAL(struct lttng_type, \
403 __type_integer(_type, 1, 1, 0, __LITTLE_ENDIAN, 10, none)), \
404 .alignment = lttng_alignof(_type), \
408 .nowrite = _nowrite, \
414 #define _ctf_string(_item, _src, _user, _nowrite) \
419 .atype = atype_string, \
422 .string = { .encoding = lttng_encode_UTF8 }, \
425 .nowrite = _nowrite, \
431 #define _ctf_enum(_name, _type, _item, _src, _user, _nowrite) \
435 .atype = atype_enum_nestable, \
438 .desc = &__enum_##_name, \
439 .container_type = __LTTNG_COMPOUND_LITERAL(struct lttng_type, \
440 __type_integer(_type, 0, 0, -1, __BYTE_ORDER, 10, none)), \
444 .nowrite = _nowrite, \
449 #undef ctf_custom_field
450 #define ctf_custom_field(_type, _item, _code) \
459 #undef ctf_custom_type
460 #define ctf_custom_type(...) __VA_ARGS__
463 #define TP_FIELDS(...) __VA_ARGS__ /* Only one used in this phase */
465 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
466 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
467 static const struct lttng_event_field __event_fields___##_name[] = { \
471 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
472 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
473 LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, PARAMS(_fields), _code_post)
475 #undef LTTNG_TRACEPOINT_ENUM
476 #define LTTNG_TRACEPOINT_ENUM(_name, _values) \
477 static const struct lttng_enum_desc __enum_##_name = { \
479 .entries = __enum_values__##_name, \
480 .nr_entries = ARRAY_SIZE(__enum_values__##_name), \
483 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
486 * Stage 3 of the trace events.
488 * Create probe callback prototypes.
491 /* Reset all macros within TRACEPOINT_EVENT */
492 #include <lttng/events-reset.h>
/* Expand the tracepoint prototype list verbatim for this pass. */
#define TP_PROTO(...)	__VA_ARGS__

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
/*
 * Declare both probe callbacks generated per class: the tracing probe
 * (writes into the ring buffer) and the event-notifier probe.
 */
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
static void __event_probe__##_name(void *__data, _proto); \
static void __event_notifier_probe__##_name(void *__data, _proto);

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
static void __event_probe__##_name(void *__data); \
static void __event_notifier_probe__##_name(void *__data);
507 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
510 * Stage 4 of the trace events.
512 * Create static inline function that calculates event size.
515 /* Reset all macros within TRACEPOINT_EVENT */
516 #include <lttng/events-reset.h>
517 #include <lttng/events-write.h>
#undef _ctf_integer_ext
/* Fixed-size integer: align the running length, then add sizeof(type). */
#define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \
	__event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
	__event_len += sizeof(_type);

#undef _ctf_array_encoded
/* Fixed-length array: align once, then count _length elements. */
#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _byte_order, _base, _user, _nowrite) \
	__event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
	__event_len += sizeof(_type) * (_length);

#undef _ctf_array_bitfield
/* Bitfield arrays are sized like little-endian arrays of _type. */
#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
	_ctf_array_encoded(_type, _item, _src, _length, none, __LITTLE_ENDIAN, 0, _user, _nowrite)
533 #undef _ctf_sequence_encoded
534 #define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
535 _src_length, _encoding, _byte_order, _base, _user, _nowrite) \
536 __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_length_type)); \
537 __event_len += sizeof(_length_type); \
538 __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
540 size_t __seqlen = (_src_length); \
542 if (unlikely(++this_cpu_ptr(<tng_dynamic_len_stack)->offset >= LTTNG_DYNAMIC_LEN_STACK_SIZE)) \
544 barrier(); /* reserve before use. */ \
545 this_cpu_ptr(<tng_dynamic_len_stack)->stack[this_cpu_ptr(<tng_dynamic_len_stack)->offset - 1] = __seqlen; \
546 __event_len += sizeof(_type) * __seqlen; \
549 #undef _ctf_sequence_bitfield
550 #define _ctf_sequence_bitfield(_type, _item, _src, \
551 _length_type, _src_length, \
553 _ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \
554 none, __LITTLE_ENDIAN, 10, _user, _nowrite)
557 * ctf_user_string includes \0. If returns 0, it faulted, so we set size to
561 #define _ctf_string(_item, _src, _user, _nowrite) \
562 if (unlikely(++this_cpu_ptr(<tng_dynamic_len_stack)->offset >= LTTNG_DYNAMIC_LEN_STACK_SIZE)) \
564 barrier(); /* reserve before use. */ \
566 __event_len += this_cpu_ptr(<tng_dynamic_len_stack)->stack[this_cpu_ptr(<tng_dynamic_len_stack)->offset - 1] = \
567 max_t(size_t, lttng_strlen_user_inatomic(_src), 1); \
569 __event_len += this_cpu_ptr(<tng_dynamic_len_stack)->stack[this_cpu_ptr(<tng_dynamic_len_stack)->offset - 1] = \
570 strlen((_src) ? (_src) : __LTTNG_NULL_STRING) + 1; \
/* An enum field is sized exactly like its integer container type. */
#define _ctf_enum(_name, _type, _item, _src, _user, _nowrite) \
	_ctf_integer_ext(_type, _item, _src, __BYTE_ORDER, 10, _user, _nowrite)

/* Manual alignment directive, usable from ctf_custom_field code. */
#define ctf_align(_type) \
	__event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type));
581 #undef ctf_custom_field
582 #define ctf_custom_field(_type, _item, _code) \
587 #undef ctf_custom_code
588 #define ctf_custom_code(...) __VA_ARGS__
591 #define TP_PROTO(...) __VA_ARGS__
594 #define TP_FIELDS(...) __VA_ARGS__
597 #define TP_locvar(...) __VA_ARGS__
599 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
600 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
601 static inline ssize_t __event_get_size__##_name(void *__tp_locvar, _proto) \
603 size_t __event_len = 0; \
604 unsigned int __dynamic_len_idx __attribute__((unused)) = 0; \
605 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
608 return __event_len; \
611 __attribute__((unused)); \
615 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
616 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
617 static inline ssize_t __event_get_size__##_name(void *__tp_locvar) \
619 size_t __event_len = 0; \
620 unsigned int __dynamic_len_idx __attribute__((unused)) = 0; \
621 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
624 return __event_len; \
627 __attribute__((unused)); \
631 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
635 * Stage 4.1 of tracepoint event generation.
637 * Create static inline function that layout the filter stack data.
638 * We make both write and nowrite data available to the filter.
641 /* Reset all macros within TRACEPOINT_EVENT */
642 #include <lttng/events-reset.h>
643 #include <lttng/events-write.h>
644 #include <lttng/events-nowrite.h>
646 #undef _ctf_integer_ext_fetched
647 #define _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) \
648 if (lttng_is_signed_type(_type)) { \
649 int64_t __ctf_tmp_int64; \
650 switch (sizeof(_type)) { \
653 union { _type t; int8_t v; } __tmp = { (_type) (_src) }; \
654 __ctf_tmp_int64 = (int64_t) __tmp.v; \
659 union { _type t; int16_t v; } __tmp = { (_type) (_src) }; \
660 if (_byte_order != __BYTE_ORDER) \
661 __swab16s(&__tmp.v); \
662 __ctf_tmp_int64 = (int64_t) __tmp.v; \
667 union { _type t; int32_t v; } __tmp = { (_type) (_src) }; \
668 if (_byte_order != __BYTE_ORDER) \
669 __swab32s(&__tmp.v); \
670 __ctf_tmp_int64 = (int64_t) __tmp.v; \
675 union { _type t; int64_t v; } __tmp = { (_type) (_src) }; \
676 if (_byte_order != __BYTE_ORDER) \
677 __swab64s(&__tmp.v); \
678 __ctf_tmp_int64 = (int64_t) __tmp.v; \
684 memcpy(__stack_data, &__ctf_tmp_int64, sizeof(int64_t)); \
686 uint64_t __ctf_tmp_uint64; \
687 switch (sizeof(_type)) { \
690 union { _type t; uint8_t v; } __tmp = { (_type) (_src) }; \
691 __ctf_tmp_uint64 = (uint64_t) __tmp.v; \
696 union { _type t; uint16_t v; } __tmp = { (_type) (_src) }; \
697 if (_byte_order != __BYTE_ORDER) \
698 __swab16s(&__tmp.v); \
699 __ctf_tmp_uint64 = (uint64_t) __tmp.v; \
704 union { _type t; uint32_t v; } __tmp = { (_type) (_src) }; \
705 if (_byte_order != __BYTE_ORDER) \
706 __swab32s(&__tmp.v); \
707 __ctf_tmp_uint64 = (uint64_t) __tmp.v; \
712 union { _type t; uint64_t v; } __tmp = { (_type) (_src) }; \
713 if (_byte_order != __BYTE_ORDER) \
714 __swab64s(&__tmp.v); \
715 __ctf_tmp_uint64 = (uint64_t) __tmp.v; \
721 memcpy(__stack_data, &__ctf_tmp_uint64, sizeof(uint64_t)); \
723 __stack_data += sizeof(int64_t);
#undef _ctf_integer_ext_isuser0
/* Kernel-space integer source: fetch the value directly. */
#define _ctf_integer_ext_isuser0(_type, _item, _src, _byte_order, _base, _nowrite) \
	_ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite)
729 #undef _ctf_integer_ext_isuser1
730 #define _ctf_integer_ext_isuser1(_type, _item, _user_src, _byte_order, _base, _nowrite) \
733 char __array[sizeof(_user_src)]; \
734 __typeof__(_user_src) __v; \
736 if (lib_ring_buffer_copy_from_user_check_nofault(__tmp_fetch.__array, \
737 &(_user_src), sizeof(_user_src))) \
738 memset(__tmp_fetch.__array, 0, sizeof(__tmp_fetch.__array)); \
739 _ctf_integer_ext_fetched(_type, _item, __tmp_fetch.__v, _byte_order, _base, _nowrite) \
#undef _ctf_integer_ext
/*
 * Dispatch on the _user flag (token-pasted 0/1) to the kernel-space or
 * user-space fetch variant.
 */
#define _ctf_integer_ext(_type, _item, _user_src, _byte_order, _base, _user, _nowrite) \
	_ctf_integer_ext_isuser##_user(_type, _item, _user_src, _byte_order, _base, _nowrite)
746 #undef _ctf_array_encoded
747 #define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _byte_order, _base, _user, _nowrite) \
749 unsigned long __ctf_tmp_ulong = (unsigned long) (_length); \
750 const void *__ctf_tmp_ptr = (_src); \
751 memcpy(__stack_data, &__ctf_tmp_ulong, sizeof(unsigned long)); \
752 __stack_data += sizeof(unsigned long); \
753 memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *)); \
754 __stack_data += sizeof(void *); \
#undef _ctf_array_bitfield
/* Bitfield arrays reach the interpreter stack like plain arrays. */
#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
	_ctf_array_encoded(_type, _item, _src, _length, none, __LITTLE_ENDIAN, 0, _user, _nowrite)
761 #undef _ctf_sequence_encoded
762 #define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
763 _src_length, _encoding, _byte_order, _base, _user, _nowrite) \
765 unsigned long __ctf_tmp_ulong = (unsigned long) (_src_length); \
766 const void *__ctf_tmp_ptr = (_src); \
767 memcpy(__stack_data, &__ctf_tmp_ulong, sizeof(unsigned long)); \
768 __stack_data += sizeof(unsigned long); \
769 memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *)); \
770 __stack_data += sizeof(void *); \
773 #undef _ctf_sequence_bitfield
774 #define _ctf_sequence_bitfield(_type, _item, _src, \
775 _length_type, _src_length, \
777 _ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \
778 none, __LITTLE_ENDIAN, 10, _user, _nowrite)
781 #define _ctf_string(_item, _src, _user, _nowrite) \
783 const void *__ctf_tmp_ptr = \
784 ((_src) ? (_src) : __LTTNG_NULL_STRING); \
785 memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *)); \
786 __stack_data += sizeof(void *); \
/* Enum values reach the interpreter stack as their integer container. */
#define _ctf_enum(_name, _type, _item, _src, _user, _nowrite) \
	_ctf_integer_ext(_type, _item, _src, __BYTE_ORDER, 10, _user, _nowrite)
794 #define TP_PROTO(...) __VA_ARGS__
797 #define TP_FIELDS(...) __VA_ARGS__
800 #define TP_locvar(...) __VA_ARGS__
802 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
803 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
805 void __event_prepare_interpreter_stack__##_name(char *__stack_data, \
808 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
813 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
814 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
816 void __event_prepare_interpreter_stack__##_name(char *__stack_data, \
817 void *__tp_locvar, _proto) \
819 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
824 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
827 * Stage 5 of the trace events.
829 * Create static inline function that calculates event payload alignment.
832 /* Reset all macros within TRACEPOINT_EVENT */
833 #include <lttng/events-reset.h>
834 #include <lttng/events-write.h>
#undef _ctf_integer_ext
/* Event payload alignment is the max alignment over all fields. */
#define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \
	__event_align = max_t(size_t, __event_align, lttng_alignof(_type));

#undef _ctf_array_encoded
#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _byte_order, _base, _user, _nowrite) \
	__event_align = max_t(size_t, __event_align, lttng_alignof(_type));

#undef _ctf_array_bitfield
#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
	_ctf_array_encoded(_type, _item, _src, _length, none, __LITTLE_ENDIAN, 0, _user, _nowrite)

#undef _ctf_sequence_encoded
/* A sequence contributes both its length-field and element alignments. */
#define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
			_src_length, _encoding, _byte_order, _base, _user, _nowrite) \
	__event_align = max_t(size_t, __event_align, lttng_alignof(_length_type)); \
	__event_align = max_t(size_t, __event_align, lttng_alignof(_type));
854 #undef _ctf_sequence_bitfield
855 #define _ctf_sequence_bitfield(_type, _item, _src, \
856 _length_type, _src_length, \
858 _ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \
859 none, __LITTLE_ENDIAN, 10, _user, _nowrite)
/* Strings are byte-aligned: no contribution to event alignment. */
#define _ctf_string(_item, _src, _user, _nowrite)

/* Enums align like their integer container type. */
#define _ctf_enum(_name, _type, _item, _src, _user, _nowrite) \
	_ctf_integer_ext(_type, _item, _src, __BYTE_ORDER, 10, _user, _nowrite)

/* Manual alignment directive for ctf_custom_field code. */
#define ctf_align(_type) \
	__event_align = max_t(size_t, __event_align, lttng_alignof(_type));

/* Pass-through expansions for the alignment-calculation pass. */
#define TP_PROTO(...)	__VA_ARGS__
#define TP_FIELDS(...)	__VA_ARGS__
#define TP_locvar(...)	__VA_ARGS__

#undef ctf_custom_field
/* Only the custom field's _code matters for alignment. */
#define ctf_custom_field(_type, _item, _code) _code
884 #undef ctf_custom_code
885 #define ctf_custom_code(...) \
890 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
891 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
892 static inline size_t __event_get_align__##_name(void *__tp_locvar, _proto) \
894 size_t __event_align = 1; \
895 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
898 return __event_align; \
901 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
902 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
903 static inline size_t __event_get_align__##_name(void *__tp_locvar) \
905 size_t __event_align = 1; \
906 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
909 return __event_align; \
912 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
915 * Stage 6 of tracepoint event generation.
917 * Create the probe function. This function calls event size calculation
918 * and writes event data into the buffer.
921 /* Reset all macros within TRACEPOINT_EVENT */
922 #include <lttng/events-reset.h>
923 #include <lttng/events-write.h>
925 #undef _ctf_integer_ext_fetched
926 #define _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) \
928 _type __tmp = _src; \
929 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(__tmp));\
930 __chan->ops->event_write(&__ctx, &__tmp, sizeof(__tmp));\
#undef _ctf_integer_ext_isuser0
/* Kernel-space integer source: write the fetched value directly. */
#define _ctf_integer_ext_isuser0(_type, _item, _src, _byte_order, _base, _nowrite) \
	_ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite)
937 #undef _ctf_integer_ext_isuser1
938 #define _ctf_integer_ext_isuser1(_type, _item, _user_src, _byte_order, _base, _nowrite) \
941 char __array[sizeof(_user_src)]; \
942 __typeof__(_user_src) __v; \
944 if (lib_ring_buffer_copy_from_user_check_nofault(__tmp_fetch.__array, \
945 &(_user_src), sizeof(_user_src))) \
946 memset(__tmp_fetch.__array, 0, sizeof(__tmp_fetch.__array)); \
947 _ctf_integer_ext_fetched(_type, _item, __tmp_fetch.__v, _byte_order, _base, _nowrite) \
#undef _ctf_integer_ext
/*
 * Dispatch on the _user flag (token-pasted 0/1) to the kernel-space or
 * user-space write variant.
 */
#define _ctf_integer_ext(_type, _item, _user_src, _byte_order, _base, _user, _nowrite) \
	_ctf_integer_ext_isuser##_user(_type, _item, _user_src, _byte_order, _base, _nowrite)
954 #undef _ctf_array_encoded
955 #define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _byte_order, _base, _user, _nowrite) \
956 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
958 __chan->ops->event_write_from_user(&__ctx, _src, sizeof(_type) * (_length)); \
960 __chan->ops->event_write(&__ctx, _src, sizeof(_type) * (_length)); \
963 #if (__BYTE_ORDER == __LITTLE_ENDIAN)
964 #undef _ctf_array_bitfield
965 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
966 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
968 __chan->ops->event_write_from_user(&__ctx, _src, sizeof(_type) * (_length)); \
970 __chan->ops->event_write(&__ctx, _src, sizeof(_type) * (_length)); \
972 #else /* #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
974 * For big endian, we need to byteswap into little endian.
976 #undef _ctf_array_bitfield
977 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
978 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
982 for (_i = 0; _i < (_length); _i++) { \
986 if (get_user(_tmp, (_type *) _src + _i)) \
989 _tmp = ((_type *) _src)[_i]; \
991 switch (sizeof(_type)) { \
995 _tmp = cpu_to_le16(_tmp); \
998 _tmp = cpu_to_le32(_tmp); \
1001 _tmp = cpu_to_le64(_tmp); \
1006 __chan->ops->event_write(&__ctx, &_tmp, sizeof(_type)); \
1009 #endif /* #else #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
1011 #undef _ctf_sequence_encoded
1012 #define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
1013 _src_length, _encoding, _byte_order, _base, _user, _nowrite) \
1015 _length_type __tmpl = this_cpu_ptr(<tng_dynamic_len_stack)->stack[__dynamic_len_idx]; \
1016 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\
1017 __chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\
1019 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
1021 __chan->ops->event_write_from_user(&__ctx, _src, \
1022 sizeof(_type) * __get_dynamic_len(dest)); \
1024 __chan->ops->event_write(&__ctx, _src, \
1025 sizeof(_type) * __get_dynamic_len(dest)); \
1028 #if (__BYTE_ORDER == __LITTLE_ENDIAN)
1029 #undef _ctf_sequence_bitfield
1030 #define _ctf_sequence_bitfield(_type, _item, _src, \
1031 _length_type, _src_length, \
1034 _length_type __tmpl = this_cpu_ptr(<tng_dynamic_len_stack)->stack[__dynamic_len_idx] * sizeof(_type) * CHAR_BIT; \
1035 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\
1036 __chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\
1038 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
1040 __chan->ops->event_write_from_user(&__ctx, _src, \
1041 sizeof(_type) * __get_dynamic_len(dest)); \
1043 __chan->ops->event_write(&__ctx, _src, \
1044 sizeof(_type) * __get_dynamic_len(dest)); \
1046 #else /* #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
1048 * For big endian, we need to byteswap into little endian.
1050 #undef _ctf_sequence_bitfield
1051 #define _ctf_sequence_bitfield(_type, _item, _src, \
1052 _length_type, _src_length, \
1055 _length_type __tmpl = this_cpu_ptr(<tng_dynamic_len_stack)->stack[__dynamic_len_idx] * sizeof(_type) * CHAR_BIT; \
1056 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\
1057 __chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\
1059 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
1061 size_t _i, _length; \
1063 _length = __get_dynamic_len(dest); \
1064 for (_i = 0; _i < _length; _i++) { \
1068 if (get_user(_tmp, (_type *) _src + _i)) \
1071 _tmp = ((_type *) _src)[_i]; \
1073 switch (sizeof(_type)) { \
1077 _tmp = cpu_to_le16(_tmp); \
1080 _tmp = cpu_to_le32(_tmp); \
1083 _tmp = cpu_to_le64(_tmp); \
1088 __chan->ops->event_write(&__ctx, &_tmp, sizeof(_type)); \
1091 #endif /* #else #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
1094 #define _ctf_string(_item, _src, _user, _nowrite) \
1096 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(*(_src))); \
1097 __chan->ops->event_strcpy_from_user(&__ctx, _src, \
1098 __get_dynamic_len(dest)); \
1100 const char *__ctf_tmp_string = \
1101 ((_src) ? (_src) : __LTTNG_NULL_STRING); \
1102 lib_ring_buffer_align_ctx(&__ctx, \
1103 lttng_alignof(*__ctf_tmp_string)); \
1104 __chan->ops->event_strcpy(&__ctx, __ctf_tmp_string, \
1105 __get_dynamic_len(dest)); \
/* Write an enum field as its integer container value. */
#define _ctf_enum(_name, _type, _item, _src, _user, _nowrite) \
	_ctf_integer_ext(_type, _item, _src, __BYTE_ORDER, 10, _user, _nowrite)

/* Manual output alignment, usable from ctf_custom_code blocks. */
#define ctf_align(_type) \
	lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type));

#undef ctf_custom_field
/* Only the custom field's _code is executed when writing the event. */
#define ctf_custom_field(_type, _item, _code) _code
1119 #undef ctf_custom_code
1120 #define ctf_custom_code(...) \
/*
 * Beware: this get len actually consumes the len value: it pops the
 * dynamic length (pushed onto the per-CPU lttng_dynamic_len_stack during
 * the size-calculation pass) and post-increments the stack index.
 */
#undef __get_dynamic_len
#define __get_dynamic_len(field)	this_cpu_ptr(&lttng_dynamic_len_stack)->stack[__dynamic_len_idx++]
/* Pass-through expansions for the probe-body generation pass. */
#define TP_PROTO(...)	__VA_ARGS__
#define TP_ARGS(...)	__VA_ARGS__
#define TP_FIELDS(...)	__VA_ARGS__
#define TP_locvar(...)	__VA_ARGS__
#define TP_code_pre(...)	__VA_ARGS__
#define TP_code_post(...)	__VA_ARGS__
1148 * For state dump, check that "session" argument (mandatory) matches the
1149 * session this event belongs to. Ensures that we write state dump data only
1150 * into the started session, not into all sessions.
#ifdef TP_SESSION_CHECK
/* Parenthesize macro arguments so complex expressions expand safely. */
#define _TP_SESSION_CHECK(session, csession) ((session) == (csession))
#else /* TP_SESSION_CHECK */
/* No session filtering requested: check compiles away to constant true. */
#define _TP_SESSION_CHECK(session, csession) 1
#endif /* TP_SESSION_CHECK */
1159 * Using twice size for filter stack data to hold size and pointer for
1160 * each field (worse case). For integers, max size required is 64-bit.
1161 * Same for double-precision floats. Those fit within
1162 * 2*sizeof(unsigned long) for all supported architectures.
1163 * Perform UNION (||) of filter runtime list.
1165 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
1166 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
1167 static void __event_probe__##_name(void *__data, _proto) \
1169 struct probe_local_vars { _locvar }; \
1170 struct lttng_event *__event = __data; \
1171 struct lttng_probe_ctx __lttng_probe_ctx = { \
1173 .event_notifier = NULL, \
1174 .interruptible = !irqs_disabled(), \
1176 struct lttng_event_container *__container = __event->container; \
1177 struct lttng_session *__session = __container->session; \
1178 struct lib_ring_buffer_ctx __ctx; \
1179 size_t __orig_dynamic_len_offset, __dynamic_len_idx __attribute__((unused)); \
1181 size_t __dynamic_len_removed[ARRAY_SIZE(__event_fields___##_name)]; \
1182 char __filter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
1185 struct probe_local_vars __tp_locvar; \
1186 struct probe_local_vars *tp_locvar __attribute__((unused)) = \
1188 struct lttng_id_tracker_rcu *__lf; \
1190 if (!_TP_SESSION_CHECK(session, __session)) \
1192 if (unlikely(!LTTNG_READ_ONCE(__session->active))) \
1194 if (unlikely(!LTTNG_READ_ONCE(__container->enabled))) \
1196 if (unlikely(!LTTNG_READ_ONCE(__event->enabled))) \
1198 __lf = lttng_rcu_dereference(__session->pid_tracker.p); \
1199 if (__lf && likely(!lttng_id_tracker_lookup(__lf, current->tgid))) \
1201 __lf = lttng_rcu_dereference(__session->vpid_tracker.p); \
1202 if (__lf && likely(!lttng_id_tracker_lookup(__lf, task_tgid_vnr(current)))) \
1204 __lf = lttng_rcu_dereference(__session->uid_tracker.p); \
1205 if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
1206 lttng_current_uid()))) \
1208 __lf = lttng_rcu_dereference(__session->vuid_tracker.p); \
1209 if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
1210 lttng_current_vuid()))) \
1212 __lf = lttng_rcu_dereference(__session->gid_tracker.p); \
1213 if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
1214 lttng_current_gid()))) \
1216 __lf = lttng_rcu_dereference(__session->vgid_tracker.p); \
1217 if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
1218 lttng_current_vgid()))) \
	__orig_dynamic_len_offset = this_cpu_ptr(&lttng_dynamic_len_stack)->offset;	\
1221 __dynamic_len_idx = __orig_dynamic_len_offset; \
1223 if (unlikely(!list_empty(&__event->filter_bytecode_runtime_head))) { \
1224 struct lttng_bytecode_runtime *bc_runtime; \
1225 int __filter_record = __event->has_enablers_without_bytecode; \
1227 __event_prepare_interpreter_stack__##_name(__stackvar.__filter_stack_data, \
1228 tp_locvar, _args); \
1229 lttng_list_for_each_entry_rcu(bc_runtime, &__event->filter_bytecode_runtime_head, node) { \
1230 if (unlikely(bc_runtime->interpreter_funcs.filter(bc_runtime, &__lttng_probe_ctx, \
1231 __stackvar.__filter_stack_data) & LTTNG_INTERPRETER_RECORD_FLAG)) { \
1232 __filter_record = 1; \
1236 if (likely(!__filter_record)) \
1239 switch (__container->type) { \
1240 case LTTNG_EVENT_CONTAINER_CHANNEL: \
1242 struct lttng_channel *__chan = lttng_event_container_get_channel(__container); \
1243 ssize_t __event_len; \
1244 size_t __event_align; \
1246 __event_len = __event_get_size__##_name(tp_locvar, _args); \
1247 if (unlikely(__event_len < 0)) { \
1248 lib_ring_buffer_lost_event_too_big(__chan->chan); \
1251 __event_align = __event_get_align__##_name(tp_locvar, _args); \
1252 lib_ring_buffer_ctx_init(&__ctx, __chan->chan, &__lttng_probe_ctx, __event_len, \
1253 __event_align, -1); \
1254 __ret = __chan->ops->event_reserve(&__ctx, __event->id); \
1258 __chan->ops->event_commit(&__ctx); \
1261 case LTTNG_EVENT_CONTAINER_COUNTER: \
1263 struct lttng_counter *__counter = lttng_event_container_get_counter(__container); \
1264 size_t __index = __event->id; \
1266 (void) __counter->ops->counter_add(__counter->counter, &__index, 1); \
1272 barrier(); /* use before un-reserve. */ \
	this_cpu_ptr(&lttng_dynamic_len_stack)->offset = __orig_dynamic_len_offset;	\
1277 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
1278 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
1279 static void __event_probe__##_name(void *__data) \
1281 struct probe_local_vars { _locvar }; \
1282 struct lttng_event *__event = __data; \
1283 struct lttng_probe_ctx __lttng_probe_ctx = { \
1285 .event_notifier = NULL, \
1286 .interruptible = !irqs_disabled(), \
1288 struct lttng_event_container *__container = __event->container; \
1289 struct lttng_session *__session = __container->session; \
1290 size_t __orig_dynamic_len_offset, __dynamic_len_idx __attribute__((unused)); \
1292 size_t __dynamic_len_removed[ARRAY_SIZE(__event_fields___##_name)]; \
1293 char __filter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
1296 struct probe_local_vars __tp_locvar; \
1297 struct probe_local_vars *tp_locvar __attribute__((unused)) = \
1299 struct lttng_id_tracker_rcu *__lf; \
1301 if (!_TP_SESSION_CHECK(session, __session)) \
1303 if (unlikely(!LTTNG_READ_ONCE(__session->active))) \
1305 if (unlikely(!LTTNG_READ_ONCE(__container->enabled))) \
1307 if (unlikely(!LTTNG_READ_ONCE(__event->enabled))) \
1309 __lf = lttng_rcu_dereference(__session->pid_tracker.p); \
1310 if (__lf && likely(!lttng_id_tracker_lookup(__lf, current->tgid))) \
1312 __lf = lttng_rcu_dereference(__session->vpid_tracker.p); \
1313 if (__lf && likely(!lttng_id_tracker_lookup(__lf, task_tgid_vnr(current)))) \
1315 __lf = lttng_rcu_dereference(__session->uid_tracker.p); \
1316 if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
1317 lttng_current_uid()))) \
1319 __lf = lttng_rcu_dereference(__session->vuid_tracker.p); \
1320 if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
1321 lttng_current_vuid()))) \
1323 __lf = lttng_rcu_dereference(__session->gid_tracker.p); \
1324 if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
1325 lttng_current_gid()))) \
1327 __lf = lttng_rcu_dereference(__session->vgid_tracker.p); \
1328 if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
1329 lttng_current_vgid()))) \
	__orig_dynamic_len_offset = this_cpu_ptr(&lttng_dynamic_len_stack)->offset;	\
1332 __dynamic_len_idx = __orig_dynamic_len_offset; \
1334 if (unlikely(!list_empty(&__event->filter_bytecode_runtime_head))) { \
1335 struct lttng_bytecode_runtime *bc_runtime; \
1336 int __filter_record = __event->has_enablers_without_bytecode; \
1338 __event_prepare_interpreter_stack__##_name(__stackvar.__filter_stack_data, \
1340 lttng_list_for_each_entry_rcu(bc_runtime, &__event->filter_bytecode_runtime_head, node) { \
1341 if (unlikely(bc_runtime->interpreter_funcs.filter(bc_runtime, &__lttng_probe_ctx, \
1342 __stackvar.__filter_stack_data) & LTTNG_INTERPRETER_RECORD_FLAG)) { \
1343 __filter_record = 1; \
1347 if (likely(!__filter_record)) \
1350 switch (__container->type) { \
1351 case LTTNG_EVENT_CONTAINER_CHANNEL: \
1353 struct lttng_channel *__chan = lttng_event_container_get_channel(__container); \
1354 struct lib_ring_buffer_ctx __ctx; \
1355 ssize_t __event_len; \
1356 size_t __event_align; \
1358 __event_len = __event_get_size__##_name(tp_locvar); \
1359 if (unlikely(__event_len < 0)) { \
1360 lib_ring_buffer_lost_event_too_big(__chan->chan); \
1363 __event_align = __event_get_align__##_name(tp_locvar); \
1364 lib_ring_buffer_ctx_init(&__ctx, __chan->chan, &__lttng_probe_ctx, __event_len, \
1365 __event_align, -1); \
1366 __ret = __chan->ops->event_reserve(&__ctx, __event->id); \
1370 __chan->ops->event_commit(&__ctx); \
1373 case LTTNG_EVENT_CONTAINER_COUNTER: \
1375 struct lttng_counter *__counter = lttng_event_container_get_counter(__container); \
1376 size_t __index = __event->id; \
1378 (void) __counter->ops->counter_add(__counter->counter, &__index, 1); \
1384 barrier(); /* use before un-reserve. */ \
	this_cpu_ptr(&lttng_dynamic_len_stack)->offset = __orig_dynamic_len_offset;	\
1389 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
1391 #undef __get_dynamic_len
1395 * Stage 6.1 of tracepoint generation: generate event notifier probes
1397 * Create the probe function. This function evaluates the filter bytecode and
1398 * queue a notification to be sent to userspace.
1401 #include <lttng/events-reset.h> /* Reset all macros within LTTNG_TRACEPOINT_EVENT */
1404 #define TP_PROTO(...) __VA_ARGS__
1407 #define TP_ARGS(...) __VA_ARGS__
1410 #define TP_FIELDS(...) __VA_ARGS__
1413 #define TP_locvar(...) __VA_ARGS__
1416 #define TP_code_pre(...) __VA_ARGS__
1419 #define TP_code_post(...) __VA_ARGS__
1422 * Using twice size for filter stack data to hold size and pointer for
1423 * each field (worse case). For integers, max size required is 64-bit.
1424 * Same for double-precision floats. Those fit within
1425 * 2*sizeof(unsigned long) for all supported architectures.
1426 * Perform UNION (||) of filter runtime list.
1428 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
1429 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
1430 static void __event_notifier_probe__##_name(void *__data, _proto) \
1432 struct probe_local_vars { _locvar }; \
1433 struct lttng_event_notifier *__event_notifier = __data; \
1434 struct lttng_probe_ctx __lttng_probe_ctx = { \
1436 .event_notifier = __event_notifier, \
1437 .interruptible = !irqs_disabled(), \
1440 size_t __dynamic_len_removed[ARRAY_SIZE(__event_fields___##_name)]; \
1441 char __interpreter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
1443 struct probe_local_vars __tp_locvar; \
1444 struct probe_local_vars *tp_locvar __attribute__((unused)) = \
1446 struct lttng_kernel_notifier_ctx __notif_ctx; \
1447 bool __interpreter_stack_prepared = false; \
1449 if (unlikely(!READ_ONCE(__event_notifier->enabled))) \
1452 if (unlikely(!list_empty(&__event_notifier->filter_bytecode_runtime_head))) { \
1453 struct lttng_bytecode_runtime *bc_runtime; \
1454 int __filter_record = __event_notifier->has_enablers_without_bytecode; \
1456 __event_prepare_interpreter_stack__##_name(__stackvar.__interpreter_stack_data, \
1457 tp_locvar, _args); \
1458 __interpreter_stack_prepared = true; \
1459 lttng_list_for_each_entry_rcu(bc_runtime, &__event_notifier->filter_bytecode_runtime_head, node) { \
1460 if (unlikely(bc_runtime->interpreter_funcs.filter(bc_runtime, &__lttng_probe_ctx, \
1461 __stackvar.__interpreter_stack_data) & LTTNG_INTERPRETER_RECORD_FLAG)) \
1462 __filter_record = 1; \
1464 if (likely(!__filter_record)) \
1468 __notif_ctx.eval_capture = LTTNG_READ_ONCE(__event_notifier->eval_capture); \
1469 if (unlikely(!__interpreter_stack_prepared && __notif_ctx.eval_capture)) \
1470 __event_prepare_interpreter_stack__##_name( \
1471 __stackvar.__interpreter_stack_data, \
1472 tp_locvar, _args); \
1474 __event_notifier->send_notification(__event_notifier, \
1475 &__lttng_probe_ctx, \
1476 __stackvar.__interpreter_stack_data, \
1484 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
1485 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
1486 static void __event_notifier_probe__##_name(void *__data) \
1488 struct probe_local_vars { _locvar }; \
1489 struct lttng_event_notifier *__event_notifier = __data; \
1490 struct lttng_probe_ctx __lttng_probe_ctx = { \
1492 .event_notifier = __event_notifier, \
1493 .interruptible = !irqs_disabled(), \
1496 size_t __dynamic_len_removed[ARRAY_SIZE(__event_fields___##_name)]; \
1497 char __interpreter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
1499 struct probe_local_vars __tp_locvar; \
1500 struct probe_local_vars *tp_locvar __attribute__((unused)) = \
1502 struct lttng_kernel_notifier_ctx __notif_ctx; \
1503 bool __interpreter_stack_prepared = false; \
1505 if (unlikely(!READ_ONCE(__event_notifier->enabled))) \
1508 if (unlikely(!list_empty(&__event_notifier->filter_bytecode_runtime_head))) { \
1509 struct lttng_bytecode_runtime *bc_runtime; \
1510 int __filter_record = __event_notifier->has_enablers_without_bytecode; \
1512 __event_prepare_interpreter_stack__##_name(__stackvar.__interpreter_stack_data, \
1514 __interpreter_stack_prepared = true; \
1515 lttng_list_for_each_entry_rcu(bc_runtime, &__event_notifier->filter_bytecode_runtime_head, node) { \
1516 if (unlikely(bc_runtime->interpreter_funcs.filter(bc_runtime, &__lttng_probe_ctx, \
1517 __stackvar.__interpreter_stack_data) & LTTNG_INTERPRETER_RECORD_FLAG)) \
1518 __filter_record = 1; \
1520 if (likely(!__filter_record)) \
1524 __notif_ctx.eval_capture = LTTNG_READ_ONCE(__event_notifier->eval_capture); \
1525 if (unlikely(!__interpreter_stack_prepared && __notif_ctx.eval_capture)) \
1526 __event_prepare_interpreter_stack__##_name( \
1527 __stackvar.__interpreter_stack_data, \
1530 __event_notifier->send_notification(__event_notifier, \
1531 &__lttng_probe_ctx, \
1532 __stackvar.__interpreter_stack_data, \
1539 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
1541 * Stage 7 of the trace events.
1543 * Create event descriptions.
1546 /* Named field types must be defined in lttng-types.h */
1548 #include <lttng/events-reset.h> /* Reset all macros within LTTNG_TRACEPOINT_EVENT */
1551 #define TP_PROBE_CB(_template) &__event_probe__##_template
1554 #ifndef TP_EVENT_NOTIFIER_PROBE_CB
1555 #define TP_EVENT_NOTIFIER_PROBE_CB(_template) &__event_notifier_probe__##_template
1558 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
1559 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
1560 static const struct lttng_event_desc __event_desc___##_map = { \
1561 .fields = __event_fields___##_template, \
1564 .probe_callback = (void *) TP_PROBE_CB(_template), \
1565 .nr_fields = ARRAY_SIZE(__event_fields___##_template), \
1566 .owner = THIS_MODULE, \
1567 .event_notifier_callback = (void *) TP_EVENT_NOTIFIER_PROBE_CB(_template), \
1570 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
1571 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
1572 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map)
1574 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
1577 * Stage 8 of the trace events.
1579 * Create an array of event description pointers.
1582 #include <lttng/events-reset.h> /* Reset all macros within LTTNG_TRACEPOINT_EVENT */
1584 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
1585 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
1586 &__event_desc___##_map,
1588 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
1589 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
1590 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map)
/*
 * Two-level expansion so the _system argument (e.g. TRACE_SYSTEM) is fully
 * macro-expanded before token pasting with ##.
 */
#define TP_ID1(_token, _system)	_token##_system
#define TP_ID(_token, _system)	TP_ID1(_token, _system)
1595 static const struct lttng_event_desc
*TP_ID(__event_desc___
, TRACE_SYSTEM
)[] = {
1596 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
1603 * Stage 9 of the trace events.
1605 * Create a toplevel descriptor for the whole probe.
1608 #define TP_ID1(_token, _system) _token##_system
1609 #define TP_ID(_token, _system) TP_ID1(_token, _system)
1611 /* non-const because list head will be modified when registered. */
1612 static __used
struct lttng_probe_desc
TP_ID(__probe_desc___
, TRACE_SYSTEM
) = {
1613 .provider
= __stringify(TRACE_SYSTEM
),
1614 .event_desc
= TP_ID(__event_desc___
, TRACE_SYSTEM
),
1615 .nr_events
= ARRAY_SIZE(TP_ID(__event_desc___
, TRACE_SYSTEM
)),
1616 .head
= { NULL
, NULL
},
1617 .lazy_init_head
= { NULL
, NULL
},
1625 * Stage 10 of the trace events.
1627 * Register/unregister probes at module load/unload.
1630 #include <lttng/events-reset.h> /* Reset all macros within LTTNG_TRACEPOINT_EVENT */
1632 #define TP_ID1(_token, _system) _token##_system
1633 #define TP_ID(_token, _system) TP_ID1(_token, _system)
1634 #define module_init_eval1(_token, _system) module_init(_token##_system)
1635 #define module_init_eval(_token, _system) module_init_eval1(_token, _system)
1636 #define module_exit_eval1(_token, _system) module_exit(_token##_system)
1637 #define module_exit_eval(_token, _system) module_exit_eval1(_token, _system)
1639 #ifndef TP_MODULE_NOINIT
1640 static int TP_ID(__lttng_events_init__
, TRACE_SYSTEM
)(void)
1642 wrapper_vmalloc_sync_mappings();
1643 return lttng_probe_register(&TP_ID(__probe_desc___
, TRACE_SYSTEM
));
1646 static void TP_ID(__lttng_events_exit__
, TRACE_SYSTEM
)(void)
1648 lttng_probe_unregister(&TP_ID(__probe_desc___
, TRACE_SYSTEM
));
1651 #ifndef TP_MODULE_NOAUTOLOAD
1652 module_init_eval(__lttng_events_init__
, TRACE_SYSTEM
);
1653 module_exit_eval(__lttng_events_exit__
, TRACE_SYSTEM
);
1658 #undef module_init_eval
1659 #undef module_exit_eval