/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct trace_event_call event_<call>;
 *
 * static void trace_event_raw_event_<call>(void *__data, proto)
 * {
 *	struct trace_event_file *trace_file = __data;
 *	struct trace_event_call *event_call = trace_file->event_call;
 *	struct trace_event_data_offsets_<call> __maybe_unused __data_offsets;
 *	unsigned long eflags = trace_file->flags;
 *	enum event_trigger_type __tt = ETT_NONE;
 *	struct ring_buffer_event *event;
 *	struct trace_event_raw_<call> *entry; <-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int __data_size;
 *	int pc;
 *
 *	if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
 *		if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
 *			event_triggers_call(trace_file, NULL);
 *		if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
 *			return;
 *	}
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	__data_size = trace_event_get_offsets_<call>(&__data_offsets, args);
 *
 *	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
 *				  event_<call>->event.type,
 *				  sizeof(*entry) + __data_size,
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *
 *	{ <assign>; }  <-- Here we assign the entries by the __field and
 *			   __array macros.
 *
 *	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
 *		__tt = event_triggers_call(trace_file, entry);
 *
 *	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT,
 *		     &trace_file->flags))
 *		ring_buffer_discard_commit(buffer, event);
 *	else if (!filter_check_discard(trace_file, entry, buffer, event))
 *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
 *
 *	if (__tt)
 *		event_triggers_post_call(trace_file, __tt);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= trace_raw_output_<call>, <-- stage 2
 * };
 *
 * static char print_fmt_<call>[] = <TP_printk>;
 *
 * static struct trace_event_class __used event_class_<template> = {
 *	.system			= "<system>",
 *	.define_fields		= trace_event_define_fields_<call>,
 *	.fields			= LIST_HEAD_INIT(event_class_##call.fields),
 *	.raw_init		= trace_event_raw_init,
 *	.probe			= trace_event_raw_event_##call,
 *	.reg			= trace_event_reg,
 * };
 *
 * static struct trace_event_call event_<call> = {
 *	.class			= event_class_<template>,
 *	{
 *		.tp			= &__tracepoint_<call>,
 *	},
 *	.event			= &ftrace_event_type_<call>,
 *	.print_fmt		= print_fmt_<call>,
 *	.flags			= TRACE_EVENT_FL_TRACEPOINT,
 * };
 * // it's only safe to use pointers when doing linker tricks to
 * // create an array.
 * static struct trace_event_call __used
 * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
 *
 */
90 | ||
#ifdef CONFIG_PERF_EVENTS

/*
 * Forward-declare the perf probe so _TRACE_PERF_INIT() can take its
 * address before the definition (emitted later in this file by the
 * perf-section DECLARE_EVENT_CLASS) has been seen.
 */
#define _TRACE_PERF_PROTO(call, proto)					\
	static notrace void						\
	perf_trace_##call(void *__data, proto);

/* Wire the perf probe into the event class initializer below. */
#define _TRACE_PERF_INIT(call)						\
	.perf_probe		= perf_trace_##call,

#else
/* perf not configured in: both helpers expand to nothing. */
#define _TRACE_PERF_PROTO(call, proto)
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */
104 | ||
#undef __entry
#define __entry entry

/*
 * In this stage the field-declaring macros expand to nothing: static
 * fields are written directly through __entry-><item> by the
 * TP_fast_assign() body.  Only the dynamic-array variants emit code,
 * to record each item's location word.
 */
#undef __field
#define __field(type, item)

#undef __field_struct
#define __field_struct(type, item)

#undef __array
#define __array(type, item, len)

/*
 * Store the packed location value computed earlier into __data_offsets
 * (by trace_event_get_offsets_<call>()) into the entry's
 * __data_loc_<item> field.
 */
#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

/* Copy src into the string's dynamic slot; NULL becomes "(null)". */
#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)

#undef __assign_bitmask
#define __assign_bitmask(dst, src, nr_bits)				\
	memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))

/* The TP_fast_assign() body is pasted verbatim into the probe. */
#undef TP_fast_assign
#define TP_fast_assign(args...) args
140 | ||
/*
 * For the ftrace probe the perf annotations are transparent: each one
 * simply expands to its argument.  (The perf section later in this
 * file redefines them to capture the values into locals.)
 */
#undef __perf_addr
#define __perf_addr(a)	(a)

#undef __perf_count
#define __perf_count(c)	(c)

#undef __perf_task
#define __perf_task(t)	(t)
149 | ||
/*
 * Emit the ftrace probe for an event class.  __data is the
 * struct trace_event_file registered for this event.  Flow: bail out
 * if soft-disabled/triggered-off, size the dynamic-array payload,
 * reserve a ring-buffer slot, run TP_fast_assign() to fill the entry,
 * then commit.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
									\
static notrace void							\
trace_event_raw_event_##call(void *__data, proto)			\
{									\
	struct trace_event_file *trace_file = __data;			\
	struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
	struct trace_event_buffer fbuffer;				\
	struct trace_event_raw_##call *entry;				\
	int __data_size;						\
									\
	if (trace_trigger_soft_disabled(trace_file))			\
		return;							\
									\
	__data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
									\
	entry = trace_event_buffer_reserve(&fbuffer, trace_file,	\
				 sizeof(*entry) + __data_size);		\
									\
	if (!entry)							\
		return;							\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	trace_event_buffer_commit(&fbuffer);				\
}
/*
 * The ftrace_test_probe is compiled out, it is only here as a build time check
 * to make sure that if the tracepoint handling changes, the ftrace probe will
 * fail to compile unless it too is updated.
 */

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void ftrace_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(trace_event_raw_event_##template); \
}

/* Nothing to type-check for the _PRINT variant at this stage. */
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
194 | ||
195 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | |
196 | ||
/*
 * Next pass over the trace header: turn TP_printk() into a literal
 * format string.  __entry becomes the token REC so that field
 * references stringify as REC->field in the resulting text.
 */
#undef __entry
#define __entry REC

/* Drop the output helpers so they stringify verbatim, not expand. */
#undef __print_flags
#undef __print_symbolic
#undef __print_hex
#undef __get_dynamic_array
#undef __get_dynamic_array_len
#undef __get_str
#undef __get_bitmask
#undef __print_array

/* "<fmt>", <stringified args> — the print_fmt_<call> initializer. */
#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
211 | ||
/*
 * Emit the trace_event_class for this template: print format string,
 * field definitions, the ftrace probe emitted earlier, and (when
 * CONFIG_PERF_EVENTS is set) the perf probe hook.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
_TRACE_PERF_PROTO(call, PARAMS(proto));					\
static char print_fmt_##call[] = print;					\
static struct trace_event_class __used __refdata event_class_##call = { \
	.system			= TRACE_SYSTEM_STRING,			\
	.define_fields		= trace_event_define_fields_##call,	\
	.fields			= LIST_HEAD_INIT(event_class_##call.fields),\
	.raw_init		= trace_event_raw_init,			\
	.probe			= trace_event_raw_event_##call,		\
	.reg			= trace_event_reg,			\
	_TRACE_PERF_INIT(call)						\
};
225 | ||
/*
 * Emit the trace_event_call for one event of a class, and drop a
 * pointer to it into the _ftrace_events section so the linker
 * collects all events into one array.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
									\
static struct trace_event_call __used event_##call = {			\
	.class			= &event_class_##template,		\
	{								\
		.tp			= &__tracepoint_##call,	\
	},								\
	.event.funcs		= &trace_event_type_funcs_##template,	\
	.print_fmt		= print_fmt_##template,			\
	.flags			= TRACE_EVENT_FL_TRACEPOINT,		\
};									\
static struct trace_event_call __used					\
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
240 | ||
/*
 * Like DEFINE_EVENT, but the event carries its own print format and
 * output funcs (keyed by call, not the class template).
 */
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
									\
static char print_fmt_##call[] = print;					\
									\
static struct trace_event_call __used event_##call = {			\
	.class			= &event_class_##template,		\
	{								\
		.tp			= &__tracepoint_##call,	\
	},								\
	.event.funcs		= &trace_event_type_funcs_##call,	\
	.print_fmt		= print_fmt_##call,			\
	.flags			= TRACE_EVENT_FL_TRACEPOINT,		\
};									\
static struct trace_event_call __used					\
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/* NOTE(review): TRACE_SYSTEM_VAR comes from outside this chunk; it is
 * presumably finished once the events above are defined — confirm. */
#undef TRACE_SYSTEM_VAR
261 | ||
#ifdef CONFIG_PERF_EVENTS

#undef __entry
#define __entry entry

/*
 * __data_loc_<field> packs the payload offset in the low 16 bits and
 * the length in the high 16 bits (see the & 0xffff / >> 16 below).
 */
#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field)	\
		((__entry->__data_loc_##field >> 16) & 0xffff)

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)

/*
 * In the perf probe the annotations are live: they capture the value
 * into the probe's __addr/__count/__task locals for perf_trace_buf_submit().
 */
#undef __perf_addr
#define __perf_addr(a)	(__addr = (a))

#undef __perf_count
#define __perf_count(c)	(__count = (c))

#undef __perf_task
#define __perf_task(t)	(__task = (t))
289 | ||
/*
 * Emit the perf probe for an event class.  Here __data is the
 * trace_event_call itself.  Bails out early when no perf event is
 * hooked on this CPU and no __task override can apply; otherwise
 * rounds the record (data + entry + a u32) up to u64 alignment, drops
 * the u32 again (NOTE(review): presumably room for perf's trailing
 * size word — confirm), fills the entry via TP_fast_assign(), and
 * submits.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace void							\
perf_trace_##call(void *__data, proto)					\
{									\
	struct trace_event_call *event_call = __data;			\
	struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
	struct trace_event_raw_##call *entry;				\
	struct pt_regs *__regs;						\
	u64 __addr = 0, __count = 1;					\
	struct task_struct *__task = NULL;				\
	struct hlist_head *head;					\
	int __entry_size;						\
	int __data_size;						\
	int rctx;							\
									\
	__data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
									\
	head = this_cpu_ptr(event_call->perf_events);			\
	if (__builtin_constant_p(!__task) && !__task &&			\
				hlist_empty(head))			\
		return;							\
									\
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	entry = perf_trace_buf_prepare(__entry_size,			\
			event_call->event.type, &__regs, &rctx);	\
	if (!entry)							\
		return;							\
									\
	perf_fetch_caller_regs(__regs);					\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
		__count, __regs, head, __task);				\
}
331 | ||
/*
 * This part is compiled out, it is only here as a build time check
 * to make sure that if the tracepoint handling changes, the
 * perf probe will fail to compile unless it too is updated.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void perf_test_probe_##call(void)				\
{									\
	check_trace_callback_type_##call(perf_trace_##template);	\
}


/* _PRINT events share the template's perf probe; reuse DEFINE_EVENT. */
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_PERF_EVENTS */