Commit | Line | Data |
---|---|---|
f42c85e7 SR |
1 | /* |
2 | * Stage 1 of the trace events. | |
3 | * | |
4 | * Override the macros in <trace/trace_events.h> to include the following: | |
5 | * | |
a7237765 | 6 | * struct trace_event_raw_<call> { |
f42c85e7 SR |
7 | * struct trace_entry ent; |
8 | * <type> <item>; | |
9 | * <type2> <item2>[<len>]; | |
10 | * [...] | |
11 | * }; | |
12 | * | |
13 | * The <type> <item> is created by the __field(type, item) macro or | |
14 | * the __array(type2, item2, len) macro. | |
15 | * We simply do "type item;", and that will create the fields | |
16 | * in the structure. | |
17 | */ | |
18 | ||
af658dca | 19 | #include <linux/trace_events.h> |
f42c85e7 | 20 | |
acd388fd SRRH |
21 | #ifndef TRACE_SYSTEM_VAR |
22 | #define TRACE_SYSTEM_VAR TRACE_SYSTEM | |
23 | #endif | |
24 | ||
25 | #define __app__(x, y) str__##x##y | |
26 | #define __app(x, y) __app__(x, y) | |
27 | ||
28 | #define TRACE_SYSTEM_STRING __app(TRACE_SYSTEM_VAR,__trace_system_name) | |
29 | ||
30 | #define TRACE_MAKE_SYSTEM_STR() \ | |
31 | static const char TRACE_SYSTEM_STRING[] = \ | |
32 | __stringify(TRACE_SYSTEM) | |
33 | ||
34 | TRACE_MAKE_SYSTEM_STR(); | |
35 | ||
/*
 * Record an enum-name -> value mapping for this trace system.
 * The map struct itself is __initdata (discarded after boot); a
 * pointer to it is placed in the "_ftrace_enum_map" section so the
 * tracing core can walk all maps at init time.
 */
#undef TRACE_DEFINE_ENUM
#define TRACE_DEFINE_ENUM(a)				\
	static struct trace_enum_map __used __initdata	\
	__##TRACE_SYSTEM##_##a =			\
	{						\
		.system = TRACE_SYSTEM_STRING,		\
		.enum_string = #a,			\
		.enum_value = a				\
	};						\
	static struct trace_enum_map __used		\
	__attribute__((section("_ftrace_enum_map")))	\
	*TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a
/*
 * DECLARE_EVENT_CLASS can be used to add a generic function
 * handlers for events. That is, if all events have the same
 * parameters and just have distinct trace points.
 * Each tracepoint can be defined with DEFINE_EVENT and that
 * will map the DECLARE_EVENT_CLASS to the tracepoint.
 *
 * TRACE_EVENT is a one to one mapping between tracepoint and template.
 */
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	DECLARE_EVENT_CLASS(name,				\
			     PARAMS(proto),			\
			     PARAMS(args),			\
			     PARAMS(tstruct),			\
			     PARAMS(assign),			\
			     PARAMS(print));			\
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));

/*
 * TRACE_EVENT_MAP is like TRACE_EVENT, but exposes the event under
 * the name <map> while still attaching to the tracepoint <name>.
 */
#undef TRACE_EVENT_MAP
#define TRACE_EVENT_MAP(name, map, proto, args, tstruct, assign, print)	\
	DECLARE_EVENT_CLASS(map,				\
			     PARAMS(proto),			\
			     PARAMS(args),			\
			     PARAMS(tstruct),			\
			     PARAMS(assign),			\
			     PARAMS(print));			\
	DEFINE_EVENT_MAP(map, name, map, PARAMS(proto), PARAMS(args));
/*
 * Stage 1 field macros: each one expands to a structure member of
 * trace_event_raw_<call>.  Dynamic arrays/strings/bitmasks become a
 * u32 "data location" word (offset in low 16 bits, length in high 16).
 */
#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __field_struct
#define __field_struct(type, item)	type	item;

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args
/*
 * Stage 1 class/event expansion: emit the raw event structure
 * (trace_entry header, the declared fields, then the variable-size
 * __data[] tail) and forward-declare the event class.  DEFINE_EVENT*
 * only forward-declares the trace_event_call here; the real
 * definition comes in stage 4.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)	\
	struct trace_event_raw_##name {					\
		struct trace_entry	ent;				\
		tstruct							\
		char			__data[0];			\
	};								\
									\
	static struct trace_event_class event_class_##name;

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)	\
	static struct trace_event_call	__used		\
	__attribute__((__aligned__(4))) event_##name

#undef DEFINE_EVENT_MAP
#define DEFINE_EVENT_MAP(template, name, map, proto, args)	\
	static struct trace_event_call	__used		\
	__attribute__((__aligned__(4))) event_##map

#undef DEFINE_EVENT_FN
#define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
97419875 JS |
133 | /* Callbacks are meaningless to ftrace. */ |
134 | #undef TRACE_EVENT_FN | |
0dd7b747 FW |
135 | #define TRACE_EVENT_FN(name, proto, args, tstruct, \ |
136 | assign, print, reg, unreg) \ | |
819ce45a FW |
137 | TRACE_EVENT(name, PARAMS(proto), PARAMS(args), \ |
138 | PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \ | |
2701121b DK |
139 | |
140 | #undef TRACE_EVENT_FN_COND | |
141 | #define TRACE_EVENT_FN_COND(name, proto, args, cond, tstruct, \ | |
142 | assign, print, reg, unreg) \ | |
143 | TRACE_EVENT_CONDITION(name, PARAMS(proto), PARAMS(args), PARAMS(cond), \ | |
144 | PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \ | |
97419875 | 145 | |
1ed0c597 FW |
146 | #undef TRACE_EVENT_FLAGS |
147 | #define TRACE_EVENT_FLAGS(name, value) \ | |
53cf810b | 148 | __TRACE_EVENT_FLAGS(name, value) |
1ed0c597 | 149 | |
d5b5f391 PZ |
150 | #undef TRACE_EVENT_PERF_PERM |
151 | #define TRACE_EVENT_PERF_PERM(name, expr...) \ | |
152 | __TRACE_EVENT_PERF_PERM(name, expr) | |
153 | ||
f42c85e7 SR |
154 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) |
155 | ||
156 | /* | |
157 | * Stage 2 of the trace events. | |
158 | * | |
9cbf1176 FW |
159 | * Include the following: |
160 | * | |
62323a14 | 161 | * struct trace_event_data_offsets_<call> { |
7d536cb3 LZ |
162 | * u32 <item1>; |
163 | * u32 <item2>; | |
9cbf1176 FW |
164 | * [...] |
165 | * }; | |
166 | * | |
7d536cb3 | 167 | * The __dynamic_array() macro will create each u32 <item>, this is |
7fcb7c47 | 168 | * to keep the offset of each array from the beginning of the event. |
7d536cb3 | 169 | * The size of an array is also encoded, in the higher 16 bits of <item>. |
9cbf1176 FW |
170 | */ |
171 | ||
0c564a53 SRRH |
172 | #undef TRACE_DEFINE_ENUM |
173 | #define TRACE_DEFINE_ENUM(a) | |
174 | ||
7fcb7c47 | 175 | #undef __field |
43b51ead LZ |
176 | #define __field(type, item) |
177 | ||
178 | #undef __field_ext | |
179 | #define __field_ext(type, item, filter_type) | |
7fcb7c47 | 180 | |
4d4c9cc8 SR |
181 | #undef __field_struct |
182 | #define __field_struct(type, item) | |
183 | ||
184 | #undef __field_struct_ext | |
185 | #define __field_struct_ext(type, item, filter_type) | |
186 | ||
9cbf1176 FW |
187 | #undef __array |
188 | #define __array(type, item, len) | |
189 | ||
7fcb7c47 | 190 | #undef __dynamic_array |
7d536cb3 | 191 | #define __dynamic_array(type, item, len) u32 item; |
9cbf1176 FW |
192 | |
193 | #undef __string | |
7fcb7c47 | 194 | #define __string(item, src) __dynamic_array(char, item, -1) |
9cbf1176 | 195 | |
4449bf92 SRRH |
196 | #undef __bitmask |
197 | #define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) | |
198 | ||
091ad365 IM |
199 | #undef DECLARE_EVENT_CLASS |
200 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ | |
62323a14 | 201 | struct trace_event_data_offsets_##call { \ |
9cbf1176 FW |
202 | tstruct; \ |
203 | }; | |
204 | ||
ff038f5c SR |
205 | #undef DEFINE_EVENT |
206 | #define DEFINE_EVENT(template, name, proto, args) | |
207 | ||
b1dace68 JD |
208 | #undef DEFINE_EVENT_MAP |
209 | #define DEFINE_EVENT_MAP(template, name, map, proto, args) | |
210 | ||
e5bc9721 SR |
211 | #undef DEFINE_EVENT_PRINT |
212 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ | |
213 | DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) | |
214 | ||
1ed0c597 FW |
215 | #undef TRACE_EVENT_FLAGS |
216 | #define TRACE_EVENT_FLAGS(event, flag) | |
217 | ||
d5b5f391 PZ |
218 | #undef TRACE_EVENT_PERF_PERM |
219 | #define TRACE_EVENT_PERF_PERM(event, expr...) | |
220 | ||
9cbf1176 FW |
221 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) |
222 | ||
223 | /* | |
224 | * Stage 3 of the trace events. | |
225 | * | |
f42c85e7 SR |
226 | * Override the macros in <trace/trace_events.h> to include the following: |
227 | * | |
228 | * enum print_line_t | |
892c505a | 229 | * trace_raw_output_<call>(struct trace_iterator *iter, int flags) |
f42c85e7 SR |
230 | * { |
231 | * struct trace_seq *s = &iter->seq; | |
a7237765 | 232 | * struct trace_event_raw_<call> *field; <-- defined in stage 1 |
f42c85e7 | 233 | * struct trace_entry *entry; |
bc289ae9 | 234 | * struct trace_seq *p = &iter->tmp_seq; |
f42c85e7 SR |
235 | * int ret; |
236 | * | |
237 | * entry = iter->ent; | |
238 | * | |
32c0edae | 239 | * if (entry->type != event_<call>->event.type) { |
f42c85e7 SR |
240 | * WARN_ON_ONCE(1); |
241 | * return TRACE_TYPE_UNHANDLED; | |
242 | * } | |
243 | * | |
244 | * field = (typeof(field))entry; | |
245 | * | |
56d8bd3f | 246 | * trace_seq_init(p); |
50354a8a LZ |
247 | * ret = trace_seq_printf(s, "%s: ", <call>); |
248 | * if (ret) | |
249 | * ret = trace_seq_printf(s, <TP_printk> "\n"); | |
f42c85e7 SR |
250 | * if (!ret) |
251 | * return TRACE_TYPE_PARTIAL_LINE; | |
252 | * | |
253 | * return TRACE_TYPE_HANDLED; | |
254 | * } | |
255 | * | |
256 | * This is the method used to print the raw event to the trace | |
257 | * output format. Note, this is not needed if the data is read | |
258 | * in binary. | |
259 | */ | |
260 | ||
261 | #undef __entry | |
262 | #define __entry field | |
263 | ||
264 | #undef TP_printk | |
265 | #define TP_printk(fmt, args...) fmt "\n", args | |
266 | ||
7fcb7c47 LZ |
267 | #undef __get_dynamic_array |
268 | #define __get_dynamic_array(field) \ | |
7d536cb3 | 269 | ((void *)__entry + (__entry->__data_loc_##field & 0xffff)) |
7fcb7c47 | 270 | |
beba4bb0 SRRH |
271 | #undef __get_dynamic_array_len |
272 | #define __get_dynamic_array_len(field) \ | |
273 | ((__entry->__data_loc_##field >> 16) & 0xffff) | |
274 | ||
9cbf1176 | 275 | #undef __get_str |
934de5f2 | 276 | #define __get_str(field) ((char *)__get_dynamic_array(field)) |
9cbf1176 | 277 | |
4449bf92 SRRH |
278 | #undef __get_bitmask |
279 | #define __get_bitmask(field) \ | |
280 | ({ \ | |
281 | void *__bitmask = __get_dynamic_array(field); \ | |
282 | unsigned int __bitmask_size; \ | |
beba4bb0 | 283 | __bitmask_size = __get_dynamic_array_len(field); \ |
645df987 | 284 | trace_print_bitmask_seq(p, __bitmask, __bitmask_size); \ |
4449bf92 SRRH |
285 | }) |
286 | ||
be74b73a SR |
287 | #undef __print_flags |
288 | #define __print_flags(flag, delim, flag_array...) \ | |
289 | ({ \ | |
a48f494e | 290 | static const struct trace_print_flags __flags[] = \ |
be74b73a | 291 | { flag_array, { -1, NULL }}; \ |
645df987 | 292 | trace_print_flags_seq(p, delim, flag, __flags); \ |
be74b73a SR |
293 | }) |
294 | ||
0f4fc29d SR |
295 | #undef __print_symbolic |
296 | #define __print_symbolic(value, symbol_array...) \ | |
297 | ({ \ | |
298 | static const struct trace_print_flags symbols[] = \ | |
299 | { symbol_array, { -1, NULL }}; \ | |
645df987 | 300 | trace_print_symbols_seq(p, value, symbols); \ |
0f4fc29d SR |
301 | }) |
302 | ||
2fc1b6f0 | 303 | #undef __print_symbolic_u64 |
304 | #if BITS_PER_LONG == 32 | |
305 | #define __print_symbolic_u64(value, symbol_array...) \ | |
306 | ({ \ | |
307 | static const struct trace_print_flags_u64 symbols[] = \ | |
308 | { symbol_array, { -1, NULL } }; \ | |
645df987 | 309 | trace_print_symbols_seq_u64(p, value, symbols); \ |
2fc1b6f0 | 310 | }) |
311 | #else | |
312 | #define __print_symbolic_u64(value, symbol_array...) \ | |
313 | __print_symbolic(value, symbol_array) | |
314 | #endif | |
315 | ||
5a2e3995 | 316 | #undef __print_hex |
645df987 | 317 | #define __print_hex(buf, buf_len) trace_print_hex_seq(p, buf, buf_len) |
5a2e3995 | 318 | |
6ea22486 DM |
319 | #undef __print_array |
320 | #define __print_array(array, count, el_size) \ | |
321 | ({ \ | |
322 | BUILD_BUG_ON(el_size != 1 && el_size != 2 && \ | |
323 | el_size != 4 && el_size != 8); \ | |
645df987 | 324 | trace_print_array_seq(p, array, count, el_size); \ |
6ea22486 DM |
325 | }) |
326 | ||
091ad365 IM |
327 | #undef DECLARE_EVENT_CLASS |
328 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ | |
83f0d539 | 329 | static notrace enum print_line_t \ |
892c505a SRRH |
330 | trace_raw_output_##call(struct trace_iterator *iter, int flags, \ |
331 | struct trace_event *trace_event) \ | |
f42c85e7 SR |
332 | { \ |
333 | struct trace_seq *s = &iter->seq; \ | |
f71130de | 334 | struct trace_seq __maybe_unused *p = &iter->tmp_seq; \ |
a7237765 | 335 | struct trace_event_raw_##call *field; \ |
f42c85e7 SR |
336 | int ret; \ |
337 | \ | |
f71130de | 338 | field = (typeof(field))iter->ent; \ |
80decc70 | 339 | \ |
892c505a | 340 | ret = trace_raw_output_prep(iter, trace_event); \ |
8e2e095c | 341 | if (ret != TRACE_TYPE_HANDLED) \ |
f71130de LZ |
342 | return ret; \ |
343 | \ | |
19a7fe20 | 344 | trace_seq_printf(s, print); \ |
f42c85e7 | 345 | \ |
19a7fe20 | 346 | return trace_handle_return(s); \ |
80decc70 | 347 | } \ |
3ad017ba | 348 | static struct trace_event_functions trace_event_type_funcs_##call = { \ |
892c505a | 349 | .trace = trace_raw_output_##call, \ |
80decc70 | 350 | }; |
ff038f5c | 351 | |
e5bc9721 SR |
352 | #undef DEFINE_EVENT_PRINT |
353 | #define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ | |
83f0d539 | 354 | static notrace enum print_line_t \ |
892c505a | 355 | trace_raw_output_##call(struct trace_iterator *iter, int flags, \ |
a9a57763 | 356 | struct trace_event *event) \ |
e5bc9721 | 357 | { \ |
a7237765 | 358 | struct trace_event_raw_##template *field; \ |
e5bc9721 | 359 | struct trace_entry *entry; \ |
bc289ae9 | 360 | struct trace_seq *p = &iter->tmp_seq; \ |
f42c85e7 SR |
361 | \ |
362 | entry = iter->ent; \ | |
363 | \ | |
32c0edae | 364 | if (entry->type != event_##call.event.type) { \ |
f42c85e7 SR |
365 | WARN_ON_ONCE(1); \ |
366 | return TRACE_TYPE_UNHANDLED; \ | |
367 | } \ | |
368 | \ | |
369 | field = (typeof(field))entry; \ | |
370 | \ | |
56d8bd3f | 371 | trace_seq_init(p); \ |
892c505a | 372 | return trace_output_call(iter, #call, print); \ |
80decc70 | 373 | } \ |
3ad017ba | 374 | static struct trace_event_functions trace_event_type_funcs_##call = { \ |
892c505a | 375 | .trace = trace_raw_output_##call, \ |
80decc70 | 376 | }; |
e5bc9721 | 377 | |
f42c85e7 SR |
378 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) |
379 | ||
43b51ead LZ |
380 | #undef __field_ext |
381 | #define __field_ext(type, item, filter_type) \ | |
f42c85e7 SR |
382 | ret = trace_define_field(event_call, #type, #item, \ |
383 | offsetof(typeof(field), item), \ | |
43b51ead LZ |
384 | sizeof(field.item), \ |
385 | is_signed_type(type), filter_type); \ | |
f42c85e7 SR |
386 | if (ret) \ |
387 | return ret; | |
388 | ||
4d4c9cc8 SR |
389 | #undef __field_struct_ext |
390 | #define __field_struct_ext(type, item, filter_type) \ | |
391 | ret = trace_define_field(event_call, #type, #item, \ | |
392 | offsetof(typeof(field), item), \ | |
393 | sizeof(field.item), \ | |
394 | 0, filter_type); \ | |
395 | if (ret) \ | |
396 | return ret; | |
397 | ||
43b51ead LZ |
398 | #undef __field |
399 | #define __field(type, item) __field_ext(type, item, FILTER_OTHER) | |
400 | ||
4d4c9cc8 SR |
401 | #undef __field_struct |
402 | #define __field_struct(type, item) __field_struct_ext(type, item, FILTER_OTHER) | |
403 | ||
f42c85e7 SR |
404 | #undef __array |
405 | #define __array(type, item, len) \ | |
04295780 | 406 | do { \ |
87291347 | 407 | char *type_str = #type"["__stringify(len)"]"; \ |
04295780 | 408 | BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ |
87291347 | 409 | ret = trace_define_field(event_call, type_str, #item, \ |
f42c85e7 | 410 | offsetof(typeof(field), item), \ |
fb7ae981 LJ |
411 | sizeof(field.item), \ |
412 | is_signed_type(type), FILTER_OTHER); \ | |
04295780 SR |
413 | if (ret) \ |
414 | return ret; \ | |
415 | } while (0); | |
f42c85e7 | 416 | |
7fcb7c47 LZ |
417 | #undef __dynamic_array |
418 | #define __dynamic_array(type, item, len) \ | |
68fd60a8 | 419 | ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \ |
43b51ead | 420 | offsetof(typeof(field), __data_loc_##item), \ |
fb7ae981 LJ |
421 | sizeof(field.__data_loc_##item), \ |
422 | is_signed_type(type), FILTER_OTHER); | |
7fcb7c47 | 423 | |
9cbf1176 | 424 | #undef __string |
7fcb7c47 | 425 | #define __string(item, src) __dynamic_array(char, item, -1) |
9cbf1176 | 426 | |
4449bf92 SRRH |
427 | #undef __bitmask |
428 | #define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) | |
429 | ||
091ad365 IM |
430 | #undef DECLARE_EVENT_CLASS |
431 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \ | |
7e4f44b1 | 432 | static int notrace __init \ |
33d0f35e | 433 | trace_event_define_fields_##call(struct trace_event_call *event_call) \ |
f42c85e7 | 434 | { \ |
a7237765 | 435 | struct trace_event_raw_##call field; \ |
f42c85e7 SR |
436 | int ret; \ |
437 | \ | |
f42c85e7 SR |
438 | tstruct; \ |
439 | \ | |
440 | return ret; \ | |
441 | } | |
442 | ||
ff038f5c SR |
443 | #undef DEFINE_EVENT |
444 | #define DEFINE_EVENT(template, name, proto, args) | |
445 | ||
b1dace68 JD |
446 | #undef DEFINE_EVENT_MAP |
447 | #define DEFINE_EVENT_MAP(template, name, map, proto, args) | |
448 | ||
e5bc9721 SR |
449 | #undef DEFINE_EVENT_PRINT |
450 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ | |
451 | DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) | |
452 | ||
f42c85e7 SR |
453 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) |
454 | ||
7fcb7c47 LZ |
455 | /* |
456 | * remember the offset of each array from the beginning of the event. | |
457 | */ | |
458 | ||
459 | #undef __entry | |
460 | #define __entry entry | |
461 | ||
462 | #undef __field | |
463 | #define __field(type, item) | |
464 | ||
43b51ead LZ |
465 | #undef __field_ext |
466 | #define __field_ext(type, item, filter_type) | |
467 | ||
4d4c9cc8 SR |
468 | #undef __field_struct |
469 | #define __field_struct(type, item) | |
470 | ||
471 | #undef __field_struct_ext | |
472 | #define __field_struct_ext(type, item, filter_type) | |
473 | ||
7fcb7c47 LZ |
474 | #undef __array |
475 | #define __array(type, item, len) | |
476 | ||
477 | #undef __dynamic_array | |
478 | #define __dynamic_array(type, item, len) \ | |
114e7b52 | 479 | __item_length = (len) * sizeof(type); \ |
7fcb7c47 LZ |
480 | __data_offsets->item = __data_size + \ |
481 | offsetof(typeof(*entry), __data); \ | |
114e7b52 FB |
482 | __data_offsets->item |= __item_length << 16; \ |
483 | __data_size += __item_length; | |
7fcb7c47 LZ |
484 | |
485 | #undef __string | |
4e58e547 SRRH |
486 | #define __string(item, src) __dynamic_array(char, item, \ |
487 | strlen((src) ? (const char *)(src) : "(null)") + 1) | |
7fcb7c47 | 488 | |
4449bf92 SRRH |
489 | /* |
490 | * __bitmask_size_in_bytes_raw is the number of bytes needed to hold | |
491 | * num_possible_cpus(). | |
492 | */ | |
493 | #define __bitmask_size_in_bytes_raw(nr_bits) \ | |
494 | (((nr_bits) + 7) / 8) | |
495 | ||
496 | #define __bitmask_size_in_longs(nr_bits) \ | |
497 | ((__bitmask_size_in_bytes_raw(nr_bits) + \ | |
498 | ((BITS_PER_LONG / 8) - 1)) / (BITS_PER_LONG / 8)) | |
499 | ||
500 | /* | |
501 | * __bitmask_size_in_bytes is the number of bytes needed to hold | |
502 | * num_possible_cpus() padded out to the nearest long. This is what | |
503 | * is saved in the buffer, just to be consistent. | |
504 | */ | |
505 | #define __bitmask_size_in_bytes(nr_bits) \ | |
506 | (__bitmask_size_in_longs(nr_bits) * (BITS_PER_LONG / 8)) | |
507 | ||
508 | #undef __bitmask | |
509 | #define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, \ | |
510 | __bitmask_size_in_longs(nr_bits)) | |
511 | ||
091ad365 IM |
512 | #undef DECLARE_EVENT_CLASS |
513 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ | |
d0ee8f4a | 514 | static inline notrace int trace_event_get_offsets_##call( \ |
62323a14 | 515 | struct trace_event_data_offsets_##call *__data_offsets, proto) \ |
7fcb7c47 LZ |
516 | { \ |
517 | int __data_size = 0; \ | |
114e7b52 | 518 | int __maybe_unused __item_length; \ |
a7237765 | 519 | struct trace_event_raw_##call __maybe_unused *entry; \ |
7fcb7c47 LZ |
520 | \ |
521 | tstruct; \ | |
522 | \ | |
523 | return __data_size; \ | |
524 | } | |
525 | ||
ff038f5c SR |
526 | #undef DEFINE_EVENT |
527 | #define DEFINE_EVENT(template, name, proto, args) | |
528 | ||
b1dace68 JD |
529 | #undef DEFINE_EVENT_MAP |
530 | #define DEFINE_EVENT_MAP(template, name, map, proto, args) | |
531 | ||
e5bc9721 SR |
532 | #undef DEFINE_EVENT_PRINT |
533 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ | |
534 | DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) | |
535 | ||
7fcb7c47 LZ |
536 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) |
537 | ||
/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct trace_event_call event_<call>;
 *
 * static void trace_event_raw_event_<call>(void *__data, proto)
 * {
 *	struct trace_event_file *trace_file = __data;
 *	struct trace_event_call *event_call = trace_file->event_call;
 *	struct trace_event_data_offsets_<call> __maybe_unused __data_offsets;
 *	unsigned long eflags = trace_file->flags;
 *	enum event_trigger_type __tt = ETT_NONE;
 *	struct ring_buffer_event *event;
 *	struct trace_event_raw_<call> *entry; <-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int __data_size;
 *	int pc;
 *
 *	if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
 *		if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
 *			event_triggers_call(trace_file, NULL);
 *		if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
 *			return;
 *	}
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	__data_size = trace_event_get_offsets_<call>(&__data_offsets, args);
 *
 *	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
 *				  event_<call>->event.type,
 *				  sizeof(*entry) + __data_size,
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry	= ring_buffer_event_data(event);
 *
 *	{ <assign>; }  <-- Here we assign the entries by the __field and
 *			   __array macros.
 *
 *	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
 *		__tt = event_triggers_call(trace_file, entry);
 *
 *	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT,
 *		     &trace_file->flags))
 *		ring_buffer_discard_commit(buffer, event);
 *	else if (!filter_check_discard(trace_file, entry, buffer, event))
 *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
 *
 *	if (__tt)
 *		event_triggers_post_call(trace_file, __tt);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= trace_raw_output_<call>, <-- stage 2
 * };
 *
 * static char print_fmt_<call>[] = <TP_printk>;
 *
 * static struct trace_event_class __used event_class_<template> = {
 *	.system			= "<system>",
 *	.define_fields		= trace_event_define_fields_<call>,
 *	.fields			= LIST_HEAD_INIT(event_class_##call.fields),
 *	.raw_init		= trace_event_raw_init,
 *	.probe			= trace_event_raw_event_##call,
 *	.reg			= trace_event_reg,
 * };
 *
 * static struct trace_event_call event_<call> = {
 *	.class			= event_class_<template>,
 *	{
 *		.tp			= &__tracepoint_<call>,
 *	},
 *	.event			= &ftrace_event_type_<call>,
 *	.print_fmt		= print_fmt_<call>,
 *	.flags			= TRACE_EVENT_FL_TRACEPOINT,
 * };
 * // its only safe to use pointers when doing linker tricks to
 * // create an array.
 * static struct trace_event_call __used
 * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
 *
 */

#ifdef CONFIG_PERF_EVENTS

#define _TRACE_PERF_PROTO(call, proto)					\
	static notrace void						\
	perf_trace_##call(void *__data, proto);

#define _TRACE_PERF_INIT(call)						\
	.perf_probe		= perf_trace_##call,

#else
#define _TRACE_PERF_PROTO(call, proto)
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_struct
#define __field_struct(type, item)

#undef __array
#define __array(type, item, len)

/* Copy the offset/length word computed by get_offsets into the event */
#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)

#undef __assign_bitmask
#define __assign_bitmask(dst, src, nr_bits)					\
	memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef __perf_count
#define __perf_count(c)	(c)

#undef __perf_task
#define __perf_task(t)	(t)
684 | #undef DECLARE_EVENT_CLASS | |
685 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ | |
686 | \ | |
687 | static notrace void \ | |
688 | trace_event_raw_event_##call(void *__data, proto) \ | |
689 | { \ | |
690 | struct trace_event_file *trace_file = __data; \ | |
691 | struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\ | |
692 | struct trace_event_buffer fbuffer; \ | |
693 | struct trace_event_raw_##call *entry; \ | |
694 | int __data_size; \ | |
695 | \ | |
696 | if (trace_trigger_soft_disabled(trace_file)) \ | |
697 | return; \ | |
698 | \ | |
699 | __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \ | |
700 | \ | |
701 | entry = trace_event_buffer_reserve(&fbuffer, trace_file, \ | |
702 | sizeof(*entry) + __data_size); \ | |
703 | \ | |
704 | if (!entry) \ | |
705 | return; \ | |
706 | \ | |
707 | tstruct \ | |
708 | \ | |
709 | { assign; } \ | |
710 | \ | |
711 | trace_event_buffer_commit(&fbuffer); \ | |
712 | } | |
713 | /* | |
714 | * The ftrace_test_probe is compiled out, it is only here as a build time check | |
715 | * to make sure that if the tracepoint handling changes, the ftrace probe will | |
716 | * fail to compile unless it too is updated. | |
717 | */ | |
718 | ||
719 | #undef DEFINE_EVENT | |
720 | #define DEFINE_EVENT(template, call, proto, args) \ | |
721 | static inline void ftrace_test_probe_##call(void) \ | |
722 | { \ | |
723 | check_trace_callback_type_##call(trace_event_raw_event_##template); \ | |
724 | } | |
725 | ||
b1dace68 JD |
726 | #undef DEFINE_EVENT_MAP |
727 | #define DEFINE_EVENT_MAP(template, call, map, proto, args) \ | |
728 | static inline void ftrace_test_probe_##map(void) \ | |
729 | { \ | |
730 | check_trace_callback_type_##call(trace_event_raw_event_##template); \ | |
731 | } | |
732 | ||
46ac5182 SRRH |
733 | #undef DEFINE_EVENT_PRINT |
734 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) | |
735 | ||
736 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | |
737 | ||
738 | #undef __entry | |
739 | #define __entry REC | |
740 | ||
741 | #undef __print_flags | |
742 | #undef __print_symbolic | |
743 | #undef __print_hex | |
744 | #undef __get_dynamic_array | |
745 | #undef __get_dynamic_array_len | |
746 | #undef __get_str | |
747 | #undef __get_bitmask | |
748 | #undef __print_array | |
749 | ||
750 | #undef TP_printk | |
751 | #define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args) | |
752 | ||
753 | #undef DECLARE_EVENT_CLASS | |
754 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ | |
755 | _TRACE_PERF_PROTO(call, PARAMS(proto)); \ | |
756 | static char print_fmt_##call[] = print; \ | |
757 | static struct trace_event_class __used __refdata event_class_##call = { \ | |
758 | .system = TRACE_SYSTEM_STRING, \ | |
759 | .define_fields = trace_event_define_fields_##call, \ | |
760 | .fields = LIST_HEAD_INIT(event_class_##call.fields),\ | |
761 | .raw_init = trace_event_raw_init, \ | |
762 | .probe = trace_event_raw_event_##call, \ | |
763 | .reg = trace_event_reg, \ | |
764 | _TRACE_PERF_INIT(call) \ | |
765 | }; | |
766 | ||
767 | #undef DEFINE_EVENT | |
768 | #define DEFINE_EVENT(template, call, proto, args) \ | |
769 | \ | |
770 | static struct trace_event_call __used event_##call = { \ | |
771 | .class = &event_class_##template, \ | |
772 | { \ | |
773 | .tp = &__tracepoint_##call, \ | |
774 | }, \ | |
775 | .event.funcs = &trace_event_type_funcs_##template, \ | |
776 | .print_fmt = print_fmt_##template, \ | |
777 | .flags = TRACE_EVENT_FL_TRACEPOINT, \ | |
778 | }; \ | |
779 | static struct trace_event_call __used \ | |
780 | __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call | |
781 | ||
b1dace68 JD |
782 | #undef DEFINE_EVENT_MAP |
783 | #define DEFINE_EVENT_MAP(_template, _call, _map, _proto, _args) \ | |
784 | \ | |
785 | static struct trace_event_map event_map_##_map = { \ | |
786 | .tp = &__tracepoint_##_call, \ | |
787 | .name = #_map, \ | |
788 | }; \ | |
789 | \ | |
790 | static struct trace_event_call __used event_##_map = { \ | |
791 | .class = &event_class_##_template, \ | |
792 | { \ | |
793 | .map = &event_map_##_map, \ | |
794 | }, \ | |
795 | .event.funcs = &trace_event_type_funcs_##_template, \ | |
796 | .print_fmt = print_fmt_##_template, \ | |
797 | .flags = TRACE_EVENT_FL_TRACEPOINT | TRACE_EVENT_FL_MAP,\ | |
798 | }; \ | |
799 | static struct trace_event_call __used \ | |
800 | __attribute__((section("_ftrace_events"))) *__event_##_map = &event_##_map | |
801 | ||
46ac5182 SRRH |
802 | #undef DEFINE_EVENT_PRINT |
803 | #define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ | |
804 | \ | |
805 | static char print_fmt_##call[] = print; \ | |
806 | \ | |
807 | static struct trace_event_call __used event_##call = { \ | |
808 | .class = &event_class_##template, \ | |
809 | { \ | |
810 | .tp = &__tracepoint_##call, \ | |
811 | }, \ | |
812 | .event.funcs = &trace_event_type_funcs_##call, \ | |
813 | .print_fmt = print_fmt_##call, \ | |
814 | .flags = TRACE_EVENT_FL_TRACEPOINT, \ | |
815 | }; \ | |
816 | static struct trace_event_call __used \ | |
817 | __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call | |
818 | ||
819 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) |