#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/ftrace_event.h>

#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h>		/* For NR_SYSCALLS	     */
#include <asm/syscall.h>	/* some archs define it here */
#endif

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_BLK,
	TRACE_BPUTS,

	__TRACE_LAST_TYPE,
};


#undef __field
#define __field(type, item)		type	item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __array
#define __array(type, item, size)	type	item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type	item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
	struct struct_name {						\
		struct trace_entry	ent;				\
		tstruct							\
	}

#undef TP_ARGS
#define TP_ARGS(args...)	args

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	\
			 filter, regfn) \
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)

#include "trace_entries.h"
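
/*
 * Illustration (a sketch, not compiled here; the entry name and fields
 * below are hypothetical): with the definitions above, an invocation
 * in trace_entries.h along the lines of
 *
 *	FTRACE_ENTRY(example, example_entry,
 *		TRACE_FN,
 *		F_STRUCT(
 *			__field(	unsigned long,	ip	)
 *		),
 *		F_printk("%lx", __entry->ip),
 *		FILTER_OTHER
 *	);
 *
 * expands to just the entry structure:
 *
 *	struct example_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *	};
 */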

/*
 * Syscalls are special and need special handling; that is why
 * they are not included in trace_entries.h.
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	long			ret;
};

struct kprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct kretprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
};

#define TRACE_BUF_SIZE		1024

struct trace_array;

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example, which task started
 * the trace, etc.).
 */
struct trace_array_cpu {
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		entries;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	kuid_t			uid;
	char			comm[TASK_COMM_LEN];
};

struct tracer;

struct trace_buffer {
	struct trace_array		*tr;
	struct ring_buffer		*buffer;
	struct trace_array_cpu __percpu	*data;
	cycle_t				time_start;
	int				cpu;
};

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well.
 */
struct trace_array {
	struct list_head	list;
	char			*name;
	struct trace_buffer	trace_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the
	 * trace_buffer. When a snapshot is taken, the buffer of the
	 * max_buffer is swapped with the buffer of the trace_buffer and
	 * the buffers are reset for the trace_buffer so the tracing can
	 * continue.
	 */
	struct trace_buffer	max_buffer;
	bool			allocated_snapshot;
#endif
	int			buffer_disabled;
#ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;
	int			sys_refcount_exit;
	DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
	DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
#endif
	int			stop_count;
	int			clock_id;
	struct tracer		*current_trace;
	unsigned int		flags;
	raw_spinlock_t		start_lock;
	struct dentry		*dir;
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct dentry		*event_dir;
	struct list_head	systems;
	struct list_head	events;
	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
	int			ref;
};

enum {
	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)
};

extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;

extern int trace_array_get(struct trace_array *tr);
extern void trace_array_put(struct trace_array *tr);

/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH);	\
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		__ftrace_bad_type();					\
	} while (0)
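
/*
 * Illustrative use (a sketch of how print callbacks downcast the
 * generic entry; "iter" here is a hypothetical struct trace_iterator *):
 *
 *	struct ftrace_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *	trace_seq_printf(&iter->seq, "%lx <-- %lx\n",
 *			 field->ip, field->parent_ip);
 */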

/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b
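
/*
 * Example (a sketch; the names are hypothetical, but the shape mirrors
 * how e.g. the function_graph tracer defines its options):
 *
 *	#define TRACE_MYOPT	0x1
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(myopt, TRACE_MYOPT) },
 *		{ }	// must end with an empty terminator
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val  = 0,	// all options off by default
 *		.opts = my_opts,
 *	};
 */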

/**
 * struct tracer - a specific tracer and its callbacks to interact with debugfs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
 * @stop: called when tracing is paused (echo 0 > tracing_enabled)
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @wait_pipe: override how the user waits for traces on trace_pipe
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*wait_pipe)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct tracer *tracer,
						u32 mask, int set);
	struct tracer		*next;
	struct tracer_flags	*flags;
	bool			print_max;
	bool			enabled;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool			use_max_tr;
#endif
};

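/*
 * A minimal tracer sketch (illustrative only; the names are
 * hypothetical, but nop-style tracers such as the one in trace_nop.c
 * follow this shape):
 *
 *	static int mytrace_init(struct trace_array *tr)
 *	{
 *		return 0;	// start tracing
 *	}
 *
 *	static struct tracer mytrace __read_mostly = {
 *		.name	= "mytrace",
 *		.init	= mytrace_init,
 *	};
 *
 * and, from an __init function:
 *
 *	register_tracer(&mytrace);
 */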

/* Only current can touch trace_recursion */

/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 *  When function tracing occurs, the following steps are made:
 *   If arch does not support a ftrace feature:
 *    call internal function (uses INTERNAL bits) which calls...
 *   If callback is registered to the "global" list, the list
 *    function is called and recursion checks the GLOBAL bits.
 *    then this function calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 *
 * Now if the arch does not support a feature, and it calls
 * the global list function which calls the ftrace callback
 * all three of these steps will do a recursion protection.
 * There's no reason to do one if the previous caller already
 * did. The recursion that we are protecting against will
 * go through the same steps again.
 *
 * To prevent the multiple recursion checks, if a recursion
 * bit is set that is higher than the MAX bit of the current
 * check, then we know that the check was made by the previous
 * caller, and we can skip the current check.
 */
enum {
	TRACE_BUFFER_BIT,
	TRACE_BUFFER_NMI_BIT,
	TRACE_BUFFER_IRQ_BIT,
	TRACE_BUFFER_SIRQ_BIT,

	/* Start of function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,

	/* GLOBAL_BITs must be greater than FTRACE_BITs */
	TRACE_GLOBAL_BIT,
	TRACE_GLOBAL_NMI_BIT,
	TRACE_GLOBAL_IRQ_BIT,
	TRACE_GLOBAL_SIRQ_BIT,

	/* INTERNAL_BITs must be greater than GLOBAL_BITs */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,

	TRACE_CONTROL_BIT,

	/*
	 * Abuse of the trace_recursion.
	 * We need a way to maintain state if we are tracing the function
	 * graph in irq, because we want to trace a particular function
	 * that was called in irq context while irq tracing is off. Since
	 * this can only be modified by current, we can reuse
	 * trace_recursion.
	 */
	TRACE_IRQ_BIT,
};

#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))

#define TRACE_CONTEXT_BITS	4

#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_GLOBAL_START	TRACE_GLOBAL_BIT
#define TRACE_GLOBAL_MAX	((1 << (TRACE_GLOBAL_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_LIST_START	TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_CONTEXT_MASK	TRACE_LIST_MAX

static __always_inline int trace_get_context_bit(void)
{
	int bit;

	if (in_interrupt()) {
		if (in_nmi())
			bit = 0;
		else if (in_irq())
			bit = 1;
		else
			bit = 2;
	} else
		bit = 3;

	return bit;
}

static __always_inline int trace_test_and_set_recursion(int start, int max)
{
	unsigned int val = current->trace_recursion;
	int bit;

	/* A previous recursion check was made */
	if ((val & TRACE_CONTEXT_MASK) > max)
		return 0;

	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit)))
		return -1;

	val |= 1 << bit;
	current->trace_recursion = val;
	barrier();

	return bit;
}

static __always_inline void trace_clear_recursion(int bit)
{
	unsigned int val = current->trace_recursion;

	if (!bit)
		return;

	bit = 1 << bit;
	val &= ~bit;

	barrier();
	current->trace_recursion = val;
}
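
/*
 * Illustrative use of the recursion API above (a sketch; the callback
 * name is hypothetical):
 *
 *	static void my_func_callback(unsigned long ip,
 *				     unsigned long parent_ip)
 *	{
 *		int bit;
 *
 *		bit = trace_test_and_set_recursion(TRACE_FTRACE_START,
 *						   TRACE_FTRACE_MAX);
 *		if (bit < 0)
 *			return;		// recursion detected, bail out
 *
 *		// ... do the actual tracing work ...
 *
 *		trace_clear_recursion(bit);
 *	}
 */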

static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	if (iter->buffer_iter && iter->buffer_iter[cpu])
		return iter->buffer_iter[cpu];
	return NULL;
}

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset(struct trace_buffer *buf, int cpu);
void tracing_reset_online_cpus(struct trace_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

struct dentry *tracing_init_dentry_tr(struct trace_array *tr);
struct dentry *tracing_init_dentry(void);

struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags,
			  int pc);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void __buffer_unlock_commit(struct ring_buffer *buffer,
			    struct ring_buffer_event *event);

int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

void poll_wait_pipe(struct trace_iterator *iter);

void tracing_sched_switch_trace(struct trace_array *tr,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_graph_function(struct trace_array *tr,
			  unsigned long ip,
			  unsigned long parent_ip,
			  unsigned long flags, int pc);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
int trace_empty(struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
int is_tracing_stopped(void);

extern cpumask_var_t __read_mostly tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
extern unsigned long tracing_max_latency;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */

#ifdef CONFIG_STACKTRACE
void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc);

void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
			     int skip, int pc, struct pt_regs *regs);

void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
			    int pc);

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
static inline void ftrace_trace_stack(struct ring_buffer *buffer,
				      unsigned long flags, int skip, int pc)
{
}

static inline void ftrace_trace_stack_regs(struct ring_buffer *buffer,
					   unsigned long flags, int skip,
					   int pc, struct pt_regs *regs)
{
}

static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
					  unsigned long flags, int pc)
{
}

static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */

extern cycle_t ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);

extern bool ring_buffer_expanded;
extern bool tracing_selftest_disabled;
DECLARE_PER_CPU(int, ftrace_cpu_disabled);

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
/*
 * Tracer data references selftest functions that only occur
 * on boot up. These can be __init functions. Thus, when selftests
 * are enabled, then the tracers need to reference __init functions.
 */
#define __tracer_data		__refdata
#else
/* Tracers are seldom changed. Optimize when selftests are disabled. */
#define __tracer_data		__read_mostly
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(cycle_t nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...);
int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern unsigned long trace_flags;

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20

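/*
 * Example (a sketch): graph-based tracers pass a combination of the
 * flags above, e.g. something like
 *
 *	TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_DURATION
 *
 * to print_graph_function_flags() from their print_line callback.
 */
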
extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
			       unsigned long flags, int pc);
extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned long flags, int pc);


#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS		32
extern int ftrace_graph_filter_enabled;
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];

static inline int ftrace_graph_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_filter_enabled)
		return 1;

	for (i = 0; i < ftrace_graph_count; i++) {
		if (addr == ftrace_graph_funcs[i]) {
			/*
			 * If no irqs are to be traced, but a set_graph_function
			 * is set, and called by an interrupt handler, we still
			 * want to trace it.
			 */
			if (in_irq())
				trace_recursion_set(TRACE_IRQ_BIT);
			else
				trace_recursion_clear(TRACE_IRQ_BIT);
			return 1;
		}
	}

	return 0;
}
#else
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct task_struct *task)
{
	if (list_empty(&ftrace_pids))
		return 1;

	return test_tsk_trace_trace(task);
}
extern int ftrace_is_dead(void);
#else
static inline int ftrace_trace_task(struct task_struct *task)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
#endif

int ftrace_event_is_function(struct ftrace_event_call *call);

/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
			  size_t cnt, loff_t *ppos);

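/*
 * Typical use (a sketch; "do_something" is a hypothetical consumer,
 * the rest mirrors how ftrace's debugfs write handlers drive the
 * parser, one space-separated token at a time):
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser) &&
 *	    !trace_parser_cont(&parser)) {
 *		do_something(parser.buffer);
 *		trace_parser_clear(&parser);
 *	}
 *
 *	trace_parser_put(&parser);
 */
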
/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_HEX			= 0x20,
	TRACE_ITER_BIN			= 0x40,
	TRACE_ITER_BLOCK		= 0x80,
	TRACE_ITER_STACKTRACE		= 0x100,
	TRACE_ITER_PRINTK		= 0x200,
	TRACE_ITER_PREEMPTONLY		= 0x400,
	TRACE_ITER_BRANCH		= 0x800,
	TRACE_ITER_ANNOTATE		= 0x1000,
	TRACE_ITER_USERSTACKTRACE	= 0x2000,
	TRACE_ITER_SYM_USEROBJ		= 0x4000,
	TRACE_ITER_PRINTK_MSGONLY	= 0x8000,
	TRACE_ITER_CONTEXT_INFO		= 0x10000, /* Print pid/cpu/time */
	TRACE_ITER_LATENCY_FMT		= 0x20000,
	TRACE_ITER_SLEEP_TIME		= 0x40000,
	TRACE_ITER_GRAPH_TIME		= 0x80000,
	TRACE_ITER_RECORD_CMD		= 0x100000,
	TRACE_ITER_OVERWRITE		= 0x200000,
	TRACE_ITER_STOP_ON_FREE		= 0x400000,
	TRACE_ITER_IRQ_INFO		= 0x800000,
	TRACE_ITER_MARKERS		= 0x1000000,
	TRACE_ITER_FUNCTION		= 0x2000000,
};

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

extern struct tracer nop_trace;

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);

struct ftrace_event_field {
	struct list_head	link;
	const char		*name;
	const char		*type;
	int			filter_type;
	int			offset;
	int			size;
	int			is_signed;
};

struct event_filter {
	int			n_preds;	/* Number assigned */
	int			a_preds;	/* allocated */
	struct filter_pred	*preds;
	struct filter_pred	*root;
	char			*filter_string;
};

struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct event_filter	*filter;
	int			ref_count;
};

struct ftrace_subsystem_dir {
	struct list_head	list;
	struct event_subsystem	*subsystem;
	struct trace_array	*tr;
	struct dentry		*entry;
	int			ref_count;
	int			nr_events;
};

#define FILTER_PRED_INVALID	((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT	(1 << 15)
#define FILTER_PRED_FOLD	(1 << 15)

/*
 * The max preds is the size of unsigned short with
 * two flags at the MSBs. One bit is used for both the IS_RIGHT
 * and FOLD flags. The other is reserved.
 *
 * 2^14 preds is way more than enough.
 */
#define MAX_FILTER_PRED		16384

struct filter_pred;
struct regex;

typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);

typedef int (*regex_match_func)(char *str, struct regex *r, int len);

enum regex_type {
	MATCH_FULL = 0,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};
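
/*
 * How a filter pattern maps to a regex_type (a sketch of
 * filter_parse_regex() behavior):
 *
 *	"foo"	-> MATCH_FULL		(exact match)
 *	"foo*"	-> MATCH_FRONT_ONLY	(must start with "foo")
 *	"*foo*"	-> MATCH_MIDDLE_ONLY	(must contain "foo")
 *	"*foo"	-> MATCH_END_ONLY	(must end with "foo")
 */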

struct regex {
	char			pattern[MAX_FILTER_STR_VAL];
	int			len;
	int			field_len;
	regex_match_func	match;
};

struct filter_pred {
	filter_pred_fn_t	fn;
	u64			val;
	struct regex		regex;
	unsigned short		*ops;
	struct ftrace_event_field *field;
	int			offset;
	int			not;
	int			op;
	unsigned short		index;
	unsigned short		parent;
	unsigned short		left;
	unsigned short		right;
};

extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
extern void print_event_filter(struct ftrace_event_call *call,
			       struct trace_seq *s);
extern int apply_event_filter(struct ftrace_event_call *call,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
extern int filter_assign_type(const char *type);

struct ftrace_event_field *
trace_find_event_field(struct ftrace_event_call *call, char *name);

static inline int
filter_check_discard(struct ftrace_event_call *call, void *rec,
		     struct ring_buffer *buffer,
		     struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
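
/*
 * Sketch of the intended call site (roughly what the generated event
 * code does on commit; the local names are illustrative):
 *
 *	if (!filter_check_discard(call, entry, buffer, event))
 *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
 */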

extern void trace_event_enable_cmd_record(bool enable);
extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
extern int event_trace_del_tracer(struct trace_array *tr);

extern struct mutex event_mutex;
extern struct list_head ftrace_events;

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

extern const char *__start___tracepoint_str[];
extern const char *__stop___tracepoint_str[];

void trace_printk_init_buffers(void);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);

/*
 * Normal trace_printk() and friends allocate special buffers
 * to do the manipulation, as well as save the print formats
 * into sections to display. But the trace infrastructure wants
 * to use these without the added overhead, at the price of being
 * a bit slower (used mainly for warnings, where we don't care
 * about performance). The internal_trace_puts() is for such
 * a purpose.
 */
#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
	extern struct ftrace_event_call					\
	__attribute__((__aligned__(4))) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)
#include "trace_entries.h"
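
/*
 * (Sketch: with the redefinition above, this second include of
 * trace_entries.h only emits declarations such as
 *
 *	extern struct ftrace_event_call event_function;
 *
 * one per entry defined in trace_entries.h.)
 */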

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
int perf_ftrace_event_register(struct ftrace_event_call *call,
			       enum trace_reg type, void *data);
#else
#define perf_ftrace_event_register NULL
#endif

#endif /* _LINUX_KERNEL_TRACE_H */