#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/ftrace_event.h>

#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h>		/* For NR_SYSCALLS */
#include <asm/syscall.h>	/* some archs define it here */
#endif

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_BLK,
	TRACE_BPUTS,

	__TRACE_LAST_TYPE,
};


#undef __field
#define __field(type, item)		type	item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __array
#define __array(type, item, size)	type	item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type	item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
	struct struct_name {						\
		struct trace_entry	ent;				\
		tstruct							\
	}

#undef TP_ARGS
#define TP_ARGS(args...)	args

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	\
			 filter, regfn) \
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)

#include "trace_entries.h"
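
/*
 * Illustrative note (not part of the original header): with the macro
 * definitions above, an entry described in trace_entries.h such as the
 * function entry roughly expands to a plain struct, e.g.
 *
 *	struct ftrace_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *		unsigned long		parent_ip;
 *	};
 *
 * Other users of trace_entries.h redefine these macros to generate
 * different views of the same entry descriptions.
 */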

/*
 * Syscalls are special and need special handling; this is why
 * they are not included in trace_entries.h.
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	long			ret;
};

struct kprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct kretprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF	  - interrupts were disabled
 *  IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
 *  NEED_RESCHED  - reschedule is requested
 *  HARDIRQ	  - inside an interrupt handler
 *  SOFTIRQ	  - inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
};

#define TRACE_BUF_SIZE		1024

struct trace_array;

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data: (for example which task started
 * the trace, etc.)
 */
struct trace_array_cpu {
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		entries;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	kuid_t			uid;
	char			comm[TASK_COMM_LEN];
};

struct tracer;

struct trace_buffer {
	struct trace_array		*tr;
	struct ring_buffer		*buffer;
	struct trace_array_cpu __percpu	*data;
	cycle_t				time_start;
	int				cpu;
};

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct list_head	list;
	char			*name;
	struct trace_buffer	trace_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the trace_buffer
	 * When a snapshot is taken, the buffer of the max_buffer is swapped
	 * with the buffer of the trace_buffer and the buffers are reset for
	 * the trace_buffer so the tracing can continue.
	 */
	struct trace_buffer	max_buffer;
	bool			allocated_snapshot;
#endif
	int			buffer_disabled;
#ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;
	int			sys_refcount_exit;
	struct ftrace_event_file __rcu *enter_syscall_files[NR_syscalls];
	struct ftrace_event_file __rcu *exit_syscall_files[NR_syscalls];
#endif
	int			stop_count;
	int			clock_id;
	struct tracer		*current_trace;
	unsigned int		flags;
	raw_spinlock_t		start_lock;
	struct dentry		*dir;
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct dentry		*event_dir;
	struct list_head	systems;
	struct list_head	events;
	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
	int			ref;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops	*ops;
	/* function tracing enabled */
	int			function_enabled;
#endif
};
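
/*
 * Illustrative note (not part of the original header): the max_buffer
 * swap described above is also what backs the tracing "snapshot" file
 * when snapshot support is enabled, e.g. (assuming debugfs is mounted
 * at /sys/kernel/debug):
 *
 *	echo 1 > /sys/kernel/debug/tracing/snapshot	(take a snapshot)
 *	cat /sys/kernel/debug/tracing/snapshot		(read it back)
 */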

enum {
	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)
};

extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;

extern int trace_array_get(struct trace_array *tr);
extern void trace_array_put(struct trace_array *tr);

/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH);	\
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		__ftrace_bad_type();					\
	} while (0)

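/*
 * Illustrative sketch (not part of the original header): output code
 * typically uses trace_assign_type() from a print callback along the
 * lines of
 *
 *	struct ftrace_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *	trace_seq_printf(&iter->seq, "%ps <-- %ps\n",
 *			 (void *)field->ip, (void *)field->parent_ip);
 *
 * so a mismatch between the variable's type and the entry's id is
 * caught by the WARN_ON() in IF_ASSIGN().
 */
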
/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b

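/*
 * Illustrative sketch (not part of the original header): a tracer that
 * wants private options typically declares them with TRACER_OPT(); the
 * names below are made up:
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(my_verbose, 0x1) },
 *		{ }
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val  = 0,
 *		.opts = my_opts,
 *	};
 *
 * The opts array is terminated by an empty entry, and the tracer points
 * its struct tracer ->flags at my_flags.
 */
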
/**
 * struct tracer - a specific tracer and its callbacks to interact with debugfs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
 * @stop: called when tracing is paused (echo 0 > tracing_enabled)
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @wait_pipe: override how the user waits for traces on trace_pipe
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*wait_pipe)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(struct trace_array *tr,
					    u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct trace_array *tr,
						u32 mask, int set);
	struct tracer		*next;
	struct tracer_flags	*flags;
	int			enabled;
	bool			print_max;
	bool			allow_instances;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool			use_max_tr;
#endif
};

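/*
 * Illustrative sketch (not part of the original header): a minimal
 * tracer is usually defined as a static struct tracer and registered
 * from an __init function; the names below are made up:
 *
 *	static struct tracer my_tracer __tracer_data = {
 *		.name	= "mytracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	static __init int init_my_tracer(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	core_initcall(init_my_tracer);
 */
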
/* Only current can touch trace_recursion */

/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 *  When function tracing occurs, the following steps are made:
 *   If arch does not support a ftrace feature:
 *    call internal function (uses INTERNAL bits) which calls...
 *   If callback is registered to the "global" list, the list
 *    function is called and recursion checks the GLOBAL bits.
 *    then this function calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 *
 * Now if the arch does not support a feature, and it calls
 * the global list function which calls the ftrace callback
 * all three of these steps will do a recursion protection.
 * There's no reason to do one if the previous caller already
 * did. The recursion that we are protecting against will
 * go through the same steps again.
 *
 * To prevent the multiple recursion checks, if a recursion
 * bit is set that is higher than the MAX bit of the current
 * check, then we know that the check was made by the previous
 * caller, and we can skip the current check.
 */
enum {
	TRACE_BUFFER_BIT,
	TRACE_BUFFER_NMI_BIT,
	TRACE_BUFFER_IRQ_BIT,
	TRACE_BUFFER_SIRQ_BIT,

	/* Start of function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,

	/* GLOBAL_BITs must be greater than FTRACE_BITs */
	TRACE_GLOBAL_BIT,
	TRACE_GLOBAL_NMI_BIT,
	TRACE_GLOBAL_IRQ_BIT,
	TRACE_GLOBAL_SIRQ_BIT,

	/* INTERNAL_BITs must be greater than GLOBAL_BITs */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,

	TRACE_CONTROL_BIT,

/*
 * Abuse of the trace_recursion.
 * We need a way to maintain state when tracing the function graph
 * in irq context, because we may want to trace a particular function
 * that was called in irq context even though irq tracing is off.
 * Since this can only be modified by current, we can reuse
 * trace_recursion.
 */
	TRACE_IRQ_BIT,
};

#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))

#define TRACE_CONTEXT_BITS	4

#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_GLOBAL_START	TRACE_GLOBAL_BIT
#define TRACE_GLOBAL_MAX	((1 << (TRACE_GLOBAL_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_LIST_START	TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_CONTEXT_MASK	TRACE_LIST_MAX

static __always_inline int trace_get_context_bit(void)
{
	int bit;

	if (in_interrupt()) {
		if (in_nmi())
			bit = 0;

		else if (in_irq())
			bit = 1;
		else
			bit = 2;
	} else
		bit = 3;

	return bit;
}

static __always_inline int trace_test_and_set_recursion(int start, int max)
{
	unsigned int val = current->trace_recursion;
	int bit;

	/* A previous recursion check was made */
	if ((val & TRACE_CONTEXT_MASK) > max)
		return 0;

	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit)))
		return -1;

	val |= 1 << bit;
	current->trace_recursion = val;
	barrier();

	return bit;
}

static __always_inline void trace_clear_recursion(int bit)
{
	unsigned int val = current->trace_recursion;

	if (!bit)
		return;

	bit = 1 << bit;
	val &= ~bit;

	barrier();
	current->trace_recursion = val;
}

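/*
 * Illustrative sketch (not part of the original header): a function
 * trace callback typically brackets its work with these helpers, e.g.
 *
 *	int bit;
 *
 *	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
 *	if (bit < 0)
 *		return;
 *
 *	... do the actual tracing work ...
 *
 *	trace_clear_recursion(bit);
 *
 * so re-entry from the same context (normal/softirq/irq/NMI) is dropped
 * instead of recursing.
 */
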
static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	if (iter->buffer_iter && iter->buffer_iter[cpu])
		return iter->buffer_iter[cpu];
	return NULL;
}

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset(struct trace_buffer *buf, int cpu);
void tracing_reset_online_cpus(struct trace_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

struct dentry *tracing_init_dentry_tr(struct trace_array *tr);
struct dentry *tracing_init_dentry(void);

struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags,
			  int pc);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void __buffer_unlock_commit(struct ring_buffer *buffer,
			    struct ring_buffer_event *event);

int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

void poll_wait_pipe(struct trace_iterator *iter);

void tracing_sched_switch_trace(struct trace_array *tr,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_graph_function(struct trace_array *tr,
			  unsigned long ip,
			  unsigned long parent_ip,
			  unsigned long flags, int pc);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
int trace_empty(struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
int is_tracing_stopped(void);

loff_t tracing_lseek(struct file *file, loff_t offset, int whence);

extern cpumask_var_t __read_mostly tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
extern unsigned long tracing_max_latency;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */

#ifdef CONFIG_STACKTRACE
void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc);

void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
			     int skip, int pc, struct pt_regs *regs);

void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
			    int pc);

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
static inline void ftrace_trace_stack(struct ring_buffer *buffer,
				      unsigned long flags, int skip, int pc)
{
}

static inline void ftrace_trace_stack_regs(struct ring_buffer *buffer,
					   unsigned long flags, int skip,
					   int pc, struct pt_regs *regs)
{
}

static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
					  unsigned long flags, int pc)
{
}

static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */

extern cycle_t ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);

extern bool ring_buffer_expanded;
extern bool tracing_selftest_disabled;
DECLARE_PER_CPU(int, ftrace_cpu_disabled);

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
/*
 * Tracer data references selftest functions that only occur
 * on boot up. These can be __init functions. Thus, when selftests
 * are enabled, then the tracers need to reference __init functions.
 */
#define __tracer_data		__refdata
#else
/* Tracers are seldom changed. Optimize when selftests are disabled. */
#define __tracer_data		__read_mostly
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(cycle_t nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...);
int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern unsigned long trace_flags;

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
#define TRACE_GRAPH_PRINT_FILL_SHIFT	28
#define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)

extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
			       unsigned long flags, int pc);
extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned long flags, int pc);


#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS		32
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
extern int ftrace_graph_notrace_count;
extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS];

static inline int ftrace_graph_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_count)
		return 1;

	for (i = 0; i < ftrace_graph_count; i++) {
		if (addr == ftrace_graph_funcs[i]) {
			/*
			 * If no irqs are to be traced, but a set_graph_function
			 * is set, and called by an interrupt handler, we still
			 * want to trace it.
			 */
			if (in_irq())
				trace_recursion_set(TRACE_IRQ_BIT);
			else
				trace_recursion_clear(TRACE_IRQ_BIT);
			return 1;
		}
	}

	return 0;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_notrace_count)
		return 0;

	for (i = 0; i < ftrace_graph_notrace_count; i++) {
		if (addr == ftrace_graph_notrace_funcs[i])
			return 1;
	}

	return 0;
}
#else
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct task_struct *task)
{
	if (list_empty(&ftrace_pids))
		return 1;

	return test_tsk_trace_trace(task);
}
extern int ftrace_is_dead(void);
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent);
void ftrace_destroy_function_files(struct trace_array *tr);
#else
static inline int ftrace_trace_task(struct task_struct *task)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
static inline int
ftrace_create_function_files(struct trace_array *tr,
			     struct dentry *parent)
{
	return 0;
}
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
#endif /* CONFIG_FUNCTION_TRACER */

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent);
void ftrace_destroy_filter_files(struct ftrace_ops *ops);
#else
/*
 * The ops parameter passed in is usually undefined.
 * This must be a macro.
 */
#define ftrace_create_filter_files(ops, parent) do { } while (0)
#define ftrace_destroy_filter_files(ops) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */

int ftrace_event_is_function(struct ftrace_event_call *call);

/*
 * struct trace_parser - helper for reading user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
			  size_t cnt, loff_t *ppos);

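/*
 * Illustrative sketch (not part of the original header): write handlers
 * such as the one behind set_ftrace_filter typically drive the parser
 * roughly like this:
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser) &&
 *	    !trace_parser_cont(&parser)) {
 *		... process the token now held in parser.buffer ...
 *		trace_parser_clear(&parser);
 *	}
 *
 *	trace_parser_put(&parser);
 */
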
/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that controls the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_HEX			= 0x20,
	TRACE_ITER_BIN			= 0x40,
	TRACE_ITER_BLOCK		= 0x80,
	TRACE_ITER_STACKTRACE		= 0x100,
	TRACE_ITER_PRINTK		= 0x200,
	TRACE_ITER_PREEMPTONLY		= 0x400,
	TRACE_ITER_BRANCH		= 0x800,
	TRACE_ITER_ANNOTATE		= 0x1000,
	TRACE_ITER_USERSTACKTRACE	= 0x2000,
	TRACE_ITER_SYM_USEROBJ		= 0x4000,
	TRACE_ITER_PRINTK_MSGONLY	= 0x8000,
	TRACE_ITER_CONTEXT_INFO		= 0x10000, /* Print pid/cpu/time */
	TRACE_ITER_LATENCY_FMT		= 0x20000,
	TRACE_ITER_SLEEP_TIME		= 0x40000,
	TRACE_ITER_GRAPH_TIME		= 0x80000,
	TRACE_ITER_RECORD_CMD		= 0x100000,
	TRACE_ITER_OVERWRITE		= 0x200000,
	TRACE_ITER_STOP_ON_FREE		= 0x400000,
	TRACE_ITER_IRQ_INFO		= 0x800000,
	TRACE_ITER_MARKERS		= 0x1000000,
	TRACE_ITER_FUNCTION		= 0x2000000,
};

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

extern struct tracer nop_trace;

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);

struct ftrace_event_field {
	struct list_head	link;
	const char		*name;
	const char		*type;
	int			filter_type;
	int			offset;
	int			size;
	int			is_signed;
};

struct event_filter {
	int			n_preds;	/* Number assigned */
	int			a_preds;	/* allocated */
	struct filter_pred	*preds;
	struct filter_pred	*root;
	char			*filter_string;
};

struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct event_filter	*filter;
	int			ref_count;
};

struct ftrace_subsystem_dir {
	struct list_head	list;
	struct event_subsystem	*subsystem;
	struct trace_array	*tr;
	struct dentry		*entry;
	int			ref_count;
	int			nr_events;
};

#define FILTER_PRED_INVALID	((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT	(1 << 15)
#define FILTER_PRED_FOLD	(1 << 15)

/*
 * The max preds is the size of unsigned short with
 * two flags at the MSBs. One bit is used for both the IS_RIGHT
 * and FOLD flags. The other is reserved.
 *
 * 2^14 preds is way more than enough.
 */
#define MAX_FILTER_PRED		16384

struct filter_pred;
struct regex;

typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);

typedef int (*regex_match_func)(char *str, struct regex *r, int len);

enum regex_type {
	MATCH_FULL = 0,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

struct regex {
	char			pattern[MAX_FILTER_STR_VAL];
	int			len;
	int			field_len;
	regex_match_func	match;
};

struct filter_pred {
	filter_pred_fn_t	fn;
	u64			val;
	struct regex		regex;
	unsigned short		*ops;
	struct ftrace_event_field *field;
	int			offset;
	int			not;
	int			op;
	unsigned short		index;
	unsigned short		parent;
	unsigned short		left;
	unsigned short		right;
};

extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
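
/*
 * Illustrative sketch (not part of the original header): callers pass a
 * glob-style string and get back the match type plus the portion to
 * compare, roughly like
 *
 *	char *search;
 *	int not;
 *	enum regex_type type;
 *
 *	type = filter_parse_regex(buff, strlen(buff), &search, &not);
 *
 * e.g. "*foo" yields MATCH_END_ONLY with search pointing at "foo",
 * and a leading '!' sets "not".
 */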
extern void print_event_filter(struct ftrace_event_file *file,
			       struct trace_seq *s);
extern int apply_event_filter(struct ftrace_event_file *file,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
extern int filter_assign_type(const char *type);
extern int create_event_filter(struct ftrace_event_call *call,
			       char *filter_str, bool set_str,
			       struct event_filter **filterp);
extern void free_event_filter(struct event_filter *filter);

struct ftrace_event_field *
trace_find_event_field(struct ftrace_event_call *call, char *name);

extern void trace_event_enable_cmd_record(bool enable);
extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
extern int event_trace_del_tracer(struct trace_array *tr);

extern struct ftrace_event_file *find_event_file(struct trace_array *tr,
						 const char *system,
						 const char *event);

static inline void *event_file_data(struct file *filp)
{
	return ACCESS_ONCE(file_inode(filp)->i_private);
}

extern struct mutex event_mutex;
extern struct list_head ftrace_events;

extern const struct file_operations event_trigger_fops;

extern int register_trigger_cmds(void);
extern void clear_event_triggers(struct trace_array *tr);

struct event_trigger_data {
	unsigned long			count;
	int				ref;
	struct event_trigger_ops	*ops;
	struct event_command		*cmd_ops;
	struct event_filter __rcu	*filter;
	char				*filter_str;
	void				*private_data;
	struct list_head		list;
};

/**
 * struct event_trigger_ops - callbacks for trace event triggers
 *
 * The methods in this structure provide per-event trigger hooks for
 * various trigger operations.
 *
 * All the methods below, except for @init() and @free(), must be
 * implemented.
 *
 * @func: The trigger 'probe' function called when the triggering
 *	event occurs. The data passed into this callback is the data
 *	that was supplied to the event_command @reg() function that
 *	registered the trigger (see struct event_command).
 *
 * @init: An optional initialization function called for the trigger
 *	when the trigger is registered (via the event_command reg()
 *	function). This can be used to perform per-trigger
 *	initialization such as incrementing a per-trigger reference
 *	count, for instance. This is usually implemented by the
 *	generic utility function @event_trigger_init() (see
 *	trace_event_triggers.c).
 *
 * @free: An optional de-initialization function called for the
 *	trigger when the trigger is unregistered (via the
 *	event_command @reg() function). This can be used to perform
 *	per-trigger de-initialization such as decrementing a
 *	per-trigger reference count and freeing corresponding trigger
 *	data, for instance. This is usually implemented by the
 *	generic utility function @event_trigger_free() (see
 *	trace_event_triggers.c).
 *
 * @print: The callback function invoked to have the trigger print
 *	itself. This is usually implemented by a wrapper function
 *	that calls the generic utility function @event_trigger_print()
 *	(see trace_event_triggers.c).
 */
struct event_trigger_ops {
	void			(*func)(struct event_trigger_data *data);
	int			(*init)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	void			(*free)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	int			(*print)(struct seq_file *m,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data);
};

/**
 * struct event_command - callbacks and data members for event commands
 *
 * Event commands are invoked by users by writing the command name
 * into the 'trigger' file associated with a trace event. The
 * parameters associated with a specific invocation of an event
 * command are used to create an event trigger instance, which is
 * added to the list of trigger instances associated with that trace
 * event. When the event is hit, the set of triggers associated with
 * that event is invoked.
 *
 * The data members in this structure provide per-event command data
 * for various event commands.
 *
 * All the data members below, except for @post_trigger, must be set
 * for each event command.
 *
 * @name: The unique name that identifies the event command. This is
 *	the name used when setting triggers via trigger files.
 *
 * @trigger_type: A unique id that identifies the event command
 *	'type'. This value has two purposes, the first to ensure that
 *	only one trigger of the same type can be set at a given time
 *	for a particular event e.g. it doesn't make sense to have both
 *	a traceon and traceoff trigger attached to a single event at
 *	the same time, so traceon and traceoff have the same type
 *	though they have different names. The @trigger_type value is
 *	also used as a bit value for deferring the actual trigger
 *	action until after the current event is finished. Some
 *	commands need to do this if they themselves log to the trace
 *	buffer (see the @post_trigger() member below). @trigger_type
 *	values are defined by adding new values to the trigger_type
 *	enum in include/linux/ftrace_event.h.
 *
 * @post_trigger: A flag that says whether or not this command needs
 *	to have its action delayed until after the current event has
 *	been closed. Some triggers need to avoid being invoked while
 *	an event is currently in the process of being logged, since
 *	the trigger may itself log data into the trace buffer. Thus
 *	we make sure the current event is committed before invoking
 *	those triggers. To do that, the trigger invocation is split
 *	in two - the first part checks the filter using the current
 *	trace record; if a command has the @post_trigger flag set, it
 *	sets a bit for itself in the return value, otherwise it
 *	directly invokes the trigger. Once all commands have been
 *	either invoked or set their return flag, the current record is
 *	either committed or discarded. At that point, if any commands
 *	have deferred their triggers, those commands are finally
 *	invoked following the close of the current event. In other
 *	words, if the event_trigger_ops @func() probe implementation
 *	itself logs to the trace buffer, this flag should be set,
 *	otherwise it can be left unspecified.
 *
 * All the methods below, except for @set_filter(), must be
 * implemented.
 *
 * @func: The callback function responsible for parsing and
 *	registering the trigger written to the 'trigger' file by the
 *	user. It allocates the trigger instance and registers it with
 *	the appropriate trace event. It makes use of the other
 *	event_command callback functions to orchestrate this, and is
 *	usually implemented by the generic utility function
 *	@event_trigger_callback() (see trace_event_triggers.c).
 *
 * @reg: Adds the trigger to the list of triggers associated with the
 *	event, and enables the event trigger itself, after
 *	initializing it (via the event_trigger_ops @init() function).
 *	This is also where commands can use the @trigger_type value to
 *	make the decision as to whether or not multiple instances of
 *	the trigger should be allowed. This is usually implemented by
 *	the generic utility function @register_trigger() (see
 *	trace_event_triggers.c).
 *
 * @unreg: Removes the trigger from the list of triggers associated
 *	with the event, and disables the event trigger itself, after
 *	initializing it (via the event_trigger_ops @free() function).
 *	This is usually implemented by the generic utility function
 *	@unregister_trigger() (see trace_event_triggers.c).
 *
 * @set_filter: An optional function called to parse and set a filter
 *	for the trigger. If no @set_filter() method is set for the
 *	event command, filters set by the user for the command will be
 *	ignored. This is usually implemented by the generic utility
 *	function @set_trigger_filter() (see trace_event_triggers.c).
 *
 * @get_trigger_ops: The callback function invoked to retrieve the
 *	event_trigger_ops implementation associated with the command.
 */
struct event_command {
	struct list_head	list;
	char			*name;
	enum event_trigger_type	trigger_type;
	bool			post_trigger;
	int			(*func)(struct event_command *cmd_ops,
					struct ftrace_event_file *file,
					char *glob, char *cmd, char *params);
	int			(*reg)(char *glob,
				       struct event_trigger_ops *ops,
				       struct event_trigger_data *data,
				       struct ftrace_event_file *file);
	void			(*unreg)(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct ftrace_event_file *file);
	int			(*set_filter)(char *filter_str,
					      struct event_trigger_data *data,
					      struct ftrace_event_file *file);
	struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
};
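
/*
 * Illustrative note (not part of the original header): from userspace,
 * event commands are used by writing them to an event's "trigger" file,
 * e.g. (assuming the tracing directory is /sys/kernel/debug/tracing):
 *
 *	echo 'traceoff' > events/sched/sched_switch/trigger
 *	echo 'snapshot:1 if prev_state == 2' > events/sched/sched_switch/trigger
 *
 * Each command name above corresponds to a registered struct event_command,
 * and the optional "if ..." clause is handled by its @set_filter() method.
 */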

extern int trace_event_enable_disable(struct ftrace_event_file *file,
				      int enable, int soft_disable);
extern int tracing_alloc_snapshot(void);

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

extern const char *__start___tracepoint_str[];
extern const char *__stop___tracepoint_str[];

void trace_printk_init_buffers(void);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);

/*
 * Normal trace_printk() and friends allocate special buffers
 * to do the manipulation, as well as save the print formats
 * into sections to display. But the trace infrastructure wants
 * to use these without the added overhead at the price of being
 * a bit slower (used mainly for warnings, where we don't care
 * about performance). The internal_trace_puts() is for such
 * a purpose.
 */
#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
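
/*
 * Illustrative note (not part of the original header): this is meant for
 * infrastructure messages written into the ring buffer itself, e.g.
 *
 *	internal_trace_puts("*** internal warning ***\n");
 *
 * rather than for general use; normal code should keep using trace_printk().
 */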

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
	extern struct ftrace_event_call					\
	__attribute__((__aligned__(4))) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)
#include "trace_entries.h"

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
int perf_ftrace_event_register(struct ftrace_event_call *call,
			       enum trace_reg type, void *data);
#else
#define perf_ftrace_event_register NULL
#endif

#endif /* _LINUX_KERNEL_TRACE_H */