tracing: Add support for named triggers
kernel/trace/trace.h
1
2 #ifndef _LINUX_KERNEL_TRACE_H
3 #define _LINUX_KERNEL_TRACE_H
4
5 #include <linux/fs.h>
6 #include <linux/atomic.h>
7 #include <linux/sched.h>
8 #include <linux/clocksource.h>
9 #include <linux/ring_buffer.h>
10 #include <linux/mmiotrace.h>
11 #include <linux/tracepoint.h>
12 #include <linux/ftrace.h>
13 #include <linux/hw_breakpoint.h>
14 #include <linux/trace_seq.h>
15 #include <linux/trace_events.h>
16 #include <linux/compiler.h>
18
19 #ifdef CONFIG_FTRACE_SYSCALLS
20 #include <asm/unistd.h> /* For NR_SYSCALLS */
21 #include <asm/syscall.h> /* some archs define it here */
22 #endif
23
24 enum trace_type {
25 __TRACE_FIRST_TYPE = 0,
26
27 TRACE_FN,
28 TRACE_CTX,
29 TRACE_WAKE,
30 TRACE_STACK,
31 TRACE_PRINT,
32 TRACE_BPRINT,
33 TRACE_MMIO_RW,
34 TRACE_MMIO_MAP,
35 TRACE_BRANCH,
36 TRACE_GRAPH_RET,
37 TRACE_GRAPH_ENT,
38 TRACE_USER_STACK,
39 TRACE_BLK,
40 TRACE_BPUTS,
41
42 __TRACE_LAST_TYPE,
43 };
44
45
46 #undef __field
47 #define __field(type, item) type item;
48
49 #undef __field_struct
50 #define __field_struct(type, item) __field(type, item)
51
52 #undef __field_desc
53 #define __field_desc(type, container, item)
54
55 #undef __array
56 #define __array(type, item, size) type item[size];
57
58 #undef __array_desc
59 #define __array_desc(type, container, item, size)
60
61 #undef __dynamic_array
62 #define __dynamic_array(type, item) type item[];
63
64 #undef F_STRUCT
65 #define F_STRUCT(args...) args
66
67 #undef FTRACE_ENTRY
68 #define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter) \
69 struct struct_name { \
70 struct trace_entry ent; \
71 tstruct \
72 }
73
74 #undef FTRACE_ENTRY_DUP
75 #define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)
76
77 #undef FTRACE_ENTRY_REG
78 #define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, \
79 filter, regfn) \
80 FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
81 filter)
82
83 #include "trace_entries.h"
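To make the macro machinery concrete: each FTRACE_ENTRY() invocation in trace_entries.h expands to a plain struct through the definitions above. A sketch of what the function entry produces, based on its two unsigned long fields in trace_entries.h:

struct ftrace_entry {
	struct trace_entry ent;
	unsigned long ip;
	unsigned long parent_ip;
};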
84
85 /*
86  * Syscalls are special and need special handling; this is why
87  * they are not included in trace_entries.h.
88 */
89 struct syscall_trace_enter {
90 struct trace_entry ent;
91 int nr;
92 unsigned long args[];
93 };
94
95 struct syscall_trace_exit {
96 struct trace_entry ent;
97 int nr;
98 long ret;
99 };
100
101 struct kprobe_trace_entry_head {
102 struct trace_entry ent;
103 unsigned long ip;
104 };
105
106 struct kretprobe_trace_entry_head {
107 struct trace_entry ent;
108 unsigned long func;
109 unsigned long ret_ip;
110 };
111
112 /*
113 * trace_flag_type is an enumeration that holds different
114 * states when a trace occurs. These are:
115 * IRQS_OFF - interrupts were disabled
116 * IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
117 * NEED_RESCHED - reschedule is requested
118 * HARDIRQ - inside an interrupt handler
119  * SOFTIRQ - inside a softirq handler
 * PREEMPT_RESCHED - a preemption-triggered reschedule is pending
 * NMI - inside an NMI handler
120 */
121 enum trace_flag_type {
122 TRACE_FLAG_IRQS_OFF = 0x01,
123 TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
124 TRACE_FLAG_NEED_RESCHED = 0x04,
125 TRACE_FLAG_HARDIRQ = 0x08,
126 TRACE_FLAG_SOFTIRQ = 0x10,
127 TRACE_FLAG_PREEMPT_RESCHED = 0x20,
128 TRACE_FLAG_NMI = 0x40,
129 };
130
131 #define TRACE_BUF_SIZE 1024
132
133 struct trace_array;
134
135 /*
136 * The CPU trace array - it consists of thousands of trace entries
137  * plus some other descriptor data (for example, which task started
138  * the trace).
139 */
140 struct trace_array_cpu {
141 atomic_t disabled;
142 void *buffer_page; /* ring buffer spare */
143
144 unsigned long entries;
145 unsigned long saved_latency;
146 unsigned long critical_start;
147 unsigned long critical_end;
148 unsigned long critical_sequence;
149 unsigned long nice;
150 unsigned long policy;
151 unsigned long rt_priority;
152 unsigned long skipped_entries;
153 cycle_t preempt_timestamp;
154 pid_t pid;
155 kuid_t uid;
156 char comm[TASK_COMM_LEN];
157
158 bool ignore_pid;
159 };
160
161 struct tracer;
162 struct trace_option_dentry;
163
164 struct trace_buffer {
165 struct trace_array *tr;
166 struct ring_buffer *buffer;
167 struct trace_array_cpu __percpu *data;
168 cycle_t time_start;
169 int cpu;
170 };
171
172 #define TRACE_FLAGS_MAX_SIZE 32
173
174 struct trace_options {
175 struct tracer *tracer;
176 struct trace_option_dentry *topts;
177 };
178
179 struct trace_pid_list {
180 int pid_max;
181 unsigned long *pids;
182 };
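The pids member is a bitmap indexed by pid and bounded by pid_max. A minimal sketch of a membership test; the helper name here is illustrative (the real lookup helper lives in trace.c):

static inline bool trace_pid_in_list(struct trace_pid_list *list, pid_t pid)
{
	/* pids beyond the pid_max the list was built with are not tracked */
	if (pid >= list->pid_max)
		return false;
	return test_bit(pid, list->pids);
}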
183
184 /*
185 * The trace array - an array of per-CPU trace arrays. This is the
186 * highest level data structure that individual tracers deal with.
187 * They have on/off state as well:
188 */
189 struct trace_array {
190 struct list_head list;
191 char *name;
192 struct trace_buffer trace_buffer;
193 #ifdef CONFIG_TRACER_MAX_TRACE
194 /*
195 * The max_buffer is used to snapshot the trace when a maximum
196 * latency is reached, or when the user initiates a snapshot.
197  * Some tracers will use this to store a maximum trace while
198  * they continue examining live traces.
199  *
200  * The buffers for the max_buffer are set up the same as the trace_buffer.
201  * When a snapshot is taken, the buffer of the max_buffer is swapped
202 * with the buffer of the trace_buffer and the buffers are reset for
203 * the trace_buffer so the tracing can continue.
204 */
205 struct trace_buffer max_buffer;
206 bool allocated_snapshot;
207 unsigned long max_latency;
208 #endif
209 struct trace_pid_list __rcu *filtered_pids;
210 /*
211 * max_lock is used to protect the swapping of buffers
212 * when taking a max snapshot. The buffers themselves are
213 * protected by per_cpu spinlocks. But the action of the swap
214 * needs its own lock.
215 *
216  * This is defined as an arch_spinlock_t in order to help
217  * with performance when lockdep debugging is enabled.
218  *
219  * It is also used in other places outside of update_max_tr(),
220  * so it needs to be defined outside of the
221  * CONFIG_TRACER_MAX_TRACE block.
222 */
223 arch_spinlock_t max_lock;
224 int buffer_disabled;
225 #ifdef CONFIG_FTRACE_SYSCALLS
226 int sys_refcount_enter;
227 int sys_refcount_exit;
228 struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
229 struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
230 #endif
231 int stop_count;
232 int clock_id;
233 int nr_topts;
234 struct tracer *current_trace;
235 unsigned int trace_flags;
236 unsigned char trace_flags_index[TRACE_FLAGS_MAX_SIZE];
237 unsigned int flags;
238 raw_spinlock_t start_lock;
239 struct dentry *dir;
240 struct dentry *options;
241 struct dentry *percpu_dir;
242 struct dentry *event_dir;
243 struct trace_options *topts;
244 struct list_head systems;
245 struct list_head events;
246 cpumask_var_t tracing_cpumask; /* only trace on set CPUs */
247 int ref;
248 #ifdef CONFIG_FUNCTION_TRACER
249 struct ftrace_ops *ops;
250 /* function tracing enabled */
251 int function_enabled;
252 #endif
253 };
254
255 enum {
256 TRACE_ARRAY_FL_GLOBAL = (1 << 0)
257 };
258
259 extern struct list_head ftrace_trace_arrays;
260
261 extern struct mutex trace_types_lock;
262
263 extern int trace_array_get(struct trace_array *tr);
264 extern void trace_array_put(struct trace_array *tr);
265
266 /*
267 * The global tracer (top) should be the first trace array added,
268 * but we check the flag anyway.
269 */
270 static inline struct trace_array *top_trace_array(void)
271 {
272 struct trace_array *tr;
273
274 if (list_empty(&ftrace_trace_arrays))
275 return NULL;
276
277 tr = list_entry(ftrace_trace_arrays.prev,
278 typeof(*tr), list);
279 WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
280 return tr;
281 }
282
283 #define FTRACE_CMP_TYPE(var, type) \
284 __builtin_types_compatible_p(typeof(var), type *)
285
286 #undef IF_ASSIGN
287 #define IF_ASSIGN(var, entry, etype, id) \
288 if (FTRACE_CMP_TYPE(var, etype)) { \
289 var = (typeof(var))(entry); \
290 WARN_ON(id && (entry)->type != id); \
291 break; \
292 }
293
294 /* Will cause compile errors if type is not found. */
295 extern void __ftrace_bad_type(void);
296
297 /*
298 * The trace_assign_type is a verifier that the entry type is
299 * the same as the type being assigned. To add new types simply
300 * add a line with the following format:
301 *
302 * IF_ASSIGN(var, ent, type, id);
303 *
304 * Where "type" is the trace type that includes the trace_entry
305 * as the "ent" item. And "id" is the trace identifier that is
306 * used in the trace_type enum.
307 *
308 * If the type can have more than one id, then use zero.
309 */
310 #define trace_assign_type(var, ent) \
311 do { \
312 IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \
313 IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \
314 IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \
315 IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
316 IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \
317 IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \
318 IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS); \
319 IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \
320 TRACE_MMIO_RW); \
321 IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \
322 TRACE_MMIO_MAP); \
323 IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
324 IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \
325 TRACE_GRAPH_ENT); \
326 IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
327 TRACE_GRAPH_RET); \
328 __ftrace_bad_type(); \
329 } while (0)
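A sketch of how an output routine typically uses trace_assign_type(): declare a pointer of the concrete entry type and let the macro verify and perform the cast. The function name is illustrative:

static void my_print_fn_entry(struct trace_iterator *iter)
{
	struct ftrace_entry *field;

	/* WARN_ON()s if the record's type id does not match TRACE_FN */
	trace_assign_type(field, iter->ent);
	trace_seq_printf(&iter->seq, "ip=%lx parent=%lx\n",
			 field->ip, field->parent_ip);
}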
330
331 /*
332 * An option specific to a tracer. This is a boolean value.
333  * The bit is the bit mask that represents this option in the
334  * flags value of struct tracer_flags.
335 */
336 struct tracer_opt {
337 const char *name; /* Will appear on the trace_options file */
338 u32 bit; /* Mask assigned in val field in tracer_flags */
339 };
340
341 /*
342 * The set of specific options for a tracer. Your tracer
343  * has to set the initial value of the flags val.
344 */
345 struct tracer_flags {
346 u32 val;
347 struct tracer_opt *opts;
348 struct tracer *trace;
349 };
350
351 /* Makes it easier to define a tracer opt */
352 #define TRACER_OPT(s, b) .name = #s, .bit = b
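A sketch of a tracer defining its private options with TRACER_OPT(); the option name and flag value are illustrative:

#define MY_OPT_VERBOSE 0x1

static struct tracer_opt my_tracer_opts[] = {
	{ TRACER_OPT(my_verbose, MY_OPT_VERBOSE) },
	{ } /* terminator: a NULL name ends the list */
};

static struct tracer_flags my_tracer_flags = {
	.val = 0, /* all options initially off */
	.opts = my_tracer_opts,
};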
353
354
355 struct trace_option_dentry {
356 struct tracer_opt *opt;
357 struct tracer_flags *flags;
358 struct trace_array *tr;
359 struct dentry *entry;
360 };
361
362 /**
363 * struct tracer - a specific tracer and its callbacks to interact with tracefs
364 * @name: the name chosen to select it on the available_tracers file
365 * @init: called when one switches to this tracer (echo name > current_tracer)
366 * @reset: called when one switches to another tracer
367 * @start: called when tracing is unpaused (echo 1 > tracing_on)
368 * @stop: called when tracing is paused (echo 0 > tracing_on)
369 * @update_thresh: called when tracing_thresh is updated
370 * @open: called when the trace file is opened
371 * @pipe_open: called when the trace_pipe file is opened
372 * @close: called when the trace file is released
373 * @pipe_close: called when the trace_pipe file is released
374 * @read: override the default read callback on trace_pipe
375 * @splice_read: override the default splice_read callback on trace_pipe
376 * @selftest: selftest to run on boot (see trace_selftest.c)
377  * @print_header: override the first lines that describe your columns
378 * @print_line: callback that prints a trace
379 * @set_flag: signals one of your private flags changed (trace_options file)
380 * @flags: your private flags
381 */
382 struct tracer {
383 const char *name;
384 int (*init)(struct trace_array *tr);
385 void (*reset)(struct trace_array *tr);
386 void (*start)(struct trace_array *tr);
387 void (*stop)(struct trace_array *tr);
388 int (*update_thresh)(struct trace_array *tr);
389 void (*open)(struct trace_iterator *iter);
390 void (*pipe_open)(struct trace_iterator *iter);
391 void (*close)(struct trace_iterator *iter);
392 void (*pipe_close)(struct trace_iterator *iter);
393 ssize_t (*read)(struct trace_iterator *iter,
394 struct file *filp, char __user *ubuf,
395 size_t cnt, loff_t *ppos);
396 ssize_t (*splice_read)(struct trace_iterator *iter,
397 struct file *filp,
398 loff_t *ppos,
399 struct pipe_inode_info *pipe,
400 size_t len,
401 unsigned int flags);
402 #ifdef CONFIG_FTRACE_STARTUP_TEST
403 int (*selftest)(struct tracer *trace,
404 struct trace_array *tr);
405 #endif
406 void (*print_header)(struct seq_file *m);
407 enum print_line_t (*print_line)(struct trace_iterator *iter);
408 /* If you handled the flag setting, return 0 */
409 int (*set_flag)(struct trace_array *tr,
410 u32 old_flags, u32 bit, int set);
411 /* Return 0 if OK with change, else return non-zero */
412 int (*flag_changed)(struct trace_array *tr,
413 u32 mask, int set);
414 struct tracer *next;
415 struct tracer_flags *flags;
416 int enabled;
417 int ref;
418 bool print_max;
419 bool allow_instances;
420 #ifdef CONFIG_TRACER_MAX_TRACE
421 bool use_max_tr;
422 #endif
423 };
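A minimal sketch of a tracer built on this structure; the callbacks do nothing useful and the names are illustrative (register_tracer() and __tracer_data are declared further down in this header):

static int my_tracer_init(struct trace_array *tr)
{
	return 0; /* set up per-tracer state here */
}

static void my_tracer_reset(struct trace_array *tr)
{
	/* undo whatever init set up */
}

static struct tracer my_tracer __tracer_data = {
	.name	= "my_tracer",
	.init	= my_tracer_init,
	.reset	= my_tracer_reset,
};

static __init int my_tracer_register(void)
{
	return register_tracer(&my_tracer);
}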
424
425
426 /* Only current can touch trace_recursion */
427
428 /*
429 * For function tracing recursion:
430  * The order of these bits is important.
431 *
432 * When function tracing occurs, the following steps are made:
433 * If arch does not support a ftrace feature:
434 * call internal function (uses INTERNAL bits) which calls...
435 * If callback is registered to the "global" list, the list
436 * function is called and recursion checks the GLOBAL bits.
437 * then this function calls...
438 * The function callback, which can use the FTRACE bits to
439 * check for recursion.
440 *
441  * Now if the arch does not support a feature, and it calls
442 * the global list function which calls the ftrace callback
443 * all three of these steps will do a recursion protection.
444 * There's no reason to do one if the previous caller already
445 * did. The recursion that we are protecting against will
446 * go through the same steps again.
447 *
448 * To prevent the multiple recursion checks, if a recursion
449 * bit is set that is higher than the MAX bit of the current
450 * check, then we know that the check was made by the previous
451 * caller, and we can skip the current check.
452 */
453 enum {
454 TRACE_BUFFER_BIT,
455 TRACE_BUFFER_NMI_BIT,
456 TRACE_BUFFER_IRQ_BIT,
457 TRACE_BUFFER_SIRQ_BIT,
458
459 /* Start of function recursion bits */
460 TRACE_FTRACE_BIT,
461 TRACE_FTRACE_NMI_BIT,
462 TRACE_FTRACE_IRQ_BIT,
463 TRACE_FTRACE_SIRQ_BIT,
464
465 /* INTERNAL_BITs must be greater than FTRACE_BITs */
466 TRACE_INTERNAL_BIT,
467 TRACE_INTERNAL_NMI_BIT,
468 TRACE_INTERNAL_IRQ_BIT,
469 TRACE_INTERNAL_SIRQ_BIT,
470
471 TRACE_BRANCH_BIT,
472 /*
473  * Abuse of the trace_recursion field.
474  * We need a way to maintain state when tracing the function graph
475  * in irq context: we may want to trace a particular function called
476  * from an interrupt handler even though irq tracing is off. Since
477  * trace_recursion can only be modified by current, we can reuse it.
478 */
479 TRACE_IRQ_BIT,
480 };
481
482 #define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0)
483 #define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
484 #define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit)))
485
486 #define TRACE_CONTEXT_BITS 4
487
488 #define TRACE_FTRACE_START TRACE_FTRACE_BIT
489 #define TRACE_FTRACE_MAX ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
490
491 #define TRACE_LIST_START TRACE_INTERNAL_BIT
492 #define TRACE_LIST_MAX ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
493
494 #define TRACE_CONTEXT_MASK TRACE_LIST_MAX
495
496 static __always_inline int trace_get_context_bit(void)
497 {
498 int bit;
499
500 if (in_interrupt()) {
501 if (in_nmi())
502 bit = 0;
503
504 else if (in_irq())
505 bit = 1;
506 else
507 bit = 2;
508 } else
509 bit = 3;
510
511 return bit;
512 }
513
514 static __always_inline int trace_test_and_set_recursion(int start, int max)
515 {
516 unsigned int val = current->trace_recursion;
517 int bit;
518
519 /* A previous recursion check was made */
520 if ((val & TRACE_CONTEXT_MASK) > max)
521 return 0;
522
523 bit = trace_get_context_bit() + start;
524 if (unlikely(val & (1 << bit)))
525 return -1;
526
527 val |= 1 << bit;
528 current->trace_recursion = val;
529 barrier();
530
531 return bit;
532 }
533
534 static __always_inline void trace_clear_recursion(int bit)
535 {
536 unsigned int val = current->trace_recursion;
537
538 if (!bit)
539 return;
540
541 bit = 1 << bit;
542 val &= ~bit;
543
544 barrier();
545 current->trace_recursion = val;
546 }
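A sketch of the pattern a callback uses with the two helpers above (the signature is simplified; real ftrace callbacks also take ftrace_ops and pt_regs arguments):

static void my_callback(unsigned long ip, unsigned long parent_ip)
{
	int bit;

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START,
					   TRACE_FTRACE_MAX);
	if (bit < 0)
		return; /* already running in this context: bail */

	/* ... do the actual tracing work ... */

	trace_clear_recursion(bit);
}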
547
548 static inline struct ring_buffer_iter *
549 trace_buffer_iter(struct trace_iterator *iter, int cpu)
550 {
551 if (iter->buffer_iter && iter->buffer_iter[cpu])
552 return iter->buffer_iter[cpu];
553 return NULL;
554 }
555
556 int tracer_init(struct tracer *t, struct trace_array *tr);
557 int tracing_is_enabled(void);
558 void tracing_reset(struct trace_buffer *buf, int cpu);
559 void tracing_reset_online_cpus(struct trace_buffer *buf);
560 void tracing_reset_current(int cpu);
561 void tracing_reset_all_online_cpus(void);
562 int tracing_open_generic(struct inode *inode, struct file *filp);
563 bool tracing_is_disabled(void);
564 struct dentry *trace_create_file(const char *name,
565 umode_t mode,
566 struct dentry *parent,
567 void *data,
568 const struct file_operations *fops);
569
570 struct dentry *tracing_init_dentry(void);
571
572 struct ring_buffer_event;
573
574 struct ring_buffer_event *
575 trace_buffer_lock_reserve(struct ring_buffer *buffer,
576 int type,
577 unsigned long len,
578 unsigned long flags,
579 int pc);
580
581 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
582 struct trace_array_cpu *data);
583
584 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
585 int *ent_cpu, u64 *ent_ts);
586
587 void __buffer_unlock_commit(struct ring_buffer *buffer,
588 struct ring_buffer_event *event);
589
590 int trace_empty(struct trace_iterator *iter);
591
592 void *trace_find_next_entry_inc(struct trace_iterator *iter);
593
594 void trace_init_global_iter(struct trace_iterator *iter);
595
596 void tracing_iter_reset(struct trace_iterator *iter, int cpu);
597
598 void trace_function(struct trace_array *tr,
599 unsigned long ip,
600 unsigned long parent_ip,
601 unsigned long flags, int pc);
602 void trace_graph_function(struct trace_array *tr,
603 unsigned long ip,
604 unsigned long parent_ip,
605 unsigned long flags, int pc);
606 void trace_latency_header(struct seq_file *m);
607 void trace_default_header(struct seq_file *m);
608 void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
610
611 void trace_graph_return(struct ftrace_graph_ret *trace);
612 int trace_graph_entry(struct ftrace_graph_ent *trace);
613 void set_graph_array(struct trace_array *tr);
614
615 void tracing_start_cmdline_record(void);
616 void tracing_stop_cmdline_record(void);
617 int register_tracer(struct tracer *type);
618 int is_tracing_stopped(void);
619
620 loff_t tracing_lseek(struct file *file, loff_t offset, int whence);
621
622 extern cpumask_var_t __read_mostly tracing_buffer_mask;
623
624 #define for_each_tracing_cpu(cpu) \
625 for_each_cpu(cpu, tracing_buffer_mask)
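For example, resetting the buffers of every traced CPU walks the mask with this helper (a sketch, given a struct trace_array *tr; tracing_reset() is declared above):

	int cpu;

	for_each_tracing_cpu(cpu)
		tracing_reset(&tr->trace_buffer, cpu);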
626
627 extern unsigned long nsecs_to_usecs(unsigned long nsecs);
628
629 extern unsigned long tracing_thresh;
630
631 #ifdef CONFIG_TRACER_MAX_TRACE
632 void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
633 void update_max_tr_single(struct trace_array *tr,
634 struct task_struct *tsk, int cpu);
635 #endif /* CONFIG_TRACER_MAX_TRACE */
636
637 #ifdef CONFIG_STACKTRACE
638 void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
639 int pc);
640
641 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
642 int pc);
643 #else
644 static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
645 unsigned long flags, int pc)
646 {
647 }
648
649 static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
650 int skip, int pc)
651 {
652 }
653 #endif /* CONFIG_STACKTRACE */
654
655 extern cycle_t ftrace_now(int cpu);
656
657 extern void trace_find_cmdline(int pid, char comm[]);
658 extern void trace_event_follow_fork(struct trace_array *tr, bool enable);
659
660 #ifdef CONFIG_DYNAMIC_FTRACE
661 extern unsigned long ftrace_update_tot_cnt;
662 #endif
663 #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
664 extern int DYN_FTRACE_TEST_NAME(void);
665 #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
666 extern int DYN_FTRACE_TEST_NAME2(void);
667
668 extern bool ring_buffer_expanded;
669 extern bool tracing_selftest_disabled;
670
671 #ifdef CONFIG_FTRACE_STARTUP_TEST
672 extern int trace_selftest_startup_function(struct tracer *trace,
673 struct trace_array *tr);
674 extern int trace_selftest_startup_function_graph(struct tracer *trace,
675 struct trace_array *tr);
676 extern int trace_selftest_startup_irqsoff(struct tracer *trace,
677 struct trace_array *tr);
678 extern int trace_selftest_startup_preemptoff(struct tracer *trace,
679 struct trace_array *tr);
680 extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
681 struct trace_array *tr);
682 extern int trace_selftest_startup_wakeup(struct tracer *trace,
683 struct trace_array *tr);
684 extern int trace_selftest_startup_nop(struct tracer *trace,
685 struct trace_array *tr);
686 extern int trace_selftest_startup_sched_switch(struct tracer *trace,
687 struct trace_array *tr);
688 extern int trace_selftest_startup_branch(struct tracer *trace,
689 struct trace_array *tr);
690 /*
691 * Tracer data references selftest functions that only occur
692 * on boot up. These can be __init functions. Thus, when selftests
693  * are enabled, the tracers need to reference __init functions.
694 */
695 #define __tracer_data __refdata
696 #else
697 /* Tracers are seldom changed. Optimize when selftests are disabled. */
698 #define __tracer_data __read_mostly
699 #endif /* CONFIG_FTRACE_STARTUP_TEST */
700
701 extern void *head_page(struct trace_array_cpu *data);
702 extern unsigned long long ns2usecs(cycle_t nsec);
703 extern int
704 trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
705 extern int
706 trace_vprintk(unsigned long ip, const char *fmt, va_list args);
707 extern int
708 trace_array_vprintk(struct trace_array *tr,
709 unsigned long ip, const char *fmt, va_list args);
710 int trace_array_printk(struct trace_array *tr,
711 unsigned long ip, const char *fmt, ...);
712 int trace_array_printk_buf(struct ring_buffer *buffer,
713 unsigned long ip, const char *fmt, ...);
714 void trace_printk_seq(struct trace_seq *s);
715 enum print_line_t print_trace_line(struct trace_iterator *iter);
716
717 extern char trace_find_mark(unsigned long long duration);
718
719 /* Standard output formatting function used for function return traces */
720 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
721
722 /* Flag options */
723 #define TRACE_GRAPH_PRINT_OVERRUN 0x1
724 #define TRACE_GRAPH_PRINT_CPU 0x2
725 #define TRACE_GRAPH_PRINT_OVERHEAD 0x4
726 #define TRACE_GRAPH_PRINT_PROC 0x8
727 #define TRACE_GRAPH_PRINT_DURATION 0x10
728 #define TRACE_GRAPH_PRINT_ABS_TIME 0x20
729 #define TRACE_GRAPH_PRINT_IRQS 0x40
730 #define TRACE_GRAPH_PRINT_TAIL 0x80
731 #define TRACE_GRAPH_SLEEP_TIME 0x100
732 #define TRACE_GRAPH_GRAPH_TIME 0x200
733 #define TRACE_GRAPH_PRINT_FILL_SHIFT 28
734 #define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
735
736 extern void ftrace_graph_sleep_time_control(bool enable);
737 extern void ftrace_graph_graph_time_control(bool enable);
738
739 extern enum print_line_t
740 print_graph_function_flags(struct trace_iterator *iter, u32 flags);
741 extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
742 extern void
743 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
744 extern void graph_trace_open(struct trace_iterator *iter);
745 extern void graph_trace_close(struct trace_iterator *iter);
746 extern int __trace_graph_entry(struct trace_array *tr,
747 struct ftrace_graph_ent *trace,
748 unsigned long flags, int pc);
749 extern void __trace_graph_return(struct trace_array *tr,
750 struct ftrace_graph_ret *trace,
751 unsigned long flags, int pc);
752
753
754 #ifdef CONFIG_DYNAMIC_FTRACE
755 /* TODO: make this variable */
756 #define FTRACE_GRAPH_MAX_FUNCS 32
757 extern int ftrace_graph_count;
758 extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
759 extern int ftrace_graph_notrace_count;
760 extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS];
761
762 static inline int ftrace_graph_addr(unsigned long addr)
763 {
764 int i;
765
766 if (!ftrace_graph_count)
767 return 1;
768
769 for (i = 0; i < ftrace_graph_count; i++) {
770 if (addr == ftrace_graph_funcs[i]) {
771 /*
772 * If no irqs are to be traced, but a set_graph_function
773 * is set, and called by an interrupt handler, we still
774 * want to trace it.
775 */
776 if (in_irq())
777 trace_recursion_set(TRACE_IRQ_BIT);
778 else
779 trace_recursion_clear(TRACE_IRQ_BIT);
780 return 1;
781 }
782 }
783
784 return 0;
785 }
786
787 static inline int ftrace_graph_notrace_addr(unsigned long addr)
788 {
789 int i;
790
791 if (!ftrace_graph_notrace_count)
792 return 0;
793
794 for (i = 0; i < ftrace_graph_notrace_count; i++) {
795 if (addr == ftrace_graph_notrace_funcs[i])
796 return 1;
797 }
798
799 return 0;
800 }
801 #else
802 static inline int ftrace_graph_addr(unsigned long addr)
803 {
804 return 1;
805 }
806
807 static inline int ftrace_graph_notrace_addr(unsigned long addr)
808 {
809 return 0;
810 }
811 #endif /* CONFIG_DYNAMIC_FTRACE */
812 #else /* CONFIG_FUNCTION_GRAPH_TRACER */
813 static inline enum print_line_t
814 print_graph_function_flags(struct trace_iterator *iter, u32 flags)
815 {
816 return TRACE_TYPE_UNHANDLED;
817 }
818 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
819
820 extern struct list_head ftrace_pids;
821
822 #ifdef CONFIG_FUNCTION_TRACER
823 extern bool ftrace_filter_param __initdata;
824 static inline int ftrace_trace_task(struct task_struct *task)
825 {
826 if (list_empty(&ftrace_pids))
827 return 1;
828
829 return test_tsk_trace_trace(task);
830 }
831 extern int ftrace_is_dead(void);
832 int ftrace_create_function_files(struct trace_array *tr,
833 struct dentry *parent);
834 void ftrace_destroy_function_files(struct trace_array *tr);
835 void ftrace_init_global_array_ops(struct trace_array *tr);
836 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
837 void ftrace_reset_array_ops(struct trace_array *tr);
838 int using_ftrace_ops_list_func(void);
839 #else
840 static inline int ftrace_trace_task(struct task_struct *task)
841 {
842 return 1;
843 }
844 static inline int ftrace_is_dead(void) { return 0; }
845 static inline int
846 ftrace_create_function_files(struct trace_array *tr,
847 struct dentry *parent)
848 {
849 return 0;
850 }
851 static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
852 static inline __init void
853 ftrace_init_global_array_ops(struct trace_array *tr) { }
854 static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
855 /* ftrace_func_t type is not defined, use macro instead of static inline */
856 #define ftrace_init_array_ops(tr, func) do { } while (0)
857 #endif /* CONFIG_FUNCTION_TRACER */
858
859 #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
860 void ftrace_create_filter_files(struct ftrace_ops *ops,
861 struct dentry *parent);
862 void ftrace_destroy_filter_files(struct ftrace_ops *ops);
863 #else
864 /*
865 * The ops parameter passed in is usually undefined.
866 * This must be a macro.
867 */
868 #define ftrace_create_filter_files(ops, parent) do { } while (0)
869 #define ftrace_destroy_filter_files(ops) do { } while (0)
870 #endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */
871
872 bool ftrace_event_is_function(struct trace_event_call *call);
873
874 /*
875  * struct trace_parser - serves for reading the user input separated by spaces
876 * @cont: set if the input is not complete - no final space char was found
877 * @buffer: holds the parsed user input
878 * @idx: user input length
879 * @size: buffer size
880 */
881 struct trace_parser {
882 bool cont;
883 char *buffer;
884 unsigned idx;
885 unsigned size;
886 };
887
888 static inline bool trace_parser_loaded(struct trace_parser *parser)
889 {
890 return (parser->idx != 0);
891 }
892
893 static inline bool trace_parser_cont(struct trace_parser *parser)
894 {
895 return parser->cont;
896 }
897
898 static inline void trace_parser_clear(struct trace_parser *parser)
899 {
900 parser->cont = false;
901 parser->idx = 0;
902 }
903
904 extern int trace_parser_get_init(struct trace_parser *parser, int size);
905 extern void trace_parser_put(struct trace_parser *parser);
906 extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
907 size_t cnt, loff_t *ppos);
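A sketch of the usual trace_parser lifecycle inside a write() handler, with error handling trimmed; ubuf, cnt and ppos are the handler's arguments:

	struct trace_parser parser;
	ssize_t read;

	if (trace_parser_get_init(&parser, PAGE_SIZE))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);
	if (read >= 0 && trace_parser_loaded(&parser)) {
		/* parser.buffer now holds one NUL-terminated token */
		/* ... act on parser.buffer ... */
	}

	trace_parser_put(&parser);
	return read;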
908
909 /*
910 * Only create function graph options if function graph is configured.
911 */
912 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
913 # define FGRAPH_FLAGS \
914 C(DISPLAY_GRAPH, "display-graph"),
915 #else
916 # define FGRAPH_FLAGS
917 #endif
918
919 #ifdef CONFIG_BRANCH_TRACER
920 # define BRANCH_FLAGS \
921 C(BRANCH, "branch"),
922 #else
923 # define BRANCH_FLAGS
924 #endif
925
926 #ifdef CONFIG_FUNCTION_TRACER
927 # define FUNCTION_FLAGS \
928 C(FUNCTION, "function-trace"),
929 # define FUNCTION_DEFAULT_FLAGS TRACE_ITER_FUNCTION
930 #else
931 # define FUNCTION_FLAGS
932 # define FUNCTION_DEFAULT_FLAGS 0UL
933 #endif
934
935 #ifdef CONFIG_STACKTRACE
936 # define STACK_FLAGS \
937 C(STACKTRACE, "stacktrace"),
938 #else
939 # define STACK_FLAGS
940 #endif
941
942 /*
943 * trace_iterator_flags is an enumeration that defines bit
944  * positions into trace_flags that control the output.
945 *
946 * NOTE: These bits must match the trace_options array in
947 * trace.c (this macro guarantees it).
948 */
949 #define TRACE_FLAGS \
950 C(PRINT_PARENT, "print-parent"), \
951 C(SYM_OFFSET, "sym-offset"), \
952 C(SYM_ADDR, "sym-addr"), \
953 C(VERBOSE, "verbose"), \
954 C(RAW, "raw"), \
955 C(HEX, "hex"), \
956 C(BIN, "bin"), \
957 C(BLOCK, "block"), \
958 C(PRINTK, "trace_printk"), \
959 C(ANNOTATE, "annotate"), \
960 C(USERSTACKTRACE, "userstacktrace"), \
961 C(SYM_USEROBJ, "sym-userobj"), \
962 C(PRINTK_MSGONLY, "printk-msg-only"), \
963 C(CONTEXT_INFO, "context-info"), /* Print pid/cpu/time */ \
964 C(LATENCY_FMT, "latency-format"), \
965 C(RECORD_CMD, "record-cmd"), \
966 C(OVERWRITE, "overwrite"), \
967 C(STOP_ON_FREE, "disable_on_free"), \
968 C(IRQ_INFO, "irq-info"), \
969 C(MARKERS, "markers"), \
970 C(EVENT_FORK, "event-fork"), \
971 FUNCTION_FLAGS \
972 FGRAPH_FLAGS \
973 STACK_FLAGS \
974 BRANCH_FLAGS
975
976 /*
977 * By defining C, we can make TRACE_FLAGS a list of bit names
978 * that will define the bits for the flag masks.
979 */
980 #undef C
981 #define C(a, b) TRACE_ITER_##a##_BIT
982
983 enum trace_iterator_bits {
984 TRACE_FLAGS
985 /* Make sure we don't go more than we have bits for */
986 TRACE_ITER_LAST_BIT
987 };
988
989 /*
990 * By redefining C, we can make TRACE_FLAGS a list of masks that
991 * use the bits as defined above.
992 */
993 #undef C
994 #define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)
995
996 enum trace_iterator_flags { TRACE_FLAGS };
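To make the C() trick concrete, a sketch of what the two expansions produce for the first couple of TRACE_FLAGS entries:

/* first expansion: consecutive bit positions */
enum trace_iterator_bits {
	TRACE_ITER_PRINT_PARENT_BIT,	/* == 0 */
	TRACE_ITER_SYM_OFFSET_BIT,	/* == 1 */
	/* ... */
	TRACE_ITER_LAST_BIT
};

/* second expansion: the matching masks */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT	= (1 << TRACE_ITER_PRINT_PARENT_BIT),
	TRACE_ITER_SYM_OFFSET	= (1 << TRACE_ITER_SYM_OFFSET_BIT),
	/* ... */
};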
997
998 /*
999 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
1000 * control the output of kernel symbols.
1001 */
1002 #define TRACE_ITER_SYM_MASK \
1003 (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
1004
1005 extern struct tracer nop_trace;
1006
1007 #ifdef CONFIG_BRANCH_TRACER
1008 extern int enable_branch_tracing(struct trace_array *tr);
1009 extern void disable_branch_tracing(void);
1010 static inline int trace_branch_enable(struct trace_array *tr)
1011 {
1012 if (tr->trace_flags & TRACE_ITER_BRANCH)
1013 return enable_branch_tracing(tr);
1014 return 0;
1015 }
1016 static inline void trace_branch_disable(void)
1017 {
1018 /* due to races, always disable */
1019 disable_branch_tracing();
1020 }
1021 #else
1022 static inline int trace_branch_enable(struct trace_array *tr)
1023 {
1024 return 0;
1025 }
1026 static inline void trace_branch_disable(void)
1027 {
1028 }
1029 #endif /* CONFIG_BRANCH_TRACER */
1030
1031 /* set ring buffers to default size if not already done */
1032 int tracing_update_buffers(void);
1033
1034 struct ftrace_event_field {
1035 struct list_head link;
1036 const char *name;
1037 const char *type;
1038 int filter_type;
1039 int offset;
1040 int size;
1041 int is_signed;
1042 };
1043
1044 struct event_filter {
1045 int n_preds; /* Number assigned */
1046 int a_preds; /* allocated */
1047 struct filter_pred *preds;
1048 struct filter_pred *root;
1049 char *filter_string;
1050 };
1051
1052 struct event_subsystem {
1053 struct list_head list;
1054 const char *name;
1055 struct event_filter *filter;
1056 int ref_count;
1057 };
1058
1059 struct trace_subsystem_dir {
1060 struct list_head list;
1061 struct event_subsystem *subsystem;
1062 struct trace_array *tr;
1063 struct dentry *entry;
1064 int ref_count;
1065 int nr_events;
1066 };
1067
1068 #define FILTER_PRED_INVALID ((unsigned short)-1)
1069 #define FILTER_PRED_IS_RIGHT (1 << 15)
1070 #define FILTER_PRED_FOLD (1 << 15)
1071
1072 /*
1073  * The maximum number of preds is limited by the size of an unsigned
1074  * short with two flag bits at the MSBs. One bit is used for both the
1075  * IS_RIGHT and FOLD flags. The other is reserved.
1076 *
1077 * 2^14 preds is way more than enough.
1078 */
1079 #define MAX_FILTER_PRED 16384
1080
1081 struct filter_pred;
1082 struct regex;
1083
1084 typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);
1085
1086 typedef int (*regex_match_func)(char *str, struct regex *r, int len);
1087
1088 enum regex_type {
1089 MATCH_FULL = 0,
1090 MATCH_FRONT_ONLY,
1091 MATCH_MIDDLE_ONLY,
1092 MATCH_END_ONLY,
1093 };
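A sketch of the glob forms that filter_parse_regex() (declared below) maps onto these match types:

/*
 *	"abc"	-> MATCH_FULL		matches exactly "abc"
 *	"abc*"	-> MATCH_FRONT_ONLY	matches "abc", "abcdef", ...
 *	"*bc*"	-> MATCH_MIDDLE_ONLY	matches anything containing "bc"
 *	"*bc"	-> MATCH_END_ONLY	matches anything ending in "bc"
 */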
1094
1095 struct regex {
1096 char pattern[MAX_FILTER_STR_VAL];
1097 int len;
1098 int field_len;
1099 regex_match_func match;
1100 };
1101
1102 struct filter_pred {
1103 filter_pred_fn_t fn;
1104 u64 val;
1105 struct regex regex;
1106 unsigned short *ops;
1107 struct ftrace_event_field *field;
1108 int offset;
1109 int not;
1110 int op;
1111 unsigned short index;
1112 unsigned short parent;
1113 unsigned short left;
1114 unsigned short right;
1115 };
1116
1117 static inline bool is_string_field(struct ftrace_event_field *field)
1118 {
1119 return field->filter_type == FILTER_DYN_STRING ||
1120 field->filter_type == FILTER_STATIC_STRING ||
1121 field->filter_type == FILTER_PTR_STRING;
1122 }
1123
1124 static inline bool is_function_field(struct ftrace_event_field *field)
1125 {
1126 return field->filter_type == FILTER_TRACE_FN;
1127 }
1128
1129 extern enum regex_type
1130 filter_parse_regex(char *buff, int len, char **search, int *not);
1131 extern void print_event_filter(struct trace_event_file *file,
1132 struct trace_seq *s);
1133 extern int apply_event_filter(struct trace_event_file *file,
1134 char *filter_string);
1135 extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
1136 char *filter_string);
1137 extern void print_subsystem_event_filter(struct event_subsystem *system,
1138 struct trace_seq *s);
1139 extern int filter_assign_type(const char *type);
1140 extern int create_event_filter(struct trace_event_call *call,
1141 char *filter_str, bool set_str,
1142 struct event_filter **filterp);
1143 extern void free_event_filter(struct event_filter *filter);
1144
1145 struct ftrace_event_field *
1146 trace_find_event_field(struct trace_event_call *call, char *name);
1147
1148 extern void trace_event_enable_cmd_record(bool enable);
1149 extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
1150 extern int event_trace_del_tracer(struct trace_array *tr);
1151
1152 extern struct trace_event_file *find_event_file(struct trace_array *tr,
1153 const char *system,
1154 const char *event);
1155
1156 static inline void *event_file_data(struct file *filp)
1157 {
1158 return ACCESS_ONCE(file_inode(filp)->i_private);
1159 }
1160
1161 extern struct mutex event_mutex;
1162 extern struct list_head ftrace_events;
1163
1164 extern const struct file_operations event_trigger_fops;
1165 extern const struct file_operations event_hist_fops;
1166
1167 #ifdef CONFIG_HIST_TRIGGERS
1168 extern int register_trigger_hist_cmd(void);
1169 extern int register_trigger_hist_enable_disable_cmds(void);
1170 #else
1171 static inline int register_trigger_hist_cmd(void) { return 0; }
1172 static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
1173 #endif
1174
1175 extern int register_trigger_cmds(void);
1176 extern void clear_event_triggers(struct trace_array *tr);
1177
1178 struct event_trigger_data {
1179 unsigned long count;
1180 int ref;
1181 struct event_trigger_ops *ops;
1182 struct event_command *cmd_ops;
1183 struct event_filter __rcu *filter;
1184 char *filter_str;
1185 void *private_data;
1186 bool paused;
1187 bool paused_tmp;
1188 struct list_head list;
1189 char *name;
1190 struct list_head named_list;
1191 struct event_trigger_data *named_data;
1192 };
1193
1194 /* Avoid typos */
1195 #define ENABLE_EVENT_STR "enable_event"
1196 #define DISABLE_EVENT_STR "disable_event"
1197 #define ENABLE_HIST_STR "enable_hist"
1198 #define DISABLE_HIST_STR "disable_hist"
1199
1200 struct enable_trigger_data {
1201 struct trace_event_file *file;
1202 bool enable;
1203 bool hist;
1204 };
1205
1206 extern int event_enable_trigger_print(struct seq_file *m,
1207 struct event_trigger_ops *ops,
1208 struct event_trigger_data *data);
1209 extern void event_enable_trigger_free(struct event_trigger_ops *ops,
1210 struct event_trigger_data *data);
1211 extern int event_enable_trigger_func(struct event_command *cmd_ops,
1212 struct trace_event_file *file,
1213 char *glob, char *cmd, char *param);
1214 extern int event_enable_register_trigger(char *glob,
1215 struct event_trigger_ops *ops,
1216 struct event_trigger_data *data,
1217 struct trace_event_file *file);
1218 extern void event_enable_unregister_trigger(char *glob,
1219 struct event_trigger_ops *ops,
1220 struct event_trigger_data *test,
1221 struct trace_event_file *file);
1222 extern void trigger_data_free(struct event_trigger_data *data);
1223 extern int event_trigger_init(struct event_trigger_ops *ops,
1224 struct event_trigger_data *data);
1225 extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
1226 int trigger_enable);
1227 extern void update_cond_flag(struct trace_event_file *file);
1228 extern void unregister_trigger(char *glob, struct event_trigger_ops *ops,
1229 struct event_trigger_data *test,
1230 struct trace_event_file *file);
1231 extern int set_trigger_filter(char *filter_str,
1232 struct event_trigger_data *trigger_data,
1233 struct trace_event_file *file);
1234 extern struct event_trigger_data *find_named_trigger(const char *name);
1235 extern bool is_named_trigger(struct event_trigger_data *test);
1236 extern int save_named_trigger(const char *name,
1237 struct event_trigger_data *data);
1238 extern void del_named_trigger(struct event_trigger_data *data);
1239 extern void pause_named_trigger(struct event_trigger_data *data);
1240 extern void unpause_named_trigger(struct event_trigger_data *data);
1241 extern void set_named_trigger_data(struct event_trigger_data *data,
1242 struct event_trigger_data *named_data);
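The named-trigger helpers above let triggers on different events share one instance by name. A simplified sketch of how a command's @reg() implementation might use them (ret is a local int in the surrounding function; this is not the verbatim kernel logic):

	if (data->name) {
		struct event_trigger_data *named;

		named = find_named_trigger(data->name);
		if (named) {
			/* attach to the existing trigger's shared state */
			set_named_trigger_data(data, named);
		} else {
			ret = save_named_trigger(data->name, data);
			if (ret < 0)
				return ret;
		}
	}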
1243 extern int register_event_command(struct event_command *cmd);
1244 extern int unregister_event_command(struct event_command *cmd);
1246
1247 /**
1248 * struct event_trigger_ops - callbacks for trace event triggers
1249 *
1250 * The methods in this structure provide per-event trigger hooks for
1251 * various trigger operations.
1252 *
1253 * All the methods below, except for @init() and @free(), must be
1254 * implemented.
1255 *
1256 * @func: The trigger 'probe' function called when the triggering
1257 * event occurs. The data passed into this callback is the data
1258 * that was supplied to the event_command @reg() function that
1259 * registered the trigger (see struct event_command) along with
1260 * the trace record, rec.
1261 *
1262 * @init: An optional initialization function called for the trigger
1263 * when the trigger is registered (via the event_command reg()
1264 * function). This can be used to perform per-trigger
1265 * initialization such as incrementing a per-trigger reference
1266 * count, for instance. This is usually implemented by the
1267 * generic utility function @event_trigger_init() (see
1268  * trace_events_trigger.c).
1269 *
1270 * @free: An optional de-initialization function called for the
1271 * trigger when the trigger is unregistered (via the
1272 * event_command @reg() function). This can be used to perform
1273 * per-trigger de-initialization such as decrementing a
1274 * per-trigger reference count and freeing corresponding trigger
1275 * data, for instance. This is usually implemented by the
1276 * generic utility function @event_trigger_free() (see
1277  * trace_events_trigger.c).
1278 *
1279 * @print: The callback function invoked to have the trigger print
1280 * itself. This is usually implemented by a wrapper function
1281 * that calls the generic utility function @event_trigger_print()
1282  * (see trace_events_trigger.c).
1283 */
1284 struct event_trigger_ops {
1285 void (*func)(struct event_trigger_data *data,
1286 void *rec);
1287 int (*init)(struct event_trigger_ops *ops,
1288 struct event_trigger_data *data);
1289 void (*free)(struct event_trigger_ops *ops,
1290 struct event_trigger_data *data);
1291 int (*print)(struct seq_file *m,
1292 struct event_trigger_ops *ops,
1293 struct event_trigger_data *data);
1294 };
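A minimal sketch of these ops for a counting trigger; my_trigger_print and my_trigger_free are assumed to be defined elsewhere, and event_trigger_init is the generic helper declared earlier in this header:

static void my_trigger(struct event_trigger_data *data, void *rec)
{
	if (!data->count)
		return;	/* count exhausted: trigger stays registered but inert */

	if (data->count != -1)
		(data->count)--;

	/* perform the trigger's action here */
}

static struct event_trigger_ops my_trigger_ops = {
	.func	= my_trigger,
	.init	= event_trigger_init,
	.free	= my_trigger_free,
	.print	= my_trigger_print,
};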
1295
1296 /**
1297 * struct event_command - callbacks and data members for event commands
1298 *
1299 * Event commands are invoked by users by writing the command name
1300 * into the 'trigger' file associated with a trace event. The
1301 * parameters associated with a specific invocation of an event
1302 * command are used to create an event trigger instance, which is
1303 * added to the list of trigger instances associated with that trace
1304 * event. When the event is hit, the set of triggers associated with
1305 * that event is invoked.
1306 *
1307 * The data members in this structure provide per-event command data
1308 * for various event commands.
1309 *
1310  * All the data members below, except for @flags, must be set
1311 * for each event command.
1312 *
1313 * @name: The unique name that identifies the event command. This is
1314 * the name used when setting triggers via trigger files.
1315 *
1316 * @trigger_type: A unique id that identifies the event command
1317 * 'type'. This value has two purposes, the first to ensure that
1318 * only one trigger of the same type can be set at a given time
1319 * for a particular event e.g. it doesn't make sense to have both
1320 * a traceon and traceoff trigger attached to a single event at
1321 * the same time, so traceon and traceoff have the same type
1322 * though they have different names. The @trigger_type value is
1323 * also used as a bit value for deferring the actual trigger
1324 * action until after the current event is finished. Some
1325 * commands need to do this if they themselves log to the trace
1326  * buffer (see EVENT_CMD_FL_POST_TRIGGER below). @trigger_type
1327 * values are defined by adding new values to the trigger_type
1328 * enum in include/linux/trace_events.h.
1329 *
1330 * @flags: See the enum event_command_flags below.
1331 *
1332 * All the methods below, except for @set_filter() and @unreg_all(),
1333 * must be implemented.
1334 *
1335 * @func: The callback function responsible for parsing and
1336 * registering the trigger written to the 'trigger' file by the
1337 * user. It allocates the trigger instance and registers it with
1338 * the appropriate trace event. It makes use of the other
1339 * event_command callback functions to orchestrate this, and is
1340 * usually implemented by the generic utility function
1341  * @event_trigger_callback() (see trace_events_trigger.c).
1342 *
1343 * @reg: Adds the trigger to the list of triggers associated with the
1344 * event, and enables the event trigger itself, after
1345 * initializing it (via the event_trigger_ops @init() function).
1346 * This is also where commands can use the @trigger_type value to
1347 * make the decision as to whether or not multiple instances of
1348 * the trigger should be allowed. This is usually implemented by
1349 * the generic utility function @register_trigger() (see
1350  * trace_events_trigger.c).
1351 *
1352 * @unreg: Removes the trigger from the list of triggers associated
1353 * with the event, and disables the event trigger itself, after
1354  * cleaning it up (via the event_trigger_ops @free() function).
1355 * This is usually implemented by the generic utility function
1356  * @unregister_trigger() (see trace_events_trigger.c).
1357 *
1358 * @unreg_all: An optional function called to remove all the triggers
1359 * from the list of triggers associated with the event. Called
1360 * when a trigger file is opened in truncate mode.
1361 *
1362 * @set_filter: An optional function called to parse and set a filter
1363 * for the trigger. If no @set_filter() method is set for the
1364 * event command, filters set by the user for the command will be
1365 * ignored. This is usually implemented by the generic utility
1366  * function @set_trigger_filter() (see trace_events_trigger.c).
1367 *
1368 * @get_trigger_ops: The callback function invoked to retrieve the
1369 * event_trigger_ops implementation associated with the command.
1370 */
1371 struct event_command {
1372 struct list_head list;
1373 char *name;
1374 enum event_trigger_type trigger_type;
1375 int flags;
1376 int (*func)(struct event_command *cmd_ops,
1377 struct trace_event_file *file,
1378 char *glob, char *cmd, char *params);
1379 int (*reg)(char *glob,
1380 struct event_trigger_ops *ops,
1381 struct event_trigger_data *data,
1382 struct trace_event_file *file);
1383 void (*unreg)(char *glob,
1384 struct event_trigger_ops *ops,
1385 struct event_trigger_data *data,
1386 struct trace_event_file *file);
1387 void (*unreg_all)(struct trace_event_file *file);
1388 int (*set_filter)(char *filter_str,
1389 struct event_trigger_data *data,
1390 struct trace_event_file *file);
1391 struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
1392 };
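A sketch tying the pieces together; my_cmd_parse, my_cmd_reg and my_cmd_get_ops are assumed helpers, the trigger_type reuses an existing value purely for illustration, and unregister_trigger(), set_trigger_filter() and register_event_command() are the generic functions declared earlier in this header:

static struct event_command my_cmd = {
	.name			= "mycmd",
	.trigger_type		= ETT_TRACE_ONOFF,
	.flags			= EVENT_CMD_FL_NEEDS_REC, /* see enum below */
	.func			= my_cmd_parse,
	.reg			= my_cmd_reg,
	.unreg			= unregister_trigger,
	.set_filter		= set_trigger_filter,
	.get_trigger_ops	= my_cmd_get_ops,
};

static __init int my_cmd_register_cmd(void)
{
	return register_event_command(&my_cmd);
}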
1393
1394 /**
1395 * enum event_command_flags - flags for struct event_command
1396 *
1397 * @POST_TRIGGER: A flag that says whether or not this command needs
1398 * to have its action delayed until after the current event has
1399 * been closed. Some triggers need to avoid being invoked while
1400 * an event is currently in the process of being logged, since
1401 * the trigger may itself log data into the trace buffer. Thus
1402 * we make sure the current event is committed before invoking
1403 * those triggers. To do that, the trigger invocation is split
1404 * in two - the first part checks the filter using the current
1405 * trace record; if a command has the @post_trigger flag set, it
1406 * sets a bit for itself in the return value, otherwise it
1407 * directly invokes the trigger. Once all commands have been
1408 * either invoked or set their return flag, the current record is
1409 * either committed or discarded. At that point, if any commands
1410 * have deferred their triggers, those commands are finally
1411 * invoked following the close of the current event. In other
1412 * words, if the event_trigger_ops @func() probe implementation
1413 * itself logs to the trace buffer, this flag should be set,
1414 * otherwise it can be left unspecified.
1415 *
1416 * @NEEDS_REC: A flag that says whether or not this command needs
1417 * access to the trace record in order to perform its function,
1418 * regardless of whether or not it has a filter associated with
1419 * it (filters make a trigger require access to the trace record
1420 * but are not always present).
1421 */
1422 enum event_command_flags {
1423 EVENT_CMD_FL_POST_TRIGGER = 1,
1424 EVENT_CMD_FL_NEEDS_REC = 2,
1425 };
1426
1427 static inline bool event_command_post_trigger(struct event_command *cmd_ops)
1428 {
1429 return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
1430 }
1431
1432 static inline bool event_command_needs_rec(struct event_command *cmd_ops)
1433 {
1434 return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
1435 }
1436
1437 extern int trace_event_enable_disable(struct trace_event_file *file,
1438 int enable, int soft_disable);
1439 extern int tracing_alloc_snapshot(void);
1440
1441 extern const char *__start___trace_bprintk_fmt[];
1442 extern const char *__stop___trace_bprintk_fmt[];
1443
1444 extern const char *__start___tracepoint_str[];
1445 extern const char *__stop___tracepoint_str[];
1446
1447 void trace_printk_control(bool enabled);
1448 void trace_printk_init_buffers(void);
1449 void trace_printk_start_comm(void);
1450 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
1451 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
1452
1453 /*
1454  * Normal trace_printk() and friends allocate special buffers
1455 * to do the manipulation, as well as saves the print formats
1456 * into sections to display. But the trace infrastructure wants
1457 * to use these without the added overhead at the price of being
1458 * a bit slower (used mainly for warnings, where we don't care
1459 * about performance). The internal_trace_puts() is for such
1460 * a purpose.
1461 */
1462 #define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
1463
1464 #undef FTRACE_ENTRY
1465 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \
1466 extern struct trace_event_call \
1467 __aligned(4) event_##call;
1468 #undef FTRACE_ENTRY_DUP
1469 #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
1470 FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
1471 filter)
1472 #include "trace_entries.h"
1473
1474 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
1475 int perf_ftrace_event_register(struct trace_event_call *call,
1476 enum trace_reg type, void *data);
1477 #else
1478 #define perf_ftrace_event_register NULL
1479 #endif
1480
1481 #ifdef CONFIG_FTRACE_SYSCALLS
1482 void init_ftrace_syscalls(void);
1483 const char *get_syscall_name(int syscall);
1484 #else
1485 static inline void init_ftrace_syscalls(void) { }
1486 static inline const char *get_syscall_name(int syscall)
1487 {
1488 return NULL;
1489 }
1490 #endif
1491
1492 #ifdef CONFIG_EVENT_TRACING
1493 void trace_event_init(void);
1494 void trace_event_enum_update(struct trace_enum_map **map, int len);
1495 #else
1496 static inline void __init trace_event_init(void) { }
1497 static inline void trace_event_enum_update(struct trace_enum_map **map, int len) { }
1498 #endif
1499
1500 extern struct trace_iterator *tracepoint_print_iter;
1501
1502 #endif /* _LINUX_KERNEL_TRACE_H */