#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/ftrace_event.h>

#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h>		/* For NR_SYSCALLS	     */
#include <asm/syscall.h>	/* some archs define it here */
#endif

72829bc3
TG
21enum trace_type {
22 __TRACE_FIRST_TYPE = 0,
23
24 TRACE_FN,
25 TRACE_CTX,
26 TRACE_WAKE,
27 TRACE_STACK,
dd0e545f 28 TRACE_PRINT,
48ead020 29 TRACE_BPRINT,
bd8ac686
PP
30 TRACE_MMIO_RW,
31 TRACE_MMIO_MAP,
9f029e83 32 TRACE_BRANCH,
287b6e68
FW
33 TRACE_GRAPH_RET,
34 TRACE_GRAPH_ENT,
02b67518 35 TRACE_USER_STACK,
c71a8961 36 TRACE_BLK,
09ae7234 37 TRACE_BPUTS,
72829bc3 38
f0868d1e 39 __TRACE_LAST_TYPE,
72829bc3
TG
40};
41
#undef __field
#define __field(type, item)		type	item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __array
#define __array(type, item, size)	type	item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type	item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
	struct struct_name {						\
		struct trace_entry	ent;				\
		tstruct							\
	}

#undef TP_ARGS
#define TP_ARGS(args...)	args

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	\
			 filter, regfn)					\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)

#include "trace_entries.h"
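
/*
 * Illustrative sketch (not copied from trace_entries.h): an entry defined
 * there has roughly the following shape, and with the FTRACE_ENTRY()
 * definition above it expands to a plain struct embedding trace_entry:
 *
 *	FTRACE_ENTRY(example, example_entry,
 *		TRACE_EXAMPLE,
 *		F_STRUCT(
 *			__field(	unsigned long,	ip	)
 *			__field(	unsigned long,	val	)
 *		),
 *		F_printk("%lx: %lx", __entry->ip, __entry->val),
 *		FILTER_OTHER
 *	);
 *
 * which the macros above turn into:
 *
 *	struct example_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *		unsigned long		val;
 *	};
 *
 * The "example" name and TRACE_EXAMPLE id are made up for illustration;
 * see trace_entries.h for the real entries.
 */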

/*
 * syscalls are special and need special handling; this is why
 * they are not included in trace_entries.h
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	long			ret;
};

struct kprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct kretprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
};

#define TRACE_BUF_SIZE		1024

struct trace_array;

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example, which task started
 * the trace, etc.)
 */
struct trace_array_cpu {
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		entries;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	kuid_t			uid;
	char			comm[TASK_COMM_LEN];
};

struct tracer;

struct trace_buffer {
	struct trace_array		*tr;
	struct ring_buffer		*buffer;
	struct trace_array_cpu __percpu	*data;
	cycle_t				time_start;
	int				cpu;
};

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct list_head	list;
	char			*name;
	struct trace_buffer	trace_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the trace_buffer.
	 * When a snapshot is taken, the buffer of the max_buffer is swapped
	 * with the buffer of the trace_buffer and the buffers are reset for
	 * the trace_buffer so the tracing can continue.
	 */
	struct trace_buffer	max_buffer;
	bool			allocated_snapshot;
#endif
	int			buffer_disabled;
#ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;
	int			sys_refcount_exit;
	DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
	DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
#endif
	int			stop_count;
	int			clock_id;
	struct tracer		*current_trace;
	unsigned int		flags;
	raw_spinlock_t		start_lock;
	struct dentry		*dir;
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct dentry		*event_dir;
	struct list_head	systems;
	struct list_head	events;
	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
	int			ref;
};

enum {
	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)
};

extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;

extern int trace_array_get(struct trace_array *tr);
extern void trace_array_put(struct trace_array *tr);

/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH);	\
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		__ftrace_bad_type();					\
	} while (0)
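
/*
 * Usage sketch (the caller below is assumed, not defined in this file):
 * an output routine that knows it is looking at a TRACE_FN record could do
 *
 *	struct ftrace_entry *field;
 *	struct trace_entry *entry = iter->ent;
 *
 *	trace_assign_type(field, entry);
 *	trace_seq_printf(&iter->seq, "%pS <- %pS\n",
 *			 (void *)field->ip, (void *)field->parent_ip);
 *
 * The WARN_ON() in IF_ASSIGN() fires if entry->type does not match the id
 * that belongs to the assigned struct.
 */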

/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b

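/*
 * Illustrative sketch of how a tracer wires these up (the names below are
 * made up; real examples live in the individual tracer files):
 *
 *	#define MY_OPT_VERBOSE	0x1
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(my_verbose, MY_OPT_VERBOSE) },
 *		{ }	// terminator
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val  = 0,	// initial value of the flags
 *		.opts = my_opts,
 *	};
 *
 * The tracer then points its struct tracer ->flags at &my_flags, and each
 * option shows up in the trace_options file.
 */
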
/**
 * struct tracer - a specific tracer and its callbacks to interact with debugfs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
 * @stop: called when tracing is paused (echo 0 > tracing_enabled)
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @wait_pipe: override how the user waits for traces on trace_pipe
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*wait_pipe)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct tracer *tracer,
						u32 mask, int set);
	struct tracer		*next;
	struct tracer_flags	*flags;
	bool			print_max;
	bool			enabled;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool			use_max_tr;
#endif
};

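/*
 * Minimal sketch of a tracer definition (the "my_tracer" name and callback
 * are made up for illustration; real tracers fill in more of the hooks
 * documented above):
 *
 *	static int my_tracer_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static struct tracer my_tracer __tracer_data = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *	};
 *
 * It would then be made visible with register_tracer(&my_tracer), which is
 * declared further down in this header.
 */
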
/* Only current can touch trace_recursion */

/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 *  When function tracing occurs, the following steps are made:
 *   If arch does not support a ftrace feature:
 *    call internal function (uses INTERNAL bits) which calls...
 *   If callback is registered to the "global" list, the list
 *    function is called and recursion checks the GLOBAL bits.
 *    then this function calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 *
 * Now if the arch does not support a feature, and it calls
 * the global list function which calls the ftrace callback,
 * all three of these steps will do a recursion protection.
 * There's no reason to do one if the previous caller already
 * did. The recursion that we are protecting against will
 * go through the same steps again.
 *
 * To prevent the multiple recursion checks, if a recursion
 * bit is set that is higher than the MAX bit of the current
 * check, then we know that the check was made by the previous
 * caller, and we can skip the current check.
 */
enum {
	TRACE_BUFFER_BIT,
	TRACE_BUFFER_NMI_BIT,
	TRACE_BUFFER_IRQ_BIT,
	TRACE_BUFFER_SIRQ_BIT,

	/* Start of function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,

	/* GLOBAL_BITs must be greater than FTRACE_BITs */
	TRACE_GLOBAL_BIT,
	TRACE_GLOBAL_NMI_BIT,
	TRACE_GLOBAL_IRQ_BIT,
	TRACE_GLOBAL_SIRQ_BIT,

	/* INTERNAL_BITs must be greater than GLOBAL_BITs */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,

	TRACE_CONTROL_BIT,

/*
 * Abuse of the trace_recursion.
 * We need a way to maintain state if we are tracing the function graph
 * in irq, because we want to trace a particular function that was called
 * in irq context while irq tracing is off. Since this can only be
 * modified by current, we can reuse trace_recursion.
 */
	TRACE_IRQ_BIT,
};

#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))

#define TRACE_CONTEXT_BITS	4

#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_GLOBAL_START	TRACE_GLOBAL_BIT
#define TRACE_GLOBAL_MAX	((1 << (TRACE_GLOBAL_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_LIST_START	TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_CONTEXT_MASK	TRACE_LIST_MAX

static __always_inline int trace_get_context_bit(void)
{
	int bit;

	if (in_interrupt()) {
		if (in_nmi())
			bit = 0;
		else if (in_irq())
			bit = 1;
		else
			bit = 2;
	} else
		bit = 3;

	return bit;
}

static __always_inline int trace_test_and_set_recursion(int start, int max)
{
	unsigned int val = current->trace_recursion;
	int bit;

	/* A previous recursion check was made */
	if ((val & TRACE_CONTEXT_MASK) > max)
		return 0;

	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit)))
		return -1;

	val |= 1 << bit;
	current->trace_recursion = val;
	barrier();

	return bit;
}

static __always_inline void trace_clear_recursion(int bit)
{
	unsigned int val = current->trace_recursion;

	if (!bit)
		return;

	bit = 1 << bit;
	val &= ~bit;

	barrier();
	current->trace_recursion = val;
}

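/*
 * Usage sketch for the recursion helpers above (the callback name is
 * hypothetical; the real callers live in the ftrace core):
 *
 *	static void my_callback(...)
 *	{
 *		int bit;
 *
 *		bit = trace_test_and_set_recursion(TRACE_FTRACE_START,
 *						   TRACE_FTRACE_MAX);
 *		if (bit < 0)
 *			return;		// recursing in the same context
 *
 *		// ... do the actual tracing work ...
 *
 *		trace_clear_recursion(bit);
 *	}
 */
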
static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	if (iter->buffer_iter && iter->buffer_iter[cpu])
		return iter->buffer_iter[cpu];
	return NULL;
}

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset(struct trace_buffer *buf, int cpu);
void tracing_reset_online_cpus(struct trace_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

struct dentry *tracing_init_dentry_tr(struct trace_array *tr);
struct dentry *tracing_init_dentry(void);

struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags,
			  int pc);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void __buffer_unlock_commit(struct ring_buffer *buffer,
			    struct ring_buffer_event *event);

int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

void poll_wait_pipe(struct trace_iterator *iter);

void tracing_sched_switch_trace(struct trace_array *tr,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_graph_function(struct trace_array *tr,
			  unsigned long ip,
			  unsigned long parent_ip,
			  unsigned long flags, int pc);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
int trace_empty(struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
int is_tracing_stopped(void);

extern cpumask_var_t __read_mostly tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)

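/*
 * Example (sketch): for_each_tracing_cpu() simply walks the CPUs present
 * in tracing_buffer_mask, e.g.
 *
 *	int cpu;
 *
 *	for_each_tracing_cpu(cpu)
 *		tracing_reset(&tr->trace_buffer, cpu);
 */
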
extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
extern unsigned long tracing_max_latency;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */

#ifdef CONFIG_STACKTRACE
void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc);

void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
			     int skip, int pc, struct pt_regs *regs);

void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
			    int pc);

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
static inline void ftrace_trace_stack(struct ring_buffer *buffer,
				      unsigned long flags, int skip, int pc)
{
}

static inline void ftrace_trace_stack_regs(struct ring_buffer *buffer,
					   unsigned long flags, int skip,
					   int pc, struct pt_regs *regs)
{
}

static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
					  unsigned long flags, int pc)
{
}

static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */

extern cycle_t ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);

extern bool ring_buffer_expanded;
extern bool tracing_selftest_disabled;
DECLARE_PER_CPU(int, ftrace_cpu_disabled);

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
/*
 * Tracer data references selftest functions that only occur
 * on boot up. These can be __init functions. Thus, when selftests
 * are enabled, the tracers need to reference __init functions.
 */
#define __tracer_data __refdata
#else
/* Tracers are seldom changed. Optimize when selftests are disabled. */
#define __tracer_data __read_mostly
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(cycle_t nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...);
int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern unsigned long trace_flags;

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20

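/*
 * Sketch (caller assumed, not defined here): these flags are OR'd together
 * and passed to the flag-aware output helpers declared below, e.g.
 *
 *	u32 flags = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_DURATION;
 *
 *	print_graph_headers_flags(s, flags);
 *	ret = print_graph_function_flags(iter, flags);
 */
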
extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
			       unsigned long flags, int pc);
extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned long flags, int pc);

#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS		32
extern int ftrace_graph_filter_enabled;
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];

static inline int ftrace_graph_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_filter_enabled)
		return 1;

	for (i = 0; i < ftrace_graph_count; i++) {
		if (addr == ftrace_graph_funcs[i]) {
			/*
			 * If no irqs are to be traced, but a set_graph_function
			 * is set, and called by an interrupt handler, we still
			 * want to trace it.
			 */
			if (in_irq())
				trace_recursion_set(TRACE_IRQ_BIT);
			else
				trace_recursion_clear(TRACE_IRQ_BIT);
			return 1;
		}
	}

	return 0;
}
#else
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct task_struct *task)
{
	if (list_empty(&ftrace_pids))
		return 1;

	return test_tsk_trace_trace(task);
}
extern int ftrace_is_dead(void);
#else
static inline int ftrace_trace_task(struct task_struct *task)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
#endif

int ftrace_event_is_function(struct ftrace_event_call *call);

/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
			  size_t cnt, loff_t *ppos);

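/*
 * Usage sketch (the surrounding write() handler, MAX_LEN and do_something()
 * are assumed, not part of this header): a debugfs write handler typically
 * drives the parser like
 *
 *	struct trace_parser parser;
 *	int read;
 *
 *	if (trace_parser_get_init(&parser, MAX_LEN))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser) &&
 *	    !trace_parser_cont(&parser))
 *		do_something(parser.buffer);	// one space-separated token
 *
 *	trace_parser_put(&parser);
 */
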
/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_HEX			= 0x20,
	TRACE_ITER_BIN			= 0x40,
	TRACE_ITER_BLOCK		= 0x80,
	TRACE_ITER_STACKTRACE		= 0x100,
	TRACE_ITER_PRINTK		= 0x200,
	TRACE_ITER_PREEMPTONLY		= 0x400,
	TRACE_ITER_BRANCH		= 0x800,
	TRACE_ITER_ANNOTATE		= 0x1000,
	TRACE_ITER_USERSTACKTRACE	= 0x2000,
	TRACE_ITER_SYM_USEROBJ		= 0x4000,
	TRACE_ITER_PRINTK_MSGONLY	= 0x8000,
	TRACE_ITER_CONTEXT_INFO		= 0x10000, /* Print pid/cpu/time */
	TRACE_ITER_LATENCY_FMT		= 0x20000,
	TRACE_ITER_SLEEP_TIME		= 0x40000,
	TRACE_ITER_GRAPH_TIME		= 0x80000,
	TRACE_ITER_RECORD_CMD		= 0x100000,
	TRACE_ITER_OVERWRITE		= 0x200000,
	TRACE_ITER_STOP_ON_FREE		= 0x400000,
	TRACE_ITER_IRQ_INFO		= 0x800000,
	TRACE_ITER_MARKERS		= 0x1000000,
	TRACE_ITER_FUNCTION		= 0x2000000,
};

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

extern struct tracer nop_trace;

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);

struct ftrace_event_field {
	struct list_head	link;
	const char		*name;
	const char		*type;
	int			filter_type;
	int			offset;
	int			size;
	int			is_signed;
};

struct event_filter {
	int			n_preds;	/* Number assigned */
	int			a_preds;	/* allocated */
	struct filter_pred	*preds;
	struct filter_pred	*root;
	char			*filter_string;
};

struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct event_filter	*filter;
	int			ref_count;
};

struct ftrace_subsystem_dir {
	struct list_head	list;
	struct event_subsystem	*subsystem;
	struct trace_array	*tr;
	struct dentry		*entry;
	int			ref_count;
	int			nr_events;
};

#define FILTER_PRED_INVALID	((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT	(1 << 15)
#define FILTER_PRED_FOLD	(1 << 15)

/*
 * The max preds is the size of unsigned short with
 * two flags at the MSBs. One bit is used for both the IS_RIGHT
 * and FOLD flags. The other is reserved.
 *
 * 2^14 preds is way more than enough.
 */
#define MAX_FILTER_PRED	16384

struct filter_pred;
struct regex;

typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);

typedef int (*regex_match_func)(char *str, struct regex *r, int len);

enum regex_type {
	MATCH_FULL = 0,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

struct regex {
	char			pattern[MAX_FILTER_STR_VAL];
	int			len;
	int			field_len;
	regex_match_func	match;
};

struct filter_pred {
	filter_pred_fn_t 	fn;
	u64 			val;
	struct regex		regex;
	unsigned short		*ops;
	struct ftrace_event_field *field;
	int 			offset;
	int 			not;
	int 			op;
	unsigned short		index;
	unsigned short		parent;
	unsigned short		left;
	unsigned short		right;
};

extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
extern void print_event_filter(struct ftrace_event_call *call,
			       struct trace_seq *s);
extern int apply_event_filter(struct ftrace_event_call *call,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
extern int filter_assign_type(const char *type);

struct ftrace_event_field *
trace_find_event_field(struct ftrace_event_call *call, char *name);

static inline int
filter_check_discard(struct ftrace_event_call *call, void *rec,
		     struct ring_buffer *buffer,
		     struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

extern void trace_event_enable_cmd_record(bool enable);
extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
extern int event_trace_del_tracer(struct trace_array *tr);

extern struct mutex event_mutex;
extern struct list_head ftrace_events;

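/*
 * Typical usage sketch for filter_check_discard() (the surrounding commit
 * helper is assumed; real callers live in the event tracing code): after an
 * event has been reserved and filled in, the filter decides whether it is
 * kept or discarded before the commit:
 *
 *	event = trace_buffer_lock_reserve(buffer, call->event.type,
 *					  sizeof(*entry), flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	// ... fill in *entry ...
 *	if (!filter_check_discard(call, entry, buffer, event))
 *		__buffer_unlock_commit(buffer, event);
 */
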
extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

extern const char *__start___tracepoint_str[];
extern const char *__stop___tracepoint_str[];

void trace_printk_init_buffers(void);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);

/*
 * Normal trace_printk() and friends allocate special buffers
 * to do the manipulation, and save the print formats
 * into sections to display. But the trace infrastructure wants
 * to use these without the added overhead at the price of being
 * a bit slower (used mainly for warnings, where we don't care
 * about performance). The internal_trace_puts() is for such
 * a purpose.
 */
#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))

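/*
 * Example (sketch, message string made up): internal_trace_puts() is meant
 * for fixed strings where the trace_printk() setup overhead is not wanted:
 *
 *	internal_trace_puts("my warning message\n");
 */
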
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
	extern struct ftrace_event_call					\
	__attribute__((__aligned__(4))) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)
#include "trace_entries.h"

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
int perf_ftrace_event_register(struct ftrace_event_call *call,
			       enum trace_reg type, void *data);
#else
#define perf_ftrace_event_register NULL
#endif

#endif /* _LINUX_KERNEL_TRACE_H */