#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/ftrace.h>

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_CONT,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_SPECIAL,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BOOT,
	TRACE_FN_RET,

	__TRACE_LAST_TYPE
};

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned char		type;
	unsigned char		cpu;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
};

/*
 * Function trace entry - function address and parent function address:
 */
struct ftrace_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	unsigned long		parent_ip;
};

/* Function return entry */
struct ftrace_ret_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	unsigned long		parent_ip;
	unsigned long long	calltime;
	unsigned long long	rettime;
};
extern struct tracer boot_tracer;

/*
 * Context switch trace entry - which task (and prio) we switched from/to:
 */
struct ctx_switch_entry {
	struct trace_entry	ent;
	unsigned int		prev_pid;
	unsigned char		prev_prio;
	unsigned char		prev_state;
	unsigned int		next_pid;
	unsigned char		next_prio;
	unsigned char		next_state;
	unsigned int		next_cpu;
};

/*
 * Special (free-form) trace entry:
 */
struct special_entry {
	struct trace_entry	ent;
	unsigned long		arg1;
	unsigned long		arg2;
	unsigned long		arg3;
};

/*
 * Stack-trace entry:
 */

#define FTRACE_STACK_ENTRIES	8

struct stack_entry {
	struct trace_entry	ent;
	unsigned long		caller[FTRACE_STACK_ENTRIES];
};

/*
 * ftrace_printk entry:
 */
struct print_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	char			buf[];
};

#define TRACE_OLD_SIZE		88

struct trace_field_cont {
	unsigned char		type;
	/* Temporary till we get rid of this completely */
	char			buf[TRACE_OLD_SIZE - 1];
};

struct trace_mmiotrace_rw {
	struct trace_entry	ent;
	struct mmiotrace_rw	rw;
};

struct trace_mmiotrace_map {
	struct trace_entry	ent;
	struct mmiotrace_map	map;
};

struct trace_boot {
	struct trace_entry	ent;
	struct boot_trace	initcall;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF	   - interrupts were disabled
 *  IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
 *  NEED_RESCHED   - reschedule is requested
 *  HARDIRQ	   - inside an interrupt handler
 *  SOFTIRQ	   - inside a softirq handler
 *  CONT	   - multiple entries hold the trace item
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_CONT			= 0x20,
};
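
/*
 * Example (a sketch, not part of this header): output code can decode
 * these bits from trace_entry::flags, e.g.:
 *
 *	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
 *	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
 *	need_resched = entry->flags & TRACE_FLAG_NEED_RESCHED;
 */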

#define TRACE_BUF_SIZE		1024

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example, which task started
 * the trace).
 */
struct trace_array_cpu {
	atomic_t		disabled;

	/* these fields get copied into max-trace: */
	unsigned long		trace_idx;
	unsigned long		overrun;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	uid_t			uid;
	char			comm[TASK_COMM_LEN];
};

struct trace_iterator;

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct ring_buffer	*buffer;
	unsigned long		entries;
	int			cpu;
	cycle_t			time_start;
	struct task_struct	*waiter;
	struct trace_array_cpu	*data[NR_CPUS];
};

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type() macro verifies that the entry type
 * matches the type being assigned. To add new types, simply
 * add a line with the following format:
 *
 *	IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item, and "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct special_entry, 0);		\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_boot, TRACE_BOOT);	\
		IF_ASSIGN(var, ent, struct ftrace_ret_entry, TRACE_FN_RET); \
		__ftrace_bad_type();					\
	} while (0)
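
/*
 * Example usage (a sketch of the typical pattern in output code):
 *
 *	struct ftrace_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *	seq_print_ip_sym(&iter->seq, field->ip, sym_flags);
 *
 * If iter->ent is not a TRACE_FN entry, the WARN_ON in IF_ASSIGN
 * fires at runtime; if the type of "field" is not listed above at
 * all, the surviving reference to __ftrace_bad_type() breaks the
 * build.
 */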

/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2	/* Relay to other output functions */
};
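
/*
 * A minimal sketch (hypothetical tracer code, not part of this file)
 * of a print_line callback using these return values, assuming
 * trace_seq_printf() returns 0 when the seq buffer is full:
 *
 *	static enum print_line_t my_print_line(struct trace_iterator *iter)
 *	{
 *		if (iter->ent->type != TRACE_FN)
 *			return TRACE_TYPE_UNHANDLED;
 *		if (!trace_seq_printf(&iter->seq, "my entry\n"))
 *			return TRACE_TYPE_PARTIAL_LINE;
 *		return TRACE_TYPE_HANDLED;
 *	}
 */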

/*
 * A specific tracer, represented by methods that operate on a trace array:
 */
struct tracer {
	const char		*name;
	void			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	struct tracer		*next;
	int			print_max;
};
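
/*
 * A hypothetical registration sketch (illustrative names only, not
 * defined anywhere in the tree): a tracer fills in the callbacks it
 * needs and hands the struct to register_tracer() at boot:
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name		= "example",
 *		.init		= example_tracer_init,
 *		.reset		= example_tracer_reset,
 *		.print_line	= my_print_line,
 *	};
 *
 *	static int __init init_example_tracer(void)
 *	{
 *		return register_tracer(&example_tracer);
 *	}
 *	device_initcall(init_example_tracer);
 */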

struct trace_seq {
	unsigned char		buffer[PAGE_SIZE];
	unsigned int		len;
	unsigned int		readpos;
};

/*
 * Trace iterator - used by printout routines that present trace
 * results to users; these routines might sleep, etc.:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	void			*private;
	struct ring_buffer_iter	*buffer_iter[NR_CPUS];

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	int			cpu;
	u64			ts;

	unsigned long		iter_flags;
	loff_t			pos;
	long			idx;

	cpumask_t		started;
};

int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);
void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);

void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags, int pc);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);
void tracing_record_cmdline(struct task_struct *tsk);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_special(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long arg1,
		   unsigned long arg2,
		   unsigned long arg3, int pc);
void trace_function(struct trace_array *tr,
		    struct trace_array_cpu *data,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void
trace_function_return(struct ftrace_retfunc *trace);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);

extern cycle_t ftrace_now(int cpu);

#ifdef CONFIG_FUNCTION_TRACER
void tracing_start_function_trace(void);
void tracing_stop_function_trace(void);
#else
# define tracing_start_function_trace()		do { } while (0)
# define tracing_stop_function_trace()		do { } while (0)
#endif

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
(*tracer_switch_func_t)(void *private,
			void *__rq,
			struct task_struct *prev,
			struct task_struct *next);

struct tracer_switch_ops {
	tracer_switch_func_t		func;
	void				*private;
	struct tracer_switch_ops	*next;
};

#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#endif

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_sysprof(struct tracer *trace,
					  struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
extern void trace_seq_print_cont(struct trace_seq *s,
				 struct trace_iterator *iter);

extern int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
		 unsigned long sym_flags);
extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
				 size_t cnt);
extern long ns2usecs(cycle_t nsec);
extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args);

extern unsigned long trace_flags;

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_RET_TRACER
extern enum print_line_t print_return_function(struct trace_iterator *iter);
#else
static inline enum print_line_t
print_return_function(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT	= 0x01,
	TRACE_ITER_SYM_OFFSET	= 0x02,
	TRACE_ITER_SYM_ADDR	= 0x04,
	TRACE_ITER_VERBOSE	= 0x08,
	TRACE_ITER_RAW		= 0x10,
	TRACE_ITER_HEX		= 0x20,
	TRACE_ITER_BIN		= 0x40,
	TRACE_ITER_BLOCK	= 0x80,
	TRACE_ITER_STACKTRACE	= 0x100,
	TRACE_ITER_SCHED_TREE	= 0x200,
	TRACE_ITER_PRINTK	= 0x400,
	TRACE_ITER_PREEMPTONLY	= 0x800,
};

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

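/*
 * Example (a sketch of how output code can consume these bits):
 *
 *	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
 *
 *	seq_print_ip_sym(s, field->ip, sym_flags);
 *	if (trace_flags & TRACE_ITER_PRINT_PARENT)
 *		seq_print_ip_sym(s, field->parent_ip, sym_flags);
 */
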
extern struct tracer nop_trace;

/**
 * ftrace_preempt_disable - disable preemption scheduler safe
 *
 * When tracing can happen inside the scheduler, there exist
 * cases where the tracing might happen before the need_resched
 * flag is checked. If this happens and the tracer calls
 * preempt_enable (after a disable), a schedule might take place
 * causing an infinite recursion.
 *
 * To prevent this, we read the need_resched flag before
 * disabling preemption. When we want to enable preemption we
 * check the flag; if it is set, then we call preempt_enable_no_resched.
 * Otherwise, we call preempt_enable.
 *
 * The rationale for doing the above is that if need_resched is set
 * and we have yet to reschedule, we are either in an atomic location
 * (where we do not need to check for scheduling) or we are inside
 * the scheduler and do not want to resched.
 */
static inline int ftrace_preempt_disable(void)
{
	int resched;

	resched = need_resched();
	preempt_disable_notrace();

	return resched;
}

/**
 * ftrace_preempt_enable - enable preemption scheduler safe
 * @resched: the return value from ftrace_preempt_disable
 *
 * This is a scheduler safe way to enable preemption and not miss
 * any preemption checks. The disable saved the state of preemption:
 * if @resched is set, then we were either inside an atomic section or
 * inside the scheduler (we would have already scheduled
 * otherwise). In this case, we do not want to call normal
 * preempt_enable, but preempt_enable_no_resched instead.
 */
static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

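/*
 * Example usage (a sketch, mirroring the pattern described above;
 * my_trace_callback is an illustrative name): a tracing callback
 * that may run inside the scheduler pairs the two helpers around
 * its critical section:
 *
 *	static void my_trace_callback(unsigned long ip)
 *	{
 *		int resched;
 *
 *		resched = ftrace_preempt_disable();
 *		// ... record the trace entry ...
 *		ftrace_preempt_enable(resched);
 *	}
 */
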
#endif /* _LINUX_KERNEL_TRACE_H */