#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/ftrace.h>

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_CONT,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_SPECIAL,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BOOT,
	TRACE_FN_RET,

	__TRACE_LAST_TYPE
};

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned char		type;
	unsigned char		cpu;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
};

/*
 * Function trace entry - function address and parent function address:
 */
struct ftrace_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	unsigned long		parent_ip;
};

/* Function return entry */
struct ftrace_ret_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	unsigned long		parent_ip;
	unsigned long long	calltime;
	unsigned long long	rettime;
};

extern struct tracer boot_tracer;

/*
 * Context switch trace entry - which task (and prio) we switched from/to:
 */
struct ctx_switch_entry {
	struct trace_entry	ent;
	unsigned int		prev_pid;
	unsigned char		prev_prio;
	unsigned char		prev_state;
	unsigned int		next_pid;
	unsigned char		next_prio;
	unsigned char		next_state;
	unsigned int		next_cpu;
};

/*
 * Special (free-form) trace entry:
 */
struct special_entry {
	struct trace_entry	ent;
	unsigned long		arg1;
	unsigned long		arg2;
	unsigned long		arg3;
};

/*
 * Stack-trace entry:
 */
#define FTRACE_STACK_ENTRIES	8

struct stack_entry {
	struct trace_entry	ent;
	unsigned long		caller[FTRACE_STACK_ENTRIES];
};

/*
 * ftrace_printk entry:
 */
struct print_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	char			buf[];
};

#define TRACE_OLD_SIZE		88

struct trace_field_cont {
	unsigned char		type;
	/* Temporary till we get rid of this completely */
	char			buf[TRACE_OLD_SIZE - 1];
};

struct trace_mmiotrace_rw {
	struct trace_entry	ent;
	struct mmiotrace_rw	rw;
};

struct trace_mmiotrace_map {
	struct trace_entry	ent;
	struct mmiotrace_map	map;
};

struct trace_boot {
	struct trace_entry	ent;
	struct boot_trace	initcall;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF	   - interrupts were disabled
 *  IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
 *  NEED_RESCHED   - reschedule is requested
 *  HARDIRQ	   - inside an interrupt handler
 *  SOFTIRQ	   - inside a softirq handler
 *  CONT	   - multiple entries hold the trace item
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_CONT			= 0x20,
};

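/*
 * Decoding sketch (illustrative only, compiled out): output routines
 * test these bits on a recorded entry, e.g. when deciding what to show
 * in the latency-format columns. The helper name is hypothetical.
 */
#if 0
static int example_entry_in_irq(struct trace_entry *entry)
{
	/* nonzero if the entry was recorded in hard- or soft-irq context */
	return entry->flags & (TRACE_FLAG_HARDIRQ | TRACE_FLAG_SOFTIRQ);
}
#endif
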
#define TRACE_BUF_SIZE		1024

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example, which task started
 * the trace):
 */
struct trace_array_cpu {
	atomic_t		disabled;

	/* these fields get copied into max-trace: */
	unsigned long		trace_idx;
	unsigned long		overrun;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	uid_t			uid;
	char			comm[TASK_COMM_LEN];
};

struct trace_iterator;

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct ring_buffer	*buffer;
	unsigned long		entries;
	int			cpu;
	cycle_t			time_start;
	struct task_struct	*waiter;
	struct trace_array_cpu	*data[NR_CPUS];
};

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types, simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct special_entry, 0);		\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_boot, TRACE_BOOT);	\
		IF_ASSIGN(var, ent, struct ftrace_ret_entry, TRACE_FN_RET); \
		__ftrace_bad_type();					\
	} while (0)

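/*
 * Usage sketch (hypothetical handler, compiled out): a print_line
 * callback declares a pointer of the concrete entry type and lets
 * trace_assign_type() verify the entry's id and perform the cast.
 */
#if 0
static enum print_line_t example_print_fn(struct trace_iterator *iter)
{
	struct ftrace_entry *field;

	/* WARNs if iter->ent->type is not TRACE_FN */
	trace_assign_type(field, iter->ent);
	trace_seq_printf(&iter->seq, "0x%lx <- 0x%lx\n",
			 field->ip, field->parent_ip);
	return TRACE_TYPE_HANDLED;
}
#endif
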
/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2	/* Relay to other output functions */
};

/*
 * A specific tracer, represented by methods that operate on a trace array:
 */
struct tracer {
	const char		*name;
	void			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	struct tracer		*next;
	int			print_max;
};

struct trace_seq {
	unsigned char		buffer[PAGE_SIZE];
	unsigned int		len;
	unsigned int		readpos;
};

/*
 * Trace iterator - used by printout routines that present trace
 * results to users; these routines might sleep:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	void			*private;
	struct ring_buffer_iter	*buffer_iter[NR_CPUS];

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	int			cpu;
	u64			ts;

	unsigned long		iter_flags;
	loff_t			pos;
	long			idx;
};

int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);
void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);

void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags, int pc);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);
void tracing_record_cmdline(struct task_struct *tsk);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_special(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long arg1,
		   unsigned long arg2,
		   unsigned long arg3, int pc);
void trace_function(struct trace_array *tr,
		    struct trace_array_cpu *data,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_function_return(struct ftrace_retfunc *trace);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);

extern cycle_t ftrace_now(int cpu);

#ifdef CONFIG_FUNCTION_TRACER
void tracing_start_function_trace(void);
void tracing_stop_function_trace(void);
#else
# define tracing_start_function_trace()		do { } while (0)
# define tracing_stop_function_trace()		do { } while (0)
#endif

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
(*tracer_switch_func_t)(void *private,
			void *__rq,
			struct task_struct *prev,
			struct task_struct *next);

struct tracer_switch_ops {
	tracer_switch_func_t		func;
	void				*private;
	struct tracer_switch_ops	*next;
};

#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#endif

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_sysprof(struct tracer *trace,
					  struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
extern void trace_seq_print_cont(struct trace_seq *s,
				 struct trace_iterator *iter);
extern int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
		 unsigned long sym_flags);
extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
				 size_t cnt);
extern long ns2usecs(cycle_t nsec);
extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args);

extern unsigned long trace_flags;

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_RET_TRACER
extern enum print_line_t
print_return_function(struct trace_iterator *iter);
#else
static inline enum print_line_t
print_return_function(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT	= 0x01,
	TRACE_ITER_SYM_OFFSET	= 0x02,
	TRACE_ITER_SYM_ADDR	= 0x04,
	TRACE_ITER_VERBOSE	= 0x08,
	TRACE_ITER_RAW		= 0x10,
	TRACE_ITER_HEX		= 0x20,
	TRACE_ITER_BIN		= 0x40,
	TRACE_ITER_BLOCK	= 0x80,
	TRACE_ITER_STACKTRACE	= 0x100,
	TRACE_ITER_SCHED_TREE	= 0x200,
	TRACE_ITER_PRINTK	= 0x400,
	TRACE_ITER_PREEMPTONLY	= 0x800,
};

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

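/*
 * Usage sketch (illustrative only, compiled out): symbol-printing code
 * passes the masked bits down and can test individual flags, e.g. to
 * append a raw address. The helper name and format are assumptions.
 */
#if 0
static void example_print_ip(struct trace_seq *s, unsigned long ip)
{
	/* print the symbol according to the current symbol options */
	seq_print_ip_sym(s, ip, trace_flags & TRACE_ITER_SYM_MASK);
	if (trace_flags & TRACE_ITER_SYM_ADDR)
		trace_seq_printf(s, " <%lx>", ip);
}
#endif
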
extern struct tracer nop_trace;

/**
 * ftrace_preempt_disable - disable preemption scheduler safe
 *
 * When tracing can happen inside the scheduler, there exist
 * cases where the tracing might happen before the need_resched
 * flag is checked. If this happens and the tracer calls
 * preempt_enable (after a disable), a schedule might take place
 * causing an infinite recursion.
 *
 * To prevent this, we read the need_resched flag before
 * disabling preemption. When we want to enable preemption we
 * check the flag; if it is set, then we call preempt_enable_no_resched.
 * Otherwise, we call preempt_enable.
 *
 * The rationale for doing the above is that if need_resched is set
 * and we have yet to reschedule, we are either in an atomic location
 * (where we do not need to check for scheduling) or we are inside
 * the scheduler and do not want to resched.
 */
static inline int ftrace_preempt_disable(void)
{
	int resched;

	resched = need_resched();
	preempt_disable_notrace();

	return resched;
}

/**
 * ftrace_preempt_enable - enable preemption scheduler safe
 * @resched: the return value from ftrace_preempt_disable
 *
 * This is a scheduler safe way to enable preemption and not miss
 * any preemption checks. The disable side saved the state of
 * need_resched. If resched is set, then we were either inside an
 * atomic or are inside the scheduler (we would have already scheduled
 * otherwise). In this case, we do not want to call normal
 * preempt_enable, but preempt_enable_no_resched instead.
 */
static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

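/*
 * Usage sketch (illustrative only, compiled out): callers pair the two
 * helpers and hand the saved need_resched state from the disable side
 * back to the enable side. The function name is hypothetical.
 */
#if 0
static void example_record_event(void)
{
	int resched;

	resched = ftrace_preempt_disable();
	/* ... write a trace entry; safe even from within the scheduler ... */
	ftrace_preempt_enable(resched);
}
#endif
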
#endif /* _LINUX_KERNEL_TRACE_H */