ftrace: convert single large buffer into single pages.
kernel/trace/trace.h
#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>

/*
 * Function trace entry - function address and parent function address:
 */
struct ftrace_entry {
	unsigned long ip;
	unsigned long parent_ip;
};

/*
 * Context switch trace entry - which task (and prio) we switched from/to:
 */
struct ctx_switch_entry {
	unsigned int prev_pid;
	unsigned char prev_prio;
	unsigned char prev_state;
	unsigned int next_pid;
	unsigned char next_prio;
};

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01] 235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	char type;
	char cpu;
	char flags;
	char preempt_count;
	int pid;
	cycle_t t;
	unsigned long idx;
	union {
		struct ftrace_entry fn;
		struct ctx_switch_entry ctx;
	};
};

#define TRACE_ENTRY_SIZE sizeof(struct trace_entry)

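/*
 * Illustrative note (not part of the original header): since this patch
 * splits the per-CPU buffer into page-sized blocks, the number of entries
 * that fit into one block follows directly from TRACE_ENTRY_SIZE.  The
 * helper below is only a sketch; its name and the use of PAGE_SIZE here
 * are assumptions for illustration, not declarations from this file:
 *
 *	static inline unsigned long trace_entries_per_page(void)
 *	{
 *		return PAGE_SIZE / TRACE_ENTRY_SIZE;
 *	}
 */
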
/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example which task started
 * the trace, etc.)
 */
struct trace_array_cpu {
	void *trace;
	void *trace_current;
	unsigned trace_current_idx;
	struct list_head trace_pages;
	unsigned long trace_idx;
	atomic_t disabled;
	unsigned long saved_latency;
	unsigned long critical_start;
	unsigned long critical_end;
	unsigned long critical_sequence;
	unsigned long nice;
	unsigned long policy;
	unsigned long rt_priority;
	cycle_t preempt_timestamp;
	pid_t pid;
	uid_t uid;
	char comm[TASK_COMM_LEN];
};

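/*
 * Illustrative sketch (not from the original file): with the buffer kept
 * as a list of page-sized blocks on trace_pages, reserving room for the
 * next entry amounts to bumping trace_current_idx and hopping to the next
 * block once the current one is full.  This pseudocode only shows how the
 * fields above relate; it is an assumption, not the in-tree code:
 *
 *	struct trace_entry *entry;
 *
 *	if (data->trace_current_idx + TRACE_ENTRY_SIZE > PAGE_SIZE) {
 *		data->trace_current = <next block on data->trace_pages>;
 *		data->trace_current_idx = 0;
 *	}
 *	entry = data->trace_current + data->trace_current_idx;
 *	data->trace_current_idx += TRACE_ENTRY_SIZE;
 */
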
struct trace_iterator;

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	unsigned long entries;
	long ctrl;
	int cpu;
	cycle_t time_start;
	struct trace_array_cpu *data[NR_CPUS];
};

/*
 * A specific tracer, represented by methods that operate on a trace array:
 */
struct tracer {
	const char *name;
	void (*init)(struct trace_array *tr);
	void (*reset)(struct trace_array *tr);
	void (*open)(struct trace_iterator *iter);
	void (*close)(struct trace_iterator *iter);
	void (*start)(struct trace_iterator *iter);
	void (*stop)(struct trace_iterator *iter);
	void (*ctrl_update)(struct trace_array *tr);
	struct tracer *next;
	int print_max;
};

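/*
 * Illustrative sketch (not part of the original header): a minimal tracer
 * plugin fills in the callbacks above and registers itself with
 * register_tracer().  The my_* names are hypothetical and only show how
 * the struct is meant to be used:
 *
 *	static void my_trace_init(struct trace_array *tr) { ... }
 *	static void my_trace_reset(struct trace_array *tr) { ... }
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "mytracer",
 *		.init	= my_trace_init,
 *		.reset	= my_trace_reset,
 *	};
 *
 *	static __init int init_my_tracer(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	device_initcall(init_my_tracer);
 */
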
/*
 * Trace iterator - used by printout routines that present trace
 * results to users and that might sleep, etc:
 */
struct trace_iterator {
	struct trace_array *tr;
	struct tracer *trace;
	struct trace_entry *ent;
	unsigned long iter_flags;
	loff_t pos;
	unsigned long next_idx[NR_CPUS];
	struct list_head *next_page[NR_CPUS];
	unsigned next_page_idx[NR_CPUS];
	long idx;
	int cpu;
};

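/*
 * Illustrative note (added, not from the original file): the per-CPU
 * next_idx/next_page/next_page_idx cursors presumably let the output code
 * walk each CPU's page list independently and pick the oldest pending
 * entry across CPUs at every step, while ent/cpu describe the entry that
 * was just chosen.
 */
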
void notrace tracing_reset(struct trace_array_cpu *data);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags);
void tracing_record_cmdline(struct task_struct *tsk);

void tracing_start_function_trace(void);
void tracing_stop_function_trace(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);

static inline notrace cycle_t now(int cpu)
{
	return cpu_clock(cpu);
}

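/*
 * Illustrative example (added, not from the original file): tracers
 * timestamp entries with now(); a caller that is already pinned to a CPU
 * might do something like:
 *
 *	entry->t = now(raw_smp_processor_id());
 */
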
#ifdef CONFIG_SCHED_TRACER
extern void notrace
wakeup_sched_switch(struct task_struct *prev, struct task_struct *next);
#else
static inline void
wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
{
}
#endif

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
(*tracer_switch_func_t)(void *private,
			struct task_struct *prev,
			struct task_struct *next);

struct tracer_switch_ops {
	tracer_switch_func_t func;
	void *private;
	struct tracer_switch_ops *next;
};

extern int register_tracer_switch(struct tracer_switch_ops *ops);
extern int unregister_tracer_switch(struct tracer_switch_ops *ops);

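/*
 * Illustrative sketch (not from the original file): a user of the context
 * switch hook fills in a tracer_switch_ops and registers it, after which
 * the callback runs on every sched switch.  The my_* names are
 * hypothetical:
 *
 *	static void my_switch_func(void *private,
 *				   struct task_struct *prev,
 *				   struct task_struct *next)
 *	{
 *		... record prev/next ...
 *	}
 *
 *	static struct tracer_switch_ops my_switch_ops = {
 *		.func		= my_switch_func,
 *		.private	= NULL,
 *	};
 *
 *	register_tracer_switch(&my_switch_ops);
 */
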
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#endif

#endif /* _LINUX_KERNEL_TRACE_H */