/* tools/perf/perf.h */
#ifndef _PERF_PERF_H
#define _PERF_PERF_H

#include <asm/unistd.h>

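/*
 * Per-architecture memory barriers and cpu_relax().
 *
 * The kernel writes perf samples into the shared mmap ring buffer
 * while this tool reads them from user space, so real hardware
 * barriers that pair with the kernel's are needed on SMP.
 * Architectures that do not define cpu_relax() here fall back to the
 * plain compiler barrier defined below.
 */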
#if defined(__i386__)
#define mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define cpu_relax() asm volatile("rep; nop" ::: "memory")
#define CPUINFO_PROC "model name"
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 336
#endif
#endif

#if defined(__x86_64__)
#define mb() asm volatile("mfence" ::: "memory")
#define wmb() asm volatile("sfence" ::: "memory")
#define rmb() asm volatile("lfence" ::: "memory")
#define cpu_relax() asm volatile("rep; nop" ::: "memory")
#define CPUINFO_PROC "model name"
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 298
#endif
#endif

#ifdef __powerpc__
#include "../../arch/powerpc/include/uapi/asm/unistd.h"
#define mb() asm volatile ("sync" ::: "memory")
#define wmb() asm volatile ("sync" ::: "memory")
#define rmb() asm volatile ("sync" ::: "memory")
#define CPUINFO_PROC "cpu"
#endif

#ifdef __s390__
#define mb() asm volatile("bcr 15,0" ::: "memory")
#define wmb() asm volatile("bcr 15,0" ::: "memory")
#define rmb() asm volatile("bcr 15,0" ::: "memory")
#endif

#ifdef __sh__
#if defined(__SH4A__) || defined(__SH5__)
# define mb() asm volatile("synco" ::: "memory")
# define wmb() asm volatile("synco" ::: "memory")
# define rmb() asm volatile("synco" ::: "memory")
#else
# define mb() asm volatile("" ::: "memory")
# define wmb() asm volatile("" ::: "memory")
# define rmb() asm volatile("" ::: "memory")
#endif
#define CPUINFO_PROC "cpu type"
#endif

#ifdef __hppa__
#define mb() asm volatile("" ::: "memory")
#define wmb() asm volatile("" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC "cpu"
#endif

#ifdef __sparc__
#ifdef __LP64__
#define mb() asm volatile("ba,pt %%xcc, 1f\n" \
			  "membar #StoreLoad\n" \
			  "1:\n" ::: "memory")
#else
#define mb() asm volatile("" ::: "memory")
#endif
#define wmb() asm volatile("" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC "cpu"
#endif

#ifdef __alpha__
#define mb() asm volatile("mb" ::: "memory")
#define wmb() asm volatile("wmb" ::: "memory")
#define rmb() asm volatile("mb" ::: "memory")
#define CPUINFO_PROC "cpu model"
#endif

#ifdef __ia64__
#define mb() asm volatile ("mf" ::: "memory")
#define wmb() asm volatile ("mf" ::: "memory")
#define rmb() asm volatile ("mf" ::: "memory")
#define cpu_relax() asm volatile ("hint @pause" ::: "memory")
#define CPUINFO_PROC "model name"
#endif

#ifdef __arm__
/*
 * Use the __kuser_memory_barrier helper in the CPU helper page. See
 * arch/arm/kernel/entry-armv.S in the kernel source for details.
 */
#define mb() ((void(*)(void))0xffff0fa0)()
#define wmb() ((void(*)(void))0xffff0fa0)()
#define rmb() ((void(*)(void))0xffff0fa0)()
#define CPUINFO_PROC "Processor"
#endif

#ifdef __aarch64__
#define mb() asm volatile("dmb ish" ::: "memory")
#define wmb() asm volatile("dmb ishst" ::: "memory")
#define rmb() asm volatile("dmb ishld" ::: "memory")
#define cpu_relax() asm volatile("yield" ::: "memory")
#endif

#ifdef __mips__
#define mb() asm volatile( \
		".set mips2\n\t" \
		"sync\n\t" \
		".set mips0" \
		: /* no output */ \
		: /* no input */ \
		: "memory")
#define wmb() mb()
#define rmb() mb()
#define CPUINFO_PROC "cpu model"
#endif

#ifdef __arc__
#define mb() asm volatile("" ::: "memory")
#define wmb() asm volatile("" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC "Processor"
#endif

#ifdef __metag__
#define mb() asm volatile("" ::: "memory")
#define wmb() asm volatile("" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC "CPU"
#endif

#define barrier() asm volatile ("" ::: "memory")

#ifndef cpu_relax
#define cpu_relax() barrier()
#endif

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
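/*
 * A minimal consumer sketch (not part of this header's API), showing
 * how ACCESS_ONCE() and the barriers above pair up when draining the
 * perf mmap ring buffer; "pc" is assumed to point at the mapping's
 * leading struct perf_event_mmap_page:
 *
 *	u64 head = ACCESS_ONCE(pc->data_head);
 *	rmb();			// order the head read before the data
 *	... consume records in [pc->data_tail, head) ...
 *	mb();			// finish reading before freeing the space
 *	pc->data_tail = head;	// tell the kernel the space is reusable
 */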
#include <time.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include <linux/perf_event.h>
#include "util/types.h"
#include <stdbool.h>

/*
 * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
 * counters in the current task.
 */
#define PR_TASK_PERF_EVENTS_DISABLE 31
#define PR_TASK_PERF_EVENTS_ENABLE 32
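/*
 * Usage sketch, e.g. to keep a setup phase out of the counts:
 *
 *	prctl(PR_TASK_PERF_EVENTS_DISABLE);
 *	... uninstrumented setup work ...
 *	prctl(PR_TASK_PERF_EVENTS_ENABLE);
 */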

#ifndef NSEC_PER_SEC
# define NSEC_PER_SEC 1000000000ULL
#endif
#ifndef NSEC_PER_USEC
# define NSEC_PER_USEC 1000ULL
#endif

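/* Read CLOCK_MONOTONIC and collapse it to a single nanosecond value. */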
static inline unsigned long long rdclock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}

/*
 * Pick up some kernel type conventions:
 */
#define __user
#define asmlinkage

#define unlikely(x) __builtin_expect(!!(x), 0)
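/*
 * Kernel-style min(): comparing the two temporaries' addresses makes
 * the compiler warn when x and y have incompatible types; the (void)
 * comparison itself generates no code.
 */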
#define min(x, y) ({ \
	typeof(x) _min1 = (x); \
	typeof(y) _min2 = (y); \
	(void) (&_min1 == &_min2); \
	_min1 < _min2 ? _min1 : _min2; })

extern bool test_attr__enabled;
void test_attr__init(void);
void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
		     int fd, int group_fd, unsigned long flags);

static inline int
sys_perf_event_open(struct perf_event_attr *attr,
		    pid_t pid, int cpu, int group_fd,
		    unsigned long flags)
{
	int fd;

	fd = syscall(__NR_perf_event_open, attr, pid, cpu,
		     group_fd, flags);

	if (unlikely(test_attr__enabled))
		test_attr__open(attr, pid, cpu, fd, group_fd, flags);

	return fd;
}
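/*
 * A minimal usage sketch (values are illustrative, error handling
 * elided): count retired instructions in the calling thread on any
 * CPU, then read(2) the result from the returned fd.
 *
 *	struct perf_event_attr attr = {
 *		.type = PERF_TYPE_HARDWARE,
 *		.size = sizeof(attr),
 *		.config = PERF_COUNT_HW_INSTRUCTIONS,
 *	};
 *	int fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
 */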

#define MAX_COUNTERS 256
#define MAX_NR_CPUS 256

struct ip_callchain {
	u64 nr;
	u64 ips[0];
};

struct branch_flags {
	u64 mispred:1;
	u64 predicted:1;
	u64 in_tx:1;
	u64 abort:1;
	u64 reserved:60;
};

struct branch_entry {
	u64 from;
	u64 to;
	struct branch_flags flags;
};

struct branch_stack {
	u64 nr;
	struct branch_entry entries[0];
};
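/*
 * ip_callchain and branch_stack are variable-length records: "nr"
 * counts the entries that follow in the sample payload, and the
 * zero-length arrays overlay that trailing data.
 */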

extern const char *input_name;
extern bool perf_host, perf_guest;
extern const char perf_version_string[];

void pthread__unblock_sigwinch(void);

#include "util/target.h"

enum perf_call_graph_mode {
	CALLCHAIN_NONE,
	CALLCHAIN_FP,
	CALLCHAIN_DWARF
};

struct perf_record_opts {
	struct target target;
	int call_graph;
	bool group;
	bool inherit_stat;
	bool no_delay;
	bool no_inherit;
	bool no_samples;
	bool raw_samples;
	bool sample_address;
	bool sample_weight;
	bool sample_time;
	bool period;
	unsigned int freq;
	unsigned int mmap_pages;
	unsigned int user_freq;
	u64 branch_stack;
	u64 default_interval;
	u64 user_interval;
	u16 stack_dump_size;
	bool sample_transaction;
};

#endif /* _PERF_PERF_H */