#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC				32
#define INTEL_PMC_MAX_FIXED				3
#define INTEL_PMC_IDX_FIXED				32

#define X86_PMC_IDX_MAX					64

#define MSR_ARCH_PERFMON_PERFCTR0			0xc1
#define MSR_ARCH_PERFMON_PERFCTR1			0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0			0x186
#define MSR_ARCH_PERFMON_EVENTSEL1			0x187

#define ARCH_PERFMON_EVENTSEL_EVENT			0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK			0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR			(1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE			(1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL		(1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT			(1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY			(1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL

#define AMD_PERFMON_EVENTSEL_GUESTONLY			(1ULL << 40)
#define AMD_PERFMON_EVENTSEL_HOSTONLY			(1ULL << 41)

#define AMD64_EVENTSEL_EVENT	\
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK	\
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define X86_RAW_EVENT_MASK		\
	(ARCH_PERFMON_EVENTSEL_EVENT |	\
	 ARCH_PERFMON_EVENTSEL_UMASK |	\
	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
	 ARCH_PERFMON_EVENTSEL_INV   |	\
	 ARCH_PERFMON_EVENTSEL_CMASK)
#define AMD64_RAW_EVENT_MASK		\
	(X86_RAW_EVENT_MASK          |	\
	 AMD64_EVENTSEL_EVENT)
#define AMD64_NUM_COUNTERS				4
#define AMD64_NUM_COUNTERS_CORE				6
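
/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * how the ARCH_PERFMON_EVENTSEL_* bits above combine into one raw event
 * select value of the kind written to MSR_ARCH_PERFMON_EVENTSEL0 (or passed
 * as a raw perf config, which the x86 perf code sanitizes against
 * X86_RAW_EVENT_MASK).  The helper name is hypothetical.
 */
static inline u64 example_encode_eventsel(u8 event, u8 umask)
{
	u64 val;

	val  = event & ARCH_PERFMON_EVENTSEL_EVENT;		/* bits 0-7  */
	val |= ((u64)umask << 8) & ARCH_PERFMON_EVENTSEL_UMASK;	/* bits 8-15 */
	val |= ARCH_PERFMON_EVENTSEL_USR;	/* count in user mode   */
	val |= ARCH_PERFMON_EVENTSEL_OS;	/* count in kernel mode */
	val |= ARCH_PERFMON_EVENTSEL_ENABLE;	/* enable the counter   */

	return val;
}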

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED		6
#define ARCH_PERFMON_EVENTS_COUNT			7

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved:19;
	} split;
	unsigned int full;
};

struct x86_pmu_capability {
	int		version;
	int		num_counters_gp;
	int		num_counters_fixed;
	int		bit_width_gp;
	int		bit_width_fixed;
	unsigned int	events_mask;
	int		events_mask_len;
};
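
/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * decoding CPUID leaf 0xa through the unions above into a
 * struct x86_pmu_capability.  The kernel's perf_get_x86_pmu_capability()
 * (declared further down) reports the state the perf code actually uses;
 * this sketch reads CPUID directly and assumes <asm/processor.h> for cpuid().
 */
static inline void example_fill_pmu_capability(struct x86_pmu_capability *cap)
{
	union cpuid10_eax eax;
	union cpuid10_ebx ebx;
	union cpuid10_edx edx;
	unsigned int unused;

	cpuid(0xa, &eax.full, &ebx.full, &unused, &edx.full);

	cap->version		= eax.split.version_id;
	cap->num_counters_gp	= eax.split.num_counters;
	cap->bit_width_gp	= eax.split.bit_width;
	cap->events_mask	= ebx.full;	/* a set bit means "event not available" */
	cap->events_mask_len	= eax.split.mask_length;
	cap->num_counters_fixed	= edx.split.num_counters_fixed;
	cap->bit_width_fixed	= edx.split.bit_width_fixed;
}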

/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL		0x38d

/*
 * The counts are available in three separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0		0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS	(INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1		0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES		(INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2		0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES		(INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES		(1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)
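
/*
 * Illustrative sketch (editorial addition): programming fixed counter 0
 * (Instr_Retired.Any) by hand.  Each fixed counter owns a 4-bit field in
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL (bits 0-1 select the ring level) and a
 * global enable bit at position INTEL_PMC_IDX_FIXED + n in
 * MSR_CORE_PERF_GLOBAL_CTRL (from <asm/msr-index.h>).  Assumes <asm/msr.h>
 * for rdmsrl()/wrmsrl(); real code lets the perf core own these MSRs.
 */
static inline u64 example_start_fixed_instruction_counter(void)
{
	u64 ctrl, val;

	rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
	ctrl |= 0x3;			/* fixed ctr 0: count ring 0 and ring 3 */
	wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);

	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, val);
	val |= 1ULL << INTEL_PMC_IDX_FIXED_INSTRUCTIONS;
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, val);

	rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0, val);	/* current count */
	return val;
}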

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS		(INTEL_PMC_IDX_FIXED + 16)

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES		0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL			(1U<<0)
#define IBS_CAPS_FETCHSAM		(1U<<1)
#define IBS_CAPS_OPSAM			(1U<<2)
#define IBS_CAPS_RDWROPCNT		(1U<<3)
#define IBS_CAPS_OPCNT			(1U<<4)
#define IBS_CAPS_BRNTRGT		(1U<<5)
#define IBS_CAPS_OPCNTEXT		(1U<<6)
#define IBS_CAPS_RIPINVALIDCHK		(1U<<7)

#define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
					 | IBS_CAPS_FETCHSAM	\
					 | IBS_CAPS_OPSAM)

/*
 * IBS APIC setup
 */
#define IBSCTL				0x1cc
#define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK		0x0F

/* ibs fetch bits/masks */
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT		0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT	0x0000FFFFULL

/* ibs op bits/masks */
/* lower 4 bits of the current count are ignored: */
#define IBS_OP_CUR_CNT		(0xFFFF0ULL<<32)
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)
#define IBS_OP_MAX_CNT		0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
#define IBS_RIP_INVALID		(1ULL<<38)
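
/*
 * Illustrative sketch (editorial addition): composing an IBS op control word
 * from the masks above.  The max-count field stores the sample period
 * divided by 16 (the hardware ignores the low 4 bits of the count), hence
 * the shift by 4.  Writing the result to MSR_AMD64_IBSOPCTL (from
 * <asm/msr-index.h>) is left to the perf IBS driver in real code.
 */
static inline u64 example_ibs_op_ctl(u64 period, int count_dispatched_ops)
{
	u64 ctl;

	ctl = (period >> 4) & IBS_OP_MAX_CNT;	/* max count, in units of 16 */
	if (count_dispatched_ops)
		ctl |= IBS_OP_CNT_CTL;		/* count dispatched ops, not cycles */
	ctl |= IBS_OP_ENABLE;			/* start op sampling */

	return ctl;
}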

#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
#else
static inline u32 get_ibs_caps(void) { return 0; }
#endif
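
/*
 * Illustrative sketch (editorial addition): a typical consumer of
 * get_ibs_caps().  The returned word uses the IBS_CAPS_* bits above and is
 * zero when IBS (or the local APIC support it needs) is absent, so a single
 * capability test suffices.
 */
static inline int example_ibs_op_sampling_supported(void)
{
	return !!(get_ibs_caps() & IBS_CAPS_OPSAM);
}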

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups.
 * This flag is otherwise unused and ABI specified to be 0, so nobody should
 * care what we do with it.
 */
#define PERF_EFLAGS_EXACT	(1UL << 3)

struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs)	perf_misc_flags(regs)
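
/*
 * Illustrative sketch (editorial addition): the relationship between
 * PERF_EFLAGS_EXACT and perf_misc_flags().  The x86 implementation reports a
 * PEBS-corrected instruction pointer roughly like this;
 * PERF_RECORD_MISC_EXACT_IP comes from <linux/perf_event.h>, and
 * dereferencing pt_regs assumes <asm/ptrace.h> for the complete type.
 */
static inline unsigned long example_exact_ip_flag(struct pt_regs *regs)
{
	unsigned long misc = 0;

	if (regs->flags & PERF_EFLAGS_EXACT)
		misc |= PERF_RECORD_MISC_EXACT_IP;

	return misc;
}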

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 from flags to pass exact information, see perf_misc_flags
 * and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)		{	\
	(regs)->ip = (__ip);					\
	(regs)->bp = caller_frame_pointer();			\
	(regs)->cs = __KERNEL_CS;				\
	regs->flags = 0;					\
	asm volatile(						\
		_ASM_MOV "%%"_ASM_SP ", %0\n"			\
		: "=m" ((regs)->sp)				\
		:: "memory"					\
	);							\
}
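
/*
 * Illustrative sketch (editorial addition): how perf_arch_fetch_caller_regs()
 * is typically consumed.  The generic perf core wraps it (together with a
 * memset of the regs) in perf_fetch_caller_regs(); a direct caller, assuming
 * <linux/string.h> for memset(), <asm/ptrace.h> for the complete pt_regs,
 * and _THIS_IP_ from <linux/kernel.h> as the caller ip, would look like this:
 */
static inline void example_capture_caller_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(*regs));		/* fields we don't set must read as 0 */
	perf_arch_fetch_caller_regs(regs, _THIS_IP_);
}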

struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
#else
static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	*nr = 0;
	return NULL;
}

static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline void perf_events_lapic_init(void)	{ }
static inline void perf_check_microcode(void) { }
#endif
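
/*
 * Illustrative sketch (editorial addition): how a hypervisor path (e.g.
 * KVM's VM-entry code) can consume perf_guest_get_msrs().  Each entry names
 * one MSR plus the values it should hold while the host or the guest runs;
 * load_switch_msr() is a hypothetical stand-in for the caller's own MSR
 * switching primitive.
 */
static inline void example_switch_pmu_msrs_for_guest(
		void (*load_switch_msr)(unsigned msr, u64 guest, u64 host))
{
	struct perf_guest_switch_msr *msrs;
	int i, nr;

	msrs = perf_guest_get_msrs(&nr);
	if (!msrs)
		return;

	for (i = 0; i < nr; i++)
		load_switch_msr(msrs[i].msr, msrs[i].guest, msrs[i].host);
}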

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);
#else
static inline void amd_pmu_enable_virt(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif

#endif /* _ASM_X86_PERF_EVENT_H */