#include <linux/perf_event.h>
#include <linux/types.h>

#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/insn.h>

#include "perf_event.h"

enum {
	LBR_FORMAT_32		= 0x00,
	LBR_FORMAT_LIP		= 0x01,
	LBR_FORMAT_EIP		= 0x02,
	LBR_FORMAT_EIP_FLAGS	= 0x03,
	LBR_FORMAT_EIP_FLAGS2	= 0x04,
	LBR_FORMAT_INFO		= 0x05,
	LBR_FORMAT_MAX_KNOWN	= LBR_FORMAT_INFO,
};

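/*
 * For the older EIP-flags LBR formats, lbr_desc[] records which status
 * flags (misprediction, TSX state) are packed into the top bits of the
 * LBR_FROM MSR itself, rather than reported in a separate LBR_INFO MSR.
 */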
static enum {
	LBR_EIP_FLAGS		= 1,
	LBR_TSX			= 2,
} lbr_desc[LBR_FORMAT_MAX_KNOWN + 1] = {
	[LBR_FORMAT_EIP_FLAGS]  = LBR_EIP_FLAGS,
	[LBR_FORMAT_EIP_FLAGS2] = LBR_EIP_FLAGS | LBR_TSX,
};

/*
 * Intel LBR_SELECT bits
 * Intel Vol3a, April 2011, Section 16.7 Table 16-10
 *
 * Hardware branch filter (not available on all CPUs)
 */
#define LBR_KERNEL_BIT		0 /* do not capture at ring0 */
#define LBR_USER_BIT		1 /* do not capture at ring > 0 */
#define LBR_JCC_BIT		2 /* do not capture conditional branches */
#define LBR_REL_CALL_BIT	3 /* do not capture relative calls */
#define LBR_IND_CALL_BIT	4 /* do not capture indirect calls */
#define LBR_RETURN_BIT		5 /* do not capture near returns */
#define LBR_IND_JMP_BIT		6 /* do not capture indirect jumps */
#define LBR_REL_JMP_BIT		7 /* do not capture relative jumps */
#define LBR_FAR_BIT		8 /* do not capture far branches */
#define LBR_CALL_STACK_BIT	9 /* enable call stack */

#define LBR_KERNEL	(1 << LBR_KERNEL_BIT)
#define LBR_USER	(1 << LBR_USER_BIT)
#define LBR_JCC		(1 << LBR_JCC_BIT)
#define LBR_REL_CALL	(1 << LBR_REL_CALL_BIT)
#define LBR_IND_CALL	(1 << LBR_IND_CALL_BIT)
#define LBR_RETURN	(1 << LBR_RETURN_BIT)
#define LBR_REL_JMP	(1 << LBR_REL_JMP_BIT)
#define LBR_IND_JMP	(1 << LBR_IND_JMP_BIT)
#define LBR_FAR		(1 << LBR_FAR_BIT)
#define LBR_CALL_STACK	(1 << LBR_CALL_STACK_BIT)

#define LBR_PLM (LBR_KERNEL | LBR_USER)

#define LBR_SEL_MASK	0x1ff	/* valid bits in LBR_SELECT */
#define LBR_NOT_SUPP	-1	/* LBR filter not supported */
#define LBR_IGN		0	/* ignored */

#define LBR_ANY		 \
	(LBR_JCC	|\
	 LBR_REL_CALL	|\
	 LBR_IND_CALL	|\
	 LBR_RETURN	|\
	 LBR_REL_JMP	|\
	 LBR_IND_JMP	|\
	 LBR_FAR)

#define LBR_FROM_FLAG_MISPRED	(1ULL << 63)
#define LBR_FROM_FLAG_IN_TX	(1ULL << 62)
#define LBR_FROM_FLAG_ABORT	(1ULL << 61)

/*
 * x86 control flow change classification
 * x86 control flow changes include branches, interrupts, traps, faults
 */
enum {
	X86_BR_NONE		= 0,      /* unknown */

	X86_BR_USER		= 1 << 0, /* branch target is user */
	X86_BR_KERNEL		= 1 << 1, /* branch target is kernel */

	X86_BR_CALL		= 1 << 2, /* call */
	X86_BR_RET		= 1 << 3, /* return */
	X86_BR_SYSCALL		= 1 << 4, /* syscall */
	X86_BR_SYSRET		= 1 << 5, /* syscall return */
	X86_BR_INT		= 1 << 6, /* sw interrupt */
	X86_BR_IRET		= 1 << 7, /* return from interrupt */
	X86_BR_JCC		= 1 << 8, /* conditional */
	X86_BR_JMP		= 1 << 9, /* jump */
	X86_BR_IRQ		= 1 << 10,/* hw interrupt or trap or fault */
	X86_BR_IND_CALL		= 1 << 11,/* indirect calls */
	X86_BR_ABORT		= 1 << 12,/* transaction abort */
	X86_BR_IN_TX		= 1 << 13,/* in transaction */
	X86_BR_NO_TX		= 1 << 14,/* not in transaction */
	X86_BR_ZERO_CALL	= 1 << 15,/* zero length call */
	X86_BR_CALL_STACK	= 1 << 16,/* call stack */
	X86_BR_IND_JMP		= 1 << 17,/* indirect jump */
};

#define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
#define X86_BR_ANYTX (X86_BR_NO_TX | X86_BR_IN_TX)

#define X86_BR_ANY       \
	(X86_BR_CALL    |\
	 X86_BR_RET     |\
	 X86_BR_SYSCALL |\
	 X86_BR_SYSRET  |\
	 X86_BR_INT     |\
	 X86_BR_IRET    |\
	 X86_BR_JCC     |\
	 X86_BR_JMP     |\
	 X86_BR_IRQ     |\
	 X86_BR_ABORT   |\
	 X86_BR_IND_CALL |\
	 X86_BR_IND_JMP |\
	 X86_BR_ZERO_CALL)

#define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)

#define X86_BR_ANY_CALL		 \
	(X86_BR_CALL		|\
	 X86_BR_IND_CALL	|\
	 X86_BR_ZERO_CALL	|\
	 X86_BR_SYSCALL		|\
	 X86_BR_IRQ		|\
	 X86_BR_INT)

static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);

/*
 * We only support LBR implementations that have FREEZE_LBRS_ON_PMI
 * otherwise it becomes near impossible to get a reliable stack.
 */

static void __intel_pmu_lbr_enable(bool pmi)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	u64 debugctl, lbr_select = 0, orig_debugctl;

	/*
	 * No need to reprogram LBR_SELECT in a PMI, as it
	 * did not change.
	 */
	if (cpuc->lbr_sel && !pmi) {
		lbr_select = cpuc->lbr_sel->config;
		wrmsrl(MSR_LBR_SELECT, lbr_select);
	}

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	orig_debugctl = debugctl;
	debugctl |= DEBUGCTLMSR_LBR;
	/*
	 * LBR callstack does not work well with FREEZE_LBRS_ON_PMI.
	 * If FREEZE_LBRS_ON_PMI is set, PMI near call/return instructions
	 * may cause superfluous increase/decrease of LBR_TOS.
	 */
	if (!(lbr_select & LBR_CALL_STACK))
		debugctl |= DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
	if (orig_debugctl != debugctl)
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}

static void __intel_pmu_lbr_disable(void)
{
	u64 debugctl;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}

static void intel_pmu_lbr_reset_32(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++)
		wrmsrl(x86_pmu.lbr_from + i, 0);
}

static void intel_pmu_lbr_reset_64(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		wrmsrl(x86_pmu.lbr_from + i, 0);
		wrmsrl(x86_pmu.lbr_to + i, 0);
		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			wrmsrl(MSR_LBR_INFO_0 + i, 0);
	}
}

void intel_pmu_lbr_reset(void)
{
	if (!x86_pmu.lbr_nr)
		return;

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
		intel_pmu_lbr_reset_32();
	else
		intel_pmu_lbr_reset_64();
}

/*
 * TOS = most recently recorded branch
 */
static inline u64 intel_pmu_lbr_tos(void)
{
	u64 tos;

	rdmsrl(x86_pmu.lbr_tos, tos);
	return tos;
}

enum {
	LBR_NONE,
	LBR_VALID,
};

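/*
 * The LBR MSRs form a ring buffer indexed by TOS: entry TOS is the most
 * recent branch, and (tos - i) & (lbr_nr - 1) walks back through the
 * power-of-two-sized stack from newest to oldest. For example, with
 * lbr_nr = 16 and tos = 3, the walk visits indices 3, 2, 1, 0, 15, ...
 */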
static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
{
	int i;
	unsigned lbr_idx, mask;
	u64 tos;

	if (task_ctx->lbr_callstack_users == 0 ||
	    task_ctx->lbr_stack_state == LBR_NONE) {
		intel_pmu_lbr_reset();
		return;
	}

	mask = x86_pmu.lbr_nr - 1;
	tos = intel_pmu_lbr_tos();
	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		lbr_idx = (tos - i) & mask;
		wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
		wrmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			wrmsrl(MSR_LBR_INFO_0 + i, task_ctx->lbr_info[i]);
	}
	task_ctx->lbr_stack_state = LBR_NONE;
}

static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
{
	int i;
	unsigned lbr_idx, mask;
	u64 tos;

	if (task_ctx->lbr_callstack_users == 0) {
		task_ctx->lbr_stack_state = LBR_NONE;
		return;
	}

	mask = x86_pmu.lbr_nr - 1;
	tos = intel_pmu_lbr_tos();
	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		lbr_idx = (tos - i) & mask;
		rdmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
		rdmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			rdmsrl(MSR_LBR_INFO_0 + i, task_ctx->lbr_info[i]);
	}
	task_ctx->lbr_stack_state = LBR_VALID;
}

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct x86_perf_task_context *task_ctx;

	/*
	 * If LBR callstack feature is enabled and the stack was saved when
	 * the task was scheduled out, restore the stack. Otherwise flush
	 * the LBR stack.
	 */
	task_ctx = ctx ? ctx->task_ctx_data : NULL;
	if (task_ctx) {
		if (sched_in) {
			__intel_pmu_lbr_restore(task_ctx);
			cpuc->lbr_context = ctx;
		} else {
			__intel_pmu_lbr_save(task_ctx);
		}
		return;
	}

	/*
	 * When sampling the branch stack in system-wide mode, it may be
	 * necessary to flush the stack on context switch. This happens
	 * when the branch stack does not tag its entries with the pid
	 * of the current task. Otherwise it becomes impossible to
	 * associate a branch entry with a task. This ambiguity is more
	 * likely to appear when the branch stack supports priv level
	 * filtering and the user sets it to monitor only at the user
	 * level (which could be a useful measurement in system-wide
	 * mode). In that case, the risk is high of having a branch
	 * stack with branches from multiple tasks.
	 */
	if (sched_in) {
		intel_pmu_lbr_reset();
		cpuc->lbr_context = ctx;
	}
}

static inline bool branch_user_callstack(unsigned br_sel)
{
	return (br_sel & X86_BR_USER) && (br_sel & X86_BR_CALL_STACK);
}

void intel_pmu_lbr_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct x86_perf_task_context *task_ctx;

	if (!x86_pmu.lbr_nr)
		return;

	/*
	 * Reset the LBR stack if we changed task context to
	 * avoid data leaks.
	 */
	if (event->ctx->task && cpuc->lbr_context != event->ctx) {
		intel_pmu_lbr_reset();
		cpuc->lbr_context = event->ctx;
	}
	cpuc->br_sel = event->hw.branch_reg.reg;

	if (branch_user_callstack(cpuc->br_sel) && event->ctx &&
					event->ctx->task_ctx_data) {
		task_ctx = event->ctx->task_ctx_data;
		task_ctx->lbr_callstack_users++;
	}

	cpuc->lbr_users++;
	perf_sched_cb_inc(event->ctx->pmu);
}

void intel_pmu_lbr_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct x86_perf_task_context *task_ctx;

	if (!x86_pmu.lbr_nr)
		return;

	if (branch_user_callstack(cpuc->br_sel) && event->ctx &&
					event->ctx->task_ctx_data) {
		task_ctx = event->ctx->task_ctx_data;
		task_ctx->lbr_callstack_users--;
	}

	cpuc->lbr_users--;
	WARN_ON_ONCE(cpuc->lbr_users < 0);
	perf_sched_cb_dec(event->ctx->pmu);

	if (cpuc->enabled && !cpuc->lbr_users) {
		__intel_pmu_lbr_disable();
		/* avoid stale pointer */
		cpuc->lbr_context = NULL;
	}
}

void intel_pmu_lbr_enable_all(bool pmi)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->lbr_users)
		__intel_pmu_lbr_enable(pmi);
}

void intel_pmu_lbr_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->lbr_users)
		__intel_pmu_lbr_disable();
}

static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
{
	unsigned long mask = x86_pmu.lbr_nr - 1;
	u64 tos = intel_pmu_lbr_tos();
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
		union {
			struct {
				u32 from;
				u32 to;
			};
			u64 lbr;
		} msr_lastbranch;

		rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);

		cpuc->lbr_entries[i].from	= msr_lastbranch.from;
		cpuc->lbr_entries[i].to		= msr_lastbranch.to;
		cpuc->lbr_entries[i].mispred	= 0;
		cpuc->lbr_entries[i].predicted	= 0;
		cpuc->lbr_entries[i].reserved	= 0;
	}
	cpuc->lbr_stack.nr = i;
}

/*
 * Due to lack of segmentation in Linux the effective address (offset)
 * is the same as the linear address, allowing us to merge the LIP and EIP
 * LBR formats.
 */
static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
{
	unsigned long mask = x86_pmu.lbr_nr - 1;
	int lbr_format = x86_pmu.intel_cap.lbr_format;
	u64 tos = intel_pmu_lbr_tos();
	int i;
	int out = 0;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
		u64 from, to, mis = 0, pred = 0, in_tx = 0, abort = 0;
		int skip = 0;
		u16 cycles = 0;
		int lbr_flags = lbr_desc[lbr_format];

		rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
		rdmsrl(x86_pmu.lbr_to + lbr_idx, to);

		if (lbr_format == LBR_FORMAT_INFO) {
			u64 info;

			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, info);
			mis = !!(info & LBR_INFO_MISPRED);
			pred = !mis;
			in_tx = !!(info & LBR_INFO_IN_TX);
			abort = !!(info & LBR_INFO_ABORT);
			cycles = (info & LBR_INFO_CYCLES);
		}
		if (lbr_flags & LBR_EIP_FLAGS) {
			mis = !!(from & LBR_FROM_FLAG_MISPRED);
			pred = !mis;
			skip = 1;
		}
		if (lbr_flags & LBR_TSX) {
			in_tx = !!(from & LBR_FROM_FLAG_IN_TX);
			abort = !!(from & LBR_FROM_FLAG_ABORT);
			skip = 3;
		}
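		/*
		 * Shifting out the flag bits consumed above and
		 * arithmetic-shifting back sign-extends bit (63 - skip),
		 * restoring a canonical address in "from".
		 */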
		from = (u64)((((s64)from) << skip) >> skip);

		/*
		 * Some CPUs report duplicated abort records,
		 * with the second entry not having an abort bit set.
		 * Skip them here. This loop runs backwards,
		 * so we need to undo the previous record.
		 * If the abort just happened outside the window
		 * the extra entry cannot be removed.
		 */
		if (abort && x86_pmu.lbr_double_abort && out > 0)
			out--;

		cpuc->lbr_entries[out].from	 = from;
		cpuc->lbr_entries[out].to	 = to;
		cpuc->lbr_entries[out].mispred	 = mis;
		cpuc->lbr_entries[out].predicted = pred;
		cpuc->lbr_entries[out].in_tx	 = in_tx;
		cpuc->lbr_entries[out].abort	 = abort;
		cpuc->lbr_entries[out].cycles	 = cycles;
		cpuc->lbr_entries[out].reserved	 = 0;
		out++;
	}
	cpuc->lbr_stack.nr = out;
}

void intel_pmu_lbr_read(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!cpuc->lbr_users)
		return;

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
		intel_pmu_lbr_read_32(cpuc);
	else
		intel_pmu_lbr_read_64(cpuc);

	intel_pmu_lbr_filter(cpuc);
}

/*
 * SW filter is used:
 * - in case there is no HW filter
 * - in case the HW filter has errata or limitations
 */
static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
{
	u64 br_type = event->attr.branch_sample_type;
	int mask = 0;

	if (br_type & PERF_SAMPLE_BRANCH_USER)
		mask |= X86_BR_USER;

	if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
		mask |= X86_BR_KERNEL;

	/* we ignore BRANCH_HV here */

	if (br_type & PERF_SAMPLE_BRANCH_ANY)
		mask |= X86_BR_ANY;

	if (br_type & PERF_SAMPLE_BRANCH_ANY_CALL)
		mask |= X86_BR_ANY_CALL;

	if (br_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
		mask |= X86_BR_RET | X86_BR_IRET | X86_BR_SYSRET;

	if (br_type & PERF_SAMPLE_BRANCH_IND_CALL)
		mask |= X86_BR_IND_CALL;

	if (br_type & PERF_SAMPLE_BRANCH_ABORT_TX)
		mask |= X86_BR_ABORT;

	if (br_type & PERF_SAMPLE_BRANCH_IN_TX)
		mask |= X86_BR_IN_TX;

	if (br_type & PERF_SAMPLE_BRANCH_NO_TX)
		mask |= X86_BR_NO_TX;

	if (br_type & PERF_SAMPLE_BRANCH_COND)
		mask |= X86_BR_JCC;

	if (br_type & PERF_SAMPLE_BRANCH_CALL_STACK) {
		if (!x86_pmu_has_lbr_callstack())
			return -EOPNOTSUPP;
		if (mask & ~(X86_BR_USER | X86_BR_KERNEL))
			return -EINVAL;
		mask |= X86_BR_CALL | X86_BR_IND_CALL | X86_BR_RET |
			X86_BR_CALL_STACK;
	}

	if (br_type & PERF_SAMPLE_BRANCH_IND_JUMP)
		mask |= X86_BR_IND_JMP;

	/*
	 * stash actual user request into reg, it may
	 * be used by fixup code for some CPU
	 */
	event->hw.branch_reg.reg = mask;
	return 0;
}

/*
 * setup the HW LBR filter
 * Used only when available, may not be enough to disambiguate
 * all branches, may need the help of the SW filter
 */
static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
{
	struct hw_perf_event_extra *reg;
	u64 br_type = event->attr.branch_sample_type;
	u64 mask = 0, v;
	int i;

	for (i = 0; i < PERF_SAMPLE_BRANCH_MAX_SHIFT; i++) {
		if (!(br_type & (1ULL << i)))
			continue;

		v = x86_pmu.lbr_sel_map[i];
		if (v == LBR_NOT_SUPP)
			return -EOPNOTSUPP;

		if (v != LBR_IGN)
			mask |= v;
	}
	reg = &event->hw.branch_reg;
	reg->idx = EXTRA_REG_LBR;

	/*
	 * The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate
	 * in suppress mode. So LBR_SELECT should be set to
	 * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK)
	 */
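	/*
	 * Example (hypothetical request): user-only conditional branches
	 * yield mask = LBR_USER | LBR_JCC = 0x6, so the XOR below gives
	 * reg->config = 0x6 ^ 0x1ff = 0x1f9, i.e. every suppress bit set
	 * except LBR_USER and LBR_JCC.
	 */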
	reg->config = mask ^ x86_pmu.lbr_sel_mask;

	return 0;
}

int intel_pmu_setup_lbr_filter(struct perf_event *event)
{
	int ret = 0;

	/*
	 * no LBR on this PMU
	 */
	if (!x86_pmu.lbr_nr)
		return -EOPNOTSUPP;

	/*
	 * setup SW LBR filter
	 */
	ret = intel_pmu_setup_sw_lbr_filter(event);
	if (ret)
		return ret;

	/*
	 * setup HW LBR filter, if any
	 */
	if (x86_pmu.lbr_sel_map)
		ret = intel_pmu_setup_hw_lbr_filter(event);

	return ret;
}

/*
 * return the type of control flow change at address "from"
 * instruction is not necessarily a branch (in case of interrupt).
 *
 * The branch type returned also includes the priv level of the
 * target of the control flow change (X86_BR_USER, X86_BR_KERNEL).
 *
 * If a branch type is unknown OR the instruction cannot be
 * decoded (e.g., text page not present), then X86_BR_NONE is
 * returned.
 */
static int branch_type(unsigned long from, unsigned long to, int abort)
{
	struct insn insn;
	void *addr;
	int bytes_read, bytes_left;
	int ret = X86_BR_NONE;
	int ext, to_plm, from_plm;
	u8 buf[MAX_INSN_SIZE];
	int is64 = 0;

	to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
	from_plm = kernel_ip(from) ? X86_BR_KERNEL : X86_BR_USER;

	/*
	 * maybe zero if lbr did not fill up after a reset by the time
	 * we get a PMU interrupt
	 */
	if (from == 0 || to == 0)
		return X86_BR_NONE;

	if (abort)
		return X86_BR_ABORT | to_plm;

	if (from_plm == X86_BR_USER) {
		/*
		 * can happen if measuring at the user level only
		 * and we interrupt in a kernel thread, e.g., idle.
		 */
		if (!current->mm)
			return X86_BR_NONE;

		/* may fail if text not present */
		bytes_left = copy_from_user_nmi(buf, (void __user *)from,
						MAX_INSN_SIZE);
		bytes_read = MAX_INSN_SIZE - bytes_left;
		if (!bytes_read)
			return X86_BR_NONE;

		addr = buf;
	} else {
		/*
		 * The LBR logs any address in the IP, even if the IP just
		 * faulted. This means userspace can control the from address.
		 * Ensure we don't blindly read any address by validating it is
		 * a known text address.
		 */
		if (kernel_text_address(from)) {
			addr = (void *)from;
			/*
			 * Assume we can get the maximum possible size
			 * when grabbing kernel data. This is not
			 * _strictly_ true since we could possibly be
			 * executing up next to a memory hole, but
			 * it is very unlikely to be a problem.
			 */
			bytes_read = MAX_INSN_SIZE;
		} else {
			return X86_BR_NONE;
		}
	}

	/*
	 * decoder needs to know the ABI especially
	 * on 64-bit systems running 32-bit apps
	 */
#ifdef CONFIG_X86_64
	is64 = kernel_ip((unsigned long)addr) || !test_thread_flag(TIF_IA32);
#endif
	insn_init(&insn, addr, bytes_read, is64);
	insn_get_opcode(&insn);
	if (!insn.opcode.got)
		return X86_BR_ABORT;

	switch (insn.opcode.bytes[0]) {
	case 0xf:
		switch (insn.opcode.bytes[1]) {
		case 0x05: /* syscall */
		case 0x34: /* sysenter */
			ret = X86_BR_SYSCALL;
			break;
		case 0x07: /* sysret */
		case 0x35: /* sysexit */
			ret = X86_BR_SYSRET;
			break;
		case 0x80 ... 0x8f: /* conditional */
			ret = X86_BR_JCC;
			break;
		default:
			ret = X86_BR_NONE;
		}
		break;
	case 0x70 ... 0x7f: /* conditional */
		ret = X86_BR_JCC;
		break;
	case 0xc2: /* near ret */
	case 0xc3: /* near ret */
	case 0xca: /* far ret */
	case 0xcb: /* far ret */
		ret = X86_BR_RET;
		break;
	case 0xcf: /* iret */
		ret = X86_BR_IRET;
		break;
	case 0xcc ... 0xce: /* int */
		ret = X86_BR_INT;
		break;
	case 0xe8: /* call near rel */
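		/*
		 * A near relative call with zero displacement only pushes
		 * the return address (it "calls" the next instruction);
		 * compilers emit it to read the current IP, so it is
		 * classified separately from real calls.
		 */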
		insn_get_immediate(&insn);
		if (insn.immediate1.value == 0) {
			/* zero length call */
			ret = X86_BR_ZERO_CALL;
			break;
		}
	case 0x9a: /* call far absolute */
		ret = X86_BR_CALL;
		break;
	case 0xe0 ... 0xe3: /* loop jmp */
		ret = X86_BR_JCC;
		break;
	case 0xe9 ... 0xeb: /* jmp */
		ret = X86_BR_JMP;
		break;
	case 0xff: /* call near absolute, call far absolute ind */
		insn_get_modrm(&insn);
		ext = (insn.modrm.bytes[0] >> 3) & 0x7;
		switch (ext) {
		case 2: /* near ind call */
		case 3: /* far ind call */
			ret = X86_BR_IND_CALL;
			break;
		case 4:
		case 5:
			ret = X86_BR_IND_JMP;
			break;
		}
		break;
	default:
		ret = X86_BR_NONE;
	}
	/*
	 * interrupts, traps, faults (and thus ring transition) may
	 * occur on any instruction. Thus, to classify them correctly,
	 * we need to first look at the from and to priv levels. If they
	 * are different and to is in the kernel, then it indicates
	 * a ring transition. If the from instruction is not a ring
	 * transition instr (syscall, sysenter, int), then it means
	 * it was an irq, trap or fault.
	 *
	 * we have no way of detecting kernel to kernel faults.
	 */
	if (from_plm == X86_BR_USER && to_plm == X86_BR_KERNEL
	    && ret != X86_BR_SYSCALL && ret != X86_BR_INT)
		ret = X86_BR_IRQ;

	/*
	 * branch priv level determined by target as
	 * is done by HW when LBR_SELECT is implemented
	 */
	if (ret != X86_BR_NONE)
		ret |= to_plm;

	return ret;
}

/*
 * implement actual branch filter based on user demand.
 * Hardware may not exactly satisfy that request, thus
 * we need to inspect opcodes. Mismatched branches are
 * discarded. Therefore, the number of branches returned
 * in a PERF_SAMPLE_BRANCH_STACK sample may vary.
 */
static void
intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
{
	u64 from, to;
	int br_sel = cpuc->br_sel;
	int i, j, type;
	bool compress = false;

	/* if sampling all branches, then nothing to filter */
	if ((br_sel & X86_BR_ALL) == X86_BR_ALL)
		return;

	for (i = 0; i < cpuc->lbr_stack.nr; i++) {

		from = cpuc->lbr_entries[i].from;
		to = cpuc->lbr_entries[i].to;

		type = branch_type(from, to, cpuc->lbr_entries[i].abort);
		if (type != X86_BR_NONE && (br_sel & X86_BR_ANYTX)) {
			if (cpuc->lbr_entries[i].in_tx)
				type |= X86_BR_IN_TX;
			else
				type |= X86_BR_NO_TX;
		}

		/* if type does not correspond, then discard */
		if (type == X86_BR_NONE || (br_sel & type) != type) {
			cpuc->lbr_entries[i].from = 0;
			compress = true;
		}
	}

	if (!compress)
		return;

	/* remove all entries with from=0 */
	for (i = 0; i < cpuc->lbr_stack.nr; ) {
		if (!cpuc->lbr_entries[i].from) {
			j = i;
			while (++j < cpuc->lbr_stack.nr)
				cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j];
			cpuc->lbr_stack.nr--;
			if (!cpuc->lbr_entries[i].from)
				continue;
		}
		i++;
	}
}

/*
 * Map interface branch filters onto LBR filters
 */
static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_REL_JMP
						| LBR_IND_JMP | LBR_FAR,
	/*
	 * NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches
	 */
	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] =
	 LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR,
	/*
	 * NHM/WSM erratum: must include IND_JMP to capture IND_CALL
	 */
	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL | LBR_IND_JMP,
	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
};

static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_FAR,
	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
						| LBR_FAR,
	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL,
	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
};

static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_FAR,
	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
						| LBR_FAR,
	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL,
	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
	[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
						| LBR_RETURN | LBR_CALL_STACK,
	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
};
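
/*
 * In call-stack mode calls and returns must both be recorded: a call
 * pushes an LBR entry and a matching return pops it again, which is
 * why PERF_SAMPLE_BRANCH_CALL_STACK also maps to LBR_RETURN above.
 */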

/* core */
void __init intel_pmu_lbr_init_core(void)
{
	x86_pmu.lbr_nr     = 4;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
	x86_pmu.lbr_to     = MSR_LBR_CORE_TO;

	/*
	 * SW branch filter usage:
	 * - compensate for lack of HW filter
	 */
	pr_cont("4-deep LBR, ");
}

/* nehalem/westmere */
void __init intel_pmu_lbr_init_nhm(void)
{
	x86_pmu.lbr_nr     = 16;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to     = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - workaround LBR_SEL errata (see above)
	 * - support syscall, sysret capture.
	 *   That requires LBR_FAR but that means far
	 *   jmps need to be filtered out
	 */
	pr_cont("16-deep LBR, ");
}

/* sandy bridge */
void __init intel_pmu_lbr_init_snb(void)
{
	x86_pmu.lbr_nr	 = 16;
	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to	 = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - support syscall, sysret capture.
	 *   That requires LBR_FAR but that means far
	 *   jmps need to be filtered out
	 */
	pr_cont("16-deep LBR, ");
}

/* haswell */
void intel_pmu_lbr_init_hsw(void)
{
	x86_pmu.lbr_nr	 = 16;
	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to	 = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;

	pr_cont("16-deep LBR, ");
}

/* atom */
void __init intel_pmu_lbr_init_atom(void)
{
	/*
	 * only models starting at stepping 10 seem
	 * to have an operational LBR which can freeze
	 * on PMU interrupt
	 */
	if (boot_cpu_data.x86_model == 28
	    && boot_cpu_data.x86_mask < 10) {
		pr_cont("LBR disabled due to erratum");
		return;
	}

	x86_pmu.lbr_nr	   = 8;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
	x86_pmu.lbr_to	   = MSR_LBR_CORE_TO;

	/*
	 * SW branch filter usage:
	 * - compensate for lack of HW filter
	 */
	pr_cont("8-deep LBR, ");
}